Dataset schema:
    query      string  (length 9–9.05k)
    document   string  (length 10–222k)
    negatives  list    (19–20 items)
    metadata   dict
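The rows below follow this schema. For orientation only, here is what one record looks like as a Python dict; the field values are abbreviated placeholders, not an actual row:

# Illustrative record shape; the values are placeholders.
record = {
    "query": "Natural-language description of the target function",
    "document": "def target_function(): ...",               # the positive code snippet
    "negatives": ["def unrelated_a(): ...",                 # 19-20 distractor snippets
                  "def unrelated_b(): ..."],
    "metadata": {"objective": {"paired": [], "self": [],
                               "triplet": [["query", "document", "negatives"]]}},
}
assert isinstance(record["negatives"], list)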
Withdraw the image previously made downloadable. Depending on Ironic settings, removes the previously published file from the NFS or CIFS location where it was published.
def _unpublish_image(self, object_name, image_share_root):
    if image_share_root:
        published_file = os.path.join(image_share_root, object_name)
        ironic_utils.unlink_without_raise(published_file)
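For readers outside the Ironic codebase, a minimal self-contained sketch of the same pattern follows; the helper name and the placeholder share root are assumptions, not part of the code above:

import errno
import os

def unlink_without_raise(path):
    # Remove path, ignoring the case where it is already gone.
    try:
        os.unlink(path)
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise

def unpublish(object_name, image_share_root="/tmp/image_share"):  # placeholder root
    # Mirror the method above: only act when a share root is configured.
    if image_share_root:
        unlink_without_raise(os.path.join(image_share_root, object_name))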
[ "def _remove_existing(img):\n if os.path.exists(img):\n os.unlink(img)\n return img", "def cleanup_iso_image(context):\n datacenter_name = context.testbed.config['ISO_DATACENTER_NAME']\n datastore_path = context.testbed.config['ISO_DATASTORE_PATH']\n delete_file(context.client,\n context.service_instance,\n \"ISO Image\",\n datacenter_name,\n datastore_path)", "def cleanup_temp_images():\n if options[\"destination_kickstart_image\"] != options[\"destination_midway_kickstart_image\"]:\n midway_kickstart = os.path.join(options[\"destination_path\"],\n options[\"destination_midway_kickstart_image\"])\n remove_file(midway_kickstart)\n if options[\"destination_system_image\"] != options[\"destination_midway_system_image\"]:\n midway_system = os.path.join(options[\"destination_path\"],\n options[\"destination_midway_system_image\"])\n remove_file(midway_system)", "def purge():", "def _delete_unfunded(self):\n\n order_id = digest(json.dumps(self.contract, indent=4)).encode(\"hex\")\n if self.is_purchase:\n file_path = DATA_FOLDER + \"purchases/in progress/\" + order_id + \".json\"\n self.db.Purchases().delete_purchase(order_id)\n else:\n file_path = DATA_FOLDER + \"store/listings/in progress/\" + order_id + \".json\"\n self.db.Sales().delete_sale(order_id)\n if os.path.exists(file_path):\n os.remove(file_path)", "def erase_files(self):\n print('\\n\\n\\n We are erasing files!!! ')\n try:\n writeable_file = open('scrape-html-max/scrape.txt', 'w')\n writeable_file.close()\n print('\\n\\n opened file to erase and closed file.... ')\n writeable_file_2 = open('final-report/report.txt', 'w')\n writeable_file_2.close()\n except:\n print('\\n\\n Could not open file to erase')", "def delete_local_file(self):\n try:\n os.remove(self.absolute_file_path)\n except OSError:\n pass\n except TypeError:\n pass\n except Exception:\n logger.exception(\n \"Unexpected delete file exception.\", absolute_file_path=self.absolute_file_path\n )\n self.is_downloaded = False\n self.save()", "def download_off():\n mw.note_download_action.setEnabled(False)\n mw.side_download_action.setEnabled(False)\n mw.manual_download_action.setEnabled(False)", "def remove_all(self):\n log.info(\"In function remove_all (images).\")\n try:\n self.Zen.GUI.File.CloseAll.Execute()\n except Exception:\n raise HardwareError(\"Error in Removing all images (connect_zen_black.py).\")", "def wipe_asset(self, asset_key: AssetKey) -> None:", "def reload_image_folder():", "def test_unlink(self):\n path = ArtifactoryPath(\n \"http://artifactory.local/artifactory/ext-release-local/org/company/tool/1.0/tool-1.0.tar.gz\"\n )\n constructed_url = (\n \"http://artifactory.local/artifactory\"\n \"/api/storage\"\n \"/ext-release-local/org/company/tool/1.0/tool-1.0.tar.gz\"\n )\n responses.add(\n responses.GET,\n constructed_url,\n status=200,\n json=self.file_stat,\n )\n\n responses.add(\n responses.DELETE,\n str(path),\n status=200,\n )\n\n path.unlink()", "def force_clean(self): \n def remove_readonly(func, path, _):\n \"\"\"Clear the readonly bit and reattempt the removal\"\"\"\n os.chmod(path, stat.S_IWRITE)\n func(path) \n if os.path.exists(self.repo_dir):\n shutil.rmtree(self.repo_dir, onerror=remove_readonly)", "def delAsset(self):\n pass", "def delete_bitstream_file(self, context, filename):\n image_file_path = os.path.join(dconstants.DEVICE_IMAGE_PATH, filename)\n try:\n os.remove(image_file_path)\n except OSError:\n LOG.exception(\"Failed to delete bitstream file %s\" % image_file_path)\n # If no device image is uploaded, clear the in-progress 
alarm.\n images = self.dbapi.deviceimages_get_all()\n if not images:\n system_uuid = self.dbapi.isystem_get_one().uuid\n entity_instance_id = \"%s=%s\" % (fm_constants.FM_ENTITY_TYPE_SYSTEM, system_uuid)\n self.fm_api.clear_fault(fm_constants.FM_ALARM_ID_DEVICE_IMAGE_UPDATE_IN_PROGRESS,\n entity_instance_id)", "def cleanup_files():\n global options, log_hdl\n global del_system_image, del_kickstart_image\n\n poap_log(\"\\n\\nCleanup all files\")\n\n # Destination config\n cleanup_file_from_option(\"destination_config\")\n # Temporary split configs\n cleanup_file_from_option(\"split_config_first\", True)\n cleanup_file_from_option(\"split_config_second\", True)\n # Destination system or NXOS image\n if del_system_image == True:\n cleanup_file_from_option(\"destination_system_image\")\n # Destination kickstart image\n if del_kickstart_image == True:\n cleanup_file_from_option(\"destination_kickstart_image\")\n # Destination config\n cleanup_file_from_option(\"destination_config\")\n os.system(\"rm -rf /bootflash/poap_files\")\n os.system(\"rm -rf /bootflash_sup-remote/poap_files\")\n os.system(\"rm -rf /bootflash/poap_replay01.cfg\")", "def cleanup_files(self):\n os.system(\"rm -r /tmp/kernelpop\")", "def remove(self, force: bool = False) -> None:\n if not force:\n if not getinput.yes_no(\n f\"Are you sure you want to remove file {self.name}?\"\n ):\n return\n if self.kind == ArticleKind.electronic and self.path:\n os.unlink(self.get_path())\n print(f\"File {self.name} removed.\")\n self.kind = ArticleKind.removed # type: ignore", "def unpack_clear(self):\n if (not os.path.exists(self.unpack_path)):\n VERBOSE(\"no image found: %s\" % self.unpack_path)\n else:\n if (not os.path.isdir(self.unpack_path)):\n FATAL(\"can't flatten: %s exists but is not a directory\"\n % self.unpack_path)\n if (not self.unpacked_p(self.unpack_path)):\n FATAL(\"can't flatten: %s exists but does not appear to be an image\"\n % self.unpack_path)\n VERBOSE(\"removing existing image: %s\" % self.unpack_path)\n rmtree(self.unpack_path)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepare an ISO to boot the node. Build a bootable ISO out of `kernel_href` and `ramdisk_href` (and `bootloader` if it is a UEFI boot), then push the built image to NFS/CIFS and return a temporary URL.
def _prepare_iso_image(self, task, kernel_href, ramdisk_href,
                       bootloader_href=None, configdrive=None,
                       root_uuid=None, params=None):
    if not kernel_href or not ramdisk_href:
        raise exception.InvalidParameterValue(_(
            "Unable to find kernel or ramdisk for "
            "building ISO for %(node)s") %
            {'node': task.node.uuid})

    i_info = task.node.instance_info
    driver_info = task.node.driver_info
    if driver_info.get('remote_image_share_type') == 'nfs':
        image_share_root = driver_info.get('remote_image_share_root')
    else:
        image_share_root = driver_info.get('image_share_root')

    if deploy_utils.get_boot_option(task.node) == "ramdisk":
        kernel_params = "root=/dev/ram0 text "
        kernel_params += i_info.get("ramdisk_kernel_arguments", "")
    else:
        kernel_params = i_info.get('kernel_append_params', "")

    if params:
        kernel_params = ' '.join(
            (kernel_params, ' '.join(
                '%s=%s' % kv for kv in params.items())))

    boot_mode = boot_mode_utils.get_boot_mode_for_deploy(task.node)

    LOG.debug("Trying to create %(boot_mode)s ISO image for node %(node)s "
              "with kernel %(kernel_href)s, ramdisk %(ramdisk_href)s, "
              "bootloader %(bootloader_href)s and kernel params %(params)s"
              "", {'node': task.node.uuid,
                   'boot_mode': boot_mode,
                   'kernel_href': kernel_href,
                   'ramdisk_href': ramdisk_href,
                   'bootloader_href': bootloader_href,
                   'params': kernel_params})

    with tempfile.NamedTemporaryFile(
            dir=CONF.tempdir, suffix='.iso') as boot_fileobj:

        with tempfile.NamedTemporaryFile(
                dir=CONF.tempdir, suffix='.img') as cfgdrv_fileobj:

            configdrive_href = configdrive

            if configdrive:
                parsed_url = urlparse.urlparse(configdrive)
                if not parsed_url.scheme:
                    cfgdrv_blob = base64.decode_as_bytes(configdrive)

                    with open(cfgdrv_fileobj.name, 'wb') as f:
                        f.write(cfgdrv_blob)

                    configdrive_href = urlparse.urlunparse(
                        ('file', '', cfgdrv_fileobj.name, '', '', ''))

                LOG.info("Burning configdrive %(url)s to boot ISO image "
                         "for node %(node)s", {'url': configdrive_href,
                                               'node': task.node.uuid})

            boot_iso_tmp_file = boot_fileobj.name

            images.create_boot_iso(
                task.context, boot_iso_tmp_file,
                kernel_href, ramdisk_href,
                esp_image_href=bootloader_href,
                root_uuid=root_uuid,
                kernel_params=kernel_params,
                boot_mode=boot_mode)

            iso_object_name = self._get_iso_image_name(task.node)

            image_url = self._publish_image(
                boot_iso_tmp_file, iso_object_name, image_share_root)

            LOG.debug("Created ISO %(name)s in NFS/CIFS for node %(node)s, "
                      "exposed as temporary URL "
                      "%(url)s", {'node': task.node.uuid,
                                  'name': iso_object_name,
                                  'url': image_url})

            return image_url
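One detail of the method above worth isolating is how extra params are folded into the kernel command line: the dict is flattened into key=value tokens and appended to the existing arguments. A standalone sketch of just that step (the sample values are invented):

def build_kernel_params(base_params, params=None):
    # Flatten an optional dict of extra arguments into "key=value" tokens
    # and append them to the existing kernel command line, as above.
    kernel_params = base_params
    if params:
        kernel_params = ' '.join(
            (kernel_params, ' '.join('%s=%s' % kv for kv in params.items())))
    return kernel_params

# build_kernel_params("console=ttyS0", {"ipa-debug": 1})
# -> "console=ttyS0 ipa-debug=1"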
[ "def _prepare_boot_iso(self, task, root_uuid=None):\n node = task.node\n d_info = redfish_boot._parse_deploy_info(node)\n\n kernel_href = node.instance_info.get('kernel')\n ramdisk_href = node.instance_info.get('ramdisk')\n\n if not kernel_href or not ramdisk_href:\n\n image_href = d_info['image_source']\n\n image_properties = (\n images.get_image_properties(\n task.context, image_href, ['kernel_id', 'ramdisk_id']))\n\n if not kernel_href:\n kernel_href = image_properties.get('kernel_id')\n\n if not ramdisk_href:\n ramdisk_href = image_properties.get('ramdisk_id')\n\n if not kernel_href or not ramdisk_href:\n raise exception.InvalidParameterValue(_(\n \"Unable to find kernel or ramdisk for \"\n \"to generate boot ISO for %(node)s\") %\n {'node': task.node.uuid})\n\n bootloader_href = d_info.get('bootloader')\n\n return self._prepare_iso_image(\n task, kernel_href, ramdisk_href, bootloader_href,\n root_uuid=root_uuid)", "def _prepare_deploy_iso(self, task, params, mode):\n node = task.node\n d_info = redfish_boot._parse_driver_info(node)\n\n kernel_href = d_info.get('%s_kernel' % mode)\n ramdisk_href = d_info.get('%s_ramdisk' % mode)\n bootloader_href = d_info.get('bootloader')\n\n return self._prepare_iso_image(\n task, kernel_href, ramdisk_href, bootloader_href, params=params)", "def prepare_instance(self, task):\n node = task.node\n\n boot_option = deploy_utils.get_boot_option(node)\n\n self.clean_up_instance(task)\n\n remote_image_server = node.driver_info.get('remote_image_server')\n remote_image_share_root = node.driver_info.get(\n 'remote_image_share_root')\n\n remote_server_data = {}\n remote_server_data['remote_image_share_type'] = (\n node.driver_info.get('remote_image_share_type'))\n remote_server_data['remote_image_user_name'] = (\n node.driver_info.get('remote_image_user_name', None))\n remote_server_data['remote_image_user_password'] = (\n node.driver_info.get('remote_image_user_password', None))\n\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n sdflex_common.update_secure_boot_mode(task, True)\n iwdi = node.driver_internal_info.get('is_whole_disk_image')\n if boot_option == \"local\" or iwdi:\n self._set_boot_device(\n task, boot_devices.DISK, persistent=True)\n\n LOG.debug(\"Node %(node)s is set to permanently boot from local \"\n \"%(device)s\", {'node': task.node.uuid,\n 'device': boot_devices.DISK})\n return\n\n params = {}\n\n if boot_option != 'ramdisk':\n root_uuid = node.driver_internal_info.get('root_uuid_or_disk_id')\n\n if not root_uuid and task.driver.storage.should_write_image(task):\n LOG.warning(\n \"The UUID of the root partition could not be found for \"\n \"node %s. 
Booting instance from disk anyway.\", node.uuid)\n\n self._set_boot_device(\n task, boot_devices.DISK, persistent=True)\n\n return\n\n params.update(root_uuid=root_uuid)\n\n iso_ref = self._prepare_boot_iso(task, **params)\n\n url = (remote_server_data['remote_image_share_type'] + \"://\" +\n remote_image_server + \"/\" + remote_image_share_root + \"/\" +\n iso_ref)\n\n sdflex_common.eject_vmedia(task,\n vmedia_device)\n sdflex_common.insert_vmedia(task, url,\n vmedia_device,\n remote_server_data)\n\n boot_mode_utils.sync_boot_mode(task)\n\n self._set_boot_device(\n task, boot_devices.CD.value.lower(), persistent=True)\n\n LOG.debug(\"Node %(node)s is set to permanently boot from \"\n \"%(device)s\", {'node': task.node.uuid,\n 'device': boot_devices.CD})", "def prepare_ramdisk(self, task, ramdisk_params):\n node = task.node\n remote_server_data = {}\n remote_image_server = node.driver_info.get('remote_image_server')\n remote_image_share_root = node.driver_info.get(\n 'remote_image_share_root')\n remote_server_data['remote_image_share_type'] = (\n node.driver_info.get('remote_image_share_type'))\n remote_server_data['remote_image_user_name'] = (\n node.driver_info.get('remote_image_user_name', None))\n remote_server_data['remote_image_user_password'] = (\n node.driver_info.get('remote_image_user_password', None))\n\n # NOTE(TheJulia): If this method is being called by something\n # aside from deployment, clean and rescue, such as conductor takeover,\n # we should treat this as a no-op and move on otherwise we would\n # modify the state of the node due to virtual media operations.\n if node.provision_state not in (states.DEPLOYING,\n states.CLEANING,\n states.RESCUING,\n states.INSPECTING):\n return\n\n # NOTE(TheJulia): Since we're deploying, cleaning, or rescuing,\n # with virtual media boot, we should generate a token!\n manager_utils.add_secret_token(node, pregenerated=True)\n node.save()\n ramdisk_params['ipa-agent-token'] = (\n node.driver_internal_info['agent_secret_token'])\n\n manager_utils.node_power_action(task, states.POWER_OFF)\n\n deploy_nic_mac = deploy_utils.get_single_nic_with_vif_port_id(task)\n ramdisk_params['BOOTIF'] = deploy_nic_mac\n if CONF.debug and 'ipa-debug' not in ramdisk_params:\n ramdisk_params['ipa-debug'] = '1'\n\n mode = deploy_utils.rescue_or_deploy_mode(node)\n iso_ref = self._prepare_deploy_iso(task, ramdisk_params, mode)\n\n url = (remote_server_data['remote_image_share_type'] + \"://\" +\n remote_image_server + \"/\" + remote_image_share_root + \"/\" +\n iso_ref)\n\n sdflex_common.eject_vmedia(task,\n vmedia_device)\n sdflex_common.insert_vmedia(task, url,\n vmedia_device,\n remote_server_data)\n\n boot_mode_utils.sync_boot_mode(task)\n\n self._set_boot_device(task, boot_devices.CD.value.lower())\n\n LOG.debug(\"Node %(node)s is set to one time boot from \"\n \"%(device)s\", {'node': task.node.uuid,\n 'device': boot_devices.CD})", "def setup_iso_image(context):\n iso_src_url = context.testbed.config['ISO_SRC_URL']\n datacenter_name = context.testbed.config['ISO_DATACENTER_NAME']\n datastore_path = context.testbed.config['ISO_DATASTORE_PATH']\n (datastore_name, path) = parse_datastore_path(datastore_path)\n datastore_mo = get_datastore_mo(context.client,\n context.service_instance._stub,\n datacenter_name,\n datastore_name)\n if not datastore_mo:\n raise Exception(\"Could not find datastore '{}'\".format(datastore_name))\n\n # See if the ISO image exists. 
Copy it into the system if it does not exist\n dsfile = datastore_file.File(datastore_mo)\n if not dsfile.exists(datastore_path):\n print(\"Putting ISO image file from '{}' at '{}'\".\n format(iso_src_url, datastore_path))\n dsfile.put(path=path, src_url=iso_src_url)", "def prepare_ramdisk(self, task, ramdisk_params):\n if task.node.provision_state in (states.DEPLOYING, states.RESCUING,\n states.CLEANING, states.INSPECTING):\n prepare_node_for_deploy(task)\n if not http_utils.is_http_boot_requested(task.node):\n super(SdflexPXEBoot, self).prepare_ramdisk(task, ramdisk_params)\n else:\n node = task.node\n # Label indicating a deploy or rescue operation being carried out\n # on the node, 'deploy' or 'rescue'. Unless the node is in a\n # rescue like state, the mode is set to 'deploy', indicating\n # deploy operation is being carried out.\n mode = deploy_utils.rescue_or_deploy_mode(node)\n\n http_info = http_utils.get_image_info(node, mode=mode)\n\n # NODE: Try to validate and fetch instance images only\n # if we are in DEPLOYING state.\n if node.provision_state == states.DEPLOYING:\n http_info.update(http_utils.get_instance_image_info(task))\n boot_mode_utils.sync_boot_mode(task)\n\n http_options = http_utils.build_http_config_options(task,\n http_info)\n http_options.update(ramdisk_params)\n http_config_template = deploy_utils.get_pxe_config_template(node)\n http_utils.create_http_config(task, http_options,\n http_config_template)\n manager_utils.node_set_boot_device(task, boot_devices.UEFIHTTP,\n persistent=False)\n if http_info:\n http_utils.cache_ramdisk_kernel(task, http_info)\n bfpv = str(task.node.driver_info.get('bfpv', 'false')).lower()\n if bfpv == 'true':\n node = task.node\n driver_internal_info = node.driver_internal_info\n driver_internal_info['bfpv_started'] = 'false'\n node.driver_internal_info = driver_internal_info\n node.save()", "def create_iso(iso_name, archive_dir):\n try:\n controller_0 = sysinv_api.get_host_data('controller-0')\n except Exception as e:\n e_log = \"Failed to retrieve controller-0 inventory details.\"\n LOG.exception(e_log)\n raise CloneFail(e_log)\n\n iso_dir = os.path.join(archive_dir, 'isolinux')\n clone_archive_dir = os.path.join(iso_dir, CLONE_ARCHIVE_DIR)\n output = None\n tmpdir = None\n total_steps = 6\n step = 1\n print (\"\\nCreating ISO:\")\n\n # Add the correct kick-start file to the image\n ks_file = \"controller_ks.cfg\"\n if utils.get_system_type() == si_const.TIS_AIO_BUILD:\n if si_const.LOWLATENCY in tsconfig.subfunctions:\n ks_file = \"smallsystem_lowlatency_ks.cfg\"\n else:\n ks_file = \"smallsystem_ks.cfg\"\n\n try:\n # prepare the iso files\n images_dir = os.path.join(iso_dir, 'images')\n os.mkdir(images_dir, 0o644)\n pxe_dir = os.path.join('/pxeboot',\n 'rel-' + tsconfig.SW_VERSION)\n os.symlink(pxe_dir + '/installer-bzImage',\n iso_dir + '/vmlinuz')\n os.symlink(pxe_dir + '/installer-initrd',\n iso_dir + '/initrd.img')\n utils.progress(total_steps, step, 'preparing files', 'DONE')\n step += 1\n\n feed_dir = os.path.join('/www', 'pages', 'feed',\n 'rel-' + tsconfig.SW_VERSION)\n os.symlink(feed_dir + '/Packages', iso_dir + '/Packages')\n os.symlink(feed_dir + '/repodata', iso_dir + '/repodata')\n os.symlink(feed_dir + '/LiveOS', iso_dir + '/LiveOS')\n shutil.copy2(feed_dir + '/isolinux.cfg', iso_dir)\n update_bootloader_default(iso_dir + '/isolinux.cfg', controller_0)\n shutil.copyfile('/usr/share/syslinux/isolinux.bin',\n iso_dir + '/isolinux.bin')\n os.symlink('/usr/share/syslinux/vesamenu.c32',\n iso_dir + '/vesamenu.c32')\n for 
filename in glob.glob(os.path.join(feed_dir, '*ks.cfg')):\n shutil.copy(os.path.join(feed_dir, filename), iso_dir)\n utils.progress(total_steps, step, 'preparing files', 'DONE')\n step += 1\n\n efiboot_dir = os.path.join(iso_dir, 'EFI', 'BOOT')\n os.makedirs(efiboot_dir, 0o644)\n l_efi_dir = os.path.join('/boot', 'efi', 'EFI')\n shutil.copy2(l_efi_dir + '/BOOT/BOOTX64.EFI', efiboot_dir)\n shutil.copy2(l_efi_dir + '/centos/MokManager.efi', efiboot_dir)\n shutil.copy2(l_efi_dir + '/centos/grubx64.efi', efiboot_dir)\n shutil.copy2('/pxeboot/EFI/grub.cfg', efiboot_dir)\n update_bootloader_default(efiboot_dir + '/grub.cfg', controller_0)\n shutil.copytree(l_efi_dir + '/centos/fonts',\n efiboot_dir + '/fonts')\n # copy EFI boot image and update the grub.cfg file\n efi_img = images_dir + '/efiboot.img'\n shutil.copy2(pxe_dir + '/efiboot.img', efi_img)\n tmpdir = tempfile.mkdtemp(dir=archive_dir)\n output = subprocess.check_output(\n [\"mount\", \"-t\", \"vfat\", \"-o\", \"loop\",\n efi_img, tmpdir],\n stderr=subprocess.STDOUT)\n # replace the grub.cfg file with the updated file\n efi_grub_f = os.path.join(tmpdir, 'EFI', 'BOOT', 'grub.cfg')\n os.remove(efi_grub_f)\n shutil.copy2(efiboot_dir + '/grub.cfg', efi_grub_f)\n subprocess.call(['umount', tmpdir])\n shutil.rmtree(tmpdir, ignore_errors=True)\n tmpdir = None\n\n epoch_time = \"%.9f\" % time.time()\n disc_info = [epoch_time, tsconfig.SW_VERSION, \"x86_64\"]\n with open(iso_dir + '/.discinfo', 'w') as f:\n f.write('\\n'.join(disc_info))\n\n # copy the latest install_clone executable\n shutil.copy2('/usr/bin/install_clone', iso_dir)\n subprocess.check_output(\"cat /pxeboot/post_clone_iso_ks.cfg >> \" +\n iso_dir + \"/\" + ks_file, shell=True)\n utils.progress(total_steps, step, 'preparing files', 'DONE')\n step += 1\n\n # copy patches\n iso_patches_dir = os.path.join(iso_dir, 'patches')\n iso_patch_repo_dir = os.path.join(iso_patches_dir, 'repodata')\n iso_patch_pkgs_dir = os.path.join(iso_patches_dir, 'Packages')\n iso_patch_metadata_dir = os.path.join(iso_patches_dir, 'metadata')\n iso_patch_applied_dir = os.path.join(iso_patch_metadata_dir, 'applied')\n iso_patch_committed_dir = os.path.join(iso_patch_metadata_dir,\n 'committed')\n\n os.mkdir(iso_patches_dir, 0o755)\n os.mkdir(iso_patch_repo_dir, 0o755)\n os.mkdir(iso_patch_pkgs_dir, 0o755)\n os.mkdir(iso_patch_metadata_dir, 0o755)\n os.mkdir(iso_patch_applied_dir, 0o755)\n os.mkdir(iso_patch_committed_dir, 0o755)\n\n repodata = '/www/pages/updates/rel-%s/repodata/' % tsconfig.SW_VERSION\n pkgsdir = '/www/pages/updates/rel-%s/Packages/' % tsconfig.SW_VERSION\n patch_applied_dir = '/opt/patching/metadata/applied/'\n patch_committed_dir = '/opt/patching/metadata/committed/'\n subprocess.check_call(['rsync', '-a', repodata,\n '%s/' % iso_patch_repo_dir])\n if os.path.exists(pkgsdir):\n subprocess.check_call(['rsync', '-a', pkgsdir,\n '%s/' % iso_patch_pkgs_dir])\n if os.path.exists(patch_applied_dir):\n subprocess.check_call(['rsync', '-a', patch_applied_dir,\n '%s/' % iso_patch_applied_dir])\n if os.path.exists(patch_committed_dir):\n subprocess.check_call(['rsync', '-a', patch_committed_dir,\n '%s/' % iso_patch_committed_dir])\n utils.progress(total_steps, step, 'preparing files', 'DONE')\n step += 1\n\n create_ini_file(clone_archive_dir, iso_name)\n\n os.chmod(iso_dir + '/isolinux.bin', 0o664)\n iso_file = os.path.join(archive_dir, iso_name + \".iso\")\n output = subprocess.check_output(\n [\"nice\", \"mkisofs\",\n \"-o\", iso_file, \"-R\", \"-D\",\n \"-A\", \"oe_iso_boot\", \"-V\", 
\"oe_iso_boot\",\n \"-f\", \"-quiet\",\n \"-b\", \"isolinux.bin\", \"-c\", \"boot.cat\", \"-no-emul-boot\",\n \"-boot-load-size\", \"4\", \"-boot-info-table\",\n \"-eltorito-alt-boot\", \"-e\", \"images/efiboot.img\",\n \"-no-emul-boot\",\n iso_dir],\n stderr=subprocess.STDOUT)\n LOG.info(\"{} created: [{}]\".format(iso_file, output))\n utils.progress(total_steps, step, 'iso created', 'DONE')\n step += 1\n\n output = subprocess.check_output(\n [\"nice\", \"isohybrid\",\n \"--uefi\",\n iso_file],\n stderr=subprocess.STDOUT)\n LOG.debug(\"isohybrid: {}\".format(output))\n\n output = subprocess.check_output(\n [\"nice\", \"implantisomd5\",\n iso_file],\n stderr=subprocess.STDOUT)\n LOG.debug(\"implantisomd5: {}\".format(output))\n utils.progress(total_steps, step, 'checksum implanted', 'DONE')\n print(\"Cloned iso image created: {}\".format(iso_file))\n\n except Exception as e:\n LOG.exception(e)\n e_log = \"ISO creation ({}) failed\".format(iso_name)\n if output:\n e_log += ' [' + output + ']'\n LOG.error(e_log)\n raise CloneFail(\"ISO creation failed.\")\n\n finally:\n if tmpdir:\n subprocess.call(['umount', tmpdir], stderr=DEVNULL)\n shutil.rmtree(tmpdir, ignore_errors=True)", "def prepare_instance(self, task):\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n boot_option = deploy_utils.get_boot_option(task.node)\n if boot_option != \"kickstart\":\n sdflex_common.update_secure_boot_mode(task, True)\n if not http_utils.is_http_boot_requested(task.node):\n if boot_option == \"kickstart\":\n prepare_node_for_deploy(task)\n super(SdflexPXEBoot, self).prepare_instance(task)\n else:\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_option = deploy_utils.get_boot_option(node)\n boot_device = None\n instance_image_info = {}\n if boot_option == \"ramdisk\":\n instance_image_info = http_utils.get_instance_image_info(task)\n http_utils.cache_ramdisk_kernel(task, instance_image_info)\n if deploy_utils.is_iscsi_boot(task) or boot_option == \"ramdisk\":\n http_utils.prepare_instance_http_config(\n task, instance_image_info,\n iscsi_boot=deploy_utils.is_iscsi_boot(task),\n ramdisk_boot=(boot_option == \"ramdisk\"))\n if http_utils.is_http_boot_requested(task.node):\n boot_device = boot_devices.UEFIHTTP\n else:\n boot_device = boot_devices.PXE\n elif boot_option != \"local\":\n if task.driver.storage.should_write_image(task):\n # Make sure that the instance kernel/ramdisk is cached.\n # This is for the takeover scenario for active nodes.\n instance_image_info = (\n http_utils.get_instance_image_info(task))\n http_utils.cache_ramdisk_kernel(task, instance_image_info)\n iwdi = (\n task.node.driver_internal_info.get('is_whole_disk_image'))\n try:\n root_uuid_or_disk_id = task.node.driver_internal_info[\n 'root_uuid_or_disk_id'\n ]\n except KeyError:\n if not task.driver.storage.should_write_image(task):\n pass\n elif not iwdi:\n LOG.warning(\"The UUID for the root partition can't be\"\n \" found, unable to switch the pxe config \"\n \"from deployment mode to service (boot) \"\n \"mode for node %(node)s\",\n {\"node\": task.node.uuid})\n else:\n LOG.warning(\"The disk id for the whole disk image \"\n \"can't be found, unable to switch the \"\n \"pxe config from deployment mode to \"\n \"service (boot) mode for node %(node)s. 
\"\n \"Booting the instance from disk.\",\n {\"node\": task.node.uuid})\n http_utils.clean_up_http_config(task)\n boot_device = boot_devices.DISK\n else:\n http_utils.build_service_http_config(task,\n instance_image_info,\n root_uuid_or_disk_id)\n if http_utils.is_http_boot_requested(task.node):\n boot_device = boot_devices.UEFIHTTP\n else:\n boot_device = boot_devices.PXE\n else:\n # If it's going to boot from the local disk, we don't need\n # PXE config files. They still need to be generated as part\n # of the prepare() because the deployment does PXE boot the\n # deploy ramdisk\n http_utils.clean_up_http_config(task)\n boot_device = boot_devices.DISK\n\n # NOTE(pas-ha) do not re-set boot device on ACTIVE nodes\n # during takeover\n if boot_device and task.node.provision_state != states.ACTIVE:\n persistent = True\n if node.driver_info.get('force_persistent_boot_device',\n 'Default') == 'Never':\n persistent = False\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=persistent)", "def prepare_busybox(self):\n url = self._default_busybox_url\n platform = self.metadata.get('busybox_platform', 'armv6l')\n\n doc = self.download_file(url, tempfile.mkstemp()[1], silent=True)\n soup = BeautifulSoup(open(doc, 'r'))\n os.remove(doc)\n for link in soup.find_all('a'):\n if 'busybox-%s' % platform in link['href']:\n path = os.path.join(self.metadata['workdir'], 'busybox')\n if os.path.isfile(path):\n os.remove(path)\n file_name = self.download_file(url + link['href'], 'busybox')\n os.chmod(file_name, stat.S_IEXEC | stat.S_IREAD)\n break\n else:\n log.error(\"Couldn't find a busybox binary for platform '%s'\" % platform)", "def prepare(topology='devstack'):\n log.info(\"Preparing boxes for %s Openstack\" % topology)\n log.info(\"Preparing virtual machines for lab=%s\" % LAB)\n url = IMAGES_REPO + DEVSTACK_DISK\n local(\"test -e %s || wget -nv %s\" % (DEVSTACK_DISK, url))\n local(\"python ./tools/cloud/create.py -l {lab} -s /opt/imgs \"\n \"-z ./{disk} -t {topo} > config_file\".format(lab=LAB,\n disk=DEVSTACK_DISK,\n topo=topology))", "def _prepare_iso_image_on_host(self, context, instance, local_img_dir,\n host_dir, image_meta):\n pvm_op = self.pvm_operator\n\n # Check if media library exists. 
Error out if it's not there.\n if not pvm_op.check_media_library_exists():\n LOG.error(_(\"There is no media library\"))\n raise exception.IBMPowerVMMediaLibraryNotAvailable()\n\n # Create vopt with name being the glance ID/UUID.\n vopt_img_name = image_meta['id']\n if not pvm_op.check_vopt_exists(name=vopt_img_name):\n\n # check for space within CONF.powervm_img_local_path\n self._check_space(local_img_dir, image_meta['size'])\n\n # It's not VIOS yet:\n # Check available space in IVM staging area and media repository\n free_space = pvm_op.get_vopt_size()\n free_padmin_space = pvm_op.get_staging_size(host_dir)\n\n free_rep_mb = int(free_space[0])\n free_staging_mb = int(free_padmin_space[0]) / 1024\n iso_size_mb = image_meta['size'] / (1024 * 1024)\n\n LOG.debug(\"Free repository space: %s MB\" % free_rep_mb)\n LOG.debug(\"Free VIOS staging space: %s MB\" % free_staging_mb)\n LOG.debug(\"ISO file size: %s MB\" % iso_size_mb)\n\n if iso_size_mb > free_rep_mb:\n raise common_ex.IBMPowerVMMediaRepOutOfSpace(\n size=iso_size_mb, free=free_rep_mb)\n\n if iso_size_mb > free_staging_mb:\n raise exception.IBMPowerVMStagingAreaOutOfSpace(\n dir=host_dir, size=iso_size_mb, free=free_staging_mb)\n\n # Fetch ISO from Glance\n\n file_path = '%s/%s.%s.%s' % (local_img_dir,\n image_meta['id'],\n CONF.host,\n image_meta['disk_format'])\n LOG.debug(\"Fetching image '%(img-name)s' from glance \"\n \"to %(img-path)s\" %\n {'img-name': image_meta['name'],\n 'img-path': file_path})\n images.fetch(context, image_meta['id'], file_path,\n instance['user_id'],\n instance['project_id'])\n if (os.path.isfile(file_path)):\n # transfer ISO to VIOS\n try:\n\n iso_remote_path, iso_size = self.\\\n _copy_image_file(file_path,\n host_dir,\n context,\n image_meta)\n\n self.pvm_operator.create_vopt_device(\n vopt_img_name, iso_remote_path)\n # The create vopt device command copies\n # the ISO into the media repository. 
The file at\n # iso_remote_path can be removed now.\n try:\n self.pvm_operator.remove_ovf_env_iso(\n iso_remote_path)\n except (exception.IBMPowerVMCommandFailed, Exception) as e:\n msg = (_('Error cleaning up ISO:'\n ' %(iso_remote_path)s') %\n locals())\n LOG.exception(msg)\n raise e\n finally:\n os.remove(file_path)\n else:\n raise exception.IBMPowerVMISOFileNotFound(\n ISO_file=file_path)\n else:\n LOG.debug(\"Image with id %s already on VIOS.\" % image_meta['id'])\n\n return vopt_img_name", "def create_base_image(driver,\n source_image_family='debian-10', source_image_project='debian-cloud',\n repository_url='https://github.com/google/android-cuttlefish.git',\n repository_branch='main', build_branch='aosp-master',\n build_target='aosp_cf_x86_phone-userdebug', build_id='',\n build_instance='halyard-build', build_zone='europe-west4-a', tags=[],\n dest_image='', dest_family='', image_disk='halyard-image-disk', respin=False):\n\n # SETUP\n \n build_node = utils.find_instance(driver, build_instance, build_zone)\n if build_node:\n driver.destroy_node(build_node)\n print('successfully deleted', build_instance)\n\n build_volume = utils.find_disk(driver, image_disk, build_zone)\n if build_volume:\n driver.destroy_volume(build_volume)\n print('successfully deleted', image_disk)\n\n\n # BUILD INSTANCE CREATION\n\n build_volume = driver.create_volume(\n 30, image_disk,\n location=build_zone,\n ex_image_family=source_image_family)\n\n print('built', source_image_family, 'disk')\n\n gpu_type='nvidia-tesla-p100-vws'\n gpu = utils.find_gpu(driver, gpu_type, build_zone)\n if not gpu:\n utils.fatal_error(f'Please use a zone with {gpu_type} GPUs available')\n\n build_node = driver.create_node(\n build_instance,\n 'n1-standard-16',\n None,\n location=build_zone,\n ex_image_family=source_image_family,\n ex_accelerator_type=gpu_type,\n ex_on_host_maintenance='TERMINATE',\n ex_accelerator_count=1,\n ex_service_accounts=[{'scopes':['storage-ro']}],\n ex_disk_size=30,\n ex_tags=tags)\n print('successfully created', build_instance)\n\n utils.wait_for_instance(build_instance, build_zone)\n\n driver.attach_volume(build_node, build_volume)\n\n src_files = ['create_base_image_gce.sh', 'download_artifacts.sh']\n src_files = [PATH + '/' + file for file in src_files]\n src = ' '.join(list(map(str,src_files)))\n\n os.system(f'gcloud compute scp {src} {build_instance}: \\\n --zone={build_zone}')\n\n\n # IMAGE CREATION\n\n os.system(f'gcloud compute ssh --zone={build_zone} \\\n {build_instance} -- ./create_base_image_gce.sh \\\n {repository_url} {repository_branch} \\\n {build_branch} {build_target} {build_id}')\n\n dest_names = get_dest_names(\n build_branch, build_target, build_id,\n build_instance, build_zone, dest_image, dest_family)\n \n dest_image = dest_names['dest_image']\n dest_family = dest_names['dest_family']\n\n try:\n build_image = driver.ex_get_image(dest_image)\n except:\n build_image = None\n\n if build_image:\n if respin:\n driver.ex_delete_image(build_image)\n else:\n utils.fatal_error(f'''Image {dest_image} already exists.\n (To replace run with flag --respin)''')\n\n driver.destroy_node(build_node)\n\n driver.ex_create_image(\n dest_image,\n build_volume,\n ex_licenses=['https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx'],\n family=dest_family\n )\n\n print(f'Created image {dest_image} in {dest_family} family')\n\n driver.destroy_volume(build_volume)\n\n return {\"name\": dest_image, \"family\": dest_family}", "def _prepare_iso_image_on_host(self, context, 
instance, local_img_dir,\n host_dir, image_meta):\n pvm_op = self.pvm_operator\n\n # Check if media library exists. Error out if it's not there.\n if not pvm_op.check_media_library_exists():\n LOG.error(_(\"There is no media library\"))\n raise exception.IBMPowerVMMediaLibraryNotAvailable()\n\n # Create vopt with name being the glance ID/UUID.\n vopt_img_name = image_meta['id']\n if not pvm_op.check_vopt_exists(name=vopt_img_name):\n # check for space within CONF.powervm_img_local_path\n self._check_space(local_img_dir, image_meta['size'])\n\n # It's not VIOS yet:\n # Check available space in IVM staging area and media repository\n free_space = pvm_op.get_vopt_size()\n free_padmin_space = pvm_op.get_staging_size(host_dir)\n\n free_rep_mb = int(free_space[0])\n free_staging_mb = int(free_padmin_space[0]) / 1024\n iso_size_mb = image_meta['size'] / (1024 * 1024)\n\n LOG.debug(\"Free repository space: %s MB\" % free_rep_mb)\n LOG.debug(\"Free VIOS staging space: %s MB\" % free_staging_mb)\n LOG.debug(\"ISO file size: %s MB\" % iso_size_mb)\n\n if iso_size_mb > free_rep_mb:\n raise common_ex.IBMPowerVMMediaRepOutOfSpace(\n size=iso_size_mb, free=free_rep_mb)\n\n if iso_size_mb > free_staging_mb:\n raise exception.IBMPowerVMStagingAreaOutOfSpace(\n dir=host_dir, size=iso_size_mb, free=free_staging_mb)\n\n # Fetch ISO from Glance\n\n file_path = '%s/%s.%s.%s' % (local_img_dir,\n image_meta['id'],\n CONF.host,\n image_meta['disk_format'])\n LOG.debug(\"Fetching image '%(img-name)s' from glance \"\n \"to %(img-path)s\" %\n {'img-name': image_meta['name'],\n 'img-path': file_path})\n images.fetch(context, image_meta['id'], file_path,\n instance['user_id'],\n instance['project_id'])\n if (os.path.isfile(file_path)):\n # transfer ISO to VIOS\n try:\n\n iso_remote_path, iso_size = self.copy_image_file(file_path,\n host_dir)\n\n self.pvm_operator.create_vopt_device(\n vopt_img_name, iso_remote_path)\n # The create vopt device command copies\n # the ISO into the media repository. The file at\n # iso_remote_path can be removed now.\n try:\n self.pvm_operator.remove_ovf_env_iso(iso_remote_path)\n except Exception:\n msg = (_('Error cleaning up ISO:'\n ' %(iso_remote_path)s') %\n locals())\n LOG.exception(msg)\n finally:\n os.remove(file_path)\n else:\n raise exception.IBMPowerVMISOFileNotFound(\n ISO_file=file_path)\n else:\n LOG.debug(\"Image with id %s already on VIOS.\"\n % image_meta['id'])\n\n return vopt_img_name", "def create_custom_iso_image_redhat(os_type, server, config, kickstart_file):\n if os_type == \"rhel7\":\n rhel_iso_filename = config[\"OS_image_name\"]\n if not os.path.isfile(kickstart_file):\n print(\"Kickstart file is not present for RHEL installation\")\n return False \t\n else:\n print(\"Installation OS type {} is not supported\".format(os_type))\n return False\n destination_folder = config[\"HTTP_file_path\"]\n\n print(\"Creating modified installation file for RHEL Installation\")\n image_url = config[\"HTTP_server_base_url\"] + rhel_iso_filename\n file_presence = is_iso_file_present(image_url)\n if not file_presence:\n print(\"ISO file is not present in the given http location. 
Please check the http location and then try again.\")\n return False\n\n val = is_iso_image(rhel_iso_filename)\n if val:\n if os_type == \"rhel7\":\n base_iso_image_path = config[\"HTTP_file_path\"]\n filepath = base_iso_image_path + rhel_iso_filename\n server_serial_number = server[\"Server_serial_number\"]\n\n temppath = \"/tmp/\" + \"redhatmount_\" + server_serial_number + \"/\"\n mount_path = \"/tmp/\" + \"redhatorig_\" + server_serial_number\n\n kickstart_filepath = temppath + \"ks.cfg\"\n\n mount_proc_id = mount_iso_image(filepath, mount_path)\n if mount_proc_id == 0:\n print(\"Successfully mounted the image {}\".format(rhel_iso_filename))\n else:\n print(\"Attempting to unmount the previously mounted image\")\n umount_id = unmount_iso_image(mount_path)\n mount_proc_id = mount_iso_image(filepath, mount_path)\n if mount_proc_id == 0:\n print(\"Successfully unmounted the previously mounted image\") \n else:\n print(\"Failed to mount the image {}\".format(rhel_iso_filename))\n return False\n\n copy_iso_contents(mount_path, temppath)\n kickstart_status = create_kickstart_file_for_redhat(kickstart_filepath, kickstart_file, server)\n \n if(kickstart_status and os.path.isfile(kickstart_filepath)):\n redhat_label = update_ks_file_location_redhat_iso_efi(temppath + \"EFI/BOOT/\")\n redhat_label = redhat_label.replace(\"\\\\x20\",\" \")\n print(redhat_label)\n update_ks_file_location_redhat_iso_legacy(temppath + \"isolinux/\")\n \n destination_filename = get_custom_image_name(os_type, server_serial_number) \n \n recreate_iso_proc_id = rebuild_iso_redhat_image(temppath, destination_folder, destination_filename, redhat_label)\n if recreate_iso_proc_id.returncode == 0:\n print(\"Successfully re-created the iso image for server {} after modifying the content\".format(server_serial_number))\n status = True\n else:\n print(\"Error in recreating the iso image for server {} after modifying the content\".format(server_serial_number))\n status = False\n \n umount_proc_id = unmount_iso_image(mount_path)\n if umount_proc_id == 0:\n print(\"Successfully unmounted the iso image\")\n else:\n print(\"Error in umounting the iso image\") \n\n delete_temp_folder(temppath)\n return status\n else:\n print(\"Error in fetching custom kickstart file {}\".format(kickstart_file))\n return status\n else:\n print(\"File type is not supported\")\n return False\n return True", "def btldr_to_linux(pp, **kwargs):\n # Set local copy\n uut_conn = pp.uut_conn\n uut_prompts = pp.mode_mgr.uut_prompt_map\n uut_config = pp.ud.uut_config\n\n # Decide how some param defaults will be generated\n flash_device_default = {'name': 'flash', 'relative_dir': '', 'device_num': 3}\n device_mounts_default = {'primary': [(3, '/mnt/flash3')], 'secondary': None, 'tertiary': None}\n\n # Get params and set defaults where appropriate.\n linux_image = kwargs.get('linux', {}).get('image', uut_config.get('linux', {}).get('image', ''))\n flash_device = kwargs.get('flash_device', uut_config.get('flash_device', flash_device_default))\n disk_enums = kwargs.get('disk_enums', uut_config.get('disk_enums', None))\n device_mounts = kwargs.get('device_mounts', uut_config.get('device_mounts', device_mounts_default))\n uut_mac = None # Let the UUT autodiscover instead kwargs.get('MAC_ADDR', '')\n uut_sernum = kwargs.get('MOTHERBOARD_SERIAL_NUM', uut_config.get('MOTHERBOARD_SERIAL_NUM', '')) # Must be s/n that is associated w/ the MAC\n uut_ip = kwargs.get('uut_ip', uut_config.get('uut_ip', ''))\n server_ip = kwargs.get('server_ip', uut_config.get('server_ip', 
''))\n netmask = kwargs.get('netmask', uut_config.get('netmask', ''))\n\n # Function specific vars\n device_name = kwargs.get('device_name', flash_device['name'])\n linux_dirs = kwargs.get('linux_dirs', {'local': '', 'remote': ''})\n do_primary_mount = kwargs.get('do_primary_mount', True)\n log.debug(\"do_primary_mount={0}\".format(do_primary_mount))\n\n ret = False\n do_load = True\n\n # Determine the most robust way to boot the target image IF expected conditions are NOT met.\n pattern = r'(?:vmlinu[xz][\\S]*)|(?:bzImage[\\S]*)'\n set_net_load, linux_image, linux_dir = __get_robust_load_details('Linux',\n pp,\n uut_conn,\n uut_prompts['BTLDR'],\n linux_image,\n linux_dirs,\n device_name,\n pattern,\n guess_image='vmlinux.mzip.SSA')\n\n # Set up the network connection via management port for a remote load as the target.\n # Disallow the load if network setup was required and unsuccessful.\n if set_net_load:\n do_load, device_name, linux_dir = __set_uut_network('Linux',\n pp,\n uut_conn,\n uut_prompts['BTLDR'],\n linux_image,\n uut_mac,\n uut_ip,\n netmask,\n server_ip,\n device_name,\n linux_dir,\n uut_sernum)\n\n # Now issue the commands for the Linux image and path that was determined as the target.\n if do_load:\n # Perform load\n try:\n log.debug(\"Linux boot...\")\n uut_conn.send('boot {0}:{1}{2}\\r'.format(device_name, linux_dir, linux_image),\n expectphrase=\".*\", timeout=30, regex=True)\n\n # Quick assessment of boot process\n log.debug(\"Linux boot load started...\")\n uut_conn.waitfor('(?:Reading)|(?:Loading)|(?:Attempting)|(?:{0})'.format(uut_prompts['BTLDR']), timeout=90, regex=True)\n if \"Reading\" in uut_conn.recbuf:\n log.debug(\"Reading image...\")\n elif \"Attempting\" in uut_conn.recbuf:\n log.debug(\"Attempting image...\")\n elif \"Boot process failed\" in uut_conn.recbuf:\n log.debug(\"Boot process failed...\")\n raise Exception(\"FAILED Linux boot.\")\n\n # Getting here means the image is trying to boot; wait on it.\n log.debug(\"Linux boot confirm load...\")\n uut_conn.waitfor('(Loading)|(done)', timeout=240, regex=True, idle_timeout=60)\n\n log.debug(\"Linux boot load done; wait for launch...\")\n boot_interim_msgs = r'(?:Launching Linux Kernel)|' \\\n r'(?:BusyBox)'\n boot_result, _ = pp.mode_mgr.wait_for_boot(boot_mode='LINUX',\n boot_msg='(?:Bootable image)|(?:Booting)',\n boot_interim_msgs=boot_interim_msgs)\n\n if boot_result:\n if pp.mode_mgr.is_mode('LINUX', refresh=True):\n ret = True\n log.debug(\"Linux ready.\")\n else:\n log.error(\"Waiting for boot result failed.\")\n\n except apexceptions.TimeoutException, apexceptions.IdleTimeoutException:\n log.error(\"TIMEOUT waiting for Linux response!\")\n ret = False\n\n if do_primary_mount:\n # Primary mount REQUIRED!\n log.debug(\"Primary mount is required.\")\n if ret:\n # Linux boot up maybe good; however, if primary mounting is required and fails,\n # the end result will be mode failure.\n #\n # Get disk enumerations\n if not disk_enums:\n disk_enums = pp.linux.get_disk_enumeration(disk_enums)\n uut_config['disk_enums'] = disk_enums\n\n # Mount the primary disk\n ret, _ = pp.linux.mount_disks(device_numbers=[3],\n disk_type='primary',\n device_mounts=device_mounts,\n disk_enums=disk_enums)\n\n if ret:\n map_dir = pp.ud.get_flash_mapped_dir()\n log.info(\"Flash map = '{0}'\".format(map_dir))\n if map_dir:\n uut_conn.send('cd {0}\\r'.format(map_dir), expectphrase=uut_prompts['LINUX'], timeout=30, regex=True)\n else:\n log.warning(\"Linux mount was good but no 'flash:' dir map was found; remaining in cwd.\")\n 
else:\n log.error(\"Linux mount was required but did not complete properly.\")\n\n else:\n log.error(\"Primary mount will NOT be performed since the Linux kernel mode was not confirmed.\")\n else:\n # Primary mount NOT required.\n if ret:\n # The mount is a \"don't care\" if the filesystem is not present.\n # This is to accomodate a net boot w/ blank flash in order to prepare the device.\n log.info(\"Primary mount NOT required during Linux load.\")\n else:\n log.error(\"Linux kernel mode was not confirmed.\")\n\n else:\n log.error(\"Unable to perform load.\")\n\n return ret", "def copy_initial_firmware(binary_path):\n # Change into directory containing tools\n os.chdir(FILE_DIR)\n bootloader = FILE_DIR / '..' / 'bootloader'\n shutil.copy(binary_path, bootloader / 'src' / 'firmware.bin')\n \n # Put secret key into directory (gen by cryptoDome)", "def do_prepare_partition(cls, part, source_params, creator, cr_workdir,\n oe_builddir, bootimg_dir, kernel_dir,\n rootfs_dir, native_sysroot):\n\n # We rely on the --label parameter and the naming convention\n # in partition.py prepare_rootfs() here to find the already\n # prepared rootfs partition image.\n pattern = '%s/rootfs_%s.*' % (cr_workdir, part.label)\n rootfs = glob.glob(pattern)\n if len(rootfs) != 1:\n raise WicError(\"%s shell pattern does not match exactly one rootfs image (missing --label parameter?): %s\" % (pattern, rootfs))\n else:\n rootfs = rootfs[0]\n logger.debug(\"Calculating dm-verity hash for rootfs %s (native %s).\" % (rootfs, native_sysroot))\n\n hashimg = '%s/dm-verity_%s.img' % (cr_workdir, part.label)\n # Reserve some fixed amount of space at the start of the hash image\n # for our own data (in particular, the signed root hash).\n # The content of that part is:\n # roothash=<....>\n # <potentially some more assignments in the future>\n # signature=<single line of base64 encoded OpenSSL sha256 digest>\n header_size = 4096\n ret, out = exec_native_cmd(\"veritysetup format '%s' '%s' --hash-offset=%d\" %\n (rootfs, hashimg, header_size),\n native_sysroot)\n m = re.search(r'^Root hash:\\s*(\\S+)$', out, re.MULTILINE)\n if ret or not m:\n raise WicError('veritysetup failed: %s' % out)\n else:\n root_hash = m.group(1)\n privkey = get_bitbake_var('REFKIT_DMVERITY_PRIVATE_KEY')\n password = get_bitbake_var('REFKIT_DMVERITY_PASSWORD')\n tmp = tempfile.mkdtemp(prefix='dm-verity-')\n try:\n data_filename = os.path.join(tmp, 'data')\n header = ('roothash=%s\\nheadersize=%d\\n' % (root_hash, header_size)).encode('ascii')\n with open(data_filename, 'wb') as data:\n data.write(header)\n # Must use a temporary file, exec_native_cmd() only supports UTF-8 output.\n signature = os.path.join(tmp, 'sig')\n ret, out = exec_native_cmd(\"openssl dgst -sha256 -passin '%s' -sign '%s' -out '%s' '%s'\" %\n (password, privkey, signature, data_filename),\n native_sysroot)\n if ret:\n raise WicError('openssl signing failed')\n with open(signature, 'rb') as f:\n header += b'signature=' + base64.standard_b64encode(f.read()) + b'\\n'\n if len(header) + 1 >= header_size:\n raise WicError('reserved space for dm-verity header too small')\n with open(hashimg, 'rb+') as hash:\n hash.write(header)\n finally:\n shutil.rmtree(tmp)\n\n data_bytes = os.stat(rootfs).st_size\n hash_bytes = os.stat(hashimg).st_size\n logger.debug(\"dm-verity data partition %d bytes, hash partition %d bytes, ratio %f.\" %\n (data_bytes, hash_bytes, data_bytes / hash_bytes))\n part.size = data_bytes // 1024\n part.source_file = hashimg", "def create(image_path, pool_path, name, cmd, 
network=False, port=None, terminal=False):\n if not os.path.exists(image_path):\n print(\"Error: This image could not be found in path %s\" % image_path, file=sys.stderr)\n return None\n\n container_path = os.path.join(pool_path, name)\n if os.path.exists(container_path):\n print(\"Error: A container is existing at that path: %s\" % container_path, file=sys.stderr)\n return None\n\n os.makedirs(container_path)\n rootfs = os.path.join(image_path, 'rootfs')\n command = \"cp -r %s %s\" % (rootfs, container_path)\n output = subprocess.run(args=command.split(\" \"), stdout=subprocess.PIPE)\n if output.returncode != 0:\n print(\"Error: While copying rootfs\", file=sys.stderr)\n return None\n\n if cmd is None:\n cmd_path = os.path.join(image_path, 'CMD')\n with open(cmd_path, 'r') as f:\n cmd = f.readline()\n f.close()\n\n config = generate_config(name, container_path, image_path, cmd.split(\" \"), network, terminal)\n config_path = os.path.join(container_path, 'config.json')\n with open(config_path, 'w+') as f:\n json.dump(config, f, indent=4)\n\n command = \"/usr/bin/rootlesskit\"\n if network:\n command += \" --net=slirp4netns --disable-host-loopback --copy-up=/etc\"\n if port is not None:\n command += \" --port-driver=builtin --publish %s\" % port\n command += \" /bin/sh -c\"\n\n namespace_path = os.path.join(container_path, 'namespace.pid')\n\n args = command.split(\" \")\n args.append(\"echo $$ > %s; sleep infinity\" % namespace_path)\n output = subprocess.Popen(args=args, stdout=None, stderr=None, stdin=None)\n\n while not os.path.exists(namespace_path):\n time.sleep(0.001)\n\n return name", "def install_images_7_x():\n poap_log(\"Checking if bios upgrade is needed\")\n if is_bios_upgrade_needed():\n poap_log(\"Installing new BIOS (will take up to 5 minutes. Don't abort)\")\n install_bios()\n\n poap_log(\"Installing NXOS image\")\n\n system_image_path = os.path.join(options[\"destination_path\"],\n options[\"destination_system_image\"])\n system_image_path = system_image_path.replace(\"/bootflash\", \"bootflash:\", 1)\n\n try:\n poap_log(\"config terminal ; boot nxos %s\" % system_image_path)\n cli(\"config terminal ; boot nxos %s\" % system_image_path)\n except Exception as e:\n poap_log(\"Failed to set NXOS boot variable to %s\" % system_image_path)\n abort(str(e))\n\n command_successful = False\n timeout = 10 # minutes\n first_time = time.time()\n endtime = first_time + timeout * 60 # sec per min\n retry_delay = 30 # seconds\n while not command_successful:\n new_time = time.time()\n try:\n cli(\"copy running-config startup-config\")\n command_successful = True\n except SyntaxError:\n poap_log(\"WARNING: copy run to start failed\")\n if new_time > endtime:\n poap_log(\"ERROR: time out waiting for \\\"copy run start\\\" to complete successfully\")\n exit(-1)\n poap_log(\"WARNING: retry in 30 seconds\")\n time.sleep(retry_delay)\n\n poap_log(\"INFO: Configuration successful\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepare a deploy or rescue ISO image. Build a bootable ISO out of `[driver_info]/deploy_kernel` and `[driver_info]/deploy_ramdisk`, or `[driver_info]/rescue_kernel` and `[driver_info]/rescue_ramdisk`, together with `[driver_info]/bootloader`.
def _prepare_deploy_iso(self, task, params, mode):
    node = task.node
    d_info = redfish_boot._parse_driver_info(node)

    kernel_href = d_info.get('%s_kernel' % mode)
    ramdisk_href = d_info.get('%s_ramdisk' % mode)
    bootloader_href = d_info.get('bootloader')

    return self._prepare_iso_image(
        task, kernel_href, ramdisk_href, bootloader_href, params=params)
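The key behaviour here is that mode ('deploy' or 'rescue') selects which driver_info keys provide the kernel and ramdisk. A sketch of that lookup in isolation (the example values are invented):

def select_iso_sources(driver_info, mode):
    # mode is 'deploy' or 'rescue'; pick the matching *_kernel / *_ramdisk
    # entries plus the shared bootloader reference.
    return (driver_info.get('%s_kernel' % mode),
            driver_info.get('%s_ramdisk' % mode),
            driver_info.get('bootloader'))

# select_iso_sources({'deploy_kernel': 'glance://k', 'deploy_ramdisk': 'glance://r'}, 'deploy')
# -> ('glance://k', 'glance://r', None)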
[ "def _prepare_iso_image(self, task, kernel_href, ramdisk_href,\n bootloader_href=None, configdrive=None,\n root_uuid=None, params=None):\n if not kernel_href or not ramdisk_href:\n raise exception.InvalidParameterValue(_(\n \"Unable to find kernel or ramdisk for \"\n \"building ISO for %(node)s\") %\n {'node': task.node.uuid})\n\n i_info = task.node.instance_info\n driver_info = task.node.driver_info\n if driver_info.get('remote_image_share_type') == 'nfs':\n image_share_root = driver_info.get('remote_image_share_root')\n else:\n image_share_root = driver_info.get('image_share_root')\n if deploy_utils.get_boot_option(task.node) == \"ramdisk\":\n kernel_params = \"root=/dev/ram0 text \"\n kernel_params += i_info.get(\"ramdisk_kernel_arguments\", \"\")\n\n else:\n kernel_params = i_info.get('kernel_append_params', \"\")\n\n if params:\n kernel_params = ' '.join(\n (kernel_params, ' '.join(\n '%s=%s' % kv for kv in params.items())))\n\n boot_mode = boot_mode_utils.get_boot_mode_for_deploy(task.node)\n\n LOG.debug(\"Trying to create %(boot_mode)s ISO image for node %(node)s \"\n \"with kernel %(kernel_href)s, ramdisk %(ramdisk_href)s, \"\n \"bootloader %(bootloader_href)s and kernel params %(params)s\"\n \"\", {'node': task.node.uuid,\n 'boot_mode': boot_mode,\n 'kernel_href': kernel_href,\n 'ramdisk_href': ramdisk_href,\n 'bootloader_href': bootloader_href,\n 'params': kernel_params})\n\n with tempfile.NamedTemporaryFile(\n dir=CONF.tempdir, suffix='.iso') as boot_fileobj:\n\n with tempfile.NamedTemporaryFile(\n dir=CONF.tempdir, suffix='.img') as cfgdrv_fileobj:\n\n configdrive_href = configdrive\n\n if configdrive:\n parsed_url = urlparse.urlparse(configdrive)\n if not parsed_url.scheme:\n cfgdrv_blob = base64.decode_as_bytes(configdrive)\n\n with open(cfgdrv_fileobj.name, 'wb') as f:\n f.write(cfgdrv_blob)\n\n configdrive_href = urlparse.urlunparse(\n ('file', '', cfgdrv_fileobj.name, '', '', ''))\n\n LOG.info(\"Burning configdrive %(url)s to boot ISO image \"\n \"for node %(node)s\", {'url': configdrive_href,\n 'node': task.node.uuid})\n boot_iso_tmp_file = boot_fileobj.name\n\n images.create_boot_iso(\n task.context, boot_iso_tmp_file,\n kernel_href, ramdisk_href,\n esp_image_href=bootloader_href,\n root_uuid=root_uuid,\n kernel_params=kernel_params,\n boot_mode=boot_mode)\n iso_object_name = self._get_iso_image_name(task.node)\n\n image_url = self._publish_image(\n boot_iso_tmp_file, iso_object_name, image_share_root)\n\n LOG.debug(\"Created ISO %(name)s in NFS/CIFS for node %(node)s, \"\n \"exposed as temporary URL \"\n \"%(url)s\", {'node': task.node.uuid,\n 'name': iso_object_name,\n 'url': image_url})\n\n return image_url", "def _prepare_boot_iso(self, task, root_uuid=None):\n node = task.node\n d_info = redfish_boot._parse_deploy_info(node)\n\n kernel_href = node.instance_info.get('kernel')\n ramdisk_href = node.instance_info.get('ramdisk')\n\n if not kernel_href or not ramdisk_href:\n\n image_href = d_info['image_source']\n\n image_properties = (\n images.get_image_properties(\n task.context, image_href, ['kernel_id', 'ramdisk_id']))\n\n if not kernel_href:\n kernel_href = image_properties.get('kernel_id')\n\n if not ramdisk_href:\n ramdisk_href = image_properties.get('ramdisk_id')\n\n if not kernel_href or not ramdisk_href:\n raise exception.InvalidParameterValue(_(\n \"Unable to find kernel or ramdisk for \"\n \"to generate boot ISO for %(node)s\") %\n {'node': task.node.uuid})\n\n bootloader_href = d_info.get('bootloader')\n\n return self._prepare_iso_image(\n task, 
kernel_href, ramdisk_href, bootloader_href,\n root_uuid=root_uuid)", "def prepare(topology='devstack'):\n log.info(\"Preparing boxes for %s Openstack\" % topology)\n log.info(\"Preparing virtual machines for lab=%s\" % LAB)\n url = IMAGES_REPO + DEVSTACK_DISK\n local(\"test -e %s || wget -nv %s\" % (DEVSTACK_DISK, url))\n local(\"python ./tools/cloud/create.py -l {lab} -s /opt/imgs \"\n \"-z ./{disk} -t {topo} > config_file\".format(lab=LAB,\n disk=DEVSTACK_DISK,\n topo=topology))", "def prepare_instance(self, task):\n node = task.node\n\n boot_option = deploy_utils.get_boot_option(node)\n\n self.clean_up_instance(task)\n\n remote_image_server = node.driver_info.get('remote_image_server')\n remote_image_share_root = node.driver_info.get(\n 'remote_image_share_root')\n\n remote_server_data = {}\n remote_server_data['remote_image_share_type'] = (\n node.driver_info.get('remote_image_share_type'))\n remote_server_data['remote_image_user_name'] = (\n node.driver_info.get('remote_image_user_name', None))\n remote_server_data['remote_image_user_password'] = (\n node.driver_info.get('remote_image_user_password', None))\n\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n sdflex_common.update_secure_boot_mode(task, True)\n iwdi = node.driver_internal_info.get('is_whole_disk_image')\n if boot_option == \"local\" or iwdi:\n self._set_boot_device(\n task, boot_devices.DISK, persistent=True)\n\n LOG.debug(\"Node %(node)s is set to permanently boot from local \"\n \"%(device)s\", {'node': task.node.uuid,\n 'device': boot_devices.DISK})\n return\n\n params = {}\n\n if boot_option != 'ramdisk':\n root_uuid = node.driver_internal_info.get('root_uuid_or_disk_id')\n\n if not root_uuid and task.driver.storage.should_write_image(task):\n LOG.warning(\n \"The UUID of the root partition could not be found for \"\n \"node %s. Booting instance from disk anyway.\", node.uuid)\n\n self._set_boot_device(\n task, boot_devices.DISK, persistent=True)\n\n return\n\n params.update(root_uuid=root_uuid)\n\n iso_ref = self._prepare_boot_iso(task, **params)\n\n url = (remote_server_data['remote_image_share_type'] + \"://\" +\n remote_image_server + \"/\" + remote_image_share_root + \"/\" +\n iso_ref)\n\n sdflex_common.eject_vmedia(task,\n vmedia_device)\n sdflex_common.insert_vmedia(task, url,\n vmedia_device,\n remote_server_data)\n\n boot_mode_utils.sync_boot_mode(task)\n\n self._set_boot_device(\n task, boot_devices.CD.value.lower(), persistent=True)\n\n LOG.debug(\"Node %(node)s is set to permanently boot from \"\n \"%(device)s\", {'node': task.node.uuid,\n 'device': boot_devices.CD})", "def prepare_ramdisk(self, task, ramdisk_params):\n if task.node.provision_state in (states.DEPLOYING, states.RESCUING,\n states.CLEANING, states.INSPECTING):\n prepare_node_for_deploy(task)\n if not http_utils.is_http_boot_requested(task.node):\n super(SdflexPXEBoot, self).prepare_ramdisk(task, ramdisk_params)\n else:\n node = task.node\n # Label indicating a deploy or rescue operation being carried out\n # on the node, 'deploy' or 'rescue'. 
Unless the node is in a\n # rescue like state, the mode is set to 'deploy', indicating\n # deploy operation is being carried out.\n mode = deploy_utils.rescue_or_deploy_mode(node)\n\n http_info = http_utils.get_image_info(node, mode=mode)\n\n # NODE: Try to validate and fetch instance images only\n # if we are in DEPLOYING state.\n if node.provision_state == states.DEPLOYING:\n http_info.update(http_utils.get_instance_image_info(task))\n boot_mode_utils.sync_boot_mode(task)\n\n http_options = http_utils.build_http_config_options(task,\n http_info)\n http_options.update(ramdisk_params)\n http_config_template = deploy_utils.get_pxe_config_template(node)\n http_utils.create_http_config(task, http_options,\n http_config_template)\n manager_utils.node_set_boot_device(task, boot_devices.UEFIHTTP,\n persistent=False)\n if http_info:\n http_utils.cache_ramdisk_kernel(task, http_info)\n bfpv = str(task.node.driver_info.get('bfpv', 'false')).lower()\n if bfpv == 'true':\n node = task.node\n driver_internal_info = node.driver_internal_info\n driver_internal_info['bfpv_started'] = 'false'\n node.driver_internal_info = driver_internal_info\n node.save()", "def install_images_7_x():\n poap_log(\"Checking if bios upgrade is needed\")\n if is_bios_upgrade_needed():\n poap_log(\"Installing new BIOS (will take up to 5 minutes. Don't abort)\")\n install_bios()\n\n poap_log(\"Installing NXOS image\")\n\n system_image_path = os.path.join(options[\"destination_path\"],\n options[\"destination_system_image\"])\n system_image_path = system_image_path.replace(\"/bootflash\", \"bootflash:\", 1)\n\n try:\n poap_log(\"config terminal ; boot nxos %s\" % system_image_path)\n cli(\"config terminal ; boot nxos %s\" % system_image_path)\n except Exception as e:\n poap_log(\"Failed to set NXOS boot variable to %s\" % system_image_path)\n abort(str(e))\n\n command_successful = False\n timeout = 10 # minutes\n first_time = time.time()\n endtime = first_time + timeout * 60 # sec per min\n retry_delay = 30 # seconds\n while not command_successful:\n new_time = time.time()\n try:\n cli(\"copy running-config startup-config\")\n command_successful = True\n except SyntaxError:\n poap_log(\"WARNING: copy run to start failed\")\n if new_time > endtime:\n poap_log(\"ERROR: time out waiting for \\\"copy run start\\\" to complete successfully\")\n exit(-1)\n poap_log(\"WARNING: retry in 30 seconds\")\n time.sleep(retry_delay)\n\n poap_log(\"INFO: Configuration successful\")", "def prepare_instance(self, task):\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n boot_option = deploy_utils.get_boot_option(task.node)\n if boot_option != \"kickstart\":\n sdflex_common.update_secure_boot_mode(task, True)\n if not http_utils.is_http_boot_requested(task.node):\n if boot_option == \"kickstart\":\n prepare_node_for_deploy(task)\n super(SdflexPXEBoot, self).prepare_instance(task)\n else:\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_option = deploy_utils.get_boot_option(node)\n boot_device = None\n instance_image_info = {}\n if boot_option == \"ramdisk\":\n instance_image_info = http_utils.get_instance_image_info(task)\n http_utils.cache_ramdisk_kernel(task, instance_image_info)\n if deploy_utils.is_iscsi_boot(task) or boot_option == \"ramdisk\":\n http_utils.prepare_instance_http_config(\n task, instance_image_info,\n iscsi_boot=deploy_utils.is_iscsi_boot(task),\n ramdisk_boot=(boot_option == \"ramdisk\"))\n if 
http_utils.is_http_boot_requested(task.node):\n boot_device = boot_devices.UEFIHTTP\n else:\n boot_device = boot_devices.PXE\n elif boot_option != \"local\":\n if task.driver.storage.should_write_image(task):\n # Make sure that the instance kernel/ramdisk is cached.\n # This is for the takeover scenario for active nodes.\n instance_image_info = (\n http_utils.get_instance_image_info(task))\n http_utils.cache_ramdisk_kernel(task, instance_image_info)\n iwdi = (\n task.node.driver_internal_info.get('is_whole_disk_image'))\n try:\n root_uuid_or_disk_id = task.node.driver_internal_info[\n 'root_uuid_or_disk_id'\n ]\n except KeyError:\n if not task.driver.storage.should_write_image(task):\n pass\n elif not iwdi:\n LOG.warning(\"The UUID for the root partition can't be\"\n \" found, unable to switch the pxe config \"\n \"from deployment mode to service (boot) \"\n \"mode for node %(node)s\",\n {\"node\": task.node.uuid})\n else:\n LOG.warning(\"The disk id for the whole disk image \"\n \"can't be found, unable to switch the \"\n \"pxe config from deployment mode to \"\n \"service (boot) mode for node %(node)s. \"\n \"Booting the instance from disk.\",\n {\"node\": task.node.uuid})\n http_utils.clean_up_http_config(task)\n boot_device = boot_devices.DISK\n else:\n http_utils.build_service_http_config(task,\n instance_image_info,\n root_uuid_or_disk_id)\n if http_utils.is_http_boot_requested(task.node):\n boot_device = boot_devices.UEFIHTTP\n else:\n boot_device = boot_devices.PXE\n else:\n # If it's going to boot from the local disk, we don't need\n # PXE config files. They still need to be generated as part\n # of the prepare() because the deployment does PXE boot the\n # deploy ramdisk\n http_utils.clean_up_http_config(task)\n boot_device = boot_devices.DISK\n\n # NOTE(pas-ha) do not re-set boot device on ACTIVE nodes\n # during takeover\n if boot_device and task.node.provision_state != states.ACTIVE:\n persistent = True\n if node.driver_info.get('force_persistent_boot_device',\n 'Default') == 'Never':\n persistent = False\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=persistent)", "def create_base_image(driver,\n source_image_family='debian-10', source_image_project='debian-cloud',\n repository_url='https://github.com/google/android-cuttlefish.git',\n repository_branch='main', build_branch='aosp-master',\n build_target='aosp_cf_x86_phone-userdebug', build_id='',\n build_instance='halyard-build', build_zone='europe-west4-a', tags=[],\n dest_image='', dest_family='', image_disk='halyard-image-disk', respin=False):\n\n # SETUP\n \n build_node = utils.find_instance(driver, build_instance, build_zone)\n if build_node:\n driver.destroy_node(build_node)\n print('successfully deleted', build_instance)\n\n build_volume = utils.find_disk(driver, image_disk, build_zone)\n if build_volume:\n driver.destroy_volume(build_volume)\n print('successfully deleted', image_disk)\n\n\n # BUILD INSTANCE CREATION\n\n build_volume = driver.create_volume(\n 30, image_disk,\n location=build_zone,\n ex_image_family=source_image_family)\n\n print('built', source_image_family, 'disk')\n\n gpu_type='nvidia-tesla-p100-vws'\n gpu = utils.find_gpu(driver, gpu_type, build_zone)\n if not gpu:\n utils.fatal_error(f'Please use a zone with {gpu_type} GPUs available')\n\n build_node = driver.create_node(\n build_instance,\n 'n1-standard-16',\n None,\n location=build_zone,\n ex_image_family=source_image_family,\n ex_accelerator_type=gpu_type,\n ex_on_host_maintenance='TERMINATE',\n ex_accelerator_count=1,\n 
ex_service_accounts=[{'scopes':['storage-ro']}],\n ex_disk_size=30,\n ex_tags=tags)\n print('successfully created', build_instance)\n\n utils.wait_for_instance(build_instance, build_zone)\n\n driver.attach_volume(build_node, build_volume)\n\n src_files = ['create_base_image_gce.sh', 'download_artifacts.sh']\n src_files = [PATH + '/' + file for file in src_files]\n src = ' '.join(list(map(str,src_files)))\n\n os.system(f'gcloud compute scp {src} {build_instance}: \\\n --zone={build_zone}')\n\n\n # IMAGE CREATION\n\n os.system(f'gcloud compute ssh --zone={build_zone} \\\n {build_instance} -- ./create_base_image_gce.sh \\\n {repository_url} {repository_branch} \\\n {build_branch} {build_target} {build_id}')\n\n dest_names = get_dest_names(\n build_branch, build_target, build_id,\n build_instance, build_zone, dest_image, dest_family)\n \n dest_image = dest_names['dest_image']\n dest_family = dest_names['dest_family']\n\n try:\n build_image = driver.ex_get_image(dest_image)\n except:\n build_image = None\n\n if build_image:\n if respin:\n driver.ex_delete_image(build_image)\n else:\n utils.fatal_error(f'''Image {dest_image} already exists.\n (To replace run with flag --respin)''')\n\n driver.destroy_node(build_node)\n\n driver.ex_create_image(\n dest_image,\n build_volume,\n ex_licenses=['https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx'],\n family=dest_family\n )\n\n print(f'Created image {dest_image} in {dest_family} family')\n\n driver.destroy_volume(build_volume)\n\n return {\"name\": dest_image, \"family\": dest_family}", "def prepare_disk_template(disk_image):\n LOGGER.info(' Cleaning up disk image %s', disk_image)\n image_cleanup_command = [\n 'virt-sysprep',\n '--format=qcow2',\n '--selinux-relabel',\n '--add=' + disk_image,\n ]\n call(image_cleanup_command)\n\n LOGGER.info(' Sparsifying image %s', disk_image)\n image_sparse_command = [\n 'virt-sparsify',\n '--format=qcow2',\n '--in-place',\n disk_image,\n ]\n call(image_sparse_command)", "def prepare_ramdisk(self, task, ramdisk_params):\n node = task.node\n\n # Label indicating a deploy or rescue operation being carried out on\n # the node, 'deploy' or 'rescue'. Unless the node is in a rescue like\n # state, the mode is set to 'deploy', indicating deploy operation is\n # being carried out.\n mode = deploy_utils.rescue_or_deploy_mode(node)\n\n if self.ipxe_enabled:\n # NOTE(mjturek): At this point, the ipxe boot script should\n # already exist as it is created at startup time. However, we\n # call the boot script create method here to assert its\n # existence and handle the unlikely case that it wasn't created\n # or was deleted.\n pxe_utils.create_ipxe_boot_script()\n\n # Generate options for both IPv4 and IPv6, and they can be\n # filtered down later based upon the port options.\n # TODO(TheJulia): This should be re-tooled during the Victoria\n # development cycle so that we call a single method and return\n # combined options. 
The method we currently call is relied upon\n # by two eternal projects, to changing the behavior is not ideal.\n dhcp_opts = pxe_utils.dhcp_options_for_instance(\n task, ipxe_enabled=self.ipxe_enabled, ip_version=4)\n dhcp_opts += pxe_utils.dhcp_options_for_instance(\n task, ipxe_enabled=self.ipxe_enabled, ip_version=6)\n provider = dhcp_factory.DHCPFactory()\n provider.update_dhcp(task, dhcp_opts)\n\n pxe_info = pxe_utils.get_image_info(node, mode=mode,\n ipxe_enabled=self.ipxe_enabled)\n\n # NODE: Try to validate and fetch instance images only\n # if we are in DEPLOYING state.\n if node.provision_state == states.DEPLOYING:\n pxe_info.update(\n pxe_utils.get_instance_image_info(\n task, ipxe_enabled=self.ipxe_enabled))\n\n boot_mode_utils.sync_boot_mode(task)\n\n pxe_options = pxe_utils.build_pxe_config_options(\n task, pxe_info, ipxe_enabled=self.ipxe_enabled,\n ramdisk_params=ramdisk_params)\n # TODO(dtantsur): backwards compability hack, remove in the V release\n if ramdisk_params.get(\"ipa-api-url\"):\n pxe_options[\"ipa-api-url\"] = ramdisk_params[\"ipa-api-url\"]\n\n if self.ipxe_enabled:\n pxe_config_template = deploy_utils.get_ipxe_config_template(node)\n else:\n pxe_config_template = deploy_utils.get_pxe_config_template(node)\n\n pxe_utils.create_pxe_config(task, pxe_options,\n pxe_config_template,\n ipxe_enabled=self.ipxe_enabled)\n manager_utils.node_set_boot_device(task, boot_devices.PXE,\n persistent=False)\n\n if self.ipxe_enabled and CONF.pxe.ipxe_use_swift:\n kernel_label = '%s_kernel' % mode\n ramdisk_label = '%s_ramdisk' % mode\n pxe_info.pop(kernel_label, None)\n pxe_info.pop(ramdisk_label, None)\n\n if pxe_info:\n pxe_utils.cache_ramdisk_kernel(task, pxe_info,\n ipxe_enabled=self.ipxe_enabled)\n\n LOG.debug('Ramdisk (i)PXE boot for node %(node)s has been prepared '\n 'with kernel params %(params)s',\n {'node': node.uuid, 'params': pxe_options})", "def _prepare_iso_image_on_host(self, context, instance, local_img_dir,\n host_dir, image_meta):\n pvm_op = self.pvm_operator\n\n # Check if media library exists. 
Error out if it's not there.\n if not pvm_op.check_media_library_exists():\n LOG.error(_(\"There is no media library\"))\n raise exception.IBMPowerVMMediaLibraryNotAvailable()\n\n # Create vopt with name being the glance ID/UUID.\n vopt_img_name = image_meta['id']\n if not pvm_op.check_vopt_exists(name=vopt_img_name):\n\n # check for space within CONF.powervm_img_local_path\n self._check_space(local_img_dir, image_meta['size'])\n\n # It's not VIOS yet:\n # Check available space in IVM staging area and media repository\n free_space = pvm_op.get_vopt_size()\n free_padmin_space = pvm_op.get_staging_size(host_dir)\n\n free_rep_mb = int(free_space[0])\n free_staging_mb = int(free_padmin_space[0]) / 1024\n iso_size_mb = image_meta['size'] / (1024 * 1024)\n\n LOG.debug(\"Free repository space: %s MB\" % free_rep_mb)\n LOG.debug(\"Free VIOS staging space: %s MB\" % free_staging_mb)\n LOG.debug(\"ISO file size: %s MB\" % iso_size_mb)\n\n if iso_size_mb > free_rep_mb:\n raise common_ex.IBMPowerVMMediaRepOutOfSpace(\n size=iso_size_mb, free=free_rep_mb)\n\n if iso_size_mb > free_staging_mb:\n raise exception.IBMPowerVMStagingAreaOutOfSpace(\n dir=host_dir, size=iso_size_mb, free=free_staging_mb)\n\n # Fetch ISO from Glance\n\n file_path = '%s/%s.%s.%s' % (local_img_dir,\n image_meta['id'],\n CONF.host,\n image_meta['disk_format'])\n LOG.debug(\"Fetching image '%(img-name)s' from glance \"\n \"to %(img-path)s\" %\n {'img-name': image_meta['name'],\n 'img-path': file_path})\n images.fetch(context, image_meta['id'], file_path,\n instance['user_id'],\n instance['project_id'])\n if (os.path.isfile(file_path)):\n # transfer ISO to VIOS\n try:\n\n iso_remote_path, iso_size = self.\\\n _copy_image_file(file_path,\n host_dir,\n context,\n image_meta)\n\n self.pvm_operator.create_vopt_device(\n vopt_img_name, iso_remote_path)\n # The create vopt device command copies\n # the ISO into the media repository. The file at\n # iso_remote_path can be removed now.\n try:\n self.pvm_operator.remove_ovf_env_iso(\n iso_remote_path)\n except (exception.IBMPowerVMCommandFailed, Exception) as e:\n msg = (_('Error cleaning up ISO:'\n ' %(iso_remote_path)s') %\n locals())\n LOG.exception(msg)\n raise e\n finally:\n os.remove(file_path)\n else:\n raise exception.IBMPowerVMISOFileNotFound(\n ISO_file=file_path)\n else:\n LOG.debug(\"Image with id %s already on VIOS.\" % image_meta['id'])\n\n return vopt_img_name", "def _prepare_iso_image_on_host(self, context, instance, local_img_dir,\n host_dir, image_meta):\n pvm_op = self.pvm_operator\n\n # Check if media library exists. 
Error out if it's not there.\n if not pvm_op.check_media_library_exists():\n LOG.error(_(\"There is no media library\"))\n raise exception.IBMPowerVMMediaLibraryNotAvailable()\n\n # Create vopt with name being the glance ID/UUID.\n vopt_img_name = image_meta['id']\n if not pvm_op.check_vopt_exists(name=vopt_img_name):\n # check for space within CONF.powervm_img_local_path\n self._check_space(local_img_dir, image_meta['size'])\n\n # It's not VIOS yet:\n # Check available space in IVM staging area and media repository\n free_space = pvm_op.get_vopt_size()\n free_padmin_space = pvm_op.get_staging_size(host_dir)\n\n free_rep_mb = int(free_space[0])\n free_staging_mb = int(free_padmin_space[0]) / 1024\n iso_size_mb = image_meta['size'] / (1024 * 1024)\n\n LOG.debug(\"Free repository space: %s MB\" % free_rep_mb)\n LOG.debug(\"Free VIOS staging space: %s MB\" % free_staging_mb)\n LOG.debug(\"ISO file size: %s MB\" % iso_size_mb)\n\n if iso_size_mb > free_rep_mb:\n raise common_ex.IBMPowerVMMediaRepOutOfSpace(\n size=iso_size_mb, free=free_rep_mb)\n\n if iso_size_mb > free_staging_mb:\n raise exception.IBMPowerVMStagingAreaOutOfSpace(\n dir=host_dir, size=iso_size_mb, free=free_staging_mb)\n\n # Fetch ISO from Glance\n\n file_path = '%s/%s.%s.%s' % (local_img_dir,\n image_meta['id'],\n CONF.host,\n image_meta['disk_format'])\n LOG.debug(\"Fetching image '%(img-name)s' from glance \"\n \"to %(img-path)s\" %\n {'img-name': image_meta['name'],\n 'img-path': file_path})\n images.fetch(context, image_meta['id'], file_path,\n instance['user_id'],\n instance['project_id'])\n if (os.path.isfile(file_path)):\n # transfer ISO to VIOS\n try:\n\n iso_remote_path, iso_size = self.copy_image_file(file_path,\n host_dir)\n\n self.pvm_operator.create_vopt_device(\n vopt_img_name, iso_remote_path)\n # The create vopt device command copies\n # the ISO into the media repository. 
The file at\n # iso_remote_path can be removed now.\n try:\n self.pvm_operator.remove_ovf_env_iso(iso_remote_path)\n except Exception:\n msg = (_('Error cleaning up ISO:'\n ' %(iso_remote_path)s') %\n locals())\n LOG.exception(msg)\n finally:\n os.remove(file_path)\n else:\n raise exception.IBMPowerVMISOFileNotFound(\n ISO_file=file_path)\n else:\n LOG.debug(\"Image with id %s already on VIOS.\"\n % image_meta['id'])\n\n return vopt_img_name", "def create_iso(iso_name, archive_dir):\n try:\n controller_0 = sysinv_api.get_host_data('controller-0')\n except Exception as e:\n e_log = \"Failed to retrieve controller-0 inventory details.\"\n LOG.exception(e_log)\n raise CloneFail(e_log)\n\n iso_dir = os.path.join(archive_dir, 'isolinux')\n clone_archive_dir = os.path.join(iso_dir, CLONE_ARCHIVE_DIR)\n output = None\n tmpdir = None\n total_steps = 6\n step = 1\n print (\"\\nCreating ISO:\")\n\n # Add the correct kick-start file to the image\n ks_file = \"controller_ks.cfg\"\n if utils.get_system_type() == si_const.TIS_AIO_BUILD:\n if si_const.LOWLATENCY in tsconfig.subfunctions:\n ks_file = \"smallsystem_lowlatency_ks.cfg\"\n else:\n ks_file = \"smallsystem_ks.cfg\"\n\n try:\n # prepare the iso files\n images_dir = os.path.join(iso_dir, 'images')\n os.mkdir(images_dir, 0o644)\n pxe_dir = os.path.join('/pxeboot',\n 'rel-' + tsconfig.SW_VERSION)\n os.symlink(pxe_dir + '/installer-bzImage',\n iso_dir + '/vmlinuz')\n os.symlink(pxe_dir + '/installer-initrd',\n iso_dir + '/initrd.img')\n utils.progress(total_steps, step, 'preparing files', 'DONE')\n step += 1\n\n feed_dir = os.path.join('/www', 'pages', 'feed',\n 'rel-' + tsconfig.SW_VERSION)\n os.symlink(feed_dir + '/Packages', iso_dir + '/Packages')\n os.symlink(feed_dir + '/repodata', iso_dir + '/repodata')\n os.symlink(feed_dir + '/LiveOS', iso_dir + '/LiveOS')\n shutil.copy2(feed_dir + '/isolinux.cfg', iso_dir)\n update_bootloader_default(iso_dir + '/isolinux.cfg', controller_0)\n shutil.copyfile('/usr/share/syslinux/isolinux.bin',\n iso_dir + '/isolinux.bin')\n os.symlink('/usr/share/syslinux/vesamenu.c32',\n iso_dir + '/vesamenu.c32')\n for filename in glob.glob(os.path.join(feed_dir, '*ks.cfg')):\n shutil.copy(os.path.join(feed_dir, filename), iso_dir)\n utils.progress(total_steps, step, 'preparing files', 'DONE')\n step += 1\n\n efiboot_dir = os.path.join(iso_dir, 'EFI', 'BOOT')\n os.makedirs(efiboot_dir, 0o644)\n l_efi_dir = os.path.join('/boot', 'efi', 'EFI')\n shutil.copy2(l_efi_dir + '/BOOT/BOOTX64.EFI', efiboot_dir)\n shutil.copy2(l_efi_dir + '/centos/MokManager.efi', efiboot_dir)\n shutil.copy2(l_efi_dir + '/centos/grubx64.efi', efiboot_dir)\n shutil.copy2('/pxeboot/EFI/grub.cfg', efiboot_dir)\n update_bootloader_default(efiboot_dir + '/grub.cfg', controller_0)\n shutil.copytree(l_efi_dir + '/centos/fonts',\n efiboot_dir + '/fonts')\n # copy EFI boot image and update the grub.cfg file\n efi_img = images_dir + '/efiboot.img'\n shutil.copy2(pxe_dir + '/efiboot.img', efi_img)\n tmpdir = tempfile.mkdtemp(dir=archive_dir)\n output = subprocess.check_output(\n [\"mount\", \"-t\", \"vfat\", \"-o\", \"loop\",\n efi_img, tmpdir],\n stderr=subprocess.STDOUT)\n # replace the grub.cfg file with the updated file\n efi_grub_f = os.path.join(tmpdir, 'EFI', 'BOOT', 'grub.cfg')\n os.remove(efi_grub_f)\n shutil.copy2(efiboot_dir + '/grub.cfg', efi_grub_f)\n subprocess.call(['umount', tmpdir])\n shutil.rmtree(tmpdir, ignore_errors=True)\n tmpdir = None\n\n epoch_time = \"%.9f\" % time.time()\n disc_info = [epoch_time, tsconfig.SW_VERSION, \"x86_64\"]\n 
with open(iso_dir + '/.discinfo', 'w') as f:\n f.write('\\n'.join(disc_info))\n\n # copy the latest install_clone executable\n shutil.copy2('/usr/bin/install_clone', iso_dir)\n subprocess.check_output(\"cat /pxeboot/post_clone_iso_ks.cfg >> \" +\n iso_dir + \"/\" + ks_file, shell=True)\n utils.progress(total_steps, step, 'preparing files', 'DONE')\n step += 1\n\n # copy patches\n iso_patches_dir = os.path.join(iso_dir, 'patches')\n iso_patch_repo_dir = os.path.join(iso_patches_dir, 'repodata')\n iso_patch_pkgs_dir = os.path.join(iso_patches_dir, 'Packages')\n iso_patch_metadata_dir = os.path.join(iso_patches_dir, 'metadata')\n iso_patch_applied_dir = os.path.join(iso_patch_metadata_dir, 'applied')\n iso_patch_committed_dir = os.path.join(iso_patch_metadata_dir,\n 'committed')\n\n os.mkdir(iso_patches_dir, 0o755)\n os.mkdir(iso_patch_repo_dir, 0o755)\n os.mkdir(iso_patch_pkgs_dir, 0o755)\n os.mkdir(iso_patch_metadata_dir, 0o755)\n os.mkdir(iso_patch_applied_dir, 0o755)\n os.mkdir(iso_patch_committed_dir, 0o755)\n\n repodata = '/www/pages/updates/rel-%s/repodata/' % tsconfig.SW_VERSION\n pkgsdir = '/www/pages/updates/rel-%s/Packages/' % tsconfig.SW_VERSION\n patch_applied_dir = '/opt/patching/metadata/applied/'\n patch_committed_dir = '/opt/patching/metadata/committed/'\n subprocess.check_call(['rsync', '-a', repodata,\n '%s/' % iso_patch_repo_dir])\n if os.path.exists(pkgsdir):\n subprocess.check_call(['rsync', '-a', pkgsdir,\n '%s/' % iso_patch_pkgs_dir])\n if os.path.exists(patch_applied_dir):\n subprocess.check_call(['rsync', '-a', patch_applied_dir,\n '%s/' % iso_patch_applied_dir])\n if os.path.exists(patch_committed_dir):\n subprocess.check_call(['rsync', '-a', patch_committed_dir,\n '%s/' % iso_patch_committed_dir])\n utils.progress(total_steps, step, 'preparing files', 'DONE')\n step += 1\n\n create_ini_file(clone_archive_dir, iso_name)\n\n os.chmod(iso_dir + '/isolinux.bin', 0o664)\n iso_file = os.path.join(archive_dir, iso_name + \".iso\")\n output = subprocess.check_output(\n [\"nice\", \"mkisofs\",\n \"-o\", iso_file, \"-R\", \"-D\",\n \"-A\", \"oe_iso_boot\", \"-V\", \"oe_iso_boot\",\n \"-f\", \"-quiet\",\n \"-b\", \"isolinux.bin\", \"-c\", \"boot.cat\", \"-no-emul-boot\",\n \"-boot-load-size\", \"4\", \"-boot-info-table\",\n \"-eltorito-alt-boot\", \"-e\", \"images/efiboot.img\",\n \"-no-emul-boot\",\n iso_dir],\n stderr=subprocess.STDOUT)\n LOG.info(\"{} created: [{}]\".format(iso_file, output))\n utils.progress(total_steps, step, 'iso created', 'DONE')\n step += 1\n\n output = subprocess.check_output(\n [\"nice\", \"isohybrid\",\n \"--uefi\",\n iso_file],\n stderr=subprocess.STDOUT)\n LOG.debug(\"isohybrid: {}\".format(output))\n\n output = subprocess.check_output(\n [\"nice\", \"implantisomd5\",\n iso_file],\n stderr=subprocess.STDOUT)\n LOG.debug(\"implantisomd5: {}\".format(output))\n utils.progress(total_steps, step, 'checksum implanted', 'DONE')\n print(\"Cloned iso image created: {}\".format(iso_file))\n\n except Exception as e:\n LOG.exception(e)\n e_log = \"ISO creation ({}) failed\".format(iso_name)\n if output:\n e_log += ' [' + output + ']'\n LOG.error(e_log)\n raise CloneFail(\"ISO creation failed.\")\n\n finally:\n if tmpdir:\n subprocess.call(['umount', tmpdir], stderr=DEVNULL)\n shutil.rmtree(tmpdir, ignore_errors=True)", "def prepare_ramdisk(self, task, ramdisk_params):\n if task.node.provision_state in (states.DEPLOYING, states.RESCUING,\n states.CLEANING, states.INSPECTING):\n node = task.node\n d_info = redfish_boot._parse_driver_info(node)\n # Label 
indicating a deploy or rescue operation being carried out\n # on the node, 'deploy' or 'rescue'. Unless the node is in a\n # rescue like state, the mode is set to 'deploy', indicating\n # deploy operation is being carried out.\n\n mode = deploy_utils.rescue_or_deploy_mode(node)\n\n iso_ref = image_utils.prepare_deploy_iso(task, ramdisk_params,\n mode, d_info)\n node.driver_internal_info.update({'deploy_boot_iso': iso_ref})\n\n sdflex_common.set_network_setting_dhcpless_boot(node, iso_ref)\n boot_mode_utils.sync_boot_mode(task)\n manager_utils.node_set_boot_device(task, boot_devices.UEFIHTTP,\n persistent=False)", "def prepare_ramdisk(self, task, ramdisk_params):\n node = task.node\n remote_server_data = {}\n remote_image_server = node.driver_info.get('remote_image_server')\n remote_image_share_root = node.driver_info.get(\n 'remote_image_share_root')\n remote_server_data['remote_image_share_type'] = (\n node.driver_info.get('remote_image_share_type'))\n remote_server_data['remote_image_user_name'] = (\n node.driver_info.get('remote_image_user_name', None))\n remote_server_data['remote_image_user_password'] = (\n node.driver_info.get('remote_image_user_password', None))\n\n # NOTE(TheJulia): If this method is being called by something\n # aside from deployment, clean and rescue, such as conductor takeover,\n # we should treat this as a no-op and move on otherwise we would\n # modify the state of the node due to virtual media operations.\n if node.provision_state not in (states.DEPLOYING,\n states.CLEANING,\n states.RESCUING,\n states.INSPECTING):\n return\n\n # NOTE(TheJulia): Since we're deploying, cleaning, or rescuing,\n # with virtual media boot, we should generate a token!\n manager_utils.add_secret_token(node, pregenerated=True)\n node.save()\n ramdisk_params['ipa-agent-token'] = (\n node.driver_internal_info['agent_secret_token'])\n\n manager_utils.node_power_action(task, states.POWER_OFF)\n\n deploy_nic_mac = deploy_utils.get_single_nic_with_vif_port_id(task)\n ramdisk_params['BOOTIF'] = deploy_nic_mac\n if CONF.debug and 'ipa-debug' not in ramdisk_params:\n ramdisk_params['ipa-debug'] = '1'\n\n mode = deploy_utils.rescue_or_deploy_mode(node)\n iso_ref = self._prepare_deploy_iso(task, ramdisk_params, mode)\n\n url = (remote_server_data['remote_image_share_type'] + \"://\" +\n remote_image_server + \"/\" + remote_image_share_root + \"/\" +\n iso_ref)\n\n sdflex_common.eject_vmedia(task,\n vmedia_device)\n sdflex_common.insert_vmedia(task, url,\n vmedia_device,\n remote_server_data)\n\n boot_mode_utils.sync_boot_mode(task)\n\n self._set_boot_device(task, boot_devices.CD.value.lower())\n\n LOG.debug(\"Node %(node)s is set to one time boot from \"\n \"%(device)s\", {'node': task.node.uuid,\n 'device': boot_devices.CD})", "def prepare_instance(self, task):\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_device = None\n boot_option = deploy_utils.get_boot_option(node)\n if boot_option != \"kickstart\":\n boot_mode_utils.configure_secure_boot_if_needed(task)\n\n instance_image_info = {}\n if boot_option == \"ramdisk\" or boot_option == \"kickstart\":\n instance_image_info = pxe_utils.get_instance_image_info(\n task, ipxe_enabled=self.ipxe_enabled)\n pxe_utils.cache_ramdisk_kernel(task, instance_image_info,\n ipxe_enabled=self.ipxe_enabled)\n if 'ks_template' in instance_image_info:\n ks_cfg = pxe_utils.validate_kickstart_template(\n instance_image_info['ks_template'][1]\n )\n pxe_utils.validate_kickstart_file(ks_cfg)\n\n if (deploy_utils.is_iscsi_boot(task) or boot_option 
== \"ramdisk\"\n or boot_option == \"kickstart\"):\n pxe_utils.prepare_instance_pxe_config(\n task, instance_image_info,\n iscsi_boot=deploy_utils.is_iscsi_boot(task),\n ramdisk_boot=(boot_option == \"ramdisk\"),\n anaconda_boot=(boot_option == \"kickstart\"),\n ipxe_enabled=self.ipxe_enabled)\n pxe_utils.prepare_instance_kickstart_config(\n task, instance_image_info,\n anaconda_boot=(boot_option == \"kickstart\"))\n boot_device = boot_devices.PXE\n\n else:\n # NOTE(dtantsur): create a PXE configuration as a safety net for\n # hardware uncapable of persistent boot. If on a reboot it will try\n # to boot from PXE, this configuration will return it back.\n if CONF.pxe.enable_netboot_fallback:\n pxe_utils.build_service_pxe_config(\n task, instance_image_info,\n task.node.driver_internal_info.get('root_uuid_or_disk_id'),\n ipxe_enabled=self.ipxe_enabled,\n # PXE config for whole disk images is identical to what\n # we need to boot from local disk, so use True even\n # for partition images.\n is_whole_disk_image=True)\n else:\n # Clean up the deployment configuration\n pxe_utils.clean_up_pxe_config(\n task, ipxe_enabled=self.ipxe_enabled)\n boot_device = boot_devices.DISK\n\n # NOTE(pas-ha) do not re-set boot device on ACTIVE nodes\n # during takeover\n if boot_device and task.node.provision_state != states.ACTIVE:\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=True)", "def build_undercloud_on_libvirt(self, image_path,\n rhsm=None, repositories=[]):\n self.run('sysctl net.ipv4.ip_forward=1')\n self.fetch_image(path=image_path, dest='/home/stack/guest_image.qcow2',\n user='stack')\n # NOTE(Gonéri): this is a hack for our OpenStack, the MTU of its outgoing route\n # is 1400 and libvirt do not provide a mechanism to adjust the guests MTU.\n self.run(\"LIBGUESTFS_BACKEND=direct virt-customize -a /home/stack/guest_image.qcow2 --run-command 'echo MTU=\\\"1400\\\" >> /etc/sysconfig/network-scripts/ifcfg-eth0'\")\n\n env = Environment()\n env.loader = FileSystemLoader(pkg_data_filename('template'))\n template = env.get_template('virt-setup-env.j2')\n self.run('mkdir -p /home/stack/DIB', user='stack')\n self.run('cp -v /etc/yum.repos.d/*.repo /home/stack/DIB', user='stack')\n # NOTE(Gonéri): Hack to be sure DIB won't complain because of missing gpg files\n # self.run('sed -i \"s,gpgcheck=1,gpgcheck=0,\" /home/stack/DIB/*.repo', user='stack')\n dib_yum_repo_conf = self.run('find /home/stack/DIB -type f', user='stack')[0].split()\n virt_setup_template = {\n 'dib_yum_repo_conf': dib_yum_repo_conf,\n 'node': {\n 'count': 2,\n 'mem': 6144,\n 'cpu': 2\n },\n 'undercloud_node_mem': 8192,\n 'guest_image_name': '/home/stack/guest_image.qcow2'\n }\n\n if rhsm is not None:\n virt_setup_template['rhsm'] = {\n 'login': rhsm.get('login'),\n 'password': rhsm.get('password', os.environ.get('RHN_PW')),\n 'pool_id': rhsm.get('pool_id', ''),\n 'repositories': [i['name'] for i in repositories if i['type'] == 'rhsm_channel']\n }\n virt_setup_env = template.render(virt_setup_template)\n self.create_file('virt-setup-env', virt_setup_env, user='stack')\n self.run('virsh destroy instack', ignore_error=True)\n self.run('virsh undefine instack --remove-all-storage', ignore_error=True)\n self.run('source virt-setup-env; instack-virt-setup', user='stack')\n undercloud_ip = self.run(\n '/sbin/ip n | grep $(tripleo get-vm-mac instack) | awk \\'{print $1;}\\'',\n user='stack')[0]\n assert undercloud_ip, 'undercloud should have an IP'\n undercloud = Undercloud(hostname=undercloud_ip,\n via_ip=self.hostname,\n 
user='root',\n key_filename=self._key_filename)\n return undercloud", "def install_images():\n kickstart_path = os.path.join(options[\"destination_path\"],\n options[\"destination_kickstart_image\"])\n kickstart_path = kickstart_path.replace(\"/bootflash\", \"bootflash:\", 1)\n\n system_path = os.path.join(options[\"destination_path\"],\n options[\"destination_system_image\"])\n system_path = system_path.replace(\"/bootflash\", \"bootflash:\", 1)\n\n poap_log(\"Installing kickstart and system images\")\n poap_log(\"######### Copying the boot variables ##########\")\n cli(\"config terminal ; boot kickstart %s\" % kickstart_path)\n cli(\"config terminal ; boot system %s\" % system_path)\n\n command_successful = False\n timeout = 10 # minutes\n first_time = time.time()\n endtime = first_time + timeout * 60 # sec per min\n retry_delay = 30 # seconds\n while not command_successful:\n new_time = time.time()\n try:\n cli(\"copy running-config startup-config\")\n command_successful = True\n except SyntaxError:\n poap_log(\"WARNING: copy run to start failed\")\n if new_time > endtime:\n poap_log(\"ERROR: time out waiting for \\\"copy run start\\\" to complete successfully\")\n exit(-1)\n poap_log(\"WARNING: retry in 30 seconds\")\n time.sleep(retry_delay)\n\n poap_log(\"INFO: Configuration successful\")\n\n if multi_step_install == True:\n cli(\"config terminal ; terminal dont-ask ; write erase\")\n poap_log(\"Midway image copy/setting done\")\n exit(0)\n else:\n poap_log(\"Multi-level install not set, installed images\")", "def create_custom_iso_image_redhat(os_type, server, config, kickstart_file):\n if os_type == \"rhel7\":\n rhel_iso_filename = config[\"OS_image_name\"]\n if not os.path.isfile(kickstart_file):\n print(\"Kickstart file is not present for RHEL installation\")\n return False \t\n else:\n print(\"Installation OS type {} is not supported\".format(os_type))\n return False\n destination_folder = config[\"HTTP_file_path\"]\n\n print(\"Creating modified installation file for RHEL Installation\")\n image_url = config[\"HTTP_server_base_url\"] + rhel_iso_filename\n file_presence = is_iso_file_present(image_url)\n if not file_presence:\n print(\"ISO file is not present in the given http location. 
Please check the http location and then try again.\")\n return False\n\n val = is_iso_image(rhel_iso_filename)\n if val:\n if os_type == \"rhel7\":\n base_iso_image_path = config[\"HTTP_file_path\"]\n filepath = base_iso_image_path + rhel_iso_filename\n server_serial_number = server[\"Server_serial_number\"]\n\n temppath = \"/tmp/\" + \"redhatmount_\" + server_serial_number + \"/\"\n mount_path = \"/tmp/\" + \"redhatorig_\" + server_serial_number\n\n kickstart_filepath = temppath + \"ks.cfg\"\n\n mount_proc_id = mount_iso_image(filepath, mount_path)\n if mount_proc_id == 0:\n print(\"Successfully mounted the image {}\".format(rhel_iso_filename))\n else:\n print(\"Attempting to unmount the previously mounted image\")\n umount_id = unmount_iso_image(mount_path)\n mount_proc_id = mount_iso_image(filepath, mount_path)\n if mount_proc_id == 0:\n print(\"Successfully unmounted the previously mounted image\") \n else:\n print(\"Failed to mount the image {}\".format(rhel_iso_filename))\n return False\n\n copy_iso_contents(mount_path, temppath)\n kickstart_status = create_kickstart_file_for_redhat(kickstart_filepath, kickstart_file, server)\n \n if(kickstart_status and os.path.isfile(kickstart_filepath)):\n redhat_label = update_ks_file_location_redhat_iso_efi(temppath + \"EFI/BOOT/\")\n redhat_label = redhat_label.replace(\"\\\\x20\",\" \")\n print(redhat_label)\n update_ks_file_location_redhat_iso_legacy(temppath + \"isolinux/\")\n \n destination_filename = get_custom_image_name(os_type, server_serial_number) \n \n recreate_iso_proc_id = rebuild_iso_redhat_image(temppath, destination_folder, destination_filename, redhat_label)\n if recreate_iso_proc_id.returncode == 0:\n print(\"Successfully re-created the iso image for server {} after modifying the content\".format(server_serial_number))\n status = True\n else:\n print(\"Error in recreating the iso image for server {} after modifying the content\".format(server_serial_number))\n status = False\n \n umount_proc_id = unmount_iso_image(mount_path)\n if umount_proc_id == 0:\n print(\"Successfully unmounted the iso image\")\n else:\n print(\"Error in umounting the iso image\") \n\n delete_temp_folder(temppath)\n return status\n else:\n print(\"Error in fetching custom kickstart file {}\".format(kickstart_file))\n return status\n else:\n print(\"File type is not supported\")\n return False\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepare boot ISO image. Build bootable ISO out of `[instance_info]/kernel`, `[instance_info]/ramdisk` and `[driver_info]/bootloader` if present. Otherwise, read `kernel_id` and `ramdisk_id` from `[instance_info]/image_source` Glance image metadata. Push produced ISO image up to Glance and return temporary Swift URL to the image.
def _prepare_boot_iso(self, task, root_uuid=None): node = task.node d_info = redfish_boot._parse_deploy_info(node) kernel_href = node.instance_info.get('kernel') ramdisk_href = node.instance_info.get('ramdisk') if not kernel_href or not ramdisk_href: image_href = d_info['image_source'] image_properties = ( images.get_image_properties( task.context, image_href, ['kernel_id', 'ramdisk_id'])) if not kernel_href: kernel_href = image_properties.get('kernel_id') if not ramdisk_href: ramdisk_href = image_properties.get('ramdisk_id') if not kernel_href or not ramdisk_href: raise exception.InvalidParameterValue(_( "Unable to find kernel or ramdisk for " "to generate boot ISO for %(node)s") % {'node': task.node.uuid}) bootloader_href = d_info.get('bootloader') return self._prepare_iso_image( task, kernel_href, ramdisk_href, bootloader_href, root_uuid=root_uuid)
[ "def _prepare_iso_image(self, task, kernel_href, ramdisk_href,\n bootloader_href=None, configdrive=None,\n root_uuid=None, params=None):\n if not kernel_href or not ramdisk_href:\n raise exception.InvalidParameterValue(_(\n \"Unable to find kernel or ramdisk for \"\n \"building ISO for %(node)s\") %\n {'node': task.node.uuid})\n\n i_info = task.node.instance_info\n driver_info = task.node.driver_info\n if driver_info.get('remote_image_share_type') == 'nfs':\n image_share_root = driver_info.get('remote_image_share_root')\n else:\n image_share_root = driver_info.get('image_share_root')\n if deploy_utils.get_boot_option(task.node) == \"ramdisk\":\n kernel_params = \"root=/dev/ram0 text \"\n kernel_params += i_info.get(\"ramdisk_kernel_arguments\", \"\")\n\n else:\n kernel_params = i_info.get('kernel_append_params', \"\")\n\n if params:\n kernel_params = ' '.join(\n (kernel_params, ' '.join(\n '%s=%s' % kv for kv in params.items())))\n\n boot_mode = boot_mode_utils.get_boot_mode_for_deploy(task.node)\n\n LOG.debug(\"Trying to create %(boot_mode)s ISO image for node %(node)s \"\n \"with kernel %(kernel_href)s, ramdisk %(ramdisk_href)s, \"\n \"bootloader %(bootloader_href)s and kernel params %(params)s\"\n \"\", {'node': task.node.uuid,\n 'boot_mode': boot_mode,\n 'kernel_href': kernel_href,\n 'ramdisk_href': ramdisk_href,\n 'bootloader_href': bootloader_href,\n 'params': kernel_params})\n\n with tempfile.NamedTemporaryFile(\n dir=CONF.tempdir, suffix='.iso') as boot_fileobj:\n\n with tempfile.NamedTemporaryFile(\n dir=CONF.tempdir, suffix='.img') as cfgdrv_fileobj:\n\n configdrive_href = configdrive\n\n if configdrive:\n parsed_url = urlparse.urlparse(configdrive)\n if not parsed_url.scheme:\n cfgdrv_blob = base64.decode_as_bytes(configdrive)\n\n with open(cfgdrv_fileobj.name, 'wb') as f:\n f.write(cfgdrv_blob)\n\n configdrive_href = urlparse.urlunparse(\n ('file', '', cfgdrv_fileobj.name, '', '', ''))\n\n LOG.info(\"Burning configdrive %(url)s to boot ISO image \"\n \"for node %(node)s\", {'url': configdrive_href,\n 'node': task.node.uuid})\n boot_iso_tmp_file = boot_fileobj.name\n\n images.create_boot_iso(\n task.context, boot_iso_tmp_file,\n kernel_href, ramdisk_href,\n esp_image_href=bootloader_href,\n root_uuid=root_uuid,\n kernel_params=kernel_params,\n boot_mode=boot_mode)\n iso_object_name = self._get_iso_image_name(task.node)\n\n image_url = self._publish_image(\n boot_iso_tmp_file, iso_object_name, image_share_root)\n\n LOG.debug(\"Created ISO %(name)s in NFS/CIFS for node %(node)s, \"\n \"exposed as temporary URL \"\n \"%(url)s\", {'node': task.node.uuid,\n 'name': iso_object_name,\n 'url': image_url})\n\n return image_url", "def prepare_instance(self, task):\n node = task.node\n\n boot_option = deploy_utils.get_boot_option(node)\n\n self.clean_up_instance(task)\n\n remote_image_server = node.driver_info.get('remote_image_server')\n remote_image_share_root = node.driver_info.get(\n 'remote_image_share_root')\n\n remote_server_data = {}\n remote_server_data['remote_image_share_type'] = (\n node.driver_info.get('remote_image_share_type'))\n remote_server_data['remote_image_user_name'] = (\n node.driver_info.get('remote_image_user_name', None))\n remote_server_data['remote_image_user_password'] = (\n node.driver_info.get('remote_image_user_password', None))\n\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n sdflex_common.update_secure_boot_mode(task, True)\n iwdi = 
node.driver_internal_info.get('is_whole_disk_image')\n if boot_option == \"local\" or iwdi:\n self._set_boot_device(\n task, boot_devices.DISK, persistent=True)\n\n LOG.debug(\"Node %(node)s is set to permanently boot from local \"\n \"%(device)s\", {'node': task.node.uuid,\n 'device': boot_devices.DISK})\n return\n\n params = {}\n\n if boot_option != 'ramdisk':\n root_uuid = node.driver_internal_info.get('root_uuid_or_disk_id')\n\n if not root_uuid and task.driver.storage.should_write_image(task):\n LOG.warning(\n \"The UUID of the root partition could not be found for \"\n \"node %s. Booting instance from disk anyway.\", node.uuid)\n\n self._set_boot_device(\n task, boot_devices.DISK, persistent=True)\n\n return\n\n params.update(root_uuid=root_uuid)\n\n iso_ref = self._prepare_boot_iso(task, **params)\n\n url = (remote_server_data['remote_image_share_type'] + \"://\" +\n remote_image_server + \"/\" + remote_image_share_root + \"/\" +\n iso_ref)\n\n sdflex_common.eject_vmedia(task,\n vmedia_device)\n sdflex_common.insert_vmedia(task, url,\n vmedia_device,\n remote_server_data)\n\n boot_mode_utils.sync_boot_mode(task)\n\n self._set_boot_device(\n task, boot_devices.CD.value.lower(), persistent=True)\n\n LOG.debug(\"Node %(node)s is set to permanently boot from \"\n \"%(device)s\", {'node': task.node.uuid,\n 'device': boot_devices.CD})", "def _prepare_deploy_iso(self, task, params, mode):\n node = task.node\n d_info = redfish_boot._parse_driver_info(node)\n\n kernel_href = d_info.get('%s_kernel' % mode)\n ramdisk_href = d_info.get('%s_ramdisk' % mode)\n bootloader_href = d_info.get('bootloader')\n\n return self._prepare_iso_image(\n task, kernel_href, ramdisk_href, bootloader_href, params=params)", "def create_base_image(driver,\n source_image_family='debian-10', source_image_project='debian-cloud',\n repository_url='https://github.com/google/android-cuttlefish.git',\n repository_branch='main', build_branch='aosp-master',\n build_target='aosp_cf_x86_phone-userdebug', build_id='',\n build_instance='halyard-build', build_zone='europe-west4-a', tags=[],\n dest_image='', dest_family='', image_disk='halyard-image-disk', respin=False):\n\n # SETUP\n \n build_node = utils.find_instance(driver, build_instance, build_zone)\n if build_node:\n driver.destroy_node(build_node)\n print('successfully deleted', build_instance)\n\n build_volume = utils.find_disk(driver, image_disk, build_zone)\n if build_volume:\n driver.destroy_volume(build_volume)\n print('successfully deleted', image_disk)\n\n\n # BUILD INSTANCE CREATION\n\n build_volume = driver.create_volume(\n 30, image_disk,\n location=build_zone,\n ex_image_family=source_image_family)\n\n print('built', source_image_family, 'disk')\n\n gpu_type='nvidia-tesla-p100-vws'\n gpu = utils.find_gpu(driver, gpu_type, build_zone)\n if not gpu:\n utils.fatal_error(f'Please use a zone with {gpu_type} GPUs available')\n\n build_node = driver.create_node(\n build_instance,\n 'n1-standard-16',\n None,\n location=build_zone,\n ex_image_family=source_image_family,\n ex_accelerator_type=gpu_type,\n ex_on_host_maintenance='TERMINATE',\n ex_accelerator_count=1,\n ex_service_accounts=[{'scopes':['storage-ro']}],\n ex_disk_size=30,\n ex_tags=tags)\n print('successfully created', build_instance)\n\n utils.wait_for_instance(build_instance, build_zone)\n\n driver.attach_volume(build_node, build_volume)\n\n src_files = ['create_base_image_gce.sh', 'download_artifacts.sh']\n src_files = [PATH + '/' + file for file in src_files]\n src = ' '.join(list(map(str,src_files)))\n\n 
os.system(f'gcloud compute scp {src} {build_instance}: \\\n --zone={build_zone}')\n\n\n # IMAGE CREATION\n\n os.system(f'gcloud compute ssh --zone={build_zone} \\\n {build_instance} -- ./create_base_image_gce.sh \\\n {repository_url} {repository_branch} \\\n {build_branch} {build_target} {build_id}')\n\n dest_names = get_dest_names(\n build_branch, build_target, build_id,\n build_instance, build_zone, dest_image, dest_family)\n \n dest_image = dest_names['dest_image']\n dest_family = dest_names['dest_family']\n\n try:\n build_image = driver.ex_get_image(dest_image)\n except:\n build_image = None\n\n if build_image:\n if respin:\n driver.ex_delete_image(build_image)\n else:\n utils.fatal_error(f'''Image {dest_image} already exists.\n (To replace run with flag --respin)''')\n\n driver.destroy_node(build_node)\n\n driver.ex_create_image(\n dest_image,\n build_volume,\n ex_licenses=['https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx'],\n family=dest_family\n )\n\n print(f'Created image {dest_image} in {dest_family} family')\n\n driver.destroy_volume(build_volume)\n\n return {\"name\": dest_image, \"family\": dest_family}", "def _prepare_iso_image_on_host(self, context, instance, local_img_dir,\n host_dir, image_meta):\n pvm_op = self.pvm_operator\n\n # Check if media library exists. Error out if it's not there.\n if not pvm_op.check_media_library_exists():\n LOG.error(_(\"There is no media library\"))\n raise exception.IBMPowerVMMediaLibraryNotAvailable()\n\n # Create vopt with name being the glance ID/UUID.\n vopt_img_name = image_meta['id']\n if not pvm_op.check_vopt_exists(name=vopt_img_name):\n\n # check for space within CONF.powervm_img_local_path\n self._check_space(local_img_dir, image_meta['size'])\n\n # It's not VIOS yet:\n # Check available space in IVM staging area and media repository\n free_space = pvm_op.get_vopt_size()\n free_padmin_space = pvm_op.get_staging_size(host_dir)\n\n free_rep_mb = int(free_space[0])\n free_staging_mb = int(free_padmin_space[0]) / 1024\n iso_size_mb = image_meta['size'] / (1024 * 1024)\n\n LOG.debug(\"Free repository space: %s MB\" % free_rep_mb)\n LOG.debug(\"Free VIOS staging space: %s MB\" % free_staging_mb)\n LOG.debug(\"ISO file size: %s MB\" % iso_size_mb)\n\n if iso_size_mb > free_rep_mb:\n raise common_ex.IBMPowerVMMediaRepOutOfSpace(\n size=iso_size_mb, free=free_rep_mb)\n\n if iso_size_mb > free_staging_mb:\n raise exception.IBMPowerVMStagingAreaOutOfSpace(\n dir=host_dir, size=iso_size_mb, free=free_staging_mb)\n\n # Fetch ISO from Glance\n\n file_path = '%s/%s.%s.%s' % (local_img_dir,\n image_meta['id'],\n CONF.host,\n image_meta['disk_format'])\n LOG.debug(\"Fetching image '%(img-name)s' from glance \"\n \"to %(img-path)s\" %\n {'img-name': image_meta['name'],\n 'img-path': file_path})\n images.fetch(context, image_meta['id'], file_path,\n instance['user_id'],\n instance['project_id'])\n if (os.path.isfile(file_path)):\n # transfer ISO to VIOS\n try:\n\n iso_remote_path, iso_size = self.\\\n _copy_image_file(file_path,\n host_dir,\n context,\n image_meta)\n\n self.pvm_operator.create_vopt_device(\n vopt_img_name, iso_remote_path)\n # The create vopt device command copies\n # the ISO into the media repository. 
The file at\n # iso_remote_path can be removed now.\n try:\n self.pvm_operator.remove_ovf_env_iso(\n iso_remote_path)\n except (exception.IBMPowerVMCommandFailed, Exception) as e:\n msg = (_('Error cleaning up ISO:'\n ' %(iso_remote_path)s') %\n locals())\n LOG.exception(msg)\n raise e\n finally:\n os.remove(file_path)\n else:\n raise exception.IBMPowerVMISOFileNotFound(\n ISO_file=file_path)\n else:\n LOG.debug(\"Image with id %s already on VIOS.\" % image_meta['id'])\n\n return vopt_img_name", "def _prepare_iso_image_on_host(self, context, instance, local_img_dir,\n host_dir, image_meta):\n pvm_op = self.pvm_operator\n\n # Check if media library exists. Error out if it's not there.\n if not pvm_op.check_media_library_exists():\n LOG.error(_(\"There is no media library\"))\n raise exception.IBMPowerVMMediaLibraryNotAvailable()\n\n # Create vopt with name being the glance ID/UUID.\n vopt_img_name = image_meta['id']\n if not pvm_op.check_vopt_exists(name=vopt_img_name):\n # check for space within CONF.powervm_img_local_path\n self._check_space(local_img_dir, image_meta['size'])\n\n # It's not VIOS yet:\n # Check available space in IVM staging area and media repository\n free_space = pvm_op.get_vopt_size()\n free_padmin_space = pvm_op.get_staging_size(host_dir)\n\n free_rep_mb = int(free_space[0])\n free_staging_mb = int(free_padmin_space[0]) / 1024\n iso_size_mb = image_meta['size'] / (1024 * 1024)\n\n LOG.debug(\"Free repository space: %s MB\" % free_rep_mb)\n LOG.debug(\"Free VIOS staging space: %s MB\" % free_staging_mb)\n LOG.debug(\"ISO file size: %s MB\" % iso_size_mb)\n\n if iso_size_mb > free_rep_mb:\n raise common_ex.IBMPowerVMMediaRepOutOfSpace(\n size=iso_size_mb, free=free_rep_mb)\n\n if iso_size_mb > free_staging_mb:\n raise exception.IBMPowerVMStagingAreaOutOfSpace(\n dir=host_dir, size=iso_size_mb, free=free_staging_mb)\n\n # Fetch ISO from Glance\n\n file_path = '%s/%s.%s.%s' % (local_img_dir,\n image_meta['id'],\n CONF.host,\n image_meta['disk_format'])\n LOG.debug(\"Fetching image '%(img-name)s' from glance \"\n \"to %(img-path)s\" %\n {'img-name': image_meta['name'],\n 'img-path': file_path})\n images.fetch(context, image_meta['id'], file_path,\n instance['user_id'],\n instance['project_id'])\n if (os.path.isfile(file_path)):\n # transfer ISO to VIOS\n try:\n\n iso_remote_path, iso_size = self.copy_image_file(file_path,\n host_dir)\n\n self.pvm_operator.create_vopt_device(\n vopt_img_name, iso_remote_path)\n # The create vopt device command copies\n # the ISO into the media repository. The file at\n # iso_remote_path can be removed now.\n try:\n self.pvm_operator.remove_ovf_env_iso(iso_remote_path)\n except Exception:\n msg = (_('Error cleaning up ISO:'\n ' %(iso_remote_path)s') %\n locals())\n LOG.exception(msg)\n finally:\n os.remove(file_path)\n else:\n raise exception.IBMPowerVMISOFileNotFound(\n ISO_file=file_path)\n else:\n LOG.debug(\"Image with id %s already on VIOS.\"\n % image_meta['id'])\n\n return vopt_img_name", "def setup_iso_image(context):\n iso_src_url = context.testbed.config['ISO_SRC_URL']\n datacenter_name = context.testbed.config['ISO_DATACENTER_NAME']\n datastore_path = context.testbed.config['ISO_DATASTORE_PATH']\n (datastore_name, path) = parse_datastore_path(datastore_path)\n datastore_mo = get_datastore_mo(context.client,\n context.service_instance._stub,\n datacenter_name,\n datastore_name)\n if not datastore_mo:\n raise Exception(\"Could not find datastore '{}'\".format(datastore_name))\n\n # See if the ISO image exists. 
Copy it into the system if it does not exist\n dsfile = datastore_file.File(datastore_mo)\n if not dsfile.exists(datastore_path):\n print(\"Putting ISO image file from '{}' at '{}'\".\n format(iso_src_url, datastore_path))\n dsfile.put(path=path, src_url=iso_src_url)", "def prepare_instance(self, task):\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n boot_option = deploy_utils.get_boot_option(task.node)\n if boot_option != \"kickstart\":\n sdflex_common.update_secure_boot_mode(task, True)\n if not http_utils.is_http_boot_requested(task.node):\n if boot_option == \"kickstart\":\n prepare_node_for_deploy(task)\n super(SdflexPXEBoot, self).prepare_instance(task)\n else:\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_option = deploy_utils.get_boot_option(node)\n boot_device = None\n instance_image_info = {}\n if boot_option == \"ramdisk\":\n instance_image_info = http_utils.get_instance_image_info(task)\n http_utils.cache_ramdisk_kernel(task, instance_image_info)\n if deploy_utils.is_iscsi_boot(task) or boot_option == \"ramdisk\":\n http_utils.prepare_instance_http_config(\n task, instance_image_info,\n iscsi_boot=deploy_utils.is_iscsi_boot(task),\n ramdisk_boot=(boot_option == \"ramdisk\"))\n if http_utils.is_http_boot_requested(task.node):\n boot_device = boot_devices.UEFIHTTP\n else:\n boot_device = boot_devices.PXE\n elif boot_option != \"local\":\n if task.driver.storage.should_write_image(task):\n # Make sure that the instance kernel/ramdisk is cached.\n # This is for the takeover scenario for active nodes.\n instance_image_info = (\n http_utils.get_instance_image_info(task))\n http_utils.cache_ramdisk_kernel(task, instance_image_info)\n iwdi = (\n task.node.driver_internal_info.get('is_whole_disk_image'))\n try:\n root_uuid_or_disk_id = task.node.driver_internal_info[\n 'root_uuid_or_disk_id'\n ]\n except KeyError:\n if not task.driver.storage.should_write_image(task):\n pass\n elif not iwdi:\n LOG.warning(\"The UUID for the root partition can't be\"\n \" found, unable to switch the pxe config \"\n \"from deployment mode to service (boot) \"\n \"mode for node %(node)s\",\n {\"node\": task.node.uuid})\n else:\n LOG.warning(\"The disk id for the whole disk image \"\n \"can't be found, unable to switch the \"\n \"pxe config from deployment mode to \"\n \"service (boot) mode for node %(node)s. \"\n \"Booting the instance from disk.\",\n {\"node\": task.node.uuid})\n http_utils.clean_up_http_config(task)\n boot_device = boot_devices.DISK\n else:\n http_utils.build_service_http_config(task,\n instance_image_info,\n root_uuid_or_disk_id)\n if http_utils.is_http_boot_requested(task.node):\n boot_device = boot_devices.UEFIHTTP\n else:\n boot_device = boot_devices.PXE\n else:\n # If it's going to boot from the local disk, we don't need\n # PXE config files. 
They still need to be generated as part\n # of the prepare() because the deployment does PXE boot the\n # deploy ramdisk\n http_utils.clean_up_http_config(task)\n boot_device = boot_devices.DISK\n\n # NOTE(pas-ha) do not re-set boot device on ACTIVE nodes\n # during takeover\n if boot_device and task.node.provision_state != states.ACTIVE:\n persistent = True\n if node.driver_info.get('force_persistent_boot_device',\n 'Default') == 'Never':\n persistent = False\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=persistent)", "def prepare_disk_template(disk_image):\n LOGGER.info(' Cleaning up disk image %s', disk_image)\n image_cleanup_command = [\n 'virt-sysprep',\n '--format=qcow2',\n '--selinux-relabel',\n '--add=' + disk_image,\n ]\n call(image_cleanup_command)\n\n LOGGER.info(' Sparsifying image %s', disk_image)\n image_sparse_command = [\n 'virt-sparsify',\n '--format=qcow2',\n '--in-place',\n disk_image,\n ]\n call(image_sparse_command)", "def prepare_instance(self, task):\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_device = None\n boot_option = deploy_utils.get_boot_option(node)\n if boot_option != \"kickstart\":\n boot_mode_utils.configure_secure_boot_if_needed(task)\n\n instance_image_info = {}\n if boot_option == \"ramdisk\" or boot_option == \"kickstart\":\n instance_image_info = pxe_utils.get_instance_image_info(\n task, ipxe_enabled=self.ipxe_enabled)\n pxe_utils.cache_ramdisk_kernel(task, instance_image_info,\n ipxe_enabled=self.ipxe_enabled)\n if 'ks_template' in instance_image_info:\n ks_cfg = pxe_utils.validate_kickstart_template(\n instance_image_info['ks_template'][1]\n )\n pxe_utils.validate_kickstart_file(ks_cfg)\n\n if (deploy_utils.is_iscsi_boot(task) or boot_option == \"ramdisk\"\n or boot_option == \"kickstart\"):\n pxe_utils.prepare_instance_pxe_config(\n task, instance_image_info,\n iscsi_boot=deploy_utils.is_iscsi_boot(task),\n ramdisk_boot=(boot_option == \"ramdisk\"),\n anaconda_boot=(boot_option == \"kickstart\"),\n ipxe_enabled=self.ipxe_enabled)\n pxe_utils.prepare_instance_kickstart_config(\n task, instance_image_info,\n anaconda_boot=(boot_option == \"kickstart\"))\n boot_device = boot_devices.PXE\n\n else:\n # NOTE(dtantsur): create a PXE configuration as a safety net for\n # hardware uncapable of persistent boot. 
If on a reboot it will try\n # to boot from PXE, this configuration will return it back.\n if CONF.pxe.enable_netboot_fallback:\n pxe_utils.build_service_pxe_config(\n task, instance_image_info,\n task.node.driver_internal_info.get('root_uuid_or_disk_id'),\n ipxe_enabled=self.ipxe_enabled,\n # PXE config for whole disk images is identical to what\n # we need to boot from local disk, so use True even\n # for partition images.\n is_whole_disk_image=True)\n else:\n # Clean up the deployment configuration\n pxe_utils.clean_up_pxe_config(\n task, ipxe_enabled=self.ipxe_enabled)\n boot_device = boot_devices.DISK\n\n # NOTE(pas-ha) do not re-set boot device on ACTIVE nodes\n # during takeover\n if boot_device and task.node.provision_state != states.ACTIVE:\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=True)", "def boot_image(self) -> 'outputs.BootImageResponse':\n return pulumi.get(self, \"boot_image\")", "def prepare(topology='devstack'):\n log.info(\"Preparing boxes for %s Openstack\" % topology)\n log.info(\"Preparing virtual machines for lab=%s\" % LAB)\n url = IMAGES_REPO + DEVSTACK_DISK\n local(\"test -e %s || wget -nv %s\" % (DEVSTACK_DISK, url))\n local(\"python ./tools/cloud/create.py -l {lab} -s /opt/imgs \"\n \"-z ./{disk} -t {topo} > config_file\".format(lab=LAB,\n disk=DEVSTACK_DISK,\n topo=topology))", "def generate_ipxe_boot_file(self):\n logging.info('Creating image \\'' + self.name + '\\' IPXE boot file')\n\n # Format image ipxe boot file template with image attributes\n file_content = self.__class__.get_boot_file_template().format(image_name=self.name,\n image_initramfs=self.image,\n image_kernel=self.kernel)\n\n # Create ipxe boot file\n logging.debug('Creating boot content inside file ' + self.IMAGE_DIRECTORY + 'boot.ipxe')\n with open(self.IMAGE_DIRECTORY + 'boot.ipxe', \"w\") as ff:\n ff.write(file_content)", "def prepare_image(self, agent, image_info, metadata, files):\n return", "def do_prepare_partition(cls, part, source_params, creator, cr_workdir,\n oe_builddir, bootimg_dir, kernel_dir,\n rootfs_dir, native_sysroot):\n\n # We rely on the --label parameter and the naming convention\n # in partition.py prepare_rootfs() here to find the already\n # prepared rootfs partition image.\n pattern = '%s/rootfs_%s.*' % (cr_workdir, part.label)\n rootfs = glob.glob(pattern)\n if len(rootfs) != 1:\n raise WicError(\"%s shell pattern does not match exactly one rootfs image (missing --label parameter?): %s\" % (pattern, rootfs))\n else:\n rootfs = rootfs[0]\n logger.debug(\"Calculating dm-verity hash for rootfs %s (native %s).\" % (rootfs, native_sysroot))\n\n hashimg = '%s/dm-verity_%s.img' % (cr_workdir, part.label)\n # Reserve some fixed amount of space at the start of the hash image\n # for our own data (in particular, the signed root hash).\n # The content of that part is:\n # roothash=<....>\n # <potentially some more assignments in the future>\n # signature=<single line of base64 encoded OpenSSL sha256 digest>\n header_size = 4096\n ret, out = exec_native_cmd(\"veritysetup format '%s' '%s' --hash-offset=%d\" %\n (rootfs, hashimg, header_size),\n native_sysroot)\n m = re.search(r'^Root hash:\\s*(\\S+)$', out, re.MULTILINE)\n if ret or not m:\n raise WicError('veritysetup failed: %s' % out)\n else:\n root_hash = m.group(1)\n privkey = get_bitbake_var('REFKIT_DMVERITY_PRIVATE_KEY')\n password = get_bitbake_var('REFKIT_DMVERITY_PASSWORD')\n tmp = tempfile.mkdtemp(prefix='dm-verity-')\n try:\n data_filename = os.path.join(tmp, 'data')\n header = 
('roothash=%s\\nheadersize=%d\\n' % (root_hash, header_size)).encode('ascii')\n with open(data_filename, 'wb') as data:\n data.write(header)\n # Must use a temporary file, exec_native_cmd() only supports UTF-8 output.\n signature = os.path.join(tmp, 'sig')\n ret, out = exec_native_cmd(\"openssl dgst -sha256 -passin '%s' -sign '%s' -out '%s' '%s'\" %\n (password, privkey, signature, data_filename),\n native_sysroot)\n if ret:\n raise WicError('openssl signing failed')\n with open(signature, 'rb') as f:\n header += b'signature=' + base64.standard_b64encode(f.read()) + b'\\n'\n if len(header) + 1 >= header_size:\n raise WicError('reserved space for dm-verity header too small')\n with open(hashimg, 'rb+') as hash:\n hash.write(header)\n finally:\n shutil.rmtree(tmp)\n\n data_bytes = os.stat(rootfs).st_size\n hash_bytes = os.stat(hashimg).st_size\n logger.debug(\"dm-verity data partition %d bytes, hash partition %d bytes, ratio %f.\" %\n (data_bytes, hash_bytes, data_bytes / hash_bytes))\n part.size = data_bytes // 1024\n part.source_file = hashimg", "def prepare_ramdisk(self, task, ramdisk_params):\n node = task.node\n remote_server_data = {}\n remote_image_server = node.driver_info.get('remote_image_server')\n remote_image_share_root = node.driver_info.get(\n 'remote_image_share_root')\n remote_server_data['remote_image_share_type'] = (\n node.driver_info.get('remote_image_share_type'))\n remote_server_data['remote_image_user_name'] = (\n node.driver_info.get('remote_image_user_name', None))\n remote_server_data['remote_image_user_password'] = (\n node.driver_info.get('remote_image_user_password', None))\n\n # NOTE(TheJulia): If this method is being called by something\n # aside from deployment, clean and rescue, such as conductor takeover,\n # we should treat this as a no-op and move on otherwise we would\n # modify the state of the node due to virtual media operations.\n if node.provision_state not in (states.DEPLOYING,\n states.CLEANING,\n states.RESCUING,\n states.INSPECTING):\n return\n\n # NOTE(TheJulia): Since we're deploying, cleaning, or rescuing,\n # with virtual media boot, we should generate a token!\n manager_utils.add_secret_token(node, pregenerated=True)\n node.save()\n ramdisk_params['ipa-agent-token'] = (\n node.driver_internal_info['agent_secret_token'])\n\n manager_utils.node_power_action(task, states.POWER_OFF)\n\n deploy_nic_mac = deploy_utils.get_single_nic_with_vif_port_id(task)\n ramdisk_params['BOOTIF'] = deploy_nic_mac\n if CONF.debug and 'ipa-debug' not in ramdisk_params:\n ramdisk_params['ipa-debug'] = '1'\n\n mode = deploy_utils.rescue_or_deploy_mode(node)\n iso_ref = self._prepare_deploy_iso(task, ramdisk_params, mode)\n\n url = (remote_server_data['remote_image_share_type'] + \"://\" +\n remote_image_server + \"/\" + remote_image_share_root + \"/\" +\n iso_ref)\n\n sdflex_common.eject_vmedia(task,\n vmedia_device)\n sdflex_common.insert_vmedia(task, url,\n vmedia_device,\n remote_server_data)\n\n boot_mode_utils.sync_boot_mode(task)\n\n self._set_boot_device(task, boot_devices.CD.value.lower())\n\n LOG.debug(\"Node %(node)s is set to one time boot from \"\n \"%(device)s\", {'node': task.node.uuid,\n 'device': boot_devices.CD})", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n autoscale_headrooms: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OceanLaunchSpecAutoscaleHeadroomArgs']]]]] = None,\n autoscale_headrooms_automatics: 
Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OceanLaunchSpecAutoscaleHeadroomsAutomaticArgs']]]]] = None,\n instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n labels: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OceanLaunchSpecLabelArgs']]]]] = None,\n metadatas: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OceanLaunchSpecMetadataArgs']]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n network_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OceanLaunchSpecNetworkInterfaceArgs']]]]] = None,\n node_pool_name: Optional[pulumi.Input[str]] = None,\n ocean_id: Optional[pulumi.Input[str]] = None,\n resource_limits: Optional[pulumi.Input[pulumi.InputType['OceanLaunchSpecResourceLimitsArgs']]] = None,\n restrict_scale_down: Optional[pulumi.Input[bool]] = None,\n root_volume_size: Optional[pulumi.Input[int]] = None,\n root_volume_type: Optional[pulumi.Input[str]] = None,\n scheduling_tasks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OceanLaunchSpecSchedulingTaskArgs']]]]] = None,\n service_account: Optional[pulumi.Input[str]] = None,\n shielded_instance_config: Optional[pulumi.Input[pulumi.InputType['OceanLaunchSpecShieldedInstanceConfigArgs']]] = None,\n source_image: Optional[pulumi.Input[str]] = None,\n storage: Optional[pulumi.Input[pulumi.InputType['OceanLaunchSpecStorageArgs']]] = None,\n strategies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OceanLaunchSpecStrategyArgs']]]]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n taints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OceanLaunchSpecTaintArgs']]]]] = None,\n update_policy: Optional[pulumi.Input[pulumi.InputType['OceanLaunchSpecUpdatePolicyArgs']]] = None,\n __props__=None):\n ...", "def prepare_ramdisk(self, task, ramdisk_params):\n if task.node.provision_state in (states.DEPLOYING, states.RESCUING,\n states.CLEANING, states.INSPECTING):\n prepare_node_for_deploy(task)\n if not http_utils.is_http_boot_requested(task.node):\n super(SdflexPXEBoot, self).prepare_ramdisk(task, ramdisk_params)\n else:\n node = task.node\n # Label indicating a deploy or rescue operation being carried out\n # on the node, 'deploy' or 'rescue'. 
Unless the node is in a\n # rescue like state, the mode is set to 'deploy', indicating\n # deploy operation is being carried out.\n mode = deploy_utils.rescue_or_deploy_mode(node)\n\n http_info = http_utils.get_image_info(node, mode=mode)\n\n # NODE: Try to validate and fetch instance images only\n # if we are in DEPLOYING state.\n if node.provision_state == states.DEPLOYING:\n http_info.update(http_utils.get_instance_image_info(task))\n boot_mode_utils.sync_boot_mode(task)\n\n http_options = http_utils.build_http_config_options(task,\n http_info)\n http_options.update(ramdisk_params)\n http_config_template = deploy_utils.get_pxe_config_template(node)\n http_utils.create_http_config(task, http_options,\n http_config_template)\n manager_utils.node_set_boot_device(task, boot_devices.UEFIHTTP,\n persistent=False)\n if http_info:\n http_utils.cache_ramdisk_kernel(task, http_info)\n bfpv = str(task.node.driver_info.get('bfpv', 'false')).lower()\n if bfpv == 'true':\n node = task.node\n driver_internal_info = node.driver_internal_info\n driver_internal_info['bfpv_started'] = 'false'\n node.driver_internal_info = driver_internal_info\n node.save()", "def AMI_builder(\n AWS_access_key_id,\n AWS_secret_access_key,\n region_name,\n base_image_id,\n os,\n security_group_id,\n AMI_name,\n RPM_package_version,\n APT_OSS_version,\n):\n try:\n instance = Instance(\n AWS_access_key_id=AWS_access_key_id,\n AWS_secret_access_key=AWS_secret_access_key,\n region_name=region_name,\n base_image_id=base_image_id,\n os=os, # ubuntu, amazonLinux\n security_group_id=security_group_id,\n AMI_name=AMI_name,\n RPM_package_version=RPM_package_version,\n APT_OSS_version=APT_OSS_version,\n )\n except Exception as err:\n logging.error(\"Could not bring up the instance. \" + str(err))\n sys.exit(-1)\n AMI_id = \"\"\n installation_failed = False\n try:\n instance.wait_until_ready()\n except Exception as err:\n logging.error(\n \"Could not bring the instance to ready state. \" + str(err))\n installation_failed = True\n else:\n try:\n instance.install_ODFE()\n AMI_id = instance.create_AMI()\n except Exception as err:\n installation_failed = True\n logging.error(\n \"AMI creation failed there was an error see the logs. \" + str(err))\n finally:\n try:\n instance.cleanup_instance()\n except Exception as err:\n logging.error(\n \"Could not cleanup the instance. There could be an instance currently running, terminate it. \" + str(err))\n installation_failed = True\n if installation_failed:\n sys.exit(-1)\n # copy the AMI to the required regions\n ec2_client = boto3.client(\n \"ec2\",\n aws_access_key_id=AWS_access_key_id,\n aws_secret_access_key=AWS_secret_access_key,\n region_name=region_name,\n )\n AMI_copy_regions = [region[\"RegionName\"]\n for region in ec2_client.describe_regions()[\"Regions\"]]\n AMI_copy_regions.remove(region_name) # since AMI is created here\n copy_AMI_to_regions(\n AWS_access_key_id=AWS_access_key_id,\n AWS_secret_access_key=AWS_secret_access_key,\n AMI_id=AMI_id,\n AMI_name=AMI_name,\n AMI_source_region=region_name,\n AMI_copy_regions=AMI_copy_regions,\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepares the boot of the deploy or rescue ramdisk over virtual media. This method prepares the boot of the deploy or rescue ramdisk after reading the relevant information from the node's driver_info.
def prepare_ramdisk(self, task, ramdisk_params):
    node = task.node
    remote_server_data = {}
    remote_image_server = node.driver_info.get('remote_image_server')
    remote_image_share_root = node.driver_info.get(
        'remote_image_share_root')
    remote_server_data['remote_image_share_type'] = (
        node.driver_info.get('remote_image_share_type'))
    remote_server_data['remote_image_user_name'] = (
        node.driver_info.get('remote_image_user_name', None))
    remote_server_data['remote_image_user_password'] = (
        node.driver_info.get('remote_image_user_password', None))

    # NOTE(TheJulia): If this method is being called by something
    # aside from deployment, clean and rescue, such as conductor takeover,
    # we should treat this as a no-op and move on otherwise we would
    # modify the state of the node due to virtual media operations.
    if node.provision_state not in (states.DEPLOYING,
                                    states.CLEANING,
                                    states.RESCUING,
                                    states.INSPECTING):
        return

    # NOTE(TheJulia): Since we're deploying, cleaning, or rescuing,
    # with virtual media boot, we should generate a token!
    manager_utils.add_secret_token(node, pregenerated=True)
    node.save()
    ramdisk_params['ipa-agent-token'] = (
        node.driver_internal_info['agent_secret_token'])

    manager_utils.node_power_action(task, states.POWER_OFF)

    deploy_nic_mac = deploy_utils.get_single_nic_with_vif_port_id(task)
    ramdisk_params['BOOTIF'] = deploy_nic_mac
    if CONF.debug and 'ipa-debug' not in ramdisk_params:
        ramdisk_params['ipa-debug'] = '1'

    mode = deploy_utils.rescue_or_deploy_mode(node)
    iso_ref = self._prepare_deploy_iso(task, ramdisk_params, mode)

    url = (remote_server_data['remote_image_share_type'] + "://" +
           remote_image_server + "/" + remote_image_share_root + "/" +
           iso_ref)

    sdflex_common.eject_vmedia(task,
                               vmedia_device)
    sdflex_common.insert_vmedia(task, url,
                                vmedia_device,
                                remote_server_data)

    boot_mode_utils.sync_boot_mode(task)

    self._set_boot_device(task, boot_devices.CD.value.lower())

    LOG.debug("Node %(node)s is set to one time boot from "
              "%(device)s", {'node': task.node.uuid,
                             'device': boot_devices.CD})
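For quick reference, a minimal sketch of the driver_info fields this method reads and of how the virtual-media URL is assembled from them. The field names come from the code above; the server address, share name and ISO name are hypothetical, chosen only for illustration.

driver_info = {
    'remote_image_server': '192.0.2.10',          # hypothetical server address
    'remote_image_share_root': 'ironic_images',   # hypothetical share directory
    'remote_image_share_type': 'nfs',             # 'nfs' or 'cifs'
    'remote_image_user_name': None,
    'remote_image_user_password': None,
}
iso_ref = 'deploy-1234.iso'  # name returned by the ISO preparation step (made up here)

url = (driver_info['remote_image_share_type'] + "://" +
       driver_info['remote_image_server'] + "/" +
       driver_info['remote_image_share_root'] + "/" +
       iso_ref)
print(url)  # nfs://192.0.2.10/ironic_images/deploy-1234.iso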
[ "def prepare_ramdisk(self, task, ramdisk_params):\n if task.node.provision_state in (states.DEPLOYING, states.RESCUING,\n states.CLEANING, states.INSPECTING):\n prepare_node_for_deploy(task)\n if not http_utils.is_http_boot_requested(task.node):\n super(SdflexPXEBoot, self).prepare_ramdisk(task, ramdisk_params)\n else:\n node = task.node\n # Label indicating a deploy or rescue operation being carried out\n # on the node, 'deploy' or 'rescue'. Unless the node is in a\n # rescue like state, the mode is set to 'deploy', indicating\n # deploy operation is being carried out.\n mode = deploy_utils.rescue_or_deploy_mode(node)\n\n http_info = http_utils.get_image_info(node, mode=mode)\n\n # NODE: Try to validate and fetch instance images only\n # if we are in DEPLOYING state.\n if node.provision_state == states.DEPLOYING:\n http_info.update(http_utils.get_instance_image_info(task))\n boot_mode_utils.sync_boot_mode(task)\n\n http_options = http_utils.build_http_config_options(task,\n http_info)\n http_options.update(ramdisk_params)\n http_config_template = deploy_utils.get_pxe_config_template(node)\n http_utils.create_http_config(task, http_options,\n http_config_template)\n manager_utils.node_set_boot_device(task, boot_devices.UEFIHTTP,\n persistent=False)\n if http_info:\n http_utils.cache_ramdisk_kernel(task, http_info)\n bfpv = str(task.node.driver_info.get('bfpv', 'false')).lower()\n if bfpv == 'true':\n node = task.node\n driver_internal_info = node.driver_internal_info\n driver_internal_info['bfpv_started'] = 'false'\n node.driver_internal_info = driver_internal_info\n node.save()", "def prepare_ramdisk(self, task, ramdisk_params):\n if task.node.provision_state in (states.DEPLOYING, states.RESCUING,\n states.CLEANING, states.INSPECTING):\n node = task.node\n d_info = redfish_boot._parse_driver_info(node)\n # Label indicating a deploy or rescue operation being carried out\n # on the node, 'deploy' or 'rescue'. 
Unless the node is in a\n # rescue like state, the mode is set to 'deploy', indicating\n # deploy operation is being carried out.\n\n mode = deploy_utils.rescue_or_deploy_mode(node)\n\n iso_ref = image_utils.prepare_deploy_iso(task, ramdisk_params,\n mode, d_info)\n node.driver_internal_info.update({'deploy_boot_iso': iso_ref})\n\n sdflex_common.set_network_setting_dhcpless_boot(node, iso_ref)\n boot_mode_utils.sync_boot_mode(task)\n manager_utils.node_set_boot_device(task, boot_devices.UEFIHTTP,\n persistent=False)", "def prepare_instance(self, task):\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n boot_option = deploy_utils.get_boot_option(task.node)\n if boot_option != \"kickstart\":\n sdflex_common.update_secure_boot_mode(task, True)\n if not http_utils.is_http_boot_requested(task.node):\n if boot_option == \"kickstart\":\n prepare_node_for_deploy(task)\n super(SdflexPXEBoot, self).prepare_instance(task)\n else:\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_option = deploy_utils.get_boot_option(node)\n boot_device = None\n instance_image_info = {}\n if boot_option == \"ramdisk\":\n instance_image_info = http_utils.get_instance_image_info(task)\n http_utils.cache_ramdisk_kernel(task, instance_image_info)\n if deploy_utils.is_iscsi_boot(task) or boot_option == \"ramdisk\":\n http_utils.prepare_instance_http_config(\n task, instance_image_info,\n iscsi_boot=deploy_utils.is_iscsi_boot(task),\n ramdisk_boot=(boot_option == \"ramdisk\"))\n if http_utils.is_http_boot_requested(task.node):\n boot_device = boot_devices.UEFIHTTP\n else:\n boot_device = boot_devices.PXE\n elif boot_option != \"local\":\n if task.driver.storage.should_write_image(task):\n # Make sure that the instance kernel/ramdisk is cached.\n # This is for the takeover scenario for active nodes.\n instance_image_info = (\n http_utils.get_instance_image_info(task))\n http_utils.cache_ramdisk_kernel(task, instance_image_info)\n iwdi = (\n task.node.driver_internal_info.get('is_whole_disk_image'))\n try:\n root_uuid_or_disk_id = task.node.driver_internal_info[\n 'root_uuid_or_disk_id'\n ]\n except KeyError:\n if not task.driver.storage.should_write_image(task):\n pass\n elif not iwdi:\n LOG.warning(\"The UUID for the root partition can't be\"\n \" found, unable to switch the pxe config \"\n \"from deployment mode to service (boot) \"\n \"mode for node %(node)s\",\n {\"node\": task.node.uuid})\n else:\n LOG.warning(\"The disk id for the whole disk image \"\n \"can't be found, unable to switch the \"\n \"pxe config from deployment mode to \"\n \"service (boot) mode for node %(node)s. \"\n \"Booting the instance from disk.\",\n {\"node\": task.node.uuid})\n http_utils.clean_up_http_config(task)\n boot_device = boot_devices.DISK\n else:\n http_utils.build_service_http_config(task,\n instance_image_info,\n root_uuid_or_disk_id)\n if http_utils.is_http_boot_requested(task.node):\n boot_device = boot_devices.UEFIHTTP\n else:\n boot_device = boot_devices.PXE\n else:\n # If it's going to boot from the local disk, we don't need\n # PXE config files. 
They still need to be generated as part\n # of the prepare() because the deployment does PXE boot the\n # deploy ramdisk\n http_utils.clean_up_http_config(task)\n boot_device = boot_devices.DISK\n\n # NOTE(pas-ha) do not re-set boot device on ACTIVE nodes\n # during takeover\n if boot_device and task.node.provision_state != states.ACTIVE:\n persistent = True\n if node.driver_info.get('force_persistent_boot_device',\n 'Default') == 'Never':\n persistent = False\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=persistent)", "def prepare_instance(self, task):\n node = task.node\n\n boot_option = deploy_utils.get_boot_option(node)\n\n self.clean_up_instance(task)\n\n remote_image_server = node.driver_info.get('remote_image_server')\n remote_image_share_root = node.driver_info.get(\n 'remote_image_share_root')\n\n remote_server_data = {}\n remote_server_data['remote_image_share_type'] = (\n node.driver_info.get('remote_image_share_type'))\n remote_server_data['remote_image_user_name'] = (\n node.driver_info.get('remote_image_user_name', None))\n remote_server_data['remote_image_user_password'] = (\n node.driver_info.get('remote_image_user_password', None))\n\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n sdflex_common.update_secure_boot_mode(task, True)\n iwdi = node.driver_internal_info.get('is_whole_disk_image')\n if boot_option == \"local\" or iwdi:\n self._set_boot_device(\n task, boot_devices.DISK, persistent=True)\n\n LOG.debug(\"Node %(node)s is set to permanently boot from local \"\n \"%(device)s\", {'node': task.node.uuid,\n 'device': boot_devices.DISK})\n return\n\n params = {}\n\n if boot_option != 'ramdisk':\n root_uuid = node.driver_internal_info.get('root_uuid_or_disk_id')\n\n if not root_uuid and task.driver.storage.should_write_image(task):\n LOG.warning(\n \"The UUID of the root partition could not be found for \"\n \"node %s. Booting instance from disk anyway.\", node.uuid)\n\n self._set_boot_device(\n task, boot_devices.DISK, persistent=True)\n\n return\n\n params.update(root_uuid=root_uuid)\n\n iso_ref = self._prepare_boot_iso(task, **params)\n\n url = (remote_server_data['remote_image_share_type'] + \"://\" +\n remote_image_server + \"/\" + remote_image_share_root + \"/\" +\n iso_ref)\n\n sdflex_common.eject_vmedia(task,\n vmedia_device)\n sdflex_common.insert_vmedia(task, url,\n vmedia_device,\n remote_server_data)\n\n boot_mode_utils.sync_boot_mode(task)\n\n self._set_boot_device(\n task, boot_devices.CD.value.lower(), persistent=True)\n\n LOG.debug(\"Node %(node)s is set to permanently boot from \"\n \"%(device)s\", {'node': task.node.uuid,\n 'device': boot_devices.CD})", "def prepare_ramdisk(self, task, ramdisk_params):\n node = task.node\n\n # Label indicating a deploy or rescue operation being carried out on\n # the node, 'deploy' or 'rescue'. Unless the node is in a rescue like\n # state, the mode is set to 'deploy', indicating deploy operation is\n # being carried out.\n mode = deploy_utils.rescue_or_deploy_mode(node)\n\n if self.ipxe_enabled:\n # NOTE(mjturek): At this point, the ipxe boot script should\n # already exist as it is created at startup time. 
However, we\n # call the boot script create method here to assert its\n # existence and handle the unlikely case that it wasn't created\n # or was deleted.\n pxe_utils.create_ipxe_boot_script()\n\n # Generate options for both IPv4 and IPv6, and they can be\n # filtered down later based upon the port options.\n # TODO(TheJulia): This should be re-tooled during the Victoria\n # development cycle so that we call a single method and return\n # combined options. The method we currently call is relied upon\n # by two eternal projects, to changing the behavior is not ideal.\n dhcp_opts = pxe_utils.dhcp_options_for_instance(\n task, ipxe_enabled=self.ipxe_enabled, ip_version=4)\n dhcp_opts += pxe_utils.dhcp_options_for_instance(\n task, ipxe_enabled=self.ipxe_enabled, ip_version=6)\n provider = dhcp_factory.DHCPFactory()\n provider.update_dhcp(task, dhcp_opts)\n\n pxe_info = pxe_utils.get_image_info(node, mode=mode,\n ipxe_enabled=self.ipxe_enabled)\n\n # NODE: Try to validate and fetch instance images only\n # if we are in DEPLOYING state.\n if node.provision_state == states.DEPLOYING:\n pxe_info.update(\n pxe_utils.get_instance_image_info(\n task, ipxe_enabled=self.ipxe_enabled))\n\n boot_mode_utils.sync_boot_mode(task)\n\n pxe_options = pxe_utils.build_pxe_config_options(\n task, pxe_info, ipxe_enabled=self.ipxe_enabled,\n ramdisk_params=ramdisk_params)\n # TODO(dtantsur): backwards compability hack, remove in the V release\n if ramdisk_params.get(\"ipa-api-url\"):\n pxe_options[\"ipa-api-url\"] = ramdisk_params[\"ipa-api-url\"]\n\n if self.ipxe_enabled:\n pxe_config_template = deploy_utils.get_ipxe_config_template(node)\n else:\n pxe_config_template = deploy_utils.get_pxe_config_template(node)\n\n pxe_utils.create_pxe_config(task, pxe_options,\n pxe_config_template,\n ipxe_enabled=self.ipxe_enabled)\n manager_utils.node_set_boot_device(task, boot_devices.PXE,\n persistent=False)\n\n if self.ipxe_enabled and CONF.pxe.ipxe_use_swift:\n kernel_label = '%s_kernel' % mode\n ramdisk_label = '%s_ramdisk' % mode\n pxe_info.pop(kernel_label, None)\n pxe_info.pop(ramdisk_label, None)\n\n if pxe_info:\n pxe_utils.cache_ramdisk_kernel(task, pxe_info,\n ipxe_enabled=self.ipxe_enabled)\n\n LOG.debug('Ramdisk (i)PXE boot for node %(node)s has been prepared '\n 'with kernel params %(params)s',\n {'node': node.uuid, 'params': pxe_options})", "def _prepare_boot_iso(self, task, root_uuid=None):\n node = task.node\n d_info = redfish_boot._parse_deploy_info(node)\n\n kernel_href = node.instance_info.get('kernel')\n ramdisk_href = node.instance_info.get('ramdisk')\n\n if not kernel_href or not ramdisk_href:\n\n image_href = d_info['image_source']\n\n image_properties = (\n images.get_image_properties(\n task.context, image_href, ['kernel_id', 'ramdisk_id']))\n\n if not kernel_href:\n kernel_href = image_properties.get('kernel_id')\n\n if not ramdisk_href:\n ramdisk_href = image_properties.get('ramdisk_id')\n\n if not kernel_href or not ramdisk_href:\n raise exception.InvalidParameterValue(_(\n \"Unable to find kernel or ramdisk for \"\n \"to generate boot ISO for %(node)s\") %\n {'node': task.node.uuid})\n\n bootloader_href = d_info.get('bootloader')\n\n return self._prepare_iso_image(\n task, kernel_href, ramdisk_href, bootloader_href,\n root_uuid=root_uuid)", "def prepare_instance(self, task):\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_device = None\n boot_option = deploy_utils.get_boot_option(node)\n if boot_option != \"kickstart\":\n 
boot_mode_utils.configure_secure_boot_if_needed(task)\n\n instance_image_info = {}\n if boot_option == \"ramdisk\" or boot_option == \"kickstart\":\n instance_image_info = pxe_utils.get_instance_image_info(\n task, ipxe_enabled=self.ipxe_enabled)\n pxe_utils.cache_ramdisk_kernel(task, instance_image_info,\n ipxe_enabled=self.ipxe_enabled)\n if 'ks_template' in instance_image_info:\n ks_cfg = pxe_utils.validate_kickstart_template(\n instance_image_info['ks_template'][1]\n )\n pxe_utils.validate_kickstart_file(ks_cfg)\n\n if (deploy_utils.is_iscsi_boot(task) or boot_option == \"ramdisk\"\n or boot_option == \"kickstart\"):\n pxe_utils.prepare_instance_pxe_config(\n task, instance_image_info,\n iscsi_boot=deploy_utils.is_iscsi_boot(task),\n ramdisk_boot=(boot_option == \"ramdisk\"),\n anaconda_boot=(boot_option == \"kickstart\"),\n ipxe_enabled=self.ipxe_enabled)\n pxe_utils.prepare_instance_kickstart_config(\n task, instance_image_info,\n anaconda_boot=(boot_option == \"kickstart\"))\n boot_device = boot_devices.PXE\n\n else:\n # NOTE(dtantsur): create a PXE configuration as a safety net for\n # hardware uncapable of persistent boot. If on a reboot it will try\n # to boot from PXE, this configuration will return it back.\n if CONF.pxe.enable_netboot_fallback:\n pxe_utils.build_service_pxe_config(\n task, instance_image_info,\n task.node.driver_internal_info.get('root_uuid_or_disk_id'),\n ipxe_enabled=self.ipxe_enabled,\n # PXE config for whole disk images is identical to what\n # we need to boot from local disk, so use True even\n # for partition images.\n is_whole_disk_image=True)\n else:\n # Clean up the deployment configuration\n pxe_utils.clean_up_pxe_config(\n task, ipxe_enabled=self.ipxe_enabled)\n boot_device = boot_devices.DISK\n\n # NOTE(pas-ha) do not re-set boot device on ACTIVE nodes\n # during takeover\n if boot_device and task.node.provision_state != states.ACTIVE:\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=True)", "def _prepare_iso_image(self, task, kernel_href, ramdisk_href,\n bootloader_href=None, configdrive=None,\n root_uuid=None, params=None):\n if not kernel_href or not ramdisk_href:\n raise exception.InvalidParameterValue(_(\n \"Unable to find kernel or ramdisk for \"\n \"building ISO for %(node)s\") %\n {'node': task.node.uuid})\n\n i_info = task.node.instance_info\n driver_info = task.node.driver_info\n if driver_info.get('remote_image_share_type') == 'nfs':\n image_share_root = driver_info.get('remote_image_share_root')\n else:\n image_share_root = driver_info.get('image_share_root')\n if deploy_utils.get_boot_option(task.node) == \"ramdisk\":\n kernel_params = \"root=/dev/ram0 text \"\n kernel_params += i_info.get(\"ramdisk_kernel_arguments\", \"\")\n\n else:\n kernel_params = i_info.get('kernel_append_params', \"\")\n\n if params:\n kernel_params = ' '.join(\n (kernel_params, ' '.join(\n '%s=%s' % kv for kv in params.items())))\n\n boot_mode = boot_mode_utils.get_boot_mode_for_deploy(task.node)\n\n LOG.debug(\"Trying to create %(boot_mode)s ISO image for node %(node)s \"\n \"with kernel %(kernel_href)s, ramdisk %(ramdisk_href)s, \"\n \"bootloader %(bootloader_href)s and kernel params %(params)s\"\n \"\", {'node': task.node.uuid,\n 'boot_mode': boot_mode,\n 'kernel_href': kernel_href,\n 'ramdisk_href': ramdisk_href,\n 'bootloader_href': bootloader_href,\n 'params': kernel_params})\n\n with tempfile.NamedTemporaryFile(\n dir=CONF.tempdir, suffix='.iso') as boot_fileobj:\n\n with tempfile.NamedTemporaryFile(\n dir=CONF.tempdir, suffix='.img') 
as cfgdrv_fileobj:\n\n configdrive_href = configdrive\n\n if configdrive:\n parsed_url = urlparse.urlparse(configdrive)\n if not parsed_url.scheme:\n cfgdrv_blob = base64.decode_as_bytes(configdrive)\n\n with open(cfgdrv_fileobj.name, 'wb') as f:\n f.write(cfgdrv_blob)\n\n configdrive_href = urlparse.urlunparse(\n ('file', '', cfgdrv_fileobj.name, '', '', ''))\n\n LOG.info(\"Burning configdrive %(url)s to boot ISO image \"\n \"for node %(node)s\", {'url': configdrive_href,\n 'node': task.node.uuid})\n boot_iso_tmp_file = boot_fileobj.name\n\n images.create_boot_iso(\n task.context, boot_iso_tmp_file,\n kernel_href, ramdisk_href,\n esp_image_href=bootloader_href,\n root_uuid=root_uuid,\n kernel_params=kernel_params,\n boot_mode=boot_mode)\n iso_object_name = self._get_iso_image_name(task.node)\n\n image_url = self._publish_image(\n boot_iso_tmp_file, iso_object_name, image_share_root)\n\n LOG.debug(\"Created ISO %(name)s in NFS/CIFS for node %(node)s, \"\n \"exposed as temporary URL \"\n \"%(url)s\", {'node': task.node.uuid,\n 'name': iso_object_name,\n 'url': image_url})\n\n return image_url", "def prepare_instance(self, task):\n\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n sdflex_common.update_secure_boot_mode(task, True)\n\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_device = None\n\n self.clean_up_instance(task)\n boot_device = boot_devices.DISK\n\n if boot_device and task.node.provision_state != states.ACTIVE:\n persistent = True\n if node.driver_info.get('force_persistent_boot_device',\n 'Default') == 'Never':\n persistent = False\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=persistent)", "def prepVm(self):\r\n self.server.logMsg(\"PREPARING \" + self.vmName + \" FOR TESTING\")\r\n self.server.logMsg(self.vmName + \" OPERATING SYSTEM: \" + self.vmOS)\r\n self.server.logMsg(self.vmName + \" ARCHITECTURE: \" + self.getArch())\r\n self.getSnapshots()\r\n self.powerOn(False)", "def _prepare_deploy_iso(self, task, params, mode):\n node = task.node\n d_info = redfish_boot._parse_driver_info(node)\n\n kernel_href = d_info.get('%s_kernel' % mode)\n ramdisk_href = d_info.get('%s_ramdisk' % mode)\n bootloader_href = d_info.get('bootloader')\n\n return self._prepare_iso_image(\n task, kernel_href, ramdisk_href, bootloader_href, params=params)", "def boot_installer(self):\n boot_timeout = int(config.get('iso_installer', 'BOOT_TIMEOUT'))\n self.child.expect('Escape character')\n LOG.info('connected to the VM (controller-0)')\n # send a escape character\n self.child.sendline('\\x1b')\n self.child.expect('boot:')\n cmd_boot_line = common.get_cmd_boot_line()\n self.child.sendline(cmd_boot_line)\n LOG.info('kernel command line sent: %s', cmd_boot_line)\n # send a enter character\n self.child.sendline('\\r')\n # setting a boot timeout\n self.child.timeout = boot_timeout\n self.child.expect('Loading vmlinuz')\n LOG.info('Loading vmlinuz')\n self.child.expect('Loading initrd.img')\n LOG.info('Loading initrd.img')\n self.child.expect('Starting installer, one moment...')\n LOG.info('Starting installer ...')\n self.child.expect('Performing post-installation setup tasks')\n LOG.info('Performing post-installation setup tasks')", "def bootNodes(self):\n self.libvirt.bootSlaves()", "def bootMaster(self):\n self.libvirt.bootMaster()\n time.sleep(100)", "def __injectVirtIo(self):\n \n logging.info(\">>>> Adding virt-io drivers to the running system driver 
store\")\n\n root_virtio = Windows.virtRelIoDir # + virtio_path\n virtiodir=\"%SystemRoot%\\\\system32\\\\drivers\\\\virtio\"\n\n # 2) open reg file, replace data with corresponding values, inject it to our hive\n # the injection reg file is found in corresponding resource directory\n #newreg = self.prepareRegFile(hivekeyname , \"ControlSet00\"+str(currentcontrolset) , , virtiodir , virtiodir )\n self.__mergeReg(root_virtio + \"\\\\virtio.reg\") \n\n if self.__windowsVersion >= WindowsSystemInfo.WindowsSystemInfo.Win2012:\n logging.info(\"Adding Win2012+ driver configurations\")\n self.__injectViostor2012()\n\n return True", "def setupBootRegion(self):\n self.virtualMemoryRequest(\n \"PhysicalRegion\",\n {\n \"RegionType\": \"BootRegion\",\n \"Size\": PcConfig.get_boot_region_size(),\n \"Type\": \"I\",\n \"Bank\": 0,\n },\n )", "def _connect_boot_volume(self, volume, mountpoint, context, instance):\n LOG.debug('Connecting boot volume')\n instance_uuid = instance['uuid']\n volume_id = volume['id']\n\n connector = self.get_volume_connector(instance)\n connection_info = self._initialize_volume_connection(context,\n volume_id,\n connector)\n\n # Check connection_info to determine if the provided volume is\n # local to this compute node. If it is, then don't use it for\n # Solaris branded zones in order to avoid a known ZFS deadlock issue\n # when using a zpool within another zpool on the same system.\n extra_specs = self._get_flavor(instance)['extra_specs'].copy()\n brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)\n if brand == ZONE_BRAND_SOLARIS:\n driver_type = connection_info['driver_volume_type']\n if driver_type == 'local':\n msg = _(\"Detected 'local' zvol driver volume type \"\n \"from volume service, which should not be \"\n \"used as a boot device for 'solaris' \"\n \"branded zones.\")\n raise exception.InvalidVolume(reason=msg)\n elif driver_type == 'iscsi':\n # Check for a potential loopback iSCSI situation\n data = connection_info['data']\n target_portal = data['target_portal']\n # Strip off the port number (eg. 127.0.0.1:3260)\n host = target_portal.rsplit(':', 1)\n # Strip any enclosing '[' and ']' brackets for\n # IPv6 addresses.\n target_host = host[0].strip('[]')\n\n # Check if target_host is an IP or hostname matching the\n # connector host or IP, which would mean the provisioned\n # iSCSI LUN is on the same host as the instance.\n if target_host in [connector['ip'], connector['host']]:\n msg = _(\"iSCSI connection info from volume \"\n \"service indicates that the target is a \"\n \"local volume, which should not be used \"\n \"as a boot device for 'solaris' branded \"\n \"zones.\")\n raise exception.InvalidVolume(reason=msg)\n # Assuming that fibre_channel is non-local\n elif driver_type != 'fibre_channel':\n # Some other connection type that we don't understand\n # Let zone use some local fallback instead.\n msg = _(\"Unsupported volume driver type '%s' can not be used \"\n \"as a boot device for zones.\" % driver_type)\n raise exception.InvalidVolume(reason=msg)\n\n # Volume looks OK to use. 
Notify Cinder of the attachment.\n self._volume_api.attach(context, volume_id, instance_uuid, mountpoint)\n return connection_info", "def do_prepare_partition(cls, part, source_params, creator, cr_workdir,\n oe_builddir, bootimg_dir, kernel_dir,\n rootfs_dir, native_sysroot):\n\n # We rely on the --label parameter and the naming convention\n # in partition.py prepare_rootfs() here to find the already\n # prepared rootfs partition image.\n pattern = '%s/rootfs_%s.*' % (cr_workdir, part.label)\n rootfs = glob.glob(pattern)\n if len(rootfs) != 1:\n raise WicError(\"%s shell pattern does not match exactly one rootfs image (missing --label parameter?): %s\" % (pattern, rootfs))\n else:\n rootfs = rootfs[0]\n logger.debug(\"Calculating dm-verity hash for rootfs %s (native %s).\" % (rootfs, native_sysroot))\n\n hashimg = '%s/dm-verity_%s.img' % (cr_workdir, part.label)\n # Reserve some fixed amount of space at the start of the hash image\n # for our own data (in particular, the signed root hash).\n # The content of that part is:\n # roothash=<....>\n # <potentially some more assignments in the future>\n # signature=<single line of base64 encoded OpenSSL sha256 digest>\n header_size = 4096\n ret, out = exec_native_cmd(\"veritysetup format '%s' '%s' --hash-offset=%d\" %\n (rootfs, hashimg, header_size),\n native_sysroot)\n m = re.search(r'^Root hash:\\s*(\\S+)$', out, re.MULTILINE)\n if ret or not m:\n raise WicError('veritysetup failed: %s' % out)\n else:\n root_hash = m.group(1)\n privkey = get_bitbake_var('REFKIT_DMVERITY_PRIVATE_KEY')\n password = get_bitbake_var('REFKIT_DMVERITY_PASSWORD')\n tmp = tempfile.mkdtemp(prefix='dm-verity-')\n try:\n data_filename = os.path.join(tmp, 'data')\n header = ('roothash=%s\\nheadersize=%d\\n' % (root_hash, header_size)).encode('ascii')\n with open(data_filename, 'wb') as data:\n data.write(header)\n # Must use a temporary file, exec_native_cmd() only supports UTF-8 output.\n signature = os.path.join(tmp, 'sig')\n ret, out = exec_native_cmd(\"openssl dgst -sha256 -passin '%s' -sign '%s' -out '%s' '%s'\" %\n (password, privkey, signature, data_filename),\n native_sysroot)\n if ret:\n raise WicError('openssl signing failed')\n with open(signature, 'rb') as f:\n header += b'signature=' + base64.standard_b64encode(f.read()) + b'\\n'\n if len(header) + 1 >= header_size:\n raise WicError('reserved space for dm-verity header too small')\n with open(hashimg, 'rb+') as hash:\n hash.write(header)\n finally:\n shutil.rmtree(tmp)\n\n data_bytes = os.stat(rootfs).st_size\n hash_bytes = os.stat(hashimg).st_size\n logger.debug(\"dm-verity data partition %d bytes, hash partition %d bytes, ratio %f.\" %\n (data_bytes, hash_bytes, data_bytes / hash_bytes))\n part.size = data_bytes // 1024\n part.source_file = hashimg", "def prepare(topology='devstack'):\n log.info(\"Preparing boxes for %s Openstack\" % topology)\n log.info(\"Preparing virtual machines for lab=%s\" % LAB)\n url = IMAGES_REPO + DEVSTACK_DISK\n local(\"test -e %s || wget -nv %s\" % (DEVSTACK_DISK, url))\n local(\"python ./tools/cloud/create.py -l {lab} -s /opt/imgs \"\n \"-z ./{disk} -t {topo} > config_file\".format(lab=LAB,\n disk=DEVSTACK_DISK,\n topo=topology))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cleans up the boot of the ironic ramdisk. This method cleans up the environment that was set up for booting the deploy ramdisk.
def clean_up_ramdisk(self, task):
    LOG.debug("Cleaning up deploy boot for "
              "%(node)s", {'node': task.node.uuid})
    sdflex_common.eject_vmedia(task, vmedia_device)
    self._cleanup_iso_image(task)
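As a small illustration of the teardown order the method above relies on (the callables here are hypothetical stand-ins, not the driver's API): the attached virtual media is ejected first, so the published ISO is no longer referenced when it is removed.

def cleanup_vmedia_boot(eject_media, delete_published_iso):
    # Detach the virtual CD before deleting the ISO it points at.
    eject_media()
    delete_published_iso()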
[ "def clean_up_ramdisk(self, task):\n node = task.node\n mode = deploy_utils.rescue_or_deploy_mode(node)\n try:\n images_info = pxe_utils.get_image_info(\n node, mode=mode, ipxe_enabled=self.ipxe_enabled)\n except exception.MissingParameterValue as e:\n LOG.warning('Could not get %(mode)s image info '\n 'to clean up images for node %(node)s: %(err)s',\n {'mode': mode, 'node': node.uuid, 'err': e})\n else:\n pxe_utils.clean_up_pxe_env(\n task, images_info, ipxe_enabled=self.ipxe_enabled)", "def cleanup(self):\n os.system(\"rm -rf /dev/shm/images/kinect_rgb\")\n os.system(\"rm -rf /dev/shm/images/kinect_depth\")", "def cleanup_files(self):\n os.system(\"rm -r /tmp/kernelpop\")", "def cleanup(self):\n # Reset namespace to default\n switch_to_default_rook_cluster_project()\n\n log.info(\"Delete the benchmark-operator project\")\n run(\"make undeploy\", shell=True, check=True, cwd=self.local_repo)\n # Wait until the benchmark-operator project deleted\n self.ns_obj.wait_for_delete(resource_name=BMO_NS, timeout=180)\n\n # remove from workers the label used for cache dropping\n log.info(\"Remove labels from worker nodes.\")\n helpers.remove_label_from_worker_node(self.worker_nodes, label_key=BMO_LABEL)\n\n # wait another 10 sec. after cleanup done.\n time.sleep(10)", "def wipe_puppet(self):\n # TODO IMPLEMENT THIS METHOD\n self.clean_setup()", "def _clean(self):\n print(\"Cleaning up...\", file=sys.stderr)\n self._kill_workers()\n if self._multinode and self._region_comm:\n print(\"Aborting MPI job...\", file=sys.stderr)\n self._region_comm.Abort(errorcode=1)", "def cleanup(self):\n\n self.snmp_requester.cleanup()", "def deallocate_resources(self, platform):\n pass", "def clean():\n _gc.collect()", "def cleanup_tempdir():\n devnull = open('/dev/null', 'w')\n # ignore non-zero return codes\n for disk in BOOT_DISK, SYSTEM_DISK, CACHE_DISK, DATA_DISK, \\\n SDCARD_DISK:\n if disk is not None:\n try:\n cmd_runner.run(['umount', disk],\n stdout=devnull, stderr=devnull, as_root=True).wait()\n except cmd_runner.SubcommandNonZeroReturnValue:\n pass\n # Remove TMP_DIR as root because some files written there are\n # owned by root.\n if TMP_DIR is not None:\n cmd_runner.run(['rm', '-rf', TMP_DIR], as_root=True).wait()", "def clean_up_instance(self, task):\n manager_utils.node_power_action(task, states.POWER_OFF)\n disable_secure_boot_if_supported(task)\n\n node = task.node\n if (is_directed_lanboot_requested(node) or\n http_utils.is_http_boot_requested(node)):\n # In this cleaning step it sets the URLBOOTFILE & URLBOOTFILE2 &\n # HttpBootUri path as ''.\n sdflex_common.reset_bios_settings(node)\n http_boot_uri = node.driver_info.get('http_boot_uri')\n if http_boot_uri:\n sdflex_object = sdflex_common.get_sdflex_object(node)\n sdflex_object.set_http_boot_uri(None)\n\n if http_utils.is_http_boot_requested(node):\n try:\n images_info = http_utils.get_instance_image_info(task)\n except ironic_exception.MissingParameterValue as e:\n LOG.warning('Could not get instance image info '\n 'to clean up images for node %(node)s: %(err)s',\n {'node': node.uuid, 'err': e})\n else:\n http_utils.clean_up_http_env(task, images_info)\n else:\n super(SdflexPXEBoot, self).clean_up_instance(task)", "def cleanup(self):\n self.log.debug('template_igt - in template_igt cleanup()')\n # Add resource setup code here", "def _clean(self):\n process_args = [\n self.__python, '-m', 'pip', 'uninstall', '--yes', 'srisk'\n ]\n subprocess.Popen(process_args, shell=False).wait()\n import fnmatch\n for rootDir, subdirs, filenames in 
os.walk(self._src_dir):\n if rootDir.endswith('tests'):\n continue\n for filename in fnmatch.filter(filenames, '*.py'):\n try:\n if filename.startswith('brisk') is False:\n os.remove(os.path.join(rootDir, filename))\n except OSError:\n print('Error deleting file %s' % filename)", "def _cleanup(self):\n shutil.rmtree(self._temp_processing_dir)\n\n print('Cleaning up temporary files...')", "def environment_clear(self):\n self._call(\"environmentClear\")", "def cleanup():\n if len(env.releases) > 3:\n directories = env.releases\n directories.reverse()\n del directories[:3]\n env.directories = ' '.join(\n [\"%(releases_path)s/%(release)s\" % {'releases_path': env.releases_path, 'release': release} for release in\n directories])\n run(\"rm -rf %(directories)s\" % {'directories': env.directories})", "def _clear_environments(self):\n self._environments.clear()", "def clean_build():\r\n env.clean_build = True", "def env_clear(self):\r\n self._env.clear()\r\n return self" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepares the boot of the instance over virtual media. This method prepares the boot of the instance after reading the relevant information from the node's instance_info.
def prepare_instance(self, task):
    node = task.node

    boot_option = deploy_utils.get_boot_option(node)

    self.clean_up_instance(task)

    remote_image_server = node.driver_info.get('remote_image_server')
    remote_image_share_root = node.driver_info.get(
        'remote_image_share_root')

    remote_server_data = {}
    remote_server_data['remote_image_share_type'] = (
        node.driver_info.get('remote_image_share_type'))
    remote_server_data['remote_image_user_name'] = (
        node.driver_info.get('remote_image_user_name', None))
    remote_server_data['remote_image_user_password'] = (
        node.driver_info.get('remote_image_user_password', None))

    # Need to enable secure boot, if being requested.
    # update_secure_boot_mode checks and enables secure boot only if the
    # deploy has requested secure boot
    sdflex_common.update_secure_boot_mode(task, True)
    iwdi = node.driver_internal_info.get('is_whole_disk_image')
    if boot_option == "local" or iwdi:
        self._set_boot_device(
            task, boot_devices.DISK, persistent=True)

        LOG.debug("Node %(node)s is set to permanently boot from local "
                  "%(device)s", {'node': task.node.uuid,
                                 'device': boot_devices.DISK})
        return

    params = {}

    if boot_option != 'ramdisk':
        root_uuid = node.driver_internal_info.get('root_uuid_or_disk_id')

        if not root_uuid and task.driver.storage.should_write_image(task):
            LOG.warning(
                "The UUID of the root partition could not be found for "
                "node %s. Booting instance from disk anyway.", node.uuid)

            self._set_boot_device(
                task, boot_devices.DISK, persistent=True)

            return

        params.update(root_uuid=root_uuid)

    iso_ref = self._prepare_boot_iso(task, **params)

    url = (remote_server_data['remote_image_share_type'] + "://" +
           remote_image_server + "/" + remote_image_share_root + "/" +
           iso_ref)

    sdflex_common.eject_vmedia(task,
                               vmedia_device)
    sdflex_common.insert_vmedia(task, url,
                                vmedia_device,
                                remote_server_data)

    boot_mode_utils.sync_boot_mode(task)

    self._set_boot_device(
        task, boot_devices.CD.value.lower(), persistent=True)

    LOG.debug("Node %(node)s is set to permanently boot from "
              "%(device)s", {'node': task.node.uuid,
                             'device': boot_devices.CD})
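A condensed summary of the branch the method above takes, as a hypothetical helper written only for this example; it simplifies away the should_write_image check and returns just the chosen boot device.

def choose_instance_boot(boot_option, is_whole_disk_image, root_uuid):
    # Local boot or whole-disk image: boot permanently from disk, nothing attached.
    if boot_option == 'local' or is_whole_disk_image:
        return 'disk'
    # Partition image without a recorded root UUID: fall back to disk boot.
    if boot_option != 'ramdisk' and not root_uuid:
        return 'disk'
    # Otherwise a boot ISO is built, published on the NFS/CIFS share and
    # the node is set to boot permanently from virtual CD.
    return 'cd'

print(choose_instance_boot('netboot', False, 'abcd-1234'))  # cd
print(choose_instance_boot('local', False, None))           # disk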
[ "def prepare_instance(self, task):\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n boot_option = deploy_utils.get_boot_option(task.node)\n if boot_option != \"kickstart\":\n sdflex_common.update_secure_boot_mode(task, True)\n if not http_utils.is_http_boot_requested(task.node):\n if boot_option == \"kickstart\":\n prepare_node_for_deploy(task)\n super(SdflexPXEBoot, self).prepare_instance(task)\n else:\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_option = deploy_utils.get_boot_option(node)\n boot_device = None\n instance_image_info = {}\n if boot_option == \"ramdisk\":\n instance_image_info = http_utils.get_instance_image_info(task)\n http_utils.cache_ramdisk_kernel(task, instance_image_info)\n if deploy_utils.is_iscsi_boot(task) or boot_option == \"ramdisk\":\n http_utils.prepare_instance_http_config(\n task, instance_image_info,\n iscsi_boot=deploy_utils.is_iscsi_boot(task),\n ramdisk_boot=(boot_option == \"ramdisk\"))\n if http_utils.is_http_boot_requested(task.node):\n boot_device = boot_devices.UEFIHTTP\n else:\n boot_device = boot_devices.PXE\n elif boot_option != \"local\":\n if task.driver.storage.should_write_image(task):\n # Make sure that the instance kernel/ramdisk is cached.\n # This is for the takeover scenario for active nodes.\n instance_image_info = (\n http_utils.get_instance_image_info(task))\n http_utils.cache_ramdisk_kernel(task, instance_image_info)\n iwdi = (\n task.node.driver_internal_info.get('is_whole_disk_image'))\n try:\n root_uuid_or_disk_id = task.node.driver_internal_info[\n 'root_uuid_or_disk_id'\n ]\n except KeyError:\n if not task.driver.storage.should_write_image(task):\n pass\n elif not iwdi:\n LOG.warning(\"The UUID for the root partition can't be\"\n \" found, unable to switch the pxe config \"\n \"from deployment mode to service (boot) \"\n \"mode for node %(node)s\",\n {\"node\": task.node.uuid})\n else:\n LOG.warning(\"The disk id for the whole disk image \"\n \"can't be found, unable to switch the \"\n \"pxe config from deployment mode to \"\n \"service (boot) mode for node %(node)s. \"\n \"Booting the instance from disk.\",\n {\"node\": task.node.uuid})\n http_utils.clean_up_http_config(task)\n boot_device = boot_devices.DISK\n else:\n http_utils.build_service_http_config(task,\n instance_image_info,\n root_uuid_or_disk_id)\n if http_utils.is_http_boot_requested(task.node):\n boot_device = boot_devices.UEFIHTTP\n else:\n boot_device = boot_devices.PXE\n else:\n # If it's going to boot from the local disk, we don't need\n # PXE config files. 
They still need to be generated as part\n # of the prepare() because the deployment does PXE boot the\n # deploy ramdisk\n http_utils.clean_up_http_config(task)\n boot_device = boot_devices.DISK\n\n # NOTE(pas-ha) do not re-set boot device on ACTIVE nodes\n # during takeover\n if boot_device and task.node.provision_state != states.ACTIVE:\n persistent = True\n if node.driver_info.get('force_persistent_boot_device',\n 'Default') == 'Never':\n persistent = False\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=persistent)", "def prepare_instance(self, task):\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_device = None\n boot_option = deploy_utils.get_boot_option(node)\n if boot_option != \"kickstart\":\n boot_mode_utils.configure_secure_boot_if_needed(task)\n\n instance_image_info = {}\n if boot_option == \"ramdisk\" or boot_option == \"kickstart\":\n instance_image_info = pxe_utils.get_instance_image_info(\n task, ipxe_enabled=self.ipxe_enabled)\n pxe_utils.cache_ramdisk_kernel(task, instance_image_info,\n ipxe_enabled=self.ipxe_enabled)\n if 'ks_template' in instance_image_info:\n ks_cfg = pxe_utils.validate_kickstart_template(\n instance_image_info['ks_template'][1]\n )\n pxe_utils.validate_kickstart_file(ks_cfg)\n\n if (deploy_utils.is_iscsi_boot(task) or boot_option == \"ramdisk\"\n or boot_option == \"kickstart\"):\n pxe_utils.prepare_instance_pxe_config(\n task, instance_image_info,\n iscsi_boot=deploy_utils.is_iscsi_boot(task),\n ramdisk_boot=(boot_option == \"ramdisk\"),\n anaconda_boot=(boot_option == \"kickstart\"),\n ipxe_enabled=self.ipxe_enabled)\n pxe_utils.prepare_instance_kickstart_config(\n task, instance_image_info,\n anaconda_boot=(boot_option == \"kickstart\"))\n boot_device = boot_devices.PXE\n\n else:\n # NOTE(dtantsur): create a PXE configuration as a safety net for\n # hardware uncapable of persistent boot. 
If on a reboot it will try\n # to boot from PXE, this configuration will return it back.\n if CONF.pxe.enable_netboot_fallback:\n pxe_utils.build_service_pxe_config(\n task, instance_image_info,\n task.node.driver_internal_info.get('root_uuid_or_disk_id'),\n ipxe_enabled=self.ipxe_enabled,\n # PXE config for whole disk images is identical to what\n # we need to boot from local disk, so use True even\n # for partition images.\n is_whole_disk_image=True)\n else:\n # Clean up the deployment configuration\n pxe_utils.clean_up_pxe_config(\n task, ipxe_enabled=self.ipxe_enabled)\n boot_device = boot_devices.DISK\n\n # NOTE(pas-ha) do not re-set boot device on ACTIVE nodes\n # during takeover\n if boot_device and task.node.provision_state != states.ACTIVE:\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=True)", "def prepare_instance(self, task):\n\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n sdflex_common.update_secure_boot_mode(task, True)\n\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_device = None\n\n self.clean_up_instance(task)\n boot_device = boot_devices.DISK\n\n if boot_device and task.node.provision_state != states.ACTIVE:\n persistent = True\n if node.driver_info.get('force_persistent_boot_device',\n 'Default') == 'Never':\n persistent = False\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=persistent)", "def prepVm(self):\r\n self.server.logMsg(\"PREPARING \" + self.vmName + \" FOR TESTING\")\r\n self.server.logMsg(self.vmName + \" OPERATING SYSTEM: \" + self.vmOS)\r\n self.server.logMsg(self.vmName + \" ARCHITECTURE: \" + self.getArch())\r\n self.getSnapshots()\r\n self.powerOn(False)", "def prepare_ramdisk(self, task, ramdisk_params):\n if task.node.provision_state in (states.DEPLOYING, states.RESCUING,\n states.CLEANING, states.INSPECTING):\n prepare_node_for_deploy(task)\n if not http_utils.is_http_boot_requested(task.node):\n super(SdflexPXEBoot, self).prepare_ramdisk(task, ramdisk_params)\n else:\n node = task.node\n # Label indicating a deploy or rescue operation being carried out\n # on the node, 'deploy' or 'rescue'. 
Unless the node is in a\n # rescue like state, the mode is set to 'deploy', indicating\n # deploy operation is being carried out.\n mode = deploy_utils.rescue_or_deploy_mode(node)\n\n http_info = http_utils.get_image_info(node, mode=mode)\n\n # NODE: Try to validate and fetch instance images only\n # if we are in DEPLOYING state.\n if node.provision_state == states.DEPLOYING:\n http_info.update(http_utils.get_instance_image_info(task))\n boot_mode_utils.sync_boot_mode(task)\n\n http_options = http_utils.build_http_config_options(task,\n http_info)\n http_options.update(ramdisk_params)\n http_config_template = deploy_utils.get_pxe_config_template(node)\n http_utils.create_http_config(task, http_options,\n http_config_template)\n manager_utils.node_set_boot_device(task, boot_devices.UEFIHTTP,\n persistent=False)\n if http_info:\n http_utils.cache_ramdisk_kernel(task, http_info)\n bfpv = str(task.node.driver_info.get('bfpv', 'false')).lower()\n if bfpv == 'true':\n node = task.node\n driver_internal_info = node.driver_internal_info\n driver_internal_info['bfpv_started'] = 'false'\n node.driver_internal_info = driver_internal_info\n node.save()", "def bootNodes(self):\n self.libvirt.bootSlaves()", "def prepare_ramdisk(self, task, ramdisk_params):\n node = task.node\n remote_server_data = {}\n remote_image_server = node.driver_info.get('remote_image_server')\n remote_image_share_root = node.driver_info.get(\n 'remote_image_share_root')\n remote_server_data['remote_image_share_type'] = (\n node.driver_info.get('remote_image_share_type'))\n remote_server_data['remote_image_user_name'] = (\n node.driver_info.get('remote_image_user_name', None))\n remote_server_data['remote_image_user_password'] = (\n node.driver_info.get('remote_image_user_password', None))\n\n # NOTE(TheJulia): If this method is being called by something\n # aside from deployment, clean and rescue, such as conductor takeover,\n # we should treat this as a no-op and move on otherwise we would\n # modify the state of the node due to virtual media operations.\n if node.provision_state not in (states.DEPLOYING,\n states.CLEANING,\n states.RESCUING,\n states.INSPECTING):\n return\n\n # NOTE(TheJulia): Since we're deploying, cleaning, or rescuing,\n # with virtual media boot, we should generate a token!\n manager_utils.add_secret_token(node, pregenerated=True)\n node.save()\n ramdisk_params['ipa-agent-token'] = (\n node.driver_internal_info['agent_secret_token'])\n\n manager_utils.node_power_action(task, states.POWER_OFF)\n\n deploy_nic_mac = deploy_utils.get_single_nic_with_vif_port_id(task)\n ramdisk_params['BOOTIF'] = deploy_nic_mac\n if CONF.debug and 'ipa-debug' not in ramdisk_params:\n ramdisk_params['ipa-debug'] = '1'\n\n mode = deploy_utils.rescue_or_deploy_mode(node)\n iso_ref = self._prepare_deploy_iso(task, ramdisk_params, mode)\n\n url = (remote_server_data['remote_image_share_type'] + \"://\" +\n remote_image_server + \"/\" + remote_image_share_root + \"/\" +\n iso_ref)\n\n sdflex_common.eject_vmedia(task,\n vmedia_device)\n sdflex_common.insert_vmedia(task, url,\n vmedia_device,\n remote_server_data)\n\n boot_mode_utils.sync_boot_mode(task)\n\n self._set_boot_device(task, boot_devices.CD.value.lower())\n\n LOG.debug(\"Node %(node)s is set to one time boot from \"\n \"%(device)s\", {'node': task.node.uuid,\n 'device': boot_devices.CD})", "def init_vm(self):\n self.r2api.init_vm()\n self.did_init_vm = True", "def __init__(self, vm_spec):\n super(WindowsGceVirtualMachine, self).__init__(vm_spec)\n 
self.boot_metadata['windows-startup-script-ps1'] = (\n windows_virtual_machine.STARTUP_SCRIPT\n )", "def _connect_boot_volume(self, volume, mountpoint, context, instance):\n LOG.debug('Connecting boot volume')\n instance_uuid = instance['uuid']\n volume_id = volume['id']\n\n connector = self.get_volume_connector(instance)\n connection_info = self._initialize_volume_connection(context,\n volume_id,\n connector)\n\n # Check connection_info to determine if the provided volume is\n # local to this compute node. If it is, then don't use it for\n # Solaris branded zones in order to avoid a known ZFS deadlock issue\n # when using a zpool within another zpool on the same system.\n extra_specs = self._get_flavor(instance)['extra_specs'].copy()\n brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)\n if brand == ZONE_BRAND_SOLARIS:\n driver_type = connection_info['driver_volume_type']\n if driver_type == 'local':\n msg = _(\"Detected 'local' zvol driver volume type \"\n \"from volume service, which should not be \"\n \"used as a boot device for 'solaris' \"\n \"branded zones.\")\n raise exception.InvalidVolume(reason=msg)\n elif driver_type == 'iscsi':\n # Check for a potential loopback iSCSI situation\n data = connection_info['data']\n target_portal = data['target_portal']\n # Strip off the port number (eg. 127.0.0.1:3260)\n host = target_portal.rsplit(':', 1)\n # Strip any enclosing '[' and ']' brackets for\n # IPv6 addresses.\n target_host = host[0].strip('[]')\n\n # Check if target_host is an IP or hostname matching the\n # connector host or IP, which would mean the provisioned\n # iSCSI LUN is on the same host as the instance.\n if target_host in [connector['ip'], connector['host']]:\n msg = _(\"iSCSI connection info from volume \"\n \"service indicates that the target is a \"\n \"local volume, which should not be used \"\n \"as a boot device for 'solaris' branded \"\n \"zones.\")\n raise exception.InvalidVolume(reason=msg)\n # Assuming that fibre_channel is non-local\n elif driver_type != 'fibre_channel':\n # Some other connection type that we don't understand\n # Let zone use some local fallback instead.\n msg = _(\"Unsupported volume driver type '%s' can not be used \"\n \"as a boot device for zones.\" % driver_type)\n raise exception.InvalidVolume(reason=msg)\n\n # Volume looks OK to use. 
Notify Cinder of the attachment.\n self._volume_api.attach(context, volume_id, instance_uuid, mountpoint)\n return connection_info", "def _prepare_boot_iso(self, task, root_uuid=None):\n node = task.node\n d_info = redfish_boot._parse_deploy_info(node)\n\n kernel_href = node.instance_info.get('kernel')\n ramdisk_href = node.instance_info.get('ramdisk')\n\n if not kernel_href or not ramdisk_href:\n\n image_href = d_info['image_source']\n\n image_properties = (\n images.get_image_properties(\n task.context, image_href, ['kernel_id', 'ramdisk_id']))\n\n if not kernel_href:\n kernel_href = image_properties.get('kernel_id')\n\n if not ramdisk_href:\n ramdisk_href = image_properties.get('ramdisk_id')\n\n if not kernel_href or not ramdisk_href:\n raise exception.InvalidParameterValue(_(\n \"Unable to find kernel or ramdisk for \"\n \"to generate boot ISO for %(node)s\") %\n {'node': task.node.uuid})\n\n bootloader_href = d_info.get('bootloader')\n\n return self._prepare_iso_image(\n task, kernel_href, ramdisk_href, bootloader_href,\n root_uuid=root_uuid)", "def spawn(self, instance, network_info=None, block_device_mapping=None):\n\n # Update state to inform the nova stack that the VE is launching\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.NOSTATE,\n 'launching')\n LOG.debug('instance %s: is launching' % instance['name'])\n\n # Get current usages and resource availablity.\n self._get_cpuunits_usage()\n\n # Go through the steps of creating a container\n # TODO(imsplitbit): Need to add conditionals around this stuff to make\n # it more durable during failure. And roll back changes made leading\n # up to the error.\n self._cache_image(instance)\n self._create_vz(instance)\n self._set_vz_os_hint(instance)\n self._configure_vz(instance)\n self._set_name(instance)\n self._setup_networks(instance, network_info)\n self._set_hostname(instance)\n self._set_vmguarpages(instance)\n self._set_privvmpages(instance)\n self._attach_volumes(instance)\n\n if FLAGS.ovz_use_cpuunit:\n self._set_cpuunits(instance)\n if FLAGS.ovz_use_cpulimit:\n self._set_cpulimit(instance)\n if FLAGS.ovz_use_cpus:\n self._set_cpus(instance)\n if FLAGS.ovz_use_ioprio:\n self._set_ioprio(instance)\n if FLAGS.ovz_use_disk_quotas:\n self._set_diskspace(instance)\n \n self._start(instance)\n self._initial_secure_host(instance)\n \n # Begin making our looping async call\n timer = utils.LoopingCall(f=None)\n\n # I stole this from the libvirt driver but it is appropriate to\n # have this looping timer call so that if a VE doesn't start right\n # away we can defer all of this.\n def _wait_for_boot():\n try:\n state = self.get_info(instance['name'])['state']\n db.instance_set_state(context.get_admin_context(),\n instance['id'], state)\n if state == power_state.RUNNING:\n LOG.debug('instance %s: booted' % instance['name'])\n timer.stop()\n\n except:\n LOG.exception('instance %s: failed to boot' %\n instance['name'])\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.SHUTDOWN)\n timer.stop()\n\n timer.f = _wait_for_boot\n return timer.start(interval=0.5, now=True)", "def _create_boot_volume(self, context, instance):\n LOG.debug('Creating boot volume')\n boot_vol_az = CONF.solariszones.boot_volume_az\n boot_vol_type = CONF.solariszones.boot_volume_type\n try:\n vol = self._volume_api.create(\n context, instance['root_gb'],\n instance['hostname'] + \"-\" + self._rootzpool_suffix,\n \"Boot volume for instance '%s' (%s)\"\n % (instance['name'], instance['uuid']),\n 
volume_type=boot_vol_type, availability_zone=boot_vol_az)\n # TODO(npower): Polling is what nova/compute/manager also does when\n # creating a new volume, so we do likewise here.\n while True:\n volume = self._volume_api.get(context, vol['id'])\n if volume['status'] != 'creating':\n return volume\n greenthread.sleep(1)\n\n except Exception as reason:\n LOG.exception(_(\"Unable to create root zpool volume for instance \"\n \"'%s': %s\") % (instance['name'], reason))\n raise", "def _init_nodes(self):\n # model info nodes\n self._node_device_name = self._get_node('DeviceModelName', \"string\")\n self._node_device_serial_number = self._get_node(\"DeviceSerialNumber\", \"string\")\n # Acquisition mode nodes\n self._node_acquisition_mode = self._get_node(\"AcquisitionMode\", node_type= \"enumeration\", \n check_writable= True)\n self._node_acquisition_frame_rate = self._get_node(\"AcquisitionFrameRate\", \"float\", check_writable=True)\n # Image size nodes\n self._node_width = self._get_node(\"Width\", \"integer\")\n self._node_height = self._get_node(\"Height\", \"integer\")\n # Exposure nodes\n self._node_exposure_time = self._get_node(\"ExposureTime\", \"float\")\n self._node_exposure_auto = self._get_node(\"ExposureAuto\", \"enumeration\")\n # Gain nodes\n self._node_gain_auto = self._get_node(\"GainAuto\", \"enumeration\", check_writable= True)\n self._node_gain = self._get_node(\"Gain\", \"float\")\n # Gamma node\n self._node_gamma_enable = self._get_node(\"GammaEnable\", \"boolean\",check_writable= True)\n # Pixel format nodes\n self._node_pixel_format = self._get_node(\"PixelFormat\", \"enumeration\")\n # legacy init for other parameters. \n self._attribute_init()", "def prepare_ramdisk(self, task, ramdisk_params):\n node = task.node\n\n # Label indicating a deploy or rescue operation being carried out on\n # the node, 'deploy' or 'rescue'. Unless the node is in a rescue like\n # state, the mode is set to 'deploy', indicating deploy operation is\n # being carried out.\n mode = deploy_utils.rescue_or_deploy_mode(node)\n\n if self.ipxe_enabled:\n # NOTE(mjturek): At this point, the ipxe boot script should\n # already exist as it is created at startup time. However, we\n # call the boot script create method here to assert its\n # existence and handle the unlikely case that it wasn't created\n # or was deleted.\n pxe_utils.create_ipxe_boot_script()\n\n # Generate options for both IPv4 and IPv6, and they can be\n # filtered down later based upon the port options.\n # TODO(TheJulia): This should be re-tooled during the Victoria\n # development cycle so that we call a single method and return\n # combined options. 
The method we currently call is relied upon\n # by two eternal projects, to changing the behavior is not ideal.\n dhcp_opts = pxe_utils.dhcp_options_for_instance(\n task, ipxe_enabled=self.ipxe_enabled, ip_version=4)\n dhcp_opts += pxe_utils.dhcp_options_for_instance(\n task, ipxe_enabled=self.ipxe_enabled, ip_version=6)\n provider = dhcp_factory.DHCPFactory()\n provider.update_dhcp(task, dhcp_opts)\n\n pxe_info = pxe_utils.get_image_info(node, mode=mode,\n ipxe_enabled=self.ipxe_enabled)\n\n # NODE: Try to validate and fetch instance images only\n # if we are in DEPLOYING state.\n if node.provision_state == states.DEPLOYING:\n pxe_info.update(\n pxe_utils.get_instance_image_info(\n task, ipxe_enabled=self.ipxe_enabled))\n\n boot_mode_utils.sync_boot_mode(task)\n\n pxe_options = pxe_utils.build_pxe_config_options(\n task, pxe_info, ipxe_enabled=self.ipxe_enabled,\n ramdisk_params=ramdisk_params)\n # TODO(dtantsur): backwards compability hack, remove in the V release\n if ramdisk_params.get(\"ipa-api-url\"):\n pxe_options[\"ipa-api-url\"] = ramdisk_params[\"ipa-api-url\"]\n\n if self.ipxe_enabled:\n pxe_config_template = deploy_utils.get_ipxe_config_template(node)\n else:\n pxe_config_template = deploy_utils.get_pxe_config_template(node)\n\n pxe_utils.create_pxe_config(task, pxe_options,\n pxe_config_template,\n ipxe_enabled=self.ipxe_enabled)\n manager_utils.node_set_boot_device(task, boot_devices.PXE,\n persistent=False)\n\n if self.ipxe_enabled and CONF.pxe.ipxe_use_swift:\n kernel_label = '%s_kernel' % mode\n ramdisk_label = '%s_ramdisk' % mode\n pxe_info.pop(kernel_label, None)\n pxe_info.pop(ramdisk_label, None)\n\n if pxe_info:\n pxe_utils.cache_ramdisk_kernel(task, pxe_info,\n ipxe_enabled=self.ipxe_enabled)\n\n LOG.debug('Ramdisk (i)PXE boot for node %(node)s has been prepared '\n 'with kernel params %(params)s',\n {'node': node.uuid, 'params': pxe_options})", "def setupBootRegion(self):\n self.virtualMemoryRequest(\n \"PhysicalRegion\",\n {\n \"RegionType\": \"BootRegion\",\n \"Size\": PcConfig.get_boot_region_size(),\n \"Type\": \"I\",\n \"Bank\": 0,\n },\n )", "def __init__(self, attrs = None, segment = None):\n BootInfoObject.__init__(self)\n\n self.owner = None\n self.zone = None\n\n if segment is not None:\n self.ms = segment\n self.token_exported = 0\n else:\n self.ms = weaver.image.image.add_memsection(attrs)", "def __init__(self):\n super(SdflexRedfishVirtualMediaBoot, self).__init__()\n if not sushy:\n raise ironic_exception.DriverLoadError(\n driver='sdfelx-redfish',\n reason=_('Unable to import the sushy library'))", "def _power_on(self, instance, network_info):\n name = instance['name']\n zone = self._get_zone_by_name(name)\n if zone is None:\n raise exception.InstanceNotFound(instance_id=name)\n\n # Attempt to update the zones hostid in the instance data, to catch\n # those instances that might have been created without a hostid stored.\n self._set_instance_metahostid(instance)\n\n bootargs = []\n if CONF.solariszones.solariszones_boot_options:\n reset_bootargs = False\n persistent = 'False'\n\n # Get any bootargs already set in the zone\n cur_bootargs = utils.lookup_resource_property(zone, 'global', 'bootargs')\n\n # Get any bootargs set in the instance metadata by the user\n meta_bootargs = instance.metadata.get('bootargs')\n\n if meta_bootargs:\n bootargs = ['--', str(meta_bootargs)]\n persistent = str(\n instance.metadata.get('bootargs_persist', 'False'))\n if cur_bootargs is not None and meta_bootargs != cur_bootargs:\n with ZoneConfig(zone) as zc:\n 
reset_bootargs = True\n # Temporarily clear bootargs in zone config\n zc.clear_resource_props('global', ['bootargs'])\n\n try:\n zone.boot(bootargs)\n self._plug_vifs(instance, network_info)\n except Exception as ex:\n reason = utils.zonemgr_strerror(ex)\n LOG.exception(_(\"Unable to power on instance '%s' via \"\n \"zonemgr(3RAD): %s\") % (name, reason))\n raise exception.InstancePowerOnFailure(reason=reason)\n finally:\n if CONF.solariszones.solariszones_boot_options:\n if meta_bootargs and persistent.lower() == 'false':\n # We have consumed the metadata bootargs and\n # the user asked for them not to be persistent so\n # clear them out now.\n instance.metadata.pop('bootargs', None)\n instance.metadata.pop('bootargs_persist', None)\n\n if reset_bootargs:\n with ZoneConfig(zone) as zc:\n # restore original boot args in zone config\n zc.setprop('global', 'bootargs', cur_bootargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepares the boot of the Ironic ramdisk. This method prepares the boot of the deploy or rescue ramdisk after reading relevant information from the node's driver_info and instance_info.
def prepare_ramdisk(self, task, ramdisk_params):
    if task.node.provision_state in (states.DEPLOYING, states.RESCUING,
                                     states.CLEANING, states.INSPECTING):
        node = task.node
        d_info = redfish_boot._parse_driver_info(node)
        # Label indicating a deploy or rescue operation being carried out
        # on the node, 'deploy' or 'rescue'. Unless the node is in a
        # rescue like state, the mode is set to 'deploy', indicating
        # deploy operation is being carried out.
        mode = deploy_utils.rescue_or_deploy_mode(node)
        iso_ref = image_utils.prepare_deploy_iso(task, ramdisk_params,
                                                 mode, d_info)
        node.driver_internal_info.update({'deploy_boot_iso': iso_ref})
        sdflex_common.set_network_setting_dhcpless_boot(node, iso_ref)
        boot_mode_utils.sync_boot_mode(task)
        manager_utils.node_set_boot_device(task, boot_devices.UEFIHTTP,
                                           persistent=False)
[ "def prepare_ramdisk(self, task, ramdisk_params):\n if task.node.provision_state in (states.DEPLOYING, states.RESCUING,\n states.CLEANING, states.INSPECTING):\n prepare_node_for_deploy(task)\n if not http_utils.is_http_boot_requested(task.node):\n super(SdflexPXEBoot, self).prepare_ramdisk(task, ramdisk_params)\n else:\n node = task.node\n # Label indicating a deploy or rescue operation being carried out\n # on the node, 'deploy' or 'rescue'. Unless the node is in a\n # rescue like state, the mode is set to 'deploy', indicating\n # deploy operation is being carried out.\n mode = deploy_utils.rescue_or_deploy_mode(node)\n\n http_info = http_utils.get_image_info(node, mode=mode)\n\n # NODE: Try to validate and fetch instance images only\n # if we are in DEPLOYING state.\n if node.provision_state == states.DEPLOYING:\n http_info.update(http_utils.get_instance_image_info(task))\n boot_mode_utils.sync_boot_mode(task)\n\n http_options = http_utils.build_http_config_options(task,\n http_info)\n http_options.update(ramdisk_params)\n http_config_template = deploy_utils.get_pxe_config_template(node)\n http_utils.create_http_config(task, http_options,\n http_config_template)\n manager_utils.node_set_boot_device(task, boot_devices.UEFIHTTP,\n persistent=False)\n if http_info:\n http_utils.cache_ramdisk_kernel(task, http_info)\n bfpv = str(task.node.driver_info.get('bfpv', 'false')).lower()\n if bfpv == 'true':\n node = task.node\n driver_internal_info = node.driver_internal_info\n driver_internal_info['bfpv_started'] = 'false'\n node.driver_internal_info = driver_internal_info\n node.save()", "def prepare_ramdisk(self, task, ramdisk_params):\n node = task.node\n\n # Label indicating a deploy or rescue operation being carried out on\n # the node, 'deploy' or 'rescue'. Unless the node is in a rescue like\n # state, the mode is set to 'deploy', indicating deploy operation is\n # being carried out.\n mode = deploy_utils.rescue_or_deploy_mode(node)\n\n if self.ipxe_enabled:\n # NOTE(mjturek): At this point, the ipxe boot script should\n # already exist as it is created at startup time. However, we\n # call the boot script create method here to assert its\n # existence and handle the unlikely case that it wasn't created\n # or was deleted.\n pxe_utils.create_ipxe_boot_script()\n\n # Generate options for both IPv4 and IPv6, and they can be\n # filtered down later based upon the port options.\n # TODO(TheJulia): This should be re-tooled during the Victoria\n # development cycle so that we call a single method and return\n # combined options. 
The method we currently call is relied upon\n # by two eternal projects, to changing the behavior is not ideal.\n dhcp_opts = pxe_utils.dhcp_options_for_instance(\n task, ipxe_enabled=self.ipxe_enabled, ip_version=4)\n dhcp_opts += pxe_utils.dhcp_options_for_instance(\n task, ipxe_enabled=self.ipxe_enabled, ip_version=6)\n provider = dhcp_factory.DHCPFactory()\n provider.update_dhcp(task, dhcp_opts)\n\n pxe_info = pxe_utils.get_image_info(node, mode=mode,\n ipxe_enabled=self.ipxe_enabled)\n\n # NODE: Try to validate and fetch instance images only\n # if we are in DEPLOYING state.\n if node.provision_state == states.DEPLOYING:\n pxe_info.update(\n pxe_utils.get_instance_image_info(\n task, ipxe_enabled=self.ipxe_enabled))\n\n boot_mode_utils.sync_boot_mode(task)\n\n pxe_options = pxe_utils.build_pxe_config_options(\n task, pxe_info, ipxe_enabled=self.ipxe_enabled,\n ramdisk_params=ramdisk_params)\n # TODO(dtantsur): backwards compability hack, remove in the V release\n if ramdisk_params.get(\"ipa-api-url\"):\n pxe_options[\"ipa-api-url\"] = ramdisk_params[\"ipa-api-url\"]\n\n if self.ipxe_enabled:\n pxe_config_template = deploy_utils.get_ipxe_config_template(node)\n else:\n pxe_config_template = deploy_utils.get_pxe_config_template(node)\n\n pxe_utils.create_pxe_config(task, pxe_options,\n pxe_config_template,\n ipxe_enabled=self.ipxe_enabled)\n manager_utils.node_set_boot_device(task, boot_devices.PXE,\n persistent=False)\n\n if self.ipxe_enabled and CONF.pxe.ipxe_use_swift:\n kernel_label = '%s_kernel' % mode\n ramdisk_label = '%s_ramdisk' % mode\n pxe_info.pop(kernel_label, None)\n pxe_info.pop(ramdisk_label, None)\n\n if pxe_info:\n pxe_utils.cache_ramdisk_kernel(task, pxe_info,\n ipxe_enabled=self.ipxe_enabled)\n\n LOG.debug('Ramdisk (i)PXE boot for node %(node)s has been prepared '\n 'with kernel params %(params)s',\n {'node': node.uuid, 'params': pxe_options})", "def prepare_ramdisk(self, task, ramdisk_params):\n node = task.node\n remote_server_data = {}\n remote_image_server = node.driver_info.get('remote_image_server')\n remote_image_share_root = node.driver_info.get(\n 'remote_image_share_root')\n remote_server_data['remote_image_share_type'] = (\n node.driver_info.get('remote_image_share_type'))\n remote_server_data['remote_image_user_name'] = (\n node.driver_info.get('remote_image_user_name', None))\n remote_server_data['remote_image_user_password'] = (\n node.driver_info.get('remote_image_user_password', None))\n\n # NOTE(TheJulia): If this method is being called by something\n # aside from deployment, clean and rescue, such as conductor takeover,\n # we should treat this as a no-op and move on otherwise we would\n # modify the state of the node due to virtual media operations.\n if node.provision_state not in (states.DEPLOYING,\n states.CLEANING,\n states.RESCUING,\n states.INSPECTING):\n return\n\n # NOTE(TheJulia): Since we're deploying, cleaning, or rescuing,\n # with virtual media boot, we should generate a token!\n manager_utils.add_secret_token(node, pregenerated=True)\n node.save()\n ramdisk_params['ipa-agent-token'] = (\n node.driver_internal_info['agent_secret_token'])\n\n manager_utils.node_power_action(task, states.POWER_OFF)\n\n deploy_nic_mac = deploy_utils.get_single_nic_with_vif_port_id(task)\n ramdisk_params['BOOTIF'] = deploy_nic_mac\n if CONF.debug and 'ipa-debug' not in ramdisk_params:\n ramdisk_params['ipa-debug'] = '1'\n\n mode = deploy_utils.rescue_or_deploy_mode(node)\n iso_ref = self._prepare_deploy_iso(task, ramdisk_params, mode)\n\n url = 
(remote_server_data['remote_image_share_type'] + \"://\" +\n remote_image_server + \"/\" + remote_image_share_root + \"/\" +\n iso_ref)\n\n sdflex_common.eject_vmedia(task,\n vmedia_device)\n sdflex_common.insert_vmedia(task, url,\n vmedia_device,\n remote_server_data)\n\n boot_mode_utils.sync_boot_mode(task)\n\n self._set_boot_device(task, boot_devices.CD.value.lower())\n\n LOG.debug(\"Node %(node)s is set to one time boot from \"\n \"%(device)s\", {'node': task.node.uuid,\n 'device': boot_devices.CD})", "def prepare_instance(self, task):\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n boot_option = deploy_utils.get_boot_option(task.node)\n if boot_option != \"kickstart\":\n sdflex_common.update_secure_boot_mode(task, True)\n if not http_utils.is_http_boot_requested(task.node):\n if boot_option == \"kickstart\":\n prepare_node_for_deploy(task)\n super(SdflexPXEBoot, self).prepare_instance(task)\n else:\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_option = deploy_utils.get_boot_option(node)\n boot_device = None\n instance_image_info = {}\n if boot_option == \"ramdisk\":\n instance_image_info = http_utils.get_instance_image_info(task)\n http_utils.cache_ramdisk_kernel(task, instance_image_info)\n if deploy_utils.is_iscsi_boot(task) or boot_option == \"ramdisk\":\n http_utils.prepare_instance_http_config(\n task, instance_image_info,\n iscsi_boot=deploy_utils.is_iscsi_boot(task),\n ramdisk_boot=(boot_option == \"ramdisk\"))\n if http_utils.is_http_boot_requested(task.node):\n boot_device = boot_devices.UEFIHTTP\n else:\n boot_device = boot_devices.PXE\n elif boot_option != \"local\":\n if task.driver.storage.should_write_image(task):\n # Make sure that the instance kernel/ramdisk is cached.\n # This is for the takeover scenario for active nodes.\n instance_image_info = (\n http_utils.get_instance_image_info(task))\n http_utils.cache_ramdisk_kernel(task, instance_image_info)\n iwdi = (\n task.node.driver_internal_info.get('is_whole_disk_image'))\n try:\n root_uuid_or_disk_id = task.node.driver_internal_info[\n 'root_uuid_or_disk_id'\n ]\n except KeyError:\n if not task.driver.storage.should_write_image(task):\n pass\n elif not iwdi:\n LOG.warning(\"The UUID for the root partition can't be\"\n \" found, unable to switch the pxe config \"\n \"from deployment mode to service (boot) \"\n \"mode for node %(node)s\",\n {\"node\": task.node.uuid})\n else:\n LOG.warning(\"The disk id for the whole disk image \"\n \"can't be found, unable to switch the \"\n \"pxe config from deployment mode to \"\n \"service (boot) mode for node %(node)s. \"\n \"Booting the instance from disk.\",\n {\"node\": task.node.uuid})\n http_utils.clean_up_http_config(task)\n boot_device = boot_devices.DISK\n else:\n http_utils.build_service_http_config(task,\n instance_image_info,\n root_uuid_or_disk_id)\n if http_utils.is_http_boot_requested(task.node):\n boot_device = boot_devices.UEFIHTTP\n else:\n boot_device = boot_devices.PXE\n else:\n # If it's going to boot from the local disk, we don't need\n # PXE config files. 
They still need to be generated as part\n # of the prepare() because the deployment does PXE boot the\n # deploy ramdisk\n http_utils.clean_up_http_config(task)\n boot_device = boot_devices.DISK\n\n # NOTE(pas-ha) do not re-set boot device on ACTIVE nodes\n # during takeover\n if boot_device and task.node.provision_state != states.ACTIVE:\n persistent = True\n if node.driver_info.get('force_persistent_boot_device',\n 'Default') == 'Never':\n persistent = False\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=persistent)", "def prepare_instance(self, task):\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_device = None\n boot_option = deploy_utils.get_boot_option(node)\n if boot_option != \"kickstart\":\n boot_mode_utils.configure_secure_boot_if_needed(task)\n\n instance_image_info = {}\n if boot_option == \"ramdisk\" or boot_option == \"kickstart\":\n instance_image_info = pxe_utils.get_instance_image_info(\n task, ipxe_enabled=self.ipxe_enabled)\n pxe_utils.cache_ramdisk_kernel(task, instance_image_info,\n ipxe_enabled=self.ipxe_enabled)\n if 'ks_template' in instance_image_info:\n ks_cfg = pxe_utils.validate_kickstart_template(\n instance_image_info['ks_template'][1]\n )\n pxe_utils.validate_kickstart_file(ks_cfg)\n\n if (deploy_utils.is_iscsi_boot(task) or boot_option == \"ramdisk\"\n or boot_option == \"kickstart\"):\n pxe_utils.prepare_instance_pxe_config(\n task, instance_image_info,\n iscsi_boot=deploy_utils.is_iscsi_boot(task),\n ramdisk_boot=(boot_option == \"ramdisk\"),\n anaconda_boot=(boot_option == \"kickstart\"),\n ipxe_enabled=self.ipxe_enabled)\n pxe_utils.prepare_instance_kickstart_config(\n task, instance_image_info,\n anaconda_boot=(boot_option == \"kickstart\"))\n boot_device = boot_devices.PXE\n\n else:\n # NOTE(dtantsur): create a PXE configuration as a safety net for\n # hardware uncapable of persistent boot. 
If on a reboot it will try\n # to boot from PXE, this configuration will return it back.\n if CONF.pxe.enable_netboot_fallback:\n pxe_utils.build_service_pxe_config(\n task, instance_image_info,\n task.node.driver_internal_info.get('root_uuid_or_disk_id'),\n ipxe_enabled=self.ipxe_enabled,\n # PXE config for whole disk images is identical to what\n # we need to boot from local disk, so use True even\n # for partition images.\n is_whole_disk_image=True)\n else:\n # Clean up the deployment configuration\n pxe_utils.clean_up_pxe_config(\n task, ipxe_enabled=self.ipxe_enabled)\n boot_device = boot_devices.DISK\n\n # NOTE(pas-ha) do not re-set boot device on ACTIVE nodes\n # during takeover\n if boot_device and task.node.provision_state != states.ACTIVE:\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=True)", "def _prepare_boot_iso(self, task, root_uuid=None):\n node = task.node\n d_info = redfish_boot._parse_deploy_info(node)\n\n kernel_href = node.instance_info.get('kernel')\n ramdisk_href = node.instance_info.get('ramdisk')\n\n if not kernel_href or not ramdisk_href:\n\n image_href = d_info['image_source']\n\n image_properties = (\n images.get_image_properties(\n task.context, image_href, ['kernel_id', 'ramdisk_id']))\n\n if not kernel_href:\n kernel_href = image_properties.get('kernel_id')\n\n if not ramdisk_href:\n ramdisk_href = image_properties.get('ramdisk_id')\n\n if not kernel_href or not ramdisk_href:\n raise exception.InvalidParameterValue(_(\n \"Unable to find kernel or ramdisk for \"\n \"to generate boot ISO for %(node)s\") %\n {'node': task.node.uuid})\n\n bootloader_href = d_info.get('bootloader')\n\n return self._prepare_iso_image(\n task, kernel_href, ramdisk_href, bootloader_href,\n root_uuid=root_uuid)", "def prepare_instance(self, task):\n node = task.node\n\n boot_option = deploy_utils.get_boot_option(node)\n\n self.clean_up_instance(task)\n\n remote_image_server = node.driver_info.get('remote_image_server')\n remote_image_share_root = node.driver_info.get(\n 'remote_image_share_root')\n\n remote_server_data = {}\n remote_server_data['remote_image_share_type'] = (\n node.driver_info.get('remote_image_share_type'))\n remote_server_data['remote_image_user_name'] = (\n node.driver_info.get('remote_image_user_name', None))\n remote_server_data['remote_image_user_password'] = (\n node.driver_info.get('remote_image_user_password', None))\n\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n sdflex_common.update_secure_boot_mode(task, True)\n iwdi = node.driver_internal_info.get('is_whole_disk_image')\n if boot_option == \"local\" or iwdi:\n self._set_boot_device(\n task, boot_devices.DISK, persistent=True)\n\n LOG.debug(\"Node %(node)s is set to permanently boot from local \"\n \"%(device)s\", {'node': task.node.uuid,\n 'device': boot_devices.DISK})\n return\n\n params = {}\n\n if boot_option != 'ramdisk':\n root_uuid = node.driver_internal_info.get('root_uuid_or_disk_id')\n\n if not root_uuid and task.driver.storage.should_write_image(task):\n LOG.warning(\n \"The UUID of the root partition could not be found for \"\n \"node %s. 
Booting instance from disk anyway.\", node.uuid)\n\n self._set_boot_device(\n task, boot_devices.DISK, persistent=True)\n\n return\n\n params.update(root_uuid=root_uuid)\n\n iso_ref = self._prepare_boot_iso(task, **params)\n\n url = (remote_server_data['remote_image_share_type'] + \"://\" +\n remote_image_server + \"/\" + remote_image_share_root + \"/\" +\n iso_ref)\n\n sdflex_common.eject_vmedia(task,\n vmedia_device)\n sdflex_common.insert_vmedia(task, url,\n vmedia_device,\n remote_server_data)\n\n boot_mode_utils.sync_boot_mode(task)\n\n self._set_boot_device(\n task, boot_devices.CD.value.lower(), persistent=True)\n\n LOG.debug(\"Node %(node)s is set to permanently boot from \"\n \"%(device)s\", {'node': task.node.uuid,\n 'device': boot_devices.CD})", "def _prepare_iso_image(self, task, kernel_href, ramdisk_href,\n bootloader_href=None, configdrive=None,\n root_uuid=None, params=None):\n if not kernel_href or not ramdisk_href:\n raise exception.InvalidParameterValue(_(\n \"Unable to find kernel or ramdisk for \"\n \"building ISO for %(node)s\") %\n {'node': task.node.uuid})\n\n i_info = task.node.instance_info\n driver_info = task.node.driver_info\n if driver_info.get('remote_image_share_type') == 'nfs':\n image_share_root = driver_info.get('remote_image_share_root')\n else:\n image_share_root = driver_info.get('image_share_root')\n if deploy_utils.get_boot_option(task.node) == \"ramdisk\":\n kernel_params = \"root=/dev/ram0 text \"\n kernel_params += i_info.get(\"ramdisk_kernel_arguments\", \"\")\n\n else:\n kernel_params = i_info.get('kernel_append_params', \"\")\n\n if params:\n kernel_params = ' '.join(\n (kernel_params, ' '.join(\n '%s=%s' % kv for kv in params.items())))\n\n boot_mode = boot_mode_utils.get_boot_mode_for_deploy(task.node)\n\n LOG.debug(\"Trying to create %(boot_mode)s ISO image for node %(node)s \"\n \"with kernel %(kernel_href)s, ramdisk %(ramdisk_href)s, \"\n \"bootloader %(bootloader_href)s and kernel params %(params)s\"\n \"\", {'node': task.node.uuid,\n 'boot_mode': boot_mode,\n 'kernel_href': kernel_href,\n 'ramdisk_href': ramdisk_href,\n 'bootloader_href': bootloader_href,\n 'params': kernel_params})\n\n with tempfile.NamedTemporaryFile(\n dir=CONF.tempdir, suffix='.iso') as boot_fileobj:\n\n with tempfile.NamedTemporaryFile(\n dir=CONF.tempdir, suffix='.img') as cfgdrv_fileobj:\n\n configdrive_href = configdrive\n\n if configdrive:\n parsed_url = urlparse.urlparse(configdrive)\n if not parsed_url.scheme:\n cfgdrv_blob = base64.decode_as_bytes(configdrive)\n\n with open(cfgdrv_fileobj.name, 'wb') as f:\n f.write(cfgdrv_blob)\n\n configdrive_href = urlparse.urlunparse(\n ('file', '', cfgdrv_fileobj.name, '', '', ''))\n\n LOG.info(\"Burning configdrive %(url)s to boot ISO image \"\n \"for node %(node)s\", {'url': configdrive_href,\n 'node': task.node.uuid})\n boot_iso_tmp_file = boot_fileobj.name\n\n images.create_boot_iso(\n task.context, boot_iso_tmp_file,\n kernel_href, ramdisk_href,\n esp_image_href=bootloader_href,\n root_uuid=root_uuid,\n kernel_params=kernel_params,\n boot_mode=boot_mode)\n iso_object_name = self._get_iso_image_name(task.node)\n\n image_url = self._publish_image(\n boot_iso_tmp_file, iso_object_name, image_share_root)\n\n LOG.debug(\"Created ISO %(name)s in NFS/CIFS for node %(node)s, \"\n \"exposed as temporary URL \"\n \"%(url)s\", {'node': task.node.uuid,\n 'name': iso_object_name,\n 'url': image_url})\n\n return image_url", "def prepare_instance(self, task):\n\n # Need to enable secure boot, if being requested.\n # 
update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n sdflex_common.update_secure_boot_mode(task, True)\n\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_device = None\n\n self.clean_up_instance(task)\n boot_device = boot_devices.DISK\n\n if boot_device and task.node.provision_state != states.ACTIVE:\n persistent = True\n if node.driver_info.get('force_persistent_boot_device',\n 'Default') == 'Never':\n persistent = False\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=persistent)", "def bootMaster(self):\n self.libvirt.bootMaster()\n time.sleep(100)", "def bootNodes(self):\n self.libvirt.bootSlaves()", "def setupBootRegion(self):\n self.virtualMemoryRequest(\n \"PhysicalRegion\",\n {\n \"RegionType\": \"BootRegion\",\n \"Size\": PcConfig.get_boot_region_size(),\n \"Type\": \"I\",\n \"Bank\": 0,\n },\n )", "def boot_installer(self):\n boot_timeout = int(config.get('iso_installer', 'BOOT_TIMEOUT'))\n self.child.expect('Escape character')\n LOG.info('connected to the VM (controller-0)')\n # send a escape character\n self.child.sendline('\\x1b')\n self.child.expect('boot:')\n cmd_boot_line = common.get_cmd_boot_line()\n self.child.sendline(cmd_boot_line)\n LOG.info('kernel command line sent: %s', cmd_boot_line)\n # send a enter character\n self.child.sendline('\\r')\n # setting a boot timeout\n self.child.timeout = boot_timeout\n self.child.expect('Loading vmlinuz')\n LOG.info('Loading vmlinuz')\n self.child.expect('Loading initrd.img')\n LOG.info('Loading initrd.img')\n self.child.expect('Starting installer, one moment...')\n LOG.info('Starting installer ...')\n self.child.expect('Performing post-installation setup tasks')\n LOG.info('Performing post-installation setup tasks')", "def _prepare_deploy_iso(self, task, params, mode):\n node = task.node\n d_info = redfish_boot._parse_driver_info(node)\n\n kernel_href = d_info.get('%s_kernel' % mode)\n ramdisk_href = d_info.get('%s_ramdisk' % mode)\n bootloader_href = d_info.get('bootloader')\n\n return self._prepare_iso_image(\n task, kernel_href, ramdisk_href, bootloader_href, params=params)", "def clean_up_ramdisk(self, task):\n node = task.node\n mode = deploy_utils.rescue_or_deploy_mode(node)\n try:\n images_info = pxe_utils.get_image_info(\n node, mode=mode, ipxe_enabled=self.ipxe_enabled)\n except exception.MissingParameterValue as e:\n LOG.warning('Could not get %(mode)s image info '\n 'to clean up images for node %(node)s: %(err)s',\n {'mode': mode, 'node': node.uuid, 'err': e})\n else:\n pxe_utils.clean_up_pxe_env(\n task, images_info, ipxe_enabled=self.ipxe_enabled)", "def multinic_bootstrap_booting(self):\n self.env.revert_snapshot(\"ready\")\n\n slave = self.env.nodes().slaves[0]\n mac_addresses = [interface.mac_address for interface in\n slave.interfaces.filter(network__name='internal')]\n try:\n for mac in mac_addresses:\n Ebtables.block_mac(mac)\n for mac in mac_addresses:\n Ebtables.restore_mac(mac)\n slave.destroy(verbose=False)\n self.env.nodes().admins[0].revert(\"ready\")\n nailgun_slave = self.env.bootstrap_nodes([slave])[0]\n assert_equal(mac.upper(), nailgun_slave['mac'].upper())\n Ebtables.block_mac(mac)\n finally:\n for mac in mac_addresses:\n Ebtables.restore_mac(mac)", "def do_prepare_partition(cls, part, source_params, creator, cr_workdir,\n oe_builddir, bootimg_dir, kernel_dir,\n rootfs_dir, native_sysroot):\n\n # We rely on the --label parameter and the naming convention\n # in partition.py prepare_rootfs() here to find the 
already\n # prepared rootfs partition image.\n pattern = '%s/rootfs_%s.*' % (cr_workdir, part.label)\n rootfs = glob.glob(pattern)\n if len(rootfs) != 1:\n raise WicError(\"%s shell pattern does not match exactly one rootfs image (missing --label parameter?): %s\" % (pattern, rootfs))\n else:\n rootfs = rootfs[0]\n logger.debug(\"Calculating dm-verity hash for rootfs %s (native %s).\" % (rootfs, native_sysroot))\n\n hashimg = '%s/dm-verity_%s.img' % (cr_workdir, part.label)\n # Reserve some fixed amount of space at the start of the hash image\n # for our own data (in particular, the signed root hash).\n # The content of that part is:\n # roothash=<....>\n # <potentially some more assignments in the future>\n # signature=<single line of base64 encoded OpenSSL sha256 digest>\n header_size = 4096\n ret, out = exec_native_cmd(\"veritysetup format '%s' '%s' --hash-offset=%d\" %\n (rootfs, hashimg, header_size),\n native_sysroot)\n m = re.search(r'^Root hash:\\s*(\\S+)$', out, re.MULTILINE)\n if ret or not m:\n raise WicError('veritysetup failed: %s' % out)\n else:\n root_hash = m.group(1)\n privkey = get_bitbake_var('REFKIT_DMVERITY_PRIVATE_KEY')\n password = get_bitbake_var('REFKIT_DMVERITY_PASSWORD')\n tmp = tempfile.mkdtemp(prefix='dm-verity-')\n try:\n data_filename = os.path.join(tmp, 'data')\n header = ('roothash=%s\\nheadersize=%d\\n' % (root_hash, header_size)).encode('ascii')\n with open(data_filename, 'wb') as data:\n data.write(header)\n # Must use a temporary file, exec_native_cmd() only supports UTF-8 output.\n signature = os.path.join(tmp, 'sig')\n ret, out = exec_native_cmd(\"openssl dgst -sha256 -passin '%s' -sign '%s' -out '%s' '%s'\" %\n (password, privkey, signature, data_filename),\n native_sysroot)\n if ret:\n raise WicError('openssl signing failed')\n with open(signature, 'rb') as f:\n header += b'signature=' + base64.standard_b64encode(f.read()) + b'\\n'\n if len(header) + 1 >= header_size:\n raise WicError('reserved space for dm-verity header too small')\n with open(hashimg, 'rb+') as hash:\n hash.write(header)\n finally:\n shutil.rmtree(tmp)\n\n data_bytes = os.stat(rootfs).st_size\n hash_bytes = os.stat(hashimg).st_size\n logger.debug(\"dm-verity data partition %d bytes, hash partition %d bytes, ratio %f.\" %\n (data_bytes, hash_bytes, data_bytes / hash_bytes))\n part.size = data_bytes // 1024\n part.source_file = hashimg", "def prepare(topology='devstack'):\n log.info(\"Preparing boxes for %s Openstack\" % topology)\n log.info(\"Preparing virtual machines for lab=%s\" % LAB)\n url = IMAGES_REPO + DEVSTACK_DISK\n local(\"test -e %s || wget -nv %s\" % (DEVSTACK_DISK, url))\n local(\"python ./tools/cloud/create.py -l {lab} -s /opt/imgs \"\n \"-z ./{disk} -t {topo} > config_file\".format(lab=LAB,\n disk=DEVSTACK_DISK,\n topo=topology))", "def resume_state_on_host_boot(self, *args, **kwargs):\n raise NotImplementedError()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepares the boot of the instance. This method prepares the boot of the instance. Only the 'local' boot option is supported. If secure boot is requested, the OS is booted with secure boot enabled.
def prepare_instance(self, task):
    # Need to enable secure boot, if being requested.
    # update_secure_boot_mode checks and enables secure boot only if the
    # deploy has requested secure boot
    sdflex_common.update_secure_boot_mode(task, True)
    boot_mode_utils.sync_boot_mode(task)
    node = task.node
    boot_device = None
    self.clean_up_instance(task)
    boot_device = boot_devices.DISK
    if boot_device and task.node.provision_state != states.ACTIVE:
        persistent = True
        if node.driver_info.get('force_persistent_boot_device',
                                'Default') == 'Never':
            persistent = False
        manager_utils.node_set_boot_device(task, boot_device,
                                           persistent=persistent)
[ "def prepare_instance(self, task):\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n boot_option = deploy_utils.get_boot_option(task.node)\n if boot_option != \"kickstart\":\n sdflex_common.update_secure_boot_mode(task, True)\n if not http_utils.is_http_boot_requested(task.node):\n if boot_option == \"kickstart\":\n prepare_node_for_deploy(task)\n super(SdflexPXEBoot, self).prepare_instance(task)\n else:\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_option = deploy_utils.get_boot_option(node)\n boot_device = None\n instance_image_info = {}\n if boot_option == \"ramdisk\":\n instance_image_info = http_utils.get_instance_image_info(task)\n http_utils.cache_ramdisk_kernel(task, instance_image_info)\n if deploy_utils.is_iscsi_boot(task) or boot_option == \"ramdisk\":\n http_utils.prepare_instance_http_config(\n task, instance_image_info,\n iscsi_boot=deploy_utils.is_iscsi_boot(task),\n ramdisk_boot=(boot_option == \"ramdisk\"))\n if http_utils.is_http_boot_requested(task.node):\n boot_device = boot_devices.UEFIHTTP\n else:\n boot_device = boot_devices.PXE\n elif boot_option != \"local\":\n if task.driver.storage.should_write_image(task):\n # Make sure that the instance kernel/ramdisk is cached.\n # This is for the takeover scenario for active nodes.\n instance_image_info = (\n http_utils.get_instance_image_info(task))\n http_utils.cache_ramdisk_kernel(task, instance_image_info)\n iwdi = (\n task.node.driver_internal_info.get('is_whole_disk_image'))\n try:\n root_uuid_or_disk_id = task.node.driver_internal_info[\n 'root_uuid_or_disk_id'\n ]\n except KeyError:\n if not task.driver.storage.should_write_image(task):\n pass\n elif not iwdi:\n LOG.warning(\"The UUID for the root partition can't be\"\n \" found, unable to switch the pxe config \"\n \"from deployment mode to service (boot) \"\n \"mode for node %(node)s\",\n {\"node\": task.node.uuid})\n else:\n LOG.warning(\"The disk id for the whole disk image \"\n \"can't be found, unable to switch the \"\n \"pxe config from deployment mode to \"\n \"service (boot) mode for node %(node)s. \"\n \"Booting the instance from disk.\",\n {\"node\": task.node.uuid})\n http_utils.clean_up_http_config(task)\n boot_device = boot_devices.DISK\n else:\n http_utils.build_service_http_config(task,\n instance_image_info,\n root_uuid_or_disk_id)\n if http_utils.is_http_boot_requested(task.node):\n boot_device = boot_devices.UEFIHTTP\n else:\n boot_device = boot_devices.PXE\n else:\n # If it's going to boot from the local disk, we don't need\n # PXE config files. 
They still need to be generated as part\n # of the prepare() because the deployment does PXE boot the\n # deploy ramdisk\n http_utils.clean_up_http_config(task)\n boot_device = boot_devices.DISK\n\n # NOTE(pas-ha) do not re-set boot device on ACTIVE nodes\n # during takeover\n if boot_device and task.node.provision_state != states.ACTIVE:\n persistent = True\n if node.driver_info.get('force_persistent_boot_device',\n 'Default') == 'Never':\n persistent = False\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=persistent)", "def prepare_instance(self, task):\n node = task.node\n\n boot_option = deploy_utils.get_boot_option(node)\n\n self.clean_up_instance(task)\n\n remote_image_server = node.driver_info.get('remote_image_server')\n remote_image_share_root = node.driver_info.get(\n 'remote_image_share_root')\n\n remote_server_data = {}\n remote_server_data['remote_image_share_type'] = (\n node.driver_info.get('remote_image_share_type'))\n remote_server_data['remote_image_user_name'] = (\n node.driver_info.get('remote_image_user_name', None))\n remote_server_data['remote_image_user_password'] = (\n node.driver_info.get('remote_image_user_password', None))\n\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n sdflex_common.update_secure_boot_mode(task, True)\n iwdi = node.driver_internal_info.get('is_whole_disk_image')\n if boot_option == \"local\" or iwdi:\n self._set_boot_device(\n task, boot_devices.DISK, persistent=True)\n\n LOG.debug(\"Node %(node)s is set to permanently boot from local \"\n \"%(device)s\", {'node': task.node.uuid,\n 'device': boot_devices.DISK})\n return\n\n params = {}\n\n if boot_option != 'ramdisk':\n root_uuid = node.driver_internal_info.get('root_uuid_or_disk_id')\n\n if not root_uuid and task.driver.storage.should_write_image(task):\n LOG.warning(\n \"The UUID of the root partition could not be found for \"\n \"node %s. 
Booting instance from disk anyway.\", node.uuid)\n\n self._set_boot_device(\n task, boot_devices.DISK, persistent=True)\n\n return\n\n params.update(root_uuid=root_uuid)\n\n iso_ref = self._prepare_boot_iso(task, **params)\n\n url = (remote_server_data['remote_image_share_type'] + \"://\" +\n remote_image_server + \"/\" + remote_image_share_root + \"/\" +\n iso_ref)\n\n sdflex_common.eject_vmedia(task,\n vmedia_device)\n sdflex_common.insert_vmedia(task, url,\n vmedia_device,\n remote_server_data)\n\n boot_mode_utils.sync_boot_mode(task)\n\n self._set_boot_device(\n task, boot_devices.CD.value.lower(), persistent=True)\n\n LOG.debug(\"Node %(node)s is set to permanently boot from \"\n \"%(device)s\", {'node': task.node.uuid,\n 'device': boot_devices.CD})", "def prepare_instance(self, task):\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_device = None\n boot_option = deploy_utils.get_boot_option(node)\n if boot_option != \"kickstart\":\n boot_mode_utils.configure_secure_boot_if_needed(task)\n\n instance_image_info = {}\n if boot_option == \"ramdisk\" or boot_option == \"kickstart\":\n instance_image_info = pxe_utils.get_instance_image_info(\n task, ipxe_enabled=self.ipxe_enabled)\n pxe_utils.cache_ramdisk_kernel(task, instance_image_info,\n ipxe_enabled=self.ipxe_enabled)\n if 'ks_template' in instance_image_info:\n ks_cfg = pxe_utils.validate_kickstart_template(\n instance_image_info['ks_template'][1]\n )\n pxe_utils.validate_kickstart_file(ks_cfg)\n\n if (deploy_utils.is_iscsi_boot(task) or boot_option == \"ramdisk\"\n or boot_option == \"kickstart\"):\n pxe_utils.prepare_instance_pxe_config(\n task, instance_image_info,\n iscsi_boot=deploy_utils.is_iscsi_boot(task),\n ramdisk_boot=(boot_option == \"ramdisk\"),\n anaconda_boot=(boot_option == \"kickstart\"),\n ipxe_enabled=self.ipxe_enabled)\n pxe_utils.prepare_instance_kickstart_config(\n task, instance_image_info,\n anaconda_boot=(boot_option == \"kickstart\"))\n boot_device = boot_devices.PXE\n\n else:\n # NOTE(dtantsur): create a PXE configuration as a safety net for\n # hardware uncapable of persistent boot. If on a reboot it will try\n # to boot from PXE, this configuration will return it back.\n if CONF.pxe.enable_netboot_fallback:\n pxe_utils.build_service_pxe_config(\n task, instance_image_info,\n task.node.driver_internal_info.get('root_uuid_or_disk_id'),\n ipxe_enabled=self.ipxe_enabled,\n # PXE config for whole disk images is identical to what\n # we need to boot from local disk, so use True even\n # for partition images.\n is_whole_disk_image=True)\n else:\n # Clean up the deployment configuration\n pxe_utils.clean_up_pxe_config(\n task, ipxe_enabled=self.ipxe_enabled)\n boot_device = boot_devices.DISK\n\n # NOTE(pas-ha) do not re-set boot device on ACTIVE nodes\n # during takeover\n if boot_device and task.node.provision_state != states.ACTIVE:\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=True)", "def setup_boot(self):\n\n mission_state_names = list(Enums.mission_states.names())\n nominal_states = mission_state_names\n nominal_states.remove('manual')\n nominal_states.remove('startup')\n nominal_states.remove('safehold')\n nominal_states.remove('initialization_hold')\n\n if self.desired_boot_state in nominal_states:\n self.deployment_hold_length = 100 # Number of cycles for which the satellite will be in a deployment hold. 
This\n # is an item that is configured on Flight Software.\n self.elapsed_deployment = int(self.flight_controller.read_state(\"pan.deployment.elapsed\"))\n self.max_detumble_cycles = 100 # Number of cycles for which we expect the satellite to be in detumble\n\n # Let's be generous with what angular rate is allowable as \"detumbled.\"\n self.flight_controller.write_state(\"detumble_safety_factor\", 10)\n\n # Prevent ADCS faults from causing transition to initialization hold\n self.flight_controller.write_state(\"adcs_monitor.functional_fault.suppress\", \"true\")\n self.flight_controller.write_state(\"adcs_monitor.wheel1_fault.suppress\", \"true\")\n self.flight_controller.write_state(\"adcs_monitor.wheel2_fault.suppress\", \"true\")\n self.flight_controller.write_state(\"adcs_monitor.wheel3_fault.suppress\", \"true\")\n self.flight_controller.write_state(\"adcs_monitor.wheel_pot_fault.suppress\", \"true\")\n\n self.logger.put(f\"Waiting for the satellite to boot to {self.desired_boot_state}.\")", "def bootMaster(self):\n self.libvirt.bootMaster()\n time.sleep(100)", "def boot_installer(self):\n boot_timeout = int(config.get('iso_installer', 'BOOT_TIMEOUT'))\n self.child.expect('Escape character')\n LOG.info('connected to the VM (controller-0)')\n # send a escape character\n self.child.sendline('\\x1b')\n self.child.expect('boot:')\n cmd_boot_line = common.get_cmd_boot_line()\n self.child.sendline(cmd_boot_line)\n LOG.info('kernel command line sent: %s', cmd_boot_line)\n # send a enter character\n self.child.sendline('\\r')\n # setting a boot timeout\n self.child.timeout = boot_timeout\n self.child.expect('Loading vmlinuz')\n LOG.info('Loading vmlinuz')\n self.child.expect('Loading initrd.img')\n LOG.info('Loading initrd.img')\n self.child.expect('Starting installer, one moment...')\n LOG.info('Starting installer ...')\n self.child.expect('Performing post-installation setup tasks')\n LOG.info('Performing post-installation setup tasks')", "def cmd_boot(self):\n self._simple_cmd(Command.BOOT)", "def resume_state_on_host_boot(self, *args, **kwargs):\n raise NotImplementedError()", "def multinic_bootstrap_booting(self):\n self.env.revert_snapshot(\"ready\")\n\n slave = self.env.nodes().slaves[0]\n mac_addresses = [interface.mac_address for interface in\n slave.interfaces.filter(network__name='internal')]\n try:\n for mac in mac_addresses:\n Ebtables.block_mac(mac)\n for mac in mac_addresses:\n Ebtables.restore_mac(mac)\n slave.destroy(verbose=False)\n self.env.nodes().admins[0].revert(\"ready\")\n nailgun_slave = self.env.bootstrap_nodes([slave])[0]\n assert_equal(mac.upper(), nailgun_slave['mac'].upper())\n Ebtables.block_mac(mac)\n finally:\n for mac in mac_addresses:\n Ebtables.restore_mac(mac)", "def _connect_boot_volume(self, volume, mountpoint, context, instance):\n LOG.debug('Connecting boot volume')\n instance_uuid = instance['uuid']\n volume_id = volume['id']\n\n connector = self.get_volume_connector(instance)\n connection_info = self._initialize_volume_connection(context,\n volume_id,\n connector)\n\n # Check connection_info to determine if the provided volume is\n # local to this compute node. 
If it is, then don't use it for\n # Solaris branded zones in order to avoid a known ZFS deadlock issue\n # when using a zpool within another zpool on the same system.\n extra_specs = self._get_flavor(instance)['extra_specs'].copy()\n brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)\n if brand == ZONE_BRAND_SOLARIS:\n driver_type = connection_info['driver_volume_type']\n if driver_type == 'local':\n msg = _(\"Detected 'local' zvol driver volume type \"\n \"from volume service, which should not be \"\n \"used as a boot device for 'solaris' \"\n \"branded zones.\")\n raise exception.InvalidVolume(reason=msg)\n elif driver_type == 'iscsi':\n # Check for a potential loopback iSCSI situation\n data = connection_info['data']\n target_portal = data['target_portal']\n # Strip off the port number (eg. 127.0.0.1:3260)\n host = target_portal.rsplit(':', 1)\n # Strip any enclosing '[' and ']' brackets for\n # IPv6 addresses.\n target_host = host[0].strip('[]')\n\n # Check if target_host is an IP or hostname matching the\n # connector host or IP, which would mean the provisioned\n # iSCSI LUN is on the same host as the instance.\n if target_host in [connector['ip'], connector['host']]:\n msg = _(\"iSCSI connection info from volume \"\n \"service indicates that the target is a \"\n \"local volume, which should not be used \"\n \"as a boot device for 'solaris' branded \"\n \"zones.\")\n raise exception.InvalidVolume(reason=msg)\n # Assuming that fibre_channel is non-local\n elif driver_type != 'fibre_channel':\n # Some other connection type that we don't understand\n # Let zone use some local fallback instead.\n msg = _(\"Unsupported volume driver type '%s' can not be used \"\n \"as a boot device for zones.\" % driver_type)\n raise exception.InvalidVolume(reason=msg)\n\n # Volume looks OK to use. 
Notify Cinder of the attachment.\n self._volume_api.attach(context, volume_id, instance_uuid, mountpoint)\n return connection_info", "async def pre_bootstrap(msg_cb):\n\n # Set provider type for post-bootstrap\n app.env['JUJU_PROVIDERTYPE'] = juju.get_cloud_types_by_name()[\n app.current_cloud]\n app.env['JUJU_CONTROLLER'] = app.current_controller\n app.env['JUJU_MODEL'] = app.current_model\n app.env['CONJURE_UP_SPELLSDIR'] = app.argv.spells_dir\n\n await utils.run_step('00_pre-bootstrap',\n 'pre-bootstrap',\n msg_cb)", "def _prepare_boot_iso(self, task, root_uuid=None):\n node = task.node\n d_info = redfish_boot._parse_deploy_info(node)\n\n kernel_href = node.instance_info.get('kernel')\n ramdisk_href = node.instance_info.get('ramdisk')\n\n if not kernel_href or not ramdisk_href:\n\n image_href = d_info['image_source']\n\n image_properties = (\n images.get_image_properties(\n task.context, image_href, ['kernel_id', 'ramdisk_id']))\n\n if not kernel_href:\n kernel_href = image_properties.get('kernel_id')\n\n if not ramdisk_href:\n ramdisk_href = image_properties.get('ramdisk_id')\n\n if not kernel_href or not ramdisk_href:\n raise exception.InvalidParameterValue(_(\n \"Unable to find kernel or ramdisk for \"\n \"to generate boot ISO for %(node)s\") %\n {'node': task.node.uuid})\n\n bootloader_href = d_info.get('bootloader')\n\n return self._prepare_iso_image(\n task, kernel_href, ramdisk_href, bootloader_href,\n root_uuid=root_uuid)", "def setupBootRegion(self):\n self.virtualMemoryRequest(\n \"PhysicalRegion\",\n {\n \"RegionType\": \"BootRegion\",\n \"Size\": PcConfig.get_boot_region_size(),\n \"Type\": \"I\",\n \"Bank\": 0,\n },\n )", "def set_boot_options(self, image_name, kickstart=None):\n self._disable_confirmation()\n try:\n if kickstart is None:\n self.show('install all nxos %s' % image_name, raw_text=True)\n else:\n self.show('install all system %s kickstart %s' % (image_name, kickstart), raw_text=True)\n except CLIError:\n pass", "def bootNodes(self):\n self.libvirt.bootSlaves()", "def boot(config, distro):\n distro_data = validate_distro(config.distro_maps, distro)\n # Ensure the distro has been mounted.\n mount_distro_helper(config.btrfs_uuid, distro_data)\n nspawn = NspawnManager(distro_data)\n nspawn.boot()", "def default_pxeboot_config(self):\n\n # Use private subnet for pxe booting\n self.separate_pxeboot_network = False\n self.pxeboot_subnet = self.private_pxeboot_subnet\n self.controller_pxeboot_floating_address = \\\n IPAddress(self.pxeboot_subnet[2])\n self.controller_pxeboot_address_0 = \\\n IPAddress(self.pxeboot_subnet[3])\n self.controller_pxeboot_address_1 = \\\n IPAddress(self.pxeboot_subnet[4])\n\n self.pxeboot_start_address = self.pxeboot_subnet[2]\n self.pxeboot_end_address = self.pxeboot_subnet[-2]", "def bootstrap(master=False):\n run(\"apt-get -q update\")\n run(\"apt-get -q --yes install git python-dev build-essential python-pip sshpass\")\n if master:\n run(\"curl -L http://bootstrap.saltstack.org | \"\n \"sh -s -- -M -N git develop\")\n run(\"pip install psutil apache-libcloud\")\n run(\"pip install git+https://github.com/saltstack/salt-cloud.git\"\n \"#egg=salt_cloud\")\n conf = SALT_CLOUD_TEMPLATE\n conf.update({\n 'apikey': env.rackspace_api_key,\n 'minion': {'master': str(env.box_public_ips[4])},\n 'tenant': str(env.rackspace_tenant_id),\n 'user': env.rackspace_user,\n })\n conf = {'rackspace-conf-{0}'.format(env.rackspace_user): conf}\n put(StringIO(yaml.dump(conf)), '/etc/salt/cloud.providers')\n # dummy a profiles file\n put('cloud.profiles', 
'/etc/salt/cloud.profiles')\n\n else: # minion\n run(\"python -c 'import urllib; print urllib.urlopen(\"\n \"\\\"http://bootstrap.saltstack.org\\\").read()' | \"\n \"sh -s -- git develop\")\n execute(shell)", "def _wait_instance_boot(self):\n if not self.ALLOW_PORTS or _utl.check_port(self.host_ip, 80):\n # Avoid to show message if already booted or not\n return\n\n _get_logger().info(\"Waiting instance boot...\")\n _sleep(self._TIMEOUT_SLEEP)\n if not _utl.check_port(self.host_ip, 80, timeout=self.TIMEOUT,\n sleep=self._TIMEOUT_SLEEP):\n raise _exc.HostRuntimeException(gen_msg=('timeout', \"boot\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cleans up the boot of instance. This method cleans up the PXE / HTTP environment that was set up for booting the instance. It unlinks the instance kernel/ramdisk in the node's directory in tftproot / httproot and removes its PXE config / HTTP config. In case of Directed LAN Boot / UEFI HTTP Boot, BIOS settings are reset. In case of UEFI iSCSI booting, it cleans up iSCSI target information from the node. Secure boot is also disabled if it was set earlier during provisioning of the ironic node.
def clean_up_instance(self, task):
    manager_utils.node_power_action(task, states.POWER_OFF)
    disable_secure_boot_if_supported(task)
    node = task.node
    sdflex_common.reset_network_setting_dhcpless_boot(node)
    image_utils.cleanup_iso_image(task)
[ "def clean_up_instance(self, task):\n manager_utils.node_power_action(task, states.POWER_OFF)\n disable_secure_boot_if_supported(task)\n\n node = task.node\n if (is_directed_lanboot_requested(node) or\n http_utils.is_http_boot_requested(node)):\n # In this cleaning step it sets the URLBOOTFILE & URLBOOTFILE2 &\n # HttpBootUri path as ''.\n sdflex_common.reset_bios_settings(node)\n http_boot_uri = node.driver_info.get('http_boot_uri')\n if http_boot_uri:\n sdflex_object = sdflex_common.get_sdflex_object(node)\n sdflex_object.set_http_boot_uri(None)\n\n if http_utils.is_http_boot_requested(node):\n try:\n images_info = http_utils.get_instance_image_info(task)\n except ironic_exception.MissingParameterValue as e:\n LOG.warning('Could not get instance image info '\n 'to clean up images for node %(node)s: %(err)s',\n {'node': node.uuid, 'err': e})\n else:\n http_utils.clean_up_http_env(task, images_info)\n else:\n super(SdflexPXEBoot, self).clean_up_instance(task)", "def wipe_puppet(self):\n # TODO IMPLEMENT THIS METHOD\n self.clean_setup()", "def clean_up_ramdisk(self, task):\n LOG.debug(\"Cleaning up deploy boot for \"\n \"%(node)s\", {'node': task.node.uuid})\n\n sdflex_common.eject_vmedia(task,\n vmedia_device)\n self._cleanup_iso_image(task)", "def cleanup(self):\n if not self.status:\n self.class_logger.info(\"Skip cleanup of switch id:%s due to Off status.\" % (self.id, ))\n return\n self.get()\n self.clearconfig()", "def node_cleanup(self):\n LOG.debug(\"Running node cleanup.\")\n # nodetool -h <HOST> -p <PORT> -u <USER> -pw <PASSWORD> cleanup\n try:\n self._run_nodetool_command('cleanup')\n self.status.set_status(rd_instance.ServiceStatuses.RUNNING)\n except Exception:\n LOG.exception(\"The node failed to complete its cleanup.\")\n finally:\n self.status.end_restart()", "def clean_up_ramdisk(self, task):\n node = task.node\n mode = deploy_utils.rescue_or_deploy_mode(node)\n try:\n images_info = pxe_utils.get_image_info(\n node, mode=mode, ipxe_enabled=self.ipxe_enabled)\n except exception.MissingParameterValue as e:\n LOG.warning('Could not get %(mode)s image info '\n 'to clean up images for node %(node)s: %(err)s',\n {'mode': mode, 'node': node.uuid, 'err': e})\n else:\n pxe_utils.clean_up_pxe_env(\n task, images_info, ipxe_enabled=self.ipxe_enabled)", "def remove(self):\n\t\tc = Common()\n\t\tc.banner()\n\t\tc.client_hosts()\n\n\t\toperatingSystem = run(\"/bin/cat /etc/issue | /usr/bin/awk '{print $1}'\")\n\n\t\tif(operatingSystem=='Debian'):\n\t\t\trun('aptitude -y purge puppet')\n\t\t\trun('find /var/lib/puppet -type f -print0 | xargs -0r rm')\n\t\telse:\n\t\t\tprint '--->\\tOS not supported'\n\t\t\tsys.exit(0)\n\n\t\ttry:\n\t\t\tsubprocess.call(['/usr/sbin/puppetca', '--clean', '%s.%s' % (c.client_name(),self.domain)])\n\t\texcept Exception, e:\n\t\t\tprint 'error :', e\n\t\t\tpass\n\n\t\tsleep(3)\n\t\texit(0)", "def cleanup_files(self):\n os.system(\"rm -r /tmp/kernelpop\")", "def shutdown(self):\n self.commands[master_setup.subcommand].shutdown()", "def _remove_pxe_config(self, host):\n if host.mgmt_mac:\n dashed_mac = host.mgmt_mac.replace(\":\", \"-\")\n\n # Remove the old file if it exists\n try:\n os.remove(\"/var/pxeboot/pxelinux.cfg/01-\" + dashed_mac)\n except OSError:\n pass\n\n try:\n os.remove(\"/var/pxeboot/pxelinux.cfg/efi-01-\" + dashed_mac + \".cfg\")\n os.remove(\"/var/pxeboot/pxelinux.cfg/efi-01-\" + dashed_mac)\n except OSError:\n pass", "def prepare_instance(self, task):\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks 
and enables secure boot only if the\n # deploy has requested secure boot\n boot_option = deploy_utils.get_boot_option(task.node)\n if boot_option != \"kickstart\":\n sdflex_common.update_secure_boot_mode(task, True)\n if not http_utils.is_http_boot_requested(task.node):\n if boot_option == \"kickstart\":\n prepare_node_for_deploy(task)\n super(SdflexPXEBoot, self).prepare_instance(task)\n else:\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_option = deploy_utils.get_boot_option(node)\n boot_device = None\n instance_image_info = {}\n if boot_option == \"ramdisk\":\n instance_image_info = http_utils.get_instance_image_info(task)\n http_utils.cache_ramdisk_kernel(task, instance_image_info)\n if deploy_utils.is_iscsi_boot(task) or boot_option == \"ramdisk\":\n http_utils.prepare_instance_http_config(\n task, instance_image_info,\n iscsi_boot=deploy_utils.is_iscsi_boot(task),\n ramdisk_boot=(boot_option == \"ramdisk\"))\n if http_utils.is_http_boot_requested(task.node):\n boot_device = boot_devices.UEFIHTTP\n else:\n boot_device = boot_devices.PXE\n elif boot_option != \"local\":\n if task.driver.storage.should_write_image(task):\n # Make sure that the instance kernel/ramdisk is cached.\n # This is for the takeover scenario for active nodes.\n instance_image_info = (\n http_utils.get_instance_image_info(task))\n http_utils.cache_ramdisk_kernel(task, instance_image_info)\n iwdi = (\n task.node.driver_internal_info.get('is_whole_disk_image'))\n try:\n root_uuid_or_disk_id = task.node.driver_internal_info[\n 'root_uuid_or_disk_id'\n ]\n except KeyError:\n if not task.driver.storage.should_write_image(task):\n pass\n elif not iwdi:\n LOG.warning(\"The UUID for the root partition can't be\"\n \" found, unable to switch the pxe config \"\n \"from deployment mode to service (boot) \"\n \"mode for node %(node)s\",\n {\"node\": task.node.uuid})\n else:\n LOG.warning(\"The disk id for the whole disk image \"\n \"can't be found, unable to switch the \"\n \"pxe config from deployment mode to \"\n \"service (boot) mode for node %(node)s. \"\n \"Booting the instance from disk.\",\n {\"node\": task.node.uuid})\n http_utils.clean_up_http_config(task)\n boot_device = boot_devices.DISK\n else:\n http_utils.build_service_http_config(task,\n instance_image_info,\n root_uuid_or_disk_id)\n if http_utils.is_http_boot_requested(task.node):\n boot_device = boot_devices.UEFIHTTP\n else:\n boot_device = boot_devices.PXE\n else:\n # If it's going to boot from the local disk, we don't need\n # PXE config files. 
They still need to be generated as part\n # of the prepare() because the deployment does PXE boot the\n # deploy ramdisk\n http_utils.clean_up_http_config(task)\n boot_device = boot_devices.DISK\n\n # NOTE(pas-ha) do not re-set boot device on ACTIVE nodes\n # during takeover\n if boot_device and task.node.provision_state != states.ACTIVE:\n persistent = True\n if node.driver_info.get('force_persistent_boot_device',\n 'Default') == 'Never':\n persistent = False\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=persistent)", "def cleanDynagen(self):\n\n self.dynagen.dynamips.clear()\n self.dynagen.handled = False\n self.dynagen.devices.clear()\n self.dynagen.globalconfig.clear()\n self.dynagen.configurations.clear()\n self.dynagen.ghosteddevices.clear()\n self.dynagen.ghostsizes.clear()\n self.dynagen.bridges.clear()\n self.dynagen.autostart.clear()\n\n for dynamips in globals.GApp.dynagen.dynamips.values():\n try:\n dynamips.reset()\n except:\n continue\n\n if globals.GApp.HypervisorManager:\n globals.GApp.HypervisorManager.stopProcHypervisors()\n if globals.GApp.QemuManager:\n globals.GApp.QemuManager.stopQemu()", "def cleanup(self):\n\n self.snmp_requester.cleanup()", "def cleanupServer(self):\n if self._serverProc is not None:\n self._client(\"killServer\")\n if isinstance(self._serverProc, subprocess.Popen):\n self._serverProc.communicate()\n self._devnull.close()\n self.callCmd(\n [\"rsh\", \"-l\", \"root\", self._remoteHost,\n \"rm -rf /var/tmp/bcpython /var/lib/python2.7\"])\n self._serverProc = None", "def cleanup(self):\n self.log.debug('template_igt - in template_igt cleanup()')\n # Add resource setup code here", "def multinic_bootstrap_booting(self):\n self.env.revert_snapshot(\"ready\")\n\n slave = self.env.nodes().slaves[0]\n mac_addresses = [interface.mac_address for interface in\n slave.interfaces.filter(network__name='internal')]\n try:\n for mac in mac_addresses:\n Ebtables.block_mac(mac)\n for mac in mac_addresses:\n Ebtables.restore_mac(mac)\n slave.destroy(verbose=False)\n self.env.nodes().admins[0].revert(\"ready\")\n nailgun_slave = self.env.bootstrap_nodes([slave])[0]\n assert_equal(mac.upper(), nailgun_slave['mac'].upper())\n Ebtables.block_mac(mac)\n finally:\n for mac in mac_addresses:\n Ebtables.restore_mac(mac)", "def shutdown(self):\n if self.ipkernel is not None:\n self.cleanup_consoles()\n self.ipkernel.shell.exit_now = True\n self.ipkernel.cleanup_connection_file()\n self.ipkernel.iopub_thread.stop()\n self.ipkernel = None", "def shutdown(self):\n self._exec_cmd(_vix.VixVM_PowerOff,\n self._vm_handle,\n VIX_VMPOWEROP_FROM_GUEST,\n None,\n None\n )", "def cleanup(self):\n os.system(\"rm -rf /dev/shm/images/kinect_rgb\")\n os.system(\"rm -rf /dev/shm/images/kinect_depth\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute and return the daily return values.
def compute_daily_returns(df): daily_returns = df.copy() daily_returns[1:] = (df[1:] / df[:-1].values) - 1 daily_returns.ix[0, :] = 0 #set daily returns for row 0 to 0 return daily_returns
[ "def compute_daily_returns(self):\n # Note: Returned DataFrame must have the same number of rows\n daily_returns = (self.prices / self.prices.shift(1)) - 1\n daily_returns.ix[0, :] = 0\n return daily_returns", "def compute_daily_returns(df):\n # (value[t] / value[t-1]) - 1\n rtn = (df/df.shift(1)) - 1\n rtn.ix[0, :] = 0\n return rtn", "def get_daily_returns():\n portfolio = request.get_json(force=True)\n start_date = parse_date(request.args.get('start'))\n end_date = parse_date(request.args.get('end'))\n prices_df = prepare_dataframe(portfolio, start_date, end_date)\n performance = compute_daily_returns(prices_df)\n return performance.to_json(orient='index')", "def calculate_daily_returns(self, weighted: pd.DataFrame) -> pd.Series:\n returns = self.returns[weighted.columns]\n returns = returns.loc[weighted.index[0] :]\n\n daily_returns = (weighted.shift(1) * returns).sum(axis=1)\n return daily_returns", "def getDailyReturns(self, startDate, endDate):\n self.startDate = startDate\n self.endDate = endDate\n \n price = yf.download(stock,startDate,endDate)\n self.dReturns = pd.DataFrame(np.log(price)-np.log(price).shift(1),index=price.index)\n self.dReturns.columns = self.tickers\n self.dReturns.dropna(inplace = True)", "def __compute_return(self, data, before, after):\n\n event_ret = pd.DataFrame()\n for date in self.__event_date:\n subset_range = pd.date_range(\n start=date - pd.Timedelta(\"{d} days\".format(d=before)),\n end=date + pd.Timedelta(\"{d} days\".format(d=after)))\n data_df = data.reindex(subset_range).dropna()\n if data_df.empty:\n break\n event_ret.loc[date, 'return'] = (data_df['return'] + 1).prod() - 1\n return event_ret.mean().values", "def daily_valuations(self):\n df = pd.DataFrame(self.close_prices, columns=[\"date\", \"price\"])\n df = df.set_index(\"date\")\n df[\"quantity\"] = float(\"nan\")\n df[\"market_val\"] = float(\"nan\")\n # the prices starting from the first date the security was held\n start_date = str(self.breakdown[0][0])\n\n df2 = df.loc[start_date:]\n df2 = df2.copy() # copied to prevent chained assignment\n # update the quantity at each date\n for row in self.breakdown:\n df2.at[str(row[0]), \"quantity\"] = row[1]\n df2[\"price\"] = df2[\"price\"].fillna(method=\"ffill\")\n df2[\"quantity\"] = df2[\"quantity\"].fillna(method=\"ffill\")\n\n df2[\"price\"] = pd.to_numeric(df2[\"price\"])\n df2[\"market_val\"] = round((df2[\"price\"] * df2[\"quantity\"]), 3)\n\n df2 = df2[[\"market_val\"]]\n new_name = f\"market_val_{self.ticker}\"\n new_header = {\"market_val\": new_name}\n df2 = df2.rename(columns=new_header)\n return df2", "def _get_dollar_values(self, group=False):\n dates = sorted(self._config['dates'])\n\n # Copy dataframe and zero data before earliest portfolio date.\n dollar_values = self._daily['close'].copy()\n dollar_values.ix[\n dollar_values.index < pd.to_datetime(str(dates[0])), :] = 0.0\n\n # Loop thru dates and calculate each date range using bitmask index.\n for i, item in enumerate(dates):\n index = dollar_values.index >= pd.to_datetime(str(item))\n if i < (len(dates) - 1):\n index = index & (\n dollar_values.index < pd.to_datetime(str(dates[i + 1])))\n for key in list(dollar_values.columns.values):\n value = self._config['dates'][item]['symbols'].get(key)\n if value is None:\n dollar_values.ix[index, key] = 0.0\n else:\n dollar_values.ix[index, key] *= value * self._config[\n 'value_ratio']\n\n if group is True:\n dollar_values = self._sum_symbol_groups(dollar_values)\n return dollar_values", "def compute_monthly_returns(dbm: 
database_manager.DatabaseManager, tbl_name: str) -> \\\n Union[Tuple[pd.DataFrame, Tuple[str, str, str, str, str], datetime.datetime], Tuple[None, None]]:\n tbl, info = dbm.get_table(tbl_name)\n\n if tbl is None:\n return None, None\n\n tbl.dropna(axis=0, inplace=True)\n\n first_date = tbl.index[0]\n last_date = tbl.index[-1]\n prev_month = first_date.month\n\n row_idx = 0\n curr_date, prev_date = None, None\n\n monthly_returns = []\n daily_ret = 0\n monthly_ret = 0\n\n while curr_date != last_date:\n row_idx += 1\n\n curr_date = tbl.index[row_idx]\n\n curr_month = curr_date.month\n\n curr_price = tbl.iloc[row_idx]['PX_LAST']\n prev_price = tbl.iloc[row_idx - 1]['PX_LAST']\n\n if curr_price == 0:\n daily_ret = 0\n elif prev_price == 0:\n daily_ret = tbl.iloc[row_idx - 2]['PX_LAST']\n else:\n daily_ret = (curr_price / prev_price) - 1.0\n\n monthly_ret = monthly_ret * (daily_ret + 1) if monthly_ret != 0 else daily_ret + 1\n\n if curr_month != prev_month:\n # remove compounding of last daily return\n monthly_ret /= (daily_ret + 1)\n\n monthly_returns.append((prev_date, monthly_ret - 1))\n\n # reset for next month\n monthly_ret = daily_ret + 1\n\n prev_month = curr_month\n prev_date = curr_date\n\n df = pd.DataFrame(monthly_returns, columns=['Dates', 'Monthly_Return'])\n df.set_index('Dates', inplace=True)\n\n return df, info, first_date", "def compute_returns(self):\n import numpy as np\n\n print(\"Compute returns and log returns...\")\n self.data['log_price'] = np.log(self.data['close'])\n self.data['log_returns'] = self.data['log_price'].diff()\n\n\n self.data['lagged_returns'] = self.data['returns'].shift(-1)\n self.data['returns2'] = self.data['returns'] ** 2\n print(\"Done!\")", "def get_results(df,dates):\n\n data_list = []\n for date in dates:\n data_per_day = {}\n timeStampMidNight = pd.Timestamp(date)\n timeStampMorning = pd.Timestamp(date + ' ' + '06:00:00')\n wholeday= df[df['Date']==date]\n overnight = wholeday[wholeday['TimeStamp']<timeStampMorning]\n daytime = wholeday[wholeday['TimeStamp']>=timeStampMorning]\n data_per_day['wholeday'] = wholeday\n data_per_day['overnight'] = overnight\n data_per_day['daytime'] = daytime\n data_list.append(data_per_day)\n\n res = np.zeros(18)\n for data in data_list:\n sample_count = len(data['wholeday'])\n res[:6]+=extractCases(data['daytime'],sample_count)\n res[6:12]+=extractCases(data['overnight'],sample_count)\n res[12:18]+=extractCases(data['wholeday'],sample_count)\n res/=len(data_list)\n return res", "def get_daily_totals(date=None, date_mod=None, conn=CONN):\n\n if date:\n if date_mod:\n a = conn.execute('''select date(entry_time), \n sum(protein), \n sum(carbohydrate), \n sum(fat), \n sum(kcals) \n from consumption \n where date(entry_time) = date(?, ?)''', (date, date_mod))\n else:\n\n a = conn.execute('''select date(entry_time), \n sum(protein), \n sum(carbohydrate), \n sum(fat), \n sum(kcals) \n from consumption \n where date(entry_time) = date(?)''', (date,))\n else:\n a = conn.execute('''select date(entry_time), \n sum(protein), \n sum(carbohydrate), \n sum(fat), \n sum(kcals) \n from consumption \n group by date(entry_time)''')\n\n ret = a.fetchall()\n if ret[0][\"sum(kcals)\"]:\n # check that the row actually contains values, if not, the user is asking for a date with no entry\n # and instead we will return zero values (below)\n return ret\n else:\n return [{\"sum(protein)\": 0,\n \"sum(carbohydrate)\": 0,\n \"sum(fat)\": 0,\n \"sum(kcals)\": 0}]\n\n # dict of dummy values to populate the interface, instead of a sqlite row. 
When the user starts entering\n # data, it will be written to the db and can be returned by this function in future calls.\n # TODO: probably this is better to take care of in SQL", "def calculate_target():\n all_dates_df = pd.read_csv(\"datasets/all_dates_without_nan_df.csv\")\n aggregate_df = pd.read_csv(\"datasets/aggregate_df.csv\")\n aggregate_df = aggregate_df.iloc[:, 1:]\n\n # index over all_dates_df\n i = 0\n j = 0\n # index over aggregate_df\n index = 0\n\n while i + delta < len(all_dates_df):\n\n arguments = []\n # collect the value of SPY return adj close over the next delta days\n while i + delta < len(all_dates_df) and j < delta:\n arguments.append(all_dates_df.loc[i + delta, 'SPY_return_Adj Close'])\n j += 1\n i += 1\n\n avg = np.nanmean(arguments, axis=0)\n\n j = 0\n # write the calculated avg in the current interval\n while j < 20:\n aggregate_df.loc[index, 'SPY_return_Adj Close'] = avg\n index += 1\n j += 1\n j = 0\n\n aggregate_df.to_csv('aggregate_df.csv')", "def calculate_portfolio_return(self, price_df: pd.DataFrame) -> None:\n # Keep only data of stocks in the portfolio\n select_query = ' or '.join(f\"symbol == '{val[1]}'\" for val in self.stocks)\n self.price_df = price_df.query(select_query) \n # Calculate returns\n self.price_df['weighted_ret'] = self.price_df['dailyret'] * self.price_df['weight'] # weight * daily return\n self.portfolio_daily_returns = self.price_df.groupby('date')['weighted_ret'].sum()\n self.expected_daily_return = self.portfolio_daily_returns.mean()\n self.volatility = self.portfolio_daily_returns.std()", "def calculate_return(df, col_name, period_start_date, period_end_date):\n\tbase_value = df.loc[df[DATE_COL_NAME] == period_start_date, col_name].values[0]\n\tcurr_value = df.loc[df[DATE_COL_NAME] == period_end_date, col_name].values[0]\n\tprice_return = (curr_value - base_value) / base_value * 100\n\tdf.loc[df[DATE_COL_NAME] == period_end_date, RETURN_PREFIX + col_name] = price_return\n\treturn df", "def compute_log_returns(prices):\n # TODO: Implement Function\n \n return None", "def get_daily_data(varid, plev, years, datafiles, data, daymin=1,\n daymax=366, yearnm='year'):\n\n years = atm.makelist(years)\n datafiles = atm.makelist(datafiles)\n\n if isinstance(plev, int) or isinstance(plev, float):\n pres = atm.pres_convert(plev, 'hPa', 'Pa')\n elif plev == 'LML' and 'PS' in data:\n pres = data['PS']\n else:\n pres = None\n\n def get_var(data, varnm, plev=None):\n if plev is None:\n plev = ''\n elif plev == 'LML' and varnm == 'QV':\n varnm = 'Q'\n return data[varnm + str(plev)]\n\n if var_type(varid) == 'calc':\n print('Computing ' + varid)\n if varid == 'THETA':\n var = atm.potential_temp(get_var(data, 'T', plev), pres)\n elif varid == 'THETA_E':\n var = atm.equiv_potential_temp(get_var(data, 'T', plev), pres,\n get_var(data, 'QV', plev))\n elif varid == 'DSE':\n var = atm.dry_static_energy(get_var(data, 'T', plev),\n get_var(data, 'H', plev))\n elif varid == 'MSE':\n var = atm.moist_static_energy(get_var(data, 'T', plev),\n get_var(data, 'H', plev),\n get_var(data, 'QV', plev))\n elif varid == 'VFLXMSE':\n Lv = atm.constants.Lv.values\n var = data['VFLXCPT'] + data['VFLXPHI'] + data['VFLXQV'] * Lv\n var.attrs['units'] = data['VFLXCPT'].attrs['units']\n var.attrs['long_name'] = 'Vertically integrated MSE meridional flux'\n else:\n with xray.open_dataset(datafiles[0]) as ds:\n if varid not in ds.data_vars:\n varid = varid + str(plev)\n var = atm.combine_daily_years(varid, datafiles, years, yearname=yearnm,\n subset_dict={'day' : (daymin, 
daymax)})\n var = atm.squeeze(var)\n\n # Make sure year dimension is included for single year\n if len(years) == 1 and 'year' not in var.dims:\n var = atm.expand_dims(var, yearnm, years[0], axis=0)\n\n # Wrap years for extended day ranges\n if daymin < 1 or daymax > 366:\n var = wrapyear_all(var, daymin, daymax)\n\n # Convert precip and evap to mm/day\n if varid in ['precip', 'PRECTOT', 'EVAP']:\n var = atm.precip_convert(var, var.attrs['units'], 'mm/day')\n\n return var", "def analyze(self, start_date: Date, end_date: Date):\n\n dyo_list = list(self.get_daily_yields(start_date, end_date))\n\n for dyo in dyo_list:\n dyo.analyze()\n\n # put all daily energies in a DataFrame\n # - Erd = total solar energy between start and end date\n # - Empp = total photovoltaic energy produced by solar panels\n # - Ein = total DC energy input at inverters\n # - Eout = total AC energy output of inverters\n columns = ['Erd', 'Empp', 'Ein', 'Eout']\n data = [dyo.get_energies() for dyo in dyo_list]\n index = [str(dyo.date) for dyo in dyo_list]\n self.df = pd.DataFrame(data=data, index=index, columns=columns)\n # get the sum of each column, the minimum and maximum value in each column and the average of each column\n sum_ = self.df.sum(axis=0)\n min_ = self.df.min(axis=0)\n avg_ = self.df.mean(axis=0)\n max_ = self.df.max(axis=0)\n return {'tot': sum_, 'min': min_, 'avg': avg_, 'max': max_}", "def gains_daily(zone):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.results.gains.daily\", \r\n zone.eco_id)\r\n val = p2e._app.Request(arg_str)\r\n return p2e._base._util._convert_str_to_type(val, float)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Only gets basic POS tags. The rest should be inferred from the lexicon
def get_pos(token, morph): return [c.name for c in pos if c.match(token, morph)]
[ "def get_sense_pos(self, tree):\n lem = tree.label()\n #word = tree.leaves()[0]\n sense = str(lem).split('.')[2]\n pos = str(lem).split('.')[1]\n return (pos, sense)", "def get_pos_by_feat(self, feats):\n # Part-of-speech constants, ADJ, ADJ_SAT, ADV, NOUN, VERB = 'a', 's', 'r', 'n', 'v'\n att=feats[0] # the first feature is word pos\n if att in feat_pos_mappings:\n return feat_pos_mappings[att]\n return '*'", "def _pos_lexicons(self):\n\n f = open(self.training_file)\n print \"opened PART OF SPEECH DICT\"\n counter = 0\n for words in self.parse_training_input(f):\n tagged = pos_tag(words)\n for word, pos in tagged:\n counter += 1\n if word in self.word_pos:\n self.word_pos[word].append(pos)\n else:\n self.word_pos[word] = [pos]\n if pos in self.pos_word:\n self.pos_word[pos].append(word)\n else:\n self.pos_word[pos] = [word]\n print \"Building dict...\" + str(counter)\n print \"Done with POS DICT\"\n\n # return len(self.word_pos), self.pos_word", "def local_positional(positions, m):\n return", "def get_single_pos(read, rev=False):\n strand = '+'\n if rev!=read.is_reverse:\n strand = '-'\n fpos = read.pos\n tpos = read.qlen + fpos\n return strand, fpos, tpos", "def read_pos(in_name):\n atoms = read_xyz(in_name)[-1]\n\n return atoms", "def __get_node_pos(n):\n if \"isoform_position\" in n.attributes() and n[\"isoform_position\"] is not None:\n if n[\"isoform_position\"] is None:\n return UnknownPosition(), UnknownPosition()\n else:\n return n[\"isoform_position\"] - 1, n[\"isoform_position\"]\n else:\n if n[\"position\"] is None:\n return UnknownPosition(), UnknownPosition()\n else:\n return n[\"position\"] - 1, n[\"position\"]", "def test_posTagNormalFile(self):\n result, _ = PosTagger(self.dummyFile).run()\n answer = defaultdict(int)\n answer['across'] = 1\n answer['near'] = 2\n answer['around'] = 1\n answer['in'] = 3\n self.assertEqual(result, answer, \"{} failed\".format(inspect.stack()[0][3]))", "def print_pos(self):\n return (pos, x, y)", "def _find_lemma_pos(self, app_note: Element) -> Tuple[int, int]:\n lemma_pos = app_note.cont.find(r\"\\lemma\")\n if lemma_pos is not -1:\n start = lemma_pos + len(r\"\\lemma\")\n end = start + len(Brackets(app_note.cont, start=start))\n return start + 1, end - 1\n else:\n return -1, -1", "def _find_verb_nl_pos(self, doc):\n for index, token in reversed(list(enumerate(doc))):\n if token.pos_ == \"VERB\":\n return index", "def listPosition(word):\n return word_order(word)", "def pos(self, name):\n return self.ev.position[name]", "def get_wordnet_pos(self,treebank_tag):\r\n if treebank_tag.startswith('J'):\r\n return wordnet.ADJ\r\n elif treebank_tag.startswith('V'):\r\n return wordnet.VERB\r\n elif treebank_tag.startswith('N'):\r\n return wordnet.NOUN\r\n elif treebank_tag.startswith('R'):\r\n return wordnet.ADV\r\n else:\r\n # As default pos in lemmatization is Noun\r\n return wordnet.NOUN", "def index_positional_defaulted(self) -> _typing_tuple_false:\n kinds: tuple = self.kinds\n try:\n kinds.index(1) # POSITIONAL_OR_KEYWORD = 1\n except ValueError:\n return False\n else:\n pos_only: list = self.index_positional_only or []\n\n return tuple(\n [\n i\n for i, k in enumerate(kinds)\n if ((k == 1) & (i not in pos_only))\n ]\n )", "def POS_select(speech):\n # Tokenize and lemmatize\n text = []\n for token in speech:\n if token.pos_ in ['NOUN','PROPN','VERB','ADJ']:\n text.append(token.lemma_.lower().replace('.',''))\n return text", "def index_positional(self) -> _typing_tuple_false:\n kinds: tuple = self.kinds\n try:\n kinds.index(1) # 
POSITIONAL_OR_KEYWORD = 1\n except ValueError:\n return False\n else:\n return tuple(\n [i for i, k in enumerate(kinds) if ((k == 0) | (k == 1))]\n )", "def cmd_position(self, n, e, d, heading):\n pass", "def getOperandIndex(self, printpos: int) -> int:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sends nonempty message first and then an empty one to check if buffers are cleaned.
def test_receive_empty_message_after_nonempty(self): port = next(port_iterable) with mock_server(port) as s, client(port) as p: with s.accept()[0] as k: messages = [ prepare_message(b"blahblah"), prepare_message(b"") ] k.sendall(messages[0]) k.sendall(messages[1]) time.sleep(QUANT_SECONDS) self.assertIsNone(p.poll()) out, _ = p.communicate(b"") self.assertEqual(out, b"blahblah\n\n")
[ "def test_send_empty_message(self):\n\n extension = common.ExtensionParameter(\n common.PERMESSAGE_DEFLATE_EXTENSION)\n request = _create_request_from_rawdata(\n b'', permessage_deflate_request=extension)\n\n msgutil.send_message(request, '')\n\n # Payload in binary: 0b00000000\n # From LSB,\n # - 1 bit of BFINAL (0)\n # - 2 bits of BTYPE (no compression)\n # - 5 bits of padding\n self.assertEqual(b'\\xc1\\x01\\x00', request.connection.written_data())", "def testRawEmptyMessage(self):\n\t\tnotNeeded=self.fixture.read(1) # Empty the port.\n\t\tself.assertEqual(self.fixture.read(1),b'',\n\t\t\t\t\t\tmsg='Need an empty buffer before running this test case.')\n\t\t# port.inWaiting will be 0, so grabPortOutput will just proceed to return\n\t\t# the input outputBuffer and the default (empty) output.\n\t\trawOutput=sm.commsInterface.grabPortOutput(self.fixture,'DummyBuff','raw')\n\t\tself.assertEqual(rawOutput[0],'',msg='Expected empty string as output.')\n\t\tself.assertEqual(len(rawOutput[0]),0,msg='Expected zero bytes.')\n\t\t# 'raw' option should leave outputBuffer unchanged.\n\t\tself.assertEqual(rawOutput[1],'DummyBuff',msg='Expected unchanged DummyBuff.')\n\t\t# Check message length.\n\t\tself.assertEqual(len(rawOutput[0]),0,msg='Expected zero bytes')\n\t\tself.assertEqual(len(rawOutput[1]),9,msg='Expected nine bytes')\n\t\t# Should have no warnings.\n\t\tself.assertEqual(rawOutput[2],{},msg='Expected empty warning dict.')\n\t\t# The port should be empty now.\n\t\tself.assertEqual(self.fixture.read(1),b'',msg='Expected empty buffer after the test.')", "def is_empty_recv(self, msg):\n \n if self.is_eof(msg):\n return False\n return self.is_empty(msg, self.empty_obj_recv)", "def testFormattedEmptyMessage(self):\n\t\tnotNeeded=self.fixture.read(1) # Empty the port.\n\t\tself.assertEqual(self.fixture.read(1),b'',\n\t\t\t\t\t\tmsg='Need an empty buffer before running this test case.')\n\t\t# port.inWaiting will be 0, so grabPortOutput will just proceed to return\n\t\t# the input outputBuffer and the default (empty) output.\n\t\tformattedOutput=sm.commsInterface.grabPortOutput(self.fixture,'DummyBuff','formatted')\n\t\tself.assertEqual(formattedOutput[0],'',msg='Expected empty string as output.')\n\t\tself.assertEqual(formattedOutput[1],'DummyBuff',msg='Expected unchanged DummyBuff.')\n\t\t# Check message length.\n\t\tself.assertEqual(len(formattedOutput[0]),0,msg='Expected zero bytes')\n\t\tself.assertEqual(len(formattedOutput[1]),9,msg='Expected nine bytes')\n\t\t# Should have no warnings.\n\t\tself.assertEqual(formattedOutput[2],{},msg='Expected empty warning dict.')\n\t\t# The port should be empty now.\n\t\tself.assertEqual(self.fixture.read(1),b'',msg='Expected empty buffer after the test.')", "def empty_p(self):\n return _raw_util.raw_msg_queue_sptr_empty_p(self)", "def clear(self):\n self.msg_store = ''", "def send_ready(self):\n return bool(0 != self.send_buffer.__len__())", "def _send_empty(self, status_code, message=None):\n self.send_response(status_code, message=message)\n self.end_headers()", "def message_reset(self):\n self.message = \"\"", "def testHexEmptyMessage(self):\n\t\tnotNeeded=self.fixture.read(1) # Empty the port.\n\t\tself.assertEqual(self.fixture.read(1),b'',\n\t\t\t\t\t\tmsg='Need an empty bufferbefore running this test case.')\n\t\t# port.inWaiting will be 0, so grabPortOutput will just proceed to return\n\t\t# the input outputBuffer and the default (empty) 
output.\n\t\thexOutput=sm.commsInterface.grabPortOutput(self.fixture,'DummyBuff','hex')\n\t\tself.assertEqual(hexOutput[0],'',msg='Expected empty string as output.')\n\t\tself.assertEqual(len(hexOutput[0]),0,msg='Expected zero bytes.')\n\t\t# 'hex' option should leave outputBuffer unchanged.\n\t\tself.assertEqual(hexOutput[1],'DummyBuff',msg='Expected unchanged DummyBuff.')\n\t\t# Should have no warnings.\n\t\tself.assertEqual(hexOutput[2],{},msg='Expected empty warning dict.')\n\t\t# The port should be empty now.\n\t\tself.assertEqual(self.fixture.read(1),b'',msg='Expected empty buffer after the test.')", "def is_empty(self):\n return not self.unbuffered_elements and not self.buffers", "def _assert_buffer_not_empty(self):\n if self.buffer_is_empty():\n raise BufferEmpty()", "def _try_send(self):\n if self._batch_size and self._mutation_count >= self._batch_size:\n self.send()", "def is_empty (self):\n return len(self.pkts) == 0", "def empty(self):\n self._lock.acquire()\n try:\n out = self._buffer_tobytes()\n del self._buffer[:]\n if (self._event is not None) and not self._closed:\n self._event.clear()\n return out\n finally:\n self._lock.release()", "def _check_buffer_for_messages(self):\n split_buffered_data = self._buffered_string.split(_MESSAGE_SEPARATOR)\n if len(split_buffered_data) > 1: # If we find more than one item, there is a message\n messages_to_process = split_buffered_data[0:-1]\n for message in messages_to_process:\n self._buffered_messages.append(message)\n\n self._buffered_string = split_buffered_data[-1]", "def test_sendBuffer(self):\n msg = b'12345'\n self.radio.bufferTxMsg(msg)\n self.radio.sendBuffer()\n assert(len(self.radio.txBuffer) == 0) # buffer should clear after data sent\n time.sleep(0.1)\n self.radio.readBytes(True)\n assert(self.radio.getRxBytes() == msg)\n \n # Test maximum bytes sent\n self.radio.clearRxBuffer()\n msg = b'1'*100\n self.radio.bufferTxMsg(msg)\n self.radio.sendBuffer(50)\n time.sleep(0.1)\n self.radio.readBytes(True)\n assert(len(self.radio.txBuffer) == 50)\n assert(self.radio.bytesInRxBuffer == 50)", "def test_lotsofzeros512(self):\n payload = '\\x00' * 512\n rawsend(payload)\n # Check that the server is still alive.\n self.assertTrue(puck())", "def clear_betmsg(self):\n self._betmsg = BLANK" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pop a random element and return it
def rand_pop(self): import random if self._is_unique: val = random.sample(self._list, 1)[0] self._list.remove(val) else: idx = random.randrange(len(self._list)) val = self._list[idx] del self._list[idx] return val
[ "def pop(self) -> T:\n while self._queue:\n priority, access_counter, item = heapq.heappop(self._queue)\n if item is not self.REMOVED:\n del self.entry_finder[item]\n return item\n raise KeyError('pop from an empty priority queue')", "def draw(self):\n elem = self.random()\n self.remove(elem)\n return elem", "def remove_and_return_random_item_from_list(given_list):\n return given_list.pop(npri(len(given_list)))", "def PopItem(self):\n try:\n # Using popleft to have FIFO behavior.\n return self._queue.popleft()\n except IndexError:\n raise errors.QueueEmpty", "def pop(self):\n spel_to_return = self.main_list[self.largest_affinity].pop(0)\n self.size -= 1\n\n if len(self.main_list[self.largest_affinity]) == 0 and self.size > 0:\n self.update_largest_index()\n\n return spel_to_return", "def pop() -> Any:\n\tglobal stack\n\tif not stack:\n\t\treturn None\n\telse:\n\t\tn = stack[-1]\n\t\tdel stack[-1]\n\t\treturn n", "def pop(self):\n ts = int(time.time())\n item = self.rpop(keys=[self.name], client=self._redis, args=[ts])\n if item is None:\n return item\n msg, ts = item\n ts = int(ts) if ts else None\n return msg, ts", "def pop(self):\n if self.head is None:\n raise IndexError\n else:\n to_return = self.head\n self.head = to_return.next\n self.length -= 1\n return to_return.val", "def get(self):\n\n while self.heap:\n priority, node = heapq.heappop(self.heap)\n if node is not self.REMOVED:\n del self.entry_finder[node]\n self.size -= 1\n return node\n raise KeyError('pop from an empty priority queue')", "def pop(stackNumber):\n assert(-1<stackNumber<3)\n if isEmpty(stackNumber):\n return None\n element = array[top[stackNumber]]\n array[top[stackNumber]] = None \n top[stackNumber] -= 1\n return element", "def RemoveFromPop(self, old_elem):\n self.pop.remove(old_elem)", "def pop(self):\n if (self.index < 0):\n # ArrayList is empty!\n return None\n\n last_element = self.array[self.index]\n self.array[self.index] = None\n self.index -= 1\n\n if self.index <= (self.size / self.extension_factor):\n self.diminish()\n\n return last_element", "def pop(self):\n # Get our iter first to avoid catching and accidentally\n # ignoring POSKeyError\n it = iter(self)\n try:\n value = next(it)\n except StopIteration:\n raise KeyError\n self.discard(value)\n return value", "def pop_next(self):\n return heapq.heappop(self.schedule)", "def pop():\n return pyCudaImageWarp.q.get_nowait().result(timeout=30)", "def random_element(self):\n if not self._elements:\n raise EmptySetError\n from sage.misc.prandom import choice\n return choice(self._elements)", "def random_element(self):\n pass", "def stack_pop(self):\n sp = self.regs.sp\n self.regs.sp = sp - self.arch.stack_change\n return self.memory.load(sp, self.arch.bytes, endness=self.arch.memory_endness)", "def pop(self, state: 'SoState', prevTopElement: 'SoElement') -> \"void\":\n return _coin.SoElement_pop(self, state, prevTopElement)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a tuple (min, max) out of a list of unsorted integers.
def get_min_max(ints): max=-1 min=1000000000 for i in ints: if i>max: max=i if i<min: min =i return (min,max) pass
[ "def get_min_max(ints):\n if len(ints)<1:\n return (0, 0)\n min = ints[0]\n max = ints[0]\n \n for int in ints:\n if int < min:\n min = int\n \n if int > max:\n max = int\n\n return (min, max)", "def max_and_min(list):\n\n # return tuple containig max and min of list\n return (max(list), min(list))", "def minMax(xs):\r\n min, max = xs[0], xs[0]\r\n for x in xs[1:]:\r\n if x < min:\r\n min = x\r\n elif x > max:\r\n max = x\r\n return min,max", "def min_max(xs):\n if len(xs) is not 0:\n x_min = xs[1]\n x_max = xs[1]\n for x in xs:\n if x < x_min:\n x_min = x\n elif x > x_max:\n x_max = x\n return x_min, x_max\n else:\n return 0, 0", "def find_min_max(input_list):\n minimum = maximum = input_list[0]\n\n for i in range(1,len(input_list)):\n if minimum > input_list[i]:\n minimum = input_list[i]\n if maximum < input_list[i]:\n maximum = input_list[i]\n return minimum,maximum", "def _get_min_max(ints, left, right):\n if right <= left + 1:\n # Takes care of 1 and 2 elements- since left and right will be same for 1 elements so\n # doesn't matter how you index it. For 1\n if ints[left] < ints[right]:\n return ints[left], ints[right]\n else:\n return ints[right], ints[left]\n middle = left + (right - left) // 2\n left_min, left_max = _get_min_max(ints, left, middle)\n right_min, right_max = _get_min_max(ints, middle+1, right)\n # Compare min and max of two halves\n if left_min < right_min:\n min_int = left_min\n else:\n min_int = right_min\n\n if left_max > right_max:\n max_int = left_max\n else:\n max_int = right_max\n return min_int, max_int", "def find_min_max(values: list):\n min = None\n max = None\n for value in values:\n if max == None or value > max:\n max = value\n if min == None or value < min:\n min = value\n print('The minimum value is {0}'.format(min))\n print('The maximum value is {0}'.format(max))\n return (min, max)", "def minmax(values):\n mn, mx = None, None\n for v in values:\n if v is not None:\n if mn is None or mn > v:\n mn = v\n if mx is None or mx < v:\n mx = v\n return mn, mx", "def find_max_min(list_value):\n if len(set(list_value)) > 1:\n return [min(list_value), max(list_value)]\n else:\n return [list_value[0]]", "def min_max(arr: StaticArray) -> ():\n if arr.size() == 1:\n output = (arr[0], arr[0])\n return output\n\n max_val = arr[0]\n min_val = arr[0]\n\n for index in range(arr.size()):\n if arr[index] > max_val:\n max_val = arr[index]\n if arr[index] < min_val:\n min_val = arr[index]\n\n output = (min_val, max_val)\n return output", "def find_max_min(list_of_nums):\n \n if max(list_of_nums) == min(list_of_nums): #for a list with identical numbers\n return [len(list_of_nums)]\n \n else:\n return [min(list_of_nums), max(list_of_nums)]", "def find_max_min(l):\n l.sort()\n if l[0] != l[-1]:\n return [l[0], l[-1]]\n elif l[0] == l[-1]:\n return [len(l)]", "def range_of_list(l: list):\n return max(l) - min(l)", "def clamp(tup,min,max):\n result = ()\n for x in tup:\n if x < min:\n result = result + (min,)\n elif x > max:\n result = result + (max,)\n else:\n result = result + (x,)\n return result", "def range_minmax(ranges):\n rmin = min(ranges)[0]\n rmax = max(ranges, key=lambda x: x[1])[1]\n return rmin, rmax", "def minMaxMoy(liste) :\n min, max, som = liste[0], liste[0], float(liste[0])\n for i in liste[1:]:\n if i < min :\n min = i\n if i > max :\n max = i\n som += i\n return (min, max, som/len(liste))", "def swap_minmax(arr: List[int]) -> List[int]:\n # noting to do with empty or short list\n if not arr:\n return arr\n if len(arr) < 2:\n return arr\n # compare every 
element with current min and max\n minimal = maximal = arr[0]\n for element in arr:\n if element > maximal:\n maximal = element\n if element < minimal:\n minimal = element\n # get position of min and max\n index_min = arr.index(minimal)\n index_max = arr.index(maximal)\n # swap min and max\n arr[index_min], arr[index_max] = arr[index_max], arr[index_min]\n\n return arr", "def get_min(lst):\n minimum = lst[0]\n index = 0\n for i in range(len(lst)):\n if lst[i] < minimum:\n minimum = lst[i]\n index = i\n return (minimum, index)", "def get_temp_min_and_max(wxlist: [str]) -> ([str], str, str):\n temp_max, temp_min = '', ''\n for i, item in reversed(list(enumerate(wxlist))):\n if len(item) > 6 and item[0] == 'T' and '/' in item:\n # TX12/1316Z\n if item[1] == 'X':\n temp_max = wxlist.pop(i)\n # TNM03/1404Z\n elif item[1] == 'N':\n temp_min = wxlist.pop(i)\n # TM03/1404Z T12/1316Z -> Will fix TN/TX\n elif item[1] == 'M' or item[1].isdigit():\n if temp_min:\n if int(temp_min[2:temp_min.find('/')].replace('M', '-')) \\\n > int(item[1:item.find('/')].replace('M', '-')):\n temp_max = 'TX' + temp_min[2:]\n temp_min = 'TN' + item[1:]\n else:\n temp_max = 'TX' + item[1:]\n else:\n temp_min = 'TN' + item[1:]\n wxlist.pop(i)\n return wxlist, temp_max, temp_min" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Locate the config for ``device_name`` in the ['audits']['config_dir'] directory, then parse the configuration and store it in the DEVICE_CONFIGS dictionary.
def parse_config(device_file_name): path = os.path.expanduser(os.path.join( TESTCONFIG['audits']['config_dir'], device_file_name)) if not os.path.exists(path): pytest.fail('{0} is not a valid config'.format(path)) # if not DEVICE_CONFIGS.get(path, False): DEVICE_CONFIGS[path] = CiscoConfParse( config=path, ignore_blank_lines=False, ) return DEVICE_CONFIGS[path]
[ "def config_data(self, device_name):\n result = defaultdict(list)\n for d in sorted(self.descriptors, key=lambda d: d[\"time\"]):\n config = d[\"configuration\"].get(device_name)\n if config:\n result[d[\"name\"]].append(config[\"data\"])\n return dict(result) # strip off defaultdict behavior", "def get_saved_device_mapping(self, device_name):\n config = None\n device_config_mapping = Config().get(\"device_config_mapping\")\n if device_name in device_config_mapping.keys():\n config = device_config_mapping[device_name]\n\n logging.debug(\"For [{}] we recommend [{}]\".format(device_name, config))\n return config", "def _chromeos_config(self):\n devices = []\n if self._legacy():\n self._url = config.CHROMEOS_RECOVERY_URL_LEGACY\n else:\n self._url = config.CHROMEOS_RECOVERY_URL\n conf = [x for x in self._http_request().split('\\n\\n') if 'hwidmatch=' in x]\n for device in conf:\n device_dict = {}\n for device_info in device.splitlines():\n key_value = device_info.split('=')\n key = key_value[0]\n if len(key_value) > 1: # some keys have empty values\n value = key_value[1]\n device_dict[key] = value\n devices.append(device_dict)\n\n return devices", "def get_config(self) -> None:\n body = Helpers.req_body(self.manager, 'devicedetail')\n body['method'] = 'configurations'\n body['uuid'] = self.uuid\n\n r, _ = Helpers.call_api(\n '/131airpurifier/v1/device/configurations',\n 'post',\n headers=Helpers.req_headers(self.manager),\n json_object=body,\n )\n\n if r is not None and Helpers.code_check(r):\n self.config = Helpers.build_config_dict(r)\n else:\n logger.debug('Unable to get config info for %s',\n self.device_name)", "def get_config(name: str):\n # 1. Check environment variables\n env_name = name.replace(\"_\", \"__\").replace(\".\", \"_\").upper()\n env_val = os.getenv(\"IOT_\" + env_name)\n if env_val:\n if \";\" in env_val:\n return [v.strip() for v in env_val.split(\";\")]\n return env_val\n\n # 2. 
Check config file\n keys = name.split(\".\")\n val = _CONFIG_YAML\n for k in keys:\n if isinstance(val, dict):\n val = val.get(k, {})\n\n if val:\n return val\n raise ValueError(f'\"{name} not found')", "async def async_load_config(path: str, hass: HomeAssistantType, config: ConfigType, async_add_entities):\r\n dev_schema = vol.Schema({\r\n vol.Required('dev_id'): cv.string,\r\n vol.Optional(CONF_NAME, default=''): cv.string,\r\n vol.Optional(CONF_DEVICE_CLASS, default='motion'): DEVICE_CLASSES_SCHEMA\r\n# vol.Optional(CONF_ICON, default=None): vol.Any(None, cv.icon),\r\n# vol.Optional('track', default=False): cv.boolean,\r\n# vol.Optional(CONF_MAC, default=None):\r\n# vol.Any(None, vol.All(cv.string, vol.Upper)),\r\n# vol.Optional(CONF_AWAY_HIDE, default=DEFAULT_AWAY_HIDE): cv.boolean,\r\n# vol.Optional('gravatar', default=None): vol.Any(None, cv.string),\r\n# vol.Optional('picture', default=None): vol.Any(None, cv.string),\r\n# vol.Optional(CONF_CONSIDER_HOME, default=consider_home): vol.All(\r\n# cv.time_period, cv.positive_timedelta),\r\n })\r\n result = []\r\n try:\r\n _LOGGER.debug(\"async_load_config(): reading config file %s\", path)\r\n\r\n devices = await hass.async_add_job(\r\n load_yaml_config_file, path)\r\n\r\n _LOGGER.debug('async_load_config(): devices loaded from config file: %s', devices)\r\n \r\n except HomeAssistantError as err:\r\n _LOGGER.error(\"async_load_config(): unable to load %s: %s\", path, str(err))\r\n return []\r\n except FileNotFoundError as err:\r\n _LOGGER.debug(\"async_load_config(): file %s could not be found: %s\", path, str(err))\r\n return []\r\n\r\n\r\n for dev_id, device in devices.items():\r\n # Deprecated option. We just ignore it to avoid breaking change\r\n# device.pop('vendor', None)\r\n try:\r\n device = dev_schema(device)\r\n device['dev_id'] = cv.slugify(dev_id) \r\n except vol.Invalid as exp:\r\n async_log_exception(exp, dev_id, devices, hass)\r\n else: \r\n _LOGGER.debug('device: %s', device)\r\n dev = JablotronSensor(hass, **device)\r\n result.append(dev)\r\n\r\n \"\"\" Create sensors for each device in devices \"\"\"\r\n# device = JablotronSensor(hass, dev_id)\r\n async_add_entities([dev]) \r\n return result", "def find_config(filename, cfg=None):\n res = DEFAULT_CFG\n dirname, basename = path.split(filename)\n\n if not cfg:\n cfg = config\n # Overwrite default config fields with matched config ones\n for key in cfg.keys():\n abskey = path.join(dirname, key) if not path.isabs(key) else key\n for x in glob.glob(abskey):\n if x.endswith(filename):\n cfg = config[key].get()\n res.update(cfg)\n for frequency in cfg:\n if frequency_folder_days(frequency) is None:\n logger.error(\"Invalid configuration attribute '%s'\" % key)\n exit(1)\n res['pattern'] = key\n return res", "def _getconf(self, directory=None):\n if directory is None:\n directory = self.curdir\n path = os.path.abspath(os.path.join(self.curdir, directory))\n return self.configs.get(path, {})", "def update_config(path: str, dev_id: str, device: JablotronSensor):\r\n\r\n with open(path, 'a') as out:\r\n device = {device.dev_id: {\r\n 'dev_id': device.dev_id,\r\n# ATTR_NAME: device._name,\r\n# ATTR_MAC: sensor.mac,\r\n# ATTR_ICON: sensor.icon,\r\n# 'picture': sensor.config_picture,\r\n# 'track': sensor.track,\r\n# CONF_AWAY_HIDE: sensor.away_hide,\r\n }}\r\n out.write('\\n')\r\n out.write(dump(device))\r\n _LOGGER.debug('update_config(): updated %s with sensor %s', path, dev_id)", "def load_devices_from_config(file=\"icse0xxa.conf\"):\n dev_list = []\n c = ConfigParser()\n 
c.optionxform = str\n c.read(file)\n if ICSE0XXADevice.MAIN_CFG_SECTION not in c.sections():\n return dev_list\n for k in c[ICSE0XXADevice.MAIN_CFG_SECTION]:\n dev_list.append(ICSE0XXADevice(k, int(c[ICSE0XXADevice.MAIN_CFG_SECTION][k], 16)))\n return dev_list", "def deviceidconfigs(self, site_id, deviceidconfig_id=None, tenant_id=None, api_version=\"v2.0\"):\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n\n if not deviceidconfig_id:\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/sites/{}/deviceidconfigs\".format(api_version,\n tenant_id,\n site_id)\n else:\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/sites/{}/deviceidconfigs/{}\".format(api_version,\n tenant_id,\n site_id,\n deviceidconfig_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"get\")", "def mount_from_settings(device_name):\n device_config = config['network_device'][device_name]\n print('device_mount(' + device_name, *device_config.values(), sep=', ', end=')\\n')\n device_mount(\n device_name,\n device_config['address'],\n device_config['port'],\n device_config['username'],\n device_config['password'])", "def _check_deviceconfig(self, user: User, device_id: str):\n rsp = self.api_deviceconfig.with_auth(user.token).call(\n \"GET\", deviceconfig.URL_MGMT_DEVICE_CONFIGURATION.format(id=device_id)\n )\n assert rsp.status_code == 200\n conf = rsp.json().get(\"configured\")\n assert len(conf) > 0\n assert \"awsCertificate\" in conf\n assert \"awsPrivateKey\" in conf", "def locateconfig(filename):\r\n cfgpaths = [os.path.expandvars('$I3PROD')]\r\n if os.getcwd() not in cfgpaths:\r\n cfgpaths.append(os.getcwd())\r\n cfgpath = get_pkgdata_filename('iceprod.server','data')\r\n if cfgpath:\r\n cfgpaths.append(cfgpath)\r\n for cfgpath in list(cfgpaths):\r\n # try for an etc directory\r\n i = cfgpaths.index(cfgpath)\r\n if os.path.isdir(os.path.join(cfgpath,'etc')):\r\n cfgpaths.insert(i,os.path.join(cfgpath,'etc'))\r\n # try for an iceprod directory\r\n if os.path.isdir(os.path.join(cfgpath,'etc','iceprod')):\r\n cfgpaths.insert(i,os.path.join(cfgpath,'etc','iceprod'))\r\n for cfgpath in cfgpaths:\r\n if os.path.isfile(os.path.join(cfgpath,filename)):\r\n return os.path.join(cfgpath,filename)\r\n raise Exception('config {} not found'.format(filename))", "def get_conf(self, comp, conf_name):\r\n for cfg in comp.configuration_sets[0].configuration_data:\r\n if cfg.name == conf_name:\r\n return cfg.data\r\n return None", "def sdwanapps_configs(self, sdwanapp_id, config_id=None, tenant_id=None, api_version=\"v2.0\"):\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n\n if not config_id:\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/sdwanapps/{}/configs\".format(api_version,\n tenant_id,\n sdwanapp_id)\n else:\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/sdwanapps/{}/configs/{}\".format(api_version,\n tenant_id,\n sdwanapp_id,\n config_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"get\")", "def find_config(config_name):\n # First check for a direct 
reference to actual filename\n path = os.path.abspath(config_name)\n if os.path.exists(path) and os.path.isfile(path):\n return path\n # Check for a direct reference to filename w/o '.csv'\n path = '%s.csv' % path\n if os.path.exists(path) and os.path.isfile(path):\n return path\n\n # Check for the file in the default config dir w/ '.csv' ext\n path = os.path.abspath(os.path.join(config_dir, config_name))\n if os.path.exists(path) and os.path.isfile(path):\n return path\n # Check for the full in the default dir after appending .csv\n path = '%s.csv' % path\n if os.path.exists(path) and os.path.isfile(path):\n return path\n\n # Check in ROVER_PATH last\n # This seems like a questionable model, may want to drop it\n if 'ROVER_PATH' in os.environ.keys():\n path = os.path.abspath(os.path.join(os.environ['ROVER_PATH'],\n config_name))\n if os.path.exists(path) and os.path.isfile(path):\n return path\n # Check for ROVER_PATH w/ .csv extension\n path = '%s.csv' % path\n if os.path.exists(path) and os.path.isfile(path):\n return path\n\n return None", "def get_configs():\n with open(CONFIG_PATH) as f:\n return json.load(f)", "def devices_result(name):\n adb_samples = Path(__file__).parent / \"devices\"\n with (adb_samples / (name + \".out\")).open(encoding=\"utf-8\") as adb_output_file:\n return adb_output_file.read()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve all configurations from TESTCONFIG['audits']['config_dir']
def all_configs(): path = os.path.expanduser(TESTCONFIG['audits']['config_dir']) config_names = [] for glop in ['*conf']: config_names.extend( os.path.basename(x) for x in glob.iglob(os.path.join(path, glop))) return config_names
[ "def get_configs():\n with open(CONFIG_PATH) as f:\n return json.load(f)", "def _configs(self):\n return self.dm.configs", "def _getconf(self, directory=None):\n if directory is None:\n directory = self.curdir\n path = os.path.abspath(os.path.join(self.curdir, directory))\n return self.configs.get(path, {})", "def get_eval_config_files(self):\n return list(\n resources.get_files_in_folder(\n \"config/balanced_vae_study_v1/metric_configs/\"))", "def experiment_configurations(self):\n pass", "def _load_configs(self):\n\n self._configs.clear()\n configs = config_utils.load(self._get_config_file_path())\n self._configs = configs.get('general')\n self._extract_test_roots()", "def config_list():\n click.echo(Config())", "def getConfigs(self):\n return dict([(key, self.getConfig(key)) for key in list(self.config.keys())])", "def get_config():\n current_dir = os.getcwd()\n config_path = find_config_path(current_dir)\n if not config_path:\n print('No .pjconfig file found')\n raise\n try:\n cf = open(config_path, 'r')\n config_text = cf.read()\n except:\n print('Unable to read the .pjconfig file')\n raise\n finally:\n cf.close()\n\n try:\n config_data = parse_json(config_text)\n except:\n print('Your .pjconfig file is not valid JSON. Please fix it and try again.')\n raise\n base_dir = os.path.dirname(config_path)\n\n return [config_data, base_dir]", "def in_cwd():\n configs = []\n\n for filename in os.listdir(os.getcwd()):\n if filename.startswith('.tmuxp') and is_config_file(filename):\n configs.append(filename)\n\n return configs", "def __read_config():\n path_to_config = os.path.join(CONST.ROOT, CONST.PATH_TO_DATASETS_CONFIG)\n with open(path_to_config) as file:\n configs = yaml.load(file, Loader=yaml.FullLoader)\n return configs", "def get_config_files(self):\n if package.backend.FORMAT == \"rpm\":\n return [\"sysconfig/clamd.amavisd\", \"tmpfiles.d/clamd.amavisd.conf\"]\n return []", "def test_readConfig(self):\n results = yield self.runCommand(\n command_readConfig,\n script=\"calendarserver_config\")\n\n self.assertEquals(results[\"result\"][\"RedirectHTTPToHTTPS\"], False)\n self.assertEquals(results[\"result\"][\"EnableSearchAddressBook\"], False)\n self.assertEquals(results[\"result\"][\"EnableCalDAV\"], True)\n self.assertEquals(results[\"result\"][\"EnableCardDAV\"], True)\n self.assertEquals(results[\"result\"][\"EnableSSL\"], False)\n self.assertEquals(results[\"result\"][\"DefaultLogLevel\"], \"warn\")\n\n self.assertEquals(results[\"result\"][\"Notifications\"][\"Services\"][\"APNS\"][\"Enabled\"], False)\n\n # Verify not all keys are present, such as umask which is not accessible\n self.assertFalse(\"umask\" in results[\"result\"])", "def _get_config_dirs():\n config_dirs = [\n USER_CONFIG_DIR,\n os.path.join(\"/\", \"etc\", \"rapport\"),\n os.path.abspath(os.path.join(\"rapport\", \"config\"))\n ]\n return config_dirs", "def configs(self) -> Sequence[\"_SingleFileConfig\"]:", "def get_eval_config_files(self):\n return list(resources.get_files_in_folder(\"config/unsupervised_study_v1/metric_configs/\"))", "def in_dir(config_dir=os.path.expanduser('~/.tmuxp'), extensions=['.yml', '.yaml', '.json', '.ini']):\n configs = []\n\n for filename in os.listdir(config_dir):\n if is_config_file(filename, extensions) and not filename.startswith('.'):\n configs.append(filename)\n\n return configs", "def get_eval_config_files(self):\n return list(\n resources.get_files_in_folder(\n \"config/correlated_factors_study_ws_id2/metric_configs/\"))", "def view_conf() -> None:\n 
print(Config.get_conf())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dataflow should return True if it is to add a custom_params dict at the end of dp
def is_add_custom_params(self): return False
[ "def has_params(self) -> bool:\n return bool(self._params)", "def is_satisfied(self):\n if (self.fed_items is not None):\n return True\n \n parameter = self.command_parameter.parameter\n if parameter.has_default:\n return True\n \n if parameter.is_args() or parameter.is_kwargs():\n return True\n \n return False", "def process_epidemic_parameters(self):", "def test_param(param_def):\n assert param_def.fixed == {'a': 1, 'b': 2, 'c': 3}\n assert param_def.free == {'f': [0, 1]}\n assert param_def.dependent == {'d': '2 + mean([a, b])'}\n assert param_def.dynamic == {'study': {'e': 'distract / c'}}", "def _check_params(self, params):\n params_keys = params.keys()\n assert \"bandwidth\" in params_keys\n assert \"count\" in params_keys\n assert params[\"bandwidth\"] > 0.0\n assert params[\"count\"] > 0\n if not \"enforce_no_matrix\" in params_keys:\n params[\"enforce_no_matrix\"] = False\n if not \"max_memory_usage\" in params_keys:\n params[\"max_memory_usage\"] = 512\n if not \"normalize\" in params_keys:\n params[\"normalize\"] = False\n return params", "def test_adaptive_get_params_decorator(self, name):\n test_adaptive_get_params_fn = self._test_adaptive_get_params_fn()\n stage = self._get_mock_stage()\n state = {'state': 3.0}\n encode_params, decode_params = self.evaluate(\n test_adaptive_get_params_fn(stage, state, name))\n\n # The graph should contain three nodes. Two for the constants created, and\n # one for the multiplication to create the params.\n graph = tf.compat.v1.get_default_graph()\n self.assertLen(graph.as_graph_def().node, 3)\n if name is not None:\n self._assert_all_graph_nodes_in_name_scope(graph, name)\n else:\n self._assert_all_graph_nodes_in_name_scope(\n graph, self._DEFAULT_NAME + '_get_params')\n # The functionality is not modified.\n self.assertEqual(6.0, encode_params['param'])\n self.assertEqual(6.0, decode_params['param'])", "def _defined_by(self, complete_params):\n nr_of_optimized_params_found = 0\n for param, value in complete_params.items():\n # Check if the current param is optimized\n if param in self.design_space.keys():\n nr_of_optimized_params_found += 1\n continue # Move on to the next parameter\n else: # If it's not optimized\n # check whether it has the right value (equal to the one set in default_params )\n if not value == self.default_params [param]:\n return False # return False immediately\n\n if not nr_of_optimized_params_found == len(self.design_space):\n raise LookupError(\"There are parameters in design_space, which aren't in the sample's complete_params.\", str(self.sample_source[complete_params]))\n\n return True", "def hasParameter(self, p) :\n return p in self.parameters", "def is_parameter(self, ):\n\t\tpass", "def supports_parameter_smart_configuration(self):\n return # boolean", "def _has_param(self, udf: Callable, name: str) -> bool:\n return name in list(signature(udf).parameters.keys())", "def _params_validate_and_generate(self) -> None:\n # default params\n if \"d\" not in self.params:\n self.params[\"d\"] = 3\n\n # calculated params\n self.params[\"T\"] = -1 # -1 until a stabilizer round is added!\n self.params[\"num_readout\"] = -1 # -1 until a logical readout is performed!\n self.params[\n \"num_lattice_readout\"\n ] = -1 # -1 until a lattice readout is performed!\n self.params[\"num_data\"] = self.params[\"d\"]\n self.params[\"num_syn\"] = self.params[\"d\"] - 1", "def _validate_ds_params(self, p):\n # Common for all datasources\n if not p.get(\"name\"):\n log.error('Error: New datasource requires \"name\".')\n return\n\n if not 
p.get(\"ds_ip\"):\n if p.get(\"ip\"):\n p[\"ds_ip\"] = p[\"ip\"]\n else:\n if not p.get(\"hostname\"):\n log.error('Error: New datasource requires \"ip\" or \"hostname\".')\n return\n\n if not p.get(\"hostname\"):\n p[\"hostname\"] = \"\"\n\n if not p.get(\"parent_id\"):\n p[\"parent_id\"] = 0\n\n p = self._validate_ds_tz_id(p)\n\n if p.get(\"enabled\") == False:\n p[\"enabled\"] = \"false\"\n else:\n p[\"enabled\"] = \"true\"\n\n if p.get(\"client\"):\n if not p.get(\"dorder\"):\n p[\"dorder\"] = 0\n\n if not p.get(\"maskflag\"):\n p[\"maskflag\"] = \"true\"\n\n if not p.get(\"port\"):\n p[\"port\"] = 0\n\n if not p.get(\"require_tls\"):\n p[\"require_tls\"] = \"F\"\n\n if not p.get(\"type_id\"):\n p[\"type_id\"] = 0\n\n else:\n if not p.get(\"type_id\"):\n log.error('Error: New datasource requires \"type_id\".')\n return\n\n if not p.get(\"zone_id\"):\n p[\"zone_id\"] = 0\n\n if not p.get(\"url\"):\n p[\"url\"] = \"\"\n\n _v2_base_vars = [\n \"client\",\n \"parent_id\",\n \"name\",\n \"ds_ip\",\n \"type_id\",\n \"zone_id\",\n \"enabled\",\n \"url\",\n \"parameters\",\n ]\n\n _v1_base_vars = _v2_base_vars + [\n \"ds_id\",\n \"childEnabled\",\n \"childCount\",\n \"childType\",\n \"idmId\",\n ]\n\n p[\"parameters\"] = []\n popme = []\n for key, val in p.items():\n if self.nitro.api_v == 1:\n if key not in _v1_base_vars:\n p[\"parameters\"].append({\"key\": key, \"value\": val})\n popme.append(key)\n elif self.nitro.api_v == 2:\n if key not in _v2_base_vars:\n p[\"parameters\"].append({\"key\": key, \"value\": val})\n popme.append(key)\n for key in popme:\n p.pop(key)\n return p", "def _check_model_params(self):", "def test_pipeline_adapters_params_correct():\n init_alpha = 12.1\n pipeline = pipeline_with_custom_parameters(init_alpha)\n\n # Convert into OptGraph object\n adapter = PipelineAdapter()\n opt_graph = adapter.adapt(pipeline)\n\n # Get Pipeline object back\n restored_pipeline = adapter.restore(opt_graph)\n\n # Get hyperparameter value after pipeline restoration\n restored_alpha = restored_pipeline.root_node.custom_params['alpha']\n assert np.isclose(init_alpha, restored_alpha)", "def test_get_params_decorator(self, name):\n test_get_params_fn = self._test_get_params_fn()\n stage = self._get_mock_stage()\n encode_params, decode_params = self.evaluate(\n test_get_params_fn(stage, name))\n\n # The graph should contain a single node.\n graph = tf.compat.v1.get_default_graph()\n self.assertLen(graph.as_graph_def().node, 1)\n if name is not None:\n self._assert_all_graph_nodes_in_name_scope(graph, name)\n else:\n self._assert_all_graph_nodes_in_name_scope(\n graph, self._DEFAULT_NAME + '_get_params')\n # The functionality is not modified.\n self.assertEqual(1.0, encode_params['param'])\n self.assertEqual(1.0, decode_params['param'])", "def checkParamsExist(self):\n for param in self.partitioned_params:\n if param not in self.parameter_order and param != 'rate':\n desc = ['partitioned', 'ordered'][param==self.ordered_param]\n raise ValueError('%s param \"%s\" unknown' % (desc, param))", "def supports_parameter_search(self):\n return # boolean", "def isParamOnCurve(*args, **kwargs):\n \n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A helper function for reading an integer from stdin
def read_int(): return int(input('>> '))
[ "def read_integer(self, args):\n return ReadInteger()", "def read_int(input, count, signed = False):\n\n\treturn int.from_bytes(input.read(count), 'little', signed = signed)", "def get_int_input(prompt):\n input_value = None\n while input_value is None:\n try:\n input_value = int(raw_input(prompt))\n except:\n print_error(\"Invalid Number.\")\n return input_value", "def getIntVal(prompt):\n \n while(True):\n inVal = raw_input(prompt)\n try:\n return(int(inVal))\n except:\n print('please enter an integer value')", "def int_input(text, fallback=None):\n while True:\n text = input(text)\n if not text and fallback:\n return fallback\n try:\n return int(text)\n except ValueError:\n print(\"Must be an integer!\")", "def read_barcode():\n print 'Scan barcode now!'\n line = sys.stdin.readline().strip()\n os.system('clear')\n out = int(line)\n return out", "def _readn(fn):\n with open(fn) as f:\n return int(f.read().strip())", "def readNonNegativeInteger(prompt, error_prompt):\n n = -1\n try:\n n = int(input(prompt))\n except ValueError:\n n = -1\n if n < 0:\n # User entered an invalid value for n. Display error and ask them again\n print(error_prompt)\n n = readNonNegativeInteger(prompt, error_prompt)\n return n", "def test_readinto():\n import sys\n try:\n x = sys.stdin.readinto\n return True\n except:\n return False", "def read_input():\r\n try:\r\n return input()\r\n except EOFError as eof:\r\n logging.shutdown()\r\n raise SystemExit(eof)", "def _read_line() -> List[int]:\n return list(map(int, input().split()))", "def readIntegersFromConsole(self):\n try:\n line = raw_input()\n inputArray = line.split(' ')\n \n for j in range(len(inputArray)):\n inputArray[j] = int(inputArray[j])\n\n return inputArray \n except EOFError as error:\n return [];", "def get_positive_integer_entry(text=\"Input +ve or -ve integer\", prompt=\"0\"):\n while True:\n data = input(\"{} [{}]:\".format(text, prompt))\n if data == \"\":\n data = prompt\n try:\n data = int(data)\n return abs(data)\n except ValueError as e:\n if debug: print(\"Value Error: {}\".format(e))\n print(\"Invalid data, please re-enter...\")\n continue", "def _read(prompt: str, typ: type = None):\n\n while True:\n raw = input(prompt)\n\n if typ is not None:\n try:\n return typ(raw)\n except (TypeError, ValueError):\n print(f'Invalid {typ}: {raw}.')\n continue\n\n return raw", "def readint(self):\n\t\tbe_int=self.fpbf.read(4)\n\t\tif len(be_int) == 0:\n\t\t\treturn -1\n\t\telse:\n\t\t\tle_int=unpack('!L',be_int)\n\t\t\treturn le_int[0]", "def get_num():\n i = 0\n while (i > 127) or (i < 1):\n try:\n i = int(input(\"Enter ID # from 1-127: \"))\n except ValueError:\n pass\n return i", "def ReadNumber(a, b):\n while True:\n response = input(f\"[{a}-{b-1}] >> \").strip()\n try:\n result = int(response)\n if result < a or result >= b:\n print(f\"Please enter an integer between {a} and {b-1} inclusive.\")\n else:\n break\n except ValueError:\n print(f\"Please enter an integer between {a} and {b-1} inclusive.\")\n return result", "def read_int16():\n while True:\n try:\n i = int(raw_input('> '))\n\n if is_int16(i):\n return i\n except ValueError:\n pass\n\n print(\"Must be a positive integer between\", -2**16/2 + 1, \" and\", 2**16/2 - 1)", "def ex6_ReadChar():\n N = input()\n print(N)", "def get_number(prompt):\n res = None\n while res is None:\n try:\n res = float(raw_input(prompt))\n except ValueError: pass\n return res" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A helper function for reading a bool from stdin. Requires that the user type "1" or "0".
def read_bool():
    val = input('>> ')
    while val not in ('1', '0'):
        print("Answer Yes (1) or No (0)")
        val = input('>> ')
    return bool(int(val))
[ "def parse_bool(question, default=True):\n choices = 'Y/n' if default else 'y/N'\n default = 'Y' if default else 'N'\n while True:\n answer = raw_input('%s [%s]: ' % (question, choices)).upper() or default\n if answer.startswith('Y'):\n return True\n elif answer.startswith('N'):\n return False\n else:\n print(\"Invalid selection: '%s'. Must be either [y]es or [n]o.\"\n % answer)", "def strtobool(val):\n val = val.lower()\n if val in {\"y\", \"yes\", \"t\", \"true\", \"on\", \"1\"}:\n return 1\n if val in {\"n\", \"no\", \"f\", \"false\", \"off\", \"0\"}:\n return 0\n raise ValueError(f\"invalid truth value {val!r}\")", "def get_bool(name):\n value = get_var(name)\n if isinstance(value, bool):\n return value\n if isinstance(value, int):\n return value != 0\n if isinstance(value, string_types):\n return value.lower() in (\"yes\", \"y\", \"true\", \"t\", \"1\")\n if value:\n return True\n return False", "def boolean_from_str(src):\n if src is None:\n return None\n elif src == \"true\":\n return True\n elif src == \"false\":\n return False\n elif src == \"1\":\n return True\n elif src == \"0\":\n return False\n else:\n raise ValueError", "def get_bool(item):\r\n\r\n if str(item).lower() in ['true','yes','1','t','y']:\r\n return True\r\n if str(item).lower() in ['false', 'no', '0', 'f', 'n']:\r\n return False\r\n raise ValueError(\"'%s' cannot be parsed into a boolean value\" % item)", "def parse_user_input(\n data: typing.Optional[typing.Union[str, bool]]\n) -> typing.Optional[typing.Union[str, bool]]:\n try:\n return parse_bool(data)\n except TypeError:\n pass\n\n try:\n parse_none(data)\n return None\n except TypeError:\n pass\n\n return data", "def parse_bool(s, default=False):\n if s is None:\n return default\n return TRUTH.get(s.lower(), default)", "def _getBoolInput(self, data_block, plug, is_array=False):\r\n \r\n return self._getGenericInput(data_block, plug, bool, \"asBool\", is_array=is_array, array_type=self.BOOL_LIST_TYPE)", "def read(self, input: 'SoInput', name: 'SbName') -> \"SbBool\":\n return _coin.SoField_read(self, input, name)", "def BoolGET(val):\n if isinstance(val, bool):\n return val\n elif isinstance(val, str):\n if val == '1' or val.lower() == 'true':\n return True\n elif val == '0' or val.lower() == 'false':\n return False\n raise Invalid(\n 'boolean must be \"1\", \"true\", \"0\", or \"false\" (case insensitive)'\n )", "def str2bool(text: str) -> bool:\n text = text.lower()\n if text == \"true\":\n return True\n elif text == \"false\":\n return False\n else:\n raise ValueError(f\"Cannot parse bool: '{text}'\")", "def promptBoolean(self, key, prompt, default_value):\n # Make the default option the true/false given\n o = default_value\n\n # Loop until good option\n self.setAutoFileComplete(False)\n print(line)\n while True:\n\n # Print prompt\n self.printPrompt(key,prompt)\n\n # Get input\n newo = b.getInput(o)\n\n # Finish if valid input\n newo = newo.lower()\n if newo in [\"yes\",\"y\",\"true\"]:\n return True\n elif newo in [\"no\",\"n\",\"false\"]:\n return False", "def input_yes_no(prompt):\n while True:\n result = input(prompt)\n if result and result[0] == 'y':\n return True\n elif result and result[0] == 'n':\n return False\n else:\n print(\"Not understood - please respond yes or no.\")", "def parse_bool(value):\n return bool({\n 'True': True,\n 'False': False\n }.get(value, value))", "def bool_converter(val):\n return bool(strtobool(str(val)))", "def readByte(self, *args) -> \"SbBool\":\n return _coin.SoInput_readByte(self, *args)", "def 
_tristate_bool_option(val: str) -> Union[None, bool]:\n val = val and val.strip().lower()\n if not val:\n return None\n if val in \"true 1 yes on\".split():\n return True\n if val in \"false 0 no off\".split():\n return False\n raise ValueError(f\"invalid boolean {val!r} supplied\")", "def Bool(val):\n if type(val) is bool:\n return val\n if isinstance(val, str):\n v = val.upper()\n if v in {'TRUE', 'YES', 'T', 'Y', '1'}:\n return True\n if v in {'FALSE', 'NO', 'F', 'N', '0'}:\n return False\n elif int(val) == float(val):\n v = int(val)\n if v in {0, 1}:\n return bool(v)\n raise ValueError(\"Expected Boolean, but received %s\" % (val,))", "def _get_bool(val) -> bool | None:\n if isinstance(val, bool):\n return val\n elif isinstance(val, str):\n if val.strip().lower() == \"true\":\n return True\n elif val.strip().lower() == \"false\":\n return False\n return None", "def eval_request_bool(val, default=False):\n assert isinstance(default, bool)\n if val is not None:\n val = val.lower()\n if val in [\"False\", \"false\", \"0\", \"n\", \"no\", \"off\"]:\n return False\n if val in [\"True\", \"true\", \"1\", \"y\", \"yes\", \"on\"]:\n return True\n return default" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines which letter to dial to, based on the vault's serial number and code word.
def dial_to(vault_state, code):
    # First index is the fourth to last character of code
    first_index = int((vault_state['serial number'])[-4])
    # The last index is the second to last character of code
    last_index = int((vault_state['serial number'])[-2])
    # The substring is the string from the first to last index of code
    sub_string = code[first_index:(last_index+1)]
    # The desired character to return is the
    # lowest valued char in the substring
    # return sorted(sub_string[0])
    return (sorted(sub_string))[0]
[ "def _find_letter(self, pin_number: int) -> str:\n offset = pin_number - self.rotation\n return alphabet[offset]", "def get_letter(serial_number: int):\n if serial_number < 0 or serial_number > 33:\n raise ValueError(\"Invalid serial number: {:d}.\".format(serial_number))\n if serial_number == 0:\n return \" \"\n if serial_number == 7:\n return 'ё'\n addition = 2 if serial_number > 7 else 1\n return chr(ord('А') + serial_number - addition)", "def current_letter():\n return next_letter_data.current_letter", "def get_residue(self, resnum):\n\n if self.start:\n index_num = resnum-self.start\n print index_num\n one_letter_code = self.sequence[index_num]\n return one_letter_code\n else:\n #Rosetta Resnum\n one_letter_code = self.sequence[int(resnum)-1]\n return one_letter_code", "def correct_letter_code(code):\n if code > ord('z'.lower()):\n return code - 26\n elif code < ord('a'.lower()):\n return code + 26\n else:\n return code", "def alphacode(pin):\n for i in range(2):\n x = pin \n\n \n mnemonic = \"This can't be right\"\n return mnemonic", "def getChar(self):\n if self.UART.is_open:\n if self.studentNumBox.hasFocus():\n c = self.UART.read(1)\n if c:\n self.studentNumBox.setText(self.studentNumBox.text() + c.decode('ascii'))\n elif self.passwordBox.hasFocus():\n c = self.UART.read(1)\n if c:\n self.passwordBox.setText(self.passwordBox.text() + c.decode('ascii'))", "def letter_at(self, index):\n if (index >= 1) and (index <= len(self)):\n return self[index-1]\n else:\n return u'?'", "def base_pair(c):\n\n c = c.lower()\n\n to_return = 'unknown'\n\n if(c == 'a'):\n to_return = 't'\n elif(c == 't'):\n to_return = 'a'\n elif(c == 'g'):\n to_return = 'c'\n elif(c == 'c'):\n to_return = 'g'\n\n return to_return", "def convert_binding(binding: str):\n special_keys = {\n \"TAB\": 9,\n \"ENTER\": 10,\n \"UP\": 259,\n \"DOWN\": 258,\n \"LEFT\": 260,\n \"RIGHT\": 261\n }\n\n # If the binding is a special key,\n # convert it with the dictionary of special keys\n if binding in special_keys.keys():\n return special_keys.get(binding)\n\n # If the binding is only one character long,\n # convert the binding with the ord method\n if len(binding) == 1:\n return ord(binding)\n\n # If the binding is invalid,\n # return -1\n return -1", "def get_new_letter(letter, my_type):\r\n uppercase = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\",\"K\",\"L\",\"M\",\"N\",\"O\",\"P\",\"Q\",\"R\",\"S\",\"T\",\"U\",\"V\",\"W\",\"X\",\"Y\",\"Z\"]\r\n lowercase = [\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\",\"o\",\"p\",\"q\",\"r\",\"s\",\"t\",\"u\",\"v\",\"w\",\"x\",\"y\",\"z\"]\r\n if my_type == \"upper\":\r\n for i in range(0,len(lowercase)):\r\n if lowercase[i] == letter:\r\n return uppercase[i]\r\n else:\r\n for i in range(0,len(uppercase)):\r\n if uppercase[i] == letter:\r\n return lowercase[i]", "def lookup(number):\n res = requests.get(\"https://api.opencnam.com/v2/phone/%s\" % number)\n cnam = res.content.decode()\n if cnam != \"\":\n return cnam\n else:\n return None", "def readletter(self, letter):\n if letter not in self.transitions.keys():\n return -1\n return self.transitions[letter]", "def guess():\n return letter", "def voice(char: int) -> int:\n if char not in VOICABLE:\n raise ValueError('%r cannot be voiced' % (chr(char),))\n if char == 0x3046 or char == 0x30a6:\n return char + 0x004e\n if 0x30ef <= char and char <= 0x30f2:\n return char + 0x0008\n return char + 0x0001", "def current_char(self) -> str:", "def postal_code_letter(cls):\r\n return 
cls.random_element(cls.postal_code_letters)", "def getNthLetter(nth):\n return chr(nth + 65) # The ASCII value of 'A' is 65.", "def symbol(self):\n if self.command_type() == 'A_COMMAND':\n return self.next_command.split('@')[1]\n if self.command_type() == 'L_COMMAND':\n return self.next_command.split('(')[1][:-1]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Program entry point. Greets the user and begins the interactive layer override guide. Prior to exit, the program warns the user to wait a certain amount of time before opening the vault.
def main():
    print("Welcome to the XY77 Battering Ram")
    state = get_vault_state()
    print("State acquired. Let's start.")
    print("\n**History Layer**")
    history_layer(state)
    print("\n**Code Layer**")
    code_layer(state)
    print("\n**Switches Layer**")
    switches_layer(state)
    print("\n**Button Layer**")
    button_layer(state)
    print("Layers bypassed.")
    print("Wait", state['suspicion level'], "seconds or more to allow suspicion level to dissipate.")
[ "def main():\n print(\"Let the games begin!\")\n ev3.Sound.speak(\"Starting Frogger 2.0 Game\").wait()\n\n main_follow_the_line()", "def main():\n the_loop = True\n type_out(\"If at any time you wish to restart of quit please enter: '!restart' or '!quit'\")\n while the_loop:\n print(\"\")\n type_out(\"Please enter a planet you would like to see your age on:\")\n saved_input = input()\n item1, item2 = check_planet(saved_input)\n type_out(item1)\n type_out(item2)", "def main ():\n altitude = 100\n fuel = 100\n velocity = 0\n while altitude > 0:\n # print(\"start here\")\n # normalizes fuel_burnt\n fuel_burnt = input(\"How much fuel you want to burn?\")\n # print(type(fuel_burnt))\n try:\n fuel_burnt = int(fuel_burnt)\n if fuel_burnt <= 0:\n fuel_burnt = 0\n elif fuel_burnt > fuel:\n fuel_burnt = fuel\n elif fuel <= 0:\n alert (\"Ran out of fuel.\")\n fuel_burnt = 0\n else:\n fuel_burnt = fuel_burnt\n velocity = velocity + 1.6 - fuel_burnt * 0.15\n print(\"your velocity is \" + str(velocity))\n # calculates fuel level at end of this round\n fuel -= fuel_burnt\n print(\"your fuel level is \" + str(fuel))\n # calculates altitude at end of this round\n altitude -= velocity\n print(\"your altitude is \" + str(altitude))\n except ValueError:\n print(\"That's not an integer! Please enter an integer\")\n # checks if laning velocity is greater than 10\n if velocity <= 10:\n print (\"You landed safely! Your altitude is \" + str(altitude) + \". Your landing velocity was \" + str(velocity))\n else:\n print (\"Ooops! Crashed! Your landing velocity was \" + str(velocity) + \".\")\n play_again()", "def shell_intro(self):\n\n print('''\n ----------------------------------------------------------\n Welcome to Flappy Bird. Below are game the game controls:\n Fly the bird: Press Space or Up Arrow key\n Quit: Click the exit botton or press Q\n ----------------------------------------------------------\n ''')\n\n start = False\n while not start:\n start = True\n difficulty = str(input('''\n Please select your difficulty by typing in 1 to 4:\n e: easy\n n: normal\n h: hard\n l: ludicrous_mode\n q: quit the game. I don't want to have fun\\n\n '''))\n\n # set difficulty based on user's input\n if difficulty == 'e':\n self.gap = 130\n elif difficulty == 'n':\n self.gap = 110\n elif difficulty == 'h':\n self.gap = 90\n elif difficulty == 'l':\n self.velocity = 5\n self.gap = 150\n elif difficulty == 'q':\n pass\n else:\n start = False\n print('please enter correct difficulty level')\n\n if difficulty == 'q':\n return (False)\n else:\n return (True)", "def intro():\n\n inventory_insert('potion')\n\n print_slow(\"Now you are ready to go on an adventure. You will be able to travel\", 0.025)\n print_slow(\"and collect awesome items and level up to your hearts content.\", 0.025)", "def main():\n\tlogger.info('Beginning %s' %sys.argv[0])\n\t\n\tif os.path.exists(os.path.join(os.getcwd(), 'EULA')):\n\t\tlogger.info('The EULA file exists. 
Will prompt user for accepting EULA')\n\t\tif not acceptEULA(os.path.join(os.getcwd(), 'EULA')):\n\t\t\tuninstallAndExit()\n\t\n\t# check prerequisites\n\tprint('Checking for pre-requisites')\t\n\tcheckPreReqs()\n\t\n\t# Check if all drivers are available\n\tprint('Checking for drivers')\n\tcheckDrivers()\n\t\n\t# Install backend\n\tprint('Installing backend')\t\n\tinstallBackend()\n\t\n\t# Install popup server files\n\tprint('Installing Popup server')\t\n\tinstallPopupServer()\n\t\n\t# Setup Popup server to run at login\n\tprint('Adding popup server to login')\t\n\taddPopupServerToLogin()\n\t\n\t# Setup Log Directories\n\tprint('Setting up log directories')\t\n\tsetupLoggingDirectories()\n\t\n\t# Setup Print Queues\n\tprint('Installing printer queues')\t\n\tinstallPrintQueuesUsingConfigFile()\n\t\n\t# Install Uninstaller\n\tprint('Adding uninstaller')\t\n\tinstallUninstaller()\n\t\n\tprint('\\nDU printing has been successfully installed on your computer. Please restart your GUI session to complete the installation process. The simplest way to do this is to log out and log back in!')", "def main():\r\n\tsaludo()\r\n\twhile True:\r\n\t\tmenu()\r\n\t\tprint()\r\n\t\tsleep(1)", "def run():\n\n clear_shell()\n play(choose_difficulty())", "def main():\n CLI.from_command_line()\n exit(0)", "def intro(level):\n os.system('clear')\n print(\"\\nLEVEL \", level)\n if level == 1:\n print(\"\"\"\n You are poor farmer but you have a dream.\n You want to sail the ship. Someone in city sell tickets.\n But last night you drank too much. You don't know what is going on.\n You're naked and you have a big hangover.\n Talk to host if you want know what is your name.\n Good luck!\n \"\"\")\n elif level == 2:\n print(\"\"\"\n After you get all your clothes you go back to your farm.\n From a distance you hear the voice of your wife.\n Now you are in home and you have a bad feelings...\n \"\"\")\n elif level == 3:\n print(\"\"\"\n It's time to sell your harvest and buy a ticket for great travel.\n You go to city and you see two buildings, in one of them is ticket seller.\n Talk to him.\n \"\"\")\n input(\"Click anything to continue\")", "def main():\n # Initialization\n server_address, server_port, window_height, window_width, alarm_state = initialize()\n\n # If the alarm is on\n if alarm_state == 1:\n\n # Test if the user is awake\n awake_test(window_height, window_width)\n\n # After having completed the awoke_test properly, stop the alarm\n set_alarm_state(server_address, server_port, 0)\n\n sys.exit()\n\n # If the server is not in alarm mode\n elif alarm_state == 0:\n\n # Go into management mode\n management(server_address, server_port)\n\n sys.exit()", "def main():\n global minute_timer, configuration\n\n menu_options = [\n ('Change Time', None, show_change_time_dialog),\n ('Reset Time', None, reset_time),\n ('Pause', is_paused, pause_clicked),\n ]\n\n ChangeTimeDialog().set_time_changed_callback(change_time_dialog_time_changed_callback)\n\n # Load the logged time from the json file if it exists\n if os.path.exists(config_file_name):\n with open(config_file_name) as json_file:\n configuration = json.load(json_file)\n\n ChangeTimeDialog().set_time(configuration['logged_minutes'])\n else:\n configuration['logged_minutes'] = 0\n\n # Create a minute timer for logging the worked minutes\n minute_timer = MinuteTimer(minute_timer_callback)\n\n # Create the system tray icon and menus. 
This function is blocking.\n SysTrayIcon(icons['paused'], '{:02d}:{:02d}'.format(int(configuration['logged_minutes'] / 60), configuration['logged_minutes'] % 60),\n menu_options, call_on_startup=on_startup, on_exit=on_exit, default_menu_index=2)", "def main():\n\n LED.set_color(RED)\n PRINTER.online()\n time.sleep(10)\n LED.set_color(YELLOW)\n greet()\n LED.set_color(GREEN)\n\n while True:\n if BUTTON.is_pressed():\n LED.set_color(YELLOW)\n winchance = random.random()\n if winchance > 0:\n print_secret_msg()\n else:\n print_poem()\n LED.set_color(GREEN)", "def main():\n\n load_dotenv()\n\n with requests.session() as s:\n calam_voyager.blocking.login(s, os.environ['VOYAGER_USERNAME'], os.environ['VOYAGER_PASSWORD'])", "def main():\r\n # Greeting to the user\r\n print(\"Welcome to the Study Timer!\\n\")\r\n # Get the amount on time the person wants to study for\r\n study_length = study_session_length()\r\n # Ask how often they want to break\r\n break_interval = break_interval_time()\r\n # Ask the user what activities they would like to do during break\r\n shuffled_list_of_activities = activities()\r\n # Determine number of breaks\r\n number_of_breaks = number_breaks(study_length, break_interval)\r\n # Start timer\r\n interval_timer(break_interval, number_of_breaks,\r\n shuffled_list_of_activities)\r\n # Ask user to start over\r\n run_timer_again()", "def main() -> None:\n try:\n _main()\n except KeyboardInterrupt:\n sys.exit(2)", "def main_program_loop():\n while True:\n clear()\n user_request = encrypt_decrypt_or_exit()\n\n # calls the encryption/decryption functions or ends the program\n\n if user_request == 'exit':\n print('May your adventures go well.')\n sys.exit()\n elif user_request == 'encrypt':\n encrypt()\n elif user_request == 'decrypt':\n decrypt()", "def tutorial():\n\tprint \"King's Decision puts you in the position of a king forced to make quick choices.\"\n\tprint \"You will be presented with a number of random situations and given a number of \"\n\tprint \"choices to choose from. You will have 15 seconds to make a snap decision; if you \"\n\tprint \"fail to come to a decision, you will automatically choose to behead the person presenting \"\n\tprint \"the case, much to the chagrin of your court and subjects. If you do this twice, the people \"\n\tprint \"will revolt and kill you.\"\n\tprint \"\\n\"\n\tprint \"The goal is to come to prudent, informed, and honorable decisions. Bad decisions will\"\n\tprint \"bring consequences, such as growing unrest among the people. 
If you are able to make\"\n\tprint \"good decisions five times in a row, you will win the title of 'the Great', and win the game.\"\n\tprint \"Best of luck to you, the king!\"\n\ttime.sleep(5)\n\traw_input(\"Press any key to begin the game.\")\n\tgame_start()", "def init():\n click.echo(\"Enter the profile name, server, token and path to vault secrets\")\n profile_name = click.prompt(\"Profile Name\", type=str)\n vault_server = click.prompt(\"Vault Server\", type=str)\n vault_token = click.prompt(\"Vault Token\", type=str)\n vault_secret_path = click.prompt(\"Path to vault secret\", type=str)\n\n config_file = config.create_config_file(\n vault_server, vault_token, vault_secret_path, profile_name\n )\n\n yaml.dump_data_to_yml(config_file)\n\n click.echo(\n \"\"\"\n Following information is saved.\n name: {name}\n vault_server: {server}\n vault_token: {token}\n vault_secret_path: {secret_path}\n \"\"\".format(\n name=profile_name,\n server=vault_server,\n token=vault_token,\n secret_path=vault_secret_path,\n )\n )", "def main():\n # states_list and capitals_list take the return values from read_text_file_...\n states_list, capitals_list = read_text_file_and_split_into_two_lists()\n\n # variable used to determine if game starts over\n user_choice_to_play_again = 'yes'\n\n while user_choice_to_play_again == 'yes':\n\n state_or_capital = input(\"Want to be quizzed on capital names or state names? Type 1 for capitals, \"\n \"2 for states:\\n\")\n\n # While loop makes sure user enters 1 or 2. protects against users entering long string to break program.\n while state_or_capital != '1' and state_or_capital != '2':\n state_or_capital = input(\"Want to be quizzed on capital names or state names? Type 1 for capitals, \"\n \"2 for states:\\n\")\n\n # passes the users quiz choice, the states_list, and the capitals_list\n quiz_on_cap_or_states(int(state_or_capital), states_list, capitals_list)\n\n # variable used to determine if game starts over\n user_choice_to_play_again = str(input(\"Want to play again? Type yes or no:\\n\").lower())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate and save a JSON containing the whole alarm configuration for a building
def set_alarm_json(building, user):
    cunits = get_c_unitsforbuilding_for_operation("Modificar alarmas", UPDATE, user, building)[0]
    eAlarmsPerEDevices = []
    for cu in cunits:
        p_serial = cu.profile_powermeter.powermeter.powermeter_serial
        eDeviceAlarms = []
        cu_alarms = Alarms.objects.filter(consumer_unit=cu).exclude(
            status=False).exclude(alarm_identifier="Interrupción de Datos")
        for cua in cu_alarms:
            status = 1 if cua.status else 0
            min_value = 0 if not cua.min_value else float(str(cua.min_value))
            max_value = 0 if not cua.max_value else float(str(cua.max_value))
            eDeviceAlarms.append(
                dict(alarm_identifier=cua.alarm_identifier,
                     electric_parameter_id=cua.electric_parameter.pk,
                     min_value=min_value,
                     max_value=max_value,
                     status=status))
        eAlarmsPerEDevices.append(dict(powermeter_serial=p_serial,
                                       EDeviceAlarms=eDeviceAlarms))
    i_eq = IndustrialEquipment.objects.get(building=building)
    i_eq.has_new_alarm_config = True
    i_eq.new_alarm_config = json.dumps(
        dict(eAlarmsPerEDevices=eAlarmsPerEDevices))
    i_eq.modified_by = user
    i_eq.save()
[ "def create_celery_file(self):\n rates = {queue.name: queue.rate for queue in self.queues.values()\n if isinstance(queue, PushQueue)}\n with open(get_celery_configuration_path(self._app_id), 'w') as config_file:\n json.dump(rates, config_file)", "def create_alarm(self, config_alarm_info):\n mon_plugin = MonPlugin()\n plugin_uuid = mon_plugin.configure_rest_plugin()\n alarm_uuid = mon_plugin.configure_alarm(config_alarm_info)\n return alarm_uuid", "def setup(self):\n with open(self.file_name, \"w\") as f:\n json.dump(obj=self.j_config, fp=f, indent=4, sort_keys=True)", "def _create_config_file(self):\n with open(self._config_file_path, \"w\") as file:\n json.dump(self._default_config_dict, file)", "def get_data(self, alarm, config):\n return {}", "def _get_alarm_dict(self, **kwargs):\n alarm_id = self._generate_random_name()\n alarm = {\"alarm_id\": alarm_id,\n \"name\": \"TestAlarm-%s\" % alarm_id,\n \"description\": \"Test Alarm\"}\n\n alarm.update(kwargs)\n return alarm", "def create_upload_config(self, name='out'):\n with open(str(self.out / '{}.content.runtime'.format(name)), 'w') as f:\n f.write(json.dumps(self.upload))\n self.logger.info(\"Content of upload config file:\\n{}\".format(self.upload))", "def write_json(self):\n print \"writing json file...\",\n f = open(\"absorptionspectra.json\",\"w\")\n json.dump(self.data,f)\n f.close()\n print \"done!\"", "def generate_example_json(config):\n return generate_example(config, ext=\"JSON\")", "def create_alarm() -> str:\r\n #List space for just one alarm\r\n schedule_section = []\r\n\r\n #Catches date and time user has entered\r\n user_date_time = request.args.get(\"date_time\")\r\n #Catches reason user has entered\r\n user_reason = request.args.get(\"reason\")\r\n\r\n #Seperates date and time\r\n analyse_list = list(user_date_time)\r\n alarm_date = \"\"\r\n alarm_time = \"\"\r\n for date_part in range(0, 10):\r\n alarm_date = alarm_date + analyse_list[date_part]\r\n for time_part in range(11, 16):\r\n alarm_time = alarm_time + analyse_list[time_part]\r\n\r\n #appends information to a list\r\n schedule_section.append(alarm_date)\r\n schedule_section.append(alarm_time)\r\n schedule_section.append(user_reason)\r\n\r\n #appends list to the overall alarm list\r\n alarm_schedule.append(schedule_section)\r\n\r\n #Outputs Alarm created by the user\r\n confirm_message = (\"Alarm created at\", alarm_time, \"on\",\r\n alarm_date, \"because of\", user_reason)\r\n\r\n #Accessing Template from JSON File\r\n config_file = config_handle()\r\n create_template = config_file[\"file_paths\"][\"create_output\"]\r\n return render_template(create_template,\r\n user_confirm=confirm_message)", "def write_json(file, env=None):\n build_conf = build_dict(env=env)\n if isinstance(file, basestring):\n with open(file, \"w\") as f:\n json.dump(build_conf, f)\n else:\n json.dump(build_conf, file)", "def create_output_json_file():\n data = [{'test': 'test_value'}]\n path_to_create_file = os.path.join(projectPath, \"data//room_board_details.json\")\n with open(path_to_create_file, \"w\") as write_file:\n json.dump(data, write_file)", "def create_json_config_file():\n logger.info(\"Creating JSON Configuration file\")\n config_param = {'server_log_name':\n 'Enter valid server log file name',\n 'http_method': 'Enter a valid HTTP method',\n 'logging_level': 'Enter a logging level as a number only',\n 'line_number': 'Enter the number of lines to be displayed',\n 'password': 'Enter the password of at least 8 characters'}\n # getting paramters\n http_methods = ['GET', 
'HEAD', 'POST', 'PUT', 'DELETE',\n 'CONNECT', 'OPTIONS', 'TRACE', 'PATCH']\n logging_levels = ['0', '10', '20', '30', '40', '50']\n server_log_name = str()\n http_method = str()\n logging_level = int()\n line_number = int()\n password = str()\n # processing server name\n while(True):\n server = get_config_parameter(config_param.get('server_log_name'))\n if(len(server.strip()) > 0):\n server_log_name = server\n break\n print('Invalid server name retry')\n while(True):\n method = get_config_parameter(config_param.get('http_method'))\n if(method.strip().upper() in http_methods):\n http_method = method.strip().upper()\n break\n print('Invalid http method retry')\n while(True):\n level = get_config_parameter(config_param.get('logging_level'))\n if(level.strip() in logging_levels):\n logging_level = level.strip()\n break\n print('Invalid logging level name retry')\n while(True):\n number = get_config_parameter(config_param.get('line_number'))\n if(number.strip().isdigit() and int(number.strip()) >= 0):\n line_number = number.strip()\n break\n print('Invalid line numbers retry')\n while(True):\n psswd = get_config_parameter(config_param.get('password'))\n if(len(psswd.strip()) >= 8):\n password = psswd.strip()\n break\n print('Invalid password lenght should be >= 8 retry')\n # saving information\n hashed_password = generate_password_hash(password)\n config_information = {\n 'server_name': server_log_name,\n 'http_method': http_method,\n 'logging_level': logging_level,\n 'line_number': number,\n 'password': hashed_password\n }\n try:\n with open('config_file.json', 'w', encoding='utf-8') as out_file:\n json.dump(config_information, out_file)\n # normally could not occur because if file does not exist python created it\n except OSError:\n logger.info(\"Could not write and/or open the file config_file.json \")\n print('Sorry, the operation could not be done')\n exit()", "def _add_metric_alarm_config(alarm_info, current_alarms):\n # Some keys that come from the argparse options can be omitted\n omitted_keys = {'debug', 'alarm_name', 'command', 'clusters', 'function'}\n\n current_alarms[alarm_info['alarm_name']] = {\n key: value\n for key, value in alarm_info.iteritems() if key not in omitted_keys\n }\n\n return current_alarms", "def save(self):\n args = [\n (\"StartLocalTime\", self.start_time.strftime(TIME_FORMAT)),\n (\n \"Duration\",\n \"\" if self.duration is None else self.duration.strftime(TIME_FORMAT),\n ),\n (\"Recurrence\", self.recurrence),\n (\"Enabled\", \"1\" if self.enabled else \"0\"),\n (\"RoomUUID\", self.zone.uid),\n (\n \"ProgramURI\",\n \"x-rincon-buzzer:0\" if self.program_uri is None else self.program_uri,\n ),\n (\"ProgramMetaData\", self.program_metadata),\n (\"PlayMode\", self.play_mode),\n (\"Volume\", self.volume),\n (\"IncludeLinkedZones\", \"1\" if self.include_linked_zones else \"0\"),\n ]\n if self.alarm_id is None:\n response = self.zone.alarmClock.CreateAlarm(args)\n self._alarm_id = response[\"AssignedID\"]\n alarms = Alarms()\n if alarms.last_id == int(self.alarm_id) - 1:\n alarms.last_alarm_list_version = \"{}:{}\".format(\n alarms.last_uid, self.alarm_id\n )\n alarms.alarms[self.alarm_id] = self\n else:\n # The alarm has been saved before. 
Update it instead.\n args.insert(0, (\"ID\", self.alarm_id))\n self.zone.alarmClock.UpdateAlarm(args)\n return self.alarm_id", "def json(self):\n return json.dumps(self.conf, indent=4, separators=(',', ': ')) + '\\n'", "def generate_json_report(config, trial_results):\n\n with open(config['JSON_REPORT_PATH'], 'w', encoding='utf-8') as file:\n json.dump(trial_results, file, ensure_ascii=False, indent=4)", "def _write_default():\n json.dump(EmailConfig.default_email_config, open(paths.EMAIL_CONFIG, 'w'), indent=4, sort_keys=True)", "def generate(self, filepath: str, data: dict): \n \n with open(filepath, 'w') as f:\n json.dump(data, f, indent=4)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete an old image.
def delete_image(self, event):
    remove_image = os.path.join(
        self._directory_path, "{}{}".format(self._image_id, ".jpg")
    )
    try:
        os.remove(remove_image)
        _LOGGER.debug("Deleting old image %s", remove_image)
    except OSError as error:
        if error.errno != errno.ENOENT:
            raise
[ "def delete(self, *args, **kwargs):\n self.image.delete()\n super(StoredImage, self).delete(*args, **kwargs)", "def delete_old_image():\n path_to_dir = os.getcwd()\n previous_number = ChangeWallpaper.get_last_number() - 1\n\n if os.path.isfile(path_to_dir + \"/wallpaper\" + str(previous_number) + \".jpg\"):\n os.remove(path_to_dir + \"/wallpaper\" +\n str(previous_number) + \".jpg\")", "def delete_image(sender, instance, **kwargs):\n if os.path.exists(instance.image.path):\n os.remove(instance.image.path)", "def _remove_existing(img):\n if os.path.exists(img):\n os.unlink(img)\n return img", "def delete_image(self, http_request, image_id):\n image = self.image_by_id(image_id)\n if image:\n self.glance_admin_image_store.remove(image)\n http_request.setResponseCode(204)\n return b''\n http_request.setResponseCode(404)\n return b''", "def img_delete_by_path(self, img_path: str) -> None:\n img = self.img_by_path(img_path)\n if img:\n self.__session.delete(img)\n self.commit()\n else:\n print('No such image')", "def delete_image(filename):\n # Delete image\n image_path = (Path(__file__).parent / f'../images{filename}').resolve()\n if os.path.exists(image_path):\n os.remove(image_path)", "def destroy(self, image):\n return self.image.destroy(image)", "def delete_image(self, node_image):\r\n\r\n raise NotImplementedError(\r\n 'delete_image not implemented for this driver')", "def img_delete_by_id(self, img_id: int) -> None:\n img = self.img_by_id(img_id)\n if img:\n self.__session.delete(img)\n self.commit()\n else:\n print('No such image')", "def delete_image(self, offset, total):\n idx = self._idx + offset\n try:\n obj = self.__getitem__(idx)\n except IndexError:\n return None\n\n self._backup.append((idx, obj))\n\n del self._filenames[idx]\n obj.delete()\n\n if self._idx > 0 and total / 2 > offset:\n self._idx -= 1\n self._load(self._idx - self.PRELOAD_RANGE)\n else:\n self._load(self._idx + self.PRELOAD_RANGE + 1)\n\n return obj", "def delete_a_image(answer_id):\n current_image = get_answer_image(answer_id)\n if current_image:\n remove_answer_image(answer_id)\n try:\n os.remove(\"static/uploads/\" + current_image)\n except FileNotFoundError:\n pass", "def delete_image(self, index):\n if isinstance(index, int) == False or index > self.maximum_image_count:\n raise Exception(\n \"Index for deletion should be smaller integer than maximum_image_count\")\n # Delete the image from the image list by\n # poping the entry out of the dictionary!\n self.image_list.pop(index, None)", "def delete_image_tag(self, img, tag):\r\n return img.delete_tag(tag)", "def docker_registry_image_delete(self, context, image_name_and_tag):\n return self.call(context,\n self.make_msg('docker_registry_image_delete',\n image_name_and_tag=image_name_and_tag))", "def test_delete_image(self):\n image = self._create_image()\n\n with self.override_role():\n self.image_client.delete_image(image['id'])\n self.image_client.wait_for_resource_deletion(image['id'])", "def _delete_image(self, context, image_id, image_service) -> None:\n try:\n image_meta = image_service.show(context, image_id)\n image_status = image_meta.get('status')\n if image_status == 'queued' or image_status == 'saving':\n LOG.warning(\"Deleting image in unexpected status: \"\n \"%(image_status)s.\",\n {'image_status': image_status},\n resource={'type': 'image', 'id': image_id})\n image_service.delete(context, image_id)\n except Exception:\n LOG.warning(\"Image delete encountered an error.\",\n exc_info=True, resource={'type': 'image',\n 'id': image_id})", "def 
delete_backup(self, image):\n self._connection.deregister_image(image.id, True)", "def delete_metadata(full_path_to_img):\n piexif.remove(full_path_to_img, \"clean_image.jpg\")\n move(\"clean_image.jpg\", \"documents/clean_image.jpg\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert the PM value to AQI
def pm_to_aqi():
    df = pd.read_csv("./static/data/total_clean_data.csv")
    # Convert the timestamp to %Y%m%d format
    df.date = df.date.apply(lambda x: time.strftime("%Y%m%d", time.strptime(x, "%Y-%m-%d-%H")))
    print(df.date.head(5))
    df.set_index('date', drop=True)
    data1 = pd.DataFrame(df['PM2.5'].groupby([df['city'], df['date']]).mean())
    data1['AQI'] = ((500 - 400) / (500 - 350)) * (data1[['PM2.5']].values) + 400
    # data1_date = data1[['PM2.5']].index
    # data1_pm = data1[['PM2.5']].values
    return data1.to_csv('{}.csv'.format("pm2aqi"), encoding="utf8")
[ "def m_amu(self):\n a = physical_constants[\"alpha particle mass in u\"][0]\n p = physical_constants[\"proton mass in u\"][0]\n e = physical_constants[\"electron mass in u\"][0]\n out = {\"a\": a, \"p\": p, \"p1\": p, \"p2\": p, \"e\": e}\n return pd.Series({s: out[s] for s in self.stuple})", "def aqi(self) -> typing.Optional[int]:\n return pm25_to_aqi(self.pm25)", "def military_from_AMPM(h, ampm):\r\n if (h == 12):\r\n h = 0\r\n return h + ampm*12", "def test_put_muveto_pmt_item(self):\n pass", "def pmid(self, pmid):\n with TRN:\n self._lock_check()\n sql = \"\"\"UPDATE qiita.{0} SET pmid = %s\n WHERE analysis_id = %s\"\"\".format(self._table)\n TRN.add(sql, [pmid, self._id])\n TRN.execute()", "def transfer_alm(iainfo, ialm, oainfo, oalm=None, op=lambda a,b:b):\n\treturn cmisc.transfer_alm(ainfo, ialm, oainfo, oalm=oalm, op=op)", "def store_pam_attenuation(self, ant, pol, attenuation, verify=True):\n self.logger.info(\"Commanding (%s,%s) PAM atten=%s dB in redis\" \\\n % (str(ant), pol, str(attenuation)))\n self.r.hmset(\"atten:ant:%s:%s\" % (str(ant), pol),\n {\"commanded\": str(attenuation),\n \"command_time\": time.time()}\n )\n if verify:\n assert(self.lookup_pam_attenuation(ant, pol) == attenuation)", "def adjust_mana(self,ammount):\n self.mana += ammount", "def to_mido_tempo(tempo: Tempo) -> MetaMessage:\n return MetaMessage(\n \"set_tempo\", time=tempo.time, tempo=bpm2tempo(tempo.qpm),\n )", "def db_to_amplitude(db):\n return db_to_power(db / 2.0)", "def _p_to_m_on_basis(self, A):\n m = self.realization_of().m()\n one = self.base_ring().one()\n return m._from_dict({B: one for B in A.coarsenings()}, remove_zeros=False)", "def HR2_SetAsicGain(self,idif,iasic,vnew):\n for a in self.asiclist:\n if (idif != 0 and a[\"dif\"] != idif):\n continue\n if (iasic != 0 and a[\"num\"] != iasic):\n continue\n for ipad in range(0,64):\n a[\"slc\"][\"PAGAIN\"][ipad]=vnew\n a[\"_id\"]=None\n print(idif,iasic,ipad,a[\"slc\"][\"PAGAIN\"][ipad])", "def set_amplifier_mixer_amplification_control(self, value):\n self.serial.write('am%d\\r' % (value and 1 or 0))", "def copy_signal_madam(args, comm, data, sigcopy_madam):\n if sigcopy_madam is not None:\n if comm.comm_world.rank == 0:\n print('Making a copy of the TOD for Madam', flush=args.flush)\n sigcopy_madam.exec(data)\n\n return", "def set_OM_to_time(self):\n self.m.OM.value = time.Time(54000,format = 'mjd')", "def update_input(self, name, value):\r\n self.ptas[name][0] = value", "def HR2_ChangeGain(self,idif,iasic,ipad,scale):\n \n\n for a in self.asiclist:\n if (idif != 0 and a[\"dif\"] != idif):\n continue\n if (iasic != 0 and a[\"num\"] != iasic):\n continue\n\n a[\"slc\"][\"PAGAIN\"][ipad]=scale*a[\"slc\"][\"PAGAIN\"][ipad]\n a[\"_id\"]=None\n print(idif,iasic,ipad,a[\"slc\"][\"PAGAIN\"][ipad])", "def SetPMAM(self, meridian):\n\t\tself.DisableOscillator()\t\t\t\t\t\t#Diable clock\n\t\tPMAM_bit = self.readRegister(HOUR)\t\t\t\t#Read meridian bit\t\n\t\tif(meridian == PMAM_t.AMT):\n\t\t\tPMAM_bit &= ~PM\t\t\t\t\t\t\t\t#Set AM\n\t\telse:\n\t\t\tPMAM_bit |= PM\t\t\t\t\t\t\t\t#Set PM\n\t\t\n\t\tself.writeRegister(HOUR,PMAM_bit)\t\t\t\t#Update PM/AM meridian bit\n\t\tself.EnableOscillator();\t\t\t\t\t\t#Enable clock", "def set_amplitude(instrument, amplitude, unit='DBM', channel_num=1):\n command = ':SOURce%d:POWer:LEVel:IMMediate:AMPLitude %G %s' % (channel_num, amplitude, unit)\n instrument.write(command)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the maximum overlap between frag1 & frag2 and the overlap start position
def overlap(self, frag1, frag2):
    overlaps = []
    for i in range(len(frag2)):
        for j in range(len(frag1)):
            if frag1.endswith(frag2[:i + 1], j):
                if i >= 3:
                    overlaps.append((i, j))
    return max(overlaps) if overlaps else (0, -1)
[ "def getOverlap(a, b):\r\n return max(0, 1 + min(a[1], b[1]) - max(a[0], b[0]))", "def find_max_overlapping_fragments(fragments):\n pair = [-1, -1]\n overlap = 0\n\n def evaluate_pair(pair, overlap, p, o):\n if o > overlap:\n return p, o\n else:\n return pair, overlap\n\n for i in range(len(fragments)):\n for j in range(i+1, len(fragments)):\n for p in [[i, j], [j, i]]:\n pair, overlap = evaluate_pair(pair, overlap, p,\n compute_max_overlap(fragments[p[0]], fragments[p[1]]))\n\n return overlap, pair", "def getoverlap(p1, p1f, p2, p2m):\r\n w1, h1 = p1.image.size\r\n w2, h2 = p2.image.size\r\n fangle = findangle((w1 // 2, h1 // 2), p1f, (w1 // 2, h1)) # point pussy down\r\n mangle = findangle((w2 // 2, h2 // 2), p2m, (w2 // 2, 0)) # point penis up\r\n if p1f[0] > w1 // 2:\r\n fangle = 360 - fangle\r\n if p2m[0] < w2 // 2:\r\n mangle = 360 - mangle\r\n #print(fangle, mangle)\r\n\r\n p1organ = findpoint(p1, p1f, fangle, 0)\r\n p2organ = findpoint(p2, p2m, mangle, 1)\r\n #print(\"p1 old: \", p1f, \" p1: \", p1organ, \" p2 old: \", p2m, \" p2: \", p2organ)\r\n\r\n p2xoffset = p1organ[0] - p2organ[0]\r\n p2yoffset = p1organ[1] - p2organ[1]\r\n p1xoffset = 0\r\n p1yoffset = 0\r\n overlap = 0\r\n p1data = p1.bwmask.copy().rotate(fangle, expand=True).load()\r\n p2data = p2.bwmask.copy().rotate(mangle, expand=True).load()\r\n w1, h1 = p1.bwmask.copy().rotate(fangle, expand=True).size\r\n w2, h2 = p2.bwmask.copy().rotate(mangle, expand=True).size\r\n for x in range(0, w1-p2xoffset, 5):\r\n for y in range(0, h1-p2yoffset, 5):\r\n if x < w2 and y < h2:\r\n if p1data[x + p2xoffset, y + p2yoffset] == 1 and p2data[x, y] == 1:\r\n overlap += 1\r\n return overlap", "def overlap(start_1, end_1, start_2, end_2):\n return range(max(start_1, start_2),\n min(end_1, end_2) + 1)", "def _get_max_min(self, that):\n maxstart = max(self.start, that.start)\n minstop = min(self.stop, that.stop)\n return (maxstart, minstop)", "def getOverlap(ls1, ls2):\n\n overlap = 0\n for [st, stp] in ls1:\n overlap += getOverlap_runner(st, stp, ls2)\n return overlap", "def detectOverlap(projection1, projection2):\n\t\tmin1, max1 = projection1\n\t\tmin2, max2 = projection2\n\t\treturn max1 > min2 and min1 < max2", "def _welch_seg_bounds(pos, l_seg, p_overlap):\n step = l_seg - p_overlap * l_seg\n starts = np.arange(pos.iloc[0], pos.iloc[-1], step)\n ends = np.arange(pos.iloc[0]+l_seg, pos.iloc[-1], step)\n ends[-1] = pos.iloc[-1]\n\n return starts, ends", "def overlap_frag(p, overlap, fragsize=8, overlap_fragsize=None):\n\n if overlap_fragsize is None:\n overlap_fragsize = fragsize\n q = p.copy()\n del(q[IP].payload)\n q[IP].add_payload(overlap)\n\n qfrag = fragment(q, overlap_fragsize)\n qfrag[-1][IP].flags |= 1\n return qfrag + fragment(p, fragsize)", "def get_max_overlap(self, mismatch_list):\n\n pos = None # Ensures that 'None' is returned if there is no valid overlap\n for count, i in enumerate(mismatch_list):\n if i is 0:\n pos = count\n return pos", "def get_bounds( reads, start_pos_index, end_pos_index ):\n max_low = sys.maxint\n max_high = -sys.maxint\n for read in reads:\n if read[ start_pos_index ] < max_low:\n max_low = read[ start_pos_index ]\n if read[ end_pos_index ] > max_high:\n max_high = read[ end_pos_index ]\n return max_low, max_high", "def find_max_overlaps(\n rps: np.ndarray,\n rp_boxes: np.ndarray\n ) -> np.ndarray:\n a = np.maximum(rps[:, None, 0], rp_boxes[:, 0])\n c = np.minimum(rps[:, None, 2], rp_boxes[:, 2])\n max_par_index = np.argmax(c - a, axis=1)\n\n return max_par_index", "def 
calculate_overlap_durations(ranges_a, ranges_b):\n max_starts_matrix = np.maximum.outer(ranges_a[:, 0], ranges_b[:, 0])\n min_ends_matrix = np.minimum.outer(ranges_a[:, 1], ranges_b[:, 1])\n overlap_durations = np.maximum(0, min_ends_matrix - max_starts_matrix)\n return overlap_durations", "def compute_overlap(i: Rect, j: Rect):\n return 2 * abs((i & j).area()) / abs(i.area() + j.area())", "def overlap(str1, str2):\n\tlen1 = len(str1)\n\tlen2 = len(str2)\n\tmaxPossible = min(len(str1), len(str2))\n\tfor maxOver in range(maxPossible, 0, -1):\n\t\tif str1[:maxOver] == str2[len2 - maxOver:]:\n\t\t\treturn maxOver, str2, str1\n\t\telif str2[:maxOver] == str1[len1 - maxOver:]:\n\t\t\treturn maxOver, str1, str2\n\treturn 0, str1, str2", "def get_var_protein_bounds_from_genomics_bounds(self,genome_start,genome_end):\n\t\tstart_prot = None\n\t\tstart_prot_start = None\n\t\tend_prot = None\n\t\tend_prot_end = None\n\t\tfor prot,coords in self.reference_protein_locations.items():\n\t\t\tif (genome_start >= coords[0]) and (genome_start <= coords[1]):\n\t\t\t\tstart_prot = prot\n\t\t\t\tstart_prot_start = (genome_start - coords[0]) + 1\n\t\t\tif (genome_end >= coords[0]) and (genome_end <= coords[1]):\n\t\t\t\tend_prot = prot\n\t\t\t\tend_prot_end = (genome_end - coords[0]) + 1\n\t\treturn [start_prot,start_prot_start,end_prot,end_prot_end]", "def maximal_eligable_tss_position(cbg,organism):\n # take the first (and only) orf of this organism\n orf_of_org = cbg.get_orfs_of_graph(organism=organism)[0]\n omsr = cbg.overall_minimal_spanning_range(organism=organism)\n # calculate absolute aa and nt positions from where to take acceptors into account\n if ELIGABLE_ALIGNED_TSS_3P_AA_OFFSET == None:\n abs_aa_pos = orf_of_org.protein_endPY\n else:\n abs_aa_pos = min([ min(omsr)+ELIGABLE_ALIGNED_TSS_3P_AA_OFFSET, orf_of_org.protein_endPY ])\n abs_nt_pos = orf_of_org.proteinpos2dnapos(abs_aa_pos)\n return ( abs_aa_pos, abs_nt_pos )", "def get_overlap():\n proposed = 0\n for i in range(0,13):\n proposed += proposed_amounts[i] * staggering[i]\n return round(total - proposed - 100, 2)", "def calc_overlap(claim1, claim2):\n x1 = max(claim1.x, claim2.x)\n x2 = min((claim1.x + claim1.width), (claim2.x + claim2.width))\n y1 = max(claim1.y, claim2.y)\n y2 = min((claim1.y + claim1.height), (claim2.y + claim2.height))\n return [(x, y) for x in xrange(x1, x2) for y in xrange(y1, y2)]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Click at a sprite that is related to a game entity.
def click_at(self, sprite, player, zone, index, click_args=(0, 0, mouse.LEFT, 0)):
    _, _, buttons, _ = click_args

    game = self.board.ctrl.game
    seq = self.seq

    # Right click will clear all (if the sequence can reset).
    if buttons & mouse.RIGHT:
        if seq.can_reset:
            self.clear_all()
        return True

    entity = sprite.entity

    if zone is None:
        zone = entity.zone
    if player is None:
        player = game.get_player(entity.player_id)
    if zone != entity.zone or player.player_id != entity.player_id:
        from ...utils.message import warning
        warning('Click at zone {}, but sprite have zone {}'.format(
            Zone.repr_zp(entity.zone, entity.player_id),
            Zone.repr_zp(zone, player.player_id),
        ))

    # print('#In click entity')
    handled = False  # Is this click event handled?
    if seq.cursor is None:
        if entity.can_do_action(msg_fn=self._msg_fn) == entity.Inactive:
            pass
        else:
            seq.set_tree(entity.player_operation_tree())
            self.sel['source'] = entity
            sprite.on_mouse_release(*click_args)
            self.prepare_op()
            handled = True
            # print('#Create a new player operation sequence')
    else:
        op = seq.cursor_op
        # print('#Op:', PlayerOps.Idx2Str[op])

        if op == PlayerOps.ConfirmPlay:
            # Click at an entity when need to confirm play: just ignore it.
            pass
        elif op == PlayerOps.SelectTarget:
            # Click at an entity when need to select a target:
            # Validate it. If passed, add to selection and go to next op; else ignore it.
            if not validate_target(self.sel['source'], entity, self._msg_fn, po_data=self.sel):
                pass
            else:
                self.sel['target'] = entity
                self._next_operation()
                sprite.on_mouse_release(*click_args)
                handled = True
        elif op == PlayerOps.SelectChoice:
            # Click at an entity when need to select a choice:
            # Check if the entity is in the choice.
            # If in, add to selection and go to next op; else ignore it.
            choices = seq.get_choices()
            if entity not in choices:
                pass
            else:
                self.sel['choice.{}'.format(seq.cursor.title)] = entity
                # [NOTE]: Also store all choices, sometimes useful (e.g. "Tracking" need to discard (mill) them).
                self.sel['choice.{}.all'.format(seq.cursor.title)] = choices
                self._next_operation(entity)
                # [NOTE]: Does not call ``on_mouse_release`` here.
                handled = True
        elif op == PlayerOps.SelectMinionPosition:
            # Click at an entity when need to select a minion position: just ignore it.
            pass
        elif op == PlayerOps.SelectDefender:
            # Clicked at an entity when need to select a defender:
            # Validate it. If passed, add to selection and go to next op; else ignore it.
            if not self.sel['source'].check_defender(entity, self._msg_fn):
                pass
            else:
                self.sel['target'] = entity
                self._next_operation()
                sprite.on_mouse_release(*click_args)
                handled = True
        elif op == PlayerOps.Run:
            # Processed in ``_maybe_run``.
            handled = True
        else:
            raise ValueError('Unknown or not implemented op {}'.format(op))
        # print('#Current player operation sequence:', seq)

    self._maybe_run(game)
    return handled
[ "def whenClicked(self, codeObj, block):\n scriptNum = codeObj.getNextScriptId()\n cbName = 'whenSpriteClickedCb' + str(scriptNum)\n codeObj.addToCode(genIndent(2) + 'whenSpriteClicked(\"' + cbName + '\");\\n')\n\n # Generate callback code, into the codeObj's cbCode string.\n # Add two blank lines before each method definition.\n cbStr = \"\\n\\n\" + genIndent(1) + \"public void \" + cbName + \\\n \"(Sequence s)\\n\"\n cbStr += self.topBlock(1, block) + \"\\n\" # add blank line after defn.\n codeObj.addToCbCode(cbStr)", "def on_mouse_click(self):\n base.graphicsEngine.render_frame()\n p=PNMImage(1, 1,4)\n base.graphicsEngine.extract_texture_data(self.mouse_tex, base.win.getGsg())\n self.mouse_tex.store(p)\n c=p.getXelA(0,0)\n id=self.color_to_id(c)\n if id != 0 and id == self.last_mouse_down_id:\n if id in self.click_commands:\n self.click_commands[id]()", "def at_target(self, target_pos):\n x = (174 * (target_pos - 1)) + 130\n y = 50\n self.click(x, y, delay=.2)\n return self", "def click(widget, view_index=None):\n pos = center(widget, view_index)\n robouser.click(pos)", "def do_onclick(self, click_command, abs_mouse_pos, offset):\n if click_command == 2:\n raise Exception(\"Invalid onclick\")\n else:\n image = self.image\n display_name = self.name\n lv = self.level\n cost = self.cost\n dmg = self.bullet[0]\n acckfreq = round(1/self.attkFreq, 2)\n tower_range = self.attk_range\n level_up_cost = self.upgrade_cost\n loc = find_point_loc(self.get_relCenter(), None)\n data = image, display_name, lv, cost, dmg, acckfreq, tower_range, level_up_cost, loc\n return CHANGE_MENU, (LAYER_TOWER_MENU, data)", "def button_click(self, button, x, y):\n if self.board.place(x, y, self.board.player):\n self.visual_place(button)\n\n if not self.board.is_multiplayer:\n actions = self.board.get_actions()\n action = self.ai.take_action(actions)\n (ai_x, ai_y, ai_player) = action\n self.board.place(ai_x, ai_y, ai_player)\n\n ai_button = self.buttons[ai_y][ai_x]\n self.visual_place(ai_button)", "def click(self,boton):\n cadena= \"el argumento 'boton' no es igual a: ('izquierdo', 'medio', 'derecho', 4, 5, 6, 7)\"\n assert boton in self.mapa_botones_mouse.keys(),cadena\n boton = self.mapa_botones_mouse[boton]\n fake_input(self._display, X.ButtonPress, boton)\n self._display.sync()\n fake_input(self._display, X.ButtonRelease, boton)\n self._display.sync()", "def draw_entity(self, entity: EntityImage):\r\n self._screen.blit(entity[0], entity[1])", "def clickScreen(driver, top_left):\n try:\n #myElem = WebDriverWait(driver, delay).until(EC.element_to_be_clickable((By.CLASS_NAME, 'game')))\n game_element = driver.find_element_by_class_name(\"game\")\n myElem = game_element\n action = webdriver.common.action_chains.ActionChains(driver)\n action.move_to_element_with_offset(myElem, top_left[0] + 50, top_left[1] + 50)\n action.click()\n action.perform()\n #print(\"Action Performed!\")\n except TimeoutException:\n print(\"Loading took too much time!\")", "def id_click(elem_id):\n world.css_click('#%s' % elem_id)", "def click_gift_entry_button(self,title):\n locator=npsp_lex_locators[\"gift_entry\"][\"button\"].format(title)\n self.selenium.wait_until_page_contains_element(locator)\n self.selenium.click_element(locator)", "def perform_mouse_click_event(position_dict):\n\t# status\n\tprint(\"I'm ready...let the game begin...\")\n\t# parse through the position_dict\n\tfor num in sorted(position_dict.keys()):\n\t\t# extract the co-ordinates\n\t\tx, y = position_dict[num]\n\t\t# status\n\t\tprint(\"Going for number \", num, \" 
at x:\", x, \" y: \", y)\n\t\t# move the curser and click\n\t\tpyautogui.moveTo(x, y)\n\t\tpyautogui.click(x,y)", "def hover_and_click(self, locator_hover, locator_click):\r\n ActionChains(self.driver).move_to_element(self.find_element(locator_hover)).perform()\r\n self.click(locator_click)", "def click(self, obj, wait_page_loaded = False):\n msg = {\"command\": \"click\", \"obj\": obj,\n \"notifyPageLoaded\": wait_page_loaded}\n self._send_message(msg)\n self._receive_result()", "def click(self, locator):\r\n self.find_element(locator).click()", "def click(self, jquery=False):\n if jquery:\n e = JQuery(self)\n e.click()\n else:\n super(Clickable, self).click()", "def click( self, (x,y) ):\r\n for k in self.keys:\r\n if k.rect==None: continue\r\n x0,y0,w,h = k.rect\r\n if x >= x0 and x < x0+w and y >= y0 and y < y0+h:\r\n k.toggle_active()\r\n return k", "def flick(self, start_x, start_y, end_x, end_y):\n self._selenium_web_driver().flick(start_x, start_y, end_x, end_y)", "def tableClick(self, x, y):\n self.selectRow(x)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Click at space in the play zone. This method is usually used for summoning minions.
def click_at_space(self, player, index, click_args):
    _, _, buttons, _ = click_args

    game = self.board.ctrl.game
    seq = self.seq
    player_id = player.player_id

    # Right click will clear all (if the sequence can reset).
    if buttons & mouse.RIGHT:
        if seq.can_reset:
            self.clear_all()
        return True

    # print('#In click space')
    handled = False  # Is this click event handled?
    if seq.cursor is None:
        # If no sequence (idle), do nothing.
        handled = True
    else:
        op = seq.cursor_op
        # print('#Op:', PlayerOps.Idx2Str[op])

        if op == PlayerOps.ConfirmPlay:
            # Click at space when need to confirm play: add to selection and go to next op.
            if not validate_target(self.sel['source'], None, self._msg_fn):
                pass
            else:
                self._next_operation()
                handled = True
        elif op == PlayerOps.SelectTarget:
            # Click at space when need to select a target: notice that must select a target.
            self._msg_fn('Must select a target!')
            handled = True
        elif op == PlayerOps.SelectChoice:
            # Click at space when need to select a choice: just ignore it.
            pass
        elif op == PlayerOps.SelectMinionPosition:
            # Click at space when need to select a minion position:
            # If not in my board, ignore it;
            # ([NOTE]: this restriction can be relaxed to support some DIY minions
            # that can be played into enemies' board)
            # else add to selection and go to next op.
            if player_id != game.current_player:
                pass
            else:
                self.board.add_loc_stub(player_id, index)
                self.sel['index'] = index
                self._next_operation()
                handled = True
        elif op == PlayerOps.SelectDefender:
            # Clicked at space when need to select a defender: just ignore it.
            pass
        elif op == PlayerOps.Run:
            # Processed in ``_maybe_run``.
            handled = True
        else:
            raise ValueError('Unknown or not implemented op {}'.format(op))
        # print('#Player operation sequence:', seq)

    self._maybe_run(game)
    return handled
[ "def button_click(self, button, x, y):\n if self.board.place(x, y, self.board.player):\n self.visual_place(button)\n\n if not self.board.is_multiplayer:\n actions = self.board.get_actions()\n action = self.ai.take_action(actions)\n (ai_x, ai_y, ai_player) = action\n self.board.place(ai_x, ai_y, ai_player)\n\n ai_button = self.buttons[ai_y][ai_x]\n self.visual_place(ai_button)", "def click(self,boton):\n cadena= \"el argumento 'boton' no es igual a: ('izquierdo', 'medio', 'derecho', 4, 5, 6, 7)\"\n assert boton in self.mapa_botones_mouse.keys(),cadena\n boton = self.mapa_botones_mouse[boton]\n fake_input(self._display, X.ButtonPress, boton)\n self._display.sync()\n fake_input(self._display, X.ButtonRelease, boton)\n self._display.sync()", "def clickScreen(driver, top_left):\n try:\n #myElem = WebDriverWait(driver, delay).until(EC.element_to_be_clickable((By.CLASS_NAME, 'game')))\n game_element = driver.find_element_by_class_name(\"game\")\n myElem = game_element\n action = webdriver.common.action_chains.ActionChains(driver)\n action.move_to_element_with_offset(myElem, top_left[0] + 50, top_left[1] + 50)\n action.click()\n action.perform()\n #print(\"Action Performed!\")\n except TimeoutException:\n print(\"Loading took too much time!\")", "def move_to(self):\n #self.find_wall()\n \n t = self.find_best_way()\n if t:\n click(t)\n else:\n click(random.choice(locations))", "def __on_start_click_event(self):\r\n self.frames[\"game\"].tkraise()", "def click_menu(self):\n pass", "def click_at(self, sprite, player, zone, index, click_args=(0, 0, mouse.LEFT, 0)):\n\n _, _, buttons, _ = click_args\n\n game = self.board.ctrl.game\n seq = self.seq\n\n # Right click will clear all (if the sequence can reset).\n if buttons & mouse.RIGHT:\n if seq.can_reset:\n self.clear_all()\n return True\n\n entity = sprite.entity\n\n if zone is None:\n zone = entity.zone\n if player is None:\n player = game.get_player(entity.player_id)\n if zone != entity.zone or player.player_id != entity.player_id:\n from ...utils.message import warning\n warning('Click at zone {}, but sprite have zone {}'.format(\n Zone.repr_zp(entity.zone, entity.player_id),\n Zone.repr_zp(zone, player.player_id),\n ))\n\n # print('#In click entity')\n handled = False # Is this click event handled?\n if seq.cursor is None:\n if entity.can_do_action(msg_fn=self._msg_fn) == entity.Inactive:\n pass\n else:\n seq.set_tree(entity.player_operation_tree())\n self.sel['source'] = entity\n sprite.on_mouse_release(*click_args)\n self.prepare_op()\n handled = True\n # print('#Create a new player operation sequence')\n else:\n op = seq.cursor_op\n # print('#Op:', PlayerOps.Idx2Str[op])\n\n if op == PlayerOps.ConfirmPlay:\n # Click at an entity when need to confirm play: just ignore it.\n pass\n elif op == PlayerOps.SelectTarget:\n # Click at an entity when need to select a target:\n # Validate it. If passed, add to selection and go to next op; else ignore it.\n if not validate_target(self.sel['source'], entity, self._msg_fn, po_data=self.sel):\n pass\n else:\n self.sel['target'] = entity\n self._next_operation()\n sprite.on_mouse_release(*click_args)\n handled = True\n elif op == PlayerOps.SelectChoice:\n # Click at an entity when need to select a choice:\n # Check if the entity is in the choice.\n # If in, add to selection and go to next op; else ignore it.\n choices = seq.get_choices()\n if entity not in choices:\n pass\n else:\n self.sel['choice.{}'.format(seq.cursor.title)] = entity\n # [NOTE]: Also store all choices, sometimes useful (e.g. 
\"Tracking\" need to discard (mill) them).\n self.sel['choice.{}.all'.format(seq.cursor.title)] = choices\n self._next_operation(entity)\n # [NOTE]: Does not call ``on_mouse_release`` here.\n handled = True\n elif op == PlayerOps.SelectMinionPosition:\n # Click at an entity when need to select a minion position: just ignore it.\n pass\n elif op == PlayerOps.SelectDefender:\n # Clicked at an entity when need to select a defender:\n # Validate it. If passed, add to selection and go to next op; else ignore it.\n if not self.sel['source'].check_defender(entity, self._msg_fn):\n pass\n else:\n self.sel['target'] = entity\n self._next_operation()\n sprite.on_mouse_release(*click_args)\n handled = True\n elif op == PlayerOps.Run:\n # Processed in ``_maybe_run``.\n handled = True\n else:\n raise ValueError('Unknown or not implemented op {}'.format(op))\n # print('#Current player operation sequence:', seq)\n\n self._maybe_run(game)\n return handled", "def button_press(self, x, y, button):\n base._Widget.button_press(self, x, y, button)\n if button == 1:\n icon = self.get_icon_in_position(x, y)\n if icon is not None:\n cmd = self.progs[icon][\"cmd\"]\n if cmd.startswith(\"qshell:\"):\n exec(cmd[7:].lstrip())\n else:\n self.qtile.spawn(cmd)\n self.draw()", "def jump(self): \n if not self.isTopItem():\n self.newChatMessage(\"No puedo saltar con tanto peso\", 1)\n return\n self.triggerEvent('jump', position=self.getPosition())", "def at_target(self, target_pos):\n x = (174 * (target_pos - 1)) + 130\n y = 50\n self.click(x, y, delay=.2)\n return self", "def on_mouse_click(self):\n base.graphicsEngine.render_frame()\n p=PNMImage(1, 1,4)\n base.graphicsEngine.extract_texture_data(self.mouse_tex, base.win.getGsg())\n self.mouse_tex.store(p)\n c=p.getXelA(0,0)\n id=self.color_to_id(c)\n if id != 0 and id == self.last_mouse_down_id:\n if id in self.click_commands:\n self.click_commands[id]()", "def double_click(self):\n self.node.double_click()", "def board_clicked(self, goban_coord):\r\n self.model.play_move(goban_coord)\r\n self.view.show_position(self.model.goban)\r\n return\r\n\r\n if self.model.turn != 'B':\r\n return\r\n try:\r\n self.model.play_move(goban_coord)\r\n # Inform leela of the move played\r\n self.engine_white.playmove('B', goban_coord)\r\n self.view.show_info('Playing against\\nLeela')\r\n self.engine_white.genmove(self.model.turn)\r\n except ModelError as e:\r\n print(\"Error when playing at \" + str(goban_coord) + \" : \" + str(e))\r\n self.view.show_info(str(e))\r\n self.view.show_position(self.model.goban)", "def simulateCutKeystroke( self ):\n \n ContextUtils.typeCommandKey( \"x\" )", "def do_onclick(self, click_command, abs_mouse_pos, offset):\n if click_command == 2:\n raise Exception(\"Invalid onclick\")\n else:\n image = self.image\n display_name = self.name\n lv = self.level\n cost = self.cost\n dmg = self.bullet[0]\n acckfreq = round(1/self.attkFreq, 2)\n tower_range = self.attk_range\n level_up_cost = self.upgrade_cost\n loc = find_point_loc(self.get_relCenter(), None)\n data = image, display_name, lv, cost, dmg, acckfreq, tower_range, level_up_cost, loc\n return CHANGE_MENU, (LAYER_TOWER_MENU, data)", "def right_click(self):\n pass", "def on_click_down(self, event):\n print(\"Clicked down with tool {}\".format(self.text))", "def perform_mouse_click_event(position_dict):\n\t# status\n\tprint(\"I'm ready...let the game begin...\")\n\t# parse through the position_dict\n\tfor num in sorted(position_dict.keys()):\n\t\t# extract the co-ordinates\n\t\tx, y = position_dict[num]\n\t\t# 
status\n\t\tprint(\"Going for number \", num, \" at x:\", x, \" y: \", y)\n\t\t# move the curser and click\n\t\tpyautogui.moveTo(x, y)\n\t\tpyautogui.click(x,y)", "def click_gift_entry_button(self,title):\n locator=npsp_lex_locators[\"gift_entry\"][\"button\"].format(title)\n self.selenium.wait_until_page_contains_element(locator)\n self.selenium.click_element(locator)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
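The click_at_space query in the row above is easier to follow once its dispatch pattern is isolated: a cursor operation drawn from an enum decides what a left click at empty space means, and a right click resets the pending sequence. The tiny, self-contained sketch below only illustrates that shape; the enum member names mirror the ops in the row, but the PlayerOps class, handle_click_at_space, and the return strings are invented here and are not part of the dataset's code.

from enum import Enum, auto


class PlayerOps(Enum):
    # Stand-in operation cursor; member names mirror the ops seen in the row above.
    ConfirmPlay = auto()
    SelectTarget = auto()
    SelectChoice = auto()
    Run = auto()


def handle_click_at_space(op, right_click=False, can_reset=True):
    # Right click resets the pending sequence when allowed; left clicks are routed by op.
    if right_click:
        return 'reset' if can_reset else 'ignored'
    if op is PlayerOps.ConfirmPlay:
        return 'confirm and advance'
    if op is PlayerOps.SelectTarget:
        return 'must select a target'
    if op is PlayerOps.SelectChoice:
        return 'ignored'
    if op is PlayerOps.Run:
        return 'run'
    raise ValueError('Unknown op {}'.format(op))


assert handle_click_at_space(PlayerOps.SelectTarget) == 'must select a target'
assert handle_click_at_space(PlayerOps.SelectChoice, right_click=True) == 'reset'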
Prepare the operation. For common operations (ConfirmPlay, SelectTarget, etc.), do nothing. For select-choice operations, create a select dialog.
def prepare_op(self):
    if self.seq.cursor_op == PlayerOps.SelectChoice:
        from .utils.basic import Colors, pos, alpha_color
        from .utils.layers import SelectChoiceLayer
        from .card_sprite import HandSprite

        DW, DH = 0.9, 0.6
        choices = self.seq.get_choices()
        choice_sprites = [
            HandSprite(
                card, (0, 0), scale=0.6,
                callback=self._select_choice_callback,
                self_in_callback=True,
                sel_mgr_kwargs={'set_default': False})
            for card in choices]
        layer_ = SelectChoiceLayer(
            alpha_color(Colors['black'], 150), *map(int, pos(DW, DH)),
            position=pos((1 - DW) / 2, (1 - DH) / 2),
            border=True, sel_mgr=self, cancel=self.seq.can_reset,
            choices=choice_sprites)
        layer_.add_to_scene(self.board.parent)
        # TODO: Create a select dialog
[ "def dialog_init(self, *args, **kwargs):\n pass", "def __do_step_choose_opp(self):\r\n params = self._prepare_values_to_be_rendered()\r\n params.instruction = \"Want to play against human or machine?\"\r\n params.options.update({\r\n Commands.CHOOSE_OPP_AS_HUMAN: \"Human\",\r\n Commands.CHOOSE_OPP_AS_MACHINE: \"Machine\",\r\n })\r\n self._gui.print_screen(params)\r\n\r\n input = self._read_input()\r\n\r\n if not self._handle_common_inputs(input, params.options):\r\n if input == Commands.CHOOSE_OPP_AS_HUMAN:\r\n self._state.game.machine = None\r\n self._state.activity = States.CHOOSE_PLAYER_ORDER\r\n self._state.feedback = \"You have chosen to play \" + \\\r\n \"against another human.\"\r\n\r\n elif input == Commands.CHOOSE_OPP_AS_MACHINE:\r\n self._state.activity = States.CHOOSE_MACHINE_STRATEGY\r\n self._state.feedback = \"You have chosen to play \" + \\\r\n \"against the machine.\"", "def run_operation_dialog(self, operation):\n\n operation_dialog = self.DIALOG_OPERATIONS[operation](self)\n\n if operation_dialog.exec():\n self.data = operation_dialog.img_data", "def _options_dialog(*args, **kwargs) -> Any:\n pass", "def dialogAccept(self):\n self.startSetup()\n self.enableSetup()", "def _options_dialog_toolbox(*args, **kwargs) -> Any:\n pass", "def _prepare_browse_options(self, options):\r\n self._set_defaults(options, self._default_browse_options)\r\n return options", "def create_dialog(self):\n\n frame = super().create_dialog(\"Edit Forcefield Step\")\n\n # Create the widgets\n P = self.node.parameters\n for key in P:\n self[key] = P[key].widget(frame)\n\n # bindings...\n self[\"task\"].combobox.bind(\"<<ComboboxSelected>>\", self.reset_dialog)\n self[\"task\"].config(state=\"readonly\")\n\n self[\"forcefield_file\"].combobox.bind(\"<<ComboboxSelected>>\", self.reset_dialog)\n self[\"forcefield_file\"].combobox.bind(\"<Return>\", self.reset_dialog)\n self[\"forcefield_file\"].combobox.bind(\"<FocusOut>\", self.reset_dialog)\n\n # and set it up the first time\n self.reset_dialog()", "def _do_prepare(self):\n log.debug(\"Preparing...\")\n preparation_result = self.do_prepare()\n if preparation_result is PREPARATION_FAILED:\n self.prepare_failed()\n elif preparation_result is PREPARATION_NOT_YET_COMPLETE:\n log.debug(\"do_prepare reports preparation not yet complete\")\n else:\n self.prepare_successful()", "def __do_step_choose_player_order(self):\r\n params = self._prepare_values_to_be_rendered()\r\n params.instruction = \"Which player shall start first?\"\r\n params.options.update({\r\n Commands.CHOOSE_FIRST_PLAYER_ME: \"if you want to start\",\r\n Commands.CHOOSE_FIRST_PLAYER_OPPONENT: \"if opponent shall start\",\r\n })\r\n self._gui.print_screen(params)\r\n\r\n input = self._read_input()\r\n\r\n if not self._handle_common_inputs(input, params.options):\r\n if input == Commands.CHOOSE_FIRST_PLAYER_ME:\r\n self._state.game.player_to_start = Player.USER\r\n self._state.activity = States.START_GAME\r\n\r\n elif input == Commands.CHOOSE_FIRST_PLAYER_OPPONENT:\r\n self._state.game.player_to_start = Player.OPP\r\n self._state.activity = States.START_GAME", "def setStartingValues(self):\n if self.choosen == 0:\n self.function = self.function_i_DropwDownNew.currentText()\n else:\n self.function = self.function_v_DropwDownNew.currentText()\n self.initParametersDialog.close()", "def InitUI(self):\n if self.data_type in ['orient', 'ages']:\n belongs_to = []\n else:\n parent_table_name = self.parent_type + \"s\"\n if parent_table_name in self.contribution.tables:\n belongs_to = 
sorted(self.contribution.tables[parent_table_name].df.index.unique())\n else:\n belongs_to = []\n\n self.choices = {}\n if self.data_type in ['specimens', 'samples', 'sites']:\n self.choices = {1: (belongs_to, False)}\n if self.data_type == 'orient':\n self.choices = {1: (['g', 'b'], False)}\n if self.data_type == 'ages':\n for level in ['specimen', 'sample', 'site', 'location']:\n if level in self.grid.col_labels:\n level_names = []\n if level + \"s\" in self.contribution.tables:\n level_names = list(self.contribution.tables[level+\"s\"].df.index.unique())\n num = self.grid.col_labels.index(level)\n self.choices[num] = (level_names, False)\n # Bind left click to drop-down menu popping out\n self.grid.Bind(wx.grid.EVT_GRID_CELL_LEFT_CLICK,\n lambda event: self.on_left_click(event, self.grid, self.choices))\n\n cols = self.grid.GetNumberCols()\n col_labels = [self.grid.GetColLabelValue(col) for col in range(cols)]\n\n # check if any additional columns have controlled vocabularies\n # if so, get the vocabulary list\n for col_number, label in enumerate(col_labels):\n self.add_drop_down(col_number, label)", "def activate_action_buttons(self,selected_partition):\n\t\t\n\t\tpartition_device = selected_partition[0]\n\t\t\n\t\tif partition_device == None and selected_partition[1] != _(\"free space\"):\n\t\t\tself.deactivate_all_options()\n\t\t\treturn\n\t\t\n\t\telif selected_partition[1] == _(\"free space\"):\t\t\t\n\t\t\t\n\t\t\tself.deactivate_all_options()\n\t\t\tself.activate_options([\"add\"])\n\t\t\n\t\telif selected_partition[2] in [\"extended\", \"lvmvg\", \"lvmpv\"] and partition_device.isleaf:\n\t\t\t\n\t\t\tself.deactivate_all_options()\n\t\t\tself.activate_options([\"delete\"])\n\t\t\n\t\telse:\n\t\t\tself.deactivate_all_options()\n\t\t\t\n\t\t\tif partition_device.format.type == None and selected_partition[2] not in [\"extended\", \"lvmvg\"]:\n\t\t\t\tself.activate_options([\"delete\"])\n\t\t\t\t\t\n\t\t\tif partition_device.format.type == \"luks\" and partition_device.kids == 0:\n\t\t\t\tself.activate_options([\"delete\"])\n\t\t\t\t\t\n\t\t\tif partition_device.format.type == \"luks\" and not partition_device.format.status:\n\t\t\t\tself.activate_options([\"decrypt\"])\n\t\t\t\n\t\t\tif self.kickstart_mode:\n\t\t\t\n\t\t\t\tif partition_device.format.mountable:\n\t\t\t\t\tself.activate_options([\"delete\", \"edit\"])\n\t\t\t\t\n\t\t\t\tif partition_device.format.type == \"swap\":\n\t\t\t\t\tself.activate_options([\"delete\"])\n\t\t\t\n\t\t\telse:\n\t\t\t\tif partition_device.format.mountable and partition_mounted(partition_device.path) == None:\n\t\t\t\t\tself.activate_options([\"delete\", \"edit\"])\n\t\t\t\t\n\t\t\t\tif partition_device.format.type == \"swap\" and swap_is_on(partition_device.sysfsPath) == False:\n\t\t\t\t\tself.activate_options([\"delete\"])\n\t\t\t\t\t\n\t\t\t\tif partition_device.format.mountable and partition_mounted(partition_device.path) != None:\n\t\t\t\t\tself.activate_options([\"unmount\"])", "def control_if_empty(self):\n if self.user_question == \"\": # if input is empty\n self.user_interaction.response_from_papybot = GRANDPY_BOT_QUESTION_EMPTY\n self.list_dialog.extend([self.user_question, self.user_interaction.response_from_papybot])\n self.loop = False\n self.case = 1\n else:\n self.user_interaction.modification_process(self.user_question)", "async def _comm_prepare(self):\n pass", "def make_choice(self):\n # To take in and identify the player's choice from the webcam'\n self.new_choice.identify_player_choice()\n # Make a computer choice\n 
self.new_choice.take_computer_choice()\n # Compare these choices\n self.new_choice.compare_choices()\n # Set the choices to variables\n self.displayed_player_score = self.new_choice.player_score\n self.displayed_computer_choice = self.new_choice.computer_choice", "def presetSelected(self):\n choice = self.presetListButton.selectedChoice\n if choice == 'Remove Presets':\n self.removePreset()\n elif choice == 'Load Preset':\n return\n else:\n self.tool.loadBrushPreset(choice)\n self.tool.showPanel()", "def dialog_handler_cb(self, item, data) -> None:\n # Dialog box initialization event\n if item == KDialogInitEvent:\n vs.SetItemText(self.dialog, self.kWidgetID_fileName, self.parameters.excelFileName)\n # vs.SetItemText(self.dialog, self.kWidgetID_imageFolderName, self.settings.imageFolderName)\n\n vs.ShowItem(self.dialog, self.kWidgetID_excelSheetNameLabel, False)\n vs.ShowItem(self.dialog, self.kWidgetID_excelSheetName, False)\n self.show_parameters(False)\n\n vs.EnableItem(self.dialog, self.kWidgetID_importButton, False)\n vs.EnableItem(self.dialog, self.kWidgetID_importNewCount, False)\n vs.EnableItem(self.dialog, self.kWidgetID_importUpdatedCount, False)\n vs.EnableItem(self.dialog, self.kWidgetID_importDeletedCount, False)\n\n elif item == self.kWidgetID_fileName:\n self.parameters.excelFileName = vs.GetItemText(self.dialog, self.kWidgetID_fileName)\n\n elif item == self.kWidgetID_fileBrowseButton:\n result, self.parameters.excelFileName = vs.GetFileN(\"Open Excel file\", \"\", \"xlsm\")\n if result:\n vs.SetItemText(self.dialog, self.kWidgetID_fileName, self.parameters.excelFileName)\n\n elif item == self.kWidgetID_excelSheetName:\n new_excel_sheet_name = vs.GetChoiceText(self.dialog, self.kWidgetID_excelSheetName, data)\n if self.parameters.excelSheetName != new_excel_sheet_name:\n self.parameters.excelSheetName = new_excel_sheet_name\n self.show_parameters(False)\n if data != 0:\n self.show_parameters(True)\n\n elif item == self.kWidgetID_withImageSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_withImage, data == 0)\n self.parameters.withImageSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_withImageSelector, data)\n elif item == self.kWidgetID_withImage:\n self.parameters.pictureParameters.withImage = \"{}\".format(data != 0)\n # elif item == self.kWidgetID_imageFolderName:\n # self.settings.imageFolderName = vs.GetItemText(\n # self.dialog, self.kWidgetID_imageFolderName)\n # elif item == self.kWidgetID_imageFolderBrowseButton:\n # result, self.settings.imageFolderName = vs.GetFolder(\"Select the images folder\")\n # if result == 0:\n # vs.SetItemText(self.dialog, self.kWidgetID_imageFolderName, self.settings.imageFolderName)\n elif item == self.kWidgetID_imageTextureSelector:\n self.parameters.imageTextureSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_withImageSelector, data)\n elif item == self.kWidgetID_imageWidthSelector:\n self.parameters.imageWidthSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_imageWidthSelector, data)\n elif item == self.kWidgetID_imageHeightSelector:\n self.parameters.imageHeightSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_imageHeightSelector, data)\n elif item == self.kWidgetID_imagePositionSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_imagePosition, data == 0)\n self.parameters.imagePositionSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_imagePositionSelector, data)\n elif item == self.kWidgetID_imagePosition:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_imagePosition, 3)\n if 
valid:\n self.parameters.pictureParameters.imagePosition = str(value)\n elif item == self.kWidgetID_withFrameSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_withFrame, data == 0)\n self.parameters.withFrameSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_withFrameSelector, data)\n elif item == self.kWidgetID_withFrame:\n self.parameters.pictureParameters.withFrame = \"{}\".format(data != 0)\n elif item == self.kWidgetID_frameWidthSelector:\n self.parameters.frameWidthSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_frameWidthSelector, data)\n elif item == self.kWidgetID_frameHeightSelector:\n self.parameters.frameHeightSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_frameHeightSelector, data)\n elif item == self.kWidgetID_frameThicknessSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_frameThickness, data == 0)\n self.parameters.frameThicknessSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_frameThicknessSelector, data)\n elif item == self.kWidgetID_frameThickness:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_frameThickness, 3)\n if valid:\n self.parameters.pictureParameters.frameThickness = str(value)\n elif item == self.kWidgetID_frameDepthSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_frameDepth, data == 0)\n self.parameters.frameDepthSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_frameDepthSelector, data)\n elif item == self.kWidgetID_frameDepth:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_frameDepth, 3)\n if valid:\n self.parameters.pictureParameters.frameDepth = str(value)\n elif item == self.kWidgetID_frameClassSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_frameClass, data == 0)\n self.parameters.frameClassSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_frameClassSelector, data)\n elif item == self.kWidgetID_frameClass:\n index, self.parameters.pictureParameters.frameClass = vs.GetSelectedChoiceInfo(self.dialog, self.kWidgetID_frameClass, 0)\n elif item == self.kWidgetID_frameTextureScaleSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_frameTextureScale, data == 0)\n self.parameters.frameTextureScaleSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_frameTextureScaleSelector, data)\n elif item == self.kWidgetID_frameTextureScale:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_frameTextureScale, 1)\n if valid:\n self.parameters.pictureParameters.frameTextureScale = str(value)\n elif item == self.kWidgetID_frameTextureRotationSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_frameTextureRotation, data == 0)\n self.parameters.frameTextureRotationSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_frameTextureRotationSelector, data)\n elif item == self.kWidgetID_frameTextureRotation:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_frameTextureRotation, 1)\n if valid:\n self.parameters.pictureParameters.frameTextureRotation = str(value)\n elif item == self.kWidgetID_withMatboardSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_withMatboard, data == 0)\n self.parameters.withMatboardSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_withMatboardSelector, data)\n elif item == self.kWidgetID_withMatboard:\n self.parameters.pictureParameters.withMatboard = \"{}\".format(data != 0)\n elif item == self.kWidgetID_matboardPositionSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_matboardPosition, data == 0)\n self.parameters.matboardPositionSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_matboardPositionSelector, data)\n elif item == 
self.kWidgetID_windowWidthSelector:\n self.parameters.windowWidthSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_windowWidthSelector, data)\n elif item == self.kWidgetID_windowHeightSelector:\n self.parameters.windowHeightSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_windowHeightSelector, data)\n elif item == self.kWidgetID_matboardPosition:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_matboardPosition, 3)\n if valid:\n self.parameters.pictureParameters.matboardPosition = str(value)\n elif item == self.kWidgetID_matboardClassSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_matboardClass, data == 0)\n self.parameters.matboardClassSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_matboardClassSelector, data)\n elif item == self.kWidgetID_matboardClass:\n index, self.parameters.pictureParameters.matboardClass = vs.GetSelectedChoiceInfo(self.dialog, self.kWidgetID_matboardClass, 0)\n elif item == self.kWidgetID_matboardTextureScaleSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureScale, data == 0)\n self.parameters.matboardTextureScaleSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_matboardTextureScaleSelector, data)\n elif item == self.kWidgetID_matboardTextureScale:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_matboardTextureScale, 1)\n if valid:\n self.parameters.pictureParameters.matboardTextureScale = str(value)\n elif item == self.kWidgetID_matboardTextureRotatSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureRotat, data == 0)\n self.parameters.matboardTextureRotatSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_matboardTextureRotatSelector, data)\n elif item == self.kWidgetID_matboardTextureRotat:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_matboardTextureRotat, 1)\n if valid:\n self.parameters.pictureParameters.matboardTextureRotat = str(value)\n elif item == self.kWidgetID_withGlassSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_withGlass, data == 0)\n self.parameters.withGlassSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_withGlassSelector, data)\n elif item == self.kWidgetID_withGlass:\n self.parameters.pictureParameters.withGlass = \"{}\".format(data != 0)\n elif item == self.kWidgetID_glassPositionSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_glassPosition, data == 0)\n self.parameters.glassPositionSelector = vs.GetChoiceText(\n self.dialog, self.kWidgetID_glassPositionSelector, data)\n elif item == self.kWidgetID_glassPosition:\n valid, value = vs.GetEditReal(self.dialog, self.kWidgetID_glassPosition, 3)\n if valid:\n self.parameters.pictureParameters.glassPosition = str(value)\n elif item == self.kWidgetID_glassClassSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_glassClass, data == 0)\n self.parameters.glassClassSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_glassClassSelector, data)\n elif item == self.kWidgetID_glassClass:\n index, self.parameters.pictureParameters.glassClass = vs.GetSelectedChoiceInfo(self.dialog, self.kWidgetID_glassClass, 0)\n elif item == self.kWidgetID_excelCriteriaSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_excelCriteriaValue, data != 0)\n new_excel_criteria_selector = vs.GetChoiceText(self.dialog, self.kWidgetID_excelCriteriaSelector, data)\n if new_excel_criteria_selector != self.parameters.excelCriteriaSelector:\n self.parameters.excelCriteriaSelector = new_excel_criteria_selector\n self.update_criteria_values(False)\n if data != 0:\n self.update_criteria_values(True)\n else:\n 
index = vs.GetChoiceIndex(self.dialog, self.kWidgetID_excelCriteriaValue, self.parameters.excelCriteriaValue)\n if index == -1:\n vs.SelectChoice(self.dialog, self.kWidgetID_excelCriteriaValue, 0, True)\n self.parameters.excelCriteriaValue = \"Select a value ...\"\n else:\n vs.SelectChoice(self.dialog, self.kWidgetID_excelCriteriaValue, index, True)\n elif item == self.kWidgetID_excelCriteriaValue:\n self.parameters.excelCriteriaValue = vs.GetChoiceText(self.dialog, self.kWidgetID_excelCriteriaValue, data)\n elif item == self.kWidgetID_symbolCreateSymbol:\n self.parameters.symbolCreateSymbol = \"{}\".format(data != 0)\n selector_index = vs.GetSelectedChoiceIndex(self.dialog, self.kWidgetID_symbolFolderSelector, 0)\n vs.EnableItem(self.dialog, self.kWidgetID_symbolFolderSelector, data)\n vs.EnableItem(self.dialog, self.kWidgetID_symbolFolder, selector_index == 0 and data == 1)\n elif item == self.kWidgetID_symbolFolderSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_symbolFolder, data == 0)\n self.parameters.symbolFolderSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_symbolFolderSelector, data)\n elif item == self.kWidgetID_classAssignPictureClass:\n self.parameters.classAssignPictureClass = \"{}\".format(data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_classPictureClassSelector, data == 1)\n selector_index = vs.GetPopUpChoiceIndex(self.dialog, self.kWidgetID_classPictureClassSelector, self.parameters.classClassPictureSelector)\n vs.EnableItem(self.dialog, self.kWidgetID_classPictureClass, selector_index == 0 and data != 0)\n elif item == self.kWidgetID_classPictureClassSelector:\n vs.EnableItem(self.dialog, self.kWidgetID_classPictureClass, data == 0)\n self.parameters.classClassPictureSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_classPictureClassSelector, data)\n elif item == self.kWidgetID_classPictureClass:\n index, self.parameters.pictureParameters.pictureClass = vs.GetSelectedChoiceInfo(self.dialog, self.kWidgetID_classPictureClass, 0)\n elif item == self.kWidgetID_classCreateMissingClasses:\n self.parameters.createMissingClasses = \"{}\".format(data == 1)\n elif item == self.kWidgetID_metaImportMetadata:\n self.parameters.metaImportMetadata = \"{}\".format(data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaArtworkTitleSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaAuthorNameSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaArtworkCreationDateSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaArtworkMediaSelector, data == 1)\n # vs.EnableItem(self.dialog, self.kWidgetID_metaTypeSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaRoomLocationSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaArtworkSourceSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaRegistrationNumberSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaAuthorBirthCountrySelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaAuthorBirthDateSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaAuthorDeathDateSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaDesignNotesSelector, data == 1)\n vs.EnableItem(self.dialog, self.kWidgetID_metaExhibitionMediaSelector, data == 1)\n elif item == self.kWidgetID_metaArtworkTitleSelector:\n self.parameters.metaArtworkTitleSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaArtworkTitleSelector, data)\n elif item == self.kWidgetID_metaAuthorNameSelector:\n 
self.parameters.metaAuthorNameSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaAuthorNameSelector, data)\n elif item == self.kWidgetID_metaArtworkCreationDateSelector:\n self.parameters.metaArtworkCreationDateSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaArtworkCreationDateSelector, data)\n elif item == self.kWidgetID_metaArtworkMediaSelector:\n self.parameters.metaArtworkMediaSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaArtworkMediaSelector, data)\n # elif item == self.kWidgetID_metaTypeSelector:\n # self.parameters.metaTypeSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaTypeSelector, data)\n elif item == self.kWidgetID_metaRoomLocationSelector:\n self.parameters.metaRoomLocationSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaRoomLocationSelector, data)\n elif item == self.kWidgetID_metaArtworkSourceSelector:\n self.parameters.metaArtworkSourceSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaArtworkSourceSelector, data)\n elif item == self.kWidgetID_metaRegistrationNumberSelector:\n self.parameters.metaRegistrationNumberSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaRegistrationNumberSelector, data)\n elif item == self.kWidgetID_metaAuthorBirthCountrySelector:\n self.parameters.metaAuthorBirthCountrySelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaAuthorBirthCountrySelector, data)\n elif item == self.kWidgetID_metaAuthorBirthDateSelector:\n self.parameters.metaAuthorBirthDateSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaAuthorBirthDateSelector, data)\n elif item == self.kWidgetID_metaAuthorDeathDateSelector:\n self.parameters.metaAuthorDeathDateSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaAuthorDeathDateSelector, data)\n elif item == self.kWidgetID_metaDesignNotesSelector:\n self.parameters.metaDesignNotesSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaDesignNotesSelector, data)\n elif item == self.kWidgetID_metaExhibitionMediaSelector:\n self.parameters.metaExhibitionMediaSelector = vs.GetChoiceText(self.dialog, self.kWidgetID_metaExhibitionMediaSelector, data)\n elif item == self.kWidgetID_importIgnoreErrors:\n self.parameters.importIgnoreErrors = \"{}\".format(data != 0)\n vs.ShowItem(self.dialog, self.kWidgetID_importErrorCount, data == 0)\n elif item == self.kWidgetID_importIgnoreExisting:\n self.parameters.importIgnoreExisting = \"{}\".format(data != 0)\n elif item == self.kWidgetID_importIgnoreUnmodified:\n self.parameters.importIgnoreUnmodified = \"{}\".format(data != 0)\n elif item == self.kWidgetID_importButton:\n self.import_pictures()\n vs.SetItemText(self.dialog, self.kWidgetID_importNewCount, \"New Pictures: {}\".format(self.importNewCount))\n vs.SetItemText(self.dialog, self.kWidgetID_importUpdatedCount, \"Updated Pictures: {}\".format(self.importUpdatedCount))\n vs.SetItemText(self.dialog, self.kWidgetID_importDeletedCount, \"Deleted Pictures: {}\".format(self.importDeletedCount))\n vs.SetItemText(self.dialog, self.kWidgetID_importErrorCount, \"Error Pictures: {}\".format(self.importErrorCount))\n\n # This section handles the following cases:\n # - The Dialog is initializing\n # - The name of the workbook file has changed\n if item == self.kWidgetID_fileName or item == self.kWidgetID_fileBrowseButton or item == KDialogInitEvent:\n self.set_workbook()\n\n # The image selection has changed\n if item == self.kWidgetID_withImageSelector or item == self.kWidgetID_withImage or item == self.kWidgetID_excelSheetName:\n state = 
vs.GetSelectedChoiceIndex(self.dialog, self.kWidgetID_withImageSelector, 0) != 0 or \\\n vs.GetBooleanItem(self.dialog, self.kWidgetID_withImage) is True\n\n vs.EnableItem(self.dialog, self.kWidgetID_imageWidthLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imageWidthSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imageHeightLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imageHeightSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imagePositionLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imagePositionSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imagePosition, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imageTextureLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_imageTextureSelector, state)\n\n # The frame selection has changed\n if item == self.kWidgetID_withFrameSelector or item == self.kWidgetID_withFrame or item == self.kWidgetID_excelSheetName:\n state = vs.GetSelectedChoiceIndex(self.dialog, self.kWidgetID_withFrameSelector, 0) != 0 or \\\n vs.GetBooleanItem(self.dialog, self.kWidgetID_withFrame) is True\n\n vs.EnableItem(self.dialog, self.kWidgetID_frameWidthLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameWidthSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameHeightLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameHeightSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameThicknessLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameThicknessSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameThickness, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameDepthLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameDepthSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameDepth, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameClassLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameClassSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameClass, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameTextureScaleLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameTextureScaleSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameTextureScale, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameTextureRotationLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameTextureRotationSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_frameTextureRotation, state)\n\n # The matboard selection has changed\n if item == self.kWidgetID_withMatboardSelector or item == self.kWidgetID_withMatboard or item == self.kWidgetID_excelSheetName:\n state = vs.GetSelectedChoiceIndex(self.dialog, self.kWidgetID_withMatboardSelector, 0) != 0 or \\\n vs.GetBooleanItem(self.dialog, self.kWidgetID_withMatboard) is True\n\n vs.EnableItem(self.dialog, self.kWidgetID_windowWidthLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_windowWidthSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_windowHeightLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_windowHeightSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardPositionLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardPositionSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardPosition, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardClassLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardClassSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardClass, state)\n 
vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureScaleLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureScaleSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureScale, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureRotatLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureRotatSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_matboardTextureRotat, state)\n\n # The glass selection has changed\n if item == self.kWidgetID_withGlassSelector or item == self.kWidgetID_withGlass or item == self.kWidgetID_excelSheetName:\n state = vs.GetSelectedChoiceIndex(self.dialog, self.kWidgetID_withGlassSelector, 0) != 0 or \\\n vs.GetBooleanItem(self.dialog, self.kWidgetID_withGlass) is True\n\n vs.EnableItem(self.dialog, self.kWidgetID_glassPositionLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_glassPositionSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_glassPosition, state)\n vs.EnableItem(self.dialog, self.kWidgetID_glassClassLabel, state)\n vs.EnableItem(self.dialog, self.kWidgetID_glassClassSelector, state)\n vs.EnableItem(self.dialog, self.kWidgetID_glassClass, state)\n\n # After the event has been handled, update some of the import validity settings accordingly\n self.parameters.imageValid = ((self.parameters.withImageSelector == \"-- Manual\" and self.parameters.pictureParameters.withImage == \"True\") or\n self.parameters.withImageSelector != \"-- Manual\") and \\\n (self.parameters.imageTextureSelector != \"-- Select column ...\") and \\\n (self.parameters.imageWidthSelector != \"-- Select column ...\") and \\\n (self.parameters.imageHeightSelector != \"-- Select column ...\")\n\n self.parameters.frameValid = ((self.parameters.withFrameSelector == \"-- Manual\" and self.parameters.pictureParameters.withFrame == \"True\") or\n self.parameters.withFrameSelector != \"-- Manual\") and \\\n (self.parameters.frameWidthSelector != \"-- Select column ...\") and \\\n (self.parameters.frameHeightSelector != \"-- Select column ...\")\n\n self.parameters.matboardValid = ((self.parameters.withMatboardSelector == \"-- Manual\" and self.parameters.pictureParameters.withMatboard == \"True\") or\n self.parameters.withMatboardSelector != \"-- Manual\") and \\\n (self.parameters.windowWidthSelector != \"-- Select column ...\") and \\\n (self.parameters.windowHeightSelector != \"-- Select column ...\")\n\n self.parameters.glassValid = ((self.parameters.withGlassSelector == \"-- Manual\" and\n self.parameters.pictureParameters.withGlass == \"True\") or self.parameters.withGlassSelector != \"-- Manual\")\n\n self.parameters.criteriaValid = \\\n (self.parameters.excelCriteriaSelector != \"-- Select column ...\" and self.parameters.excelCriteriaValue != \"Select a value ...\")\n\n self.parameters.importValid = (self.parameters.imageValid or self.parameters.frameValid) and self.parameters.criteriaValid\n\n vs.EnableItem(self.dialog, self.kWidgetID_importButton, self.parameters.importValid)\n vs.EnableItem(self.dialog, self.kWidgetID_importNewCount, self.parameters.importValid)\n vs.EnableItem(self.dialog, self.kWidgetID_importUpdatedCount, self.parameters.importValid)\n vs.EnableItem(self.dialog, self.kWidgetID_importDeletedCount, self.parameters.importValid)", "def prepare_command(self, title, cmd):\n return cmd" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
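The prepare_op document in the row above boils down to one pattern: when the pending operation asks the player to pick from a list of choices, build one clickable widget per choice, give them all the same selection callback, and drop them into an overlay layer. A rough, framework-free sketch of that idea follows; the ChoiceOverlay class and prepare_select_choice function are assumptions made for illustration and do not come from the cocos2d-based code in the row.

class ChoiceOverlay:
    """Minimal stand-in for a modal 'pick one of these' layer."""

    def __init__(self, choices, on_select, can_cancel=True):
        self.can_cancel = can_cancel
        # One clickable entry per choice, all wired to the same selection callback.
        self.widgets = [(choice, lambda c=choice: on_select(c)) for choice in choices]

    def click(self, index):
        # Simulate the player clicking the index-th choice widget.
        _, callback = self.widgets[index]
        return callback()


def prepare_select_choice(choices, on_select, can_reset=True):
    # Mirrors the shape of prepare_op: only the select-choice case needs extra UI.
    return ChoiceOverlay(choices, on_select, can_cancel=can_reset)


picked = []
overlay = prepare_select_choice(['card_a', 'card_b'], picked.append)
overlay.click(1)
assert picked == ['card_b']
assert overlay.can_cancel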
Return a dict of the XLS file keyed on article_id. The name of the manuscript number column is hard-wired in this function.
def index_table_on_article_id(table_type):
    path = get_xls_path(table_type)

    # get the data and the row of colnames
    data_rows = get_xls_data_rows(table_type)
    col_names = get_xls_col_names(table_type)

    article_index = defaultdict(list)

    for data_row in data_rows:
        article_id = get_cell_value("poa_m_ms_no", col_names, data_row)
        # author_id = get_cell_value("poa_a_id", col_names, data_row)
        article_index[article_id].append(data_row)
        # print article_id, author_id

    return article_index
[ "def process_xlsx(content):\r\n data = {}\r\n workbook = xlrd.open_workbook(file_contents=content)\r\n worksheets = workbook.sheet_names()\r\n for worksheet_name in worksheets:\r\n worksheet = workbook.sheet_by_name(worksheet_name)\r\n worksheet.name = slughifi(worksheet.name)\r\n headers = make_headers(worksheet)\r\n worksheet_data = make_worksheet_data(headers, worksheet)\r\n data[worksheet.name] = worksheet_data\r\n return data", "def make_excel_odict_list(xlfile):\n\n wb=wb_or_str(xlfile)\n sheet = wb.get_active_sheet()\n all_data=[]\n highest_row=sheet.get_highest_row()\n for row in range(2, sheet.get_highest_row() + 1):\n data_holding_dict = collections.OrderedDict()\n for c in data_columns:\n location=c+str(row)\n data=sheet[location].value\n data_holding_dict[c]=data\n all_data.append(data_holding_dict)\n\n return all_data", "def read_excel_data(self):\n try:\n xl = pd.ExcelFile(self.excel_file)\n except IOError:\n raise FileNotFoundError(f\"file {self.excel_file} not found, or no data\") \n else:\n # parse pandas sheets into dict:\n # {sheet_name: data stored in the sheet}\n self.xl_dict = {sheet_name: xl.parse(sheet_name) \n for sheet_name in xl.sheet_names}", "def _getPubmedIds(self, filename):\n\tfp = open(filename, 'r')\n\tself.mgiPubmedIds = {}\n\tfp.next()\t\t# skip file header line\n\tfor line in fp:\n\t id, hasPdf, year, journal = line.split('\\t')\n\t self.mgiPubmedIds[id] = hasPdf\n\n\tfp.close()", "def xls2dict(self, xlspath):\n with xlrd.open_workbook(xlspath) as book:\n sh = book.sheet_by_index(0)\n for row in range(1, sh.nrows):\n self.dico_vals[row] = []\n for col in range(sh.ncols):\n if sh.cell_type(row, col) is 1:\n self.dico_vals[row].append(unicode(sh.cell_value(row, col)))\n elif sh.cell_type(row, col) is 2:\n self.dico_vals[row].append(unicode(sh.cell_value(row, col)))\n elif sh.cell_type(row, col) is 3:\n self.date = xlrd.xldate_as_tuple(sh.cell_value(row, col), book.datemode)\n dico_vals[row].append(unicode('-'.join([unicode(i) for i in date[:3]])))\n elif sh.cell_type(row, col) is 4:\n self.dico_vals[row].append(unicode(int(sh.cell_value(row, col))))\n elif sh.cell_type(row, col) is 0:\n self.dico_vals[row].append(unicode('NULL'))\n # End of function\n return book, self.dico_vals", "def load_david_worksheet(fname='fMRI MA significance bias database 03-24-13.xlsx',\n verbose=False):\n\n workbook = xlrd.open_workbook(fname)\n sheet=workbook.sheet_by_name('Original Sheet')\n\n studies={}\n all_ids=[]\n\n # load identifier and N value from spreadsheet cells\n\n for i in range(1,sheet.nrows):\n idval=sheet.cell(i,5).value\n\n if idval == xlrd.empty_cell.value:\n continue\n\n # check for cases with no DOI or PMID\n if str(idval).find('No')==0:\n print('skipping',idval)\n continue\n\n # determine whether it's a doi or pmid and treat accordingly\n try:\n id=int(idval)\n is_pmid=True\n except ValueError:\n is_pmid=False\n\n if not is_pmid:\n try:\n id=idval.split('doi:')[1].replace(' ','') #.replace(\"doi:\",'')\n except:\n print('bad identifier',idval)\n continue\n\n if not id in studies.keys():\n studies[id]=[]\n\n # get sample size value\n n=sheet.cell(i,6).value\n if n==xlrd.empty_cell.value:\n if verbose:\n print('no value for',id)\n continue\n\n try:\n nval=int(n)\n except ValueError:\n print('skipping bad value: %s'%n)\n\n all_ids.append(id)\n studies[id].append(sheet.cell(i,6).value)\n\n unique_ids=list(studies.keys())\n print('found %d ids'%len(all_ids))\n print('found %d unique ids'%len(unique_ids))\n\n\n # exclude studies with multiple N values\n 
good_unique_ids=unique_ids\n for id in unique_ids:\n if len(studies[id])>1:\n if verbose:\n print('excluding:',(id,studies[id]))\n good_unique_ids.remove(id)\n del studies[id]\n return good_unique_ids,studies", "def read_excel(filename: str, columns: list=['Name', 'Weight', 'Symbol'], sheetname='Motif'):\n with pd.ExcelFile(filename) as xl:\n df = xl.parse(sheetname)\n df.columns = df.iloc[0]\n df.reindex(df.index.drop(0))\n df_useful = df[columns]\n records = df_useful.to_dict('records')[1:]\n records = [{k.lower() : v for k,v in r.items()} for r in records] \n return records", "def get_article_title_2_id(self):\n pages_table = Tables.PageTable(self.P.pages)\n return dict(FlipIt(pages_table.select_id_title_of_articles()))", "def load_major_names(filename=\"majors.xls\"):\n m_names = {}\n try:\n book = xlrd.open_workbook(filename)\n worksheet = book.sheet_by_index(0)\n for i in range(1, worksheet.nrows):\n try:\n major_id = str(int(worksheet.cell_value(i, 0)))\n except ValueError:\n major_id = worksheet.cell_value(i, 0)\n m_names[major_id] = worksheet.cell_value(i, 1)\n except IOError:\n logging.error(\"Can't read file %s\", filename)\n return m_names", "def obtener_cod_articulo_k42():\r\n articulos = Workbook()\r\n articulo = articulos.worksheets[0]\r\n libro_art = load_workbook('C:\\Maestro\\Maestro articulos k42.xlsx')\r\n hoja_art = libro_art.worksheets[0]\r\n cont = 1\r\n colores =['AMARILLO','AMBAR','AZUL','BEIGE','BLANCO','CAFE','CASTAÑO','DORADO','GRIS','LILA','MARRON','NARANJA','NEGRO','OPAL','PERLADO','PLATA','ROJO','ROSADO','TRANSLUCIDO','TRASLUCIDO','TRANSPARENTE','VERDE','VINOTINTO','VIOLETA']\r\n articulo.append([\"Codigo\",\"Nombre\",\"Cliente\",\"Categoria de inventario\",\"Linea\",\"Mercado\",\"Tipo Terminacion\",\"Organizacion\",\"Estado\",\"Cod.Componente\",\"Cod.Modelo\",\"Tipo Modelo\",\"Diametro\",\"Largo\",\"Peso Producion\",\"Formulacion\",\"Material\",\"Color\",\"Ancho\",\"Calibre\",\"Cod.Insumo\",\"Corrugado Master\",\"Und.Empque Maestro\",\"Corrugado Secundario\",\"Und.Empque Secundaria\",\"Plegadiza\",\"Und.Empque Plegadiza\",\"Particion\",\"Und.Empque Particion\",\"Bolsa/Rollo Master\",\"Unidad Empaque Bolsa Master\",\"Bolsa/Rollo Secundaria\",\"Unidad Empaque Bolsa Secundaria\",\"EXT_1501\",\"EXT_WEL90\",\"EXT_1503\",\"EXT_WEL120\",\"TER_3710\",\"TER_70K\",\"TER_50K\",\"TER_RDK54\",\"TER_GABLER\",\"IMP_560\",\"IMP_560R\",\"IMP_580R\",\"ETI_A\",\"ETI_B\",\"TER_KARV_200\",\"TER_KARV_400\",\"EMP_RENCO\",\"TER_AUTOV\",\"TER_ILLIG_P\",\"TER_RDKP72\",\"TER_RDKP54\",\"TER_ILLIG_G\",\"TER_TAIWANESA\",\"TRO_KRAUSSE\",\"PRE_HERLAN\",\"PRE_SHULER\",\"LIN_HERLAN\",\"LIN_MALL\",\"LIN_SCHULER\",\"ENG_A\",\"ENG_B\",\"INY_FROMAG\",\"INY_ORIENTE\",\"LLE_COMADIS\",\"SEL_AUTOMATICA\",\"HOR_A\",\"LAC_SPRIGMAG\",\"MOL_TERMICOS\",\"TER_COMMODORE\",\"PESO VENTAS\",\"VOLUMEN EMPAQUE PPAL\",\"UNIDAD VOLUMEN EMPAQUE PPAL\",\"YIELD\",\"CATEGORIA COSTOS PAC\",\"CATEGORIA PRODUCTO\",\"VOLUMEN ORACLE CVR(M3)\",\"PESO ORACLE CVR(Kg)\",\"INTRACLASE CAJA\",\"PESO BRUTO(Kg)\",\"RADICADO\",\"TIPO DE MOLDE\",\"MOLDE 1\",\"MOLDE 2\",\"MOLDE 3\",\"YIELD\",\"DESPERDICIO\",\"OP STD\",\"DESCRIPCION OP\",\"OP STD PRODUCCION\",\"RECURSO\",\"INVERSO(RENDIMIENTO)\",\"IDENTIFICADOR DE BODEGAS\",\"Desperdicio Componente\",\"Desperdicio Insumo\",\"Desperdicio Corr Master\",\"Desperdicio Corr sec\",\"Desperdicio Bolsa master\",\"Desperdicio Bolsa sec\",\"Descripcion larga\"])\r\n nombre = r\"C:\\Maestro\\MaestroK42.xlsx\"\r\n for fila in range(2,hoja_art.max_row):\r\n if \"Active\" == hoja_art.cell(row= 
fila, column=37).value or \"HAI\" == hoja_art.cell(row= fila, column=37).value:\r\n cont += 1\r\n articulo.cell(row=cont,column=1, value=hoja_art.cell(row=fila,column=2).value) #CODIGO PADRE\r\n articulo.cell(row=cont,column=2, value=hoja_art.cell(row=fila,column=3).value) #DESCRIPCION CORTA\r\n articulo.cell(row=cont,column=3, value=hoja_art.cell(row=fila,column=12).value)#CLIENTE\r\n articulo.cell(row=cont,column=4, value=hoja_art.cell(row=fila,column=24).value)#CATEGORIA DE INVENTARIO\r\n cadena = hoja_art.cell(row=fila,column=1).value\r\n org = re.split(r'\\ ',cadena)\r\n articulo.cell(row=cont,column=8, value=org[0])#ORGANIZACION\r\n articulo.cell(row=cont,column=9, value=hoja_art.cell(row=fila,column=37).value)#ESTADO \r\n articulo.cell(row=cont,column=11, value=hoja_art.cell(row=fila,column=10).value)#COD MODELO\r\n if (hoja_art.cell(row=fila,column=131).value)is not None:\r\n articulo.cell(row=cont,column=15, value=float(hoja_art.cell(row=fila,column=131).value)*1000)#PESO PRODUCCION\r\n articulo.cell(row=cont,column=73, value=float(hoja_art.cell(row=fila,column=131).value)*1000)#PESO PRODUCCION\r\n cadena = hoja_art.cell(row=fila,column=24).value \r\n if (cadena)is not None:\r\n material = re.split(r'\\.',cadena)\r\n if len(material)>=1:\r\n articulo.cell(row=cont,column=17, value=material[1])#MATERIAL\r\n cadena = hoja_art.cell(row=fila,column=23).value\r\n separado = re.split(r'\\.',cadena)\r\n if (len(separado))>=3: \r\n articulo.cell(row=cont,column=5, value=separado[1]) #LINEA\r\n articulo.cell(row=cont,column=6, value=separado[0]) #MERCADO\r\n articulo.cell(row=cont,column=7, value=separado[3]) #TIPO TERMINACION\r\n for color in colores:\r\n if re.search(color, hoja_art.cell(row=fila,column=3).value)is not None:\r\n articulo.cell(row=cont,column=18, value=color) #COLOR\r\n break\r\n if re.match(\"SP LAM\", hoja_art.cell(row= fila, column=3).value)is not None:\r\n ac = hoja_art.cell(row= fila, column=3).value\r\n ac = ac[ac.find(\"X\")-5:ac.find(\"X\")+5]\r\n ac = re.findall(r'\\d+.\\d+',ac)\r\n if ac == [] or len(ac)<2:\r\n articulo.cell(row=cont,column=19, value=\"Validar descripcion\") #ANCHO\r\n articulo.cell(row=cont,column=20, value=\"Validar descripcion\") #CALIBRE\r\n elif len(ac[0])>3:\r\n articulo.cell(row=cont,column=19, value=ac[1]) #ANCHO\r\n articulo.cell(row=cont,column=20, value=ac[0]) #CALIBRE\r\n else:\r\n articulo.cell(row=cont,column=19, value=ac[1]) #ANCHO\r\n articulo.cell(row=cont,column=20, value=\"Validar descripcion\") #CALIBRE \r\n articulo.cell(row=cont,column=75, value=hoja_art.cell(row=fila,column=132).value)#UND VOLUMEN EMPAQUE PPAL\r\n articulo.cell(row=cont,column=77, value=hoja_art.cell(row=fila,column=25).value)#CATEGORIA COSTOS PAC\r\n articulo.cell(row=cont,column=78, value=hoja_art.cell(row=fila,column=23).value)#CATEGORIA PRODUCTO \r\n articulo.cell(row=cont,column=79, value=hoja_art.cell(row=fila,column=133).value)#VOLUMEN ORACLE CVR\r\n articulo.cell(row=cont,column=80, value=hoja_art.cell(row=fila,column=131).value)#PESO ORACLE CVR (Kg)\r\n articulo.cell(row=cont,column=81, value=hoja_art.cell(row=fila,column=20).value)#INTERCLASE CAJA\r\n articulo.cell(row=cont,column=82, value=hoja_art.cell(row=fila,column=21).value)#INTERCLASE CAJA\r\n articulo.cell(row=cont,column=102, value=hoja_art.cell(row=fila,column=38).value)#DESCRIPCION LARGA\r\n articulos.save(nombre)\r\n articulos.close()\r\n print(\"Se guardó correctamente\")\r\n return articulos", "def parse_spreadsheet(filename):\r\n contents={}\r\n\r\n # Open the source 
spreadsheet\r\n workbook=xlrd.open_workbook(filename=filename)\r\n\r\n # Iterate through the list of tabs (sheets)\r\n for tab in range(0, workbook.nsheets):\r\n\r\n # Fetch the current sheet\r\n worksheet = workbook.sheet_by_index(tab)\r\n tab_label = worksheet.name\r\n\r\n # For the first row, extract the column labes\r\n labels=[]\r\n for i in range(0, worksheet.ncols):\r\n labels.append(worksheet.row(0)[i].value)\r\n\r\n # Check whether the current table has an ignore field, so we can\r\n # later skip some lines\r\n if '##IGNORE##' in labels:\r\n do_ignore=True\r\n else:\r\n do_ignore=False\r\n\r\n entries=[]\r\n\r\n for i in range(1, worksheet.nrows):\r\n # Fetch row\r\n row = worksheet.row(i)\r\n entry={}\r\n # Store values on each cell\r\n for j in range(0, worksheet.ncols):\r\n\r\n # If it's a number, convert it to an integer first\r\n # (we don't want to end up reading eth/1.0/11.0)\r\n if is_number(row[j].value):\r\n entry[ labels[j] ] = str( int(row[j].value) )\r\n else:\r\n entry[ labels[j] ] = str(row[j].value)\r\n\r\n if do_ignore is False:\r\n entries.append(entry)\r\n elif do_ignore is True and entry['##IGNORE##'].lower()==\"no\":\r\n entries.append(entry)\r\n else:\r\n pass\r\n # output.debug(\"Ignored line %i\" % i)\r\n\r\n # Store the data of this sheet\r\n contents[tab_label]=entries\r\n\r\n return contents", "def get_manual_data(manual_data_file):\n manual_data = {}\n reader = csv.DictReader(open(manual_data_file))\n for row in reader:\n key = row.pop('PDF URL')\n manual_data[key] = row\n return manual_data", "def extractDocs(self, dxlFileContent):\n \n docs = dxlFileContent.getElementsByTagName(\"document\")\n\n for doc in docs:\n dico = {}\n dico['type'] = 'PlominoDocument'\n dico['id'], dico['title'] = self.getIdTitleAttributes(doc)\n dico['form'] = doc.getAttribute('form')\n # import all the items included in this doc\n\n dico['items'] = self.extractItems(doc)\n dico['files'] = self.extractInsertedFiles(doc)\n\n self.docs.append(dico)", "def getExcelData(self):\n try:\n polarionDict = self.polarionDict\n self.excelData = {}\n excelData = self.excelData\n\n for t in polarionDict:\n stepsCompact = []\n steps = polarionDict[t]['steps']\n\n for i, s in enumerate(steps):\n row = [str(i + 1)]\n row.extend(s[5:12])\n stepsCompact.append(row)\n\n excelData['{}_Steps'.format(t)] = stepsCompact\n except:\n print(traceback.format_exc())", "def gather_psc_dict():\n dicts = {}\n headers = None\n wb = open_workbook(FETCHED_PSC_PATH)\n # f*king excel...http://stackoverflow.com/a/1108474/160863\n # we need to get the datemode of the workbook to translate its dates\n wbdatemode = wb.datemode\n sheet = wb.sheets()[0] # there's only one sheet in this book\n\n for row in sheet.get_rows():\n vals = [col.value for col in row]\n if not headers:\n if vals[0] == '': # # first few rows are empty, so skip\n continue\n else:\n headers = vals\n else: # headers are set\n\n d = dict(zip(headers, vals))\n if not d['END DATE']: # if a row has an END DATE, then we don't want it\n # for whatever reason, some values have trailing spaces...\n d['PSC CODE'] = d['PSC CODE'].strip()\n # again, f*king excel...http://stackoverflow.com/a/1108474/160863\n dt = datetime(*xldate_as_tuple(d['START DATE'], datemode = wbdatemode))\n d['START DATE'] = dt.strftime('%Y-%m-%d')\n dicts[d['PSC CODE']] = d\n\n return dicts", "def getAircraftCodeDict():\n\n d1 = d2 = {}\n \n f = shelve.open(filename2) \n d1 = f\n for sk in d1.keys():\n k = int(sk) # convert the string key in the shelve\n d2[k] = d1[sk] # to an int for 
the dictionary\n \n\n return d2", "def _process_xlsx_files(self):\n logging.info(f\"Processing {self.transaction} as XLSX files.\")\n\n # Getting the function\n function_for_file = files_definitions[self.transaction][\"function\"]\n\n dataframe = pd.DataFrame()\n\n for file in self.files:\n blob_content = self.blob_container_client.get_blob_client(blob=file).download_blob().content_as_bytes()\n df = pd.read_excel(blob_content, dtype=str)\n df = self._simplify_columns(df)\n df = function_for_file(df)\n\n dataframe = pd.concat([dataframe, df])\n \n return dataframe", "def xls_to_csv(self, model, file, header_map=None, extra_columns=None):\n decoded_data = base64.decodestring(file)\n ftemp = 'temp' + datetime.utcnow().strftime('%H%M%S%f')[:-3]\n f = open(ftemp + '.xls', 'wb+')\n f.write(decoded_data)\n f.seek(0)\n f.close()\n try:\n wb = xlrd.open_workbook(f.name)\n except xlrd.XLRDError:\n raise ValidationError(\n _('Invalid file format, only .xls or .xlsx file allowed!'))\n except Exception:\n raise\n st = wb.sheet_by_index(0)\n csv_file = open(ftemp + '.csv', 'wb')\n csv_out = unicodecsv.writer(csv_file,\n encoding='utf-8',\n quoting=unicodecsv.QUOTE_ALL)\n _HEADER_FIELDS = []\n if st._cell_values:\n _HEADER_FIELDS = st._cell_values[0]\n id_index = -1 # -1 means no id\n for nrow in xrange(st.nrows):\n if nrow == 0: # Header, find id field\n header_values = [x.lower().strip()\n for x in st.row_values(nrow)]\n if 'id' in header_values:\n id_index = header_values.index('id')\n if nrow > 0:\n row_values = st.row_values(nrow)\n for index, val in enumerate(row_values):\n ctype = st.cell(nrow, index).ctype\n type = ctype_text.get(ctype, 'unknown type')\n if id_index == index and val:\n # UUID replace id\n xml_id = '%s.%s' % ('pabi_xls', uuid.uuid4())\n row_values[index] = xml_id\n elif type == 'empty' or type == 'text' \\\n or type == 'bool' or type == 'error' \\\n or type == 'blank':\n row_values[index] = st.cell(nrow, index).value\n elif type == 'number':\n if not val:\n row_values[index] = 0\n else:\n if not str(val).isdigit():\n row_values[index] = int(val)\n else:\n row_values[index] = val\n elif type == 'xldate':\n str_date = self.xldate_to_datetime(\n st.cell(nrow, index).value)\n row_values[index] = str_date\n else:\n row_values[index] = st.cell(nrow, index).value\n csv_out.writerow(row_values)\n else:\n csv_out.writerow(st.row_values(nrow))\n csv_file.close()\n csv_file = open(ftemp + '.csv', 'r')\n file_txt = csv_file.read()\n csv_file.close()\n os.unlink(ftemp + '.xls')\n os.unlink(ftemp + '.csv')\n if not file_txt:\n raise ValidationError(_(str(\"File Not found.\")))\n # Create xml_ids if not already assigned\n if id_index == -1:\n _HEADER_FIELDS.insert(0, 'id')\n xml_id = '%s.%s' % ('pabi_xls', uuid.uuid4())\n file_txt = self._add_column('id', xml_id, file_txt)\n # Map column name\n if header_map:\n _HEADER_FIELDS = [header_map.get(x.lower().strip(), False) and\n header_map[x.lower()] or False\n for x in _HEADER_FIELDS]\n # Add extra column\n if extra_columns:\n for column in extra_columns:\n _HEADER_FIELDS.insert(0, str(column[0]))\n file_txt = self._add_column(column[0], column[1], file_txt)\n return (_HEADER_FIELDS, file_txt)", "def getMetaData(headers,fileId, colId, dfResults): # don't need headers = colnames in th df\r\n #print('getMetaData fileid',fileId,' colId:',colId)\r\n #print('type of fileId ',type(fileId))\r\n #TODO sort out the type checking \r\n fileId = int(fileId)\r\n \r\n row = dfResults.loc[dfResults[colId]==fileId]\r\n \r\n metaData = {}\r\n \r\n for col in 
np.arange(1,len(headers)):\r\n if headers[col]['columnType']==u'FILEHANDLEID':\r\n continue\r\n name=headers[col]['name']\r\n if ((str(name)!= \"ROW_ID\") & (str(name) != \"ROW_VERSION\")):\r\n # not in dataframe .. could remove them first I guesss\r\n metaData[name]=row[name].values[0]\r\n \r\n return metaData", "def get_association_dict(weight=False):\n book = xlrd.open_workbook(path)\n sheets = dict(zip(book.sheet_names(),book.sheets()))\n associations = sheets['recoded associations']\n \n # Get a list of the categories and initialize the association dictionary.\n # This dictionary holds a counter for each exemplar, counting the responses.\n categories = set([x.value for x in associations.col(1)[1:]])\n association_dict = {category:defaultdict(Counter) for category in categories}\n \n # Let's define a row using the column names from the sheet.\n # Having a namedtuple means that we can call each value by its name.\n Row = namedtuple('Row',[cell.value for cell in associations.row(0)])\n \n # And loop over the rows, so that we can count the associations for each exemplar.\n for n in range(1,associations.nrows):\n row = Row(*[str(cell.value) for cell in associations.row(n)])\n association_dict[row.category][row.exemplar].update(weight_associations(row, weight))\n for category, excounter in association_dict.items():\n for exemplar in excounter:\n try:\n excounter[exemplar].pop('x')\n except KeyError:\n pass\n return association_dict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
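The document in the row above indexes spreadsheet rows into a defaultdict(list) keyed on the manuscript-number column. The short sketch below reproduces just that indexing step with made-up in-memory rows so the shape of the returned mapping is visible; apart from the hard-wired "poa_m_ms_no" key taken from the row, the column names, sample values, and the get_cell_value helper here are illustrative assumptions.

from collections import defaultdict

# Hypothetical stand-ins for the XLS helpers used in the row above.
col_names = ["poa_m_ms_no", "poa_a_id", "author_name"]
data_rows = [
    ("12345", "A1", "Alice"),
    ("12345", "A2", "Bob"),
    ("67890", "A3", "Carol"),
]


def get_cell_value(col_name, col_names, data_row):
    # Look up one cell of a row by its column name.
    return data_row[col_names.index(col_name)]


article_index = defaultdict(list)
for data_row in data_rows:
    article_id = get_cell_value("poa_m_ms_no", col_names, data_row)
    article_index[article_id].append(data_row)

# Both rows for manuscript 12345 end up under the same key.
assert len(article_index["12345"]) == 2
assert len(article_index["67890"]) == 1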
Ensure that formatted JSON is returned when called by the BrowsableAPIRenderer.
def test_renderer_works_correctly_with_browsable_api(self):
    rendered = self.renderer.render(
        data=self.data,
        media_type="text/html",
        renderer_context={"indent": 4},
    )
    self.assertEqual(rendered, json.dumps(self.data, indent=4))
[ "def format_response(self, json_response):\n raise NotImplementedException", "def __to_format(self, response):\n if self.format == 'json':\n return response.json()", "def test_renderer_works_correctly_with_return_dict(self):\n rendered = self.renderer.render(\n data=ReturnDict(self.data, serializer=None),\n media_type=\"application/json\",\n renderer_context={},\n )\n reloaded = orjson.loads(rendered)\n\n self.assertEqual(reloaded, self.data)", "def test_renderer_works_correctly_with_application_json(self):\n rendered = self.renderer.render(\n data=self.data,\n media_type=\"application/json\",\n renderer_context={\"indent\": 4},\n )\n reloaded = orjson.loads(rendered)\n\n self.assertEqual(reloaded, self.data)", "def _post_process_json(self):\n pass", "def render(self, data):\n separators = SHORT_SEPARATORS if self.compact else LONG_SEPARATORS\n\n try:\n render = json.dumps(\n data, ensure_ascii=self.ensure_ascii, separators=separators\n )\n\n # Unicode symbols \\u2028 and \\u2029 are invisible in JSON and\n # make output are invalid. To avoid this situations, necessary\n # replace this symbols.\n # For more information read this article: http://goo.gl/ImC89E\n for wrong_symbol, expected in WRONG_UNICODE_SYMBOLS:\n render = render.replace(wrong_symbol, expected)\n\n render = bytes(render.encode('utf-8'))\n except Exception as exc:\n raise SerializerError(exc)\n return render", "def _ensure_json_supported(self):\n self._ensure_support({\n 'version': (2, 4, 0),\n 'label': 'JSON API'\n })", "def test_use_json_return_format(\n self,\n fetch_client: FetchClient,\n mock_response: MagicMock,\n mock_requests_get: MagicMock,\n ) -> None:\n res = fetch_client.fetch(1)\n assert isinstance(res, Response)\n\n mock_requests_get.assert_called_once_with(\n \"http://www.giantbomb.com/api/game/1\",\n params={\"api_key\": \"fake_key\", \"format\": \"json\"},\n headers={\"User-Agent\": \"Pybomb {}\".format(version)},\n )", "def return_data(self, data, format=None):\n if format is None:\n format = self.format\n if format == \"json\":\n formatted_data = json.loads(data)\n else:\n formatted_data = data\n return formatted_data", "def test_renderer_works_correctly_with_browsable_api_with_date(self):\n today = datetime.date.today()\n data = {\"today\": today}\n rendered = self.renderer.render(\n data=data, media_type=\"text/html\", renderer_context={\"indent\": 4}\n )\n reloaded = orjson.loads(rendered)\n self.assertEqual(reloaded, {\"today\": today.isoformat()})", "def handle_exception(self, exc):\n response = super(FormatAPIView, self).handle_exception(exc)\n serializer_class = getattr(\n getattr(self.request, 'accepted_renderer', None),\n 'error_serializer_class', None)\n if serializer_class is None:\n return response\n\n serializer = serializer_class(\n instance=exc, context=self.get_serializer_context())\n response.data = serializer.data\n return response", "def _generate_json_response(self, context):\n raise NotImplementedError", "def _handle_response(self):\n # print(self.response.json())\n if not (200 <= self.response.status_code < 300):\n raise BinanceAPIException(self.response)\n try:\n return self.response.json()\n except ValueError:\n raise BinanceRequestException('Invalid Response: %s' % self.response.text)", "def json_value(self):", "def test_json_format(self):\n reformatted = json.dumps(\n self.json,\n indent=4,\n ensure_ascii=False,\n sort_keys=True,\n )\n reformatted += \"\\n\"\n if self.json_str != reformatted:\n self.json_path.write_text(reformatted)\n self.fail(\"JSON file is not formatted 
correctly, Fixing...\")", "def serialize(self, obj):\n if self.endpoint.Serializer is None:\n return json.dumps(obj)\n else:\n return super(JSON, self).serialize(obj)", "def _check_json(self, expects, response):\n if 'expect_json' in expects:\n json_data = expects['expect_json']\n eq_(json_data, json.loads(response.data))", "def test_json_from_none(self):\n ans = Base.to_json_string(None)\n self.assertEqual(ans, \"[]\")", "def dispatch_request(self, *args, **kwargs):\n _ret = super(JsonEndpoint, self).dispatch_request(*args, **kwargs)\n return jsonify(_ret)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
When using the builtin json when called by the BrowsableAPIRenderer, ensure that native datetime.date objects are serialized correctly.
def test_renderer_works_correctly_with_browsable_api_with_date(self): today = datetime.date.today() data = {"today": today} rendered = self.renderer.render( data=data, media_type="text/html", renderer_context={"indent": 4} ) reloaded = orjson.loads(rendered) self.assertEqual(reloaded, {"today": today.isoformat()})
[ "def json_serial(obj):\n if isinstance(obj, dt.date):\n serial = obj.strftime(\"%m/%d/%Y\")\n return serial\n raise TypeError(\"Type not serializable\")", "def make_json_serializable(doc: Dict):\n for k, v in doc.items():\n if isinstance(v, datetime.date):\n doc[k] = v.strftime(\"%Y-%m-%d\")\n elif isinstance(v, datetime.datetime):\n doc[k] = v.isoformat()", "def test_date_serialization(self):\r\n person = self.Person(birth_date=date(1986, 9, 15))\r\n self.session.commit()\r\n d = to_dict(person)\r\n assert 'birth_date' in d\r\n assert d['birth_date'] == person.birth_date.isoformat()", "def dumps(data):\n\n def _convert(o):\n if isinstance(o, datetime):\n return o.__str__()\n\n return json.dumps(data, indent=2, default=_convert)", "def _handleDate(date):\n if type(date) is not datetime.date:\n raise TypeError('Dates needs to be specified by datetime.date')\n return date", "def test_datetime_serialization(self):\r\n computer = self.Computer(buy_date=datetime.now())\r\n self.session.commit()\r\n d = to_dict(computer)\r\n assert 'buy_date' in d\r\n assert d['buy_date'] == computer.buy_date.isoformat()", "def serializer_date_format(dt):\n return serializers.DateTimeField().to_representation(dt)", "def Serialize_JSON(self):\n if isinstance(self.TimeStamp, datetime):\n return self.TimeStamp.__str__()", "def test_format_old_date_does_not_change_timezone(self):\n date = '1901-01-01'\n\n with self.app_request_context('/'):\n format_value = format_date(self.autoescape_context, date)\n\n assert format_value.striptags() == '1 January 1901'", "def json_serialize_datetime(dt : datetime.datetime) -> Optional[str]:\n import datetime\n if isinstance(dt, datetime.datetime):\n return dt.isoformat() + 'Z'\n return None", "async def put_date( # pylint: disable=inconsistent-return-statements\n self, complex_body: _models.DateWrapper, *, content_type: str = \"application/json\", **kwargs: Any\n ) -> None:", "def test_that_updated_at_returned_by_to_dict_is_an_iso_string(self):\n b = BaseModel()\n self.assertEqual(b.to_dict()[\"updated_at\"], b.updated_at.isoformat())", "def test_that_created_at_returned_by_to_dict_is_an_iso_string(self):\n b = BaseModel()\n self.assertEqual(b.to_dict()[\"created_at\"], b.created_at.isoformat())", "def fix_blank_date_time_format(schema, json):\n stack_to_process = list()\n stack_to_process.append((schema, json))\n while len(stack_to_process) > 0:\n s, j = stack_to_process.pop()\n try:\n if s.type == 'object':\n if s.properties is None:\n # NOTE: Some MailChimp schemata use additionalProperties\n # instead of properties, which the singer Schema class does not\n # support. 
I think this means some MailChimp date-times are\n # inappropriately coming through as strings but have not\n # investigated further.\n continue\n for prop, spec in s.properties.items():\n if prop not in j:\n continue\n if spec.type in ('object', 'array'):\n stack_to_process.append((spec, j[prop]))\n elif spec.type == 'string':\n if spec.format == 'date-time':\n if j[prop] == '':\n # Remove empty date-time property\n del j[prop]\n elif s.type == 'array':\n if s.items is None:\n # Skip because no item definition in schemata.\n continue\n if s.items.type in ('object', 'array'):\n stack_to_process.extend([(s.items, datum) for datum in j])\n elif s.items.type == 'string':\n if s.items.format == 'date-time':\n j[:] = [datum for datum in j if datum != '']\n except (TypeError, ValueError, LookupError) as e:\n # Augment with contextual info\n raise ValueError({'stack': stack_to_process,\n 'current': (s, j),\n 'schema': schema,\n 'json': json}) from e", "def writeDateValue(self, value):\n raise NotImplementedError(\"TODO. Need type check.\")", "def _update_json_for_timeday(self, json_response, date_timekey, context):\n raise NotImplementedError", "def latest_date():\n \n # start by trying today's date\n try_date = datetime.date(datetime.now())\n \n # the function will iterate until it finds a date with information \n date = find_usable_date(try_date)\n \n return jsonify(str(date))", "def dateConvertor(obj):\n return obj.strftime('%Y-%m-%d %H:%M:%S') if type(obj) is datetime.datetime else obj", "def convert_date_type(self):\r\n # Convert Date from Object to Datetime datatype\r\n self.all_data[self._date] = pd.to_datetime(self.all_data[self._date], dayfirst = True)\r\n\r\n # Set Index\r\n self.all_data = self.all_data.set_index(self._date)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that when application/json is requested by the client that the renderer returns unindented JSON.
def test_renderer_works_correctly_with_application_json(self): rendered = self.renderer.render( data=self.data, media_type="application/json", renderer_context={"indent": 4}, ) reloaded = orjson.loads(rendered) self.assertEqual(reloaded, self.data)
[ "def _json_request(self):\n return 'json' in self._get_content_type()", "def test_renderer_works_correctly_with_browsable_api(self):\n rendered = self.renderer.render(\n data=self.data,\n media_type=\"text/html\",\n renderer_context={\"indent\": 4},\n )\n\n self.assertEqual(rendered, json.dumps(self.data, indent=4))", "def print_non_standard(data):\n\n format = request.values.get(\"format\", \"classic\")\n if format == \"json\":\n return jsonify(data)\n else:\n return jsonify(dict(result=1, message=\"success\", epidata=data))", "def wants_json(mimetypes):\n best = mimetypes.best_match(['application/json', 'text/html'])\n return best == 'application/json'", "def _utf8_encoded_json(request):\r\n content_type = request.META.get('CONTENT_TYPE', '')\r\n parts = content_type.split(';')\r\n if (len(parts) != 2 or\r\n parts[0].strip().lower() != 'application/json' or\r\n parts[1].strip().lower() != 'charset=utf-8'):\r\n return False\r\n return True", "def accepts_only_json_request(f):\n @functools.wraps(f)\n def decorated_function(*args, **kwargs):\n if not request.is_json:\n return status_406()\n return f(*args, **kwargs)\n return decorated_function", "def expect_json(view_function):\n @wraps(view_function)\n def parse_json_into_request(request, *args, **kwargs):\n # cdodge: fix postback errors in CMS. The POST 'content-type' header can include additional information\n # e.g. 'charset', so we can't do a direct string compare\n if \"application/json\" in request.META.get('CONTENT_TYPE', '') and request.body:\n try:\n request.json = json.loads(request.body.decode('utf8'))\n except ValueError:\n return JsonResponseBadRequest({\"error\": \"Invalid JSON\"})\n else:\n request.json = {}\n\n return view_function(request, *args, **kwargs)\n\n return parse_json_into_request", "def accept_json(request):\n def decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n try:\n json.loads(request.data.decode())\n if request.json:\n return func(*args, **kwargs)\n except Exception:\n resp = make_response(json.dumps({'error': 'Invalid JSON'}), 422)\n resp.mimetype = 'application/json'\n return resp\n resp = make_response(json.dumps({'error': 'Unsupported media type'}), 415)\n resp.mimetype = 'application/json'\n return resp\n return wrapper\n return decorator", "def render_json(self, d):\n\t\tself.set_flag(\"render\", False)\n\t\tself.response.headers[\"Content-Type\"] = \"application/json\"\n\t\tjson_txt = json.dumps(d)\n\t\tself.response.out.write(json_txt)", "def format_response(self, json_response):\n raise NotImplementedException", "def is_json(response):\n\n for left, right in [(response.getcode(), 200),\n (response.info().getmaintype(), \"application\"),\n (response.info().getsubtype(), \"json\")]:\n if left != right:\n return False\n\n return True", "def test_json_format(self):\n reformatted = json.dumps(\n self.json,\n indent=4,\n ensure_ascii=False,\n sort_keys=True,\n )\n reformatted += \"\\n\"\n if self.json_str != reformatted:\n self.json_path.write_text(reformatted)\n self.fail(\"JSON file is not formatted correctly, Fixing...\")", "def test_request_not_json(self):\n result = self.client.post(\n '/api/v1/orders/',\n content_type = 'text/css',\n data=json.dumps(self.order)\n )\n self.assertEqual(result.status_code,401)\n self.assertIn('Content-type must be application/json',str(result.data))", "def __to_format(self, response):\n if self.format == 'json':\n return response.json()", "def _ensure_json_supported(self):\n self._ensure_support({\n 'version': (2, 4, 0),\n 'label': 'JSON API'\n })", 
"def validate_json(request):\n if not request.is_json:\n print(\"Warning! Bad content-type '{}' in payload\".format(request.content_type))\n raise UnsupportedMediaType\n try:\n json_payload = request.get_json()\n return json_payload\n except Exception as e:\n bad_request_error = BadRequest()\n bad_request_error.description = '{}'.format(e)\n raise bad_request_error", "def test_renderer_works_correctly_with_return_dict(self):\n rendered = self.renderer.render(\n data=ReturnDict(self.data, serializer=None),\n media_type=\"application/json\",\n renderer_context={},\n )\n reloaded = orjson.loads(rendered)\n\n self.assertEqual(reloaded, self.data)", "def test_loads_trailing(self):\n assert orjson.loads(\"{}\\n\\t \") == {}", "def json_middleware(f):\n def handle(*args, **kwargs):\n result = f(*args, **kwargs)\n response = response_from(result)\n try:\n response.content = json.dumps(response.content, indent=4)\n except TypeError:\n return response\n response.headers['Content-Type'] = 'application/json'\n return response\n return handle" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that collections.OrderedDict is serialized correctly.
def test_renderer_works_correctly_with_ordered_dict(self): rendered = self.renderer.render( data=OrderedDict(self.data), media_type="application/json", renderer_context={}, ) reloaded = orjson.loads(rendered) self.assertEqual(reloaded, self.data)
[ "def _setup_ordered_dict(self):\n\n ruamel.yaml.representer.RoundTripRepresenter.add_representer(\n collections.OrderedDict,\n ruamel.yaml.representer.RoundTripRepresenter.represent_ordereddict\n )", "def test_dump_sorted_json_string(self):\n sorted_str = JsonStats.Utils.dump_sorted_json_string(self._unsorted_dict)\n\n self.assertEqual(self._expected_sorted_dict, sorted_str,\n msg=\"Expected '%s', got '%s'\" %\n (self._expected_sorted_dict, sorted_str))", "def test_serialize_a_order(self):\n uuid_str = str(uuid.uuid4())\n order = Order(uuid=uuid_str, product_id = 1, customer_id = 1, price = 10, quantity = 1)\n data = order.serialize()\n self.assertNotEqual(data, None)\n self.assertIn('id', data)\n self.assertEqual(data['id'], None)\n self.assertIn('uuid', data)\n self.assertEqual(data['uuid'], uuid_str)\n self.assertIn('product_id', data)\n self.assertEqual(data['product_id'], 1)\n self.assertIn('customer_id', data)\n self.assertEqual(data['customer_id'], 1)\n self.assertIn('price', data)\n self.assertEqual(data['price'], 10)\n self.assertIn('quantity', data)\n self.assertEqual(data['quantity'], 1)", "def ordereddict_to_dict(self, value):\n for k, v in value.items():\n if type(v) == OrderedDict:\n value[k] = self.ordereddict_to_dict(v)\n if type(v) == list:\n for item in v:\n if type(item) == OrderedDict:\n v.remove(item)\n v.append(self.ordereddict_to_dict(item))\n return dict(value)", "def fix_deletion_utils_datastructures_SortedDict(utils):\n from django.utils import datastructures as dj_datastructures\n from ..django_legacy.django1_9.datastructures import SortedDict as SortedDictCompat\n utils.inject_class(dj_datastructures, \"SortedDict\", SortedDictCompat)", "def fix_deletion_utils_datastructures_SortedDict(utils):\n from django.utils import datastructures as dj_datastructures\n from ..django_legacy.django1_9.datastructures import SortedDict as SortedDictCompat\n\n utils.inject_class(dj_datastructures, \"SortedDict\", SortedDictCompat)", "def test_dict_parameterized_serializer_none(self):\n none_data = self.dict_field_data.copy()\n none_data[\"types\"] = dict(self.dict_field_data[\"types\"])\n none_data[\"types\"][\"foo-type\"] = None\n none = ExampleDictFieldSerializer(data=none_data)\n none.is_valid(raise_exception=True)\n self.assertEqual(\n none.data, none_data, 'Wrong serializer reproduction')", "def test_to_dict_not_dunder_dict(self):\n bm = BaseModel()\n self.assertNotEqual(bm.to_dict(), bm.__dict__)", "def encode_test(self, obj, expected):\n self.assertEqual(json.dumps(obj, sort_keys=True, cls=policy.ConfigEncoder), expected)", "def get_serialized_dumps(ascending: bool = False) -> List[dict]:\n ...", "def test_deserialize_bad_data(self):\n data = \"this is not a dictionary\"\n order = Order()\n self.assertRaises(DataValidationError, order.deserialize, data)", "def test_init_odict(self):\n odict = collections.OrderedDict(one=1)\n params = insightiq_api.Parameters(odict)\n\n expected = [['one', 1]]\n actual = params._data\n\n self.assertEqual(expected, actual)", "def test_to_dictionary(self):\n self.assertTrue(type(self.equad.to_dictionary()) is dict)", "def test_emptydict_json(self):\n dic = Base.to_json_string([{}])\n self.assertEqual(dic, \"[{}]\")", "def test_serialization(self):\n dags = collect_dags()\n serialized_dags = {}\n for v in dags.values():\n dag = SerializedDAG.to_dict(v)\n SerializedDAG.validate_schema(dag)\n serialized_dags[v.dag_id] = dag\n\n # Compares with the ground truth of JSON string.\n actual, expected = self.prepare_ser_dags_for_comparison(\n 
actual=serialized_dags[\"simple_dag\"],\n expected=serialized_simple_dag_ground_truth,\n )\n assert actual == expected", "def _ensure_data_is_dict(self):\n if not isinstance(self.data, dict):\n raise wtf_validators.ValidationError(\n self.gettext(\"Not a valid dictionary (list input detected).\")\n )", "def _writeDict(self, o):\n for key, val in o.iteritems():\n self.writeString(key, False)\n self.writeElement(val)", "def test_dict_parameterized_serializer_type(self):\n type_data = self.dict_field_data.copy()\n type_data[\"types\"] = [self.dict_field_data[\"types\"]]\n wrong_type = ExampleDictFieldSerializer(data=type_data)\n with self.assertRaises(exceptions.ValidationError) as cm:\n wrong_type.is_valid(raise_exception=True)\n self.assertIn(\n 'expected a dictionary of items',\n cm.exception.detail[\"types\"][0].lower(),\n 'Wrong dict type validation error')", "def assertIsDictionary(self, value):\n self.assertTrue(isinstance(value, dict), \"%s %s\" % (type(value), value))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that decimal.Decimal is serialized correctly when rest_framework.settings.api_settings.COERCE_DECIMAL_TO_STRING=True
def test_renderer_works_correctly_with_decimal_as_str(self): api_settings.COERCE_DECIMAL_TO_STRING = True rendered = self.renderer.render( data=Decimal("1.0"), media_type="application/json", renderer_context={}, ) reloaded = orjson.loads(rendered) self.assertEqual(reloaded, str(Decimal("1.0")))
[ "def test_decimal_encoding(self):\n ones = '1.111111111111111111'\n self.assertEqual(self.conn._encode_json({'hi': Decimal(ones)}),\n '{\"hi\": %s}' % ones)", "def test_encode_decimal(self):\n d = Decimal(3.14)\n assert d == float(d)", "def validate_decimaltype(x):\n if not (isinstance(x, str) and is_decimal.match(x)):\n raise SwaggerValidationError()", "def test_decimal():\n assert hug.types.decimal(\"1.1\") == Decimal(\"1.1\")\n assert hug.types.decimal(\"1\") == Decimal(\"1\")\n assert hug.types.decimal(1.1) == Decimal(1.1)\n with pytest.raises(ValueError):\n hug.types.decimal(\"bacon\")", "def transform_decimal(val, *modes):\n try:\n return decimal.Decimal(val)\n except (TypeError, decimal.InvalidOperation):\n if 'raises' in modes:\n raise\n return val", "def test_get_decimal(self):\n row = {\"col1\": Decimal(\"100.00\")}\n self.assertEqual(get_decimal(row, \"col1\"), Decimal(\"100.00\"))", "def any_decimal(min_value=Decimal(0), max_value=Decimal('99.99'), decimal_places=2):\n tpl = '{{:.{}f}}'.format(decimal_places)\n return Decimal(tpl.format(any_float(min_value=float(min_value),\n max_value=float(max_value),\n precision=decimal_places)))", "def to_internal_value(self, value):\n try:\n value = Money(value)\n except IncorrectMoneyInputError:\n self.fail('invalid')\n\n return self.validate_precision(value)", "def test_allowed_decimal_places(self):\n test_cases = ((\"sell_amount\", 2), (\"buy_amount\", 2), (\"rate\", 6))\n value = \"1.0987654321\"\n non_decimal_places = 2\n for field, expected_places in test_cases:\n trade = self.factory.make_trade(save=False)\n setattr(trade, field, value[: non_decimal_places + expected_places])\n trade.full_clean()\n trade.save()\n trade.refresh_from_db()\n # overflow by one digit\n setattr(trade, field, value[: non_decimal_places + expected_places + 1])\n with self.assertRaises(\n ValidationError,\n msg=f\"Expected {field} with {value} to raise ValidationError.\",\n ):\n trade.full_clean()", "def test_query_parse_float_to_decimal(self):\n responses.add(\n responses.GET,\n re.compile(\n r'^https://.*/query/\\?q=SELECT\\+currency\\+FROM\\+Account$'\n ),\n body='{\"currency\": 1.0}',\n status=http.OK,\n )\n session = requests.Session()\n client = Salesforce(\n session_id=tests.SESSION_ID,\n instance_url=tests.SERVER_URL,\n session=session,\n parse_float=decimal.Decimal,\n )\n\n result = client.query('SELECT currency FROM Account')\n self.assertIsInstance(result[\"currency\"], decimal.Decimal)\n self.assertNotIsInstance(result[\"currency\"], float)\n self.assertEqual(result, {\"currency\": decimal.Decimal(1.0)})\n self.assertEqual(result, {\"currency\": 1.0})\n self.assertNotEqual(result, {\"currency\": \"1.0\"})", "def test_base_class_print_unchanged(self):\n self.assertTrue((NumberPrettify().prettify('0.1234') == '0.1234'), msg=\"Base Class prettify should return as is\") \n self.assertTrue((NumberPrettify().prettify('1234') == '1234'), msg=\"Base Class prettify should return as is\") \n self.assertTrue((NumberPrettify().prettify('0.000') == '0.000'), msg=\"Base Class prettify should return as is\")", "def test_convert_amount():\n money = convert_amount(\"1.000,00€\")\n assert money.amount == Decimal(\"1000.00\")\n assert money.currency == EUR", "def skip_or_run_decimal_test(func):\n\n return skip_or_run_test_pcall_require(func, 'decimal',\n 'does not support decimal type')", "def validate_precision(self, value):\n _, digittuple, exponent = value.amount.as_tuple()\n\n if exponent >= 0:\n # 1234500.0\n total_digits = len(digittuple) + exponent\n 
whole_digits = total_digits\n decimal_places = 0\n elif len(digittuple) > abs(exponent):\n # 123.45\n total_digits = len(digittuple)\n whole_digits = total_digits - abs(exponent)\n decimal_places = abs(exponent)\n else:\n # 0.001234\n total_digits = abs(exponent)\n whole_digits = 0\n decimal_places = total_digits\n\n if self.max_digits is not None and total_digits > self.max_digits:\n self.fail('max_digits', max_digits=self.max_digits)\n if self.decimal_places is not None and decimal_places > self.decimal_places:\n self.fail('max_decimal_places', max_decimal_places=self.decimal_places)\n if self.max_whole_digits is not None and whole_digits > self.max_whole_digits:\n self.fail('max_whole_digits', max_whole_digits=self.max_whole_digits)\n\n return value", "def validate_price_precision(value: Optional[\"Decimal\"], currency: str):\n\n # check no needed when there is no value\n if not value:\n return\n currency_fraction = get_currency_fraction(currency)\n value = value.normalize()\n if value.as_tuple().exponent < -currency_fraction:\n raise ValidationError(\n f\"Value cannot have more than {currency_fraction} decimal places.\"\n )", "def test_validate_wrong_format_data(self):\n self.user_data[\"dependents\"] = \"wrong format\"\n serializer = QuoteSerializer(data=self.user_data)\n assert serializer.is_valid() == False", "def test03_add_account_with_decimal_limitations(self):\n self.lg('%s STARTED' % self._testID)\n self.lg('create new account %s with decimal limitations' % self.account)\n max_memory = '3.5'\n self.Accounts.create_new_account(self.account, self.admin_username+\"@itsyouonline\", max_memory=max_memory)\n self.Accounts.open_account_page(self.account)\n account_maxmemory = self.get_text(\"account_page_maxmemory\")\n self.assertTrue(account_maxmemory.startswith(max_memory), \"Account max memory is [%s]\"\n \" and expected is [%s]\" % (\n account_maxmemory, max_memory))\n\n self.lg('%s ENDED' % self._testID)", "def test_should_be_a_field_price(self):\n field = Product._meta.get_field('price')\n self.assertIsInstance(field, models.DecimalField)", "def payout_decimal(self, payout_decimal):\n\n self._payout_decimal = payout_decimal" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that rest_framework.utils.serializer_helpers.ReturnDict is serialized correctly.
def test_renderer_works_correctly_with_return_dict(self): rendered = self.renderer.render( data=ReturnDict(self.data, serializer=None), media_type="application/json", renderer_context={}, ) reloaded = orjson.loads(rendered) self.assertEqual(reloaded, self.data)
[ "def test_if_to_dict_returns_dict(self):\n b = BaseModel()\n self.assertTrue(type(b.to_dict()) is dict)", "def test_default_required_output_for_dict(self):\n class ExampleSerializer(serializers.Serializer):\n omitted = serializers.CharField(default='abc')\n included = serializers.CharField()\n\n serializer = ExampleSerializer({'included': 'abc'})\n with pytest.raises(KeyError):\n serializer.data", "def test_dict_parameterized_serializer_none(self):\n none_data = self.dict_field_data.copy()\n none_data[\"types\"] = dict(self.dict_field_data[\"types\"])\n none_data[\"types\"][\"foo-type\"] = None\n none = ExampleDictFieldSerializer(data=none_data)\n none.is_valid(raise_exception=True)\n self.assertEqual(\n none.data, none_data, 'Wrong serializer reproduction')", "def test_dict_parameterized_serializer_type(self):\n type_data = self.dict_field_data.copy()\n type_data[\"types\"] = [self.dict_field_data[\"types\"]]\n wrong_type = ExampleDictFieldSerializer(data=type_data)\n with self.assertRaises(exceptions.ValidationError) as cm:\n wrong_type.is_valid(raise_exception=True)\n self.assertIn(\n 'expected a dictionary of items',\n cm.exception.detail[\"types\"][0].lower(),\n 'Wrong dict type validation error')", "def test_to_dict_not_dunder_dict(self):\n bm = BaseModel()\n self.assertNotEqual(bm.to_dict(), bm.__dict__)", "def get_serialization_data(self, serializer: 'Serializer') -> Dict[str, Any]:", "def test_to_dictionary(self):\n self.assertTrue(type(self.equad.to_dictionary()) is dict)", "def _attempt_json_conversion(_response, _return_json):\n if _return_json and not isinstance(_response, dict):\n try:\n _response = _response.json()\n except Exception as _exc_msg:\n _exc_name = type(_exc_msg).__name__\n _error_msg = f\"Failed to convert to JSON due to the following exception: {_exc_name}: {_exc_msg}\"\n logger.error(_error_msg)\n errors.handlers.eprint(_error_msg)\n return _response", "def test_sanitize_params_only_jsonifies_dicts_lists(self):\n payload = {'message': 'abc', 'privacy': self.privacy_dict, 'xyz': ['a','b']}\n retval = self.c._sanitize_params(payload)\n self.assertEquals(retval, \n {'access_token': self.access_token,\n 'message': 'abc', \n 'privacy': json.dumps(self.privacy_dict),\n 'xyz': json.dumps(['a','b'])})", "def test_to_dict_with_args(self):\n b = BaseModel()\n with self.assertRaises(TypeError):\n b.to_dict(None)", "def test_api_keys_result_serialization(self):\n\n # Construct a json representation of a ApiKeysResult model\n api_keys_result_model_json = {}\n api_keys_result_model_json['ok'] = True\n api_keys_result_model_json['key'] = 'testString'\n api_keys_result_model_json['password'] = 'testString'\n\n # Construct a model instance of ApiKeysResult by calling from_dict on the json representation\n api_keys_result_model = ApiKeysResult.from_dict(api_keys_result_model_json)\n assert api_keys_result_model != False\n\n # Construct a model instance of ApiKeysResult by calling from_dict on the json representation\n api_keys_result_model_dict = ApiKeysResult.from_dict(api_keys_result_model_json).__dict__\n api_keys_result_model2 = ApiKeysResult(**api_keys_result_model_dict)\n\n # Verify the model instances are equivalent\n assert api_keys_result_model == api_keys_result_model2\n\n # Convert model instance back to dict and verify no loss of data\n api_keys_result_model_json2 = api_keys_result_model.to_dict()\n assert api_keys_result_model_json2 == api_keys_result_model_json", "def _return_serialized_api_response(cls):\n # we prepare a json response\n returned_api_response = 
cls.api_response.serialize()\n # we clean up the API response object for the next call\n cls.api_response = APIResponse()\n return returned_api_response", "def test_no_to_dict():\n value = mock.MagicMock()\n del value.to_dict\n\n with pytest.raises(exceptions.InvalidModelInstanceError):\n object_._convert_relationship(value=value)", "def asdict(self):", "def make_boto_response_json_serializable(response_obj: Any) -> Any:\n # Types that should pass through this function unmodified\n PASS_THROUGH_TYPES = (str, int, float, bool, type(None))\n\n if isinstance(response_obj, PASS_THROUGH_TYPES):\n return response_obj\n elif isinstance(response_obj, dict):\n return {\n key: make_boto_response_json_serializable(value)\n for key, value in response_obj.items()\n }\n elif isinstance(response_obj, datetime):\n return str(response_obj)\n elif isinstance(response_obj, StreamingBody):\n return response_obj.read().decode(\"utf-8\")\n elif isinstance(response_obj, (list, tuple)):\n return [make_boto_response_json_serializable(value) for value in response_obj]\n else:\n raise SoclessException(\n f\"Attempting to serialize unsupported object type of {type(response_obj)}. The integration will need to be updated to support this type\"\n )", "def clean_payload(payload: typing.Dict[str, typing.Any]) -> typing.Dict[str, typing.Any]:\n return Serializer._clean_item(payload)", "def test_todict(self):\n i = self.value()\n n = i.to_dict()\n self.assertEqual(i.to_dict(), n)", "def bad_api_response_fixture() -> dict[str, Any]:\n return cast(dict[str, Any], json.loads(load_fixture(\"bad_api_response.json\")))", "def test_to_dictionary(self):\n self.assertDictEqual(self.payment.to_dictionary(), {\n \"trader_id\": \"32\" * 20,\n \"transaction_number\": 2,\n \"transferred\": {\n \"amount\": 3,\n \"type\": \"BTC\"\n },\n \"payment_id\": 'aaa',\n \"address_from\": 'a',\n \"address_to\": 'b',\n \"timestamp\": 4000,\n \"success\": True\n })" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that rest_framework.utils.serializer_helpers.ReturnList is serialized correctly.
def test_renderer_works_correctly_with_return_list(self): test_list = [{"1": 1}] rendered = self.renderer.render( data=ReturnList(test_list, serializer=None), media_type="application/json", renderer_context={}, ) reloaded = orjson.loads(rendered) self.assertEqual(reloaded, test_list)
[ "def test_json_list():\n resources = wrapped_resource(MockResponse(json.dumps([{'foo': 'bar'}]),\n encoding='utf-8'))\n assert isinstance(resources, ResourceList)\n eq_(1, len(resources))\n eq_('bar', resources[0].foo)", "def test_list_cast(self):\n self.plist = PaginatedResourceList(int, self.endpoint)\n\n entire_list = list(self.plist)\n self.assertEqual(entire_list, list(range(self.total)))\n self.assertEqual(len(responses.calls), self.lazy_pages(self.total-1))", "def test_serialize_deserialize_list_type(self):\n type_ = ListType\n tests = [\n ([\"string\", 1, 1.23, True], [\"string\", 1, 1.23, True], Type),\n ([\"string\", \"str\"], [\"string\", \"str\"], StringType),\n ]\n for value, result, list_obj_type in tests:\n deserialized_value = type_.deserialize(\n type_.serialize(value, list_obj_type),\n list_obj_type\n )\n self.assertEqual(deserialized_value, result)\n self.assertEqual(type(deserialized_value), type(result))", "def marshal_list_with(self, model, enveloppe):\n def wrapper(fn, *args, **kwargs):\n import pdb; pdb.set_trace()\n fn(*args, **kwargs)\n\n return wrapper", "def test_me_get_list(self):\n pass", "def ensure_list(fun):\n @functools.wraps(fun)\n def func_wrapper(*args, **kwargs):\n list_ = fun(*args, **kwargs)\n if list_ and not isinstance(list_, list):\n return [list_]\n else:\n return list_\n return func_wrapper", "def make_response(self, data, *args, **kwargs):\n # we've already got a response, eg, from jsonify\n if isinstance(data, Response):\n return (data, *args)\n\n if isinstance(data, (list, tuple)) and len(data) and isinstance(data[0], Model):\n model_name = data[0].__class__.__name__\n if model_name in self.serializers_many:\n data = self.serializers_many[model_name].dump(data).data\n\n # we got the result of serializer.dump(obj)\n if isinstance(data, MarshalResult):\n data = data.data\n\n # we got plain python data types that need to be serialized\n return super().make_response(data, *args, **kwargs)", "def test_get_list(self):\n\t\tinput = get_list('./tests/sample.json')\n\t\tassert isinstance(input, list)", "def test_is_list(self):\n self.assertEqual(type(self.randomcode),list, 'Code not a list')", "def test_is_list_false(test_rlp_reader_contract):\n contract = test_rlp_reader_contract\n rlp_encoded_item = rlp.encode(1)\n\n assert contract.functions.testIsList(rlp_encoded_item).call() is False", "def test_default_func_list(self):\n ref = Custom()\n\n def default(obj):\n if isinstance(obj, Custom):\n return [str(obj)]\n\n assert orjson.dumps({\"a\": ref}, default=default) == b'{\"a\":[\"%s\"]}' % str(\n ref\n ).encode(\"utf-8\")", "def test_is_list_true(test_rlp_reader_contract):\n contract = test_rlp_reader_contract\n rlp_encoded_item = rlp.encode([1, 2, 3])\n\n assert contract.functions.testIsList(rlp_encoded_item).call() is True", "def test_list(self):\n payloads = [\n b'payload A',\n b'second payload'\n b'payload 3+'\n ]\n res = []\n provider = payload_provider.List(payloads)\n for payload in provider:\n res.append(payload)\n for num, payload in enumerate(payloads):\n self.assertEqual(res[num], payload, 'Payload not expected in position {0}'.format(num))", "def test_serialize_a_wishlist(self):\n wishlist = Wishlist(name=\"wishlist_name\", customer_id=1234)\n data = wishlist.serialize()\n self.assertNotEqual(data, None)\n self.assertIn('id', data)\n self.assertEqual(data['id'], None)\n self.assertIn('name', data)\n self.assertEqual(data['name'], \"wishlist_name\")\n self.assertIn('customer_id', data)\n self.assertEqual(data['customer_id'], 1234)", "def 
test_withListCompleted(self):\n self.assertWellFormedRequest({\"listCompleted\": True})", "def test_return_list_type(self):\n data_input = functions.invest_dataframe(FILE_NAME)\n start = TEST_START\n end = TEST_END\n out_return = functions.return_list(data_input, start, end)\n self.assertEqual(np.ndarray, type(out_return))", "def is_list(obj):\n return isinstance(obj, list)", "def _check_is_list(obj):\n return isinstance(obj, (list, List))", "def test_group_by_params_string_list_fields(self):\n group_params = {\"node\": \"localhost\"}\n serializer = GroupBySerializer(data=group_params)\n validation = serializer.is_valid()\n self.assertTrue(validation)\n node_result = serializer.data.get(\"node\")\n self.assertIsInstance(node_result, list)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that numpy.int is serialized correctly with Python's builtin json module.
def test_built_in_renderer_works_correctly_with_numpy_int(self): data = numpy.int32(0) rendered = self.renderer.render( data=data, media_type="text/html", renderer_context={ "django_encoder_class": DjangoNumpyJSONEncoder, "indent": 4, }, ) reloaded = orjson.loads(rendered) self.assertEqual(reloaded, data)
[ "def jsonDefault(obj):\n import numpy\n if isinstance(obj, numpy.integer): return int(obj)\n raise TypeError(\"%s (%s) is not JSON serializable\" % (repr(obj), type(obj)))", "def json_try_int(js):\n for key in js.keys():\n try:\n val_i = int(js[key])\n js[key] = val_i\n except ValueError:\n pass", "def test_option_invalid_int(self):\n with pytest.raises(orjson.JSONEncodeError):\n orjson.dumps(True, option=9223372036854775809)", "def test_option_not_int(self):\n with pytest.raises(orjson.JSONEncodeError):\n orjson.dumps(True, option=True)", "def json_encoder(obj):\n if isinstance(obj, np.generic):\n return np.asarray(obj).item()\n # The following are not used by this app but commonly used by db apps.\n # For Decimal [from decimal import Decimal] at the top of the file.\n #\n # if isinstance(obj, datetime):\n # return obj.isoformat()\n # if isinstance(obj, Decimal):\n # return float(obj)\n raise TypeError('Type {obj.__class__} is not handled for encoding'.format(**locals()))", "def test_write_simple_int_to_array(self):\n sample_json = {'arr': [{'key': 'val1'}, {'key':'val2'}]}\n result = chrome_defaults._recursive_write(\n sample_json, 'arr', value='val3', delete_attrib=False,\n child_name='key', where_clause=None)\n self.assertEqual(result['arr'][0]['key'], 'val3')\n self.assertEqual(result['arr'][1]['key'], 'val3')\n self.assertEqual(len(result['arr']), 2)", "def test_simple_avu_to_json_integer(self):\n data = [{'a': 'k1', 'u': 'root_0_n', 'v': '5'}]\n json_output = jsonavu.avu2json(data, \"root\")\n self.assertEqual('{\"k1\": 5}', json.dumps(json_output))", "def ensure_json_serializable(value):\n import numpy as np\n if isinstance(value, np.ndarray):\n return value.tolist()\n elif isinstance(value, np.float):\n return float(value)\n elif isinstance(value, np.integer):\n return int(value)\n elif isinstance(value, dict):\n new_dict = {}\n for k,v in value.iteritems():\n new_dict[k] = ensure_json_serializable(v)\n return new_dict\n elif isinstance(value, list):\n new_list = []\n for element in value:\n new_list.append(ensure_json_serializable(element))\n return new_list\n else:\n return value", "def test_default(self):\n complex_num = 1 - 1j\n expected_str = '1-1*j'\n json_str = json.dumps(complex_num, cls=ComplexEncoder)\n assert expected_str == json_str[1:(- 1)]\n # ignore quotes", "def test_deserialise(self, serialised):\n\t\tresult = integer_module.deserialise(serialised)\n\t\tself.assertIsInstance(result, int)", "def test_is_serialised(self, serialised):\n\t\tself.assertTrue(integer_module.is_serialised(serialised), \"This must be identified as a serialised integer.\")", "def test_simple_json_to_avu_integer(self):\n data = json.loads('{\"k1\": 5 }')\n avu = jsonavu.json2avu(data, \"root\")\n self.assertEqual([{'a': 'k1', 'u': 'root_0_n', 'v': '5'}], avu)", "def is_integer(obj):\n return isinstance(obj, (int, np.integer))", "def _validate(x):\n if not isinstance(x, int):\n raise TypeError(\"Only Integer Arrays are allowed\")", "def serialize_numpy(self, buff, numpy):\n try:\n pass\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(_x))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(_x))))", "def test_is_not_serialised(self, serialised):\n\t\tself.assertFalse(integer_module.is_serialised(serialised), \"This must not be identified as a serialised integer.\")", "def _element_to_serializable(obj: Any) -> Any:\n if isinstance(obj, bytes):\n obj = 
obj.decode('utf-8')\n\n elif isinstance(obj, np.generic):\n obj = obj.item()\n\n return obj", "def test_representation(self):\n assert str(ToNumpy()) == \"ToNumpy()\"", "def _to_json_default(obj):\n # Datetime\n if isinstance(obj, datetime.datetime):\n return obj.isoformat()\n\n # UUID\n if isinstance(obj, uuid.UUID):\n return str(obj)\n\n # numpy\n if hasattr(obj, 'item'):\n return obj.item()\n\n # # Enum\n # if hasattr(obj, 'value'):\n # return obj.value\n\n try:\n return obj.id\n except Exception:\n raise TypeError('{obj} is not JSON serializable'.format(obj=repr(obj)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check the referenced pod is ready, without refreshing the status from the k8s API. Using the kubetest `is_ready` we end up calling the API each time, where as we already have the info we want after calling `kube.get_pods`. The additional calls occasionally result in API errors so we want to reduce the chance for failure and therefore test flakiness. This is a copy of the kubetest `Pod.is_ready`
def is_pod_ready(pod) -> bool: status = pod.obj.status if status is None: return False # check the pod phase to make sure it is running. a pod in # the 'failed' or 'success' state will no longer be running, # so we only care if the pod is in the 'running' state. phase = status.phase if phase.lower() != "running": return False for cond in status.conditions: # we only care about the 'ready' condition if cond.type.lower() != "ready": continue # check that the readiness condition is true return cond.status.lower() == "true" # Catchall return False
[ "def is_up_and_running(self, pod_name, ocp_value):\n if not pod_name:\n return False\n pod_info = ocp_value.exec_oc_cmd(f\"get pods {pod_name} -o json\")\n if pod_info['status']['containerStatuses'][0]['ready']:\n if 'running' in pod_info['status']['containerStatuses'][0]['state']:\n return True\n return False", "def wait_for_container_status_ready(pod: Pod):\n logger.info(f\"Wait for container of the pod {pod.name} move to Running state\")\n\n def do_wait_for_container_status_ready(pod_obj: Pod, timeout=300):\n logger.info(f\"Waiting for container status ready for {timeout}s\")\n start_time = time.time()\n while (\n pod_obj.get()[\"status\"][\"containerStatuses\"][0][\"ready\"] is False\n and time.time() - start_time < timeout\n ):\n logger.info(\"Waiting for container status ready\")\n time.sleep(5)\n return pod_obj.get()[\"status\"][\"containerStatuses\"][0][\"ready\"]\n\n retry(\n CommandFailed,\n text_in_exception=\"can't read container state of busybox deployment\",\n func=do_wait_for_container_status_ready,\n )(pod)", "def test_are_worker_nodes_ready(self, setup_params):\n node_api_obj = setup_params[\"node_api_obj\"]\n api_response_node_status = node_api_obj.are_worker_nodes_ready()\n assert api_response_node_status is True", "def get_ready_noobaa_endpoint_count(namespace):\n pods_info = get_pods_having_label(\n label=constants.NOOBAA_ENDPOINT_POD_LABEL, namespace=namespace\n )\n ready_count = 0\n for ep_info in pods_info:\n container_statuses = ep_info.get(\"status\", {}).get(\"containerStatuses\")\n if container_statuses is not None and len(container_statuses) > 0:\n if container_statuses[0].get(\"ready\"):\n ready_count += 1\n return ready_count", "def oc_verify_health(self):\n return (\n OCP(\n kind=\"namespacestore\",\n namespace=config.ENV_DATA[\"cluster_namespace\"],\n resource_name=self.name,\n ).get()[\"status\"][\"phase\"]\n == constants.STATUS_READY\n )", "def readyCheck(self, container, timeout):\n self.logger.debug('Checking if component %s is ready...', self.getName()) \n checks = []\n for check in self.config.ready_checks:\n checks.append((check, buildHealthCheck(check)))\n \n start = time.time()\n while True:\n now = time.time()\n if now - start > timeout:\n # Timed out completely.\n self.logger.debug('Component %s ready checks have timed out')\n return False\n \n # Try each check. 
If any fail, we'll sleep and try again.\n check_failed = None\n for (config, check) in checks:\n report('Running health check: ' + config.getTitle(), component = self)\n result = check.run(container, report)\n if not result:\n report('Health check failed', component = self)\n check_failed = config\n break\n \n if check_failed:\n report('Sleeping ' + str(check_failed.timeout) + ' second(s)...', component = self)\n time.sleep(check_failed.timeout)\n else:\n break\n \n return True", "def _api_ready(relation, key):\n ready = 'no'\n for rid in relation_ids(relation):\n for unit in related_units(rid):\n ready = relation_get(attribute=key, unit=unit, rid=rid)\n return ready == 'yes'", "def mark_all_pods_not_ready(node_name, reason):\n # Get the client.\n kube_client = get_client()\n\n # Retrieve the pods on the specified node.\n response = kube_client.list_namespaced_pod(\n \"\", field_selector=\"spec.nodeName=%s\" % node_name)\n\n pods = response.items\n if pods is not None:\n for pod in pods:\n for condition in pod.status.conditions:\n if condition.type == \"Ready\":\n if condition.status != \"False\":\n # Update the Ready status to False\n body = {\"status\":\n {\"conditions\":\n [{\"type\": \"Ready\",\n \"status\": \"False\",\n \"reason\": reason,\n }]}}\n try:\n DLOG.debug(\n \"Marking pod %s in namespace %s not ready\" %\n (pod.metadata.name, pod.metadata.namespace))\n kube_client.patch_namespaced_pod_status(\n pod.metadata.name, pod.metadata.namespace, body)\n except ApiException:\n DLOG.exception(\n \"Failed to update status for pod %s in \"\n \"namespace %s\" % (pod.metadata.name,\n pod.metadata.namespace))\n break\n return", "def check_testbed_ready(self):\n health_check_call = [] # list of tuples: (health check method, prop value)\n for prop in self.testing_props:\n if prop in self._prop_to_health_check:\n health_check_call.append(\n (self._prop_to_health_check[prop], self.testing_props[prop]))\n self._execute_health_check_methods(health_check_call)", "def needs_init(self):\n return (self._node_device_status and (self._node_device_status.tag == 'down'\n or self._node_device_status.tag == 'unready'))", "def any_pods_running(namespace, label):\n pod_data = get_json(\"pod\", label=label, namespace=namespace)\n if not pod_data or not len(pod_data.get(\"items\", [])):\n log.info(\"No pods found for label '%s'\", label)\n return False\n for pod in pod_data[\"items\"]:\n if _check_status_for_restype(\"pod\", pod):\n return True\n return False", "def is_ready(self):\n return (self._node_device_status and self._node_device_status.tag == 'ready'\n and self.gripper_io.is_valid())", "def test_data_view_status_reports_services_ready():\n view_id = \"524\"\n\n status = client.get_data_view_service_status(data_view_id=view_id)\n\n # There is no way to guarantee that this view is not retraining, but the\n # majority of the time it should be in a stable, trained state\n assert status.predict.is_ready()\n assert status.experimental_design.is_ready()\n assert status.data_reports.is_ready()\n assert status.model_reports.is_ready()", "def wait_for_wl_to_complete(self):\n sample = TimeoutSampler(\n timeout=self.timeout_completed,\n sleep=40,\n func=self.pods_expected_status,\n pattern=\"fio-client\",\n expected_num_pods=1,\n expected_status=constants.STATUS_COMPLETED,\n )\n if not sample.wait_for_func_status(result=True):\n raise TimeoutExpiredError(\n f\"fio-client pod did not move to running state after {self.timeout_completed} sec\"\n )", "async def async_wait_start_success(self):\n import asyncio\n from 
jina.serve.runtimes.servers import BaseServer\n\n _timeout = self.args.timeout_ready\n if _timeout <= 0:\n _timeout = None\n else:\n _timeout /= 1e3\n\n timeout_ns = 1e9 * _timeout if _timeout else None\n now = time.time_ns()\n\n check_protocol = getattr(self.args, 'protocol', [\"grpc\"])[0]\n\n async def check_readiness_server():\n self.logger.debug(f'Checking readiness to {self.runtime_ctrl_address} with protocol {check_protocol}')\n ready = await BaseServer.async_is_ready(\n ctrl_address=self.runtime_ctrl_address,\n timeout=_timeout,\n protocol=check_protocol,\n logger=self.logger,\n # Executor does not have protocol yet\n )\n if ready:\n self.logger.debug(f'Server on {self.runtime_ctrl_address} with protocol {check_protocol} is ready')\n else:\n self.logger.debug(f'Server on {self.runtime_ctrl_address} with protocol {check_protocol} is not yet ready')\n return ready\n\n while timeout_ns is None or time.time_ns() - now < timeout_ns:\n if (\n self.ready_or_shutdown.event.is_set()\n and ( # submit the health check to the pod, if it is\n self.is_shutdown.is_set() # a worker and not shutdown\n or not self.args.pod_role == PodRoleType.WORKER\n or (\n await check_readiness_server()\n )\n )\n ):\n self._check_failed_to_start()\n self.logger.debug(__ready_msg__)\n return\n else:\n await asyncio.sleep(0.1)\n\n self._fail_start_timeout(_timeout)", "def is_chartmuseum_up() -> bool:\n cmd = \"kubectl get pods --field-selector status.phase=Running -n test\"\n result = check_output(cmd, shell=True).decode('utf-8')\n logger.info(\"Checking if chartmuseum is UP: %s\", result)\n if result == '':\n logger.info(\"chartmuseum is Down\")\n return False\n logger.info(\"chartmuseum is Up\")\n return True", "def test_are_all_nodes_ready(self, setup_params):\n node_api_obj = setup_params[\"node_api_obj\"]\n api_response_node_status = node_api_obj.are_all_nodes_ready()\n assert api_response_node_status is True", "def all_pods_running(namespace, label):\n pod_data = get_json(\"pod\", label=label, namespace=namespace)\n if not pod_data or not len(pod_data.get(\"items\", [])):\n log.info(\"No pods found for label '%s'\", label)\n return False\n statuses = []\n for pod in pod_data[\"items\"]:\n statuses.append(_check_status_for_restype(\"pod\", pod))\n return len(statuses) and all(statuses)", "def HasIsControllerReady(self):\n return self.__has('IsControllerReady')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the total number of Container restarts for the Pod. This is a copy of the kubetest `Pod.get_restart_count` but without the call to `refresh()` such that we reduce the number of calls to the API, and reduce the chance for flakiness in that call.
def get_pod_restart_count(pod) -> int: container_statuses = pod.obj.status.container_statuses if container_statuses is None: return 0 total = 0 for container_status in container_statuses: total += container_status.restart_count return total
[ "def count_pods(self, name: str) -> int:\n try:\n pods = self.get_pods(name)\n return len(pods)\n except Exception as except_count:\n raise Exception(\"Failed to count number of pods {}\".format(name)) from except_count", "def _collect_service_restart_stats():\n try:\n service_dict = ServiceStateWrapper().get_all_services_status()\n except Exception as e:\n logging.error(\"Could not fetch service status: %s\", e)\n return\n for service_name, status in service_dict.items():\n SERVICE_RESTART_STATUS.labels(\n service_name=service_name,\n status=\"Failure\",\n ).set(status.num_fail_exits)\n SERVICE_RESTART_STATUS.labels(\n service_name=service_name,\n status=\"Success\",\n ).set(status.num_clean_exits)", "def verify_rgw_restart_count_fixture(request):\n if config.ENV_DATA[\"platform\"].lower() in constants.ON_PREM_PLATFORMS:\n log.info(\"Getting RGW pod restart count before executing the test\")\n initial_counts = get_rgw_restart_counts()\n\n def finalizer():\n rgw_pods = get_rgw_pods()\n for rgw_pod in rgw_pods:\n rgw_pod.reload()\n log.info(\"Verifying whether RGW pods changed after executing the test\")\n for rgw_pod in rgw_pods:\n assert rgw_pod.restart_count in initial_counts, \"RGW pod restarted\"\n\n request.addfinalizer(finalizer)", "def faulted_count(self) -> int:\n return pulumi.get(self, \"faulted_count\")", "def RetryCount(self):\n if self.force_auto_sync:\n self.get('RetryCount')\n return self._RetryCount", "def running_count(self) -> int:\n\n n = 0\n for job in self.running_jobs.values():\n n += job.job_spec.cores\n\n return n", "def restart(self, **kwargs): # noqa\n try:\n # Resolve single pod name if short form (cmd-1269180282-1nyfz) is passed\n if 'name' in kwargs and kwargs['name'].count('-') == 2:\n kwargs['name'] = '{}-{}'.format(kwargs['id'], kwargs['name'])\n\n # Iterate over RCs / RSs to get total desired count if not a single item\n desired = 1\n if 'name' not in kwargs:\n desired = 0\n labels = self._scheduler_filter(**kwargs)\n # fetch RS (which represent Deployments)\n controllers = self._scheduler.rs.get(kwargs['id'], labels=labels).json()['items']\n if not controllers:\n controllers = []\n for controller in controllers:\n desired += controller['spec']['replicas']\n except KubeException:\n # Nothing was found\n return []\n\n try:\n tasks = [\n functools.partial(\n self._scheduler.pod.delete,\n self.id,\n pod['name']\n ) for pod in self.list_pods(**kwargs)\n ]\n\n async_run(tasks)\n except Exception as e:\n err = \"warning, some pods failed to stop:\\n{}\".format(str(e))\n self.log(err, logging.WARNING)\n\n # Wait for pods to start\n try:\n timeout = 300 # 5 minutes\n elapsed = 0\n while True:\n # timed out\n if elapsed >= timeout:\n raise DeisException('timeout - 5 minutes have passed and pods are not up')\n\n # restarting a single pod behaves differently, fetch the *newest* pod\n # and hope it is the right one. 
Comes back sorted\n if 'name' in kwargs:\n del kwargs['name']\n pods = self.list_pods(**kwargs)\n # Add in the latest name\n if len(pods) == 0:\n # if pod is not even scheduled wait for it and pass dummy kwargs\n # to indicate restart of a single pod\n kwargs['name'] = \"dummy\"\n continue\n kwargs['name'] = pods[0]['name']\n pods = pods[0]\n\n actual = 0\n for pod in self.list_pods(**kwargs):\n if pod['state'] == 'up':\n actual += 1\n\n if desired == actual:\n break\n\n elapsed += 5\n time.sleep(5)\n except Exception as e:\n err = \"warning, some pods failed to start:\\n{}\".format(str(e))\n self.log(err, logging.WARNING)\n\n # Return the new pods\n pods = self.list_pods(**kwargs)\n return pods", "def running_disparity_count(self):\n return self._running_disparity_count", "def request_reset(self, req):\n # First get the docker container ID\n with open(\"/proc/self/cgroup\", \"r\") as f:\n line = f.readline()\n idx = line.split(\"/\")[-1].strip()\n req.reply(\"ok\",)\n os.system(\"docker restart {}\".format(idx))", "def get_object_count(self, container_name, headers=None, params=None,\n expected_object_count=None, requestslib_kwargs=None):\n\n def success_func(response):\n object_count = response.headers.get('x-container-object-count')\n if not response.ok or object_count is None:\n return False\n if expected_object_count != object_count:\n return False\n return True\n\n response = self.retry_until_success(\n self.client.get_container_metadata,\n func_args=[container_name],\n func_kwargs={'requestslib_kwargs': requestslib_kwargs},\n success_func=success_func,\n max_retries=10)\n\n return int(response.headers.get('x-container-object-count'))", "def installed_patch_count(self) -> int:\n return pulumi.get(self, \"installed_patch_count\")", "def get_count_series(self):\n numb=0\n for rrd_file in list(self._cache.keys()):\n numb += len(self._cache[rrd_file]['values'])\n return numb", "def calculateNumProcesses(self):\n total = 0\n for processList in self.processes.values():\n total+=len(processList)\n return total", "def schedules_count(self) -> float:\n return pulumi.get(self, \"schedules_count\")", "def mgr_pod_node_restart(self):\n mgr_pod_obj = pod.get_mgr_pods()\n mgr_node_obj = pod.get_pod_node(mgr_pod_obj[0])\n\n self.nodes.restart_nodes([mgr_node_obj])\n\n wait_for_nodes_status()\n\n # Check for Ceph pods\n pod_obj = ocp.OCP(\n kind=constants.POD, namespace=config.ENV_DATA[\"cluster_namespace\"]\n )\n assert pod_obj.wait_for_resource(\n condition=\"Running\", selector=\"app=rook-ceph-mgr\", timeout=600\n )\n assert pod_obj.wait_for_resource(\n condition=\"Running\",\n selector=\"app=rook-ceph-mon\",\n resource_count=3,\n timeout=600,\n )\n assert pod_obj.wait_for_resource(\n condition=\"Running\",\n selector=\"app=rook-ceph-osd\",\n resource_count=3,\n timeout=600,\n )", "def core_count_histogram(self) -> 'outputs.ReportSummaryHistogramChartDataResponse':\n return pulumi.get(self, \"core_count_histogram\")", "def getRunCount(self):\r\n runCount = stackless.getruncount() + self.timeKeeper.getSleeperCount()\r\n return (runCount - 1) # subtract the timeKeeper tasklet\r", "def getImageCount(self) -> \"int\":\n return _coin.SoGetPrimitiveCountAction_getImageCount(self)", "def count() -> int:\n return _api_calls.get(Inner._ANNOTATIONS_ENDPOINT + \"count\").json()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
stuff The root tag.
def tag_root(node,context): return node.process_children(context)
[ "def add_root_tag():\n \n # change input_path based on output path of wikiextracter\n input_path = '../../WikipediaDump/output'\n output_path = '../data/wiki_dump'\n \n \n for root, dirs, files in os.walk(input_path):\n for file in files:\n if file.startswith('wiki'):\n cur_dir = str(os.path.basename(os.path.join(root)))\n os.makedirs(os.path.join(output_path, cur_dir), exist_ok=True)\n with open(os.path.join(root, file), 'r') as in_file, open('{}/{}/{}'.format(output_path, cur_dir, file), 'w') as out_file:\n out_file.write('<articles>\\n')\n for line in in_file.readlines():\n out_file.write(line)\n out_file.write('</articles>')", "def handle_starttag(self, tag, attrs) -> None:\n if tag in self.keeptags:\n self.textdata += f'<{tag}>'", "def strip_empty_tags(self):\n tag = self.root\n while True:\n next_tag = tag.findNext(True)\n if not next_tag: break\n if next_tag.contents or next_tag.attrs:\n tag = next_tag\n continue\n next_tag.extract()", "def idmefmessage_tag_tostring(root_tag):\n\n if root_tag:\n simple_root = Element(root_tag)\n else:\n simple_root = Element(\"SimpleRandomRoot\")\n # simple_root.set(\"xmlns:xsi\", \"http://www.w3.org/2001/XMLSchema-instance\")\n # xml_string = tostring(simple_root, short_empty_elements=False, encoding=\"unicode\")\n tmp_stream = StringIO()\n elementtree = ElementTree(simple_root)\n elementtree.write(tmp_stream, encoding='unicode', xml_declaration=True, short_empty_elements=False)\n root_string = tmp_stream.getvalue()\n if root_tag:\n simple_root = Element(root_tag)\n start_roottag = re.search('<.*?>.*?<' + root_tag + '.*?>', root_string, flags=re.DOTALL).group()\n end_roottag = re.search('</' + root_tag + '.*?>', root_string).group()\n else:\n start_roottag = re.search('<.*?xml.*?>', root_string, flags=re.DOTALL).group()\n end_roottag = \"\"\n return start_roottag + os.linesep, end_roottag + os.linesep", "def _root_body_(self):\n node = self.worldbody.find(\"./body[@name='{}']\".format(self._root_))\n return node", "def setRoot(self, root: 'ScXMLScxmlElt') -> \"void\":\n return _coin.ScXMLDocument_setRoot(self, root)", "def handle_endtag(self, tag) -> None:\n if tag in self.keeptags:\n self.textdata += f'</{tag}>'", "def get_base_tag():\n\n # `version` is currently the only attribute that's required for ROBLOX to\n # recognize the file as a Model. 
All of the others are included to match\n # what ROBLOX outputs when exporting a model to your computer.\n return ElementTree.Element(\"roblox\", attrib={\n \"xmlns:xmine\": \"http://www.w3.org/2005/05/xmlmime\",\n \"xmlns:xsi\": \"http://www.w3.org/2001/XMLSchema-instance\",\n \"xsi:noNamespaceSchemaLocation\": \"http://www.roblox.com/roblox.xsd\",\n \"version\": \"4\" })", "def test_starttag_closing():\n inst = _encoder.TextEncoder('foo')\n\n result = inst.starttag(b'xx', iter([]), True)\n assert result == b'[[xx]]'\n\n result = inst.starttag(b'yy', iter([(b'aa', None), (b'bb', b'cc')]), True)\n assert result == b'[[yy aa bb=cc]]'", "def close(self, tag):\n return \"</{}>\".format(self.tags[tag].split(\" \", 1)[0])", "def create_subElement(root, name, content):\r\n sub_element = doc.createElement(name)\r\n root.appendChild(sub_element)\r\n sub_content = doc.createTextNode(content)\r\n sub_element.appendChild(sub_content)\r\n \r\n return root", "def get_root(self):\n return self.xml_tree", "def __create_closing_html_tag(self, tag):\n\n tag = tag.replace('<', '</')\n if tag.count('<') > 1:\n tag = tag.split('>')\n tag = tag[1] + '>' + tag[0] + '>'\n return tag", "def parse_tag(self, root, fmt, insert_children=True, create=None):\n arguments = {}\n extra_args = {}\n children = []\n text = []\n\n if root.text:\n text.append(root.text.strip())\n\n for k, v in root.attrib.iteritems():\n k = k.lower()\n self.is_valid_argument(k, fmt, root)\n if k == 'version' and root.tag == 'odML':\n continue # special case for XML version\n if k not in fmt._xml_attributes:\n self.error(\"<%s %s=...>: is not a valid attribute for %s\" % (root.tag, k, root.tag), root)\n else:\n k = fmt._xml_attributes[k] or k\n arguments[k] = v\n\n for node in root:\n node.tag = node.tag.lower()\n self.is_valid_argument(node.tag, fmt, root, node)\n if node.tag in fmt._args:\n if node.tag in self.tags and node.tag in fmt._map: # this is a heuristic, but works for now\n sub_obj = self.parse_element(node)\n if sub_obj is not None:\n extra_args[fmt.map(node.tag)] = sub_obj\n children.append(sub_obj)\n else:\n tag = fmt.map(node.tag)\n if tag in arguments:\n # TODO make this an error, however first figure out a way to let <odML version=><version/> pass\n self.warn(\"Element <%s> is given multiple times in <%s> tag\" % (node.tag, root.tag), node)\n arguments[tag] = node.text.strip() if node.text else None\n\n else:\n self.error(\"Invalid element <%s> in odML document section <%s>\" % (node.tag, root.tag), node)\n if node.tail:\n text.append(node.tail.strip())\n\n if create is None:\n obj = fmt.create()\n else:\n obj = create(args=arguments, text=''.join(text), children=children)\n if sys.version_info > (3,):\n self.check_mandatory_arguments(dict(list(arguments.items()) + list(extra_args.items())),\n fmt, root.tag, root)\n else:\n self.check_mandatory_arguments(dict(arguments.items() + extra_args.items()),\n fmt, root.tag, root)\n\n for k, v in arguments.items():\n if hasattr(obj, k):\n try:\n setattr(obj, k, v)\n except Exception as e:\n self.warn(\"cannot set '%s' property on <%s>: %s\" % (k, root.tag, repr(e)), root)\n if not self.ignore_errors:\n raise\n\n if insert_children:\n for child in children:\n obj.append(child)\n\n return obj", "def _root(self, bin_db):\r\n # Lookup the token in the BIN database\r\n if not self.is_word:\r\n return self.text\r\n w, m = bin_db.lookup_word(self.text, self.at_start)\r\n if m:\r\n m = [ x for x in m if self._bin_filter(x) ]\r\n if m:\r\n w = m[0].stofn\r\n return w.replace(\"-\", \"\")", "def 
testMultiTopLevel():\n parsed = parseHTML('<h1>Hello!</h1><h1>Goodbye!</h1>')\n assert parsed.flattenXML() == (\n '<h1 xmlns=\"http://www.w3.org/1999/xhtml\">Hello!</h1>'\n '<h1 xmlns=\"http://www.w3.org/1999/xhtml\">Goodbye!</h1>'\n )", "def test_root_xml(self):\n self.assertEqual(\n self.target_xml_root,\n self.ccc.xml_root\n )", "def getRoot(self) -> \"ScXMLScxmlElt *\":\n return _coin.ScXMLDocument_getRoot(self)", "def __convert_first_level_tags(self, chunk, tag):\n\n html_tag = self.first_level_tags[tag]\n if html_tag == '<blockquote>':\n for index, line in enumerate(chunk):\n line = line + '<br>'\n chunk[index] = line\n\n chunk = list(map(lambda elem: elem[len(tag):], chunk))\n if html_tag in ('<ul>', '<ol>'):\n chunk = [\n self.__enclose_in_html_tag(elem, '<li>') for elem in chunk\n ]\n chunk[0] = html_tag + chunk[0]\n chunk[-1] = chunk[-1] + self.__create_closing_html_tag(html_tag)\n return chunk" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Asynchronous version of get_response. Funneling everything, including WSGI, into a single async get_response() is too slow. Avoid the context switch by using a separate async response path.
async def get_response_async(self, request):
    # Setup default url resolver for this thread.
    set_urlconf(settings.ROOT_URLCONF)
    response = await self._middleware_chain(request)
    response._resource_closers.append(request.close)
    if response.status_code >= 400:
        await sync_to_async(log_response, thread_sensitive=False)(
            "%s: %s",
            response.reason_phrase,
            request.path,
            response=response,
            request=request,
        )
    return response
[ "async def _async_get(_async_timeout_seconds=5, *args, **kwargs):\n \n # Taken almost directly from the aiohttp tutorial\n with async_timeout.timeout(_async_timeout_seconds):\n async with session.get(*args, **kwargs) as response:\n return await response", "def async(fn):\n fn = asyncio.coroutine(fn)\n\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n coroutine = functools.partial(fn, *args, **kwargs)\n return async_response(coroutine(), current_app, request)\n return wrapper", "def handle_async_route(loop=None):\n\n global _async_handled\n if _async_handled is True:\n return\n _async_handled = True\n\n original = Flask.add_url_rule\n if loop is None:\n try:\n loop = asyncio.get_event_loop()\n except: #pragma: no cover\n loop = asyncio.new_event_loop()\n\n def replacement(self, rule, endpoint=None, view_func=None, **options):\n global _async_map\n sync_function = _async_map.get(view_func)\n if sync_function is None:\n if view_func is not None :\n def sync_function(*args, **kwargs):\n result = view_func(*args, **kwargs)\n return loop.run_until_complete(result) if inspect.iscoroutine(result) else result\n\n functools.update_wrapper(sync_function, view_func)\n _async_map[view_func] = sync_function\n else:\n sync_function = view_func\n\n return original(self, rule, endpoint=endpoint or \"sync-{}\".format(view_func.__name__), view_func=sync_function, **options)\n\n Flask.add_url_rule = replacement", "def main_app(environ, start_response):\n \n url = environ['PATH_INFO'] \n if(url):\n url = url.lstrip(\"/\")\n \n response_handler = None\n \n if(url in REQUESTS_MAP): #FIXME: strings have apparently an hash, regexp not. NEEDS REF\n url = REQUESTS_MAP[url]\n \n for k in REQUESTS_MAP:\n if(isinstance(k, basestring)):\n continue\n \n if(k.search(url)): #FIXME: search or matches is faster??\n response_handler = REQUESTS_MAP[k]\n break;\n \n if(response_handler is None):\n response_handler = ResponseHandler()\n \n if CARAVAN_DEBUG:\n print \"\\nServing \" + url \n \n response_handler.run(url, environ)\n \n if CARAVAN_DEBUG:\n print \"\\tresponse headers: \" + str(response_handler.headers)\n \n start_response(response_handler.status, response_handler.headers) \n \n return response_handler.body", "def _get_response(self, request):\n response = None\n callback, callback_args, callback_kwargs = self.resolve_request(request)\n\n # Apply view middleware\n for middleware_method in self._view_middleware:\n response = middleware_method(\n request, callback, callback_args, callback_kwargs\n )\n if response:\n break\n\n if response is None:\n wrapped_callback = self.make_view_atomic(callback)\n # If it is an asynchronous view, run it in a subthread.\n if iscoroutinefunction(wrapped_callback):\n wrapped_callback = async_to_sync(wrapped_callback)\n try:\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\n except Exception as e:\n response = self.process_exception_by_middleware(e, request)\n if response is None:\n raise\n\n # Complain if the view returned None (a common error).\n self.check_response(response, callback)\n\n # If the response supports deferred rendering, apply template\n # response middleware and then render the response\n if hasattr(response, \"render\") and callable(response.render):\n for middleware_method in self._template_response_middleware:\n response = middleware_method(request, response)\n # Complain if the template response middleware returned None\n # (a common error).\n self.check_response(\n response,\n middleware_method,\n 
name=\"%s.process_template_response\"\n % (middleware_method.__self__.__class__.__name__,),\n )\n try:\n response = response.render()\n except Exception as e:\n response = self.process_exception_by_middleware(e, request)\n if response is None:\n raise\n\n return response", "def get_request_life_cycle_wrapper(function, api, mimetype):\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n connexion_request = api.get_request(*args, **kwargs)\n while asyncio.iscoroutine(connexion_request):\n connexion_request = yield from connexion_request\n\n connexion_response = function(connexion_request)\n while asyncio.iscoroutine(connexion_response):\n connexion_response = yield from connexion_response\n\n framework_response = api.get_response(connexion_response, mimetype,\n connexion_request)\n while asyncio.iscoroutine(framework_response):\n framework_response = yield from framework_response\n\n return framework_response\n\n return asyncio.coroutine(wrapper)", "def custom_request_response(func: typing.Callable) -> ASGIApp:\r\n is_coroutine = asyncio.iscoroutinefunction(func)\r\n\r\n async def app(scope: Scope, receive: Receive, send: Send) -> None:\r\n request = GzipRequest(scope, receive=receive, send=send)\r\n if is_coroutine:\r\n response = await func(request)\r\n else:\r\n response = await run_in_threadpool(func, request)\r\n await response(scope, receive, send)\r\n\r\n return app", "def handle_request(self, request: aiohttp.web.Request) -> \\\n aiohttp.web.StreamResponse:\n\n # Use aiohttp's WSGI implementation\n protocol = WSGIServerHttpProtocol(request.app, True)\n protocol.transport = request.transport\n\n # Build WSGI Response\n environ = protocol.create_wsgi_environ(request, request.content)\n\n # Create responses\n ws = aiohttp.web.WebSocketResponse()\n response = aiohttp.web.StreamResponse()\n\n #: Write delegate\n @asyncio.coroutine\n def write(data):\n yield from response.write(data)\n\n #: EOF Write delegate\n @asyncio.coroutine\n def write_eof():\n yield from response.write_eof()\n\n # WSGI start_response function\n def start_response(status, headers, exc_info=None):\n if exc_info:\n raise exc_info[1]\n\n status_parts = status.split(' ', 1)\n status = int(status_parts.pop(0))\n reason = status_parts[0] if status_parts else None\n\n response.set_status(status, reason=reason)\n\n for name, value in headers:\n response.headers[name] = value\n\n response.start(request)\n\n return write\n if is_websocket_request(request):\n ws.start(request)\n\n # WSGI HTTP responses in websocket are meaningless.\n def start_response(status, headers, exc_info=None):\n if exc_info:\n raise exc_info[1]\n ws.start(request)\n return []\n\n @asyncio.coroutine\n def write(data):\n return\n\n @asyncio.coroutine\n def write_eof():\n return\n\n response = ws\n else:\n ws = None\n\n # Add websocket response to WSGI environment\n environ['wsgi.websocket'] = ws\n\n # Run WSGI app\n response_iter = self.wsgi(environ, start_response)\n\n try:\n iterator = iter(response_iter)\n\n wsgi_response = []\n try:\n item = next(iterator)\n except StopIteration as stop:\n try:\n iterator = iter(stop.value)\n except TypeError:\n pass\n else:\n wsgi_response = iterator\n else:\n if isinstance(item, bytes):\n # This is plain WSGI response iterator\n wsgi_response = itertools.chain([item], iterator)\n else:\n # This is coroutine\n yield item\n wsgi_response = yield from iterator\n for item in wsgi_response:\n yield from write(item)\n\n yield from write_eof()\n finally:\n if hasattr(response_iter, 'close'):\n 
response_iter.close()\n\n # Return selected response\n return response", "def execute_async(self):\r\n\r\n self.response = self.payload.execute_request()\r\n if hasattr(self.response, 'headers'):\r\n self.status_url = self.response.headers.get(\r\n 'Location', 'NoLocationFound')\r\n\r\n return self", "def process_response(self, request, response):\n return self.__process_awesome_response(request, response)", "async def get(self):\n http = httpclient.AsyncHTTPClient()\n\n spark_ui_base_url = os.environ.get(\"SPARKMONITOR_UI_HOST\", \"localhost\")\n spark_ui_port = os.environ.get(\"SPARKMONITOR_UI_PORT\", \"8080\")\n spark_ui_url = \"http://{baseurl}:{port}\".format(baseurl=spark_ui_base_url,port=spark_ui_port)\n\n request_path = self.request.uri[(self.request.uri.index(proxy_root) + len(proxy_root) + 1):]\n self.replace_path = self.request.uri[:self.request.uri.index(proxy_root) + len(proxy_root)]\n backendurl = helpers.url_path_join(spark_ui_url, request_path)\n self.debug_url = spark_ui_url\n self.backendurl = backendurl\n\n logger.info(\"GET: \\n Request uri:%s \\n Port: %s \\n Host: %s \\n request_path: %s \", self.request.uri,\n os.environ.get(\n \"SPARKMONITOR_UI_PORT\", \"4040\"), os.environ.get(\"SPARKMONITOR_UI_HOST\", \"127.0.0.1\"),\n request_path)\n try:\n x = await http.fetch(backendurl)\n self.handle_response(x)\n except:\n self.handle_bad_response()", "async def respond(\n self,\n response: Optional[BaseHTTPResponse] = None,\n *,\n status: int = 200,\n headers: Optional[Union[Header, Dict[str, str]]] = None,\n content_type: Optional[str] = None,\n ):\n try:\n if self.stream is not None and self.stream.response:\n raise ServerError(\"Second respond call is not allowed.\")\n except AttributeError:\n pass\n # This logic of determining which response to use is subject to change\n if response is None:\n response = HTTPResponse(\n status=status,\n headers=headers,\n content_type=content_type,\n )\n\n # Connect the response\n if isinstance(response, BaseHTTPResponse) and self.stream:\n response = self.stream.respond(response)\n\n if isawaitable(response):\n response = await response # type: ignore\n # Run response middleware\n try:\n middleware = (\n self.route and self.route.extra.response_middleware\n ) or self.app.response_middleware\n if middleware and not self._response_middleware_started:\n self._response_middleware_started = True\n response = await self.app._run_response_middleware(\n self, response, middleware\n )\n except CancelledErrors:\n raise\n except Exception:\n error_logger.exception(\n \"Exception occurred in one of response middleware handlers\"\n )\n self.responded = True\n return response", "def async_route_check(self):\n # type: () -> bool\n return asyncio.iscoroutinefunction(self.get_response)", "async def _handle_request(self, request):\n self._responses[id(request)] = response = asyncio.Future()\n self._requests.put_nowait(request)\n try:\n # wait until test case provides a response\n return await response\n finally:\n del self._responses[id(request)]", "def fetch(\n self, path: str, raise_error: bool = False, **kwargs: Any\n ) -> HTTPResponse:\n if path.lower().startswith((\"http://\", \"https://\")):\n url = path\n else:\n url = self.get_url(path)\n return self.io_loop.run_sync(\n lambda: self.http_client.fetch(url, raise_error=raise_error, **kwargs),\n timeout=get_async_test_timeout(),\n )", "def asynchronous(method):\n warnings.warn(\"@asynchronous is deprecated, use coroutines instead\",\n DeprecationWarning)\n # Delay the IOLoop import because it's not 
available on app engine.\n from tornado.ioloop import IOLoop\n\n @functools.wraps(method)\n def wrapper(self, *args, **kwargs):\n self._auto_finish = False\n result = method(self, *args, **kwargs)\n if result is not None:\n result = gen.convert_yielded(result)\n\n # If @asynchronous is used with @gen.coroutine, (but\n # not @gen.engine), we can automatically finish the\n # request when the future resolves. Additionally,\n # the Future will swallow any exceptions so we need\n # to throw them back out to the stack context to finish\n # the request.\n def future_complete(f):\n f.result()\n if not self._finished:\n self.finish()\n IOLoop.current().add_future(result, future_complete)\n # Once we have done this, hide the Future from our\n # caller (i.e. RequestHandler._when_complete), which\n # would otherwise set up its own callback and\n # exception handler (resulting in exceptions being\n # logged twice).\n return None\n return result\n return wrapper", "def wsgi_app(self, environ, start_response):\n request = Request(environ)\n urls = self.url_map.bind_to_environ(environ)\n response = self.dispatch_request(urls, request)\n return response(environ, start_response)", "def __call__(self, environ, make_response):\n resp = self.dispatch_request(environ, make_response)\n return resp.render()", "def wsgi_app(self, environ, start_response):\n request = Request(environ)\n response = self.dispatch_request(request)\n return response(environ, start_response)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resolve and call the view, then apply view, exception, and template_response middleware. This method is everything that happens inside the request/response middleware.
def _get_response(self, request):
    response = None
    callback, callback_args, callback_kwargs = self.resolve_request(request)

    # Apply view middleware
    for middleware_method in self._view_middleware:
        response = middleware_method(
            request, callback, callback_args, callback_kwargs
        )
        if response:
            break

    if response is None:
        wrapped_callback = self.make_view_atomic(callback)
        # If it is an asynchronous view, run it in a subthread.
        if iscoroutinefunction(wrapped_callback):
            wrapped_callback = async_to_sync(wrapped_callback)
        try:
            response = wrapped_callback(request, *callback_args, **callback_kwargs)
        except Exception as e:
            response = self.process_exception_by_middleware(e, request)
            if response is None:
                raise

    # Complain if the view returned None (a common error).
    self.check_response(response, callback)

    # If the response supports deferred rendering, apply template
    # response middleware and then render the response
    if hasattr(response, "render") and callable(response.render):
        for middleware_method in self._template_response_middleware:
            response = middleware_method(request, response)
            # Complain if the template response middleware returned None
            # (a common error).
            self.check_response(
                response,
                middleware_method,
                name="%s.process_template_response"
                % (middleware_method.__self__.__class__.__name__,),
            )
        try:
            response = response.render()
        except Exception as e:
            response = self.process_exception_by_middleware(e, request)
            if response is None:
                raise

    return response
[ "def handle(self, request, path):\n if path != '/':\n raise Resolver404\n view = self.route.view\n return view(request, route=self.route)", "def __call__(self, *args, **kwargs):\n return self.view(*args, **kwargs)", "def __call__(self, environ, make_response):\n resp = self.dispatch_request(environ, make_response)\n return resp.render()", "def dispatch_request(self, *args, **kwargs):\n path = request.path.lstrip(\"/\")\n matching_template = self._get_template(path)\n\n if not matching_template:\n abort(404, f\"Can't find page for: {path}\")\n\n if matching_template[-2:] == \"md\":\n with open(\n f\"{current_app.template_folder}/{matching_template}\"\n ) as f:\n file_content = f.read()\n parsed_file = load_frontmatter_from_markdown(file_content)\n wrapper_template = parsed_file.metadata.get(\"wrapper_template\")\n\n if not wrapper_template or not os.path.isfile(\n current_app.template_folder + \"/\" + wrapper_template\n ):\n abort(404, f\"Can't find page for: {path}\")\n\n context = parsed_file.metadata.get(\"context\", {})\n return self._render_markdown(\n parsed_file.content, wrapper_template, context\n )\n\n return render_template(matching_template, **self._get_context())", "def as_view(self, view, cacheable=False, extra_permission=None):\r\n def inner(request, *args, **kwargs):\r\n if not self.has_permission(request, extra_permission):\r\n # show login pane\r\n return self.login(request)\r\n return view(request, *args, **kwargs)\r\n\r\n # Mark it as never_cache\r\n if not cacheable:\r\n inner = never_cache(inner)\r\n\r\n # We add csrf_protect here so this function can be used as a utility\r\n # function for any view, without having to repeat 'csrf_protect'.\r\n if not getattr(view, 'csrf_exempt', False):\r\n inner = csrf_protect(inner)\r\n\r\n inner = ensure_csrf_cookie(inner)\r\n\r\n return update_wrapper(inner, view)", "def view_function(self):\n\n function = as_request_function(self.function)\n decorators = [\"cache\", \"gzip\", \"xframe\", \"csrf\", \"decorators\"]\n kwargs = {attr: getattr(self, attr) for attr in decorators}\n\n # Creates the default view function.\n @apply_decorators(**kwargs)\n def view_function(request, **kwargs):\n try:\n self.prepare_arguments(request, kwargs)\n result = function(request, **kwargs)\n return self.prepare_response(result, request)\n except HttpExceptional as exc:\n return exc.get_response()\n\n return view_function", "def dispatch(self):\n\n # Setup everything, the session, etc.\n self._init_meta()\n\n self.session_store = sessions.get_store(request=self.request)\n self.context.set_dotted('this.session', self.session)\n\n self.events.before_startup(controller=self)\n self.startup()\n self.events.after_startup(controller=self)\n\n # Authorization\n res = self._is_authorized()\n if isinstance(res, webapp2.Response):\n return res\n\n # Dispatch to the method\n self.events.before_dispatch(controller=self)\n result = super(Controller, self).dispatch()\n self.events.after_dispatch(response=result, controller=self)\n\n # Return value handlers.\n # Response has highest precendence, the view class has lowest.\n response_handler = response_handlers.factory(type(result))\n\n if response_handler:\n self.response = response_handler(self, result)\n\n # View rendering works similar to the string mode above.\n elif self.meta.view.auto_render:\n self._clear_redirect()\n self.response = self.meta.view.render()\n\n else:\n self.abort(500, 'Nothing was able to handle the response %s (%s)' % (result, type(result)))\n\n 
self.events.dispatch_complete(controller=self)\n\n self.session_store.save_sessions(self.response)\n self.events.clear()\n return self.response", "def as_view(self, view, cacheable=False, extra_permission=None):\n @wraps(view)\n def inner(request, *args, **kwargs):\n if not request.user.is_authenticated:\n return self.login(request)\n elif not self.has_permission(request, extra_permission):\n raise PermissionDenied()\n return view(request, *args, **kwargs)\n\n # Mark it as never_cache\n if not cacheable:\n inner = never_cache(inner)\n\n # We add csrf_protect here so this function can be used as a utility\n # function for any view, without having to repeat 'csrf_protect'.\n if not getattr(view, 'csrf_exempt', False):\n inner = csrf_protect(inner)\n\n inner = ensure_csrf_cookie(inner)\n\n return update_wrapper(inner, view)", "def resolve_request(self, request):\n # Work out the resolver.\n if hasattr(request, \"urlconf\"):\n urlconf = request.urlconf\n set_urlconf(urlconf)\n resolver = get_resolver(urlconf)\n else:\n resolver = get_resolver()\n # Resolve the view, and assign the match object back to the request.\n resolver_match = resolver.resolve(request.path_info)\n request.resolver_match = resolver_match\n return resolver_match", "def routecbv(self, view:View):\n # views must provide at least path and method\n assert view._meta.path\n assert view._meta.method\n\n kwargs = {}\n kwargs['path'] = view._meta.path\n kwargs['method'] = view._meta.method\n kwargs['name'] = view._meta.name\n kwargs['skip'] = view._meta.skip\n kwargs['apply'] = view._meta.plugins\n kwargs['meta'] = view._meta\n\n cb = view.as_callable()\n self.route(**kwargs)(cb)\n return view", "def process_view(\n self,\n request: HttpRequest,\n view_func: Callable,\n view_args: Any,\n view_kwargs: Any,\n ) -> None:\n # force the creation of a valid session by saving it.\n if (\n hasattr(request, \"session\")\n and request.session.session_key is None\n and settings.STORE_ANONYMOUS_SESSIONS is True\n ):\n request.session.save()\n\n if hasattr(view_func, \"__name__\"):\n request.profiler.view_func_name = view_func.__name__\n else:\n request.profiler.view_func_name = view_func.__class__.__name__", "def __call__(self, context, request, path):\n logger.debug(\"router.__call__: path=%s\" % path)\n\n endpoint, values = self.match(context, request, path)\n return self.view_functions[endpoint](context, request, **values)", "def __call__(self, *args, **kwargs):\n # If an error occurs while processing the request, an exception is \n # raised which causes the processing to halt immediately. 
The \n # exception is caught here.\n try:\n self.do_dataview(*args, **kwargs)\n except PolicyDataviewError:\n pass\n return self._result()", "def serve(self, request):\r\n view = MailChimpView.as_view(page_instance=self)\r\n return view(request)", "def on_template_response(self, context, **kwargs):\n request = kwargs.setdefault(\"request\", self.req())\n\n res = TemplateResponse(request, \"some/template.html\", context)\n\n return self.on_response(res, **kwargs)", "def __call__(self):\n self.update()\n\n if self.request.response.getStatus() in REDIRECT_STATUS_CODES:\n # don't bother rendering when redirecting\n return ''\n\n if self.layout is None:\n layout = zope.component.queryMultiAdapter(\n (self, self.request, self.context), ILayoutTemplate)\n if layout is None:\n layout = zope.component.getMultiAdapter(\n (self, self.request), ILayoutTemplate)\n return layout(self)\n return self.layout()", "def wrapped_view(view, **kwargs):\n\n @functools.wraps(view)\n def django_view(request, **dj_kwargs):\n kwargs['request'] = request\n return wrap_to_request(view(**dj_kwargs), **kwargs)\n\n return django_view", "def _ensure_response_has_view(self):\n if not (self.response.original and isinstance(self.response.original, View)):\n raise ValueError(\"The response is not a view\")", "def _render_response(request, *args, **kwargs):\n httpresponse_kwargs = {'mimetype': kwargs.pop('mimetype', None)}\n status = kwargs.pop('status', 200)\n if 'context_instance' not in kwargs:\n kwargs['context_instance'] = RequestContext(request)\n return HttpResponse(loader.render_to_string(*args, **kwargs),\n status=status, **httpresponse_kwargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve/set the urlconf for the request. Return the view resolved, with its args and kwargs.
def resolve_request(self, request):
    # Work out the resolver.
    if hasattr(request, "urlconf"):
        urlconf = request.urlconf
        set_urlconf(urlconf)
        resolver = get_resolver(urlconf)
    else:
        resolver = get_resolver()
    # Resolve the view, and assign the match object back to the request.
    resolver_match = resolver.resolve(request.path_info)
    request.resolver_match = resolver_match
    return resolver_match
[ "def get_configure_view(self):\n from .view import ConfigureView # Django 1.9 setup issue\n return ConfigureView.as_view()", "def url_resolve(request, url_name):\n reverse_args = prep_args(request)\n\n try:\n url = reverse(url_name, **reverse_args)\n except:\n return HttpResponseNotFound()\n\n view, args, kwargs = resolve(url)\n\n return view(request, *args, **kwargs)", "def construct_url(context, request):", "def handle(self, request, path):\n if path != '/':\n raise Resolver404\n view = self.route.view\n return view(request, route=self.route)", "def view(self):\n\n if not self._view:\n resolver = get_resolver(None)\n for pattern in resolver.url_patterns:\n if self._looks_like(self.callback, pattern.callback):\n self._view = pattern.callback\n break\n\n return self._view", "def test_config_harvester_url_resolves_to_correct_view(self):\n view = resolve('/hcc/config/Harvester1')\n self.assertEqual(view.func.__name__, 'ConfigHarvesterView')", "def create_url_adapter(self, request):\n if request is not None:\n\n Website = Pool().get('nereid.website')\n\n website = Website.get_from_host(request.host)\n rv = website.get_url_adapter(self).bind_to_environ(\n request.environ,\n server_name=self.config['SERVER_NAME']\n )\n return rv", "def _get_urls_property(self, bases, urlpatterns):\n\n @property\n def urls(conf):\n if hasattr(conf, '_urls_cache'):\n return conf._urls_cache\n new_urlpatterns = []\n for pattern in urlpatterns:\n # Call to recursively apply the conf and bases to each of the\n # views referenced in the urlconf.\n new_urlpatterns.append(self.add_conf_to_pattern(pattern, self.conf, bases))\n conf._urls_cache = urlconf_include(new_urlpatterns, app_name=self.application_name.split('.')[-1], namespace=self.local_name)\n return conf._urls_cache\n return urls", "def __call__(self, context, request, path):\n logger.debug(\"router.__call__: path=%s\" % path)\n\n endpoint, values = self.match(context, request, path)\n return self.view_functions[endpoint](context, request, **values)", "def get_request_settings(config):\n cfg = deepcopy(config)\n url = cfg.pop('_url', None)\n return url, {k: v for k, v in cfg.items() if k.startswith('_')}", "def get_view(cls):\n try:\n view = cls.__dict__['view']\n except KeyError:\n message = \"This test must have a 'view' attribute.\"\n raise ImproperlyConfigured(message)\n\n try:\n return view.as_view()\n except AttributeError:\n return view", "def add_conf_to_pattern(self, pattern, conf, bases):\n\n # Don't import at module scope as this module will be imported from a\n # settings file.\n from molly.utils.views import BaseView\n\n if isinstance(pattern, RegexURLResolver):\n # Recurse through the patterns\n patterns = []\n for subpattern in pattern.url_patterns:\n patterns.append(self.add_conf_to_pattern(subpattern, conf, bases))\n # Create a new RegexURLResolver with the new patterns\n return RegexURLResolver(pattern.regex.pattern, # The regex pattern string\n patterns,\n pattern.default_kwargs,\n pattern.app_name,\n pattern.namespace)\n elif isinstance(pattern, RegexURLPattern):\n # Get the callback and make sure it derives BaseView\n callback = pattern.callback\n if not issubclass(callback, BaseView):\n return callback\n \n if bases:\n # Create a new callback with the extra bases\n callback = type(callback.__name__ + 'Extended', (callback,) + bases, {})\n callback.__module__ = pattern.callback.__module__\n \n # Instantiate the callback with the conf object\n callback = callback(conf)\n \n # Transplant this new callback into a new RegexURLPattern, 
keeping\n # the same regex, default_args and name.\n return RegexURLPattern(pattern.regex.pattern,\n callback,\n pattern.default_args,\n pattern.name)\n else:\n raise TypeError(\"Expected RegexURLResolver or RegexURLPattern instance, got %r.\" % type(pattern))", "def process_request(self, request):\r\n self.request = request\r\n site = get_current_site(request)\r\n if site.id > 1:\r\n prefix = \"_{0}\".format(site.id)\r\n self.request.urlconf = settings.ROOT_URLCONF + prefix", "def process_request(self, request):\n\n host = request.get_host().lower()\n matches = main_domain_regex.match(host)\n if matches:\n request.subdomain = matches.group('subdomain')\n if request.subdomain is not UNSET:\n urlconf = settings.SUBDOMAIN_URLCONFS.get(request.subdomain)\n if urlconf is not None:\n request.urlconf = urlconf\n else:\n request.subdomain = None\n urlconf = virtualhost_to_urlconf(host)\n if urlconf is False:\n logger.error('Attempt to access %s as hostname; Ignored;' % host)\n raise Http404\n request.host = host\n request.urlconf = urlconf", "def routecbv(self, view:View):\n # views must provide at least path and method\n assert view._meta.path\n assert view._meta.method\n\n kwargs = {}\n kwargs['path'] = view._meta.path\n kwargs['method'] = view._meta.method\n kwargs['name'] = view._meta.name\n kwargs['skip'] = view._meta.skip\n kwargs['apply'] = view._meta.plugins\n kwargs['meta'] = view._meta\n\n cb = view.as_callable()\n self.route(**kwargs)(cb)\n return view", "def reverse_querystring(\n view, urlconf=None, args=None, kwargs=None, current_app=None, query_kwargs=None\n):\n base_url = reverse(\n view, urlconf=urlconf, args=args, kwargs=kwargs, current_app=current_app\n )\n if query_kwargs:\n return \"{}?{}\".format(base_url, urlencode(query_kwargs))\n return base_url", "def view_instance(cls, request=None, *args, **kwargs):\n try:\n view = cls.view\n except AttributeError:\n message = \"This test must have a 'view' attribute.\"\n raise ImproperlyConfigured(message)\n\n return view(request=request, args=args, kwargs=kwargs)", "def __init__(self, urlpatterns):\n self.urlpatterns = urlpatterns", "def get_urls(self, urlpatterns=None):\n existing_patterns = urlpatterns is not None\n if not existing_patterns:\n urlpatterns = []\n from django.conf.urls import url, include\n # Since this module gets imported in the application's root package,\n # it cannot import models from other applications at the module level,\n # and django.contrib.contenttypes.views imports ContentType.\n from django.contrib.contenttypes import views as contenttype_views\n\n def urlpatterns_in_namespace(urlpatterns, namespace):\n \"\"\" Given a list of url patterns and a namespace to look for,\n return a reference to the list of url patterns attached to the\n namespace (if found) or an empty list, and a boolean indicating\n whether the namespace already existed. 
\"\"\"\n namespace_urlpatterns = []\n namespace_exists = False\n for resolver in urlpatterns:\n if getattr(resolver, 'namespace', None) == namespace:\n namespace_urlpatterns = resolver.url_patterns\n namespace_exists = True\n break\n return namespace_urlpatterns, namespace_exists\n\n # URL auto-loader traverses all installed apps\n for app_config in utils.get_project_app_configs():\n app_namespace = getattr(app_config,\n 'url_namespace',\n app_config.label)\n app_urlpatterns, app_namespace_exists = \\\n urlpatterns_in_namespace(urlpatterns, app_namespace)\n\n # only auto-load app URLs if defined\n if not existing_patterns:\n # attempt to append from the app's URLs\n try:\n app_urlpatterns.append(\n url(r'', include(r'{}.urls'.format(app_config.name)))\n )\n except ImportError:\n pass\n\n for model in app_config.get_models():\n model_name = model._meta.model_name\n model_urlpatterns, model_namespace_exists = \\\n urlpatterns_in_namespace(app_urlpatterns, model_name)\n try:\n controller = self.get_registered_controller(model)\n except NotRegistered:\n controller = None\n else:\n controller.url_app_namespace = app_namespace\n model_urlpatterns.extend(controller.urls)\n\n # if the namespace exists we already appended/extended in place\n if model_urlpatterns and not model_namespace_exists:\n if controller:\n model_namespace = controller.model_namespace\n model_prefix = controller.url_prefix\n else:\n model_namespace = model._meta.model_name\n model_prefix = model._meta.verbose_name_plural.lower(\n ).replace(' ', '-')\n app_urlpatterns.append(\n url(('^{}'.format(model_prefix)\n if model_prefix else ''), include(\n (model_urlpatterns, model_namespace)\n )),\n )\n\n # create an app index view if a named view is not provided\n # TODO: this is being added unconditionally right now... what we\n # really want to do is see if an index was specified (naturally or\n # explcitly) and only add this if we do not have one\n from .. import views\n AppIndex = getattr(app_config, 'AppIndexView', views.AppIndexView)\n app_index = AppIndex.as_view(app_config=app_config, backend=self)\n app_urlpatterns.append(url(r'^$', app_index, name='index'))\n\n\n # if the namespace exists we already appended/extended in place\n if app_urlpatterns and not app_namespace_exists:\n urlprefix = getattr(app_config, 'url_prefix', app_config.label)\n urlprefix = (r'^{}/'.format(urlprefix)\n if urlprefix is not None and urlprefix != ''\n else r'')\n urlpatterns.append(\n url(urlprefix, include(\n (app_urlpatterns, app_namespace)))\n )\n\n SiteIndex = getattr(self, 'SiteIndex', None)\n if SiteIndex:\n urlpatterns.append(\n url(r'^$',\n SiteIndex.as_view(backend=self),\n name='home')\n )\n\n # render(urlpatterns)\n\n return urlpatterns" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Raise an error if the view returned None or an uncalled coroutine.
def check_response(self, response, callback, name=None):
    if not (response is None or asyncio.iscoroutine(response)):
        return
    if not name:
        if isinstance(callback, types.FunctionType):  # FBV
            name = "The view %s.%s" % (callback.__module__, callback.__name__)
        else:  # CBV
            name = "The view %s.%s.__call__" % (
                callback.__module__,
                callback.__class__.__name__,
            )
    if response is None:
        raise ValueError(
            "%s didn't return an HttpResponse object. It returned None "
            "instead." % name
        )
    elif asyncio.iscoroutine(response):
        raise ValueError(
            "%s didn't return an HttpResponse object. It returned an "
            "unawaited coroutine instead. You may need to add an 'await' "
            "into your view." % name
        )
[ "def _ensure_response_has_view(self):\n if not (self.response.original and isinstance(self.response.original, View)):\n raise ValueError(\"The response is not a view\")", "async def test_callback_view_rejects_missing_code(hass: HomeAssistant) -> None:\n view = LogiCircleAuthCallbackView()\n resp = await view.get(MockRequest(hass, {}))\n\n assert resp.status == HTTPStatus.BAD_REQUEST", "def async_route_check(self):\n # type: () -> bool\n return asyncio.iscoroutinefunction(self.get_response)", "def test_anonymous_raises_exception(self):\n with self.assertRaises(PermissionDenied):\n self.dispatch_view(\n self.build_request(path=self.view_url), raise_exception=True)", "def test_raise_func_response(self):\n user = self.build_unauthorized_user()\n req = self.build_request(user=user, path=self.view_url)\n\n def func(request):\n return HttpResponse(\"CUSTOM\")\n\n resp = self.dispatch_view(req, raise_exception=func)\n assert resp.status_code == 200\n assert force_text(resp.content) == 'CUSTOM'", "def _async_check(self):\n # type: () -> None\n if asyncio.iscoroutinefunction(self.get_response):\n self._is_coroutine = asyncio.coroutines._is_coroutine # type: ignore", "def test_error_calls_given_func_if_func_not_None(self):\n error('foo', func=Fake(callable=True, expect_call=True))", "def test_call_direct_positional_args_raises(self):\n view_callable = self.make_view_callable()\n with pytest.raises(TypeError):\n view_callable._call(1)", "def error_received(self, exc: Exception) -> None:", "def error(self, coro: ErrorFunc) -> ErrorFunc:\n\n if not inspect.iscoroutinefunction(coro):\n raise TypeError('The error handler must be a coroutine.')\n\n params = inspect.signature(coro).parameters\n if len(params) != 2:\n raise TypeError('error handler must have 2 parameters')\n\n self.on_error = coro\n return coro", "def test_doesnt_print_None_when_no_traceback_present(self):\n error(\"error message\", func=utils.abort, stdout=error)\n assert_not_contains('None', sys.stdout.getvalue())", "def atomic_view(a_view):\n @wraps(a_view)\n def _wrapped_view(*args, **kwargs):\n try:\n with transaction.atomic():\n response = a_view(*args, **kwargs)\n if response.status_code >= 400:\n raise NonOverlappingError()\n except NonOverlappingError:\n pass\n return response\n\n return _wrapped_view", "def test_call_direct_unknown_kwarg_raises(self):\n view_callable = self.make_view_callable()\n with pytest.raises(TypeError):\n view_callable._call(b=2)", "def test_wrapped_not_callable_raises(self):\n not_callable = object()\n with pytest.raises(TypeError):\n BaseViewCallable(not_callable)", "def __call__(self, *args, **kwargs):\n # If an error occurs while processing the request, an exception is \n # raised which causes the processing to halt immediately. 
The \n # exception is caught here.\n try:\n self.do_dataview(*args, **kwargs)\n except PolicyDataviewError:\n pass\n return self._result()", "def competition_required(view):\n\n def wrapper(request, *args, **kwargs):\n current = models.Competition.current()\n if current is None:\n return redirect(\"coaches:inactive\")\n\n # Try to pass to function\n try:\n return view(request, *args, competition=current, **kwargs)\n except TypeError:\n return view(request, *args, **kwargs)\n\n return wrapper", "def process_exception(self, request, exception):\n\n if hasattr(request, 'LTI_MODE') and request.LTI_MODE and \\\n hasattr(exception, 'args') and len(exception.args) > 0:\n return HttpResponse(f\"An error occurred in the peer-review system:<br />{exception.args[0]}\")\n\n return None", "def testViewViewNotFound(self):\n response = self.client.get(reverse('pub_view', args=[100]))\n self.assertEquals(response.status_code, 404)", "def test_mising_renderer(self):\r\n assert self.giotto_view.can_render('text/html') == False\r\n self.assertRaises(NoViewMethod, lambda: self.giotto_view.render({}, 'text/html'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pass the exception to the exception middleware. If no middleware return a response for this exception, return None.
def process_exception_by_middleware(self, exception, request): for middleware_method in self._exception_middleware: response = middleware_method(request, exception) if response: return response return None
[ "def process_spider_exception(self, response, exception, spider):\n return None", "def process_exception(self, request, exception):\n\n if hasattr(request, 'LTI_MODE') and request.LTI_MODE and \\\n hasattr(exception, 'args') and len(exception.args) > 0:\n return HttpResponse(f\"An error occurred in the peer-review system:<br />{exception.args[0]}\")\n\n return None", "def http_exception_handler(\n request: Request,\n exc: Union[StarletteHTTPException, OptimadeHTTPException],\n) -> JSONAPIResponse:\n return general_exception(request, exc)", "def handle_exception(self, exc):\n response = super(FormatAPIView, self).handle_exception(exc)\n serializer_class = getattr(\n getattr(self.request, 'accepted_renderer', None),\n 'error_serializer_class', None)\n if serializer_class is None:\n return response\n\n serializer = serializer_class(\n instance=exc, context=self.get_serializer_context())\n response.data = serializer.data\n return response", "def request_validation_exception_handler(\n request: Request, exc: RequestValidationError\n) -> JSONAPIResponse:\n return general_exception(request, exc)", "def manage_content_exceptions(handler):\n try:\n return handler()\n except app.exceptions.ResponseContent as r:\n return r.content", "def custom_exception_handler(exc, context):\n # print('CUSTOM HANDLER')\n # Call REST framework's default exception handler first,\n # to get the standard error response.\n response = exception_handler(exc, context)\n\n # Customizing response\n if response is not None:\n errors = []\n for k, v in response.data.items():\n errors.append(\"{} : {}\".format(k, v))\n\n response.data = {\n # 'status_code': response.status_code,\n 'errors': errors\n }\n return response", "def _get_http_exception_handler(self, app):\n handle_http_exception = app.handle_http_exception\n\n @wraps(handle_http_exception)\n def ret_val(exception):\n context = stack.top\n request = context.request\n # Full url, e.g.,\n # http://example.com/channel/page.html?x=y\n url = request.url\n # Previous url maps to\n # http://example.com/channel/\n url_root = request.url_root\n # Removes the query parameters\n base_url = request.base_url\n # /channel\n script_root = request.script_root\n # /page.html\n path = request.path\n if path.startswith('/'):\n path = path[1:]\n paths = path.split('/')\n mpath = \",\".join(paths)\n mpath = \",{0},\".format(mpath)\n if exception.code in [404,]:\n try: \n redirect_to = Redirect.objects.get(linkname=path)\n target = redirect_to.target\n long_slug = target.get_absolute_url()\n except: \n return handle_http_exception(exception)\n return redirect(long_slug)\n return ret_val", "def make_exception_route(exception: Type[HTTPException]) -> Callable[[], Response]:\n def _route() -> Response:\n raise exception()\n return _route", "def ie_detect_middleware(get_response):\n\n def middleware(request: HttpRequest):\n if user_agent_is_internet_explorer(request.META.get(\"HTTP_USER_AGENT\")):\n if \"ie-unsupported\" not in request.path:\n # prevent redirect loops\n return redirect(\"ie-unsupported\")\n\n return get_response(request)\n\n return middleware", "def response_else_exception(response):\n if response.status_code != HTTPStatus.OK:\n logger.debug(\"Response returned from the server: {0}\".format(response.text))\n raise HTTPResponseError(response)\n return response", "def __handleException(self, e, request, requestType):\n assert isinstance(e, Exception);\n\n event = GetResponseForExceptionEvent(self, request, requestType, e);\n 
self._dispatcher.dispatch(ConsoleKernelEvents.EXCEPTION, event);\n\n # a listener might have replaced the exception\n e = event.getException();\n\n if ( not event.hasResponse()) :\n raise e;\n\n response = event.getResponse();\n\n if (isinstance(e, ConsoleExceptionInterface)) :\n # keep the CLI status code\n response.setStatusCode(e.getStatusCode());\n else:\n response.setStatusCode(1);\n\n try:\n return self.__filterResponse(response, request, requestType);\n except Exception as e:\n return response;", "async def default_error_handler(request: web.Request) -> web.Response:\n with error_context(request) as context:\n logger.error(context.message, exc_info=True)\n return web.json_response(context.data, status=context.status)", "def on_response(self, middleware=None, *, priority=0):\n if callable(middleware):\n return self.middleware(middleware, \"response\", priority=priority)\n else:\n return partial(\n self.middleware, attach_to=\"response\", priority=priority\n )", "def getException(self):\n \n return self._exception", "async def http_error_handler(self, request: \"Request\", exc: \"HTTPException\") -> Response:\n return JSONResponse(\n status_code=exc.status_code,\n content={\n \"error\": \"invalid_request\",\n \"error_description\": f\"{exc.detail}\",\n \"error_uri\": str(request.url),\n },\n )", "def handler500(request, *args, **argv):\n # warning: in django2 signature of this function has changed\n response = render_to_response(\"500.html\", {})\n response.status_code = 200\n return response", "def handle_api_exception(func):\n\n def inner(*args, **kwargs):\n \"\"\"\n handler api exception\n \"\"\"\n request = args[0]\n content_type = \"application/json\"\n try:\n body, status = func(*args, **kwargs)\n except CustomException as error:\n exp = HTTPException.from_custom_exception(error)\n body = _serialize(exp.as_dict())\n status = exp.status\n except HTTPException as error:\n body = _serialize(error.as_dict())\n status = error.status\n except Exception as error:\n error_traceback = traceback.format_exc()\n Logger.error(error_traceback)\n body = \"Something went wrong\"\n status = 500\n return HttpResponse(\n json.dumps(body), status=status, content_type=content_type\n )\n\n return inner", "def fancy_error_template_middleware(app):\r\n def application(environ, start_response):\r\n try:\r\n return app(environ, start_response)\r\n except Exception as exc:\r\n sio = StringIO()\r\n traceback.print_exc(file=sio)\r\n sio.seek(0)\r\n response = Response(\r\n status=500,\r\n body=render_error_page(500, exc, traceback=sio.read()),\r\n content_type=\"text/html\"\r\n )\r\n return response(environ, start_response)\r\n\r\n return application" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reset the URLconf after each request is finished.
def reset_urlconf(sender, **kwargs):
    set_urlconf(None)
[ "def process_request(self, request):\r\n self.request = request\r\n site = get_current_site(request)\r\n if site.id > 1:\r\n prefix = \"_{0}\".format(site.id)\r\n self.request.urlconf = settings.ROOT_URLCONF + prefix", "def reset(self):\n SGMLParser.reset(self)\n self.url = None", "def clear_requests(self) -> None:\n with self._lock:\n self._requests.clear()", "def after_finish(self):\n #Remove object reference to decrement the reference count\n self.fcgi_sock = None", "def _reset_resources(self):\n UnitTestServer._resources = ResourcePool()", "def __del__(self):\n for key, value in self.old.items():\n setattr(request.cfg, key, value)\n for key in self.new:\n delattr(request.cfg, key)", "def do_reset():\n global responses, nextQuestion, survey\n session['responses'] = responses = []\n nextQuestion = 0\n return redirect(\"/\")", "def restart(self):\n\n for request in self._requests:\n request.delivered = 0\n self._delivered = []", "def reset_url(url):\n if url in cached_urls:\n del cached_urls[url]\n del cached_urls_age[url]", "def reset_init(self):\n self.response_block = False\n self.line_counter = 0", "def process_request(self, request):\n global _urlconf_pages\n\n page_list = list(\n Page.objects.exclude(glitter_app_name='').values_list('id', 'url').order_by('id')\n )\n\n with _urlconf_lock:\n if page_list != _urlconf_pages:\n glitter_urls = 'glitter.urls'\n if glitter_urls in sys.modules:\n importlib.reload(sys.modules[glitter_urls])\n _urlconf_pages = page_list", "def reset():\n urllib2.install_opener(urllib2.build_opener())", "def apEnd(self):\n try:\n del self.factory.runningFetchers[self.request.uri]\n except exceptions.KeyError:\n log.debug(\"We are not on runningFetchers!!!\",'Fetcher')\n log.debug(\"Class is not in runningFetchers: \"+str(self.__class__),\n 'Fetcher')\n if self.request:\n log.debug(' URI:' + self.request.uri, 'Fetcher')\n log.debug('Running fetchers: '\n +str(self.factory.runningFetchers),'Fetcher')\n #raise exceptions.KeyError\n for req in self.requests[:]:\n self.remove_request(req)\n\n import gc\n #Cleanup circular references\n reactor.callLater(5, gc.collect)", "def rulesetsRefreshed(self):\n for handler in self.handlers:\n handler.rulesetsRefreshed()", "def invalidateCache():\n global _config\n _config = None", "def reload_():\n load_conf(True)", "def clear_cache(self):\n global JR_SITE_CACHE\n JR_SITE_CACHE = {}", "async def before_reset_context(self, request: Request, response: Optional[Response]) -> None:", "def process_request(self, request):\n\n host = request.get_host().lower()\n matches = main_domain_regex.match(host)\n if matches:\n request.subdomain = matches.group('subdomain')\n if request.subdomain is not UNSET:\n urlconf = settings.SUBDOMAIN_URLCONFS.get(request.subdomain)\n if urlconf is not None:\n request.urlconf = urlconf\n else:\n request.subdomain = None\n urlconf = virtualhost_to_urlconf(host)\n if urlconf is False:\n logger.error('Attempt to access %s as hostname; Ignored;' % host)\n raise Http404\n request.host = host\n request.urlconf = urlconf", "def _reset_request_timer(self) -> None:\n if self._request_timer:\n self._request_timer()\n self._request_timer = None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
input dataframe output classifier model and crossvalidation metrics
def classifier(df):
    y = df.pop('label')
    X = df.values
    X_train, X_test, y_train, y_test = (
        train_test_split(X, y, test_size=0.33, random_state=42)
    )
    gbc = GradientBoostingClassifier(n_estimators=200, learning_rate=0.1, max_features="auto")
    logistic = LogisticRegression(n_jobs=-1)
    mod4 = gbc.fit(X_train, y_train)
    mod3 = logistic.fit(X_train, y_train)
[ "def metrics_classification(df):\n true_labels = df['True value']\n predictions = df.drop('True value', axis=1)\n\n scoreDf = pd.DataFrame(columns=[\"Model\", \"Accuracy\"])\n scoreDf.set_index(\"Model\")\n for model_name, predictions in predictions.iteritems():\n scoreDf = scoreDf.append(get_metrics_for_algorithm(model_name, true_labels, predictions), ignore_index=True)\n return scoreDf", "def build_model(df, model_type):\n # split data and create data_dict\n\n # train model\n\n # run against test set\n\n # call get_metrics\n\n\n return df, metrics", "def evaluate_model(model, inputs_test, labels_test, category_names):\n y_hat = model.predict(inputs_test)\n\n score_df = pd.DataFrame({\"category\": category_names, \"precision\": np.nan, \"recall\": np.nan, \"F1 score\": np.nan})\n\n for ii, col_name in enumerate(category_names):\n pre, rec, score, support = precision_recall_fscore_support(labels_test.iloc[:, ii], y_hat[:, ii], average=\"weighted\")\n score_df.loc[score_df[\"category\"] == col_name, \"precision\"] = pre\n score_df.loc[score_df[\"category\"] == col_name, \"recall\"] = rec\n score_df.loc[score_df[\"category\"] == col_name, \"F1 score\"] = score\n\n print(score_df)\n print(score_df.mean())", "def evaluate_model(model, X_test, Y_test, category_names):\n Y_pred = model.predict(X_test)\n \n multi_f1 = multioutput_fscore(Y_test,Y_pred, beta = 1)\n overall_accuracy = (Y_pred == Y_test).mean().mean()\n\n print('Average overall accuracy {0:.2f}%'.format(overall_accuracy*100))\n print('F1 score (custom definition) {0:.2f}%'.format(multi_f1*100))\n\n # Print the whole classification report.\n Y_pred = pd.DataFrame(Y_pred, columns = Y_test.columns)\n \n for column in Y_test.columns:\n print('Model Performance with Category: {}'.format(column))\n print(classification_report(Y_test[column],Y_pred[column]))", "def evaluate_model(classifier, features, labels):\n\n # Cross-validate the classifier\n cv_scores = cross_val_score(classifier, features, labels, cv=5)\n cv_mean_score = np.mean(cv_scores)\n print('CV Accuracy (5-fold):', cv_scores)\n print('Mean CV Accuracy:', cv_mean_score)", "def build_classifier():\n X = pd.read_csv(os.path.join(PROJECT_DIR, \"train_features.csv\"), skiprows=1, header=None).as_matrix()\n Y = pd.read_csv(os.path.join(PROJECT_DIR, \"train_labels.csv\"), header=None).as_matrix().ravel()\n\n # Split data into training and cross validation sets\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=3131)\n\n std_scale = preprocessing.StandardScaler().fit(X_train)\n X_train_std = std_scale.transform(X_train)\n # X_test_std = std_scale.transform(X_test)\n\n pca_std = PCA(n_components=13).fit(X_train_std)\n X_train_std = pca_std.transform(X_train_std)\n # X_test_std = pca_std.transform(X_test_std)\n\n clf = svm.SVC(C=5)\n clf.fit(X_train_std, y_train)\n\n # Compare predictions of classifier on cross-validation sets with ground-truths\n # print clf.score(X_test_std, y_test)\n return clf, std_scale, pca_std", "def ml_classification(x_train, y_train, x_test, y_test, cross_validation=False):\n from time import time\n from sklearn.naive_bayes import GaussianNB\n from sklearn.svm import SVC\n from sklearn.tree import DecisionTreeClassifier\n from sklearn.neighbors import KNeighborsClassifier\n from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier\n from sklearn.metrics import accuracy_score\n\n from sklearn.model_selection import KFold\n from sklearn.base import clone\n\n classifiers = (GaussianNB(), SVC(\n kernel=\"rbf\", 
), DecisionTreeClassifier(), KNeighborsClassifier(\n n_neighbors=10), AdaBoostClassifier(), RandomForestClassifier(100))\n\n names = [\n \"Naive Bayes\", \"SVM\", \"Decision Trees\", \"KNeighbors\", \"AdaBoost\",\n \"Random Forest\"\n ]\n\n for idx, clf in enumerate(classifiers):\n\n clf_cv = clone(clf)\n\n print(\"\\n\", names[idx], \"\\n\", \"-\" * 20)\n\n t0 = time()\n # Fitting the model without cross validation\n clf.fit(x_train, y_train[:, 0])\n train_time = time() - t0\n y_pred = clf.predict(x_test)\n accuracy = accuracy_score(y_pred, y_test[:, 0])\n\n if cross_validation:\n k_fold = KFold(n_splits=10)\n\n t0 = time()\n # Fitting the model with cross validation\n for id_train, id_test in k_fold.split(x_train):\n # print(y_train[id_train, 0].shape)\n clf_cv.fit(x_train[id_train], y_train[id_train, 0])\n train_time_cv = time() - t0\n\n y_pred_cv = clf_cv.predict(x_test)\n accuracy_cv = accuracy_score(y_pred_cv, y_test[:, 0])\n\n print(\"Test Accuracy: \\t {:.3f}\".format(accuracy))\n if cross_validation:\n print(\"Test Accuracy CV:\\t {:.3f}\".format(accuracy_cv))\n\n print(\"Training Time: \\t {:.1f} ms\".format(train_time * 1000))\n if cross_validation:\n print(\n \"Training Time CV: \\t {:.1f} ms\".format(train_time_cv * 1000))", "def prediction(X_train, y_train):\n assert X_train.shape[0] == y_train.shape[0], \"data sets not the same size\"\n results_dict = {}\n # set scoring\n scoring = ['f1', 'accuracy'] # use f1 scoring because of class imbalance\n\n # baseline model\n print(\"Running baseline\")\n dummy_model = DummyClassifier(strategy='prior')\n scores = cross_validate(dummy_model, X_train, y_train, return_train_score=True, scoring=scoring)\n store_results(\"Baseline\", scores, results_dict)\n\n # model 1 Random Forest\n print(\"Running model 1\")\n rf_model = make_pipeline(RandomForestClassifier())\n scores = cross_validate(rf_model, X_train, y_train, return_train_score=True, scoring=scoring)\n # scores\n store_results(\"Random Forest\", scores, results_dict)\n\n # model 2 Logistic Regression\n print(\"Running model 2\")\n logreg_pipeline = make_pipeline(LogisticRegression(max_iter=600, class_weight=\"balanced\"))\n scores = cross_validate(logreg_pipeline, X_train, y_train, return_train_score=True, scoring=scoring)\n store_results(\"Logistic Regression\", scores, results_dict)\n results_dict= pd.DataFrame(results_dict)\n\n print(results_dict)\n\n # hyperparameter optimization on best models\n print(\"Optimizing hyperparameters for model 1\")\n param_dist = {\n \"n_estimators\": scipy.stats.randint(low=10, high=300),\n \"max_depth\": scipy.stats.randint(low=1, high=5000)\n }\n random_search = RandomizedSearchCV(RandomForestClassifier(), param_dist, n_iter=5, cv=5, random_state=120, scoring=scoring[0])\n random_search.fit(X_train, y_train)\n\n best_score_rf = random_search.best_score_\n best_est_rf = pd.DataFrame(random_search.best_estimator_)\n best_cv_rf = random_search.cv_results_\n hyperparam_df = pd.DataFrame(best_cv_rf)[['mean_test_score', 'params']]\n hyperparam_df['model'] = 'RandomForest'\n\n print(\"Optimizing hyperparameters for model 2\")\n param_dist = {\n \"class_weight\": [\"balanced\", \"none\"],\n \"C\": scipy.stats.randint(low=0, high=1000)\n }\n random_search = RandomizedSearchCV(LogisticRegression(max_iter=600), param_dist, n_iter=5, cv=5, random_state=120, scoring=scoring[0])\n random_search.fit(X_train, y_train)\n best_cv_logr = random_search.cv_results_\n best_hp_log = random_search.best_estimator_\n log_reg_df = 
pd.DataFrame(best_cv_logr)[['mean_test_score', 'params']]\n log_reg_df['model'] = 'LogisticRegression'\n\n # Compile results of hyperparameter optimization\n hyperparam_df = hyperparam_df.append(log_reg_df).sort_values('mean_test_score', ascending=False).reset_index(drop=True)\n column_test_name = \"mean \" + scoring[0] +\" score\"\n hyperparam_df = hyperparam_df.rename(columns={'mean_test_score': column_test_name})\n # Pick best classifier\n if hyperparam_df[\"model\"][0] == 'RandomForest':\n best_model = best_est_rf\n else: best_model = best_hp_log\n\n return hyperparam_df, best_model, results_dict", "def get_model_metrics(X_train,y_train,X_test,y_test,y_predTrain,y_predTest):\n \n statist_train = []\n MAE_lTrain = metrics.mean_absolute_error(y_train, y_predTrain)\n MSE_lTrain = metrics.mean_squared_error(y_train,y_predTrain)\n RMSE_lTrain = np.sqrt(metrics.mean_squared_error(y_train, y_predTrain))\n R2_lTrain = r2_score(y_train, y_predTrain)\n train = 'Train'\n\n list_metrics = [MAE_lTrain, MSE_lTrain, RMSE_lTrain, R2_lTrain, train]\n statist_train.append(list_metrics)\n statist_train = pd.DataFrame(statist_train,columns = ['MAE', 'MSE', 'RMSE', 'R2','Dataset'])\n \n statist_test = []\n MAE = metrics.mean_absolute_error(y_test, y_predTest)\n MSE = metrics.mean_squared_error(y_test, y_predTest)\n RMSE = np.sqrt(metrics.mean_squared_error(y_test, y_predTest))\n R2 = r2_score(y_test, y_predTest)\n test = 'Test'\n \n list_metrics = [MAE, MSE, RMSE, R2, test]\n statist_test.append(list_metrics)\n statist_test = pd.DataFrame(statist_test,columns = ['MAE', 'MSE', 'RMSE', 'R2', 'Dataset'])\n \n statist = pd.merge(statist_train,statist_test, how='outer').set_index('Dataset')\n \n return statist", "def report(model):\n model.fit(X_train, y_train)\n y_pred = model.predict(X_test)\n print('Confusion Matrix\\n')\n print(confusion_matrix(y_test, y_pred))\n print('\\nClassification Report\\n')\n print(classification_report(y_test, y_pred)) \n return y_pred", "def classification_report(self,X,y):\n y_pred = self.predict(X)\n clfr = classification_report(y, y_pred)\n\treturn clfr", "def evaluate_pipeline(pipeline, X_test, Y_test, category_names):\n Y_pred = pipeline.predict(X_test)\n\n Y_pred_df = pd.DataFrame( Y_pred, columns = Y_test.columns) \n report = average_classification_report(Y_test,Y_pred_df)\n overall_accuracy = (Y_pred == Y_test).mean().mean()\n\n print('Average overall accuracy {0:.2f}%'.format(overall_accuracy*100))\n print(report)\n\n # Print the whole classification report.\n Y_pred = pd.DataFrame(Y_pred, columns = Y_test.columns)\n \n for column in Y_test.columns:\n print('Model Performance with Category: {}'.format(column))\n print(classification_report(Y_test[column],Y_pred[column]))", "def evaluate(X, Y, hyperparams):\n\n# from scikits.learn.cross_val import LeaveOneOut\n# loo = LeaveOneOut(len(Y))\n from scikits.learn.cross_val import KFold\n K = 5\n# print >> sys.stderr, \"Using 10-fold cross-validation\"\n loo = KFold(len(Y), K)\n# print loo\n\n all_y_test = []\n all_y_test_predict = []\n\n nlltotal = 0.\n for train, test in loo:\n trainidx = [idx for idx in range(len(train)) if train[idx]]\n testidx = [idx for idx in range(len(test)) if test[idx]]\n X_train, X_test, y_train, y_test = X[trainidx], X[testidx], Y[trainidx], Y[testidx]\n# print \"train\", X_train.shape, y_train.shape\n# print \"test\", X_test.shape, y_test.shape\n\n if len(frozenset(y_train)) == 1:\n # Skip training on this LOO set if there is only one y-value in the training set\n continue\n\n clf = 
fit_classifier(X_train, y_train, hyperparams)\n\n# print \"target\", y_test\n## print \"predict\", clf.predict(X_test)\n# print \"predict\", clf.predict_proba(X_test)\n## print \"df\", clf.decision_function(X_test)\n## print \"score\", clf.score(X_test, y_test)\n\n# y_test_predict = clf.predict_proba(X_test)\n y_test_predict = clf.predict(X_test)\n# print y_test_predict\n\n all_y_test.append(y_test)\n all_y_test_predict.append(y_test_predict)\n\n## print clf.best_estimator\n# print precision_score(y_test, y_test_predict)\n# print recall_score(y_test, y_test_predict)\n# print classification_report(y_test, y_test_predict)\n#\n#\n# assert y_test.shape == (1,)\n# assert y_test_predict.shape == (1,)\n# if y_test_predict[0] >= 1.:\n## print >> sys.stderr, \"WHA? y_test_predict[0] %f >= 1. !!!\" % y_test_predict[0]\n# y_test_predict[0] = 1-1e-9\n# elif y_test_predict[0] <= 0.:\n## print >> sys.stderr, \"WHA? y_test_predict[0] %f <= 0. !!!\" % y_test_predict[0]\n# y_test_predict[0] = 1e-9\n#\n# if y_test[0] == 1:\n# probtarget = y_test_predict[0]\n# else:\n# assert y_test[0] == 0\n# probtarget = 1-y_test_predict[0]\n## print \"probtarget\", probtarget\n## print y_test[0], y_test_predict[0], repr(probtarget)\n# nll = -math.log(probtarget)\n## print \"nll\", nll\n## print\n#\n# nlltotal += nll\n# nlltotal /= len(Y)\n## print \"nlltotal %f (alpha=%f, n_iter=%d)\" % (nlltotal, alpha, n_iter)\n# return nlltotal\n\n y_test = numpy.hstack(all_y_test)\n y_test_predict = numpy.hstack(all_y_test_predict)\n assert y_test.ndim == 1\n assert y_test_predict.ndim == 1\n assert Y.shape == y_test.shape\n assert y_test.shape == y_test_predict.shape\n# import plot\n# print \"precision_recall_fscore_support\", scikits.learn.metrics.precision_recall_fscore_support(y_test, y_test_predict)\n f1 = f1_score(y_test, y_test_predict)\n# print \"\\tf1 = %0.3f when evaluating with %s\" % (f1, hyperparams)\n# sys.stdout.flush()\n# precision, recall, thresholds = scikits.learn.metrics.precision_recall_curve(y_test, y_test_predict)\n# plot.plot_precision_recall(precision, recall)\n# print \"confusion_matrix\", scikits.learn.metrics.confusion_matrix(y_test, y_test_predict)\n# print \"roc_curve\", scikits.learn.metrics.roc_curve(y_test, y_test_predict)\n# fpr, tpr, thresholds = scikits.learn.metrics.roc_curve(y_test, y_test_predict)\n# print \"auc\", scikits.learn.metrics.auc(fpr, tpr)\n# plot.plot_roc(fpr, tpr)\n return f1", "def MakeModels(df= None, filter_2D = False, set_normy = False, set_scaler = True, set_pca = True,\n algs = ['ridge', 'lasso', 'knn', 'svr', 'dt', 'rf', 'gp', 'xgb', 'net'], save_fig = True, show_fig = False,\n set_kernel = 'RBF', svr_opt_kern = False, set_search = 'GSV', set_varthresh = None, gp_alpha = None,\n gp_opt = None):\n # Raise error if df not specified\n if df is None:\n raise ValueError('Must specify a dataframe.')\n if isinstance(df, pd.DataFrame) == False:\n raise ValueError('Input df must be a dataframe.')\n\n # If filter_2D is True, will only use 2D descriptors\n if filter_2D == True:\n df = Filter_2D(df)\n\n # Remove infinite values\n df[df.columns] = df[df.columns].astype(float)\n df = df.fillna(0.0).astype(float)\n df = df.replace([np.inf, -np.inf], np.nan)\n df = df.dropna(axis=0, how='any')\n\n # Set X and y\n X = df.iloc[:, :-1]\n y = df.iloc[:, -1]\n\n # Train Test Split\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)\n\n #AddPipelinestoDict\n pipe_dict = AddPipeLinestoDict(set_normy= set_normy, set_pca = set_pca, set_kernel = set_kernel,\n 
svr_opt_kern= svr_opt_kern, set_scaler = set_scaler, algs = algs,\n set_varthresh = set_varthresh, gp_alpha= gp_alpha, gp_opt = gp_opt)\n\n for alg in pipe_dict.keys():\n if set_search == 'GSV':\n search = GridSearchCV(pipe_dict[alg]['pipe'], pipe_dict[alg]['params'], cv=5, n_jobs=-1)\n search.fit(X_train, y_train)\n pipe_dict[alg]['best_estimator'] = search.best_estimator_\n if set_search == 'RSV':\n search = RandomizedSearchCV(pipe_dict[alg]['pipe'], pipe_dict[alg]['params'], cv=5,\n n_jobs=-1, n_iter=500)\n search.fit(X_train, y_train)\n pipe_dict[alg]['best_estimator'] = search.best_estimator_\n\n # Print training scores and params\n print(\"{}'s Best score is: {}\".format(alg, search.best_score_))\n print(\"{}'s Best params are: {}\".format(alg, search.best_params_))\n\n # Make and print predictions and scores\n if alg == 'gp':\n y_pred, sigma = pipe_dict[alg]['best_estimator'].predict(X_test, return_std = True)\n\n else:\n y_pred = pipe_dict[alg]['best_estimator'].predict(X_test)\n\n r2 = r2_score(y_pred=y_pred, y_true=y_test)\n rmse = mean_squared_error(y_true=y_test, y_pred=y_pred, squared=False)\n print(\"{}'s R2 is: {}\".format(alg, r2))\n print(\"{}'s RMSE is: {}\".format(alg, rmse))\n\n # Plot prediction with error bars if GP\n if alg == 'gp':\n f1, ax = plt.subplots(figsize=(12, 12))\n plt.plot(y_test, y_pred, 'r.')\n plt.errorbar(y_test, y_pred, yerr=sigma, fmt='none', ecolor='blue', alpha=0.8)\n\n else:\n f1, ax = plt.subplots(figsize=(12, 12))\n plt.plot(y_test, y_pred, 'r.')\n\n # Plot line with ideal slope of 1\n x_vals = np.array(ax.get_xlim())\n y_vals = 0 + 1*x_vals\n plt.plot(x_vals, y_vals, \"r--\")\n\n # Add title string\n titlestr = []\n if filter_2D is True:\n title2D = '2D'\n titlestr.append(title2D)\n if set_pca == True:\n titlepca = 'PCA'\n titlestr.append(titlepca)\n if set_scaler == True:\n titlescaler = 'Scaled'\n titlestr.append(titlescaler)\n titlename = str(alg)\n titlestr.append(titlename)\n titley = y.name\n titlestr.append(titley)\n if 'gp' in algs:\n titlekern = set_kernel\n titlestr.append(titlekern)\n if set_normy is True:\n titlenormy = 'NormY'\n titlestr.append(titlenormy)\n if set_varthresh is not None:\n titlevt = 'VT_{}'.format(set_varthresh)\n titlestr.append(titlevt)\n\n # place a text box in upper left in axes coords\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n textstr = '\\n'.join(f'{k}: {v}' for k, v in search.best_params_.items())\n textstr2 = 'R2 is: %.3f' % r2 + '\\n' + 'RMSE is %.3f' % rmse + '\\n' + '\\n'.join(titlestr)\n ax.text(0.05, 0.95, textstr, transform=ax.transAxes, fontsize=14,\n verticalalignment='top', bbox=props)\n ax.text(0.05, 0.80, textstr2, transform=ax.transAxes, fontsize=14,\n verticalalignment='top', bbox=props)\n\n # Plot Figure with relative path to folder\n filedir = os.path.dirname(os.path.abspath(__file__))\n parentdir = os.path.dirname(filedir)\n plotdir = os.path.join(parentdir, 'Plots')\n titledir = os.path.join(plotdir, titley)\n if save_fig == True:\n if not os.path.exists(titledir):\n os.makedirs(titledir)\n plt.title(titlename)\n plt.xlabel('{} Experimental'.format(titley))\n plt.ylabel('{} Predicted'.format(titley))\n if save_fig == True:\n figfile = os.path.join(titledir, '_'.join(titlestr))\n plt.savefig('{}.png'.format(figfile))\n if show_fig == True:\n plt.show()\n plt.close()\n\n return {'pipe_dict': pipe_dict, 'X_test': X_test, 'y_test': y_test, 'y_pred': y_pred, 'df': df, 'titlestr': titlestr}", "def train_model(self):\n X = self.df[self.features].values\n Y = 
self.df['target'].values\n\n kfold = StratifiedKFold(n_splits=self.kfold_splits, shuffle=True, random_state=seed)\n nsplits = kfold.get_n_splits(X,Y)\n cvpredictions = [] # compare outputs from each cross-validation\n\n self.msg_svc.INFO(\"DLPYTORCH : Fitting K-Fold cross validations\")\n for ind,(train,test) in enumerate(kfold.split(X,Y)):\n self.msg_svc.INFO(\"DLPYTORCH : - Fitting K-Fold {0}\".format(ind))\n\n Y_train = Y[train]\n Y_test = Y[test]\n\n # -- store test/train data from each k-fold as histograms (to compare later)\n h_tests = {}\n h_trains = {}\n for n,v in self.targets.iteritems():\n h_tests[n] = ROOT.TH1D(\"test_\"+n,\"test_\"+n,10,0,10)\n h_trains[n] = ROOT.TH1D(\"train_\"+n,\"train_\"+n,10,0,10)\n\n # fill histogram for each target\n for (n,v) in enumerate(self.targets.iteritems()):\n [h_tests[n].Fill(i) for i in X[test][np.where(Y_test==v)]]\n [h_trains[n].Fill(i) for i in X[train][np.where(Y_train==v)]]\n\n\n ## Fit the model to training data & save the history\n self.model.train()\n e_losses = []\n for t in range(self.epochs):\n e_losses += self.train_epoch(X[train],Y_train)\n self.msg_svc.INFO(\"DLPYTORCH : Epoch {0} -- Loss {1}\".format(t,e_losses[-1]))\n self.histories.append(e_losses)\n\n # evaluate the model\n self.msg_svc.DEBUG(\"DLPYTORCH : Evaluate the model: \")\n self.model.eval()\n\n # Evaluate training sample\n self.msg_svc.INFO(\"DLPYTORCH : Predictions from training sample\")\n train_predictions = self.predict(X[train])\n self.train_predictions.append(train_predictions)\n\n # Evaluate test sample\n self.msg_svc.INFO(\"DLPYTORCH : Predictions from testing sample\")\n test_predictions = self.predict(X[test])\n self.test_predictions.append(test_predictions)\n\n # Make ROC curve from test sample\n self.msg_svc.INFO(\"DLPYTORCH : Make ROC curves\")\n fpr,tpr,_ = roc_curve(Y[test], test_predictions)\n self.fpr.append(fpr)\n self.tpr.append(tpr)\n\n # Plot the predictions to compare test/train\n self.msg_svc.INFO(\"DLPYTORCH : Plot the train/test predictions\")\n self.plotter.prediction(h_trains,h_tests) # compare DNN prediction for different targets\n\n self.msg_svc.INFO(\"DLPYTORCH : Finished K-Fold cross-validation: \")\n self.accuracy = {'mean':np.mean(cvpredictions),'std':np.std(cvpredictions)}\n self.msg_svc.INFO(\"DLPYTORCH : - Accuracy: {0:.2f}% (+/- {1:.2f}%)\".format(np.mean(cvpredictions), np.std(cvpredictions)))\n\n return", "def svm_classification(eeg_features_df):\n total_acc = 0\n cycles = 1\n all_best_acc = 0\n for x in range(0, cycles):\n train_x, train_y, test_x, test_y = prepare_data(eeg_features_df)\n\n # classification\n plot_confusion_matrix = 0\n best_acc, prediction = gp.my_svm(train_x, train_y, test_x, test_y, plot_confusion_matrix)\n total_acc += best_acc\n if best_acc > all_best_acc:\n all_best_acc = best_acc\n best_prediction = prediction\n\n test_y = np.array(test_y)\n print(\"MEAN Accuracy: \", total_acc/cycles, \" %\")\n print(\"BEST Accuracy: \", all_best_acc, \" %\")\n\n classes_accuracy(test_y, best_prediction)\n\n if confusion_matrix:\n confusion_matrix.prepare_plot(test_y, best_prediction)", "def evaluate(idxs):\n for key, result_analysis in analysis_dct.items():\n if \"avg-\" in key:\n new_idxs = list(set([i[0:-2] for i in idxs]))\n else:\n new_idxs = idxs\n # \n df_X = result_analysis.trinary.df_X\n ser_y = result_analysis.trinary.ser_y\n states = list(set(ser_y.values))\n #\n ensemble = classifier_ensemble.ClassifierEnsemble(\n filter_high_rank=100, size=100)\n ensemble.fit(df_X, ser_y)\n df_X_test = df_X.loc[new_idxs, 
:]\n ser_y_test = ser_y.loc[new_idxs]\n df_predict = ensemble.predict(df_X_test)\n df_predict[\"true state\"] = ser_y_test\n # Construct the predictions\n predictions = []\n for clf in ensemble.clfs:\n df_X_test_sub = df_X[ensemble.columns]\n dct = {i: [] for i in states}\n for idx in new_idxs:\n {i: dct[i].append(clf.coef_[i].dot(df_X_test_sub.loc[idx, :]))\n for i in states}\n df_result = pd.DataFrame(dct, index=new_idxs)\n predictions.append(df_result)\n result_analysis.df_predict = df_predict\n result_analysis.predictions = predictions\n result_analysis.ensemble = ensemble", "def evaluate_model(model, testX, testY, batch_size, label_names):\n # Predictions\n predictions = model.predict(testX, batch_size=batch_size)\n \n # Classification report\n classification = classification_report(testY.argmax(axis=1),\n predictions.argmax(axis=1),\n target_names=label_names)\n \n # Print classification report to terminal\n print(classification)\n \n # name for saving report\n report_path = os.path.join(\"..\", \"out\", \"classification_report.txt\")\n \n # Save classification report\n with open(report_path, 'w', encoding='utf-8') as f:\n f.writelines(classification_report(testY.argmax(axis=1),\n predictions.argmax(axis=1),\n target_names=label_names))\n \n print(f\"\\n[INFO] Classification report is saved as '{report_path}'.\")", "def evaulate_model(self, model=True):\n print('Start predicting...')\n y_pred = self.gbm.predict(\n self.X_test, num_iteration=self.gbm.best_iteration_)\n result = pd.DataFrame(y_pred - self.y_test).std()\n print(result)\n\n if not model:\n pre = pd.read_csv('elo/data/sample_submission.csv')\n pre.target = y_pred\n pre.to_csv('elo/data/result.csv', index=False)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the str of this command; a bool option renders as just its long flag, etc.
def to_cmd(self):
    if self.otype is bool:
        if self.value:
            return self.long()
        else:
            return ""
    else:
        return " ".join([self.long(), str(self.value) if self.value is not None else ""])
[ "def getCmdString(self,cmd):\n if hasattr(cmd,\"command\") and isinstance(cmd.command, Command):\n cmd.command = cmd.command.composeCmdString()\n return super(self.__class__,self).getCmdString(cmd)\n elif isinstance(cmd,list):\n cmdarr = []\n for c in cmd:\n if hasattr(c,\"command\") and isinstance(c.command, Command):\n c.command = c.command.composeCmdString()\n cmdarr.append(c.command.composeCmdString())\n cmdarr.append(super(self.__class__,self).getCmdString(cmd))\n return \"\\n\".join(cmdarr)\n else:\n return super(self.__class__,self).getCmdString(cmd)", "def get_short_string(self):\n ret_val = []\n ret_val.append(self.type)\n ret_val.append(self.TYPE_DELIM)\n ret_val.append(self.subtype)\n return \"\".join(ret_val)", "def cmdset_string(self):\n name, alias = self.cmd()\n if not name:\n AssertionError('Command name is mandatory!')\n t = name\n if alias:\n t += ', ' + alias\n return t", "def __getcmdstr(self, cmd, *args):\n params = []\n for arg in args:\n params.append(getitemslist(arg))\n items = []\n if params:\n for i in range(max([len(param) for param in params])):\n for param in params:\n if i < len(param):\n param = param[i]\n if isinstance(param, bool):\n param = '1' if param else '0'\n elif isinstance(param, float):\n param = format(param, self.floatformat)\n items.append('%s ' % str(param))\n cmdstr = '%s %s' % (cmd, ''.join(items))\n return cmdstr.strip()", "def render_command(self):\n return ' '.join(self.command)", "def composeCmdString(self):\n if hasattr(self,\"cmdstring\"):\n print \"cmdstring is %s\" % self.cmdstring\n return self.cmdstring\n cmdstring = \"\"\n if hasattr(self,\"cmdarray\") and len(self.cmdarray) > 0:\n cmdstring += \" \".join(self.cmdarray)\n if hasattr(self,\"cmdparametervalues\"):\n if not hasattr(self,\"parameterdefs\"):\n for k,v in self.cmdparametervalues.iteritems():\n if not k.startswith(\"-\"):\n if len(k) == 1:\n k = \"-\" + k\n else:\n k = \"--\" + k\n if v == False:\n continue\n if v == True:\n cmdstring += \" %s\" % k\n else:\n cmdstring += \" %s=%s\" % (k,v)\n else:\n # This is the branch for commands defined by parameter defs\n # Tool name should be in the \"bin\" attribute \n if hasattr(self,\"bin\"):\n cmdstring = self.bin\n else:\n raise Exception(\"Specified command must have a 'bin' attribute.\")\n \n # Determines if the argument pattern is an optional one\n optionalargre = re.compile(\"\\?.+?\\?\")\n \n # Determines if the argument pattern has quoting of the <VALUE>\n quotecheckre = re.compile(\"(\\S)<VALUE>(\\S)\") \n \n # Go through the parameter defs in order and \n # for any parameter with a value, substitute the value into the \n # \"pattern\"\n \n # Sort the parameterdef keys based on pdef.order\n sortednames = sorted(self.parameterdefs.iterkeys(),key=lambda name: int(self.parameterdefs[name].order))\n \n for pname in sortednames:\n pdef = self.parameterdefs[pname]\n if pname in self.cmdparametervalues:\n value = self.cmdparametervalues[pname]\n \n if value == False:\n continue\n \n # If <VALUE> is surrounded by something (e.g. 
single quotes)\n # then we should make sure that char is escaped in the value\n quotestring = None\n match = quotecheckre.search(pdef.pattern)\n if match is not None:\n if len(match.groups()) == 2:\n if match.group(1) == match.group(2):\n quotestring = match.group(1)\n \n # Do some courtesy escaping\n if isinstance(value,basestring) and quotestring is not None:\n # Remove existing escapes\n value = value.replace(\"\\\\\" + quotestring,quotestring)\n # Escape the quote\n value = value.replace(quotestring,\"\\\\\" + quotestring)\n \n \n # Substitute the value into the pattern\n if optionalargre.search(pdef.pattern) is not None:\n \n # This is the case of a switch with an optional argument\n if value == True:\n # Adding the switch with no argument\n cmdstring += \" %s\" % optionalargre.sub(\"\",pdef.pattern)\n else:\n # Remove the question marks and substitute the VALUE\n cmdstring += \" %s\" % pdef.pattern.replace(\"?\",\"\").replace(\"<VALUE>\",value)\n \n else:\n if value == True:\n cmdstring += \" %s\" % pdef.pattern\n else:\n cmdstring += \" %s\" % pdef.pattern.replace(\"<VALUE>\",value)\n \n return cmdstring.encode('ascii','ignore')", "def getStr(self):\r\n return _osgDB.Field_getStr(self)", "def takeStr(self):\r\n return _osgDB.Field_takeStr(self)", "def __str__(self):\n if self._arg is not None:\n return \"{0} {1}\".format(self._opcode, self._arg)\n else:\n return \"{0}\".format(self._opcode)", "def string_get(self, ypos, xpos, length):\n # the screen's co-ordinates are 1 based, but the command is 0 based\n xpos -= 1\n ypos -= 1\n cmd = self.exec_command('ascii({0},{1},{2})'.format(ypos, xpos, length).encode(\"utf-8\"))\n # this usage of utf-8 should only return a single line of data\n assert len(cmd.data) == 1, cmd.data\n return cmd.data[0].decode(\"utf-8\")", "def rawstr(self):\n return self.__rawstr", "def get_help_str(self):\n return self.help_str", "def make_cmd_str(self):\n cmd_str = f' -ss {self.start_time} '\n if self.end_time:\n cmd_str += f'-to {self.end_time} '\n cmd_str += f\"-metadata title='{self.title}' \"\n if self.artist:\n cmd_str += f\" -metadata artist='{self.artist}' \"\n\n cmd_str += self._generate_track_filename(Track.file_extention)\n return cmd_str", "def get_command_str(self, param_id):\n return \" \".join(map(str, self.params[param_id]))", "def test_command_repr(self):\n cmd = Command(\"device\", \"command name\", 1, \"def\", 3, kw1=\"abc\")\n assert (\n repr(cmd) == \"<Command('device', 'command name', 1, 'def', 3, kw1='abc')>\"\n )", "def raw_string(self) -> str: # test passed\n return self._string_content", "def get_command(data):\n return data.get(\"command\")", "def print_cmd(self):\n \n return self.__print_cmd", "def datatype(self) -> str:", "def __repr__(self):\n return f'{self.__class__.__name__}(added={self.added!r}, command={self.command!r})'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Randomize a sequence of n nucleotides. We will refer to it as a root.
def sample_root_seq(n):
    seq = ""
    for i in range(n):
        seq += random.sample(MLE.NUCS, 1)[0]
    return seq
[ "def rumble_sequence(n):\n # range starts with 0 and ends with n-1\n seq = list(range(n))\n # but Royal Rumbles start with 1 and ends with n\n incr_seq = [i+1 for i in seq] \n output_seq = random.sample(incr_seq, n)\n\n return output_seq", "def _random_dismantlable_lattice(n):\n from sage.misc.prandom import randint\n\n D = DiGraph({0: [n-1]})\n for i in range(1, n-1):\n a = randint(0, i//2)\n b_ = list(D.depth_first_search(a))\n b = b_[randint(1, len(b_)-1)]\n D.add_vertex(i)\n D.add_edge(a, i)\n D.add_edge(i, b)\n D.delete_edge(a, b)\n return D", "def generate_nodes(nr_nodes):\n return [''.join(random.choice(string.ascii_letters + string.digits) for n in range(4)) \\\n for i in range(nr_nodes)]", "def build_random_dummy_tree(n):\n tree = BinaryTree()\n for i in range(n):\n tree.insert_random(Node(random.randint(0,BinaryTree.DUMMY_RANGE)))\n return tree", "def random_with_n_digits(self, n):\n\n range_start = 10 ** (n - 1)\n range_end = (10 ** n) - 1\n return randint(range_start, range_end)", "def flip_random_n(chain,n):\n for i in range(n):\n chain = flip(chain, rnd.randint(chain.size))\n return chain", "def random_nodes(G, n, prng):\n\tgraph_nodes = list(G.nodes)\n\tif n > len(graph_nodes):\n\t\tn = len(graph_nodes)\n\tnodes = prng.sample(graph_nodes, n)\n\tnodes = np.array(nodes)\n\n\treturn nodes", "def genRandomSequence(numDoms):\n files = ls(DATAPATH)\n f = list(open(DATAPATH + choice(files)))[1::2]\n sequence = choice(f).strip()\n sequence.translate(None, '-')\n \n starts, ends, seqs = findDomains(sequence, hmmfile)\n if len(starts) < numDoms:\n return genRandomSequence(numDoms)\n prefix = sequence[:starts[0]]\n suffix = sequence[ends[-1]:]\n if prefix == '' or suffix == '':\n return genRandomSequence(numDoms)\n linkers = []\n for i in range(len(starts)-1):\n linkers.append(sequence[ends[i]+1:starts[i+1]])\n \n middle = ''\n for _ in range(numDoms - 1):\n middle += choice(seqs) + choice(linkers)\n middle += choice(seqs)\n\n newSeq = prefix + middle + suffix\n newSeq = ''.join(newSeq.split('-'))\n\n #Deletes all lowercase letters\n newSeq = newSeq.translate(None, string.ascii_lowercase)\n #Deletes all illegal AA characters\n newSeq = newSeq.translate(None, 'BJOUXZ')\n\n return newSeq", "def _generate_nodes(self, n, new_node):\n i = 0\n while i < n:\n x, y = random.random(), random.random()\n if (x - .5) ** 2 + (y - .5) ** 2 < .5 ** 2:\n yield new_node(x, y)\n i += 1", "def sample_random_node(self):\n #Naive Approach \n return self.tree[int(self.rng.random()*len(self.tree))] # OUT OF BOUNDS ERRORS? 
Check this", "def mutate(self, n):\n seq_len = len(self.seq)\n self.mut_ind_list = []\n mutation_count = 0\n while mutation_count < n:\n mut_ind = np.random.randint(0, seq_len - 1)\n self.mut_ind_list.append(mut_ind)\n mut_nuc = self.mutated_seq[mut_ind]\n mut_choices = np.asarray(['transition', 'transversion'])\n mut_type = np.random.choice(mut_choices, p=[0.75, 0.25])\n if mut_type == 'transition':\n mutated_nuc = t_ition[mut_nuc]\n else:\n mutated_nuc = np.random.choice(t_version[mut_nuc], p=[0.5, 0.5])\n if mut_ind % 3 == 0:\n new_codon = str(mutated_nuc) + str(self.mutated_seq[mut_ind + 1]) + str(self.mutated_seq[mut_ind + 2])\n if (new_codon != 'TAA') and (new_codon != 'TAG') and (new_codon != 'TGA'):\n self.mutated_seq[mut_ind] = mutated_nuc\n mutation_count += 1\n elif mut_ind % 3 == 1:\n new_codon = str(self.mutated_seq[mut_ind - 1]) + str(mutated_nuc) + str(self.mutated_seq[mut_ind + 1])\n if (new_codon != 'TAA') and (new_codon != 'TAG') and (new_codon != 'TGA'):\n self.mutated_seq[mut_ind] = mutated_nuc\n mutation_count += 1\n else:\n new_codon = str(self.mutated_seq[mut_ind - 2]) + str(self.mutated_seq[mut_ind - 1]) + str(mutated_nuc)\n if (new_codon != 'TAA') and (new_codon != 'TAG') and (new_codon != 'TGA'):\n self.mutated_seq[mut_ind] = mutated_nuc\n mutation_count += 1", "def random_datasets(n, genomes_path):\n p = Path(genomes_path)\n genomes = list(p.glob(\"*.fna.gz\"))\n random_genomes = random.sample(population=genomes, k=n)\n return random_genomes", "def generateNumSyndicate():\n if randint(0, 3):\n return str(randint(1, numSyndicates))\n return 'null'", "def __init__(self, n=3, seed=None):\n self.n = n\n if seed != None:\n random.seed(seed)\n # TODO: your code goes here\n #101001010101 idk what goes here", "def test_nr_nucleotide(self):\n preds = [\n MotifChange(\"A\", \"C\", forward_only=True),\n MotifChange(\"G\", \"A\", forward_only=True),\n ]\n sm = NonReversibleNucleotide(predicates=preds)\n got = sm.get_param_list()\n self.assertEqual(got, [\"A>C\", \"G>A\"])", "def random_subtree(program):\n nodes = program.size_of_subtree()\n node_index = random.randint(math.ceil((nodes - 1) / 3), nodes - 1)\n return subtree_at_index(program, node_index)", "def sample(self, n, unique=False):\n self._sampled_unique = unique\n random_values = np.random.rand(int(n * 1 if unique else n))\n tree_idxs, scaled_random_values = self.find(random_values)\n if unique:\n i = 0\n while i < 100:\n tree_idxs, unique_idx = np.unique(tree_idxs, return_index=True)\n scaled_random_values = scaled_random_values[unique_idx]\n if len(tree_idxs) < n:\n new_idxs, new_values = self.find(\n np.random.rand(2 * (n - len(tree_idxs))))\n tree_idxs = np.concatenate([tree_idxs, new_idxs])\n scaled_random_values = np.concatenate(\n [scaled_random_values, new_values])\n else:\n break\n i += 1\n if len(tree_idxs) < n:\n raise RuntimeError(\n \"After 100 tries, unable to get unique indexes.\")\n tree_idxs = tree_idxs[:n]\n\n priorities = self.tree[tree_idxs]\n self.prev_tree_idxs = tree_idxs\n T_idxs, B_idxs = np.divmod(tree_idxs - self.low_idx, self.B)\n return (T_idxs, B_idxs), priorities", "def simulate_sequence(length):\n dna = ['A', 'C', 'G', 'T']\n sequence = ''\n for i in range(length):\n sequence += random.choice(dna)\n return sequence", "def gen(self, n=0):\n if n == 0:\n return self._gen_relative()\n return self(self.base_field().gen(n - 1))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets a sequence and samples from it a sequence of the same length that we might get after time t according to JC. We will refer to it as a leaf.
def sample_leaf(other, t):
    seq = ""
    for char in other:
        seq += MLE.sample_b(char, t)
    return seq
[ "def subseq1():\n\n longdur = 201e-9\n\n wait = bb.BluePrint()\n wait.insertSegment(0, ramp, args=(0, 0), dur=10e-9)\n wait.setSR(SR1)\n\n wiggle = bb.BluePrint()\n wiggle.insertSegment(0, sine, args=(10e6, 10e-3, 0, 0), dur=longdur)\n wiggle.setSR(SR1)\n\n blob = bb.BluePrint()\n blob.insertSegment(0, gauss, args=(25e-3, 12e-9, 0, 0), dur=longdur)\n blob.setSR(SR1)\n\n slope = bb.BluePrint()\n slope.insertSegment(0, ramp, (0, 15e-3), dur=longdur)\n slope.setSR(SR1)\n\n elem1 = bb.Element()\n elem1.addBluePrint(1, wait)\n elem1.addBluePrint(2, wait)\n elem1.addBluePrint(3, wait)\n\n elem2 = bb.Element()\n elem2.addBluePrint(1, wiggle)\n elem2.addBluePrint(2, slope)\n elem2.addBluePrint(3, blob)\n\n elem3 = elem1.copy()\n\n seq = Sequence()\n seq.setSR(SR1)\n seq.addElement(1, elem1)\n seq.addElement(2, elem2)\n seq.addElement(3, elem3)\n seq.setSequencingNumberOfRepetitions(1, 10)\n seq.setSequencingNumberOfRepetitions(3, 10)\n\n return seq", "def sample_trajectory(self):\n ind = np.random.choice(self.N, 1, p=self.W[-1, :])\n return self.genealogy(ind)", "def sample(self, t, initial='zero'):\n\n ts = np.nan * np.zeros((t, self.dim), dtype=float)\n\n if initial == 'zero':\n ts[:self.order, :] = 0\n else:\n ts[:self.order, :] = initial\n\n for ctr in range(self.order, t):\n # get concatenated version of previous timesteps\n prev_stack = ts[ctr - self.order:ctr, :]\n prev_cc = np.concatenate(np.flipud(prev_stack))[:, None]\n mean = self.a_full.dot(prev_cc).flatten()\n next_tp = np.random.multivariate_normal(mean, self.k)\n ts[ctr] = next_tp\n return ts", "def sample(self):\r\n # if the couter is bigger than bufferSize, which means the replaybuffer is full\r\n if self.Counter > self.bufferSize:\r\n rang = self.bufferSize\r\n else:\r\n rang = self.Counter\r\n indexs = np.random.choice(rang,size = self.batchSize)\r\n samples = self.buffer[indexs,:]\r\n return samples", "def read_simple(self):\r\n\r\n #Create the sequence that we gonna feed with blocks\r\n reading_seq = Sequence('Reading sequence')\r\n \r\n # Create a Channel pulse for the train of pulse\r\n train = ChannelPulses(channel=6, name='Tchou Tchou')\r\n train.add_trainPulses(0, 20,20, 20)\r\n \r\n dt_read1 = np.linspace(50, 550, 4)\r\n \r\n for i, dt in enumerate(dt_read1):\r\n # Create a Channel for reading the counts\r\n read = ChannelPulses(channel=1, name='read')\r\n read.add_pulses([30,30+dt, 600,670])\r\n \r\n #Create the block\r\n block = PulsePatternBlock(name='Block read %d'%i)\r\n block.add_channelEvents([read, train])\r\n \r\n # Add the block to the sequence\r\n reading_seq.add_block(block)\r\n \r\n return reading_seq", "def subseq2():\n\n longdur = 101e-9\n\n wait = bb.BluePrint()\n wait.insertSegment(0, ramp, args=(0, 0), dur=10e-9)\n wait.setSR(SR1)\n\n wiggle = bb.BluePrint()\n wiggle.insertSegment(0, sine, args=(10e6, 10e-3, 0, 0), dur=longdur)\n wiggle.setSR(SR1)\n\n blob = bb.BluePrint()\n blob.insertSegment(0, gauss, args=(25e-3, 12e-9, 0, 0), dur=longdur)\n blob.setSR(SR1)\n\n slope = bb.BluePrint()\n slope.insertSegment(0, ramp, (0, 15e-3), dur=longdur)\n slope.setSR(SR1)\n\n elem1 = bb.Element()\n elem1.addBluePrint(1, wait)\n elem1.addBluePrint(2, wait)\n elem1.addBluePrint(3, wait)\n\n elem2 = bb.Element()\n elem2.addBluePrint(1, wiggle)\n elem2.addBluePrint(2, slope)\n elem2.addBluePrint(3, blob)\n\n seq = Sequence()\n seq.setSR(SR1)\n seq.addElement(1, elem2)\n seq.addElement(2, elem1)\n seq.setSequencingNumberOfRepetitions(2, 15)\n\n return seq", "def sample_root_seq(n):\n seq = \"\"\n for i in range(n):\n 
seq += random.sample(MLE.NUCS, 1)[0]\n return seq", "def sample_batch(self, batch_size=64):\n\n # Retrieve the root.\n root = self.sub_left if (len(self.parents) == 0) else self.parents[0]\n ss = np.random.uniform(0, root.value, batch_size)\n return retrieve_leaf_vec(root, ss)", "def sample(self, size):", "def test_one_sequenced_sample(base_store: Store, helpers):\n\n # GIVEN a database with a case with a sequenced sample\n new_case = add_case(helpers, base_store)\n sample = helpers.add_sample(base_store, sequenced_at=datetime.now())\n base_store.relate_sample(new_case, sample, \"unknown\")\n assert sample.sequenced_at is not None\n\n # WHEN getting active cases\n cases = base_store.cases()\n\n # THEN cases should contain number of sequenced samples\n assert cases\n for case in cases:\n assert case.get(\"samples_sequenced\") == 1", "def grab(sequence, random = numpy.random):\n\n return sequence[random.randint(len(sequence))]", "def _slice_a_subsequence(self, seq):\n n = len(seq)\n if n == 1:\n return seq\n else:\n start = random_pick(n)\n length = random_pick(range(1, n))\n end = start + length\n if end < n:\n sub_seq = seq[start:end]\n else:\n sub_seq = seq[start:n] + seq[0:end - n - 1]\n\n return sub_seq", "def sample_random_node(self):\n #Naive Approach \n return self.tree[int(self.rng.random()*len(self.tree))] # OUT OF BOUNDS ERRORS? Check this", "def sample_trajectory(self) -> Optional[TransitionBatch]:\n if not self.trajectory_indices:\n return None\n idx = self._rng.choice(len(self.trajectory_indices))\n indices = np.arange(\n self.trajectory_indices[idx][0], self.trajectory_indices[idx][1]\n )\n return self._batch_from_indices(indices)", "def get_sample(self, n_taps):\n return(self.bank[-(n_taps)])", "def simulate_sequence(length):\n dna = ['A', 'C', 'G', 'T']\n sequence = ''\n for i in range(length):\n sequence += random.choice(dna)\n return sequence", "def test_sample(self):\n\n seq = [self.rng() for _ in range(20)]\n length = len(seq)\n\n # Sample size too large\n with self.assertRaises(ValueError):\n self.rng.choice(seq, length + 1, replace=False)\n\n # Check sample size and content\n old_seq = seq.copy()\n k = self.rng.randint(length)\n sample = self.rng.choice(seq, k, replace=False)\n\n self.assertEqual(len(sample), k)\n for e in sample:\n self.assertIn(e, seq)\n\n # Seq should not be modified\n self.assertListEqual(seq, old_seq)", "def getSequence(self, loc=None, **kargs):\n raise NotImplementedError", "def subSample(self, data): \n data['Display Time'] = pd.to_datetime(data['Display Time'])\n data['time_gap'] = data['Display Time'].shift(1)-data['Display Time'][0]\n data['time_gap'][0] = '00:00:00'\n mods = [0,870,871,872,873,874,875,876,877,878,879,880,881,882,883,884,885,886,887,888,889,890,891,892,893,894,895,896,897,898,899]\n subset = pd.DataFrame()\n for i in range(1,len(data.index)):\n seconds = data['time_gap'][i].total_seconds()\n if (seconds%900) in mods:\n subj_id = data['subjectId'][i]\n gv = data['GlucoseValue'][i]\n dt = data['Display Time'][i]\n temp_df = pd.DataFrame({'Display Time':[dt], 'GlucoseValue':[gv], 'subjectId':[subj_id]})\n subset = pd.concat([temp_df,subset],ignore_index=True)\n subset = subset.iloc[::-1]\n subset = subset.reset_index(drop=True)\n data.drop(['time_gap'], axis=1, inplace=True)\n return subset" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Samples a tree of 4 leaves
def sample_tree(n, alpha, beta):
    root = sample_root_seq(n)

    leaf1 = sample_leaf(root, beta)
    leaf2 = sample_leaf(root, alpha)

    root_neighbor = sample_leaf(root, alpha)
    leaf3 = sample_leaf(root_neighbor, beta)
    leaf4 = sample_leaf(root_neighbor, alpha)

    return leaf1, leaf2, leaf3, leaf4
[ "def sample_tree(nleaves):\n if nleaves < 3:\n raise Exception('too few requested leaves')\n V = {0, 1}\n E = {frozenset([0, 1])}\n leaves = {0, 1}\n for i in range(nleaves-2):\n v = random.choice(list(leaves))\n leaves.remove(v)\n va = len(V)\n vb = va + 1\n for nat in (va, vb):\n V.add(nat)\n leaves.add(nat)\n E.add(frozenset([v, nat]))\n return V, E", "def randomSample(tree):\r\n\r\n\t# Take an initial sample\r\n\tsample = Node((uniform(-pi, pi), uniform(-2, 2)))\r\n\r\n\twhile existsInTree(tree, sample): # sample again until we haven't see said sample\r\n\t\tsample = Node((uniform(-pi, pi), uniform(-2, 2)))\r\n\r\n\treturn sample", "def do_test_insert_4_leafs(self, hashtype):\n check_hashtype(hashtype)\n name = self.rng.next_file_name(8)\n tree = NLHTree(name, hashtype)\n leaf_names = set()\n a_leaf = self.make_leaf(leaf_names, hashtype)\n b_leaf = self.make_leaf(leaf_names, hashtype)\n c_leaf = self.make_leaf(leaf_names, hashtype)\n d_leaf = self.make_leaf(leaf_names, hashtype)\n self.assertEqual(len(tree.nodes), 0)\n tree.insert(a_leaf)\n self.assertEqual(len(tree.nodes), 1)\n tree.insert(b_leaf)\n self.assertEqual(len(tree.nodes), 2)\n tree.insert(c_leaf)\n self.assertEqual(len(tree.nodes), 3)\n tree.insert(d_leaf)\n self.assertEqual(len(tree.nodes), 4)\n # we expect the nodes to be sorted\n for ndx in range(3):\n self.assertTrue(tree.nodes[ndx].name < tree.nodes[ndx + 1].name)\n\n matches = tree.list('*')\n for ndx, qqq in enumerate(tree.nodes):\n self.assertEqual(matches[ndx], ' ' + qqq.name)\n\n self.assertEqual(tree, tree)\n tree2 = tree.clone()\n self.assertEqual(tree2, tree)", "def sample_random_node(self):\n #Naive Approach \n return self.tree[int(self.rng.random()*len(self.tree))] # OUT OF BOUNDS ERRORS? Check this", "def setUp(self):\n self.Empty = TreeNode()\n self.Single = TreeNode(Name='a')\n self.Child = TreeNode(Name='b')\n self.OneChild = TreeNode(Name='a', Children=[self.Child])\n self.Multi = TreeNode(Name = 'a', Children='bcd')\n self.Repeated = TreeNode(Name='x', Children='aaa')\n self.BigName = map(TreeNode, '0123456789')\n self.BigParent = TreeNode(Name = 'x', Children = self.BigName)\n self.Comparisons = map(TreeNode, 'aab')\n \n nodes = dict([(x, TreeNode(x)) for x in 'abcdefgh'])\n nodes['a'].append(nodes['b'])\n nodes['b'].append(nodes['c'])\n nodes['c'].append(nodes['d'])\n nodes['c'].append(nodes['e'])\n nodes['c'].append(nodes['f'])\n nodes['f'].append(nodes['g'])\n nodes['a'].append(nodes['h'])\n self.TreeNode = nodes\n self.TreeRoot = nodes['a']\n\n self.s = '((H,G),(R,M));'\n self.t = DndParser(self.s, TreeNode)\n self.s2 = '(((H,G),R),M);'\n self.t2 = DndParser(self.s2, TreeNode)\n self.s4 = '(((H,G),(O,R)),X);'\n self.t4 = DndParser(self.s4, TreeNode)", "def draw_random_trees(list_of_lists,Samples,index,n=100000):\n\trandom_trees = []\n\tfor i in xrange(n):\n\t\t#create raw_tree\n\t\traw_tree = []\n\t\tfor alist in list_of_lists:\n\t\t\torigin = random.choice(alist)\t\n\t\t\traw_tree.append(origin)\n\t\t#create tree\n\t\tprint [i.name for i in raw_tree], [i.name for i in Samples]\n\t\tfinal_trees = create_tree(raw_tree, Samples, index)\n\t\tif final_trees: random_trees += final_trees\n\t#return\n\treturn random_trees", "def create_tree(raw_tree, Samples, index):\n\t#initialize index of sample\n\tcount = 0\n\tif count == index: count += 1 #index to be skipped\n\t#initialize final tree\n\tfinal_tree = Tree()\n\t#add each sample to final tree in proper format\n\tfor origin in raw_tree:\n\t\t#add node\n\t\tfinal_tree.nodes.append(Node(origin, 
Samples[count]))\n\t\t#add to index\n\t\tcount += 1\n\t\tif count == index: count += 1 #index to be skipped\n\t#don't append tree if has loops\n\tfinal_tree.to_dict()\n\tif final_tree.loops(): return None\n\t#if pairs of samples from same time point exist, change the format to include and internode\n\tfinal_tree = get_internodes(final_tree)\n if final_tree.double_progenitor(): return None\n\t#sort nodes\n\tfinal_tree.sort_nodes()\n\t#return\n\treturn final_tree", "def test_insert_4_leafs(self):\n for using in [HashTypes.SHA1, HashTypes.SHA2, HashTypes.SHA3, ]:\n self.do_test_insert_4_leafs(using)", "def test_random_spanning_tree_multiplicative_large():\n from math import exp\n from random import Random\n\n pytest.importorskip(\"numpy\")\n stats = pytest.importorskip(\"scipy.stats\")\n\n gamma = {\n (0, 1): -0.6383,\n (0, 2): -0.6827,\n (0, 5): 0,\n (1, 2): -1.0781,\n (1, 4): 0,\n (2, 3): 0,\n (5, 3): -0.2820,\n (5, 4): -0.3327,\n (4, 3): -0.9927,\n }\n\n # The undirected support of gamma\n G = nx.Graph()\n for u, v in gamma:\n G.add_edge(u, v, lambda_key=exp(gamma[(u, v)]))\n\n # Find the multiplicative weight for each tree.\n total_weight = 0\n tree_expected = {}\n for t in nx.SpanningTreeIterator(G):\n # Find the multiplicative weight of the spanning tree\n weight = 1\n for u, v, d in t.edges(data=\"lambda_key\"):\n weight *= d\n tree_expected[t] = weight\n total_weight += weight\n\n # Assert that every tree has an entry in the expected distribution\n assert len(tree_expected) == 75\n\n # Set the sample size and then calculate the expected number of times we\n # expect to see each tree. This test uses a near minimum sample size where\n # the most unlikely tree has an expected frequency of 5.15.\n # (Minimum required is 5)\n #\n # Here we also initialize the tree_actual dict so that we know the keys\n # match between the two. We will later take advantage of the fact that since\n # python 3.7 dict order is guaranteed so the expected and actual data will\n # have the same order.\n sample_size = 1200\n tree_actual = {}\n for t in tree_expected:\n tree_expected[t] = (tree_expected[t] / total_weight) * sample_size\n tree_actual[t] = 0\n\n # Sample the spanning trees\n #\n # Assert that they are actually trees and record which of the 75 trees we\n # have sampled.\n #\n # For repeatability, we want to take advantage of the decorators in NetworkX\n # to randomly sample the same sample each time. 
However, if we pass in a\n # constant seed to sample_spanning_tree we will get the same tree each time.\n # Instead, we can create our own random number generator with a fixed seed\n # and pass those into sample_spanning_tree.\n rng = Random(37)\n for _ in range(sample_size):\n sampled_tree = nx.random_spanning_tree(G, \"lambda_key\", seed=rng)\n assert nx.is_tree(sampled_tree)\n\n for t in tree_expected:\n if nx.utils.edges_equal(t.edges, sampled_tree.edges):\n tree_actual[t] += 1\n break\n\n # Conduct a Chi squared test to see if the actual distribution matches the\n # expected one at an alpha = 0.05 significance level.\n #\n # H_0: The distribution of trees in tree_actual matches the normalized product\n # of the edge weights in the tree.\n #\n # H_a: The distribution of trees in tree_actual follows some other\n # distribution of spanning trees.\n _, p = stats.chisquare(list(tree_actual.values()), list(tree_expected.values()))\n\n # Assert that p is greater than the significance level so that we do not\n # reject the null hypothesis\n assert not p < 0.05", "def create_all_trees(Samples):\n\ttrees = []\n\t#add theoretical sample for any cases where multiple samples exist from same timepoint\n\t#iterate to define options for progenitors for each sample\n\ttimes = [sample.time for sample in Samples]\n\tfor sample1 in Samples:\n\t\tfor sample0 in Samples:\n\t\t\tif sample0.time <= sample1.time and sample0.name != sample1.name: sample1.options.append(sample0)\n\t\t#add fictional node as root if there are two time0 samples, otherwise time0 sample is the root\n\t\tif sample1.time == min(times) and times.count(min(times)) > 1: sample1.options.append(\"fiction\")\n\t#build combined list of all possible origins\n\tlist_of_lists = []\n\tfor i in Samples: list_of_lists.append(i.options)\n\t#all possible combinations\n\tif [] in list_of_lists: #time0 sample, root\n\t\tindex = list_of_lists.index([])\n\t\tlist_of_lists.pop(index)\n\telse:\n\t\tindex = None\n\t#if there are two []s\n\tif [] in list_of_lists: raise Exception(\"Two samples with no origin, investigate further\") #this should be solved due to the fictional origin\n\t#create raw trees with all combinations of options for the different samples\n\tif len(Samples) < 10:\n\t\t#raw tree - each element in the list is the origin of the respective element in samples, this defines a tree\n\t\ttrees_raw = list(itertools.product(*list_of_lists))\n\t\t#go through the raw trees and change their format to pairs of each sample and its origin\n\t\tfor tree in trees_raw:\n\t\t\t#create trees\n\t\t\tfinal_trees = create_tree(tree, Samples, index)\n\t\t\t#add to final list\n #if final_tree:\n #for node in final_tree.nodes:\n #print node.progenitor, node.progeny\n\t\t\tif final_trees: trees += final_trees\n #if final_tree: trees += [final_tree]\n\n\telse: #if there are too many trees, use random trees instead of all trees\n\t\ttrees = draw_random_trees(list_of_lists, Samples, index, 1000000)\n\t#return\n\treturn trees", "def test_random_spanning_tree_additive_large():\n from random import Random\n\n pytest.importorskip(\"numpy\")\n stats = pytest.importorskip(\"scipy.stats\")\n\n edges = {\n (0, 1): 1,\n (0, 2): 1,\n (0, 5): 3,\n (1, 2): 2,\n (1, 4): 3,\n (2, 3): 3,\n (5, 3): 4,\n (5, 4): 5,\n (4, 3): 4,\n }\n\n # Build the graph\n G = nx.Graph()\n for u, v in edges:\n G.add_edge(u, v, weight=edges[(u, v)])\n\n # Find the additive weight for each tree.\n total_weight = 0\n tree_expected = {}\n for t in nx.SpanningTreeIterator(G):\n # Find the multiplicative 
weight of the spanning tree\n weight = 0\n for u, v, d in t.edges(data=\"weight\"):\n weight += d\n tree_expected[t] = weight\n total_weight += weight\n\n # Assert that every tree has an entry in the expected distribution\n assert len(tree_expected) == 75\n\n # Set the sample size and then calculate the expected number of times we\n # expect to see each tree. This test uses a near minimum sample size where\n # the most unlikely tree has an expected frequency of 5.07.\n # (Minimum required is 5)\n #\n # Here we also initialize the tree_actual dict so that we know the keys\n # match between the two. We will later take advantage of the fact that since\n # python 3.7 dict order is guaranteed so the expected and actual data will\n # have the same order.\n sample_size = 500\n tree_actual = {}\n for t in tree_expected:\n tree_expected[t] = (tree_expected[t] / total_weight) * sample_size\n tree_actual[t] = 0\n\n # Sample the spanning trees\n #\n # Assert that they are actually trees and record which of the 75 trees we\n # have sampled.\n #\n # For repeatability, we want to take advantage of the decorators in NetworkX\n # to randomly sample the same sample each time. However, if we pass in a\n # constant seed to sample_spanning_tree we will get the same tree each time.\n # Instead, we can create our own random number generator with a fixed seed\n # and pass those into sample_spanning_tree.\n rng = Random(37)\n for _ in range(sample_size):\n sampled_tree = nx.random_spanning_tree(\n G, \"weight\", multiplicative=False, seed=rng\n )\n assert nx.is_tree(sampled_tree)\n\n for t in tree_expected:\n if nx.utils.edges_equal(t.edges, sampled_tree.edges):\n tree_actual[t] += 1\n break\n\n # Conduct a Chi squared test to see if the actual distribution matches the\n # expected one at an alpha = 0.05 significance level.\n #\n # H_0: The distribution of trees in tree_actual matches the normalized product\n # of the edge weights in the tree.\n #\n # H_a: The distribution of trees in tree_actual follows some other\n # distribution of spanning trees.\n _, p = stats.chisquare(list(tree_actual.values()), list(tree_expected.values()))\n\n # Assert that p is greater than the significance level so that we do not\n # reject the null hypothesis\n assert not p < 0.05", "def test_get_tree(self):\n pass", "def test_tree():\n base = TreeNode('base', 55)\n\n first = TreeNode('first', 12, base)\n base.set_child_1(first)\n\n second = TreeNode('second', 13, base)\n base.set_child_2(second)\n\n third = TreeNode('third', 15, base)\n base.set_child_3(third)\n\n a = TreeNode('a', 20, first)\n first.set_child_1(a)\n\n b = TreeNode('b', 30, first)\n first.set_child_2(b)\n\n c = TreeNode('c', 40, first)\n first.set_child_3(c)\n\n # print(base.get_total_weight())\n # print(a.weight + b.weight + c.weight + first.weight + second.weight + third.weight + base.weight)\n print(\n a.weight + b.weight + c.weight + first.weight + second.weight + third.weight + base.weight == base.get_total_weight())", "def test_depth(populated_tree):\n assert populated_tree.depth() == 4", "def random_subtree(program):\n nodes = program.size_of_subtree()\n node_index = random.randint(math.ceil((nodes - 1) / 3), nodes - 1)\n return subtree_at_index(program, node_index)", "def test_bst_three_root(bst_three):\n assert bst_three.root.data == 10\n assert bst_three.root.left.data == 5\n assert bst_three.root.right.data == 15", "def make_tree(dataset):\n\treturn make_tree_helper(dataset)", "def test_iter_trees(self):\n trees = list(treelib.iter_trees(StringIO(fungi + 
fungi + fungi)))\n self.assertEqual(len(trees), 3)", "def get_random_tagged_tree(number_leafnodes, percentage_parasites, percentage_unknown, p_multifurcation, beta_distribution_parameters):\n # Arguments:\n # number_leafnodes - needed for randomized function\n # percentage_parasites\n # percentage_unknown - proportion of unknown leafnodes\n # percentage_multifurcation\n # beta_distribution_parameters - [A_FL, B_FL, A_P, B_P]\n\n global percentage_multifurcation\n percentage_multifurcation = p_multifurcation\n\n START_TIME = datetime.datetime.now().replace(microsecond=0)\n CURRENT_TIME = datetime.datetime.now().replace(microsecond=0)\n print(\"---- randomized tree ----\")\n current_percentage_parasites = 0\n # randomized(cls, taxa, branch_length=1.0, branch_stdev=None) \n # Create a randomized bifurcating tree given a list of taxa.\n # https://github.com/biopython/biopython/blob/master/Bio/Phylo/BaseTree.py\n randomized_tree = Phylo.BaseTree.Tree.randomized(number_leafnodes)\n randomized_tree.clade.name = 'root'\n boolean = True\n CURRENT_TIME = print_time(START_TIME)\n print(\"---- tag tree ----\")\n while boolean:\n current_tree = deepcopy(randomized_tree)\n result = tag_tree(current_tree.clade, [], 0, [0, 0], percentage_parasites, percentage_unknown, beta_distribution_parameters) # father_tag = 0 -> free living\n nodelist = result[1]\n leaf_distr = result[2]\n # child_depth = child_depth + result[3]\n # %P = #FL / (#P + #FL) * 100\n current_percentage_parasites = leaf_distr[1] / (leaf_distr[0] + leaf_distr[1]) \n print(\"tried\", current_percentage_parasites*100, \"% of parasites\") # 40% parasites?\n if (percentage_parasites - permitted_deviation) < current_percentage_parasites < (percentage_parasites + permitted_deviation):\n boolean = False\n print(\"----\")\n CURRENT_TIME = print_time(CURRENT_TIME)\n print(\"----\")\n # print(current_percentage_parasites, '% parasites,', 100 - current_percentage_parasites, '% free-living')\n return [current_tree, nodelist]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Samples leaves from the given topology and distances (alpha and beta). Each leaf has a sequence of length n. Calculates the distances between the generated sequences and reconstructs the tree, then checks whether the reconstruction matches the topology the leaves were created from. Repeats the process m times and returns the percentage of successful reconstructions.
def msa(m, n, alpha, beta):
    success = 0
    for i in range(m):
        # We will search for the two minimal values. If they are both between the leaves -> success.
        min_t = float('inf')
        second_min_t = float('inf')
        min_indexes = [(0, 0), (0, 0)]
        leaves = sample_tree(n, alpha, beta)
        mles = np.zeros(shape=(len(leaves), len(leaves)))
        # Calculates MLE for each couple of leaves.
        for j in range(len(leaves)):
            for k in range(j):
                # converts two sequences to list of pairs: (char from seq1, char from seq2)
                # in order to work with MLE functions.
                pairs = [(leaves[j][s], leaves[k][s]) for s in range(n)]
                mles[j, k] = MLE.single_mle_calculator(pairs)
                if mles[j, k] < min_t:
                    min_indexes[1] = (min_indexes[0][0], min_indexes[0][1])
                    min_indexes[0] = (j, k)
                    second_min_t = min_t
                    min_t = mles[j, k]
                elif mles[j, k] < second_min_t:
                    min_indexes[1] = (j, k)
                    second_min_t = mles[j, k]
        # The function has found the right tree only if the distances between leaves 1 and 2,
        # and between 3 and 4, are the shortest; the order between them doesn't matter.
        if min_indexes[0] in REAL_NEIGHBORS and min_indexes[1] in REAL_NEIGHBORS:
            success += 1
    # returns the percent of successful reconstruction
    return float(success / m)
[ "def sample_tree(n, alpha, beta):\n root = sample_root_seq(n)\n\n leaf1 = sample_leaf(root, beta)\n leaf2 = sample_leaf(root, alpha)\n\n root_neighbor = sample_leaf(root, alpha)\n leaf3 = sample_leaf(root_neighbor, beta)\n leaf4 = sample_leaf(root_neighbor, alpha)\n\n return leaf1, leaf2, leaf3, leaf4", "def test_random_spanning_tree_multiplicative_large():\n from math import exp\n from random import Random\n\n pytest.importorskip(\"numpy\")\n stats = pytest.importorskip(\"scipy.stats\")\n\n gamma = {\n (0, 1): -0.6383,\n (0, 2): -0.6827,\n (0, 5): 0,\n (1, 2): -1.0781,\n (1, 4): 0,\n (2, 3): 0,\n (5, 3): -0.2820,\n (5, 4): -0.3327,\n (4, 3): -0.9927,\n }\n\n # The undirected support of gamma\n G = nx.Graph()\n for u, v in gamma:\n G.add_edge(u, v, lambda_key=exp(gamma[(u, v)]))\n\n # Find the multiplicative weight for each tree.\n total_weight = 0\n tree_expected = {}\n for t in nx.SpanningTreeIterator(G):\n # Find the multiplicative weight of the spanning tree\n weight = 1\n for u, v, d in t.edges(data=\"lambda_key\"):\n weight *= d\n tree_expected[t] = weight\n total_weight += weight\n\n # Assert that every tree has an entry in the expected distribution\n assert len(tree_expected) == 75\n\n # Set the sample size and then calculate the expected number of times we\n # expect to see each tree. This test uses a near minimum sample size where\n # the most unlikely tree has an expected frequency of 5.15.\n # (Minimum required is 5)\n #\n # Here we also initialize the tree_actual dict so that we know the keys\n # match between the two. We will later take advantage of the fact that since\n # python 3.7 dict order is guaranteed so the expected and actual data will\n # have the same order.\n sample_size = 1200\n tree_actual = {}\n for t in tree_expected:\n tree_expected[t] = (tree_expected[t] / total_weight) * sample_size\n tree_actual[t] = 0\n\n # Sample the spanning trees\n #\n # Assert that they are actually trees and record which of the 75 trees we\n # have sampled.\n #\n # For repeatability, we want to take advantage of the decorators in NetworkX\n # to randomly sample the same sample each time. 
However, if we pass in a\n # constant seed to sample_spanning_tree we will get the same tree each time.\n # Instead, we can create our own random number generator with a fixed seed\n # and pass those into sample_spanning_tree.\n rng = Random(37)\n for _ in range(sample_size):\n sampled_tree = nx.random_spanning_tree(G, \"lambda_key\", seed=rng)\n assert nx.is_tree(sampled_tree)\n\n for t in tree_expected:\n if nx.utils.edges_equal(t.edges, sampled_tree.edges):\n tree_actual[t] += 1\n break\n\n # Conduct a Chi squared test to see if the actual distribution matches the\n # expected one at an alpha = 0.05 significance level.\n #\n # H_0: The distribution of trees in tree_actual matches the normalized product\n # of the edge weights in the tree.\n #\n # H_a: The distribution of trees in tree_actual follows some other\n # distribution of spanning trees.\n _, p = stats.chisquare(list(tree_actual.values()), list(tree_expected.values()))\n\n # Assert that p is greater than the significance level so that we do not\n # reject the null hypothesis\n assert not p < 0.05", "def get_random_tagged_tree(number_leafnodes, percentage_parasites, percentage_unknown, p_multifurcation, beta_distribution_parameters):\n # Arguments:\n # number_leafnodes - needed for randomized function\n # percentage_parasites\n # percentage_unknown - proportion of unknown leafnodes\n # percentage_multifurcation\n # beta_distribution_parameters - [A_FL, B_FL, A_P, B_P]\n\n global percentage_multifurcation\n percentage_multifurcation = p_multifurcation\n\n START_TIME = datetime.datetime.now().replace(microsecond=0)\n CURRENT_TIME = datetime.datetime.now().replace(microsecond=0)\n print(\"---- randomized tree ----\")\n current_percentage_parasites = 0\n # randomized(cls, taxa, branch_length=1.0, branch_stdev=None) \n # Create a randomized bifurcating tree given a list of taxa.\n # https://github.com/biopython/biopython/blob/master/Bio/Phylo/BaseTree.py\n randomized_tree = Phylo.BaseTree.Tree.randomized(number_leafnodes)\n randomized_tree.clade.name = 'root'\n boolean = True\n CURRENT_TIME = print_time(START_TIME)\n print(\"---- tag tree ----\")\n while boolean:\n current_tree = deepcopy(randomized_tree)\n result = tag_tree(current_tree.clade, [], 0, [0, 0], percentage_parasites, percentage_unknown, beta_distribution_parameters) # father_tag = 0 -> free living\n nodelist = result[1]\n leaf_distr = result[2]\n # child_depth = child_depth + result[3]\n # %P = #FL / (#P + #FL) * 100\n current_percentage_parasites = leaf_distr[1] / (leaf_distr[0] + leaf_distr[1]) \n print(\"tried\", current_percentage_parasites*100, \"% of parasites\") # 40% parasites?\n if (percentage_parasites - permitted_deviation) < current_percentage_parasites < (percentage_parasites + permitted_deviation):\n boolean = False\n print(\"----\")\n CURRENT_TIME = print_time(CURRENT_TIME)\n print(\"----\")\n # print(current_percentage_parasites, '% parasites,', 100 - current_percentage_parasites, '% free-living')\n return [current_tree, nodelist]", "def test_random_spanning_tree_additive_large():\n from random import Random\n\n pytest.importorskip(\"numpy\")\n stats = pytest.importorskip(\"scipy.stats\")\n\n edges = {\n (0, 1): 1,\n (0, 2): 1,\n (0, 5): 3,\n (1, 2): 2,\n (1, 4): 3,\n (2, 3): 3,\n (5, 3): 4,\n (5, 4): 5,\n (4, 3): 4,\n }\n\n # Build the graph\n G = nx.Graph()\n for u, v in edges:\n G.add_edge(u, v, weight=edges[(u, v)])\n\n # Find the additive weight for each tree.\n total_weight = 0\n tree_expected = {}\n for t in nx.SpanningTreeIterator(G):\n # Find the 
multiplicative weight of the spanning tree\n weight = 0\n for u, v, d in t.edges(data=\"weight\"):\n weight += d\n tree_expected[t] = weight\n total_weight += weight\n\n # Assert that every tree has an entry in the expected distribution\n assert len(tree_expected) == 75\n\n # Set the sample size and then calculate the expected number of times we\n # expect to see each tree. This test uses a near minimum sample size where\n # the most unlikely tree has an expected frequency of 5.07.\n # (Minimum required is 5)\n #\n # Here we also initialize the tree_actual dict so that we know the keys\n # match between the two. We will later take advantage of the fact that since\n # python 3.7 dict order is guaranteed so the expected and actual data will\n # have the same order.\n sample_size = 500\n tree_actual = {}\n for t in tree_expected:\n tree_expected[t] = (tree_expected[t] / total_weight) * sample_size\n tree_actual[t] = 0\n\n # Sample the spanning trees\n #\n # Assert that they are actually trees and record which of the 75 trees we\n # have sampled.\n #\n # For repeatability, we want to take advantage of the decorators in NetworkX\n # to randomly sample the same sample each time. However, if we pass in a\n # constant seed to sample_spanning_tree we will get the same tree each time.\n # Instead, we can create our own random number generator with a fixed seed\n # and pass those into sample_spanning_tree.\n rng = Random(37)\n for _ in range(sample_size):\n sampled_tree = nx.random_spanning_tree(\n G, \"weight\", multiplicative=False, seed=rng\n )\n assert nx.is_tree(sampled_tree)\n\n for t in tree_expected:\n if nx.utils.edges_equal(t.edges, sampled_tree.edges):\n tree_actual[t] += 1\n break\n\n # Conduct a Chi squared test to see if the actual distribution matches the\n # expected one at an alpha = 0.05 significance level.\n #\n # H_0: The distribution of trees in tree_actual matches the normalized product\n # of the edge weights in the tree.\n #\n # H_a: The distribution of trees in tree_actual follows some other\n # distribution of spanning trees.\n _, p = stats.chisquare(list(tree_actual.values()), list(tree_expected.values()))\n\n # Assert that p is greater than the significance level so that we do not\n # reject the null hypothesis\n assert not p < 0.05", "def test_example_4():\n import pwseqdist as pw\n import pandas as pd\n from tcrdist.repertoire import TCRrep\n import multiprocessing\n\n df = pd.read_csv(\"dash.csv\")\n df = df.head(100) # for faster testing\n tr = TCRrep(cell_df = df, \n organism = 'mouse', \n chains = ['alpha','beta'], \n use_defaults=False,\n compute_distances = False,\n cpus = 1,\n db_file = 'alphabeta_gammadelta_db.tsv')\n\n metrics_a = {\n \"cdr3_a_aa\" : pw.metrics.nw_hamming_metric ,\n \"pmhc_a_aa\" : pw.metrics.nw_hamming_metric ,\n \"cdr2_a_aa\" : pw.metrics.nw_hamming_metric ,\n \"cdr1_a_aa\" : pw.metrics.nw_hamming_metric }\n\n metrics_b = {\n \"cdr3_b_aa\" : pw.metrics.nw_hamming_metric ,\n \"pmhc_b_aa\" : pw.metrics.nw_hamming_metric ,\n \"cdr2_b_aa\" : pw.metrics.nw_hamming_metric ,\n \"cdr1_b_aa\" : pw.metrics.nw_hamming_metric }\n\n weights_a = { \n \"cdr3_a_aa\" : 1,\n \"pmhc_a_aa\" : 1,\n \"cdr2_a_aa\" : 1,\n \"cdr1_a_aa\" : 1}\n\n weights_b = { \n \"cdr3_b_aa\" : 1,\n \"pmhc_b_aa\" : 1,\n \"cdr2_b_aa\" : 1,\n \"cdr1_b_aa\" : 1}\n\n kargs_a = { \n 'cdr3_a_aa' : \n {'use_numba': False},\n 'pmhc_a_aa' : {\n 'use_numba': False},\n 'cdr2_a_aa' : {\n 'use_numba': False},\n 'cdr1_a_aa' : {\n 'use_numba': False}\n }\n kargs_b = { \n 'cdr3_b_aa' : \n 
{'use_numba': False},\n 'pmhc_b_aa' : {\n 'use_numba': False},\n 'cdr2_b_aa' : {\n 'use_numba': False},\n 'cdr1_b_aa' : {\n 'use_numba': False}\n }\n\n tr.metrics_a = metrics_a\n tr.metrics_b = metrics_b\n\n tr.weights_a = weights_a\n tr.weights_b = weights_b\n\n tr.kargs_a = kargs_a \n tr.kargs_b = kargs_b\n\n tr.compute_distances()\n\n tr.pw_cdr3_b_aa\n tr.pw_beta", "def benchmark():\n \n # get the lines in english.txt\n with open('english.txt', 'r') as infile:\n lines = infile.readlines()\n \n # create a domain\n domain = 2**np.arange(3, 11)\n \n times_build_linked = []\n times_build_avl = []\n times_build_btree = []\n \n times_linked = []\n times_avl = []\n times_btree = []\n \n for i in domain: \n # get a random set of lines of size 2**i\n choices = np.random.choice(lines, i, replace=False)\n \n ####################################################\n # BUILD THE TREES #\n ####################################################\n \n # create a BST, SinglyLinkedList, and AVL Tree\n btree = BST()\n avl = AVL()\n linked = SinglyLinkedList()\n \n # Linked List\n start = time()\n for choice in choices:\n linked.append(choice)\n times_build_linked.append(time() - start)\n \n # BST\n start = time()\n for choice in choices:\n btree.insert(choice)\n times_build_btree.append(time() - start)\n \n # AVL\n start = time()\n for choice in choices:\n avl.insert(choice)\n times_build_avl.append(time() - start)\n\n \n ####################################################\n # FIND THE NODES #\n ####################################################\n \n # from the nodes, randomely choose 5 of them\n five_choices = np.random.choice(choices, 5, replace=False)\n \n # Linked List\n start = time()\n for choice in five_choices:\n linked.iterative_find(choice)\n times_linked.append(time() - start)\n \n # BST \n start = time()\n for choice in five_choices:\n btree.find(choice)\n times_btree.append(time() - start)\n \n # AVL\n start = time()\n for choice in five_choices:\n avl.find(choice)\n times_avl.append(time() - start)\n \n fig, axes = plt.subplots(1,2)\n \n # Plot build times\n axes[0].loglog(domain, times_build_linked, \".-\", basex=2, basey=10, label=\"Linked List\")\n axes[0].loglog(domain, times_build_avl, \".-\", basex=2, basey=10, label=\"AVL\")\n axes[0].loglog(domain, times_build_btree, \".-\", basex=2, basey=10, label=\"BST\")\n axes[0].set_title(\"Build the Structure\")\n axes[0].legend()\n \n # Plot find times\n axes[1].loglog(domain, times_linked, \".-\", basex=2, basey=10, label=\"Linked List\")\n axes[1].loglog(domain, times_avl, \".-\", basex=2, basey=10, label=\"AVL\")\n axes[1].loglog(domain, times_btree, \".-\", basex=2, basey=10, label=\"BST\")\n axes[1].set_title(\"find() functions\")\n axes[1].legend()\n \n plt.tight_layout()\n plt.show()", "def test_create_Gamma_eta_tree_more_per_regression():\n\n\n # random - structure output check\n # data creation\n n = 200\n min_size_leaf = 1\n\n X = np.random.uniform(size = (n, 510), low = -1,high = 1)\n y = 10 * np.sin(np.pi * X[:,0]*X[:,1]) + 20 * ( X[:,2] - .5)**2 +\\\n 10 * X[:,3] + 5 * X[:,4] + np.random.normal(size = n)\n\n rf_class = sklearn.ensemble.RandomForestRegressor(n_estimators = 2,\n min_samples_leaf = min_size_leaf)\n random_forest = rf_class.fit(X = X,\n y = y.ravel())\n\n tree = random_forest.estimators_[0]\n\n max_depth_range = np.max(smooth_rf.depth_per_node(tree)) + 1\n\n G, n, ln, ld, li, fd, fi = smooth_rf.create_Gamma_eta_tree_more_per(tree)\n\n assert G.shape == (np.sum(tree.tree_.children_left == -1),\n max_depth_range), \\\n 
\"Gamma returned does not have the correct shape\"\n\n assert n.shape == G.shape, \\\n \"eta returned does not have the correct shape\"\n\n assert np.all(n >= 0), \\\n \"eta returned has negative values\"\n\n assert np.all(n[:,0] ==\n tree.tree_.weighted_n_node_samples[tree.tree_.children_left == -1]),\\\n \"eta structure doesn't match up with number of observes per leaf\"\n\n # new tests (ln,ld,li)\n assert ln.shape[0] == G.shape[0] and ld.shape[0] == G.shape[0] and \\\n li.shape[0] == G.shape[0], \\\n \"leaf based outputs should have same number of leaves and Gamma\"\n\n assert np.all(np.ceil(ln) == ln) and np.all(ln > 0), \\\n \"leaf counts should be strictly positive and integers\"\n\n assert np.all(ln ==\n tree.tree_.weighted_n_node_samples[tree.tree_.children_left == -1]), \\\n \"number of obs in each leaf not matching tree structure\"\n\n assert np.all(np.ceil(ld) == ld) and np.all(ld >= 0), \\\n \"leaf depth should be positive and integers\"\n\n assert np.all(li >= - 1e-10), \\\n \"leaf impurity (mse) should be non-negative\"\n\n # newest tests (fd, fi)\n assert fd.shape == G.shape and fi.shape == G.shape, \\\n \"shapes of full depth and impurity should make shape of Gamma\"\n\n assert np.all(fd[:,0] == ld) and np.all(np.ceil(fd) == fd) and \\\n np.all(fd >= 0), \\\n \"full depth shape should mirror leaf depth structure\"\n\n assert np.all(fi[:,0] == li) and np.all(fi >= - 1e-10), \\\n \"full impurity (mse) should mirror leaf impurity structure\"\n\n # for c_idx in range(fi.shape[1] - 1):\n # assert np.all(fi[:,c_idx] - fi[:,c_idx + 1] <= 1e-10), \\\n # \"impurity should be increasing (mse)\"\n\n # static check\n\n # tree structure:\n # ~upper: left, lower: right~\n # num obs depth\n # |--1 10 1\n # -0-| 34 0\n # | |--3 9 2\n # |-2-| 24 1\n # | |--5 8 3\n # |-4-| 15 2\n # |--6 7 3\n\n\n # eta\n # (1) 10 | 24 | 0 | 0\n # (3) 9 | 15 | 10 | 0\n # (5) 8 | 7 | 9 | 10\n # (6) 7 | 8 | 9 | 10\n\n # Gamma\n # (1) 10 | 18+24+28 = 70 | 0 | 0\n # (3) 9 * 2 = 18 | 24+28 = 52 | 10 | 0\n # (5) 8 * 3 = 24 | 28 | 18 | 10\n # (6) 7 * 4 = 28 | 24 | 18 | 10\n\n\n\n class inner_fake_tree():\n def __init__(self, nn, cl, cr, v):\n self.weighted_n_node_samples = nn\n self.children_left = cl\n self.children_right = cr\n self.value = v\n self.impurity = np.zeros(v.shape[0]) # this isn't a good test\n\n class fake_tree():\n def __init__(self, nn, cl, cr, v):\n self.tree_ = inner_fake_tree(nn, cl, cr, v)\n self.__class__ = sklearn.tree.tree.DecisionTreeRegressor\n\n weighted_n_node_samples = np.array([34,10,24,9,15,8,7], dtype = np.int)\n children_left = np.array([2,-1,4,-1,6,-1,-1], dtype = np.int)\n children_right = np.array([1,-1,3,-1,5,-1,-1], dtype = np.int)\n value = np.array([-99, 1, -99, 2, -99, 3, 4]).reshape((-1,1,1))\n\n test = fake_tree(weighted_n_node_samples,\n children_left,\n children_right,\n value)\n\n n_leaf = 4\n\n g_static, n_static, ln_static, ld_static, li_static, \\\n fd_static, fi_static = \\\n smooth_rf.create_Gamma_eta_tree_more_per(test)\n\n n_expected = np.array([[10,24,0,0],\n [9,15,10,0],\n [8,7,9,10],\n [7,8,9,10]])\n g_expected = np.array([[10,70,0,0],\n [18,52,10,0],\n [24,28,18,10],\n [28,24,18,10]])\n ln_expected = n_expected[:,0]\n ld_expected = np.array([1,2,3,3])\n fd_expected = np.array([[1,0,0,0],\n [2,1,0,0],\n [3,2,1,0],\n [3,2,1,0]])\n\n assert np.all(g_static == g_expected), \\\n \"static test's Gamma failed to reproduce correct solutions\"\n assert np.all(n_static == n_expected), \\\n \"static test's eta failed to reproduce correct solutions\"\n assert 
np.all(ln_static == ln_expected), \\\n \"static test's leaf count failed to reproduce correct solutions\"\n assert np.all(ld_static == ld_expected), \\\n \"static test's leaf depth failed to reproduce correct solutions\"\n assert np.all(fd_static == fd_expected), \\\n \"static test's full depth failed to reproduce correct solutions\"", "def waxman(n,alpha=.0015,beta=0.6):\n G = nx.Graph()\n for i in range(n):\n G.add_node(i)\n G.node[i]['loc'] = (random.random(),random.random())\n\n dist = {}\n L = 0\n for u in range(n):\n print(u)\n for v in range(u+1,n):\n dist = euclid_distance(G,u,v)\n if dist > L:\n L = dist\n\n for u in range(n):\n print(u)\n for v in range(u+1,n):\n p_edge = alpha*(e**(-euclid_distance(G,u,v)/(beta*L)))\n if random.random() < p_edge:\n G.add_edge(u,v)\n\n cc = nx.connected_components(G)\n\n print(len(cc))\n for c in range(len(cc)-1):\n u = random.choice(cc[c])\n v = random.choice(cc[c+1])\n G.add_edge(u,v)\n\n return G", "def find_merger_epochs(alltrees,\n idx_all,\n mpgs,\n nout_ini=37,\n dist_gal_scale=2,\n mass_ratio=\"early\"):\n verbose=False\n gal_list=[]\n mr_list=[]\n nout_list=[]\n nout_ini_list=[] # initial time when two halos(Galaxy stellar components in this case) overlap.\n\n for idx in idx_all:\n # full tree of a galaxy\n atree = ctu.extract_a_tree(alltrees.data, idx)\n\n # main progenitor tree\n main = ctu.extract_main_tree(atree, idx)\n\n x_nout = main['nout'].flatten()\n i_nout_ok = x_nout > nout_ini\n main = main[i_nout_ok]\n #x_nout = x_nout[i_nout_ok]\n pos = np.zeros((3,len(main)))\n pos[0,:] = main['x']\n pos[1,:] = main['y']\n pos[2,:] = main['z']\n\n mass_ratios_single = np.zeros(len(main))\n nout_inits = np.zeros(len(main))\n for i, nout in enumerate(main['nout']):\n # merger ratio\n i_prgs = np.where(atree['desc_id'] == main['id'][i])[0]\n\n # multiple prgs = merger\n if len(i_prgs) > 1:\n if verbose: print(\" {} mergers at nout = {}\".format(len(i_prgs), nout))\n id_prgs = atree['id'][i_prgs]\n mass_prgs = atree['m'][i_prgs]\n m_r = mass_prgs / max(mass_prgs)\n\n i_sat = np.argmax(mass_prgs[1:]) + 1\n #mass_ratios_single[i] = max([mass_prgs[1:] / max(mass_prgs)][0])\n if mass_ratio == \"max\":\n mass_ratios_single[i] = mass_prgs[i_sat] / mass_prgs[0]\n\n satellite = ctu.extract_main_tree(atree, id_prgs[i_sat], no_subset=True)\n\n nout_min = max([min(main['nout']), min(satellite['nout'])])\n i_main_ok = (main['nout'] > nout_min) * (main['nout'] < nout)\n i_sat_ok = (satellite['nout'] > nout_min) * (satellite['nout'] < nout)\n satellite = satellite[i_sat_ok]\n\n dd = np.sqrt(np.square(pos[0,i_main_ok] - satellite['x']) \\\n + np.square(pos[1,i_main_ok] - satellite['y'])\n + np.square(pos[2,i_main_ok] - satellite['z'])) * 1e3 # in kpc\n rgal_tot = main['rvir'][i_main_ok] + satellite['rvir']\n #print(\" Galaxy sizes : main {}, and the second {}, and the sum {}\".format(\n # main['r'][i_main_ok], satellite['r'], rgal_tot))\n #print(\" dd :\", dd)\n if sum(dist_gal_scale * rgal_tot < dd) > 0:\n #print(50 * rgal_tot - dd)\n #print(satellite['nout'][50 * rgal_tot < dd])\n nout_inits[i] = max(satellite['nout'][dist_gal_scale * rgal_tot < dd])\n if mass_ratio == \"early\":\n\n mass_ratios_single[i] = satellite['m'][satellite['nout'] == nout_inits[i]] / mass_prgs[0]\n\n else:\n nout_inits[i] = nout\n if verbose:\n print(\" Mass ratios : \", m_r, nout, nout_inits[i])\n\n else:\n mass_ratios_single[i] = 0\n\n ind_ok = np.where(mass_ratios_single > 0.01)[0]\n if len(ind_ok) > 0:\n # if a satellite oscillates around the host,\n # it could be identified 
as multiple mergers with short time interval.\n # leave only the first passage / merger.\n # No, it doesn't happen in ConsistentTrees.\n\n #good =[]\n #for i in range(len(ind_ok)-1):\n # if ind_ok[i+1] > ind_ok[i] + 2:\n # good.append(ind_ok[i])\n #good.append(ind_ok[-1])\n #ind_ok = good\n mr = 1./mass_ratios_single[ind_ok]\n\n gal_list.append(idx)\n mr_list.append(mr)\n nout_list.append(x_nout[ind_ok])\n nout_ini_list.append(nout_inits[ind_ok])\n\n\n inds=[]\n for i, gal in enumerate(mpgs):\n galid = gal.data['idx'][0]\n ind = np.where(galid == gal_list)[0]\n if len(ind) > 0:\n inds.append(i)\n merger = Merger()\n merger.mr = mr_list[ind]\n merger.nout = nout_list[ind]\n merger.nout_ini = nout_ini_list[ind]\n gal.merger = merger\n else:\n gal.merger = None\n #return gal_list, mr_list, nout_list, nout_ini_list", "def test_create_Gamma_eta_tree_more_regression():\n\n\n # random - structure output check\n # data creation\n n = 200\n min_size_leaf = 1\n\n X = np.random.uniform(size = (n, 510), low = -1,high = 1)\n y = 10 * np.sin(np.pi * X[:,0]*X[:,1]) + 20 * ( X[:,2] - .5)**2 +\\\n 10 * X[:,3] + 5 * X[:,4] + np.random.normal(size = n)\n\n rf_class = sklearn.ensemble.RandomForestRegressor(n_estimators = 2,\n min_samples_leaf = min_size_leaf)\n random_forest = rf_class.fit(X = X,\n y = y.ravel())\n\n tree = random_forest.estimators_[0]\n\n max_depth_range = np.max(smooth_rf.depth_per_node(tree)) + 1\n\n G, n, ln, ld, li = smooth_rf.create_Gamma_eta_tree_more(tree)\n\n assert G.shape == (np.sum(tree.tree_.children_left == -1),\n max_depth_range), \\\n \"Gamma returned does not have the correct shape\"\n\n assert n.shape == G.shape, \\\n \"eta returned does not have the correct shape\"\n\n assert np.all(n >= 0), \\\n \"eta returned has negative values\"\n\n assert np.all(n[:,0] ==\n tree.tree_.weighted_n_node_samples[tree.tree_.children_left == -1]),\\\n \"eta structure doesn't match up with number of observes per leaf\"\n\n # new tests\n assert ln.shape[0] == G.shape[0] and ld.shape[0] == G.shape[0] and \\\n li.shape[0] == G.shape[0], \\\n \"leaf based outputs should have same number of leaves and Gamma\"\n\n assert np.all(np.ceil(ln) == ln) and np.all(ln > 0), \\\n \"leaf counts should be strictly positive and integers\"\n\n assert np.all(ln ==\n tree.tree_.weighted_n_node_samples[tree.tree_.children_left == -1]), \\\n \"number of obs in each leaf not matching tree structure\"\n\n assert np.all(np.ceil(ld) == ld) and np.all(ld >= 0), \\\n \"leaf depth should be positive and integers\"\n\n assert np.all(li >= - 1e-10), \\\n \"leaf impurity (mse) should be non-negative\"\n\n # static check\n\n # tree structure:\n # ~upper: left, lower: right~\n # num obs depth\n # |--1 10 1\n # -0-| 34 0\n # | |--3 9 2\n # |-2-| 24 1\n # | |--5 8 3\n # |-4-| 15 2\n # |--6 7 3\n\n\n # eta\n # (1) 10 | 24 | 0 | 0\n # (3) 9 | 15 | 10 | 0\n # (5) 8 | 7 | 9 | 10\n # (6) 7 | 8 | 9 | 10\n\n # Gamma\n # (1) 10 | 18+24+28 = 70 | 0 | 0\n # (3) 9 * 2 = 18 | 24+28 = 52 | 10 | 0\n # (5) 8 * 3 = 24 | 28 | 18 | 10\n # (6) 7 * 4 = 28 | 24 | 18 | 10\n\n\n # WHEN parent == True\n # eta\n # (1) 10 | 10+24 = 34 | 34+0 = 34 | 34+0 = 34\n # (3) 9 | 15+9 = 24 | 24+10 = 34 | 34+0 = 34\n # (5) 8 | 8+7 = 15 | 15+9 = 24 | 24+10 = 34\n # (6) 7 | 8+7 = 15 | 15+9 = 24 | 24+10 = 34\n\n # Gamma\n # (1) 10 | 10+(18+24+28) = 80 | 80+0 = 80 | 80+0 = 80\n # (3) 9 * 2 = 18 | 18+(24+28) = 70 | 70+10 = 80 | 80+0 = 80\n # (5) 8 * 3 = 24 | 24+28 = 52 | 52+18 = 70 | 70+10 = 80\n # (6) 7 * 4 = 28 | 28+24 = 52 | 52+18 = 70 | 70+10 = 80\n\n\n\n class 
inner_fake_tree():\n def __init__(self, nn, cl, cr, v):\n self.weighted_n_node_samples = nn\n self.children_left = cl\n self.children_right = cr\n self.value = v\n self.impurity = np.zeros(v.shape[0]) # this isn't a good test\n\n class fake_tree():\n def __init__(self, nn, cl, cr, v):\n self.tree_ = inner_fake_tree(nn, cl, cr, v)\n self.__class__ = sklearn.tree.tree.DecisionTreeRegressor\n\n weighted_n_node_samples = np.array([34,10,24,9,15,8,7], dtype = np.int)\n children_left = np.array([2,-1,4,-1,6,-1,-1], dtype = np.int)\n children_right = np.array([1,-1,3,-1,5,-1,-1], dtype = np.int)\n value = np.array([-99, 1, -99, 2, -99, 3, 4]).reshape((-1,1,1))\n\n test = fake_tree(weighted_n_node_samples,\n children_left,\n children_right,\n value)\n\n n_leaf = 4\n\n g_static, n_static, ln_static, ld_static, li_static = \\\n smooth_rf.create_Gamma_eta_tree_more(test)\n\n n_expected = np.array([[10,24,0,0],\n [9,15,10,0],\n [8,7,9,10],\n [7,8,9,10]])\n g_expected = np.array([[10,70,0,0],\n [18,52,10,0],\n [24,28,18,10],\n [28,24,18,10]])\n ln_expected = n_expected[:,0]\n ld_expected = np.array([1,2,3,3])\n\n assert np.all(g_static == g_expected), \\\n \"static test's Gamma failed to reproduce correct solutions\"\n assert np.all(n_static == n_expected), \\\n \"static test's eta failed to reproduce correct solutions\"\n assert np.all(ln_static == ln_expected), \\\n \"static test's leaf count failed to reproduce correct solutions\"\n assert np.all(ld_static == ld_expected), \\\n \"static test's leaf depth failed to reproduce correct solutions\"\n\n # WHEN parent == true\n g_static, n_static, ln_static, ld_static, li_static = \\\n smooth_rf.create_Gamma_eta_tree_more(test, parents_all=True)\n\n n_expected = np.array([[10,34,34,34],\n [9,24,34,34],\n [8,15,24,34],\n [7,15,24,34]])\n g_expected = np.array([[10,80,80,80],\n [18,70,80,80],\n [24,52,70,80],\n [28,52,70,80]])\n\n ln_expected = n_expected[:,0]\n ld_expected = np.array([1,2,3,3])\n\n assert np.all(g_static == g_expected), \\\n \"static test's Gamma failed to reproduce correct solutions, \" +\\\n \"parent = True\"\n assert np.all(n_static == n_expected), \\\n \"static test's eta failed to reproduce correct solutions, \" +\\\n \"parent = True\"\n assert np.all(ln_static == ln_expected), \\\n \"static test's leaf count failed to reproduce correct solutions, \" +\\\n \"parent = True\"\n assert np.all(ld_static == ld_expected), \\\n \"static test's leaf depth failed to reproduce correct solutions, \" +\\\n \"parent = True\"", "def steinerTree(nodes):\n #works in 2 or 3 dimensions\n R = len(nodes[0]) # either 2 or 3 -- this is the dimension we're working in\n n = len(nodes)\n steinerPoints = []\n for i in range(n - 2):\n steinerPoints.append([random.uniform(min([i[dim] for i in nodes]), max([i[dim] for i in nodes])) for dim in\n range(R)])\n jump = 0\n for i in steinerPoints:\n for j in nodes:\n jump += dist(i, j)\n jump /= (len(steinerPoints) * len(nodes))\n #now the initial topology must be created\n snLocs = [i for i in range(n - 2)]\n snConnections = [random.choice(snLocs) for i in range(len(nodes))] #connections between steiner points and nodes\n ssLocs = [i for i in range(int(nCr(len(steinerPoints), 2)))]\n ssConnections = [] #connections between steiner points and other steiner points\n for i in range(n - 3):\n ssConnections.append(random.choice(ssLocs))\n ssLocs.remove(ssConnections[-1])\n print(createTree(snConnections, ssConnections)) #this is the structure of the initial tree\n iterations = 0\n while iterations < 25000:\n oldConnections 
= (snConnections[:],\n ssConnections[:]) #these fucking colons needing to be here cost me hours of time\n\n vec = [random.random() for dim in range(R)]\n negaters = [random.randint(0, 1) for dim in range(R)]\n for dim in range(R):\n if negaters[dim]:\n vec[dim] *= -1\n vec = normalize(vec)\n #multiply each component by the jump size\n for j in range(R):\n vec[j] *= jump\n r = random.randint(0, len(steinerPoints) - 1)\n newsol = [steinerPoints[r][dim] + vec[dim] for dim in range(R)]\n newsteinerPoints = steinerPoints[:r] + [newsol] + steinerPoints[r + 1:]\n if pathLength(nodes, newsteinerPoints, ssConnections, snConnections) < \\\n pathLength(nodes, steinerPoints, ssConnections, snConnections):\n steinerPoints = newsteinerPoints\n\n r1 = random.randint(0, len(snConnections) - 1)\n r2 = random.randint(0, len(snConnections) - 1)\n newSnConnections = snConnections[:]\n newSnConnections[r1], newSnConnections[r2] = newSnConnections[r2], newSnConnections[r1]\n if pathLength(nodes, steinerPoints, ssConnections, newSnConnections) < \\\n pathLength(nodes, steinerPoints, ssConnections,snConnections):\n snConnections = newSnConnections[:]\n r = random.randint(0, len(ssConnections) - 1)\n newSsConnection = random.randint(0, nCr(len(steinerPoints), 2) - 1)\n if pathLength(nodes, steinerPoints, ssConnections[:r] + [newSsConnection] + ssConnections[r + 1:], snConnections) < \\\n pathLength(nodes, steinerPoints, ssConnections, snConnections) and unique(\n ssConnections[:r] + [newSsConnection] + ssConnections[r + 1:]):\n ssConnections[r] = newSsConnection\n allssConnections = [i for i in combinations([i for i in range(n - 2)], 2)]\n steinerPointsCounts = [3 for i in range(len(steinerPoints))]\n for i in ssConnections:\n for j in allssConnections[i]:\n steinerPointsCounts[j] -= 1\n snConnections = []\n for i in range(len(steinerPointsCounts)):\n for j in range(steinerPointsCounts[i]):\n snConnections.append(i)\n random.shuffle(snConnections)\n if not isValid(snConnections, ssConnections, steinerPoints):\n snConnections, ssConnections = oldConnections\n jump *= .9995\n iterations += 1\n if iterations == 25000 and not isValid(snConnections, ssConnections, steinerPoints):\n # restarts if we've failed\n print(\"Starting over...\")\n steinerPoints = []\n for i in range(n - 2):\n steinerPoints.append([random.uniform(min([i[dim] for i in nodes]), max([i[dim] for i in nodes])) for dim in\n range(R)])\n jump = 0\n for i in steinerPoints:\n for j in nodes:\n jump += dist(i, j)\n jump /= (len(steinerPoints) * len(nodes))\n #now the initial topology must be created\n snLocs = [i for i in range(n - 2)]\n snConnections = [random.choice(snLocs) for i in range(len(nodes))] #connections between steiner points and nodes\n ssLocs = [i for i in range(int(nCr(len(steinerPoints), 2)))]\n ssConnections = [] #connections between steiner points and other steiner points\n for i in range(n - 3):\n ssConnections.append(random.choice(ssLocs))\n ssLocs.remove(ssConnections[-1])\n iterations = 0\n\n #wrap up program\n\n print(\"steinerPoints:\")\n for sol in steinerPoints:\n print(sol)\n print(\"ssConnections: \", ssConnections)\n print(\"snConnections: \", snConnections)\n print(\"tree: \", createTree(snConnections, ssConnections))\n print(pathLength(nodes, steinerPoints, ssConnections, snConnections))\n # if not isValid(snConnections, ssConnections):\n # print(\"I have not generated a valid Steiner tree for you. 
I am very sorry.\")\n # return\n\n #for 3D plots\n if R == 3:\n lines = []\n for i in range(n):\n lines.append([nodes[i], steinerPoints[snConnections[i]]])\n allssConnections = []\n for i in combinations([i for i in range(n - 2)], 2):\n allssConnections.append(i)\n for i in ssConnections:\n lines.append([steinerPoints[allssConnections[i][0]], steinerPoints[allssConnections[i][1]]])\n VecStart_x = []\n VecStart_y = []\n VecStart_z = []\n VecEnd_x = []\n VecEnd_y = []\n VecEnd_z = []\n for line in lines:\n VecStart_x.append(line[0][0])\n VecEnd_x.append(line[1][0])\n VecStart_y.append(line[0][1])\n VecEnd_y.append(line[1][1])\n VecStart_z.append(line[0][2])\n VecEnd_z.append(line[1][2])\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n for i in range(len(VecStart_x)):\n ax.plot([VecStart_x[i], VecEnd_x[i]], [VecStart_y[i], VecEnd_y[i]], zs=[VecStart_z[i], VecEnd_z[i]])\n pl.plot([i[0] for i in steinerPoints], [i[1] for i in steinerPoints], [i[2] for i in steinerPoints], 'bo')\n pl.plot([i[0] for i in nodes], [i[1] for i in nodes], [i[2] for i in nodes], 'ro')\n # ax.text(min([i[0] for i in nodes])-1, min(i[1] for i in nodes)-1, min(i[2] for i in nodes)-1,\n # \"Total distance: \"+str(pathLength(nodes, steinerPoints, ssConnections, snConnections)), fontsize=15)\n ax.set_title(\"Total Distance: \" + str(pathLength(nodes, steinerPoints, ssConnections, snConnections)))\n\n ## draw sphere\n# u = np.linspace(0, 2 * np.pi, 100)\n# v = np.linspace(0, np.pi, 100)\n#\n# x = 1 * np.outer(np.cos(u), np.sin(v))\n# y = 1 * np.outer(np.sin(u), np.sin(v))\n# z = 1 * np.outer(np.ones(np.size(u)), np.cos(v))\n# elev = 10.0\n# rot = 80.0 / 180 * np.pi\n# ax.plot_surface(x, y, z, rstride=4, cstride=4, color='b', linewidth=0, alpha=0.5)\n# pl.show()\n # Create a sphere\n pi = np.pi\n cos = np.cos\n sin = np.sin\n phi, theta = np.mgrid[0.0:pi:100j, 0.0:2.0*pi:100j]\n x = radius*sin(phi)*cos(theta)\n y = radius*sin(phi)*sin(theta)\n z = radius*cos(phi)\n\n\n def slerp(p1, p2, t):\n omega = np.arccos( p1.dot(p2) )\n sin_omega = np.sin(omega)\n t = t[:, np.newaxis]\n return ( np.sin( (1-t)*omega )*p1 + np.sin( t*omega )*p2 )/sin_omega\n\n p1 = np.array([1, 0, 0])\n p2 = np.array([0, 1, 0])\n t = np.linspace(0, 1, 30)\n\n arc = slerp(p1, p2, t)\n\n #Import data\n# data = np.genfromtxt('leb.txt')\n# theta, phi, r = np.hsplit(data, 3)\n# theta = theta * pi / 180.0\n# phi = phi * pi / 180.0\n# xx = sin(phi)*cos(theta)\n# yy = sin(phi)*sin(theta)\n# zz = cos(phi)\n\n #Set colours and render\n# ax = fig.add_subplot(111, projection='3d')\n\n ax.plot_surface(\n x, y, z, rstride=1, cstride=1, color='c', alpha=0.3, linewidth=0)\n\n pl.plot( arc[:, 0], arc[:, 1] )\n ax.set_xlim([-1,1])\n ax.set_ylim([-1,1])\n ax.set_zlim([-1,1])\n# ax.set_aspect(\"equal\")\n pl.tight_layout()\n manager = plt.get_current_fig_manager()\n manager.window.showMaximized()\n plt.savefig('Steiner_tree.png')\n pl.show()", "def run_q4():\n pruned_builder = PrunedTreeBuilding()\n builder = BuildDecisionTree()\n m_list = [1000, 10000, 100000, 1000000]\n percentage_irrelevants = []\n for m in m_list:\n irrelevant = 0\n new_f, new_l = CreateData(m).create_data_set()\n r = pruned_builder.build_depth_pruned_tree_id3(new_f, new_l, 0, 9)\n tree_node_indices = builder.get_tree_nodes_indices(r, [])\n for i in range(15, 21):\n irrelevant += tree_node_indices.count(i)\n percentage_irrelevants.append((irrelevant / len(tree_node_indices)) * 100)\n\n print(percentage_irrelevants)", "def __estimate_branch_lengths(tree, extant_genomes, method):\n\n # 
reset edge labels:\n for edge in tree.preorder_edge_iter():\n edge.label = None\n # index the edges:\n e_idx = 0\n # pairwise paths between leafs:\n pw_paths = {}\n # list of edges in the order of labeling 0,1,...,n-1.\n edges = []\n # paths from the leafs to any internal node. If the internal node is the LCA of two leafs,\n # merging the paths of both leafs gives the PW path.\n leaf_paths = {node.label:{} for node in tree.internal_nodes()}\n # Iterate between all leafs, to create the leaf_paths. If a LCA with previous leafs\n # is found, create the pw_path.\n for node in tree.leaf_node_iter():\n current_node = node\n path = []\n # traverse from leaf to root.\n while current_node != tree.seed_node:\n # if current edge is not labelled, create a label:\n if current_node.edge.label is None:\n current_node.edge.label = e_idx\n e_idx += 1\n edges.append(current_node.edge)\n # current path from leaf to internal node:\n path.append(current_node.edge)\n # update current node\n current_node = current_node.parent_node\n # check if there are LCAs in this internal node with the current leaf:\n for k in leaf_paths[current_node.label].iterkeys():\n # if a PW path exists, this is not LCA, but higher up the tree, so ignore.\n # if the PW does not exist yet, this is a LCA:\n if (node.label, k) not in pw_paths:\n pw_paths[(node.label, k)] = [p.label for p in leaf_paths[current_node.label][k] + path]\n # store in this internal node the path from the leaf:\n leaf_paths[current_node.label][node.label] = list(path)\n\n # Now, with all the PW paths, create the LP:\n # min cx\n # s.a. Ax >= b => -Ax <= -b (the linprog package needs <= type of ineq.)\n # where A is a n x m matrix, n is the number of paths (ineqs.), m the number of edges.\n n = len(extant_genomes)*(len(extant_genomes)-1)/2\n m = e_idx\n A = np.zeros((n,m))\n b = np.zeros(n)\n c = np.ones(m)\n\n # for each path, fill the correct row of the A matrix and b vector:\n for i, (l, path) in enumerate((pw_paths.iteritems())):\n b[i] = -dcj.dcj_distance(extant_genomes[l[0]], extant_genomes[l[1]])\n for j in path:\n A[i][j] = -1\n if method == \"lp\":\n # solve the LP:\n result = linprog(c, A_ub=A, b_ub=b).x\n elif method == \"least_squares\":\n # alternatively: least squares:\n result = lstsq(A, b)[0]\n else:\n print >> sys.stderr, \"Unknown method for branch length estimation, skipping...\"\n return\n # Apply the lengths in the tree:\n for e, x in zip(edges, result):\n e.length = x\n # the edges from the root are \"ambiguous\", so each gets the average of the children;\n # from the solution, usually one gets zero and the other the full length; (for binary trees)\n children = tree.seed_node.child_nodes()\n if len(children) == 2:\n avg = np.mean([node.edge.length for node in children])\n for node in children:\n node.edge.length = avg", "def select_case_2(data,labels,T,budget,batch_size):\n\n n_nodes = len(T[1]) #total nodes in T\n n_samples = len(data) #total samples in data\n L = np.zeros(n_nodes) #majority label\n p1 = np.zeros(n_nodes) #empirical label frequency\n n = np.zeros(n_nodes) #number of points sampled from each node\n error = []#np.zeros(n_samples) #error at each round\n root = n_nodes-1 #corresponds to index of root\n P = np.array([root])\n L[root] = 1\n\n for i in range(budget):\n v_selected = np.array([])\n\n for b in range(batch_size):\n #TODO: select a node from P biasing towards choosing nodes in areas where the observed labels are less pure\n\n w = np.array([])\n za = 0.95\n\n for j in range(len(P)):\n leaves = get_leaves([], P[j], T, 
n_samples)\n num_leaves = len(leaves)\n p_v = max(p1[P[j]], 1-p1[P[j]]) # majority label frequency\n p_up = p_v + za * np.sqrt(p_v * (1-p_v)/num_leaves)\n wv = num_leaves/n_samples\n\n w = np.append(w, wv * (1.0 - p_up))\n\n if (np.sum(w) == 0):\n w = w + 1.0/len(w)\n else:\n w = w / np.sum(w)\n #print(\"weights:\", w)\n\n v = random.choices(population = range(len(P)), weights = w, k=1)\n v = P[v[0]]\n #print(\"Selected internal node:\", v)\n\n #TODO: pick a random leaf node from subtree Tv and query its label\n z = random.choice(get_leaves([], v, T, n_samples))\n #print(\"Selected to query:\", z)\n l = labels[z]\n\n #TODO: update empirical counts and probabilities for all nodes u on path from z to v\n z = np.array([z])\n n, p1 = update_empirical(n,p1,v,z,l,T)\n\n v_selected = np.append(v_selected, v)\n v_selected = v_selected.astype(int)\n\n #TODO: update admissible A and compute scores; find best pruning and labeling\n P_best, L[v] = best_pruning_and_labeling(n,p1,v_selected,T,n_samples)\n #print(\"best Pruning:\", P_best)\n #TODO: update pruning P and labeling L\n P = np.delete(P, np.argwhere(P==v))\n P = np.union1d(P, P_best)\n #print(\"Updated Pruning:\", P)\n\n #TODO: temporarily assign labels to every leaf and compute error\n L = assign_labels(L,v_selected,v_selected,T,n_samples)\n e = compute_error(L,labels)\n error.append(e)\n\n if (i % 100 == 0):\n print(e)\n\n for v in P:\n #TODO: assign labels to all nodes under the current pruning\n L = assign_labels(L,v,v,T,n_samples)\n\n return L, np.array(error)", "def bhc(data, data_model, crp_alpha=1.0):\n # initialize the tree\n nodes = dict((i, Node(np.array([x]), data_model, i, crp_alpha))\n for i, x in enumerate(data))\n n_nodes = len(nodes)\n linkage_matrix = []\n lmls = []\n merged_node_index = n_nodes\n while n_nodes > 1:\n max_rk = float('-Inf')\n merged_node = None\n\n # for each pair of clusters (nodes), compute the merger score.\n for left_idx, right_idx in it.combinations(nodes.keys(), 2):\n tmp_node = Node.as_merge(nodes[left_idx], nodes[right_idx])\n\n logp_left = nodes[left_idx].logp\n logp_right = nodes[right_idx].logp\n logp_comb = tmp_node.logp\n\n log_pi = tmp_node.log_pi\n\n numer = log_pi + logp_comb\n\n neg_pi = log(-expm1(log_pi))\n denom = logaddexp(numer, neg_pi+logp_left+logp_right)\n\n log_rk = numer-denom\n\n if log_rk > max_rk:\n max_rk = log_rk\n merged_node = tmp_node\n merged_right = right_idx\n merged_left = left_idx\n\n # Merge the highest-scoring pair\n merged_node.num_children = nodes[merged_left].num_children + nodes[merged_right].num_children\n merged_node.index = merged_node_index\n merged_left_index = nodes[merged_left].index\n merged_right_index = nodes[merged_right].index\n del nodes[merged_right]\n nodes[merged_left] = merged_node\n \n linkage_matrix.append([merged_left_index, merged_right_index, np.fabs(denom), merged_node.num_children])\n merged_node_index += 1\n n_nodes -= 1\n # The denominator of log_rk is at the final merge is an estimate of the\n # marginal likelihood of the data under DPMM\n # lml = denom\n return np.asarray(linkage_matrix)", "def triplets_correct(\n tree1: CassiopeiaTree,\n tree2: CassiopeiaTree,\n number_of_trials: int = 1000,\n min_triplets_at_depth: int = 1,\n) -> Tuple[\n Dict[int, float], Dict[int, float], Dict[int, float], Dict[int, float]\n]:\n\n # keep dictionary of triplets correct\n all_triplets_correct = defaultdict(int)\n unresolved_triplets_correct = defaultdict(int)\n resolvable_triplets_correct = defaultdict(int)\n proportion_unresolvable = 
defaultdict(int)\n\n # create copies of the trees and collapse process\n T1 = copy.deepcopy(tree1)\n T2 = copy.deepcopy(tree2)\n\n T1.collapse_unifurcations()\n T2.collapse_unifurcations()\n\n # set depths in T1 and compute number of triplets that are rooted at\n # ancestors at each depth\n depth_to_nodes = critique_utilities.annotate_tree_depths(T1)\n\n max_depth = np.max([T1.get_attribute(n, \"depth\") for n in T1.nodes])\n for depth in range(max_depth):\n\n score = 0\n number_unresolvable_triplets = 0\n\n # check that there are enough triplets at this depth\n candidate_nodes = depth_to_nodes[depth]\n total_triplets = sum(\n [T1.get_attribute(v, \"number_of_triplets\") for v in candidate_nodes]\n )\n if total_triplets < min_triplets_at_depth:\n continue\n\n for _ in range(number_of_trials):\n\n (i, j, k), out_group = critique_utilities.sample_triplet_at_depth(\n T1, depth, depth_to_nodes\n )\n\n reconstructed_outgroup = critique_utilities.get_outgroup(\n T2, (i, j, k)\n )\n\n is_resolvable = True\n if out_group == \"None\":\n number_unresolvable_triplets += 1\n is_resolvable = False\n\n # increment score if the reconstructed outgroup is the same as the\n # ground truth\n score = int(reconstructed_outgroup == out_group)\n\n all_triplets_correct[depth] += score\n if is_resolvable:\n resolvable_triplets_correct[depth] += score\n else:\n unresolved_triplets_correct[depth] += score\n\n all_triplets_correct[depth] /= number_of_trials\n\n if number_unresolvable_triplets == 0:\n unresolved_triplets_correct[depth] = 1.0\n else:\n unresolved_triplets_correct[depth] /= number_unresolvable_triplets\n\n proportion_unresolvable[depth] = (\n number_unresolvable_triplets / number_of_trials\n )\n\n if proportion_unresolvable[depth] < 1:\n resolvable_triplets_correct[depth] /= (\n number_of_trials - number_unresolvable_triplets\n )\n else:\n resolvable_triplets_correct[depth] = 1.0\n\n return (\n all_triplets_correct,\n resolvable_triplets_correct,\n unresolved_triplets_correct,\n proportion_unresolvable,\n )", "def num_samples_trend_test(beta, delta, std_dev, alpha=0.05, n=4, num_iter=1000,\n tol=1e-6, num_cycles=10000, m=5):\n # Initialize the parameters\n power = 1.0 - beta\n P_d = 0.0\n cycle_num = 0\n min_diff_P_d_and_power = abs(P_d - power)\n best_P_d = P_d\n max_n = n\n min_n = n\n max_n_cycle = 1\n min_n_cycle = 1\n # Print information for user\n print(\"Delta (gradient): {}\".format(delta))\n print(\"Standard deviation: {}\".format(std_dev))\n print(\"Statistical power: {}\".format(power))\n\n # Compute an estimate of probability of detecting a trend if the estimate\n # Is not close enough to the specified statistical power value or if the\n # number of iterations exceeds the number of defined cycles.\n while abs(P_d - power) > tol and cycle_num < num_cycles:\n cycle_num += 1\n # print(\"Cycle Number: {}\".format(cycle_num))\n count_of_trend_detections = 0\n\n # Perform MK test for random sample.\n # could use range here\n for i in range(num_iter):\n r = np.random.normal(loc=0.0, scale=std_dev, size=n)\n x = r + delta * np.arange(n)\n trend, h, p, z = mann_kendall_test(x, alpha)\n if h:\n count_of_trend_detections += 1\n P_d = float(count_of_trend_detections) / num_iter\n\n # Determine if P_d is close to the power value.\n if abs(P_d - power) < tol:\n # print(\"P_d: {}\".format(P_d))\n # print(\"{} samples are required\".format(n))\n return n\n\n # Determine if the calculated probability is closest to the statistical\n # power.\n if min_diff_P_d_and_power > abs(P_d - power):\n 
min_diff_P_d_and_power = abs(P_d - power)\n best_P_d = P_d\n\n # Update max or min n.\n if n > max_n and abs(best_P_d - P_d) < tol:\n max_n = n\n max_n_cycle = cycle_num\n elif n < min_n and abs(best_P_d - P_d) < tol:\n min_n = n\n min_n_cycle = cycle_num\n\n # In case the tolerance is too small we'll stop the cycling when the\n # number of cycles, n, is cycling between the same values.\n elif (abs(max_n - n) == 0 and\n cycle_num - max_n_cycle >= m or\n abs(min_n - n) == 0 and\n cycle_num - min_n_cycle >= m):\n # print(\"Number of samples required has converged.\")\n # print(\"P_d: {}\".format(P_d))\n # print(\"Approximately {} samples are required\".format(n))\n return n\n\n # Determine whether to increase or decrease the number of samples.\n if P_d < power:\n n += 1\n print(\"P_d: {}\".format(P_d))\n print(\"Increasing n to {}\".format(n))\n print(\"\")\n else:\n n -= 1\n print(\"P_d: {}\".format(P_d))\n print(\"Decreasing n to {}\".format(n))\n print(\"\")\n if n == 0:\n raise ValueError(\"Number of samples = 0. This should not happen.\")", "def subsample_leaves(\n self,\n tree: CassiopeiaTree,\n collapse_source: Optional[str] = None,\n collapse_duplicates: bool = True,\n ) -> CassiopeiaTree:\n n_merges = (\n self.__number_of_merges\n if self.__number_of_merges is not None\n else int(tree.n_cell * self.__ratio)\n )\n if n_merges >= len(tree.leaves):\n raise LeafSubsamplerError(\n \"Number of required merges exceeds number of leaves in the tree.\"\n )\n if n_merges == 0:\n raise LeafSubsamplerError(\"No merges to be performed.\")\n # Tree needs to have character matrix defined\n if tree.character_matrix is None:\n raise CassiopeiaTreeError(\"Character matrix not defined.\")\n\n merged_tree = copy.deepcopy(tree)\n for _ in range(n_merges):\n # Choose first leaf\n leaf1 = np.random.choice(merged_tree.leaves)\n leaf1_state = merged_tree.get_character_states(leaf1)\n\n # Choose second leaf with weight proportional to inverse distance\n distances = merged_tree.get_distances(leaf1, leaves_only=True)\n leaves = []\n weights = []\n for leaf in sorted(distances.keys()):\n if leaf == leaf1:\n continue\n leaves.append(leaf)\n weights.append(1 / distances[leaf])\n leaf2 = np.random.choice(\n leaves, p=np.array(weights) / np.sum(weights)\n )\n\n leaf2_state = merged_tree.get_character_states(leaf2)\n\n # Merge these two leaves at the mean time of the two leaves.\n # Note that the mean time of the two leaves may never be earlier than\n # the LCA time, because each of the leaf times must be greater than or\n # equal to the LCA time.\n # If the tree is ultrametric, this preserves ultrametricity.\n new_leaf = f\"{leaf1}-{leaf2}\"\n lca = merged_tree.find_lca(leaf1, leaf2)\n new_time = (\n merged_tree.get_time(leaf1) + merged_tree.get_time(leaf2)\n ) / 2\n new_state = []\n for char1, char2 in zip(leaf1_state, leaf2_state):\n new_char = []\n if not isinstance(char1, tuple):\n char1 = (char1,)\n if not isinstance(char2, tuple):\n char2 = (char2,)\n new_state.append(char1 + char2)\n merged_tree.add_leaf(lca, new_leaf, states=new_state, time=new_time)\n merged_tree.remove_leaves_and_prune_lineages([leaf1, leaf2])\n\n if collapse_source is None:\n collapse_source = merged_tree.root\n merged_tree.collapse_unifurcations(source=collapse_source)\n\n if collapse_duplicates:\n merged_tree.collapse_ambiguous_characters()\n\n return merged_tree", "def weightedParsimony(tree, node, character, distances, alphabet):\n if node.getValue(\"A\") == -1: #node is not a leaf\n i = node.getChildren()[0]\n j = 
node.getChildren()[1]\n \n for c in alphabet:\n #Variables to hold best choice\n minI = 10000\n minIChar = ''\n minJ = 10000\n minJChar = '' #Variable named in honor of J. Chartove '14\n\n #Make recursive call to fill in lower portion of tree\n weightedParsimony(tree, i, c, distances, alphabet)\n weightedParsimony(tree, j, c, distances, alphabet)\n\n #Find lowest-score value to use in this scenario\n for b in alphabet:\n dist = distances[c][b]\n\n #Best choice for child i\n if dist+i.getValue(b) < minI:\n minI = dist+i.getValue(b)\n minIChar = b\n\n #Best choice for child j\n if dist+j.getValue(b) < minJ:\n minJ = dist+j.getValue(b)\n minJChar = b\n\n #Store best value and path in current node \n node.setValue(c, minI+minJ)\n node.setPath(c, minIChar, minJChar)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
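A minimal driver for the experiment above might look like the sketch below. It is not part of the source: it assumes msa() and its helpers (sample_tree, MLE, REAL_NEIGHBORS) are importable from one module, and the module name used here is an assumption.

# Hypothetical driver; the module name `tree_reconstruction` is an assumption.
from tree_reconstruction import msa

for beta in (0.05, 0.1, 0.2):
    rate = msa(m=100, n=500, alpha=0.1, beta=beta)
    print(f"alpha=0.1, beta={beta}: correct topology in {rate:.1%} of trials")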
Return the targets from the flat yaml file. Checks opts for the file location, but defaults to /etc/salt/roster.
def targets(tgt, tgt_type="glob", **kwargs):
    template = get_roster_file(__opts__)
    rend = salt.loader.render(__opts__, {})
    raw = compile_template(
        template,
        rend,
        __opts__["renderer"],
        __opts__["renderer_blacklist"],
        __opts__["renderer_whitelist"],
        mask_value="*passw*",
        **kwargs
    )
    conditioned_raw = {}
    for minion in raw:
        conditioned_raw[str(minion)] = salt.config.apply_sdb(raw[minion])
    return __utils__["roster_matcher.targets"](conditioned_raw, tgt, tgt_type, "ipv4")
[ "def get_targets():\n assert target_exist(),\\\n '''The directory 'Targets' does not exist or is not in the same directory as python script'''\n for i in os.listdir('Targets'):\n if i.endswith('.fasta'):\n yield i", "def parse_yaml_file(path_to_file) -> recipe.Recipe:\n with open(path_to_file, \"r\") as stream:\n try:\n return recipe.from_dict(**yaml.safe_load(stream))\n except yaml.YAMLError as exc:\n print(exc)", "def grab(source: str, target: str)->tuple[_after,_yaml_details]:\r\n spattern = load_yaml(source)\r\n tpattern = load_yaml(target)\r\n for part in spattern: # Template checking\r\n if not (part in tpattern or part == \"settings\"):\r\n if part.startswith(\"_\"): # For parts with same pattern\r\n bpart = part[2:] # Base part\r\n if bpart in spattern: # Since template is same, tokens also same\r\n spattern[part][\"tokens\"] = spattern[bpart][\"tokens\"]\r\n else:\r\n exit(f\"{error_msg} {bpart} for {part} not found\")\r\n if bpart in tpattern: # Template checking\r\n tpattern[part] = tpattern[bpart]\r\n continue\r\n part = bpart\r\n if not spattern[part][\"tokens\"]:\r\n tpattern[part] = \"\"\r\n continue\r\n exit(f\"{error_msg} Template for {part} not found\")\r\n after, rest = extract(spattern)\r\n return after, (rest, tpattern)", "def load_defaults(defaults_file: list = []):\n cfg = Config(\"configs/default.yaml\")\n # cfg = cfg.update_config(Config(\"configs/dataset.yaml\"))\n for file in defaults_file:\n print(file)\n cfg = deep_update(cfg, Config(file))\n \n cfg = Opts(cfg).parse_args()\n \n cfg = load_enviroment_path(cfg)\n return cfg", "def parse_source_descriptions(data, file_path='<string>'):\n try:\n descriptions = load_yaml(data)\n verify_source_description_list(descriptions)\n\n except yaml.YAMLError as exc:\n if hasattr(exc, 'problem_mark'):\n mark = exc.problem_mark.line\n col = exc.problem_mark.column\n error(\"Invalid YAML in source list file '{0}' at '{1}:{2}':\\n\"\n .format(file_path, mark + 1, col + 1) + to_str(exc))\n else:\n error(\"Invalid YAML in source list file '{0}':\\n\"\n .format(file_path) + to_str(exc))\n descriptions = []\n return (file_path, descriptions)", "def main(targets, source):\n logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)\n for target_yaml in targets:\n target_yaml = os.path.abspath(target_yaml)\n merged_yaml = merge(target_yaml, source)\n with open(target_yaml, 'w') as outfile:\n yaml.safe_dump(merged_yaml, outfile, default_flow_style=False)\n logging.info(\"YAML target merge with source successful. 
Bye!\")", "def task_config():\n for file in os.listdir(f\"{PWD}/config\"):\n yield {\n 'name': file,\n 'actions': [f\"ln -s {PWD}/config/{file} {HOME}/.config/{file}\"],\n 'targets': [f'{HOME}/.config/{file}'],\n 'clean': [f'rm {HOME}/.config/{file}']\n }", "def test_targets_for_shard(self) -> pulumi.Input[Sequence[pulumi.Input['TestTargetsForShardArgs']]]:\n return pulumi.get(self, \"test_targets_for_shard\")", "def parse_targets():\n\n #Creates a list from the file within the Targets folder\n target_file_list = []\n for targets in get_targets():\n target_file_list.append(targets)\n\n #Creates a list from the file within the uniquenames folder\n sequence_file_list = []\n for sequences in get_uniquenames():\n sequence_file_list.append(sequences)\n\n #Iterates over the number of elements within the target_file_list\n #For each item a file is opened from both the Targets and uniquenames folder and two new list are created\n for index in xrange(len(target_file_list)):\n target_file_path = 'Targets/' + target_file_list[index]\n with open(target_file_path, 'r') as target_file:\n target_list = target_file.read().split('>')\n\n sequence_file_path = 'uniquenames/' + sequence_file_list[index]\n with open(sequence_file_path, 'r') as sequence_file:\n sequence_list = sequence_file.read().split()\n\n #If there is a match between an item within the target_list and sequence_list that target item is appended\n #to the file created below\n output_file_name = 'fastafiles/' + str(target_file_list[index]).replace('Trinity', 'Done')\n with open(output_file_name, 'w') as output:\n for tar in target_list:\n for seq in sequence_list:\n match = re.match(seq, tar)\n if match:\n output.write(tar)", "def load_settings():\n with open(os.path.join(SCRIPT_DIR, '../road-shields.yml'), 'r') as fh:\n return yaml.load(fh, Loader=yaml.FullLoader)", "async def yaml_resources(self):\n await self.log.debug('yaml_resources', 'Started')\n resources = {}\n yamlfile = \"{}/ui-lovelace.yaml\".format(self.base_dir)\n if os.path.isfile(yamlfile):\n with open(yamlfile, encoding='utf-8',\n errors='ignore') as localfile:\n load = yaml.load(localfile, Loader)\n resources = load.get('resources', {})\n localfile.close()\n else:\n await self.log.error(\n 'yaml_resources', 'Lovelace config in yaml file not found')\n await self.log.debug('yaml_resources', resources)\n return resources", "def find_context_yml_file(cls, search_start_dir=os.getcwd()):\n yml_path = None\n\n for i in range(4):\n logger.debug(\"Searching for config file {} ({} layer deep)\".format(search_start_dir, i))\n\n potential_ge_dir = os.path.join(search_start_dir, cls.GE_DIR)\n\n if os.path.isdir(potential_ge_dir):\n potential_yml = os.path.join(potential_ge_dir, cls.GE_YML)\n if os.path.isfile(potential_yml):\n yml_path = potential_yml\n logger.debug(\"Found config file at \" + str(yml_path))\n break\n # move up one directory\n search_start_dir = os.path.dirname(search_start_dir)\n\n return yml_path", "def extract_masters(opts, masters=\"master\", port=None, raise_if_empty=True):\n if port is not None:\n master_port = opts.get(port)\n else:\n master_port = opts.get(\"master_port\")\n try:\n master_port = int(master_port)\n except ValueError:\n master_port = None\n\n if not master_port:\n emsg = \"Invalid or missing opts['master_port'].\"\n log.error(emsg)\n raise ValueError(emsg)\n\n entries = opts.get(masters, [])\n\n if not entries:\n emsg = \"Invalid or missing opts['{}'].\".format(masters)\n log.error(emsg)\n if raise_if_empty:\n raise ValueError(emsg)\n\n hostages = 
[]\n # extract candidate hostage (hostname dict) from entries\n if is_non_string_sequence(entries): # multiple master addresses provided\n for entry in entries:\n if isinstance(entry, Mapping): # mapping\n external = entry.get(\"external\", \"\")\n internal = entry.get(\"internal\", \"\")\n hostages.append(dict(external=external, internal=internal))\n\n elif isinstance(entry, str): # string\n external = entry\n internal = \"\"\n hostages.append(dict(external=external, internal=internal))\n\n elif isinstance(entries, Mapping): # mapping\n external = entries.get(\"external\", \"\")\n internal = entries.get(\"internal\", \"\")\n hostages.append(dict(external=external, internal=internal))\n\n elif isinstance(entries, str): # string\n external = entries\n internal = \"\"\n hostages.append(dict(external=external, internal=internal))\n\n # now parse each hostname string for host and optional port\n masters = []\n for hostage in hostages:\n external = hostage[\"external\"]\n internal = hostage[\"internal\"]\n if external:\n external = parse_hostname(external, master_port)\n if not external:\n continue # must have a valid external host address\n internal = parse_hostname(internal, master_port)\n masters.append(dict(external=external, internal=internal))\n\n return masters", "def extract_targets(args: Namespace) -> list:\n if type(args) is not Namespace:\n raise TypeError(\"[!] Invalid type for args\")\n\n list_targets = []\n\n if args.target_file:\n try:\n list_targets = list(\n set(list_targets + filter_targets(extract_targets_from_file(args.target_file)))\n )\n except IOError:\n print(\"[!] An error occurred when the program tries to open the file\")\n except Exception as e:\n print(e)\n\n if args.targets:\n try:\n list_targets = list(set((list_targets + filter_targets(args.targets))))\n except TypeError as e:\n print(e)\n\n return list_targets", "def merge_location_files(locfile):\n loc_tree = load_yaml_file('location') # active location file\n with file(locfile, 'r') as fd:\n old_tree = yaml.load(fd)\n\n for rat in loc_tree.keys():\n for day in loc_tree[rat]['days'].keys():\n try:\n tetrodes = old_tree[rat]['days'][day]['tetrodes']\n except KeyError:\n pass\n else:\n loc_tree[rat]['days'][day]['tetrodes'] = tetrodes\n\n write_yaml_file(loc_tree, 'location')", "def get_yaml(yamlfile):\n # FIXME test this sub\n config_file = open(\"%s/etc/%s\" % (os.environ['KERRIGAN_ROOT'], yamlfile), 'r')\n y = yaml.load(config_file)\n return y", "def list_configs():\n status, content = util.read_yaml(FUMI_YML)\n\n if not status:\n sys.exit(-1)\n\n if not content:\n util.cprint(m.NO_YML, 'red')\n sys.exit(-1)\n\n for conf in content.keys():\n is_default = content[conf].get('default', False)\n\n if is_default:\n util.cprint(m.LIST_DEFAULT % conf)\n\n else:\n util.cprint('- %s' % conf)", "def get_defaults():\n settings_file = pathlib.Path.home() / '.pudl.yml'\n\n try:\n with pathlib.Path(settings_file).open() as f:\n default_workspace = yaml.safe_load(f)\n except FileNotFoundError:\n logger.warning(\"PUDL user settings file .pudl.yml not found.\")\n default_workspace = {\"pudl_in\": None, \"pudl_out\": None}\n return default_workspace\n\n # Ensure that no matter what the user has put in this file, we get fully\n # specified absolute paths out when we read it:\n pudl_in = (\n pathlib.Path(default_workspace[\"pudl_in\"]).\n expanduser().\n resolve()\n )\n pudl_out = (\n pathlib.Path(default_workspace[\"pudl_out\"]).\n expanduser().\n resolve()\n )\n return derive_paths(pudl_in, pudl_out)", "def 
test_loads_from_file_is_searching_in_rkd_path(self):\n\n yaml_loader = YamlFileLoader([])\n\n d = tempfile.TemporaryDirectory()\n os.environ['RKD_PATH'] = d.name\n\n with open(d.name + '/makefile.yml', 'w') as f:\n f.write('''\nversion: org.riotkit.rkd/yaml/v1\nimports: []\ntasks: \n :join:iwa-ait:\n description: Subscribe to any local section of IWA-AIT, workers have common interest\n arguments:\n - not a list\n ''')\n\n try:\n self.assertRaises(YAMLFileValidationError,\n lambda: yaml_loader.load_from_file('makefile.yml', 'org.riotkit.rkd/yaml/v1'))\n finally:\n d.cleanup()\n os.environ['RKD_PATH'] = ''" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a Dash DataTable with the experiments in `experiments_df`.
def _get_experiment_table(id, experiments_df): return dash_table.DataTable( id={"type": "experiment-table", "index": id}, columns=[ {"name": i, "id": i, "selectable": True, "hideable": True} for i in experiments_df.columns ], data=experiments_df.compute().to_dict("records"), page_size=app._page_size, filter_action="native", sort_action="native", sort_mode="multi", row_selectable="multi", selected_rows=[], fixed_columns={"headers": True, "data": 1}, style_cell={"overflow": "hidden", "textOverflow": "ellipsis"}, style_header={"color": "#707171", "fontWeight": 700}, style_table={"minWidth": "100%"}, style_cell_conditional=[ { "if": {"column_id": "id"}, "width": "300px", "minWidth": "300px", "maxWidth": "300px", }, { "if": {"column_id": "commit_hash"}, "width": "120px", "minWidth": "120px", "maxWidth": "120px", }, ], style_data_conditional=[{"if": {"row_index": "odd"}, "backgroundColor": "#f1f7fa"}], )
[ "def generate_table(df):\n return dash_table.DataTable(\n id='table',\n columns=[\n {\"name\": i, \"id\": i, \"selectable\": True} for i in df.columns\n ],\n page_size=14,\n style_cell={'padding': '5px',#'textAlign': 'right',\n 'fontSize':12,'whiteSpace': 'normal',\n 'height': 'auto'},\n style_header={\n 'backgroundColor': 'white',\n 'fontWeight': 'bold'\n },\n style_data={\n 'whiteSpace': 'normal',\n 'height': 'auto',\n 'lineHeight': '14px'\n },\n style_table={'height': '500px', 'overflowY': 'auto'},\n style_cell_conditional=[\n {\n 'if': {'column_id': 'country'},\n 'fontWeight': 'bold',\n 'textAlign': 'left'\n }\n ],\n data=df.to_dict('records'),\n sort_action=\"native\",\n )", "def get_data_df(self):\n if self.plot_type_data == ptc.REPLICATE:\n y = self.get_replicate_y_data()\n sd = self.get_replicate_sd()\n else:\n y = [self.y_data]\n sd = self.sd\n simulation_condition_id = \\\n self.line_data[ptc.SIMULATION_CONDITION_ID].iloc[0]\n observable_id = self.line_data[ptc.OBSERVABLE_ID].iloc[0]\n df = pd.DataFrame(\n {C.Y: y, C.NAME: self.legend_name,\n C.IS_SIMULATION: self.is_simulation,\n C.DATASET_ID: self.dataset_id,\n C.SD: sd, C.SEM: self.sem,\n C.SIMULATION_CONDITION_ID: simulation_condition_id,\n C.OBSERVABLE_ID: observable_id})\n return df", "def expenses_table(expense_data=None):\n if expense_data is not None:\n expenses = expense_data # user option\n else:\n expenses = Expenses.query.filter_by(budget_id=selected_budget()).all() # query all expenses for specified budget\n id = []\n expenses_description = []\n categories = []\n expenses_amount = []\n transaction_dates = []\n due_dates_list = []\n reports = []\n if expenses:\n for expense in expenses:\n id.append(expense.id)\n expenses_description.append(expense.expense_description)\n categories.append(category_choice(expense.category))\n expenses_amount.append(round(expense.expense_amount, 2))\n transaction_dates.append(expense.transaction_date.strftime('%m/%d/%Y'))\n reports.append(due_dates(expense.due_date))\n fig = plot({\"data\":[go.Table(columnorder=[1, 2, 3, 4, 5, 6],\n columnwidth=[25, 40, 60, 35, 65, 90],\n header=dict(values=['ID', 'Category', 'Description', 'Amount', 'Transaction/Due-Date', 'Reports'],\n fill_color='#39ace7',\n font=dict(color='white', size=12),\n #fill=dict(color=['#39ace7', 'white']),\n align='center'),\n cells=dict(values=[id, categories, expenses_description, expenses_amount, transaction_dates,\n reports],\n fill_color='lightcyan',\n align='center'))],\n \"layout\":go.Layout(margin=dict(t=50, l=25, r=25, b=50))}, output_type='div')\n return fig", "def cards_df(url=CARD_URL):\n return pd.DataFrame(get_cards(url))", "def generate_tweet_table(dataframe):\n return dash_table.DataTable(id=\"responsive-table\",\n columns=[{'name': 'Date', 'id':'date', 'type': 'datetime'},\n {'name': 'Tweet', 'id':'tweet', 'type': 'text'},\n {'name': 'Sentiment', 'id':'sentiment', 'type': 'numeric'},\n {'name': 'Link', 'id':'link', 'type': 'text', 'presentation':'markdown'}],\n data = dataframe.to_dict('records'),\n style_header={\n 'backgroundColor': 'rgb(52, 73, 94)',\n 'fontWeight': 'bold',\n 'color': colors['text'],\n 'textAlign': 'left',\n 'fontSize': '12pt',\n 'height': 'auto',\n 'width': 'auto'\n },\n style_cell={'padding': '5px',\n 'backgroundColor': colors['background'],\n 'color': colors['table-text'],\n 'textAlign':'left',\n 'height':'auto',\n 'whiteSpace':'normal',\n 'lineHeight':'15px',\n 'width':'auto'},\n style_as_list_view=True,\n style_data_conditional=[\n {\n 'if': {\n 'filter_query': '{sentiment} < 
-0.3'\n },\n 'backgroundColor': colors['sl-negative-sentiment'],\n 'color': colors['ex-negative-sentiment']\n },\n {\n 'if': {\n 'filter_query': '{sentiment} < -0.6'\n },\n 'backgroundColor': colors['ex-negative-sentiment'],\n 'color': 'white'\n },\n {\n 'if': {\n 'filter_query': '{sentiment} > 0.3'\n },\n 'backgroundColor': colors['sl-positive-sentiment'],\n 'color': colors['ex-positive-sentiment']\n },\n {\n 'if': {\n 'filter_query': '{sentiment} > 0.6'\n },\n 'backgroundColor': colors['ex-positive-sentiment'],\n 'color': 'white'\n },\n ]),", "def generate_flagged_tweet_table(dataframe):\n return dash_table.DataTable(id=\"responsive-table\",\n columns=[{'name': 'Date', 'id':'date', 'type': 'datetime'},\n {'name': 'Tweet', 'id':'tweet', 'type': 'text'},\n {'name': 'Sentiment', 'id':'sentiment', 'type': 'numeric'},\n {'name': 'Link', 'id':'link', 'type': 'text', 'presentation':'markdown'}],\n data = dataframe.to_dict('records'),\n style_header={\n 'backgroundColor': 'rgb(52, 73, 94)',\n 'fontWeight': 'bold',\n 'color': colors['text'],\n 'textAlign': 'left',\n 'fontSize': '12pt',\n 'height': 'auto',\n 'width': 'auto'\n },\n style_cell={'padding': '5px',\n 'backgroundColor': colors['background'],\n 'color': colors['table-text'],\n 'textAlign':'left',\n 'height':'auto',\n 'whiteSpace':'normal',\n 'lineHeight':'15px',\n 'width':'auto'},\n style_as_list_view=True,\n style_data_conditional=[\n {\n 'if': {\n 'filter_query': '{sentiment} < -0.3'\n },\n 'backgroundColor': colors['sl-negative-sentiment'],\n 'color': colors['ex-negative-sentiment']\n },\n {\n 'if': {\n 'filter_query': '{sentiment} < -0.6'\n },\n 'backgroundColor': colors['ex-negative-sentiment'],\n 'color': 'white'\n },\n {\n 'if': {\n 'filter_query': '{sentiment} > 0.3'\n },\n 'backgroundColor': colors['sl-positive-sentiment'],\n 'color': colors['ex-positive-sentiment']\n },\n {\n 'if': {\n 'filter_query': '{sentiment} > 0.6'\n },\n 'backgroundColor': colors['ex-positive-sentiment'],\n 'color': 'white'\n },\n ]),", "def get_filtered_ophys_experiment_table(include_failed_data=False):\n cache = get_qc_cache()\n experiments = cache.get_experiment_table()\n experiments = experiments.reset_index()\n experiments['super_container_id'] = experiments['specimen_id'].values\n\n experiments = reformat_experiments_table(experiments)\n if include_failed_data:\n filtered_experiments = experiments[experiments.experiment_workflow_state.isin(['passed', 'failed'])]\n else:\n # sessions = sdk_utils.get_filtered_sessions_table(cache, include_multiscope=True, require_full_container=False, require_exp_pass=False)\n # sessions = sessions.reset_index()\n # filtered_experiments = experiments[experiments.ophys_session_id.isin(sessions.ophys_session_id.unique())]\n filtered_experiments = experiments.copy()\n filtered_experiments = filtered_experiments[filtered_experiments.experiment_workflow_state == 'passed']\n filtered_experiments = filtered_experiments[filtered_experiments.container_workflow_state != 'failed'] # include containers in holding\n\n return filtered_experiments", "def _table_viewer(table, rows_per_page=25, fields=None):\n\n # TODO(gram): rework this to use datalab.utils.commands.chart_html\n\n if not table.exists():\n raise Exception('Table %s does not exist' % str(table))\n\n if not table.is_listable():\n return \"Done\"\n\n _HTML_TEMPLATE = u\"\"\"\n <div class=\"bqtv\" id=\"{div_id}\">{static_table}</div>\n <br />{meta_data}<br />\n <script src=\"/static/components/requirejs/require.js\"></script>\n <script>\n\n require.config({{\n paths: 
{{\n base: '/static/base',\n d3: '//cdnjs.cloudflare.com/ajax/libs/d3/3.4.13/d3',\n plotly: 'https://cdn.plot.ly/plotly-1.5.1.min.js?noext',\n jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min'\n }},\n map: {{\n '*': {{\n datalab: 'nbextensions/gcpdatalab'\n }}\n }},\n shim: {{\n plotly: {{\n deps: ['d3', 'jquery'],\n exports: 'plotly'\n }}\n }}\n }});\n\n require(['datalab/charting', 'datalab/element!{div_id}', 'base/js/events',\n 'datalab/style!/nbextensions/gcpdatalab/charting.css'],\n function(charts, dom, events) {{\n charts.render('gcharts', dom, events, '{chart_style}', [], {data},\n {{\n pageSize: {rows_per_page},\n cssClassNames: {{\n tableRow: 'gchart-table-row',\n headerRow: 'gchart-table-headerrow',\n oddTableRow: 'gchart-table-oddrow',\n selectedTableRow: 'gchart-table-selectedrow',\n hoverTableRow: 'gchart-table-hoverrow',\n tableCell: 'gchart-table-cell',\n headerCell: 'gchart-table-headercell',\n rowNumberCell: 'gchart-table-rownumcell'\n }}\n }},\n {{source_index: {source_index}, fields: '{fields}', legacy: 'true'}},\n 0,\n {total_rows});\n }}\n );\n </script>\n \"\"\"\n\n if fields is None:\n fields = datalab.utils.commands.get_field_list(fields, table.schema)\n div_id = datalab.utils.commands.Html.next_id()\n meta_count = ('rows: %d' % table.length) if table.length >= 0 else ''\n meta_name = str(table) if table.job is None else ('job: %s' % table.job.id)\n if table.job:\n if table.job.cache_hit:\n meta_cost = 'cached'\n else:\n bytes = datalab.bigquery._query_stats.QueryStats._size_formatter(table.job.bytes_processed)\n meta_cost = '%s processed' % bytes\n meta_time = 'time: %.1fs' % table.job.total_time\n else:\n meta_cost = ''\n meta_time = ''\n\n data, total_count = datalab.utils.commands.get_data(table, fields, first_row=0,\n count=rows_per_page)\n\n if total_count < 0:\n # The table doesn't have a length metadata property but may still be small if we fetched less\n # rows than we asked for.\n fetched_count = len(data['rows'])\n if fetched_count < rows_per_page:\n total_count = fetched_count\n\n chart = 'table' if 0 <= total_count <= rows_per_page else 'paged_table'\n meta_entries = [meta_count, meta_time, meta_cost, meta_name]\n meta_data = '(%s)' % (', '.join([entry for entry in meta_entries if len(entry)]))\n\n return _HTML_TEMPLATE.format(div_id=div_id,\n static_table=datalab.utils.commands.HtmlBuilder\n .render_chart_data(data),\n meta_data=meta_data,\n chart_style=chart,\n source_index=datalab.utils.commands\n .get_data_source_index(str(table)),\n fields=','.join(fields),\n total_rows=total_count,\n rows_per_page=rows_per_page,\n data=json.dumps(data, cls=datalab.utils.JSONEncoder))", "def _scrape_experts(url, payload=None):\n expert_df = pd.DataFrame()\n soup = utils.get_soup(url, payload=payload)\n tr = soup.find_all('tr')\n\n timestamp = datetime.now().strftime(_timestamp_format)\n for row in tr:\n if row.find('input', class_='expert') is None:\n continue\n\n expert_info = row.find_all('a')\n expert_name = expert_info[0].text\n expert_site = expert_info[1].text\n\n attrs = row.find('input').attrs\n expert_id = attrs['value']\n if 'checked' in attrs and attrs['checked'] == 'checked':\n checked = True\n else:\n checked = False\n\n updated_date = row.find_all('td')[-1].text\n\n expert_df = expert_df.append(\n pd.DataFrame({\n 'expert_id': [expert_id],\n 'expert_name': [expert_name],\n 'site': [expert_site],\n 'checked': [checked],\n 'updated_date': [updated_date],\n 'timestamp': [timestamp]\n })\n )\n\n return expert_df", "def get_experiments() 
-> List[Dict]:\n return _get_directory_listing_for_dash_dropdown('experiments')", "def make_df_efficient(ds):\n data = {}\n for v in ds.data_vars:\n data[v] = ds[v].to_pandas()\n return pd.DataFrame(data)", "def get_site_dt_data():\n\n metadata = MetaData()\n sites_table = Table('vw_sites', metadata, autoload=True, autoload_with=db.engine, schema='live_network')\n\n\n columns = []\n for c in sites_table.columns:\n columns.append(ColumnDT( c, column_name=c.name, mData=c.name))\n\n query = db.session.query(sites_table)\n\n # GET request parameters\n params = request.args.to_dict()\n\n row_table = DataTables(params, query, columns)\n\n return jsonify(row_table.output_result())", "def _extract_table_data(\n key: str,\n cols: List,\n schema: dict,\n sheet_name: str = \"acceptance\",\n sheet_index: int = 1\n) -> pd.DataFrame:\n df = gs.load_gs_to_dataframe(\n key=key,\n usecols=cols,\n dtype=schema,\n names=list(schema.keys()),\n sheet_name=sheet_name,\n sheet_index=sheet_index,\n skiprows=3,\n evaluate_formulas=True\n )\n df = df.dropna(how=\"all\")\n\n return df", "def _metrics_df(self) -> pd.DataFrame:\n metrics_df = pd.DataFrame(\n data=self.metrics_dict_,\n index=[\"Metrics\"],\n )\n # TODO(amir): can we do df.reindex() ?\n metrics_df = metrics_df.reindex(\n columns=[\n \"R2 Score\",\n \"Explained Variance Score\",\n \"Mean Absolute Error\",\n \"Mean Squared Error\",\n \"Mean Squared Log Error\",\n \"Mean Absolute Percentage Error\",\n \"REC AUC\",\n \"Coeff. of Variation\",\n \"Mean of Variation\",\n ],\n )\n\n # TODO(amir): move this to a utility function under utils/format.py since it is repeated\n # that would make it more general and scalable across API\n # Set CSS properties\n th_props = [\n (\"font-size\", \"12px\"),\n (\"text-align\", \"left\"),\n (\"font-weight\", \"bold\"),\n ]\n\n td_props = [\n (\"font-size\", \"12px\"),\n (\"text-align\", \"center\"),\n ]\n\n # Set table styles\n styles = [\n dict(\n selector=\"th\",\n props=th_props,\n ),\n dict(\n selector=\"td\",\n props=td_props,\n ),\n ]\n cm = sns.light_palette(\n \"blue\",\n as_cmap=True,\n )\n\n if self.display_df:\n display(\n metrics_df.style.background_gradient(\n cmap=cm,\n ).set_table_styles(styles),\n )\n\n return metrics_df", "def dataframe(self) -> pd.DataFrame:\n data = []\n columns = [\"lection\", 'season', 'week', 'day']\n for lection_membership in self.lections_in_system():\n if type(lection_membership.day) != MovableDay:\n raise NotImplementedError(f\"Cannot yet export for days of type {type(lection_membership.day)}.\")\n data.append(\n [\n lection_membership.lection.description, \n lection_membership.day.get_season_display(), \n lection_membership.day.week, \n lection_membership.day.get_day_of_week_display(), \n ]\n )\n df = pd.DataFrame(data, columns=columns)\n return df", "def _build_datatable(self):\n def _get_value_metadata(cfg, data=None):\n \"\"\"Get value from metadata.\"\"\"\n if cfg.get('key'):\n return self.metadata.get(cfg.get('key'))\n\n def _get_value_datastore(cfg, data=None):\n \"\"\"Get value(s) from datastore.\"\"\"\n # jq().transform() returns a list of string(s)\n try:\n res = jq(cfg['jqexpr']).transform(data, multiple_output=True)\n except Exception as e:\n if 'Cannot iterate over null' in str(e):\n res = [np.nan]\n else:\n print('ERROR: Unable to get value from JSON: %s' % e)\n print('ERROR: cfg = %s' % cfg)\n print('ERROR: data = %s' % data)\n exit(1)\n\n # multiply the factor if available\n if 'factor' in cfg:\n res = [x * cfg['factor'] for x in res]\n\n # return the whole list or 
the only value\n return res if len(res) > 1 else res[0]\n\n def _get_value_auto(cfg, data=None):\n \"\"\"Get value by calculating.\"\"\"\n if cfg['name'] == 'Sample':\n return 'all'\n if cfg['name'] == 'Path':\n value = os.path.join(data['path_lv_1'], data['path_lv_2'])\n return value\n\n def _get_value_unknown(cfg, data=None):\n print('ERROR: Unknown type in \"source\", config = \"%s\".' % cfg)\n exit(1)\n\n switch = {\n 'metadata': _get_value_metadata,\n 'datastore': _get_value_datastore,\n 'auto': _get_value_auto,\n }\n\n self.config\n self.datastore\n self.datatable = []\n\n # generate rows for the datatable\n for iterdata in self.datastore:\n # generate one row\n data = {}\n for cfg in self.config.get('columns'):\n # get and set value(s)\n name = cfg.get('name')\n data[name] = switch.get(cfg['source'],\n _get_value_unknown)(cfg, iterdata)\n\n # deal with split if needed\n need_split = False\n if self.config.get('defaults', {}).get('split'):\n # get max number of samples\n max_sample = 1\n for value in data.values():\n if isinstance(value, list) and len(value) > max_sample:\n max_sample = len(value)\n need_split = True if max_sample > 1 else False\n\n if need_split:\n # split into samples\n for index in range(1, max_sample + 1):\n sample_data = {}\n # deal with each column\n for name, value in data.items():\n if isinstance(value, list):\n # get the first value and save the rest\n sample_data[name] = value[0]\n data[name] = value[1:]\n # Set \"WRONG\" flags for user check\n if len(data[name]) == 0:\n data[name] = 'WRONG'\n else:\n sample_data[name] = value\n\n # update related columns\n if 'Sample' in data:\n sample_data['Sample'] = index\n if 'Path' in data:\n sample_data['Path'] = os.path.join(\n data['Path'], 'sample%s' % index)\n\n # save this row (sample) to datatable\n self.datatable.append(sample_data.copy())\n else:\n # no need to split, save directly\n self.datatable.append(data.copy())", "def build_df(self, cat_data):\n # Iterate through competitions, build list of dicts for df\n data_list = []\n for comp in cat_data:\n # Iterate through results per comp\n for result in comp:\n # Convert to dict\n this_dict = dict(result)\n data_list.append(this_dict)\n \n # Convert to df\n df = pd.DataFrame(data_list)\n\n return df", "def get_ContentDataFrame(self):\n return self.df", "def estimation_table(\n models,\n *,\n return_type=\"dataframe\",\n render_options=None,\n show_col_names=True,\n show_col_groups=None,\n show_index_names=False,\n show_inference=True,\n show_stars=True,\n show_footer=True,\n custom_param_names=None,\n custom_col_names=None,\n custom_col_groups=None,\n custom_index_names=None,\n custom_notes=None,\n confidence_intervals=False,\n significance_levels=(0.1, 0.05, 0.01),\n append_notes=True,\n notes_label=\"Note:\",\n stats_options=None,\n number_format=(\"{0:.3g}\", \"{0:.5f}\", \"{0:.4g}\"),\n add_trailing_zeros=True,\n escape_special_characters=True,\n siunitx_warning=True,\n):\n if not isinstance(models, (tuple, list)):\n raise TypeError(f\"models must be a list or tuple. 
Not: {type(models)}\")\n models = [_process_model(model) for model in models]\n model_names = _get_model_names(models)\n default_col_names, default_col_groups = _get_default_column_names_and_groups(\n model_names\n )\n column_groups = _customize_col_groups(\n default_col_groups=default_col_groups, custom_col_groups=custom_col_groups\n )\n column_names = _customize_col_names(\n default_col_names=default_col_names, custom_col_names=custom_col_names\n )\n show_col_groups = _update_show_col_groups(show_col_groups, column_groups)\n stats_options = _set_default_stats_options(stats_options)\n body, footer = _get_estimation_table_body_and_footer(\n models,\n column_names,\n column_groups,\n custom_param_names,\n custom_index_names,\n significance_levels,\n stats_options,\n show_col_names,\n show_col_groups,\n show_stars,\n show_inference,\n confidence_intervals,\n number_format,\n add_trailing_zeros,\n )\n\n render_inputs = {\n \"body\": body,\n \"footer\": footer,\n \"render_options\": render_options,\n }\n if return_type == \"render_inputs\":\n out = render_inputs\n elif str(return_type).endswith(\"tex\"):\n out = render_latex(\n **render_inputs,\n show_footer=show_footer,\n append_notes=append_notes,\n notes_label=notes_label,\n significance_levels=significance_levels,\n custom_notes=custom_notes,\n siunitx_warning=siunitx_warning,\n show_index_names=show_index_names,\n show_col_names=show_col_names,\n escape_special_characters=escape_special_characters,\n )\n elif str(return_type).endswith(\"html\"):\n out = render_html(\n **render_inputs,\n show_footer=show_footer,\n append_notes=append_notes,\n notes_label=notes_label,\n custom_notes=custom_notes,\n significance_levels=significance_levels,\n show_index_names=show_index_names,\n show_col_names=show_col_names,\n escape_special_characters=escape_special_characters,\n )\n\n elif return_type == \"dataframe\":\n if show_footer:\n footer.index.names = body.index.names\n out = pd.concat([body.reset_index(), footer.reset_index()]).set_index(\n body.index.names\n )\n else:\n out = body\n else:\n raise ValueError(\n f\"\"\"Value of return type can be either of\n ['data_frame', 'render_inputs','latex' ,'html']\n or a path ending with '.html' or '.tex'. Not: {return_type}.\"\"\"\n )\n\n return_type = Path(return_type)\n if return_type.suffix not in (\".html\", \".tex\"):\n return out\n else:\n return_type.write_text(out)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the GitHub commit URL, if it exists
def _get_github_commit_url(github_url, commit_hash): is_github_enabled = github_url is not None github_commit_url = f"{github_url[:-4]}/tree/{commit_hash}" if is_github_enabled else None return github_commit_url
[ "def get_commit_url(self, commit: str) -> str:\n raise NotImplementedError", "def commit_url(self):\n return self._commit_url", "def get_github_repo_url():\n return 'git://github.com/%s/%s.git' % (MOZILLA_GITHUB_ACCOUNT, DEEPSPEECH_GITHUB_PROJ)", "def _get_git_remote_url(git_repo):\n # if not matching something/something\n # such as a local directory \".\", then\n # simply return this unmodified.\n if not re.match(r\"^[^/]+/[^/]+$\", git_repo):\n return git_repo\n\n github_token = os.getenv(GITHUB_TOKEN_KEY)\n if github_token:\n return f\"https://{github_token}@github.com/{git_repo}\"\n return f\"git@github.com:{git_repo}\"", "def fetch_commit(self, event):\n\n gh = self.github\n user = self.options.user\n repo = self.options.project\n\n rc, data = gh.repos[user][repo].git.commits[\n event[\"commit_id\"]].get()\n if rc == 200:\n return data\n self.raise_GitHubError(rc, data, gh.getheaders())", "def get_github_compare_url(last_sha1):\n return '%s/repos/%s/%s/compare/%s...%s' % (GITHUB_API_BASE, MOZILLA_GITHUB_ACCOUNT, DEEPSPEECH_GITHUB_PROJ, last_sha1, DEEPSPEECH_GITHUB_REF)", "def git_url(cls) -> str:\n return cls.url() + \".git\"", "def github_link(self):\n if self.test_type == TestType.commit:\n test_type = 'commit'\n test_id = self.commit\n else:\n test_type = 'pull'\n test_id = self.pr_nr\n\n return \"{base}/{test_type}/{test_id}\".format(\n base=self.fork.github_url, test_type=test_type, test_id=test_id)", "def repo_url_from_metadata(filename, metadata):\n repo_url = matching_text(metadata, SOURCE_URL_REGEXES)\n if repo_url is None:\n print(f\"No repo URL in {filename}\")\n return None\n if repo_url == \"UNKNOWN\":\n print(f\"Repo URL is UNKNOWN in {filename}\")\n return None\n return repo_url", "def test_repo_get_single_commit_by_ref(self):\n pass", "def _repo_name_from_url(url_decode: str):\n github_project_name = os.path.split(url_decode.path)[-1]\n return github_project_name.replace('.git', '')", "def test_repo_get_single_commit_by_sha(self):\n pass", "def get_last_commit(github_config, owner, repo, branch='main'):\n commits_url = '/'.join([\n github_config.api_url,\n 'repos',\n owner, repo,\n 'commits'\n ])\n commits_response = requests.get(commits_url, params={'sha': 'main'},\n headers=github_config.headers)\n commits_json = commits_response.json()\n if commits_response.status_code != 200:\n raise GitHubError(\"Unable to get commits. 
{}\".format(\n commits_json['message']))\n\n return commits_json[0]['sha']", "def parse_github_url(url):\n \n if url is None or len(url.strip()) == 0:\n return None, None, None, None\n \n url = url.strip()\n parsed_url = urlparse(url)\n path_list = parsed_url.path.split('/')\n \n hostname = parsed_url.netloc\n org = path_list[1]\n repo = path_list[2]\n \n if len(path_list) == 5:\n pr_or_issue_number = path_list[4]\n\n return hostname, org, repo, pr_or_issue_number", "def get_git_hash():\n try:\n with open(os.devnull, \"w\") as shutup:\n return (\n subprocess.check_output([\"git\", \"rev-parse\", \"HEAD\"], stderr=shutup)\n .decode(\"utf-8\")\n .strip(\"\\n\")\n )\n except subprocess.CalledProcessError:\n return \"not_a_git_repo\"", "def git_remote_url(self) -> str:\n try:\n p = subprocess.run(\n \"git config --get remote.origin.url\".split(),\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n encoding=\"utf-8\",\n check=True,\n cwd=self.local_dir,\n )\n url = p.stdout.strip()\n # Strip basic auth info.\n return re.sub(r\"https://.*@\", \"https://\", url)\n except subprocess.CalledProcessError as exc:\n raise EnvironmentError(exc.stderr)", "def get_git_commit_sha():\n\n return os.getenv(\"GIT_COMMIT\")", "def get_commit_id(\n commit_ref: str,\n) -> str:\n try:\n return run_git([\"rev-parse\", commit_ref])\n except subprocess.CalledProcessError as e:\n abort(f\"{get_commit_id} failed with error: {e}\")", "def get_commit_hash():\n return git.Repo().head.object.hexsha" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The HTML layout for an individual project explorer view determined by `commit_hash` in the dashboard. A project explorer view shows a subset of the experiments logged to a project in a tabular format and plots them on a parallel coordinates plot.
def make_individual_project_explorer_layout(rubicon_model, commit_hash): id = str(uuid.uuid4()) experiment_table_df = rubicon_model.get_experiment_table_df(commit_hash) github_commit_url = _get_github_commit_url( rubicon_model.selected_project.github_url, commit_hash ) group_store = dcc.Store( id={"type": "group-store", "index": id}, data={"commit_hash": commit_hash}, ) group_preview_title = [ html.P( f"{len(experiment_table_df)} experiments", id="group-preview-title", className="group-preview-title", ) ] if commit_hash is not None and rubicon_model.selected_project.github_url is not None: group_preview_title.append( html.A( f"at commit {commit_hash[:7]}", id="group-preview-title-link", className="group-preview-title-link", href=github_commit_url, target="_blank", ) ) group_model_names = rubicon_model.get_model_names(commit_hash) if len(group_model_names) > 0: group_model_names_text = f"model name: {group_model_names[0]}" if len(group_model_names) > 1: group_model_names_text += f" (+{len(group_model_names) - 1} more)" group_model_names_view = html.P( group_model_names_text, id="group-preview-model-names", className="group-preview-model-names", ) else: group_model_names_view = html.P(style={"display": "none"}) chevron = html.I(className="fas fa-chevron-down") group_preview_row = dbc.Row( id={"type": "group-preview-row", "index": id}, className="group-preview-row", children=[ dbc.Row(group_preview_title, style={"margin": "inherit"}), group_model_names_view, html.Button( chevron, id={"type": "show-group-detail-collapsable-btn", "index": id}, className="show-group-detail-collapsable-btn", ), html.Button( chevron, id={"type": "hide-group-detail-collapsable-btn", "index": id}, className="hide-group-detail-collapsable-btn", hidden=True, ), ], ) experiment_table_bulk_action_button_group = html.Div( className="btn-group", children=[ html.Button( "Select All", id={"type": "select-all-btn", "index": id}, className="btn-progressive", ), html.Button( "Clear All", id={"type": "clear-all-btn", "index": id}, className="btn-progressive", ), ], ) group_detail_card = dbc.Card( id={"type": "group-detail-card", "index": id}, className="group-detail-card", children=[ dbc.CardBody( id={"type": "group-detail-card-body", "index": id}, className="group-detail-card-body", children=[ experiment_table_bulk_action_button_group, _get_experiment_table(id, experiment_table_df), _get_comparison_layout(id, rubicon_model, commit_hash), ], ) ], ) group_detail_collapsable = dbc.Collapse( id={"type": "group-detail-collapsable", "index": id}, className="group-detail-collapsable", children=[group_detail_card], ) return dbc.Row( id={"type": "individual-project-explorer", "index": id}, className="individual-project-explorer", children=[dbc.Col([group_store, group_preview_row, group_detail_collapsable])], )
[ "def make_project_explorer_layout():\n return dbc.Col(id=\"grouped-project-explorer\", className=\"grouped-project-explorer\")", "def _update_project_explorer(values):\n # if all values are 0, the user hasn't clicked a project yet\n is_waiting_for_first_click = True\n for value in values:\n if value != 0:\n is_waiting_for_first_click = False\n\n if is_waiting_for_first_click:\n return make_empty_view(\"Please select a project to view.\")\n\n # use `dash.callback_context` to get the id of the clicked project list item\n selected_id = callback_context.triggered[0][\"prop_id\"].split(\".\")[0]\n selected_project_name = json.loads(selected_id)[\"index\"]\n\n app._rubicon_model.update_selected_project(selected_project_name)\n\n project_explorer_header = dbc.Row(\n id=\"experiment-deatils-header\",\n className=\"experiment-details-header\",\n children=selected_project_name,\n )\n\n experiment_groups = app._rubicon_model._experiment_table_dfs.items()\n\n # handle no experiments view\n if len(experiment_groups) == 0:\n return [project_explorer_header, make_empty_view(\"Log some experiments to this project!\")]\n\n _project_explorers = [\n make_individual_project_explorer_layout(app._rubicon_model, group)\n for group, _ in experiment_groups\n ]\n\n return [project_explorer_header, *_project_explorers]", "def display_project():\n\n return render_template(\"project_info.html\")", "def project_overview(request, project_name):\n\n runs = []\n\n # Find all runs and how many errors for each of them\n for run in Run.objects.filter(project_name=project_name).order_by('date'):\n errors = 0\n\n for file in File.objects.filter(run=run).order_by('filename'):\n errors += len(RunError.objects.filter(file=file))\n\n runs.append({'run_obj': run, 'errors': errors})\n\n if not len(runs):\n raise Http404\n\n return render_to_response('project_overview.html', {'runs': runs})", "def appInventorProjects():\n return render_template('coding/appInventor/projects.html', title=\"App Inventor - Projects\", year=year)", "def viewproject(self):\r\n try:\r\n if self.tab_tree.isHidden():\r\n self.tab_tree.setVisible(True)\r\n else:\r\n self.tab_tree.setVisible(False)\r\n except Exception as e:\r\n print(e)", "def scratchProject():\n return render_template('coding/scratch/scratchProject.html', title='Scratch - Award Winning Project', year=year, time=year - 2015)", "def _create_commit_summary(project_name, j, config):\n prefer_username = config.get('prefer_username', True)\n title_only = config.get('title_only', False)\n\n original = j['original']\n\n for commit in original['commits']:\n committer = commit.get('committer', {})\n author = commit.get('author', {})\n\n line = []\n\n line.append(u'{RESET}[{BLUE}{name}{RESET}]'.format(\n name=project_name,\n **HookService.colors\n ))\n\n line.append(u'{GREEN}{sha}{RESET}'.format(\n sha=commit['id'][:7],\n **HookService.colors\n ))\n\n line.append(u'-')\n\n # Show the committer.\n attribute_to = None\n if prefer_username:\n attribute_to = author.get('username')\n if not attribute_to:\n attribute_to = committer.get('username')\n if not attribute_to:\n attribute_to = author.get('name')\n if not attribute_to:\n attribute_to = committer.get('name')\n\n if attribute_to:\n line.append(u'{ORANGE}{attribute_to}{RESET}'.format(\n attribute_to=attribute_to,\n **HookService.colors\n ))\n line.append(u'-')\n\n message = commit['message']\n message_lines = message.split('\\n')\n if title_only and len(message_lines) > 0:\n message = message_lines[0]\n # Cap the commit message to 1000 characters, 
this should be around two\n # lines on IRC and stops really long messages from spamming channels.\n if len(message) > COMMIT_MESSAGE_LENGTH_LIMIT:\n message = message[:COMMIT_MESSAGE_LENGTH_LIMIT] + '...'\n line.append(message)\n\n yield u' '.join(line)", "def view_single_repo(request, repo_name):\n milestones = get_single_repo_milestone_query(is_open=True)\n\n try:\n chosen_repo = Repository.objects.select_related('organization', 'parent_repository').get(is_visible=True, github_name=repo_name)\n except Repository.DoesNotExist:\n raise Http404(\"Repository not found: %s\" % repo_name)\n\n # Repository has no parent -- so it is a parent\n # Pull milestones:\n # - from this repository (e.g. dataverse)\n # - any repositories that have this repo as a parent (e.g. geoconnect)\n #\n if not chosen_repo.parent_repository:\n milestones = milestones.filter(Q(repository=chosen_repo)|Q(repository__parent_repository=chosen_repo))\n else:\n # This is a child repository, only show its milestones\n milestones = milestones.filter(repository=chosen_repo)\n\n milestones_list = list(milestones.order_by('due_on'))\n\n current_date = datetime.now()\n for ms in milestones_list:\n if ms.due_on:\n ms.days_remaining = ms.due_on.replace(tzinfo=None) - current_date#.date()\n\n\n #open_closed_cnts = milestones.values('open_issues', 'closed_issues')\n open_closed_cnts = get_issue_counts_query_base(chosen_repo).values('open_issues', 'closed_issues')\n num_open_issues = sum(x['open_issues'] for x in open_closed_cnts)\n num_closed_issues = sum( x['closed_issues'] for x in open_closed_cnts)\n\n d = {}\n d['is_current_milestones_single_repository'] = True\n d['repos'] = Repository.objects.select_related('organization', 'parent_repository').filter(parent_repository__isnull=True).filter(is_visible=True)\n\n if chosen_repo.alt_title_display_name:\n d['page_title'] = 'Milestones: %s' % chosen_repo.alt_title_display_name\n else:\n d['page_title'] = 'Milestones: %s' % chosen_repo\n d['page_title_link'] = chosen_repo.get_github_view_url()\n #d['page_title_link'] = chosen_repo.get_github_view_milestones_url()\n\n d['chosen_repository'] = chosen_repo\n d['milestone_count'] = len(milestones_list)#.count()\n d['milestones'] = milestones_list\n\n d['num_open_issues'] = num_open_issues\n d['num_closed_issues'] = num_closed_issues\n \n\n d['SINGLE_COLUMN'] = True\n #print(d)\n return render_to_response('milestones/view_single_repo.html'\\\n , d\\\n , context_instance=RequestContext(request))", "def graph_commits_by_individual(dictionary):\n st.title(\"Commit Information\") # dispaly relevant title for dataframe\n\n df = pd.DataFrame.from_dict(dictionary, orient=\"index\").T\n\n columns = st.multiselect(\n label=\"Enter the names of specific contributors below:\", options=df.columns\n ) # allow users to display specific contributor information on dataframe graph\n st.subheader(\"Stacked column chart:\")\n st.bar_chart(\n df[columns][1:2]\n ) # display dataframe/graph that vizualizes commit info\n data_dict = {}\n for user in columns:\n data_dict[user] = dictionary[user][\"COMMITS\"]\n data_frame = pd.DataFrame.from_dict(data_dict, orient=\"index\")\n st.subheader(\"Bar chart:\")\n st.bar_chart(data_frame) # display dataframe/graph that vizualizes commit info\n return df", "def projects(request):\n repo = \"milestone4\"\n response = requests.get(f\"https://api.github.com/repos/nemixu/{repo}/branches/master\")\n \n # url = f\"https://api.github.com/repos/nemixu/{repo}/branches/master\"\n payload = {}\n headers = {} \n \n \n response = 
requests.request('GET', response, headers=headers, data = payload)\n \n print(response.text.encode('utf8'))\n return render (request, 'projects.html')", "def projects_ls(ctx, verbose):\n ctx.verbose = verbose\n hes = HoruzES(\"\", ctx)\n indexes = hes.indexes()\n if indexes:\n rtable.add_column(\"Projects\", style=\"cyan\", no_wrap=True)\n for i in indexes:\n rtable.add_row(i)\n ctx.log(rtable)", "def _generate_project(self, project_op: Project):\n\n template = open(\n \"{0}/project.tmpl\".format(self.template_directory), 'r').read()\n\n selected_cols = project_op.selected_cols\n\n data = {\n \"IN_REL\": project_op.get_in_rel().name,\n \"OUT_REL\": project_op.out_rel.name,\n \"PROJ_COLS\": ','.join(str(c.idx) for c in selected_cols),\n \"NUM_COLS\": len(selected_cols)\n }\n\n return pystache.render(template, data)", "def print_commits(self):\n\n for commit in self.repository_mining.traverse_commits():\n print(f'Commit : {commit.hash}')\n print(f'Parents : {commit.parents}')", "def dashboard(request):\n\n '''\n Retrieve the username to pass it to the template.\n The login_required decorator ensures that the username is always present.\n '''\n username = request.user.username\n\n '''\n Show the project the user is involved in, and the project owned, separately.\n If the user is not authenticated show the login page.\n '''\n\n # weird syntax to put OR in a query\n projects = Project.objects.all().filter(Q(coders=request.user) | Q(owner=request.user)).distinct()\n #projects = Project.objects.all().filter(coders=request.user)\n #owned = Project.objects.all().filter(owner=request.user)\n\n return render(request,\n 'codegui/dashboard.html',\n {'username':username,\n 'projects':projects})", "def show_project():\n\n\t# TO DO: maybe add something to check whether the title exists in the database? or does it do this already?\n\ttitle = request.args.get('title')\n\n\t# unpacking from the get_project_by_title function\n\ttitle, description, max_grade = hackbright.get_project_by_title(title)\n\n\t# get all the students that completed that project from the get_grades_by_title function\n\tgrades = hackbright.get_grades_by_title(title)\n\n\treturn render_template(\"project_info.html\", title=title, description=description, max_grade=max_grade, grades=grades)", "def view(self,) -> pn.pane.HTML:\r\n return pn.pane.HTML(self.__html__())", "def make_projects_chart(self):\n df, _ = reportutils.dataframe_from_json(self.data_path)\n reportutils.plot_project_bar(df)", "def makeProj():\n return render_template('maker/projects.html', title=\"Maker - Projects\", year=year)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The callback to show and collapse each group. Also toggles the show/hide buttons. Triggered when the show or hide button is clicked.
def _toggle_experiment_group_collapsable(last_show_click, last_hide_click): last_show_click = last_show_click if last_show_click else 0 last_hide_click = last_hide_click if last_hide_click else 0 # "show" is clicked: open collabsable, hide "show" button & un-hide "hide" button if int(last_show_click) > int(last_hide_click): return True, True, False # "hide" is clicked: close collabsable, un-hide "show" button & hide "hide" button elif int(last_hide_click) > int(last_show_click): return False, False, True # nothing yet is clicked: return default states else: return False, False, True
[ "def showGroupMenu( self ):\n group_active = self.isGroupingActive()\n group_by = self.groupBy()\n \n menu = XMenu(self)\n menu.setTitle('Grouping Options')\n menu.setShowTitle(True)\n menu.addAction('Edit Advanced Grouping')\n \n menu.addSeparator()\n \n action = menu.addAction('No Grouping')\n action.setCheckable(True)\n action.setChecked(not group_active)\n \n action = menu.addAction('Advanced')\n action.setCheckable(True)\n action.setChecked(group_by == self.GroupByAdvancedKey and group_active)\n if ( group_by == self.GroupByAdvancedKey ):\n font = action.font()\n font.setBold(True)\n action.setFont(font)\n \n menu.addSeparator()\n \n # add dynamic options from the table schema\n tableType = self.tableType()\n if ( tableType ):\n columns = tableType.schema().columns()\n columns.sort(key = lambda x: x.displayName())\n for column in columns:\n action = menu.addAction(column.displayName())\n action.setCheckable(True)\n action.setChecked(group_by == column.displayName() and\n group_active)\n \n if ( column.displayName() == group_by ):\n font = action.font()\n font.setBold(True)\n action.setFont(font)\n \n point = QPoint(0, self.uiGroupOptionsBTN.height())\n action = menu.exec_(self.uiGroupOptionsBTN.mapToGlobal(point))\n \n if ( not action ):\n return\n elif ( action.text() == 'Edit Advanced Grouping' ):\n print 'edit advanced grouping options'\n elif ( action.text() == 'No Grouping' ):\n self.setGroupingActive(False)\n \n elif ( action.text() == 'Advanced' ):\n self.uiGroupBTN.blockSignals(True)\n self.setGroupBy(self.GroupByAdvancedKey)\n self.setGroupingActive(True)\n self.uiGroupBTN.blockSignals(False)\n \n self.refreshResults()\n \n else:\n self.uiGroupBTN.blockSignals(True)\n self.setGroupBy(str(action.text()))\n self.setGroupingActive(True)\n self.uiGroupBTN.blockSignals(False)\n \n self.refreshResults()", "def toggle(self, *_):\n if self._expanded:\n self.collapse()\n else:\n self.expand()", "def set_visibility(group, status=True):\n for tree in group:\n tree.visible = True", "def cmd_toggle_split(self):\r\n self.currentStack.toggleSplit()\r\n self.group.layoutAll()", "def displayChildren(self,master):\n for c in master.children:\n c.draw()\n \n if c.children and c.expanded:\n c.displayChildren(c)", "def createCollapseButtons(self):\n with formLayout(nd=100) as form:\n expBtn = button(l='Expand All', h=20, bgc=[0.26, 0.26, 0.26], c=Callback(self.collapseAll, False))\n cllBtn = button(l='Collapse All', h=20, bgc=[0.26, 0.26, 0.26], c=Callback(self.collapseAll, True))\n formLayout(form, e=True,\n af=[(expBtn, 'left', 1), (cllBtn, 'right', 1)],\n ap=[(expBtn, 'right', 1, 50), (cllBtn, 'left', 1, 50)]\n )", "def expand_collapsed_clicked(self):\n # Sets the button to using the correct text and other ui elements\n exp_coll = \"Expand Collapsed\"\n coll = \"Collapse Files\"\n cur = self.ui.expandCollapseButton.text()\n should_expand = True if cur == exp_coll else False\n new = coll if should_expand else exp_coll\n self.ui.expandCollapseButton.setText(new)\n\n # Handles what happens to the actual files\n if should_expand:\n self.expand_collapsed_items()\n else:\n self.collapse_items()", "def set_collapsed() -> None:\n global is_collapsed\n is_collapsed = True\n conf = config.APP.get_cur_conf(GenOptions)\n config.APP.store_conf(attrs.evolve(conf, music_collapsed=True))\n localisation.set_text(base_lbl, TRANS_BASE_COLL)\n toggle_btn_exit()\n\n # Set all music to the children - so those are used.\n set_suggested(WINDOWS[MusicChannel.BASE].chosen_id)\n\n for wid in exp_widgets:\n 
wid.grid_remove()", "def fun(obj):\n if is_checked:\n obj.show()\n else:\n obj.hide()", "def toggleMultiBeamPlot(self):\n if self.mb_dock.isVisible(): self.mb_dock.hide()\n else: self.mb_dock.show()", "def toggle_button_group(*args: ElementType,\n class_: Optional[str] = None,\n size: Optional[str] = BGSize.DEFAULT,\n **kwargs: PropertyValue) -> div:\n kwargs[\"data-toggle\"] = \"buttons\"\n return button_group(*args, class_=join(\"btn-group-toggle\", class_), size=size, **kwargs)", "def toggleChildren(self):\n javascript = \"WebElements.toggleVisibility('\" + self.childContainer.fullId() + \"');\"\n return javascript", "def toggle(self):\n # type: () -> bool\n if self._is_collapsed:\n self.expand()\n else:\n self.collapse()\n return not self._is_collapsed", "def SetVisible(self, visible):\n if self._visible != visible:\n self._visible = visible\n for action in self._actions:\n action._SetGroupVisible(visible)", "def matchCollapsedState( self ):\n collapsed = not self.isChecked()\n if self._inverted:\n collapsed = not collapsed\n \n if ( not self.isCollapsible() or not collapsed ):\n for child in self.children():\n if ( not isinstance(child, QWidget) ):\n continue\n \n child.show()\n \n self.setMaximumHeight(MAX_INT)\n self.adjustSize()\n \n if ( self.parent() ):\n self.parent().adjustSize()\n else:\n self.setMaximumHeight(self.collapsedHeight())\n \n for child in self.children():\n if ( not isinstance(child, QWidget) ):\n continue\n \n child.hide()", "def _on_cb_grid(self, evt):\r\n self.sub_plots.show_grid(self.cb_grid.IsChecked())\r\n #redraw plots\r\n self.canvas.draw()", "def _collapse_helper(self) -> None:\n self._expanded = False\n for subtree in self._subtrees:\n subtree._collapse_helper()", "def action_after_hide(self):", "def Collapse(self):\n if not self.expanded:\n return\n self.expanded = not self.expanded\n prev_selction = self.tree.current_selected\n self.update = True\n self.draw()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The callback to render a new experiment comparison plot. Triggered when new rows in the experiment table are selected or deselected.
def _update_experiment_comparison_plot( experiment_table_data, experiment_table_selected_rows, hidden_columns, anchor, data ): if experiment_table_selected_rows is None or len(experiment_table_selected_rows) == 0: return [html.Div()] commit_hash = data["commit_hash"] selected_experiment_ids = [ experiment_table_data[row]["id"] for row in experiment_table_selected_rows ] anchor_data, dimensions = app._rubicon_model.get_dimensions( commit_hash, selected_experiment_ids, hidden_columns, anchor ) return [ dcc.Graph( figure=go.Figure( go.Parcoords( line=dict(color=anchor_data, colorscale="plasma", showscale=True), dimensions=dimensions, ) ) ) ]
[ "def on_selection_change_callback(self,attr,old,new):\n\n # (un)lock Save button\n if len(self.cds.selected.indices) > 0:\n self.save.disabled = False\n else:\n self.save.disabled = True\n\n # make selection in the heatmap\n dates = []\n for i in self.cds.selected.indices:\n dates.append(self.cds.data[\"datetime_date\"][i])\n selection = []\n i = 0\n for d in self.cds_OxCGRTHeatmap.data[\"datetime_date\"]:\n if d in dates:\n selection.append(i)\n i += 1\n self.cds_OxCGRTHeatmap.selected.indices = selection", "def plot_select_callback(self):\n self.current_sparam = vna.SParam(self.plot_select.get())\n self.update_widgets()", "def previewJVplot(self):\n self.selectionError.pack_forget() # deletes the error if it was previously displayed\n self.selectedItems = self.viewDataTree.selection()\n if len(self.selectedItems) != 1:\n self.selectionError.configure(text= \"Please select one item\")\n self.selectionError.pack()\n return\n columnvalues = self.viewDataTree['columns'] # get file info from selected item\n self.attributeList = []\n deviceValues = self.viewDataTree.item(self.selectedItems, 'values')\n for k, value in enumerate(deviceValues,start=0):\n self.attributeList.append((columnvalues[k], value))\n self.allJVData = DataMethods.dataFrameJV_get(self)\n filenames = self.allJVData.groupby(by=\"File\")\n for filename in filenames:\n if self.isSelectedFile(deviceValues, filename[0]):\n filesJVData = filename[1]\n break\n self.makeJVPreviewPlot(filesJVData)", "def on_toolbar_replot(self):\n self.log.debug(\"on_toolbar_replot()\")\n\n try:\n self.collection.get_active().read_form()\n except AttributeError:\n self.log.debug(\"on_toolbar_replot(): AttributeError\")\n pass\n\n self.plot_all()", "def visualize(self, observation, action):\n raise NotImplementedError", "def on_create_selection(self, callback):\n\n self._cb_selection.append(callback)", "def _selection_changed_slot(self, selected, deselected):\n # self._test_sel_index(selected, deselected)\n self._sel_index_2(selected, deselected)", "def updateGUI(self):\n #Plot Data\n if self.obsdata and self.moddata:\n if self.ui.set_typeblock_radio.isChecked():\n observed = self.readTableData()\n modelled = self.modelled_data\n\n observedvalues = []\n modelledvalues = []\n for i in observed.keys():\n if i not in modelled[0]:\n continue\n observedvalues.append(round(float(observed[i]),2))\n modelledvalues.append(round(float(modelled[1][modelled[0].index(i)]),2))\n\n if self.ui.plottype_combo.currentIndex() == 0:\n #PLOT SCATTER MODELLED VS OBSERVED\n self.plotCalibrationScatter(observedvalues, modelledvalues)\n elif self.ui.plottype_combo.currentIndex() == 1:\n #PLOT RESIDUALS (Obs - Mod)\n pass\n elif self.ui.plottype_combo.currentIndex() == 2:\n #PLOT ERROR AS HISTOGRAM\n pass\n\n else:\n observed = float(self.ui.set_totvalue_box.text())\n modelled = self.modelled_data\n\n if self.ui.plottype_combo.currentIndex() == 0:\n #PLOT HISTOGRAM MODELLED VS OBSERVED\n self.plotCalibrationHistogram(observed, modelled)\n elif self.ui.plottype_combo.currentIndex() == 1:\n #PLOT RESIDUALS (Obs - Mod)\n pass\n elif self.ui.plottype_combo.currentIndex() == 2:\n #PLOT ERROR AS HISTOGRAM\n pass\n\n else:\n self.ui.calibrationView.setHtml(\"\")\n\n\n #Update Results Browser\n if self.moddata and self.obsdata:\n summaryline = \"\"\n summaryline += \"Summary of Calibration: \\n\"\n summaryline += \"---------------------------\\n\"\n\n if self.ui.set_typeblock_radio.isChecked():\n #1.1 General Reporting Stuff, data stats\n summaryline += \"Observed Data Points: 
\"+str(len(observedvalues))+\"\\n\"\n summaryline += \"Modelled Data Points: \"+str(len(modelled[0]))+\"\\n\"\n summaryline += \"Data Points not compared: \"+str(abs(len(modelled[0])-len(observedvalues)))+\"\\n\\n\"\n\n #1.2 Goodness of Fit Criterion\n if self.ui.set_eval_nash.isChecked():\n nashE = ubcal.calculateNashE(observedvalues, modelledvalues)\n summaryline += \"Nash-Sutcliffe E = \"+str(round(nashE,2))+\"\\n\"\n else:\n summaryline += \"Nash-Sufcliffe E = (not calculated) \\n\"\n if self.ui.set_eval_rmse.isChecked():\n rmse = ubcal.calculateRMSE(observedvalues, modelledvalues)\n summaryline += \"RMSE = \"+str(round(rmse,2))+\"\\n\"\n else:\n summaryline += \"RMSE = (not calculated) \\n\"\n if self.ui.set_eval_error.isChecked():\n avgerr, minerr, maxerr, e10, e30, e50 = ubcal.calculateRelativeError(observedvalues, modelledvalues)\n summaryline += \"Average Relative Error = \"+str(round(avgerr,1))+\"%\\n\"\n summaryline += \"Min. Relative Error = \"+str(round(minerr,1))+\"%\\n\"\n summaryline += \"Max Relative Error = \"+str(round(maxerr,1))+\"%\\n\\n\"\n summaryline += \"Data Points with < 10% Error = \"+str(round(e10,0))+\"\\n\"\n summaryline += \"Data Points with < 30% Error = \"+str(round(e30,0))+\"\\n\"\n summaryline += \"Data Points with < 50% Error = \"+str(round(e50,0))+\"\\n\"\n else:\n summaryline += \"Relative Error = (not calculated) \\n\"\n else:\n summaryline += \"Observed Data: Using Total Value \\n\"\n summaryline += \"Modelled Data: Using Total Value \\n\\n\"\n\n if self.ui.set_eval_error.isChecked():\n if observed == 0:\n err = 100.0\n else:\n err = (observed - modelled)/observed * 100.0\n summaryline += \"Relative Error = \"+str(abs(round(err,1)))+\"%\\n\"\n else:\n summaryline += \"Relative Error = (not calculated)\"\n\n self.ui.out_box.setPlainText(summaryline)\n\n else:\n self.ui.out_box.clear()\n self.ui.out_box.setPlainText(\"Results:\\n\")\n\n return True", "def on_agv_selected_callback(self, event):\n\n \"\"\"Handle the radio button selection in the part location tab.\n \"\"\"\n # Mark the radio button as selected\n # self.tab1_option0_radio_value.set(1)\n\n # clear the fields for the part information\n self.clear_tab1_option0_part_info()\n\n # delete widgets from the part information frame\n self.delete_tab1_option0_part_info_combo_box()\n\n # if an AGV is selected\n if self.tab1_option0_selected_agv_value.get() != 'None':\n # set the radio button as selected\n output = \"Part location on AGVs: \" + self.tab1_option0_selected_agv_value.get()\n # Create the form to fill out the part information for the selected AGV\n self.create_part_info_for_agv()\n else:\n output = \"Select an AGV.\"\n self.number_of_parts_selected_label.set(\"\")\n self.delete_part_number_combo_box()\n\n self.agv_selection_label.set(output)\n\n # Update the label\n label = ttk.Label(self.right_part_location_frame,\n textvariable=self.agv_selection_label, font='Arial 17 bold', anchor=\"w\").grid(column=0,\n row=0,\n sticky='w',\n padx=30,\n pady=30)\n save_button = ttk.Button(self.right_part_location_frame, text=\"Save\", command=self.save_part_location_clicked).grid(column=2,\n row=0,\n sticky='e',\n pady=30)", "def __onpick(self, event):\n self.logger.debug(\"running\")\n legend_line = event.artist\n if legend_line in self.__legend_plot_links:\n plot_lines = self.__legend_plot_links[legend_line]\n else:\n self.logger.debug(\"done, no matched lines\")\n return\n for line in plot_lines:\n visible = not line.get_visible()\n line.set_visible(visible)\n if visible:\n 
legend_line.set_alpha(1.0)\n else:\n legend_line.set_alpha(0.2)\n self.figure.canvas.draw()\n self.logger.debug(\"done\")", "def item_selection_changed(self):\n pass", "def observe(self, slot):\n\n self.cell_changed.connect(slot)", "def plot_callback():\n global current_pv\n striptool.update(current_pv)", "def _on_cb_grid(self, evt):\r\n self.sub_plots.show_grid(self.cb_grid.IsChecked())\r\n #redraw plots\r\n self.canvas.draw()", "def _updateCellPlot(self):\n if (self._previousSelectedAttribute == self._selectedAttribute and self._previousSelectedAttributeCell == self._selectedCell) or self._selectedCell is None:\n return\n \n self._previousSelectedAttribute = self._selectedAttribute\n self._previousSelectedAttributeCell = self._selectedCell\n \n data = []\n timestamps = []\n time = 0\n variables = self.getCellVariables()\n for frameNumber in range(*self._parser.getFrameRange()):\n cell = self.getSelectedCell(frameNumber)\n if cell.shape[0] == 0:\n continue\n data.append(cell[variables[self._selectedAttribute]])\n timestamps.append(time)\n time += 1\n \n if self._cellFigure is not None:\n plt.figure(self._cellFigure.get_label())\n self._cellAx.clear()\n ln, = self._cellAx.plot(timestamps, data)\n self._cellAx.set_xlabel('time')\n self._cellAx.set_ylabel(self._selectedAttribute)\n self._cellAx.set_title(self._selectedAttribute + \" for cell \" + str(int(self._selectedCell)))\n ln.set_color('orange')", "def on_menuNewExperiment_activate(self, widget):\n gladefilename = \"main.glade\"\n gladefile = os.path.dirname(__file__) + \"/\" + gladefilename\n\n windowname = \"dialogCreateExperiment\"\n try:\n experiment = gtk.glade.XML(gladefile, windowname)\n dialog = experiment.get_widget(\"dialogCreateExperiment\")\n\n # populate the experiment available list\n retval = dialog.run()\n\n\n \n if retval == 1:\n # OK was hit, get the relvant values\n entry = experiment.get_widget(\"entryExperimentFilename\")\n name= entry.props.text\n self.recorder.CreateExperiment(name)\n # now set the cursor to the newest experiment\n \n else:\n pass\n except NameError:\n print \"Name already exists\" \n pass\n dialog.hide()\n self.treeviewExperiments.expand_all()", "def onAttriChange(self, action):\n self._selectedAttribute = action.new\n self._updateCellPlot()", "def interactive_plot(df, fig, display_func, event=\"hover\"):\n\n output = Output()\n\n def update(trace, points, state):\n ind = points.point_inds[0]\n row = df.loc[ind].to_dict()\n with output :\n output.clear_output(wait=True)\n display_func(row)\n\n fig = go.FigureWidget(data=fig.data, layout=fig.layout)\n\n if event == \"hover\":\n fig.data[0].on_hover(update)\n else:\n fig.data[0].on_click(update)\n\n return VBox([fig, output])", "def handleItemClick_on_table(self):\r\n try:\r\n index_lst = []\r\n for item in self.show_all_table_widget.selectedItems():\r\n index_lst.append(item.row())\r\n print(\"index_lst = \", index_lst)\r\n index = index_lst[-1]\r\n print(\"index = \", index_lst[-1])\r\n a_lst, b_lst = self.all_lst[index][0], self.all_lst[index][1]\r\n self.a1 = deepcopy(a_lst[0])\r\n self.b1 = deepcopy(b_lst[0])\r\n self.a2 = deepcopy(a_lst[1])\r\n self.b2 = deepcopy(b_lst[1])\r\n self.plot_cell()\r\n except Exception as e:\r\n print(e)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The callback to render the grouped project explorer. Triggered when a project is selected in the project selection list.
def _update_project_explorer(values): # if all values are 0, the user hasn't clicked a project yet is_waiting_for_first_click = True for value in values: if value != 0: is_waiting_for_first_click = False if is_waiting_for_first_click: return make_empty_view("Please select a project to view.") # use `dash.callback_context` to get the id of the clicked project list item selected_id = callback_context.triggered[0]["prop_id"].split(".")[0] selected_project_name = json.loads(selected_id)["index"] app._rubicon_model.update_selected_project(selected_project_name) project_explorer_header = dbc.Row( id="experiment-deatils-header", className="experiment-details-header", children=selected_project_name, ) experiment_groups = app._rubicon_model._experiment_table_dfs.items() # handle no experiments view if len(experiment_groups) == 0: return [project_explorer_header, make_empty_view("Log some experiments to this project!")] _project_explorers = [ make_individual_project_explorer_layout(app._rubicon_model, group) for group, _ in experiment_groups ] return [project_explorer_header, *_project_explorers]
[ "def make_project_explorer_layout():\n return dbc.Col(id=\"grouped-project-explorer\", className=\"grouped-project-explorer\")", "def on_select(self):\n self.selected = True\n for tile in self.divisions:\n if tile is not self:\n tile.group_selected = True", "def viewproject(self):\r\n try:\r\n if self.tab_tree.isHidden():\r\n self.tab_tree.setVisible(True)\r\n else:\r\n self.tab_tree.setVisible(False)\r\n except Exception as e:\r\n print(e)", "def selected_proj(self, value):\n self.proj['text'] = value\n self.redraw()", "def make_individual_project_explorer_layout(rubicon_model, commit_hash):\n id = str(uuid.uuid4())\n\n experiment_table_df = rubicon_model.get_experiment_table_df(commit_hash)\n github_commit_url = _get_github_commit_url(\n rubicon_model.selected_project.github_url, commit_hash\n )\n\n group_store = dcc.Store(\n id={\"type\": \"group-store\", \"index\": id},\n data={\"commit_hash\": commit_hash},\n )\n\n group_preview_title = [\n html.P(\n f\"{len(experiment_table_df)} experiments\",\n id=\"group-preview-title\",\n className=\"group-preview-title\",\n )\n ]\n\n if commit_hash is not None and rubicon_model.selected_project.github_url is not None:\n group_preview_title.append(\n html.A(\n f\"at commit {commit_hash[:7]}\",\n id=\"group-preview-title-link\",\n className=\"group-preview-title-link\",\n href=github_commit_url,\n target=\"_blank\",\n )\n )\n\n group_model_names = rubicon_model.get_model_names(commit_hash)\n if len(group_model_names) > 0:\n group_model_names_text = f\"model name: {group_model_names[0]}\"\n if len(group_model_names) > 1:\n group_model_names_text += f\" (+{len(group_model_names) - 1} more)\"\n\n group_model_names_view = html.P(\n group_model_names_text,\n id=\"group-preview-model-names\",\n className=\"group-preview-model-names\",\n )\n else:\n group_model_names_view = html.P(style={\"display\": \"none\"})\n\n chevron = html.I(className=\"fas fa-chevron-down\")\n\n group_preview_row = dbc.Row(\n id={\"type\": \"group-preview-row\", \"index\": id},\n className=\"group-preview-row\",\n children=[\n dbc.Row(group_preview_title, style={\"margin\": \"inherit\"}),\n group_model_names_view,\n html.Button(\n chevron,\n id={\"type\": \"show-group-detail-collapsable-btn\", \"index\": id},\n className=\"show-group-detail-collapsable-btn\",\n ),\n html.Button(\n chevron,\n id={\"type\": \"hide-group-detail-collapsable-btn\", \"index\": id},\n className=\"hide-group-detail-collapsable-btn\",\n hidden=True,\n ),\n ],\n )\n\n experiment_table_bulk_action_button_group = html.Div(\n className=\"btn-group\",\n children=[\n html.Button(\n \"Select All\",\n id={\"type\": \"select-all-btn\", \"index\": id},\n className=\"btn-progressive\",\n ),\n html.Button(\n \"Clear All\",\n id={\"type\": \"clear-all-btn\", \"index\": id},\n className=\"btn-progressive\",\n ),\n ],\n )\n\n group_detail_card = dbc.Card(\n id={\"type\": \"group-detail-card\", \"index\": id},\n className=\"group-detail-card\",\n children=[\n dbc.CardBody(\n id={\"type\": \"group-detail-card-body\", \"index\": id},\n className=\"group-detail-card-body\",\n children=[\n experiment_table_bulk_action_button_group,\n _get_experiment_table(id, experiment_table_df),\n _get_comparison_layout(id, rubicon_model, commit_hash),\n ],\n )\n ],\n )\n\n group_detail_collapsable = dbc.Collapse(\n id={\"type\": \"group-detail-collapsable\", \"index\": id},\n className=\"group-detail-collapsable\",\n children=[group_detail_card],\n )\n\n return dbc.Row(\n id={\"type\": \"individual-project-explorer\", \"index\": id},\n 
className=\"individual-project-explorer\",\n children=[dbc.Col([group_store, group_preview_row, group_detail_collapsable])],\n )", "def _project_created(self, worker, output, error):\n self._update_project_config(path=worker.path)\n self.update_visibility(disabled=False)\n\n # Select the project after created\n for i in range(self.list_projects.count()):\n item = self.list_projects.item(i)\n\n if worker.path == item.path:\n self.list_projects.setCurrentRow(i)\n self.list_projects.itemClicked.emit(item)\n update_pointer()", "def on_storeProject(self):\n self.log.detail(\">>> Launch 'Store Project' ...\")\n selItems = self.tw_allProjects.selectedItems() or []\n if selItems:\n #-- Check Project --#\n if selItems[0].project in self.getPinedProjects:\n mess = \"!!! Project %r already in pinedProjects, Skipp !!!\" % selItems[0].project\n pQt.errorDialog(mess, self)\n raise ValueError(mess)\n #-- Add Poject --#\n self.foundation.userGroups._user.addPinedProject(selItems[0].project)\n self.foundation.userGroups._user.writeFile()\n #-- Refresh --#\n self.rf_projectTree('myProjects')", "def onRunProjectButtonClicked(self, widget):\n prj_descriptions = prj_func.getProjectDescriptions()\n\n prj_data = list(prj_descriptions.items())\n prj_data.sort()\n prj_names = [name for name, description in prj_data]\n prj_items = [u'%s\\t:\\t%s' % (name, description) for name, description in prj_data]\n selected_prj_idx = dlg_func.getSingleChoiceIdxDlg(parent=self, title='PROJECTS',\n prompt_text=u'Select a project to run:',\n choices=prj_items)\n if selected_prj_idx >= 0:\n selected_prj_name = prj_names[selected_prj_idx]\n self._project_manager.run(selected_prj_name)\n\n self.getGtkTopObject().close()", "def get_context_data(self, **kwargs):\n # Get super's context. This is the dictionary of variables for the base template being rendered.\n context = super(GroupView, self).get_context_data(**kwargs)\n\n # Add the project to the context.\n context['group'] = self.group\n context['projects'] = DataProject.objects.filter(group=self.group, visible=True).order_by(F('order').asc(nulls_last=True))\n\n return context", "def rf_projectTree(self, treeName):\n #-- Get Projects --#\n if treeName == 'allProjects':\n self.log.detail(\"Build 'All Projects' tree ...\")\n projects = self.foundation.project.projects\n treeWidget = self.tw_allProjects\n else:\n self.log.detail(\"Build 'My Projects' tree ...\")\n projects = self.foundation.userGroups._user.userPinedProjects\n treeWidget = self.tw_myProjects\n #-- Populate Tree --#\n treeWidget.clear()\n for project in projects:\n projectFile = pFile.conformPath(os.path.join(self.foundation.__projectsPath__, project, '%s.py' % project))\n datas = pFile.readDictFile(projectFile)\n newItem = self.new_projectItem(project, datas, treeWidget)\n treeWidget.addTopLevelItem(newItem)\n #-- Refresh --#\n self.rf_treeColumns(treeWidget)\n treeWidget.sortItems(0, QtCore.Qt.AscendingOrder)", "def display_project():\n\n return render_template(\"project_info.html\")", "def on_removeProject(self):\n self.log.detail(\">>> Launch 'remove Project' ...\")\n selItems = self.tw_myProjects.selectedItems() or []\n if selItems:\n #-- Check Project --#\n if selItems[0].project not in self.getPinedProjects:\n mess = \"!!! 
Project %r not found, Skipp !!!\" % selItems[0].project\n pQt.errorDialog(mess, self)\n raise ValueError(mess)\n #-- Remove Poject --#\n self.foundation.userGroups._user.delPinedProject(selItems[0].project)\n self.foundation.userGroups._user.writeFile()\n #-- Refresh --#\n self.rf_projectTree('myProjects')", "def item_project(self, name):\n selector = (\n './/ul[contains(@class, \"dropdown-menu\")]/li//span[contains('\n '@class, \"dropdown-title\") and contains(., \"{}\")]'.format(name))\n\n item = ui.UI(By.XPATH, selector)\n item.container = self\n return item", "def populateGroups(self):\n\n # get a list of the existing folders in projects\n selectedProject = self.projectMenu.currentText()\n project = os.path.join(self.projectPath, selectedProject)\n existingGroups = os.listdir(project)\n folders = []\n\n # find out which returned items are directories\n for each in existingGroups:\n if os.path.isdir(os.path.join(project, each)):\n folders.append(each)\n\n # otherwise, add each project to the combo box\n self.groupMenu.clear()\n self.groupMenu.addItem(\" \")\n for each in folders:\n self.groupMenu.addItem(each)\n\n # populate characters\n self.populateCharacters()", "def __showRecentMultiProjectsMenu(self):\n self.recentMultiProjects = []\n self.rsettings.sync()\n self.__loadRecentMultiProjects()\n \n self.recentMultiProjectsMenu.clear()\n \n idx = 1\n for rmp in self.recentMultiProjects:\n if idx < 10:\n formatStr = '&{0:d}. {1}'\n else:\n formatStr = '{0:d}. {1}'\n act = self.recentMultiProjectsMenu.addAction(\n formatStr.format(\n idx, Utilities.compactPath(rmp, self.maxMenuFilePathLen)))\n act.setData(rmp)\n idx += 1", "def populateProjects(self):\n\n # if the project path doesn't exist on disk, create it\n if not os.path.exists(self.projectPath):\n os.makedirs(self.projectPath)\n\n # get a list of the existing folders in projects\n existingProjects = os.listdir(self.projectPath)\n folders = []\n\n # find out which returned items are directories\n for each in existingProjects:\n if os.path.isdir(os.path.join(self.projectPath, each)):\n folders.append(each)\n\n # add each project to the combo box\n self.projectMenu.clear()\n for each in folders:\n self.projectMenu.addItem(each)\n\n # find selected project and populate groups\n self.populateGroups()", "def onSelected(self):\r\n self.node.scene.grScene.itemSelected.emit()", "def _create_project_list(self):\n # Create a ListStore model\n self._model = gtk.ListStore(bool, object)\n for project in self._projects:\n self._model.append([True, project])\n\n # Create the TreeView\n view = gtk.TreeView(self._model)\n view.set_headers_visible(False)\n\n # Create the check-box column\n toggle_renderer = gtk.CellRendererToggle()\n toggle_renderer.set_property('activatable', True)\n toggle_renderer.connect(\"toggled\", self._toggled_cb, (self._model, 0))\n toggle_column = gtk.TreeViewColumn('Save', toggle_renderer)\n toggle_column.add_attribute(toggle_renderer, 'active', 0)\n view.append_column(toggle_column)\n\n # Create the project column\n def render_func(treeviewcolumn, renderer, model, iter):\n project = model.get_value(iter, 1)\n renderer.set_property('text', project.name)\n return\n text_renderer = gtk.CellRendererText() \n text_column = gtk.TreeViewColumn('Project', text_renderer)\n text_column.set_cell_data_func(text_renderer, render_func)\n view.append_column(text_column)\n\n return view", "def _refresh_project_entry(self, project):\n action_path = \"/MainMenu/ProjectMenu/%s\" % project.get_id()\n action = 
self._ui_manager.get_action(action_path)\n action.set_property('label', project.name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The html layout for the dashboard's grouped project explorer view. This view holds a collection of collapsable project explorer groups. Each group shows some top level information and provides interactions to drill into the underlying group's data.
def make_project_explorer_layout():
    return dbc.Col(id="grouped-project-explorer", className="grouped-project-explorer")
[ "def make_individual_project_explorer_layout(rubicon_model, commit_hash):\n id = str(uuid.uuid4())\n\n experiment_table_df = rubicon_model.get_experiment_table_df(commit_hash)\n github_commit_url = _get_github_commit_url(\n rubicon_model.selected_project.github_url, commit_hash\n )\n\n group_store = dcc.Store(\n id={\"type\": \"group-store\", \"index\": id},\n data={\"commit_hash\": commit_hash},\n )\n\n group_preview_title = [\n html.P(\n f\"{len(experiment_table_df)} experiments\",\n id=\"group-preview-title\",\n className=\"group-preview-title\",\n )\n ]\n\n if commit_hash is not None and rubicon_model.selected_project.github_url is not None:\n group_preview_title.append(\n html.A(\n f\"at commit {commit_hash[:7]}\",\n id=\"group-preview-title-link\",\n className=\"group-preview-title-link\",\n href=github_commit_url,\n target=\"_blank\",\n )\n )\n\n group_model_names = rubicon_model.get_model_names(commit_hash)\n if len(group_model_names) > 0:\n group_model_names_text = f\"model name: {group_model_names[0]}\"\n if len(group_model_names) > 1:\n group_model_names_text += f\" (+{len(group_model_names) - 1} more)\"\n\n group_model_names_view = html.P(\n group_model_names_text,\n id=\"group-preview-model-names\",\n className=\"group-preview-model-names\",\n )\n else:\n group_model_names_view = html.P(style={\"display\": \"none\"})\n\n chevron = html.I(className=\"fas fa-chevron-down\")\n\n group_preview_row = dbc.Row(\n id={\"type\": \"group-preview-row\", \"index\": id},\n className=\"group-preview-row\",\n children=[\n dbc.Row(group_preview_title, style={\"margin\": \"inherit\"}),\n group_model_names_view,\n html.Button(\n chevron,\n id={\"type\": \"show-group-detail-collapsable-btn\", \"index\": id},\n className=\"show-group-detail-collapsable-btn\",\n ),\n html.Button(\n chevron,\n id={\"type\": \"hide-group-detail-collapsable-btn\", \"index\": id},\n className=\"hide-group-detail-collapsable-btn\",\n hidden=True,\n ),\n ],\n )\n\n experiment_table_bulk_action_button_group = html.Div(\n className=\"btn-group\",\n children=[\n html.Button(\n \"Select All\",\n id={\"type\": \"select-all-btn\", \"index\": id},\n className=\"btn-progressive\",\n ),\n html.Button(\n \"Clear All\",\n id={\"type\": \"clear-all-btn\", \"index\": id},\n className=\"btn-progressive\",\n ),\n ],\n )\n\n group_detail_card = dbc.Card(\n id={\"type\": \"group-detail-card\", \"index\": id},\n className=\"group-detail-card\",\n children=[\n dbc.CardBody(\n id={\"type\": \"group-detail-card-body\", \"index\": id},\n className=\"group-detail-card-body\",\n children=[\n experiment_table_bulk_action_button_group,\n _get_experiment_table(id, experiment_table_df),\n _get_comparison_layout(id, rubicon_model, commit_hash),\n ],\n )\n ],\n )\n\n group_detail_collapsable = dbc.Collapse(\n id={\"type\": \"group-detail-collapsable\", \"index\": id},\n className=\"group-detail-collapsable\",\n children=[group_detail_card],\n )\n\n return dbc.Row(\n id={\"type\": \"individual-project-explorer\", \"index\": id},\n className=\"individual-project-explorer\",\n children=[dbc.Col([group_store, group_preview_row, group_detail_collapsable])],\n )", "def _update_project_explorer(values):\n # if all values are 0, the user hasn't clicked a project yet\n is_waiting_for_first_click = True\n for value in values:\n if value != 0:\n is_waiting_for_first_click = False\n\n if is_waiting_for_first_click:\n return make_empty_view(\"Please select a project to view.\")\n\n # use `dash.callback_context` to get the id of the clicked project list item\n 
selected_id = callback_context.triggered[0][\"prop_id\"].split(\".\")[0]\n selected_project_name = json.loads(selected_id)[\"index\"]\n\n app._rubicon_model.update_selected_project(selected_project_name)\n\n project_explorer_header = dbc.Row(\n id=\"experiment-deatils-header\",\n className=\"experiment-details-header\",\n children=selected_project_name,\n )\n\n experiment_groups = app._rubicon_model._experiment_table_dfs.items()\n\n # handle no experiments view\n if len(experiment_groups) == 0:\n return [project_explorer_header, make_empty_view(\"Log some experiments to this project!\")]\n\n _project_explorers = [\n make_individual_project_explorer_layout(app._rubicon_model, group)\n for group, _ in experiment_groups\n ]\n\n return [project_explorer_header, *_project_explorers]", "def get_context_data(self, **kwargs):\n # Get super's context. This is the dictionary of variables for the base template being rendered.\n context = super(GroupView, self).get_context_data(**kwargs)\n\n # Add the project to the context.\n context['group'] = self.group\n context['projects'] = DataProject.objects.filter(group=self.group, visible=True).order_by(F('order').asc(nulls_last=True))\n\n return context", "def showGroupMenu( self ):\n group_active = self.isGroupingActive()\n group_by = self.groupBy()\n \n menu = XMenu(self)\n menu.setTitle('Grouping Options')\n menu.setShowTitle(True)\n menu.addAction('Edit Advanced Grouping')\n \n menu.addSeparator()\n \n action = menu.addAction('No Grouping')\n action.setCheckable(True)\n action.setChecked(not group_active)\n \n action = menu.addAction('Advanced')\n action.setCheckable(True)\n action.setChecked(group_by == self.GroupByAdvancedKey and group_active)\n if ( group_by == self.GroupByAdvancedKey ):\n font = action.font()\n font.setBold(True)\n action.setFont(font)\n \n menu.addSeparator()\n \n # add dynamic options from the table schema\n tableType = self.tableType()\n if ( tableType ):\n columns = tableType.schema().columns()\n columns.sort(key = lambda x: x.displayName())\n for column in columns:\n action = menu.addAction(column.displayName())\n action.setCheckable(True)\n action.setChecked(group_by == column.displayName() and\n group_active)\n \n if ( column.displayName() == group_by ):\n font = action.font()\n font.setBold(True)\n action.setFont(font)\n \n point = QPoint(0, self.uiGroupOptionsBTN.height())\n action = menu.exec_(self.uiGroupOptionsBTN.mapToGlobal(point))\n \n if ( not action ):\n return\n elif ( action.text() == 'Edit Advanced Grouping' ):\n print 'edit advanced grouping options'\n elif ( action.text() == 'No Grouping' ):\n self.setGroupingActive(False)\n \n elif ( action.text() == 'Advanced' ):\n self.uiGroupBTN.blockSignals(True)\n self.setGroupBy(self.GroupByAdvancedKey)\n self.setGroupingActive(True)\n self.uiGroupBTN.blockSignals(False)\n \n self.refreshResults()\n \n else:\n self.uiGroupBTN.blockSignals(True)\n self.setGroupBy(str(action.text()))\n self.setGroupingActive(True)\n self.uiGroupBTN.blockSignals(False)\n \n self.refreshResults()", "def viewproject(self):\r\n try:\r\n if self.tab_tree.isHidden():\r\n self.tab_tree.setVisible(True)\r\n else:\r\n self.tab_tree.setVisible(False)\r\n except Exception as e:\r\n print(e)", "def viewGroups(request):\n # Access control - check user is logged in before displaying page\n try:\n user_id = request.session['user_id']\n except:\n return render(request, 'login.html')\n\n # Select all the events from the events table and save them into a dictionary,\n # pass to the showevents template\n\n 
context = getViewGroupsData(request)\n return render(request, 'showgroups.html', context)", "def groups_ajax():\n base_query = db.session.query(ResearchGroup)\n\n abbrv = {'search': ResearchGroup.abbreviation,\n 'order': ResearchGroup.abbreviation,\n 'search_collation': 'utf8_general_ci'}\n active = {'order': ResearchGroup.active}\n name = {'search': ResearchGroup.name,\n 'order': ResearchGroup.name,\n 'search_collation': 'utf8_general_ci'}\n colour = {'search': ResearchGroup.colour,\n 'order': ResearchGroup.colour,\n 'search_collation': 'utf8_general_ci'}\n website = {'search': ResearchGroup.website,\n 'order': ResearchGroup.website,\n 'search_collation': 'utf8_general_ci'}\n\n columns = {'abbrv': abbrv,\n 'active': active,\n 'name': name,\n 'colour': colour,\n 'website': website}\n\n with ServerSideSQLHandler(request, base_query, columns) as handler:\n return handler.build_payload(ajax.admin.groups_data)", "def populateGroups(self):\n\n # get a list of the existing folders in projects\n selectedProject = self.projectMenu.currentText()\n project = os.path.join(self.projectPath, selectedProject)\n existingGroups = os.listdir(project)\n folders = []\n\n # find out which returned items are directories\n for each in existingGroups:\n if os.path.isdir(os.path.join(project, each)):\n folders.append(each)\n\n # otherwise, add each project to the combo box\n self.groupMenu.clear()\n self.groupMenu.addItem(\" \")\n for each in folders:\n self.groupMenu.addItem(each)\n\n # populate characters\n self.populateCharacters()", "def initTreeElements(self):\n\t\t# Get application path:\n\t\tdir = os.path.dirname(os.path.realpath(__file__))\n\n\t\ttry:\n\t\t\t# Remove all widgets from the metadata layout:\n\t\t\tfor i in reversed(range(self.showMetadataLayout.count())): \n\t\t\t\twidgetToRemove = self.showMetadataLayout.itemAt(i).widget()\n\t\t\t\tself.showMetadataLayout.removeWidget(widgetToRemove)\n\t\t\t\twidgetToRemove.setParent(None)\n\t\t\n\t\t\t# Remove first all items in the tree view (there's a bug in Qt):\n\t\t\tself.treeWidget.collapseAll()\n\t\t\tself.treeWidget.setCurrentItem(self.root)\n\t\t\tself.root.takeChildren()\n\t\t\tself.treeWidget.removeItemWidget(self.root,0)\n\t\t\t\n\t\t\t# Add root element as the name of the file:\n\t\t\t \n\t\t\tself.treeWidget.addTopLevelItem(self.root) \n\t\t\tself.root.setIcon(0, QIcon(dir + \"/resources/home.png\"))\n\t\t\tself.root.setData(0, Qt.UserRole, self.HDF5File.name)\n\t\t\tself.root.setText(0, os.path.basename(self.HDF5File.filename))\t\n\t\t\tself.root.setExpanded(True)\n\n\t\t\t# Get all the groups:\n\t\t\tfor key in self.HDF5File.keys():\n\t\t\t\tself._initTreeElements(self.HDF5File[key], self.root)\n\n\t\texcept IOError as e: \n\t\t\t eprint(\"Unable to open file: \" + self.HDF5File + \".\")", "def group(self):\n\n return self.parent().group()", "def group_list(request, org_id):\n group_html, _ = view_util.group_list_html(int(org_id))\n\n return HttpResponse(group_html)", "def construct_tabs(self):\n for key, config_list in self.groups.items():\n page = ConfigPage(self.notebook, config_list)\n self.notebook.AddPage(page, key)\n self.clean_edit_state()", "def edit_skill_groups():\n if not validate_is_admin_or_convenor('edit_tags'):\n return home_dashboard()\n\n return render_template('admin/transferable_skills/edit_skill_groups.html', subpane='groups')", "def write_layout(self):\n # Welcome message\n if self.verbose > 0:\n print(\"[info] Generating layout in {0}...\".format(self.layoutdir))\n\n # Top selection panel\n indices = [\n \"\"\"<li><a 
href=\"{{{{ pathto('generated/{0}') }}}}\">\"\"\"\n \"\"\"{1}</a></li>\"\"\".format(x, self.title_for(x))\n for x in self.module_names]\n\n # Carousel items\n carousel_items = [item for item in os.listdir(self.carouselpath)]\n if len(carousel_items) == 0:\n raise IOError(\"No data found in folder '{0}'.\".format(\n self.carouselpath))\n images = []\n indicators = []\n for cnt, item in enumerate(carousel_items):\n if cnt == 0:\n indicators.append(\n \"<li data-target='#examples_carousel' data-slide-to='0' \"\n \"class='active'></li>\")\n images.append(\n \"\"\"<div class=\"active item\">\"\"\"\n \"\"\"<a href=\"{{pathto('index')}}\">\"\"\"\n \"\"\"<img src=\"{{ pathto('_static/carousel/%s', 1) }}\">\"\"\"\n \"\"\"</div></a>\"\"\" % item)\n else:\n indicators.append(\n \"<li data-target='#examples_carousel' data-slide-to='{0}' \"\n \"</li>\".format(cnt))\n images.append(\n \"\"\"<div class=\"item\"><a href=\"{{pathto('index')}}\">\"\"\"\n \"\"\"<img src=\"{{ pathto('_static/carousel/%s', 1) }}\">\"\"\"\n \"\"\"</a></div>\"\"\" % item)\n\n # Create layout maping\n pysphinxdoc_info = {}\n info_file = os.path.join(os.path.dirname(__file__), \"info.py\")\n with open(info_file) as open_file:\n exec(open_file.read(), pysphinxdoc_info)\n layout_info = {\n \"NAME_LOWER\": self.root_module_name,\n \"NAME_UPPER\": self.root_module_name.upper(),\n \"INDEX\": \"\\n\".join(indices),\n \"CAROUSEL_INDICATORS\": \"\\n\".join(indicators),\n \"CAROUSEL_IMAGES\": \"\\n\".join(images),\n \"DESCRIPTION\": self.rst2html(self.release_info[\"DESCRIPTION\"]),\n \"SUMMARY\": self.rst2html(self.release_info[\"SUMMARY\"]),\n \"LOGO\": self.root_module_name,\n \"URL\": self.release_info[\"URL\"],\n \"EXTRAURL\": (self.release_info.get(\"EXTRAURL\") or\n pysphinxdoc_info[\"URL\"]),\n \"EXTRANAME\": self.release_info.get(\"EXTRANAME\") or \"PYSPHINXDOC\"\n }\n\n # Start writting the layout\n template_layout_file = os.path.join(\n os.path.dirname(__file__), \"resources\", \"layout.html\")\n layout_file = os.path.join(self.layoutdir, \"layout.html\")\n self.write_from_template(layout_file, template_layout_file,\n layout_info)", "def create_layout():\n return html.Div(\n [\n dcc.Store(id='slug_memory'), # local storage for collection slug value\n html.Div(\n [\n html.H1('Collections', className=\"text-center mt-3\"),\n html.P('Utforsk noen utvalgte collections. 
Trykk på bildeknappene for å se samlingene', className=\"text-center\"),\n generate_img_buttons(),\n ],\n className=\"header\",\n ),\n html.Div([\n html.H1(id=\"col_title\", className=\"text-muted\"),\n html.Div(id=\"col_pag\"),\n ], className=\"d-md-flex flex-md-row justify-content-sm-between\"),\n html.Div(id=\"collection_content\"),\n ],\n className=\"main\"\n )", "def _layoutHGroup(group, parent, children, labels):\n\n if group.wrap: sizer = wx.WrapSizer(wx.HORIZONTAL)\n else: sizer = wx.BoxSizer(wx.HORIZONTAL)\n\n for cidx in range(len(children)):\n\n vItem = group.children[cidx]\n\n if isinstance(vItem, parts.LinkBox):\n sizer.Add(children[cidx], flag=wx.ALIGN_CENTER_VERTICAL |\n wx.ALIGN_CENTER_HORIZONTAL)\n\n else:\n\n if labels is not None and labels[cidx] is not None:\n\n if group.vertLabels:\n panel = wx.Panel(parent, style=wx.SUNKEN_BORDER)\n pSizer = wx.BoxSizer(wx.VERTICAL)\n panel.SetSizer(pSizer)\n\n labels[ cidx].Reparent(panel)\n children[cidx].Reparent(panel)\n\n pSizer.Add(labels[ cidx], flag=wx.EXPAND)\n pSizer.Add(children[cidx], flag=wx.EXPAND)\n sizer .Add(panel, flag=wx.EXPAND)\n else:\n sizer.Add(labels[ cidx], flag=wx.EXPAND)\n sizer.Add(children[cidx], flag=wx.EXPAND, proportion=1)\n else:\n sizer.Add(children[cidx], flag=wx.EXPAND, proportion=1)\n\n # TODO I have not added support\n # for child groups with borders\n\n parent.SetSizer(sizer)", "def rf_projectTree(self, treeName):\n #-- Get Projects --#\n if treeName == 'allProjects':\n self.log.detail(\"Build 'All Projects' tree ...\")\n projects = self.foundation.project.projects\n treeWidget = self.tw_allProjects\n else:\n self.log.detail(\"Build 'My Projects' tree ...\")\n projects = self.foundation.userGroups._user.userPinedProjects\n treeWidget = self.tw_myProjects\n #-- Populate Tree --#\n treeWidget.clear()\n for project in projects:\n projectFile = pFile.conformPath(os.path.join(self.foundation.__projectsPath__, project, '%s.py' % project))\n datas = pFile.readDictFile(projectFile)\n newItem = self.new_projectItem(project, datas, treeWidget)\n treeWidget.addTopLevelItem(newItem)\n #-- Refresh --#\n self.rf_treeColumns(treeWidget)\n treeWidget.sortItems(0, QtCore.Qt.AscendingOrder)", "def pi_group_browser():\n\n c = get_cursor()\n\n c.execute(\"\"\"select * from pi_design_group\"\"\")\n\n rows = c.fetchall()\n # now grab the associated product designs\n for row in rows:\n c.execute(\"\"\"select pd.*, p.name as product_name\n from (product_design as pd, product as p)\n where pd.pi_design_group_id = %s\n and p.product_id = pd.product_id\n order by product_design_id\"\"\",\n (row['pi_design_group_id'],))\n\n row['product_designs'] = c.fetchall()\n for product_design in row['product_designs']:\n c.execute(\"\"\"select b480x430_afile, b96x96_afile\n from product_design_detail_image\n where product_design_id = %s\n order by seq\"\"\",\n (product_design['product_design_id'],))\n product_design['detail_images'] = c.fetchall()\n\n pi_groups = {\n 'pi_design_groups': rows\n }\n\n c.execute(\"\"\"select * from pi_product_group\"\"\")\n\n rows = c.fetchall()\n # now grab the associated product designs\n for row in rows:\n c.execute(\"\"\"select pd.*, p.name as product_name\n from (product_design as pd, product as p)\n where pd.pi_product_group_id = %s\n and p.product_id = pd.product_id\n order by product_design_id\"\"\",\n (row['pi_product_group_id'],))\n\n row['product_designs'] = c.fetchall()\n for product_design in row['product_designs']:\n c.execute(\"\"\"select b480x430_afile, b96x96_afile\n from 
product_design_detail_image\n where product_design_id = %s\n order by seq\"\"\",\n (product_design['product_design_id'],))\n product_design['detail_images'] = c.fetchall()\n\n pi_groups['pi_product_groups'] = rows\n\n return pi_groups", "def group_list(self, request, queryset=None, extra_context=None):\n if queryset is None:\n queryset = self.model.objects.all().select_related()\n queryset = queryset.order_by(self.order_groups_by)\n\n return list_detail.object_list(request, queryset,\n template_object_name='group',\n extra_context=extra_context or {},\n paginate_by=self.paginate_groups_by,\n template_name=self.list_template_name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inject variables and functions into all Flask contexts
def inject_into_context():
    return dict(
        dev_server = running_local # Variable dev_server is True if running on the GAE development server
    )
[ "def register_context_processors(app: Flask) -> None:\n app.context_processor(inject_get_alerts)\n app.context_processor(inject_get_hidden_alerts)\n app.context_processor(inject_a11y_url)", "def create_context(self):\n flask.g.context = self._context_class()", "def make_shell_context():\n import pandas as pd\n import numpy as np\n from flask import current_app as app\n from .data import db\n from .data.hobolink import get_live_hobolink_data\n from .data.hobolink import request_to_hobolink\n from .data.predictive_models import process_data\n from .data.usgs import get_live_usgs_data\n from .data.usgs import request_to_usgs\n from .twitter import compose_tweet\n return locals()", "def register_misc(app: Flask):\n\n # In most cases this is equivalent to:\n # >>> from flask.json import JSONEncoder\n # However this way of doing it is safe in case an extension overrides it.\n JSONEncoder = app.json_encoder\n\n class CustomJSONEncoder(JSONEncoder):\n \"\"\"Add support for Decimal types and datetimes.\"\"\"\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n return float(o)\n elif isinstance(o, datetime.date):\n return o.isoformat()\n else:\n return super().default(o)\n\n app.json_encoder = CustomJSONEncoder\n\n @app.shell_context_processor\n def make_shell_context():\n \"\"\"This function makes some objects available in the Flask shell without\n the need to manually declare an import. This is just a convenience for\n using the Flask shell.\n \"\"\"\n import pandas as pd\n import numpy as np\n from flask import current_app as app\n from .data import db\n from .data.hobolink import get_live_hobolink_data\n from .data.hobolink import request_to_hobolink\n from .data.predictive_models import process_data\n from .data.usgs import get_live_usgs_data\n from .data.usgs import request_to_usgs\n from .twitter import compose_tweet\n return locals()", "def register_shellcontext(app):\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n return {\n 'db': db,\n 'model': models\n }\n\n app.shell_context_processor(shell_context)", "def ctx(app, request):\n ctx = app.app_context()\n ctx.push()\n\n def teardown():\n ctx.pop()\n\n request.addfinalizer(teardown)\n\n return ctx", "def setup_environ(app, global_conf, app_conf):\n\n from examplesite.lib.templating import make_templating\n couchish_config = adminish.config.make_couchish_config(app_conf, 'examplesite.model')\n adminish_config = adminish.config.make_adminish_config(couchish_config, store_factory=lambda request: request.environ['couchish'])\n notification_service = NotificationService(global_conf['smtpHost'], emailFromAddress=global_conf['emailFromAddress'], swallowSMTPErrors=True, emailTemplateDir=global_conf['emailTemplateDir'])\n templating = make_templating(app_conf)\n\n def application(environ, start_response):\n\n # Add additional keys to the environ here.\n _db = couchdb.Database(app_conf['couchish.db.url'])\n cache_db = couchdb.Database(app_conf['cache.db.url'])\n db = couchish.CouchishStore(_db, couchish_config, pre_flush_hook=wrap_hook(environ, hooks.pre_flush_hook), post_flush_hook=wrap_hook(environ, hooks.post_flush_hook))\n environ['restish.templating'] = templating\n environ['couchish'] = db\n environ['cache'] = cache_db\n environ['adminish'] = adminish_config\n environ['searcher'] = index.Searcher(db, app_conf['index_dir'], adminish_config = adminish_config)\n environ['notification'] = notification_service\n return app(environ, start_response)\n\n return application", "def populate_jinja_environment(self, env):\n 
env.filters['registry'] = self.registry\n env.globals['flattened_url'] = self.flattened_url\n env.globals['new_etcd_discovery_token'] = self.new_etcd_discovery_token\n env.globals['load_coreos_ami'] = self.load_coreos_ami_id\n env.globals['dockersystemd'] = self._dockersystemd_template", "def ctx_setter(*args, **kwargs):\n from ..server import app_factory\n\n def setup_ctx():\n app = app_factory()\n app_ctx = flask.ctx.AppContext(app)\n app_ctx.push()\n\n env = werkzeug.test.EnvironBuilder(*args, **kwargs).get_environ()\n req_ctx = flask.ctx.RequestContext(app, env)\n req_ctx.push()\n\n return nose.tools.with_setup(setup_ctx)", "def inject_env():\n\n return dict(site.config, current_menu=current_menu)", "def setup_flask_watch(app):\n app.before_request(_start_watching_request)\n app.teardown_request(_end_watching_request)", "def request_context(app):\n\n with app.application.test_request_context() as req_context:\n yield req_context", "def user_context(function):\n\n @functools.wraps(function)\n def wrapper(*func_args, **func_kwargs):\n context = _get_context()\n func_kwargs['user_context'] = context\n return function(*func_args, **func_kwargs)\n\n return wrapper", "def add_context(self, name, value):\n raise NotImplementedError(\"please mixin an environment class\")", "def init_from_wsgi_environ(wsgi_env):\n\n for ctxvar in [v for _, v in globals().items()\n if isinstance(v, contextvars.ContextVar)]:\n value = wsgi_env.get('HTTP_X_APPENGINE_' + ctxvar.name)\n if value is not None:\n if ctxvar.name == 'USER_IS_ADMIN':\n\n value = (value == '1')\n ctxvar.set(value)", "def init_app():\n\n # __name__ is the path of the current Python module, Flask needs to know\n # where it is located to setup paths.\n # instance_relative_config tells the app that config files are not relative\n # to the instance folder.\n app = Flask(__name__, instance_relative_config=False)\n\n # gets the config information from the Config class that is stored in the\n # config.py file. This class gets the variables from the .env file\n app.config.from_object(\"config.Config\")\n\n # Creates an Environment object from flask_assets to hold a collection of\n # bundles and configuration. If initialised with an instance of Flask app\n # then webassets Jinja2 extention is automatically registered.\n assets = Environment()\n\n # the app is passed to Envoronment.init_app to allow usage by multiple\n # applications rather than passing a fixed application object, see url below:\n # https://flask-assets.readthedocs.io/en/latest/#flask_assets.Environment\n assets.init_app(app)\n\n # gets the context of the current app, in case there are multiple flask apps\n # running at the same time.\n # Import parts of our core Flask app\n with app.app_context():\n\n # imports and executes routes.py which assigns different URLs to\n # different functions which can render HTML pages from jinja2 templates\n from . 
import routes\n\n # import the compile_static_assets function from the assets.py file.\n # This function compiles a bunch of stylesheets when the app variable\n # FLASK_ENV is set to \"development\"\n from .assets import compile_static_assets\n\n # Import Dash application init_dashboard(server) function\n from .plotlydash.dashboard import init_dashboard\n\n # Give the init_dashboard function the existing flask object (app) to be\n # used as the main server that this sub-app will run on.\n app = init_dashboard(app)\n\n # Compile static assets -\n # THIS WAS TURNED OFF AS IT WAS BREAKING GOOGLE APP ENGINE\n # compile_static_assets(assets)\n\n # return the fully configured/setup app to the wsgi.py file to be run\n return app", "def main(api_app, revision_):\n\tapp.jinja_env.globals.update(revision=revision_)\n\tapp.register_blueprint(api_app, url_prefix='/api')\n\tapp.debug = settings.DEBUG\n\tapp.secret_key = settings.SECRET_KEY\n\texpire_check()", "def _initialize_flask():\n logger.info('Starting flask object')\n global app\n if app:\n pass\n else:\n app = Flask(__name__)\n\n # http://flask.pocoo.org/docs/0.10/config/\n app.config.from_pyfile('service-stag.py')\n app.config['TESTING'] = True\n app.debug = True\n return app", "def set_context(self, context):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process a move coming in from the client
def _process_move(game, movelist):

    assert game is not None

    if game.is_over():
        return jsonify(result = Error.GAME_NOT_FOUND)

    player_index = game.player_to_move()

    # Parse the move from the movestring we got back
    m = Move(u'', 0, 0)
    try:
        for mstr in movelist:
            if mstr == u"pass":
                # Pass move
                m = PassMove()
                break
            if mstr[0:5] == u"exch=":
                # Exchange move
                m = ExchangeMove(mstr[5:])
                break
            if mstr == u"rsgn":
                # Resign from game, forfeiting all points
                m = ResignMove(game.state.scores()[game.state.player_to_move()])
                break
            sq, tile = mstr.split(u'=')
            row = u"ABCDEFGHIJKLMNO".index(sq[0])
            col = int(sq[1:]) - 1
            if tile[0] == u'?':
                # If the blank tile is played, the next character contains
                # its meaning, i.e. the letter it stands for
                letter = tile[1]
                tile = tile[0]
            else:
                letter = tile
            # print(u"Cover: row {0} col {1}".format(row, col))
            m.add_cover(row, col, tile, letter)
    except Exception as e:
        logging.info(u"Exception in _process_move(): {0}".format(e).encode("latin-1"))
        m = None

    # Process the move string here
    # Unpack the error code and message
    err = game.state.check_legality(m)
    msg = ""
    if isinstance(err, tuple):
        err, msg = err

    if err != Error.LEGAL:
        # Something was wrong with the move:
        # show the user a corresponding error message
        return jsonify(result = err, msg = msg)

    # Serialize access to the following code section
    with _autoplayer_lock:

        # Move is OK: register it and update the state
        game.register_move(m)

        # If it's the autoplayer's move, respond immediately
        # (can be a bit time consuming if rack has one or two blank tiles)
        opponent = game.player_id_to_move()
        is_over = game.is_over()

        if not is_over and opponent is None:
            game.autoplayer_move()
            is_over = game.is_over() # State may change during autoplayer_move()

        if is_over:
            # If the game is now over, tally the final score
            game.finalize_score()

        # Make sure the new game state is persistently recorded
        game.store()

        # If the game is now over, and the opponent is human, add it to the
        # zombie game list so that the opponent has a better chance to notice
        # the result
        if is_over and opponent is not None:
            ZombieModel.add_game(game.id(), opponent)

        # Notify the opponent, if he is not a robot and has one or more active channels
        if opponent is not None:
            # Send a game update to the opponent channel, if any, including
            # the full client state. board.html listens to this.
            ChannelModel.send_message(u"game",
                game.id() + u":" + str(1 - player_index),
                json.dumps(game.client_state(1 - player_index, m)))
            # Notify the opponent that it's his turn to move. main.html listens to this.
            # !!! TODO: Figure out a way to have board.html listen to these
            # !!! notifications as well, since we now have a gamelist there
            ChannelModel.send_message(u"user", opponent, u'{ "kind": "game" }')

    # Return a state update to the client (board, rack, score, movelist, etc.)
    return jsonify(game.client_state(player_index))
[ "async def send_move(self, direction: Direction):", "def handle_move(self, params):\n # Example\n #[]|move|p1a: Espeon|Toxic|p2a: Quagsire|[from]Magic Bounce\n\n # Set actionParams for sub events\n self.actionParams = params\n\n if len(params) >= 6:\n fromStr = params[5]\n if \"Magic Bounce\" in fromStr:\n print(\"Magic Bounce handled\")\n return\n\n # Get team id and pokemon name\n teamID, pokeName = self.strip_team(params[2])\n moveUsed = string_parse(params[3])\n\n poke = None\n # True if our team\n if teamID == self.id:\n # Get the pokemon from our team\n poke = self.get_pokemon(self.team, pokeName)\n else:\n # Get the pokemon from the enemy team\n poke = self.get_pokemon(self.enemyTeam, pokeName)\n # Check to see if its in their move list, otherwise add it\n if moveUsed not in poke.pokeMoveDict.keys():\n print(\"%s not seen before by this pokemon\" % moveUsed)\n poke.add_move(moveUsed)\n \n if moveUsed == \"struggle\":\n print(\"TODO: Handle struggle\")\n return\n\n # Decrease PP of move\n print(\"%s pp decreased\" % moveUsed)\n poke.pokeMoveDict[moveUsed][\"curr_pp\"] -= 1", "def on_move_success(self, user, move, target):", "def move(cfg, data):\n if cfg.amcl_process:\n with LiliSocket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect((HOST, POSE_SERVICE_PORT))\n goal = {}\n goal['x'] = data['x']\n goal['y'] = data['y']\n goal['z'] = 0.\n goal['yaw'] = data['yaw']\n goal['name'] = 'goal_robot' # ???\n s.sendall(json.dumps(goal))\n return True\n else:\n print \"Please start AMCL first\"\n return False", "def do_execute(self):\n try:\n self.result = self.game.move(self.move)\n except InvalidMove:\n self.result = INVALID_MOVE", "def handle_move_arm_to_position(arm_position_entry, mqtt_sender):\n print(\"Move to\", arm_position_entry.get())\n mqtt_sender.send_message(\"move_arm_to_position\", [arm_position_entry.get()])", "def on_move_hit(self, user, move, battle):", "def handle_watch_move(self, msg):\n # build notification msg and execute notify send\n subject = \"move detected for %s\" % msg.addr\n txt = \"move detected [fabric: %s, %s, addr: %s] from %s to %s\" % (\n msg.fabric,\n msg.src[\"vnid_name\"] if len(msg.src[\"vnid_name\"])>0 else \"vnid:%d\" % msg.vnid,\n msg.addr,\n eptMoveEvent(**msg.src).notify_string(include_rw=(msg.type!=\"mac\")),\n eptMoveEvent(**msg.dst).notify_string(include_rw=(msg.type!=\"mac\")),\n )\n msg.wf.send_notification(\"move\", subject, txt)", "def handle_move_arm_to_position(arm_position_entry, mqtt_sender):\n print(\"Move arm to position\", arm_position_entry.get())\n mqtt_sender.send_message(\"move_arm_to_position\", [arm_position_entry.get()])", "def _callback_cmd_moves(self, cmd):\n\n meas_num = cmd.get_meas_per_pose()\n if meas_num is None:\n meas_num = int(self._params['meas_per_move'])\n\n det = cmd.get_deterministic()\n\n for pose in cmd.get_poses():\n\n # If multiple scans per move:\n if meas_num >= 0:\n # For each scan while stopped\n for _ in range(meas_num):\n self._scan(deterministic=det)\n\n # If one scan per several moves:\n else:\n if self._move_cnt % abs(meas_num) == 0:\n self._move_cnt = 0\n self._scan(deterministic=det)\n\n # Move robot to new pose(s)\n self._move(pose, deterministic=det)\n self._move_cnt += 1", "def process_incoming(self):\n\n if not self.queue:\n return\n\n while self.queue.qsize():\n at, to = self.queue.get(0)\n self._make_move(at, to)\n self.waiting = False", "def handle_move_arm_to_position(arm_position_entry, mqtt_sender):\n arm_position_entry = arm_position_entry.get()\n print('move arm to position', 
arm_position_entry)\n mqtt_sender.send_message('arm_to_position', [arm_position_entry])", "def process_move(self, move, player):\n try: \n self.moves[player.name][self.sticks] = move\n except KeyError:\n self.moves[player.name] = {}\n self.moves[player.name][self.sticks] = move\n self.sticks -= move", "def move(user, move_from, move_to):\n #check if it is turn of given user\n #make sure its valid move\n #store new GameMove\n pass", "def do_move(self, line): # pylint: disable=invalid-name\n self._CheckState([actuator_types.kActuatorStateArmed,\n actuator_types.kActuatorStateRunning])\n self._CheckServosSelected()\n try:\n angle = float(line)\n except ValueError:\n raise ServoClientError('Invalid angle: \\'%s\\'' % line)\n\n if not self._listener.AllServosArmed():\n raise ServoClientError('Servos not armed.')\n\n self._runner.StartRun([(angle, 1)])\n print 'Running...'", "async def process_single_move_request(cls, id_) -> dict:\n url = \"https://pokeapi.co/api/v2/move/{}\"\n try:\n async with aiohttp.ClientSession() as session:\n response = await RequestApi.get_data(id_, url, session)\n\n # print(response)\n return response\n except Exception:\n print(\"Invalid Move Entered! Try Again\")\n exit()", "def move( self, move_vector ):", "def on_moved(self, e):\n logger.info('Move event from path : {}\\n to path: {}'.format(e.src_path, e.dest_path))\n rel_src_path = self.relativize_path(e.src_path)\n rel_dest_path = self.relativize_path(e.dest_path)\n\n source_shared = self._is_shared_file(rel_src_path)\n dest_shared = self._is_shared_file(rel_dest_path)\n\n # this check that move event isn't modify event.\n # Normally this never happen but sometimes watchdog fail to understand what has happened on file.\n # For example Gedit generate a move event instead copy event when a file is saved.\n if not os.path.exists(e.src_path):\n cmd = 'move'\n else:\n logger.warning('WARNING this is COPY event from MOVE EVENT!')\n cmd = 'copy'\n\n if source_shared and not dest_shared: # file moved from shared path to not shared path\n # upload the file\n new_md5 = self.hash_file(e.dest_path)\n data = {\n 'filepath': rel_dest_path,\n 'md5': new_md5\n }\n\n response = self.conn_mng.dispatch_request('upload', data)\n if response['successful']:\n event_timestamp = response['content']['server_timestamp']\n self.client_snapshot[rel_dest_path] = [event_timestamp, new_md5]\n self.update_local_dir_state(event_timestamp)\n\n if cmd == 'move':\n # force the re-download of the file at next synchronization\n try:\n self.shared_snapshot.pop(rel_src_path)\n except KeyError:\n pass\n else:\n self.stop(1, response['content'])\n\n elif source_shared and dest_shared: # file moved from shared path to shared path\n if cmd == 'move':\n # force the re-download of the file moved at the next synchronization\n try:\n self.shared_snapshot.pop(rel_src_path)\n except KeyError:\n pass\n\n # if it has modified a file tracked by shared snapshot, then force the re-download of it\n try:\n self.shared_snapshot.pop(rel_dest_path)\n except KeyError:\n pass\n\n elif not source_shared and dest_shared: # file moved from not shared path to shared path\n if cmd == 'move':\n # delete file on server\n response = self.conn_mng.dispatch_request('delete', {'filepath': rel_src_path})\n if response['successful']:\n event_timestamp = response['content']['server_timestamp']\n if self.client_snapshot.pop(rel_src_path, 'ERROR') == 'ERROR':\n logger.warning('WARNING inconsistency error during delete operation!\\n'\n 'Impossible to find the following file in stored data 
(client_snapshot):\\n'\n '{}'.format(rel_src_path))\n self.update_local_dir_state(event_timestamp)\n else:\n self.stop(1, response['content'])\n\n # if it has modified a file tracked by shared snapshot, then force the re-download of it\n try:\n self.shared_snapshot.pop(rel_dest_path)\n except KeyError:\n pass\n\n else: # file moved from not shared path to not shared path (standard case)\n if not self.client_snapshot.get(rel_src_path)[1]:\n self.stop(1, 'WARNING inconsistency error during {} operation!\\n'\n 'Impossible to find the following file in stored data (client_snapshot):\\n'\n '{}'.format(cmd, rel_src_path))\n md5 = self.client_snapshot[rel_src_path][1]\n data = {'src': rel_src_path,\n 'dst': rel_dest_path,\n 'md5': md5}\n # Send data to connection manager dispatcher and check return value.\n # If all go right update client_snapshot and local_dir_state\n response = self.conn_mng.dispatch_request(cmd, data)\n if response['successful']:\n event_timestamp = response['content']['server_timestamp']\n self.client_snapshot[rel_dest_path] = [event_timestamp, md5]\n if cmd == 'move':\n # rel_src_path already checked\n self.client_snapshot.pop(rel_src_path)\n self.update_local_dir_state(event_timestamp)\n logger.debug('{} event completed.'.format(cmd))\n else:\n self.stop(1, response['content'])", "def parse_move(s):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a string representation of an Elo score, or a hyphen if none
def elo_str(elo):
    return unicode(elo) if elo else u"-"
[ "def nice_score(self):\r\n return '%s (%d) at %s (%d)' \\\r\n % (self.away, self.score_away, self.home, self.score_home)", "def printable_highscores(self)->str:\n position = 0\n tmp_scores = self.scores\n\n if len(tmp_scores) < 11:\n tmp_scores.extend(self.default_scores(10-len(self.scores)))\n\n string = '{:<10}{:<20}{:<10}{:<10}{:<10}{:<15}\\n'.format(\n self.headers[0], self.headers[1], self.headers[2],\n self.headers[3], self.headers[4], self.headers[5]\n )\n\n for scores in tmp_scores:\n position += 1\n string += '{:<10}{:<20}{:<10}{:<10}{:<10}{:<15}\\n'.format(\n position, scores[0], scores[1], scores[2],\n minutes_seconds(int(scores[3])), scores[4]\n )\n\n return string", "def get_score_designation(plays, scores):\n scoring_play_flag = 0\n if len(plays) > 0:\n last_score = plays[-1][2]\n if scores == last_score:\n scoring_play_flag = 0\n if scores != last_score:\n scoring_play_flag = 1\n if scoring_play_flag == 0:\n return \"\"\n if scoring_play_flag == 1:\n return \"Scoring Play\"", "def _minus_format(string):\n if rc['axes.unicode_minus'] and not rc['text.usetex']:\n string = string.replace('-', '\\N{MINUS SIGN}')\n if REGEX_MINUS_ZERO.match(string):\n string = string[1:]\n return string", "def __loadscore(self):\n if not self.__infos[5].isdigit():\n #The score is showed in char\n return self.handleCharScore(self.__infos[5])\n return float(self.__infos[5])", "def PhredToASCII(self, PhredScore):\n try:\n return self.Phred33[PhredScore]\n except KeyError:\n return 'Cannot retrieve ASCII character. Valid integers range from 0 to 93!'\n sys.exit(1)", "def grade_statement(score):\n if score < 0:\n return \"Invalid score\"\n else:\n if score >= 90:\n return \"Excellent\"\n elif score >= 50:\n return \"Passable\"\n else:\n return \"Bad\"", "def compact_repr(self):\n return self.main_score_name + \" \" + SCORE_FORMAT_STRING.format(self.main_score) + \" (tr. loss \" \\\n + SCORE_FORMAT_STRING.format(self.train_loss) + \") rep. \" + str(self.repetition) + \"; ep. 
\" \\\n + str(self.epoch) + \" (\" + self.model_name + \")\"", "def print_pos_neg(num):\n \n if num > 0:\n return \"positive\"\n elif num == 0: \n return \"neutral\"\n else:\n return \"negative\"", "def to_str(self, rouge_dict):\n strs = []\n for rouge_name, d in sorted(rouge_dict.items()):\n for metric, score in sorted(d.items()):\n # strs.append('{}-{}={:.4f}'.format(rouge_name, metric, score))\n strs.append('%s-%s=%.4f' % (rouge_name, metric, score))\n str = ', '.join(strs)\n return str", "def build_conflicting_scores_string(conflicting_scores):\n string_builder = \"\"\n for i in conflicting_scores:\n string_builder += f\"\\n\\\"{i[0]}\\\" match score: {i[1]}\"\n\n return string_builder", "def readable_score(self):\n return (\n \"%.3f\" % self.duplication_consistency_score\n if self.duplication\n else \"Not available\"\n )", "def compute(self, scoring_elements: str) -> str:\n return str(CVSS2(vector=scoring_elements).base_score)", "def emojify(t_score):\n i = t_score['ids'][0]\n return get_emoji(i).decode('unicode-escape')", "def convert_phred(letter):\n \n score =ord(letter)-33\n return score", "def parse_negative(self):\n\n self.skip_whitespace()\n char = self.peek()\n if char == '-':\n self.index += 1\n return -1 * self.parse_parenthesis()\n else:\n return self.parse_number()", "def _parse_fantano_score(playlist_item) -> str:\n \n score_pattern = \"[a-zA-Z0-9 ]+/10\"\n\n found_score = re.search(score_pattern, playlist_item[\"snippet\"][\"description\"])\n if found_score:\n return found_score.group(0).split(\"/\")[0]\n \n return None", "def fix_minus(s):\n return (s.replace('-', '\\N{MINUS SIGN}')\n if mpl.rcParams['axes.unicode_minus']\n else s)", "def compute(self, scoring_elements: str) -> str:\n return str(CVSS3(vector=scoring_elements).base_score)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of active and zombie games for the current user
def _gamelist():

    result = []
    cuid = User.current_id()
    if not cuid:
        return result

    now = datetime.utcnow()

    # Place zombie games (recently finished games that this player
    # has not seen) at the top of the list
    for g in ZombieModel.list_games(cuid):
        opp = g["opp"] # User id of opponent
        u = User.load(opp)
        nick = u.nickname()
        result.append({
            "uuid": g["uuid"],
            "url": url_for('board', game = g["uuid"], zombie = "1"), # Mark zombie state
            "oppid": opp,
            "opp": nick,
            "fullname": u.full_name(),
            "sc0": g["sc0"],
            "sc1": g["sc1"],
            "ts": Alphabet.format_timestamp(g["ts"]),
            "my_turn": False,
            "overdue": False,
            "zombie": True,
            "fairplay": u.fairplay(),
            "tile_count" : Alphabet.BAG_SIZE # All tiles accounted for
        })

    # Sort zombies in decreasing order by last move, i.e. most recently completed games first
    result.sort(key = lambda x: x["ts"], reverse = True)

    # Obtain up to 50 live games where this user is a player
    i = list(GameModel.list_live_games(cuid, max_len = 50))
    # Sort in reverse order by turn and then by timestamp of the last move,
    # i.e. games with newest moves first
    i.sort(key = lambda x: (x["my_turn"], x["ts"]), reverse = True)

    # Iterate through the game list
    for g in i:
        opp = g["opp"] # User id of opponent
        ts = g["ts"]
        overdue = False
        fairplay = False
        fullname = ""
        if opp is None:
            # Autoplayer opponent
            nick = Game.autoplayer_name(g["robot_level"])
        else:
            # Human opponent
            u = User.load(opp)
            nick = u.nickname()
            fullname = u.full_name()
            fairplay = u.fairplay()
            delta = now - ts
            if g["my_turn"]:
                # Start to show warning after 12 days
                overdue = (delta >= timedelta(days = Game.OVERDUE_DAYS - 2))
            else:
                # Show mark after 14 days
                overdue = (delta >= timedelta(days = Game.OVERDUE_DAYS))
        result.append({
            "uuid": g["uuid"],
            "url": url_for('board', game = g["uuid"]),
            "oppid": opp,
            "opp": nick,
            "fullname": fullname,
            "sc0": g["sc0"],
            "sc1": g["sc1"],
            "ts": Alphabet.format_timestamp(ts),
            "my_turn": g["my_turn"],
            "overdue": overdue,
            "zombie": False,
            "fairplay": fairplay,
            "tile_count" : g["tile_count"]
        })

    return result
[ "def get_user_games(self, request):\n user = User.get_user_by_name(request.user_name)\n games = Game.query(ndb.OR(Game.user_x == user.key,\n Game.user_o == user.key)). \\\n filter(Game.game_over == False).filter(Game.game_cancelled == False)\n\n if not user:\n raise endpoints.BadRequestException('User not found!')\n\n return UserGameFroms(games = [game.to_form('Active User Games') for game in games])", "def getGamesWithStatus(self, user, status):\n\n if user == None:\n return []\n\n hostGamesInProgress = self.cm.getGamesTable().query(IndexName='HostId-StatusDate-index', KeyConditionExpression=(Key('HostId').eq(user) & Key('StatusDate').begins_with(status))) \n \n oppGamesInProgress = self.cm.getGamesTable().query(IndexName='OpponentId-StatusDate-index', KeyConditionExpression=(Key('OpponentId').eq(user) & Key('StatusDate').begins_with(status))) \n\n games = self.mergeQueries(hostGamesInProgress['Items'],\n oppGamesInProgress['Items'])\n return games", "def get_inactive_games(cls):\n one_hour_ago = datetime.datetime.now() + datetime.timedelta(hours=-1)\n games = (\n cls.query()\n .filter(\n cls.game_state.IN([\n cls.GameState.WAITING_FOR_OPPONENT,\n cls.GameState.PREPARING_BOARD,\n cls.GameState.PLAYER_ONE_TURN,\n cls.GameState.PLAYER_TWO_TURN\n ]))\n .filter(cls.last_update <= one_hour_ago)\n .order(-cls.last_update)\n .fetch()\n )\n return games", "def get_game_list():\n pass", "def _active_games(wakeup_time):\n logger.info(\"_active_games() - Looking for any games within wakeup time of {}\".format(wakeup_time))\n games = _games_in_week(_cur_year, _cur_week, _cur_season_phase)\n logger.info(\"{} games found\".format(len(games)))\n active = []\n for info in games:\n if not _game_is_active(info, wakeup_time):\n continue\n active.append(info)\n\n logger.info(\"{} are active\".format(len(active)))\n if len(active) != 0:\n logger.info(\"Active Games:::::::::::::::::::::\")\n for game in active:\n logger.info(game)\n return active", "def active_games(request, username):\n if request.user.username != username:\n # TODO Fix this\n return redirect('failed_test')\n else:\n active = request.user.game_set.filter(is_complete=False)\n return render(request, 'game/active_games.html', {'active': active})", "def _active_games(inactive_interval):\r\n games = _games_in_week(_cur_year, _cur_week, _cur_season_phase)\r\n active = []\r\n for info in games:\r\n if not _game_is_active(info, inactive_interval):\r\n continue\r\n active.append(info)\r\n return active", "def game_status(self):\n\t\tactive_players = []\n\t\tfor player in self.players.itervalues():\n\t\t\tif player.alive:\n\t\t\t\tactive_players.append(player.name)\n\n\t\treturn {\n\t\t\t'game_active': self.active,\n\t\t\t'turn':self.turn,\n\t\t\t'active_players': active_players\n\t\t}", "def get_game_list(self):\n game_list = self.dal.get_games()\n return make_response(True, data=game_list)", "def get_games_today():\n td = datetime.datetime.today()\n yd = datetime.datetime.today() - datetime.timedelta(days=1)\n\n games_yesterday = mlbgame.day(year=yd.year, month=yd.month, day=yd.day)\n games_today = mlbgame.day(year=td.year, month=td.month, day=td.day)\n \n games = games_today + games_yesterday\n score_boards = []\n for g in games:\n sb = ScoreBoard(g)\n score_boards.append(sb)\n \n return score_boards", "def get_alive_players(self):\n return [x for x in self.joueurs.keys() if self.joueurs[x].is_alive()]", "def get_all_games():\n return league.GameLog().overall()['GAME_ID'].unique()", "def todays_games(self):\n unplayed_games = []\n live_games = 
[]\n finished_games = []\n games_data = self.games_data\n game_headers = games_data[0]['headers']\n game_sets = games_data[0]['rowSet']\n header_list = [\n 'GAME_STATUS_ID', 'HOME_TEAM_ID', 'VISITOR_TEAM_ID', 'GAME_ID', 'GAME_DATE_EST', 'GAME_STATUS_TEXT'\n ]\n for game in game_sets:\n # game_info = list(zip(game_headers, game))\n game_info = dict(zip(game_headers, game))\n game_data = {x.lower(): game_info.get(x) for x in header_list}\n # game_data = {x.lower(): self._get_data(game_info, x) for x in header_list}\n logging.info(json.dumps(game_data, indent=2))\n game_data['home_record'] = self.get_team_record(game_data['home_team_id'])\n game_data['away_record'] = self.get_team_record(game_data['visitor_team_id'])\n game_data['home_team'] = self._team_ids.get(game_data['home_team_id'])\n game_data['away_team'] = self._team_ids.get(game_data['visitor_team_id'])\n status = game_data['game_status_id']\n if status == '1':\n unplayed_games.append(game_data)\n elif status == '2' or status == '3':\n score_headers = games_data[1]['headers']\n score_sets = games_data[1]['rowSet']\n game_scores = []\n for score in score_sets:\n game_scores.append(list(zip(score_headers, score)))\n for score in game_scores:\n game_id = self._get_data(score, 'GAME_ID')\n team_id = self._get_data(score, 'TEAM_ID')\n points = self._get_data(score, 'PTS')\n if game_id == game_data['game_id']:\n if team_id == game_data['home_team_id']:\n game_data['home_team_score'] = points\n elif team_id == game_data['visitor_team_id']:\n game_data['away_team_score'] = points\n if status == '2':\n live_games.append(game_data)\n elif status == '3':\n finished_games.append(game_data)\n Games = namedtuple('Status', ['unplayed', 'live', 'final'])\n games_info = Games(unplayed=unplayed_games, live=live_games, final=finished_games)\n # CACHE.set(game_data['id'], game_data)\n return games_info", "def test_list_games_1(self):\n username = 'adam' # From fixtures\n url = '/chess/user/' + username + '/game/'\n c = Client()\n\n response = c.get(url)\n assert_that(response.status_code, is_(HTTP_200_OK))\n\n games = json.loads(response.content)\n assert_that(len(games), is_(10))\n\n for game in games:\n # TODO: Enhance validation as format is finalized, use Color.WHITE.name\n # Add checks for other properties\n assert_that(game[\"active_player\"], equal_to(\"WHITE\"))", "async def get_current_games(self):\n sql = (\"SELECT event_id, player_points, clan_points \"\n \"FROM rcs_events \"\n \"WHERE event_type_id = 1 AND CURRENT_TIMESTAMP BETWEEN start_time AND end_time\")\n row = await self.bot.pool.fetchrow(sql)\n if row:\n return {\n \"games_id\": row['event_id'],\n \"player_points\": row['player_points'],\n \"clan_points\": row['clan_points']\n }\n else:\n return None", "def get_user_teams(self):\n teams = self.query(\"https://fantasysports.yahooapis.com/fantasy/v2/users;use_login=1/games;codes=\" + self.game_code +\n \"/teams/\", [\"users\", \"0\", \"user\", \"games\"])\n if isinstance(teams, list):\n return sorted(teams, key=lambda x: x.get(\"game\").season)\n else:\n return teams\n\n # return sorted(self.query(\n # \"https://fantasysports.yahooapis.com/fantasy/v2/users;use_login=1/games;codes=\" + self.game_code +\n # \"/teams/\", [\"users\", \"0\", \"user\", \"games\"]), key=lambda x: x.get(\"game\").season)", "def test_view_own_games(self):\n kwds = {'comp_slug': self.space.slug}\n games_url = reverse(\"game_list\", kwargs=kwds)\n with self.loggedInAs(\"alice\", \"123\"):\n response = self.client.get(games_url)\n 
self.assertTrue(len(response.context['games']) > 0)\n for game in response.context['games']:\n self.assertIn(self.alice_team, game.teams.all())\n self.assertEqual(2, game.teams.count()) # sanity check", "def all_games(self):\r\n\t\tfor game in self.games.values():\r\n\t\t\tyield game", "def history_games(request, username):\n if request.user.username != username:\n # TODO Fix this\n return redirect('failed_test')\n else:\n finished = request.user.lobby_set.order_by('-pub_date').filter(\n pub_date__date = timezone.now())\n return render(\n request, 'game/history_games.html', {'finished': finished})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of Elo ratings of the given kind ('all' or 'human')
def _rating(kind):
    result = []
    cuser = User.current()
    cuid = None if cuser is None else cuser.id()
    # Generate a list of challenges issued by this user
    challenges = set()
    if cuid:
        challenges.update([ch[0] # Identifier of challenged user
            for ch in iter(ChallengeModel.list_issued(cuid, max_len = 20))])
    rating = memcache.get(kind, namespace="rating")
    if rating is None:
        # Not found: do a query
        rating = list(RatingModel.list_rating(kind))
        # Store the result in the cache with a lifetime of 1 hour
        memcache.set(kind, rating, time=1 * 60 * 60, namespace="rating")
    for ru in rating:
        uid = ru["userid"]
        if not uid:
            # Hit the end of the list
            break
        is_robot = False
        usr = None
        inactive = False
        if uid.startswith(u"robot-"):
            is_robot = True
            nick = Game.autoplayer_name(int(uid[6:]))
            fullname = nick
            chall = False
            fairplay = False
        else:
            usr = User.load(uid)
            if usr is None:
                # Something wrong with this one: don't bother
                continue
            nick = usr.nickname()
            if not User.is_valid_nick(nick):
                nick = u"--"
            fullname = usr.full_name()
            chall = uid in challenges
            fairplay = usr.fairplay()
            inactive = usr.is_inactive()
        games = ru["games"]
        if games == 0:
            ratio = 0
            avgpts = 0
        else:
            ratio = int(round(100.0 * float(ru["wins"]) / games))
            avgpts = int(round(float(ru["score"]) / games))
        result.append({
            "rank": ru["rank"],
            "rank_yesterday": ru["rank_yesterday"],
            "rank_week_ago": ru["rank_week_ago"],
            "rank_month_ago": ru["rank_month_ago"],
            "userid": uid,
            "nick": nick,
            "fullname": fullname,
            "chall": chall,
            "fairplay": fairplay,
            "inactive": inactive,
            "elo": ru["elo"],
            "elo_yesterday": ru["elo_yesterday"],
            "elo_week_ago": ru["elo_week_ago"],
            "elo_month_ago": ru["elo_month_ago"],
            "games": games,
            "games_yesterday": ru["games_yesterday"],
            "games_week_ago": ru["games_week_ago"],
            "games_month_ago": ru["games_month_ago"],
            "ratio": ratio,
            "avgpts": avgpts
        })
    return result
[ "def get_top_ratings(self, kindle_count):\n logger.info(\"Generating rec kindle for all user\")\n ratings = self.model.recommendForAllUsers(kindle_count)\n\n return ratings.toJSON().collect()", "def get_elo_ratings(self):\n\n elo_ratings = dict()\n for _player in self._players:\n elo_ratings[_player] = _player.elo_rating\n return elo_ratings", "def getBeerRatings(url):\n if not url: #no URL\n return [] #no beers\n #make soup from the beer page for a brewery\n soup = makeSoup(url + '/beer')\n #get the lists of tags for beers and their ratings\n list1, list2 = getBeerTags(soup)\n return [getBeerRatingFromTags(tag1, tag2)\n for tag1, tag2 in zip(list1, list2)]", "def get_rat_list():\n return sorted(metadata.keys())", "def ratings(ticker):\n\n # Contact API\n api_key = os.environ.get(\"FMP_API_KEY\")\n url = f\"https://financialmodelingprep.com/api/v3/rating/{ticker}?apikey={api_key}\"\n response = urlopen(url)\n ratings = response.read().decode(\"utf-8\")\n return json.loads(ratings)", "def get_ratings():\n cursor = connection.cursor()\n sql = \"SELECT oj.USERs.HANDLE ,oj.rating_distribution.color , COUNT(oj.participant.contest_id) , oj.USERS.RATING \\\nFROM oj.users left join oj.participant ON (oj.USERS.USER_ID = oj.participant.USER_ID ) \\\n left join oj.rating_distribution on (oj.rating_distribution.minimum_rating <= oj.users.rating and oj.users.rating <= oj.rating_distribution.maximum_rating ) \\\nGROUP BY oj.users.USER_ID , oj.USERs.HANDLE , oj.USERS.RATING ,oj.rating_distribution.color \\\nORDER BY oj.USERS.RATING desc , count(*) desc , oj.users.handle asc;\"\n cursor.execute(sql)\n result = cursor.fetchall()\n cursor.close()\n return result", "def getRatingList(self):\n lst = []\n for team in self.__getTeamList():\n lst.append((team.name,team.rating))\n return lst", "def read(self):\n raw_ratings = list()\n for i, uid, iid, rating, _ in super().read():\n raw_ratings.append((uid, iid, float(rating)))\n return raw_ratings", "def get_average_rating(self):\n connection = sqlite3.connect(\"db.sqlite3\")\n cursor = connection.cursor()\n\n cursor.execute(\"\"\"\n SELECT ROUND(AVG(vote_value), 1) FROM u_app_rating\n WHERE rated_doggo_id=?\"\"\", (self,))\n\n return cursor.fetchone()", "def calculate_mean(collection):\n ratings = []\n for game in collection['items']['item']:\n ratings.append(float(game['stats']['rating']['@value']))\n mean = sum(ratings)/len(ratings)\n return mean", "def average_rating(self):\n ratings = GameRating.objects.filter(game=self)\n\n # Sum all of the ratings for the game\n total_rating = 0\n for rating in ratings:\n total_rating += rating.rating\n\n if len(ratings):\n return total_rating / len(ratings)\n\n # Calculate the averge and return it.\n # If you don't know how to calculate averge, Google it.", "def ratings_for_player(self, name, current=False):\n if self.config[\"uncased\"]:\n name = name.lower()\n player = self.player_by_name(name)\n if current:\n return (\n round(player.days[-1].elo),\n round(player.days[-1].uncertainty * 100),\n )\n return [[d.day, round(d.elo), round(d.uncertainty * 100)] for d in player.days]", "def pets(x):\n return select_against_homogenous_group(x, [\"Non-human\"])", "def get_club_elo_ratings():\n print('#get_club_elo_ratings()')\n results_dict = {}\n\n # Get data for all teams.\n team_data = MY_SESSION.get(constants.FANTASY_API_URL).json()['teams']\n\n # Get Elo data for today's date.\n date_string = datetime.datetime.now().strftime('%Y-%m-%d')\n elo_data = urllib.request.urlopen(constants.CLUB_ELO_URL + date_string)\n 
parsed_elo_data = csv.reader(codecs.iterdecode(elo_data, 'utf-8'))\n\n # Loop through the Elo data. When we find a premier league team,\n # add it's ID and Elo to the results dictionary.\n for line in parsed_elo_data:\n if len(line) > 5:\n elo_rating = line[4]\n # Need to fix some of the names from the Elo website.\n if line[1] == 'Tottenham':\n club_name = 'Spurs'\n elif line[1] == 'Man United':\n club_name = 'Man Utd'\n else:\n club_name = line[1]\n\n for team in team_data:\n if team['name'] == club_name:\n results_dict[team['id']] = float(elo_rating)\n print('#get_club_elo_ratings returning: ', results_dict)\n return results_dict", "def get_rating():\n id = request.params.get('row_id')\n row = db((db.thumbs.post_id == id) &\n (db.thumbs.rater == get_user())).select().first()\n rating_up = row.rating_up if row is not None else False\n rating_down = row.rating_down if row is not None else False\n return dict(rating_up=rating_up, rating_down=rating_down)", "def data_kinds():\n # Q,actual amount average makes sense / O, order raking them?/ N, category\n \n dic = {}\n dic = {'YEAR': 'O', \n 'MONTH': 'O',\n 'DAY': 'O',\n 'DAY_OF_WEEK': 'N',\n 'AIRLINE': 'N',\n 'FLIGHT_NUMBER':'N',\n 'TAIL_NUMBER': 'N',\n 'ORIGIN_AIRPORT':'N',\n 'DESTINATION_AIRPORT':'N',\n 'SCHEDULED_DEPARTURE': 'Q',\n 'DEPARTURE_TIME': 'Q',\n 'DEPARTURE_DELAY':'Q',\n 'TAXI_OUT': 'Q',\n 'WHEELS_OFF': 'Q' , \n 'SCHEDULED_TIME': 'Q', \n 'ELAPSED_TIME': 'Q', \n 'AIR_TIME':'Q', \n 'DISTANCE' : 'Q',\n 'WHEELS_ON' : 'Q', \n 'TAXI_IN':'Q',\n 'SCHEDULED_ARRIVAL':'Q', \n 'ARRIVAL_TIME':'Q',\n 'ARRIVAL_DELAY':'Q', \n 'DIVERTED': 'N', \n 'CANCELLED':'N', \n 'CANCELLATION_REASON':'N',\n 'AIR_SYSTEM_DELAY':'Q', \n 'SECURITY_DELAY':'Q', \n 'AIRLINE_DELAY':'Q',\n 'LATE_AIRCRAFT_DELAY':'Q', \n 'WEATHER_DELAY':'Q' \n }\n \n\n return dic", "def getShowRating(self):\r\n data = showInformation.getJson(self.infourl)\r\n rating = float(data[\"rating\"][\"average\"])\r\n return rating", "def print_average_rating(params):\n info_list = get_info_from_dataset(['rating'], params)\n average = sum(float(loc[0]) for loc in info_list) / len(info_list)\n print(f'The average rating is {average}')\n print('-' * 80)", "def scores_vs_rating():\n\n rating_comparison = {\n 1: [], 2: [], 3: [], 4: [], 5: []\n }\n\n rating_key = \"like_rating_specific\"\n\n for user, session in Session.get_users_with_surveys():\n\n boundary = HistogramBoundary(user)\n\n survey = user.get_survey()\n\n for playlist_index, playlist in enumerate(session.recommendations):\n survey_ratings = survey[f\"playlist{playlist_index+1}\"][rating_key]\n\n for track_index, track in enumerate(playlist[\"tracks\"]):\n\n track_rating, _ = boundary.get_boundary_score(track)\n\n survey_rating = int(survey_ratings[f'Song{track_index + 1}'])\n\n rating_comparison[survey_rating].append(track_rating)\n\n result_string = \"\"\n\n for rating_bin, scores in rating_comparison.items():\n result_string += f\"{rating_bin}: {statistics.mean(scores):.3f}, \"\n result_string = result_string[:-2]\n print(result_string)\n\n for rating_bin, scores in rating_comparison.items():\n\n plt.hist(scores, bins=20)\n plt.title(f\"Rating: {rating_bin} (total: {len(scores)})\")\n plt.xlim((0.0, 8.0))\n plt.show()\n\n t_tests = {}\n for i in range(1, 6):\n t_tests[i] = {}\n for j in range(1, 6):\n if i != j:\n\n t_test_score = ttest_ind(\n rating_comparison[i], # [:min_amount],\n rating_comparison[j], # [:min_amount],\n equal_var=False\n )\n t_tests[i][j] = t_test_score[1]\n\n pprint(t_tests)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if the given opponent is waiting on this user's challenge
def _opponent_waiting(user_id, opp_id):
    return ChannelModel.exists(u"wait", user_id, opp_id)
[ "def opp_ready(c):\n if not is_timed(c[1]):\n return False\n # Timed challenge: see if there is a valid wait channel connection\n # where the opponent is waiting for this user\n return _opponent_waiting(cuid, c[0])", "def can_challenge(self, challenger):\n try:\n return (\n challenger.userprofile.rank > self.rank >= (challenger.userprofile.rank - 2)\n and not self.has_open_challenge\n and challenger.userprofile.is_available\n )\n except UserProfile.DoesNotExist:\n return False", "def can_decline(self):\n can_decline = False\n\n if self.rank == 1:\n return can_decline\n\n # get the last 2 matches for this user\n matches = Match.objects.filter(Q(challenger=self.user) | Q(opponent=self.user)).order_by('-challenge_time')[:2]\n\n user_challenged_count = 0\n\n for match in matches:\n if match.declined:\n continue\n\n if match.opponent == self.user:\n user_challenged_count += 1\n\n if user_challenged_count == 2:\n can_decline = True\n\n return can_decline", "def checkVote(self):\n if self.nextMission(): # Voting is for some part of the next mission\n mission = self.nextMission()\n if mission.team: # voting is to decide if the mission succeeds\n if not self.readyCheck(mission.team): return False\n else: return self.attemptNextMission()\n \n else: # voting is to decide if the proposed mission team is deployed\n if not self.readyCheck(self.players): return False\n else: return self.createTeam()\n \n else: # Missions have not been created yet, still in setup\n if not self.readyCheck(self.players): return False\n else: return self.initPlayers()", "def is_ready_to_start(self):\n is_left_resolved = self.__left_participant.get_competitor() is not None\n is_right_resolved = self.__right_participant.get_competitor() is not None\n is_winner_resolved = self.__winner.get_competitor() is not None\n return is_left_resolved and is_right_resolved and not is_winner_resolved", "def is_win(self):\n if self._is_terminal:\n return self.board[self.player_goal_idx] > self.board[self.opponent_goal_idx]", "def needs_decision(self) -> bool:\n\n return self._is_controlled and self._selected_player is None", "def completed_by_players(self):\n finished_players = 0\n for player in self.players:\n if len(player.guess) == len(self.sequence):\n finished_players += 1\n return finished_players == len(self.players)", "def check_if_tie(self):\n if(self.total_turns < 200):\n return False\n else: return True", "def in_game(self):\n try:\n if self.p.poll() is None:\n return True\n else:\n return False\n except:\n return False", "def has_player(self, user):\n return self.player_one == user.key or self.player_two == user.key", "def _check_for_completion(self) -> None:\r\n # Assume (for contradiction) that game is complete.\r\n is_complete = True\r\n for c in self.mf.all_coords:\r\n exp_val = self.mf.completed_board[c]\r\n if type(exp_val) is CellNum and exp_val != self.board[c]:\r\n is_complete = False\r\n break\r\n\r\n if is_complete:\r\n logger.info(\"Game won\")\r\n\r\n self.end_time = tm.time()\r\n self.state = GameState.WON\r\n self.mines_remaining = 0\r\n\r\n for c in self.mf.all_coords:\r\n if (\r\n self.mf.cell_contains_mine(c)\r\n and type(self.board[c]) is not CellHitMine\r\n ):\r\n self._set_cell(c, CellFlag(self.mf[c]))", "def wait_for_challenge_to_be_solved():\n input(\"Please solve the challenge. 
When done, press Enter to continue...\")", "def is_player_ready(question, answer_type):\n answer = str(input(question))\n return True if answer.lower() in answer_type else False", "def check_action_required(self):\n for player in list(self.existing_players.values()):\n if player.action_required == 1:\n return True", "def readyCheck(self,players):\n for player in players:\n if player.vote == 0: return False\n return True", "def endState(self):\n return not(self.state.winner() == -1 and len(self.state.getLegalAction(self.state.agent)) > 0)", "def is_a_captain(self):\n\n from scheduler.models import Challenge # Avoid circular import\n\n my_challenges = (Challenge.objects.filter(\n Q(roster1__captain=self) |\n Q(roster2__captain=self))\n )\n\n if len(my_challenges) > 0:\n return my_challenges\n else:\n return False", "def finished(self):\n return self.board == self.goal" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of challenges issued or received by the current user
def _challengelist():
    result = []
    cuid = User.current_id()

    def is_timed(prefs):
        """ Return True if the challenge is for a timed game """
        if prefs is None:
            return False
        return prefs.get("duration", 0) > 0

    def opp_ready(c):
        """ Returns True if this is a timed challenge and the opponent is ready to play """
        if not is_timed(c[1]):
            return False
        # Timed challenge: see if there is a valid wait channel connection
        # where the opponent is waiting for this user
        return _opponent_waiting(cuid, c[0])

    if cuid is not None:
        # List received challenges
        i = iter(ChallengeModel.list_received(cuid, max_len = 20))
        for c in i:
            u = User.load(c[0]) # User id
            nick = u.nickname()
            result.append({
                "received": True,
                "userid": c[0],
                "opp": nick,
                "fullname": u.full_name(),
                "prefs": c[1],
                "ts": Alphabet.format_timestamp(c[2]),
                "opp_ready" : False
            })
        # List issued challenges
        i = iter(ChallengeModel.list_issued(cuid, max_len = 20))
        for c in i:
            u = User.load(c[0]) # User id
            nick = u.nickname()
            result.append({
                "received": False,
                "userid": c[0],
                "opp": nick,
                "fullname": u.full_name(),
                "prefs": c[1],
                "ts": Alphabet.format_timestamp(c[2]),
                "opp_ready" : opp_ready(c)
            })
    return result
[ "def get_challenge_suggestions(self, cr, uid, context=None):\r\n challenge_info = []\r\n challenge_obj = self.pool.get('gamification.challenge')\r\n challenge_ids = challenge_obj.search(cr, uid, [('invited_user_ids', 'in', uid), ('state', '=', 'inprogress')], context=context)\r\n for challenge in challenge_obj.browse(cr, uid, challenge_ids, context=context):\r\n values = {\r\n 'id': challenge.id,\r\n 'name': challenge.name,\r\n 'description': challenge.description,\r\n }\r\n challenge_info.append(values)\r\n return challenge_info", "def getSkillsChallenge(self) -> list:\n return self.skillsChallenged", "def get_remaining_submissions(request, challenge_pk):\n phases_data = {}\n challenge = get_challenge_model(challenge_pk)\n challenge_phases = ChallengePhase.objects.filter(\n challenge=challenge\n ).order_by(\"pk\")\n if not is_user_a_host_of_challenge(request.user, challenge_pk):\n challenge_phases = challenge_phases.filter(\n challenge=challenge, is_public=True\n ).order_by(\"pk\")\n phase_data_list = list()\n for phase in challenge_phases:\n (\n remaining_submission_message,\n response_status,\n ) = get_remaining_submission_for_a_phase(\n request.user, phase.id, challenge_pk\n )\n if response_status != status.HTTP_200_OK:\n return Response(\n remaining_submission_message, status=response_status\n )\n phase_data_list.append(\n RemainingSubmissionDataSerializer(\n phase, context={\"limits\": remaining_submission_message}\n ).data\n )\n phases_data[\"phases\"] = phase_data_list\n participant_team = get_participant_team_of_user_for_a_challenge(\n request.user, challenge_pk\n )\n phases_data[\"participant_team\"] = participant_team.team_name\n phases_data[\"participant_team_id\"] = participant_team.id\n return Response(phases_data, status=status.HTTP_200_OK)", "def notify_users_about_challenge(request):\n if request.user.is_authenticated and request.user.is_superuser:\n if request.method == \"GET\":\n template_name = \"notification_email_data.html\"\n return render(request, template_name)\n\n elif request.method == \"POST\":\n users = User.objects.exclude(email__exact=\"\").values_list(\n \"email\", flat=True\n )\n subject = request.POST.get(\"subject\")\n body_html = request.POST.get(\"body\")\n\n sender = settings.CLOUDCV_TEAM_EMAIL\n\n email = EmailMessage(\n subject,\n body_html,\n sender,\n [settings.CLOUDCV_TEAM_EMAIL],\n bcc=users,\n )\n email.content_subtype = \"html\"\n\n try:\n email.send()\n return render(\n request,\n \"notification_email_conformation.html\",\n {\"message\": \"All the emails are sent successfully!\"},\n )\n except SMTPException:\n logger.exception(traceback.format_exc())\n return render(\n request, \"notification_email_data.html\", {\"errors\": 1}\n )\n else:\n return render(request, \"error404.html\")\n else:\n return render(request, \"error404.html\")", "def requested_users(self):\n return self.expense.contribution_set.filter(contributed__isnull=True)", "def list_data_challenges(request, template_name='projects/list-data-challenges.html'):\n\n context = {}\n context['projects'] = DataProject.objects.filter(is_challenge=True, visible=True).order_by(F('order').asc(nulls_last=True))\n\n return render(request, template_name, context=context)", "async def list_reminders(self, ctx: Context) -> None:\n # Get all the user's reminders from the database.\n data = await self.bot.api_client.get(\n \"bot/reminders\",\n params={\"author__id\": str(ctx.author.id)}\n )\n\n # Make a list of tuples so it can be sorted by time.\n reminders = sorted(\n (\n (rem[\"content\"], 
rem[\"expiration\"], rem[\"id\"], rem[\"mentions\"])\n for rem in data\n ),\n key=itemgetter(1)\n )\n\n lines = []\n\n for content, remind_at, id_, mentions in reminders:\n # Parse and humanize the time, make it pretty :D\n expiry = time.format_relative(remind_at)\n\n mentions = \", \".join([\n # Both Role and User objects have the `mention` attribute\n f\"{mentionable.mention} ({mentionable})\" async for mentionable in self.get_mentionables(mentions)\n ])\n mention_string = f\"\\n**Mentions:** {mentions}\" if mentions else \"\"\n\n text = textwrap.dedent(f\"\"\"\n **Reminder #{id_}:** *expires {expiry}* (ID: {id_}){mention_string}\n {content}\n \"\"\").strip()\n\n lines.append(text)\n\n embed = discord.Embed()\n embed.colour = discord.Colour.og_blurple()\n embed.title = f\"Reminders for {ctx.author}\"\n\n # Remind the user that they have no reminders :^)\n if not lines:\n embed.description = \"No active reminders could be found.\"\n await ctx.send(embed=embed)\n return\n\n # Construct the embed and paginate it.\n embed.colour = discord.Colour.og_blurple()\n\n await LinePaginator.paginate(\n lines,\n ctx, embed,\n max_lines=3,\n )", "def get(self):\n users = User.query()\n for user in users:\n # Get all the unfinished user games.\n games = Game.query(Game.user == user.key, Game.game_over == False)\n if games.count() > 0:\n subject = 'This is a reminder!'\n body = 'Hello {0}, This is a reminder that you have Hangman game in progress! ' \\\n 'Let\\'s play and have some fun!'\\\n .format(user.name)\n # This will send emails to the users who have pending active games.\n mail.send_mail('noreply@{}.appspotmail.com'.\n format(app_identity.get_application_id()),\n user.email,\n subject,\n body)", "def get_all_consent_uncached(self, user):\n raise NotImplementedError", "def get_challenge_listing(self, project_ids=\"\", limit=10, page=0, only_enabled='true'):\n query_params = {\n \"projectIds\": str(project_ids),\n \"limit\": str(limit),\n \"page\": str(page),\n \"onlyEnabled\": str(only_enabled)\n }\n response = self.get(\n endpoint=\"/challenges/listing\",\n params=query_params\n )\n return response", "async def handle_user_owned_request_listing(request):\n # Future authorization check here\n\n # Check for incorrect client query here\n\n user = request.match_info[\"user\"]\n\n try:\n ret = await request.app[\"db_conn\"].get_request_owned(user)\n except InterfaceError:\n handle_dropped_connection(request)\n\n return aiohttp.web.json_response(ret)", "def getPendingUsers(self, account, include_reasons=False):\n return EquipmentACLInfo.getPendingUsers(account, self, include_reasons)", "def collect_response(channel_id, timestamp):\n logger.info(\"Getting reactions\")\n response = client.reactions_get(\n channel=channel_id,\n timestamp=timestamp\n )\n logger.debug(f\"Reactions response{response}\")\n # TODO: need to handle when the message isnt there\n reactions = response['message'].get('reactions', [])\n participants = []\n for reaction in reactions:\n participants += reaction['users']\n participants_set = list(set(participants))\n logger.info(\"Finished getting participants\")\n logger.debug(f\"Participants are{participants_set}\")\n return participants_set", "def getUserQuestions(self):\n return self.base.get(\"user_questions\", [])", "async def handle_user_made_request_listing(request):\n # Future authorization check here\n\n # Check for incorrect client query here\n\n user = request.match_info[\"user\"]\n\n try:\n ret = await request.app[\"db_conn\"].get_request_made(user)\n except 
InterfaceError:\n handle_dropped_connection(request)\n\n return aiohttp.web.json_response(ret)", "def print_challenges(challenges_data):", "def get_all_consent_uncached(self, user):\n stored_consent = self._get_stored_consent_for_user(user)\n result = {}\n\n if stored_consent:\n for key, value in stored_consent.consent_grants.items():\n if value:\n result[key] = Consent.GRANTED\n else:\n result[key] = Consent.DENIED\n\n return result", "def filter_user_attempts(request, credentials: dict = None) -> List[QuerySet]:\n\n username = get_client_username(request, credentials)\n\n filter_kwargs_list = get_client_parameters(\n username, request.axes_ip_address, request.axes_user_agent\n )\n attempts_list = [\n AccessAttempt.objects.filter(**filter_kwargs)\n for filter_kwargs in filter_kwargs_list\n ]\n return attempts_list", "async def get_stakeholders(self, request: Request) -> OkResult:\n my_url = self.get_url()\n stakeholders = await request.app._models.User.gets(self._table, roles = {\"$regex\": f\"@{my_url}$\"})\n result = {}\n for stakeholder in stakeholders:\n for rolepath in stakeholder.roles:\n if rolepath.endswith(f'@{my_url}'):\n role = rolepath.split('@')[0]\n if role not in result.keys():\n result[role] = []\n result[role].append(stakeholder)\n\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if this is a timed challenge and the opponent is ready to play
def opp_ready(c):
    if not is_timed(c[1]):
        return False
    # Timed challenge: see if there is a valid wait channel connection
    # where the opponent is waiting for this user
    return _opponent_waiting(cuid, c[0])
[ "def is_player_ready(question, answer_type):\n answer = str(input(question))\n return True if answer.lower() in answer_type else False", "def in_game(self):\n try:\n if self.p.poll() is None:\n return True\n else:\n return False\n except:\n return False", "def check_if_tie(self):\n if(self.total_turns < 200):\n return False\n else: return True", "def player_alive(self)->bool:\n return self.tries_used < 9", "def is_ready_to_start(self):\n is_left_resolved = self.__left_participant.get_competitor() is not None\n is_right_resolved = self.__right_participant.get_competitor() is not None\n is_winner_resolved = self.__winner.get_competitor() is not None\n return is_left_resolved and is_right_resolved and not is_winner_resolved", "def is_chance(self):\n return self.player == -1", "def can_countdown():\n if get_current_round(g) != \"day\":\n return False, 'It is not day.'\n elif not is_player_alive(g, user_id):\n return False, 'You are not in the game.'\n # get list of all alive\n # get list of votes\n # if list of votes == all alive - 1\n elif len(get_all_alive(g))- 1 == len(get_all_votes(g).keys()):\n return True, None\n else:\n return False, 'Can not start countdown now.'", "def needs_decision(self) -> bool:\n\n return self._is_controlled and self._selected_player is None", "def is_win(self):\n if self._is_terminal:\n return self.board[self.player_goal_idx] > self.board[self.opponent_goal_idx]", "def completed_by_players(self):\n finished_players = 0\n for player in self.players:\n if len(player.guess) == len(self.sequence):\n finished_players += 1\n return finished_players == len(self.players)", "def is_timed_out(self):\n if not self.config.competition_mode:\n return\n if self.end_time is None or self.start_time is None:\n return False\n return self.end_time - self.start_time > self.config.time_limits.game", "def can_challenge(self, challenger):\n try:\n return (\n challenger.userprofile.rank > self.rank >= (challenger.userprofile.rank - 2)\n and not self.has_open_challenge\n and challenger.userprofile.is_available\n )\n except UserProfile.DoesNotExist:\n return False", "def healthy_won(self):\n end_time = self.get_length()\n return self.timestamp_to_healthy[end_time] != 0 and self.timestamp_to_contaminated[end_time] == 0", "def is_my_turn(self):\r\n return len(self.valid_pos) != 0", "def hasWon(self, player: int) -> bool:\r\n\r\n return (self.fields[0][0] == player and self.fields[0][1] == player and\r\n self.fields[0][2] == player) or \\\r\n (self.fields[1][0] == player and self.fields[1][1] == player and\r\n self.fields[1][2] == player) or \\\r\n (self.fields[2][0] == player and self.fields[2][1] == player and\r\n self.fields[2][2] == player) or \\\r\n (self.fields[0][0] == player and self.fields[1][0] == player and\r\n self.fields[2][0] == player) or \\\r\n (self.fields[0][1] == player and self.fields[1][1] == player and\r\n self.fields[2][1] == player) or \\\r\n (self.fields[0][2] == player and self.fields[1][2] == player and\r\n self.fields[2][2] == player) or \\\r\n (self.fields[0][0] == player and self.fields[1][1] == player and\r\n self.fields[2][2] == player) or \\\r\n (self.fields[0][2] == player and self.fields[1][1] == player and\r\n self.fields[2][0] == player)", "def is_time_to_act(self):\n if self.act_hack:\n return False\n\n if self.count_to_actuate <= 0:\n return True\n else:\n return False", "def is_finished(self):\n return self.lives == 0 or all(char in self.guesses for char in self.word)", "def is_paused(self) -> bool:", "def is_assisted(self):\n return self.is_made and 
hasattr(self, \"player2_id\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
App Engine is starting a fresh instance - warm it up by loading the word database
def start():
    wdb = Wordbase.dawg()
    ok = u"upphitun" in wdb # Use a random word to check ('upphitun' means warm-up)
    logging.info(u"Start/warmup, instance {0}, ok is {1}".format(
        os.environ.get("INSTANCE_ID", ""), ok))
    return jsonify(ok = ok)
[ "def preload(self):\n self.db", "def start():\n app_svc = AppService.create(__package__)\n\n db_path = app_svc.config_svc[\"archive_db_path\"]\n init_db(db_path)\n\n app_svc.start(routes(config=app_svc.config_svc))", "def bootstrap(self):\n url = self.engine.url\n engine = create_engine(str(url))\n connection = None\n for i in range(10): # retries\n try:\n connection = engine.connect()\n except:\n print \"DBServer is probably not up yet, Retrying ...\"\n time.sleep(i * 5)\n continue\n if not connection:\n raise Exception(\"Couldn't connect to DBServer even after retries!\")\n\n self.Base.metadata.create_all(bind=self.engine)\n connection.close()", "def db():", "async def on_startup():\n app.state.ENGINE_READER = create_engine(\n settings.reader_connection_string, echo=settings.debug\n )\n app.state.ENGINE_WRITER = create_engine(\n settings.writer_connection_string, echo=settings.debug\n )\n app.state.DB_READER = sessionmaker(\n autocommit=False, autoflush=False, bind=app.state.ENGINE_READER\n )\n app.state.DB_WRITER = sessionmaker(\n autocommit=False, autoflush=False, bind=app.state.ENGINE_WRITER\n )", "def _initialize_db():\n # TODO(metzman): Most of the strings in this function should probably be\n # configurable.\n\n db_utils.initialize()\n # One time set up for any db used by FuzzBench.\n models.Base.metadata.create_all(db_utils.engine)\n\n # Now set up the experiment.\n with db_utils.session_scope() as session:\n experiment_name = 'oss-fuzz-on-demand'\n experiment_exists = session.query(models.Experiment).filter(\n models.Experiment.name == experiment_name).first()\n if experiment_exists:\n raise Exception('Experiment already exists in database.')\n\n db_utils.add_all([\n db_utils.get_or_create(models.Experiment,\n name=experiment_name,\n git_hash='none',\n private=True,\n experiment_filestore='/out/filestore',\n description='none'),\n ])\n\n # Set up the trial.\n trial = models.Trial(fuzzer=os.environ['FUZZER'],\n experiment='oss-fuzz-on-demand',\n benchmark=os.environ['BENCHMARK'],\n preemptible=False,\n time_started=scheduler.datetime_now(),\n time_ended=scheduler.datetime_now())\n db_utils.add_all([trial])", "def start_scrapping():\n start = time.clock()\n make_db()\n end = time.clock()\n print('time elapsed:', str(end-start))", "def load(self):\n self.db = info()", "def restart_scripts_after_flush():\n global FLUSHING_INSTANCES\n ScriptDB.objects.validate()\n FLUSHING_INSTANCES = False", "def init_database(web_service):\r\n with web_service.app_context():\r\n global __database\r\n mongo = PyMongo(web_service, config_prefix = 'MONGO')\r\n\r\n __database = mongo.db\r\n\r\n database_backup.setup()", "def setup_db():\n create_service_db()", "def sit_mysql():\n start_and_wait_mysql(db_service_name=\"mysql_sit\")", "def init_db():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n with app.open_resource('knowledge.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n with app.open_resource('users.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n \n db.commit()", "def seed_db():\n Seed().run()", "def generate_embeddings(self):\n print(\"Beginning Step#1: Embeddings Generation\")\n if self.embeddings_save_path == 'default':\n self.embedding_model = Embedding(self.database_name, self.database_path)\n else:\n self.embedding_model = Embedding(self.database_name, self.database_path, self.embeddings_save_path)\n if self.embedding_model.generate_embeddings():\n print(\"Embeddings generation is complete.\")\n 
print(\"\")", "def pub_initcache(db_path='elasticsearch_uploads.sqlite3'):\n dbconn = sqlite3.connect(db_path)\n # auto commit\n dbconn.isolation_level = None\n db = dbconn.cursor()\n\n create_tables(db)\n sys.stdout.write('db created at %s\\n' % db_path)", "def bootstrap_catalog(self):\n LoggingManager().log(\"Bootstrapping catalog\", LoggingLevel.INFO)\n init_db()", "def before_request():\n g.db = connect_db()", "def heavy_init(cls):\n cfg.CONF.set_default('connection', 'sqlite://', group='database')\n cfg.CONF.set_default('max_overflow', -1, group='database')\n cfg.CONF.set_default('max_pool_size', 1000, group='database')\n\n qinling_opts = [\n (config.API_GROUP, config.api_opts),\n (config.PECAN_GROUP, config.pecan_opts),\n (config.ENGINE_GROUP, config.engine_opts),\n (config.STORAGE_GROUP, config.storage_opts),\n (config.KUBERNETES_GROUP, config.kubernetes_opts),\n (None, [config.launch_opt])\n ]\n for group, options in qinling_opts:\n cfg.CONF.register_opts(list(options), group)\n\n db_api.setup_db()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A client channel has been connected
def channel_connected():
    chid = request.form.get('from', None)
    # logging.info(u"Channel connect from id {0}".format(chid).encode('latin-1'))
    # Mark the entity as being connected
    ChannelModel.connect(chid)
    return jsonify(ok = True)
[ "def connected(client):", "def connected(client):\r\n # Subscribe to changes on a feed named Counter.\r\n print('Subscribing to Feed {0}'.format(FEED_ID))\r\n client.subscribe(\"led1\")\r\n client.subscribe(\"led2\")\r\n print('Waiting for feed data...')", "def on_connected(self):\n log.debug('on_connected called.')", "def channelJoined(self, channel):", "def joined(self, channel):\n # ello", "def on_connect(self, client, userdata, rc):\n print(\"Connected with result code: \" + str(rc))\n self.subscribe(\"orchestra/glock\")", "async def channel_ready(self) -> None:\n state = self.get_state(try_to_connect=True)\n while state != grpc.ChannelConnectivity.READY:\n await self.wait_for_state_change(state)\n state = self.get_state(try_to_connect=True)", "def on_peer_connected(peer, peer_count):", "def on_welcome(self, c, e):\n print \"Connected to server successfully\"\n # Probably unnecessary, since sending a server password will\n # log us in regardless whether we have our main nick or not.\n c.privmsg(\"NickServ\",'IDENTIFY %s' % self.password)\n time.sleep(4) # Let identification succeed before joining channels\n c.join(self.channel)\n if self.listen and self.listenchannels:\n for chan in self.listenchannels:\n c.join(chan)", "def on_connect():\n\n print('User connected')", "def is_connected(self):\n return self.client is not None", "async def websocket_connected(ws):\n await ws.send_str(json.dumps({\"subject\": Subject.websocket.value, \"event\": Event.connected.value}))\n logger.debug(\"websocket: new connection from user %s\", ws.cirrina.web_session.get(\"username\"))", "def acknowledgeNewClient(self, client, server):\n print(\"New client connected: {}\".format(client))", "def pusher_connected(self, data):\n # Inform user that pusher is done connecting\n self.logger.info(\"Pusherclient connected\")\n\n # Bind the events we want to listen to\n self.callback_client.bind(\"payment_authorized\",\n self.payment_authorized)\n self.callback_client.bind(\"shortlink_scanned\",\n self.shortlink_scanned)", "def on_connect(self, client, userdata, flags, retcode):\n refresh = \"{}/{}\".format(self.root_topic, REFRESH)\n self.log.info(\n \"Connected with client %s, userdata %s, flags %s, and \"\n \"result code %s. Subscribing to refresh command topic %s\",\n client,\n userdata,\n flags,\n retcode,\n refresh,\n )\n\n self.connected = True\n\n # Publish the ONLINE message to the LWT\n self._publish_mqtt(ONLINE, self.lwt, True)\n\n # Resubscribe on connection\n for reg in self.registered:\n self.log.info(\"on_connect: Resubscribing to %s\", reg)\n self.client.subscribe(reg)\n\n # causes sensors to republish their states\n self.msg_processor(\"MQTT connected\")", "def connect(self):\n self.connected = True", "def onFrontConnected(self):\n self.connect_status = True\n self.on_event(type=EVENT_LOG, data=\"行情服务器连接成功\")\n self.login()", "def post(self):\n\n client_id = self.request.get('from')\n logging.info(\"Connecting client update channel \"+client_id)\n add_update_client(client_id)", "def on_user_join(self, nick, channel):\r\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Forces a tardy user to resign, if the game is overdue
def forceresign():
    user_id = User.current_id()
    if user_id is None:
        # We must have a logged-in user
        return jsonify(result = Error.LOGIN_REQUIRED)
    uuid = request.form.get('game', None)
    game = None if uuid is None else Game.load(uuid)
    if game is None:
        return jsonify(result = Error.GAME_NOT_FOUND)
    # Only the user who is the opponent of the tardy user can force a resign
    if game.player_id(1 - game.player_to_move()) != User.current_id():
        return jsonify(result = Error.WRONG_USER)
    try:
        movecount = int(request.form.get('mcount', "0"))
    except:
        movecount = -1
    # Make sure the client is in sync with the server:
    # check the move count
    if movecount != game.num_moves():
        return jsonify(result = Error.OUT_OF_SYNC)
    if not game.is_overdue():
        return jsonify(result = Error.GAME_NOT_OVERDUE)
    # Send in a resign move on behalf of the opponent
    return _process_move(game, [u"rsgn"])
[ "def lose_game(self):\n self.end_game(\"You have lost!\")", "def stop(self):\n self._state = Checkers.State.GAME_OVER # game is aborted\n self._eog = Checkers.EoG.ABORT\n self._winner = None\n self.tend = time.time()\n ts = time.localtime(self.tend)\n tstr = f\"{ts.tm_hour:02}:{ts.tm_min:02}:{ts.tm_sec:02}\"\n self.add_event_to_history(f\"ABORTED@{tstr}\")", "async def resign(self, ctx, user: discord.Member, team_name: str):\n franchise_role, tier_role = await self.team_manager_cog._roles_for_team(ctx, team_name)\n trans_channel = await self._trans_channel(ctx)\n gm_name = self._get_gm_name(ctx, franchise_role)\n message = \"{0} was re-signed by the {1} ({2} - {3})\".format(\n user.mention, team_name, gm_name, tier_role.name)\n\n if franchise_role not in user.roles or tier_role not in user.roles:\n try:\n await self.add_player_to_team(ctx, user, team_name)\n free_agent_roles = await self.find_user_free_agent_roles(ctx, user)\n if len(free_agent_roles) > 0:\n for role in free_agent_roles:\n await user.remove_roles(role)\n except Exception as e:\n await ctx.send(e)\n\n if trans_channel:\n await trans_channel.send(message)\n await ctx.send('Done')\n else:\n await ctx.send(\"Unable to complete transaction as transaction channel is not set.\")", "def resign_game(self):\n # Check for player turn and assign opposing player as winner.\n if self._player_turn == \"BLACK\":\n self.set_game_state(\"WHITE_WON\")\n else:\n self.set_game_state(\"BLACK_WON\")\n return self.get_game_state()", "def dance(self):\n self.send(\"DANCE\")", "def takeDamage(self):\n\t\tif self._invincible_time <= 0:\n\t\t\tself.health -= 1\n\t\t\t# make player invincible\n\t\t\tself._invincible_time = 2500 # invincible for 2.5s", "def gameEnd(self):\n self.game_screen.wheel.disableSpin()\n self.game_over = True\n print(\"Game over\")", "def end_lockdown():\n notice()", "def end_game():\n\tprint(\"Thank you for playing\")\n\texit()", "def event_m10_10_14010():\r\n \"\"\"State 0,2: Disabling specific damage\"\"\"\r\n SetDamageImmunityByCharacterId(601000, 210100100, 1)\r\n \"\"\"State 1: Finish\"\"\"\r\n EndMachine()\r\n Quit()", "def _check_and_kill(self):\n if not self._lost and self._miss_counter >= self._miss_limit:\n self._lost = True\n\n # If it has never been activated, kill it immediately.\n if not self._activated:\n self.visual_state = VisualState.KILLED", "def checkLives(self):\n if self._game.getTries() <= 0:\n self._state = STATE_COMPLETE\n #make a label that says game over, click to restart\n self._mssg = GLabel(x = GAME_WIDTH/3, y = GAME_HEIGHT/2.0, text='Lol, noob. Click to Start Again')\n self.draw()", "async def cancel(self, ctx):\r\n adv = await AssetCreation.getAdventure(self.client.pg_con, ctx.author.id)\r\n if adv is None:\r\n await ctx.reply('You aren\\'t travelling. 
Use `travel` to explore somewhere new!')\r\n return\r\n\r\n #If they're on an expedition, handle it differently\r\n if adv['destination'] == 'EXPEDITION':\r\n await self.completeExpedition(ctx, adv['adventure'])\r\n return \r\n\r\n current = int(time.time())\r\n if current >= adv['adventure']: #Then enough time has passed and the adv is complete\r\n low_bound = int((location_dict[adv['destination']]['CD']**1.5)/2500)\r\n high_bound = int((location_dict[adv['destination']]['CD']**1.6)/5000)\r\n gold = random.randint(low_bound, high_bound)\r\n xp = random.randint(low_bound, high_bound)\r\n acolyte_xp = int(xp / 10)\r\n getWeapon = random.randint(1,10)\r\n if getWeapon == 1:\r\n await AssetCreation.createItem(self.client.pg_con, ctx.author.id, random.randint(15, 40), \"Common\")\r\n\r\n #Class bonus\r\n role = await AssetCreation.getClass(self.client.pg_con, ctx.author.id)\r\n if role == 'Traveler':\r\n gold *= 3\r\n\r\n await AssetCreation.giveAdventureRewards(self.client.pg_con, xp, gold, adv['destination'], ctx.author.id)\r\n\r\n #Also give bonuses to acolytes if any\r\n acolyte1, acolyte2 = await AssetCreation.getAcolyteFromPlayer(self.client.pg_con, ctx.author.id)\r\n\r\n if acolyte1 is not None:\r\n await AssetCreation.giveAcolyteXP(self.client.pg_con, acolyte_xp, acolyte1)\r\n if acolyte2 is not None:\r\n await AssetCreation.giveAcolyteXP(self.client.pg_con, acolyte_xp, acolyte2)\r\n\r\n if getWeapon == 1:\r\n await ctx.reply(f\"You arrived at `{adv['destination']}`! On the way you earned `{xp}` xp and `{gold}` gold. You also found a weapon!\")\r\n else:\r\n await ctx.reply(f\"You arrived at `{adv['destination']}`! On the way you earned `{xp}` xp and `{gold}` gold.\")\r\n\r\n #Check for level ups\r\n await AssetCreation.checkLevel(self.client.pg_con, ctx, ctx.author.id, aco1=acolyte1, aco2=acolyte2)\r\n\r\n else:\r\n await AssetCreation.setAdventure(self.client.pg_con, None, None, ctx.author.id)\r\n await ctx.reply('You decided not to travel.')", "def decline_invitation(user: models.User, game: models.Game):\n if game.invited != user:\n raise RequestError(2111)\n _end_socket_session(game.host, game)\n game.delete_instance()", "def retry(self):\n return self.yes_no(\"Would you like to try again?\")", "def end_game():\n\n print(\"Thanks for playing! %s\" % USER_NAME)\n GPIO.cleanup()\n sys.exit(0)", "def leave_ingame(self):\n pass", "def die(self):\n super(Ship, self).die()\n self.game.end()", "def check_invincibility(self):\n if not self.hittable and self.time_hit + 1200 <= pygame.time.get_ticks():\n self.hittable = True\n else:\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set a user preference
def setuserpref():
    user = User.current()
    if user is None:
        # We must have a logged-in user
        return jsonify(result = Error.LOGIN_REQUIRED)
    # Check for the beginner preference and convert it to bool if we can
    beginner = request.form.get('beginner', None)
    if beginner is not None:
        if beginner == u"false":
            beginner = False
        elif beginner == u"true":
            beginner = True
    if beginner is not None and isinstance(beginner, bool):
        # Setting a new state for the beginner preference
        user.set_beginner(beginner)
    # Check for the ready state and convert it to bool if we can
    ready = request.form.get('ready', None)
    if ready is not None:
        if ready == u"false":
            ready = False
        elif ready == u"true":
            ready = True
    if ready is not None and isinstance(ready, bool):
        # Setting a new state for the ready preference
        user.set_ready(ready)
    # Check for the ready_timed state and convert it to bool if we can
    ready_timed = request.form.get('ready_timed', None)
    if ready_timed is not None:
        if ready_timed == u"false":
            ready_timed = False
        elif ready_timed == u"true":
            ready_timed = True
    if ready_timed is not None and isinstance(ready_timed, bool):
        # Setting a new state for the ready_timed preference
        user.set_ready_timed(ready_timed)
    user.update()
    return jsonify(result = Error.LEGAL)
[ "def user_preference(self, user_preference):\n\n self._user_preference = user_preference", "def set_pref(pref_name, pref_value):\n CFPreferencesSetValue(\n pref_name, pref_value, BUNDLE_ID,\n kCFPreferencesAnyUser, kCFPreferencesCurrentHost)\n CFPreferencesAppSynchronize(BUNDLE_ID)\n print \"set pref\"\n try:\n CFPreferencesSetValue(\n pref_name, pref_value, BUNDLE_ID,\n kCFPreferencesAnyUser, kCFPreferencesCurrentHost)\n CFPreferencesAppSynchronize(BUNDLE_ID)\n except Exception:\n pass", "def set_and_save_preference(name: str, value: Union[bool, Path, str]):\n set_preference(name, value)\n write_to_dotenv(\"OPENBB_\" + name, str(value))", "def set(key, value):\n\tglobals()[key] = value\n\tuser_conf.save(key, value)", "def set(key, value, description=\"\"):\n p = Preference.select(Preference.q.pref_key == key)\n if p.count() == 0:\n Preference(pref_key=key, \n pref_value=value,\n pref_description=description)\n else:\n p[0].pref_value = value\n if description:\n p[0].pref_description = description", "def set(option_type, user_id=None):", "def create_userpref(sender, **kwargs):\n user = kwargs[\"instance\"]\n if kwargs[\"created\"]:\n user_pref = UserPref(user=user)\n user_pref.save()", "def set_preference(request):\n if request.user.is_authenticated and request.is_ajax() and request.POST.get('video') != None:\n\n # Set a preference\n obj = Preference.objects.filter(user=request.user, video=request.POST.get('video'))\n \n # Either add new or update a preference\n # TODO: This saves neutral preferences as well, maybe we don't want that...\n if obj:\n obj.update(preference=request.POST.get('preference'))\n else:\n p = Preference(user=request.user, name='', video=request.POST.get('video'), preference=request.POST.get('preference'))\n p.save()\n return HttpResponse('ok')\n else:\n return HttpResponse('error')", "def set_pref_all(name, value):\n UserMetadata.delete().where(UserMetadata.key == name).execute()\n if value == \"1\":\n for user in User.select():\n UserMetadata.create(uid=user.uid, key=name, value=\"1\")", "def set_user(self, user):\n self.user = user", "def set_resident_key(self, value:bool):\n self._prefs[PREFERENCE_KEY.RK.value] = value\n self._write_prefs()", "def setPrefs(key, value):\n import ij.Prefs\n ij.Prefs.set(key, str(value))\n ij.Prefs.savePreferences()", "def putProfileSetting(name, value):\n\tglobal settingsDictionary\n\tif name in settingsDictionary and settingsDictionary[name].isProfile():\n\t\tsettingsDictionary[name].setValue(value)", "def set_user_mode(self, nickname, mode):\n \n self.users[nickname] = (self.get_user(nickname), mode)", "def set_profile_variable(self, request, key, value):\r\n\r\n profile_id = self._get_profile_id(request.AUTHENTICATED_USER)\r\n settings = self.__profiles.get(profile_id, {})\r\n if key in settings:\r\n value = eval_valuestring(value, settings[key])\r\n if not value:\r\n return\r\n\r\n settings[key] = value\r\n profiles = self.__profiles\r\n profiles[profile_id] = settings\r\n self.__profiles = profiles", "def setValue(self, value):\n self.userInput.setValue(value)", "def write_pref(pref_name, pref, s3_bucket, prefs_path='metadata/Preferences.plist'):\n prefs_plist = read_plist_s3(prefs_path, s3_bucket)\n prefs_plist[pref_name] = pref\n write_plist_s3(prefs_plist, prefs_path, s3_bucket)", "def save_preferences(request):\n if (request.method == 'POST'):\n cur_user = request.user\n info = UserPreferences.objects.get(user=cur_user)\n\n if (request.POST['recycle_lifetime']):\n info.recyclebin_lifetime = 
request.POST['recycle_lifetime']\n\n if (request.POST['first_name']):\n cur_user.first_name = request.POST['first_name']\n\n if (request.POST['last_name']):\n cur_user.last_name = request.POST['last_name']\n\n info.save()\n cur_user.save()\n\n messages.success(request, \"User Info updated.\")\n return redirect('dashboard')", "async def set_user_admin(self, userid, value):\n raise NotImplementedError()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load all chat messages on a conversation channel
def chatload():
    if not User.current_id():
        # We must have a logged-in user
        return jsonify(ok = False)
    channel = request.form.get('channel', u"")
    messages = []
    if channel:
        # Return the messages sorted in ascending timestamp order.
        # ChatModel.list_conversations returns them in descending
        # order since its maxlen limit cuts off the oldest messages.
        messages = [
            dict(
                from_userid = cm["user"],
                msg = cm["msg"],
                ts = Alphabet.format_timestamp(cm["ts"])
            )
            for cm in sorted(ChatModel.list_conversation(channel), key=lambda x: x["ts"])
        ]
    return jsonify(ok = True, messages = messages)
[ "def get_messages(self, channel):\n # needed to avoid API rate limits\n time.sleep(10)\n\n try:\n room = self.connection.get_room(channel.name)\n except hypchat.requests.HttpNotFound as e:\n logger.error(\n \"room %s at %s not found\" % (channel.name, self.server))\n return None\n except requests.exceptions.ConnectionError as e:\n self.connection = hypchat.HypChat(self.token, endpoint=self.server)\n room = self.connection.get_room(channel.name)\n except hypchat.requests.HttpGatewayTimeout as e:\n self.connection = hypchat.HypChat(self.token, endpoint=self.server)\n room = self.connection.get_room(channel.name)\n try:\n messages = list(room.history(maxResults=90).contents())\n except hypchat.requests.HttpGatewayTimeout as e:\n logger.error(e)\n return\n old_cursor = channel.cursor\n logger.info(\n \"Fetching message from %s (%s)\" % (channel.name, self.server))\n scrap_counter.labels('hipcat', room['name']).inc()\n for message in messages:\n d = message['date']\n message_date = datetime(\n d.year, d.month, d.day,\n d.hour, d.minute, d.second, d.microsecond,\n None\n )\n if message_date <= old_cursor:\n continue\n if message_date > old_cursor:\n old_cursor = message_date\n if type(message['from']) == unicode:\n msg = \"%s@%s | %s\" % \\\n (message['from'], channel.name, message['message'])\n else:\n msg = \"%s@%s | %s\" % \\\n (message['from']['name'],\n channel.name, message['message'])\n if channel.include_pattern and \\\n not self.match_pattern(\n channel.include_pattern, message['message']):\n msg = 'Message skipped as not in include_pattern'\n logger.info(msg)\n channel.cursor = old_cursor\n continue\n self.enqueue(queue=channel.queue, message=msg)\n read_msg_counter.labels('hipchat', room['name']).inc()\n channel.cursor = old_cursor", "def get_messages(self, channel):\n\n def datetime_to_ts(date):\n return (date - datetime(1970, 1, 1)).total_seconds()\n\n def ts_to_datetime(ts):\n return datetime.fromtimestamp(float(ts))\n try:\n _channel = self.get_channel(channel)\n except ValueError as e:\n logger.error(\"channel %s at %s not found\" %\n (channel.name, self.server))\n return None\n logger.info(\"Fetching message from %s (%s)\" %\n (channel.name, self.server))\n if 'is_group' in _channel and _channel['is_group']:\n api_uri = 'groups.history'\n else:\n api_uri = 'channels.history'\n\n if channel.cursor_ts == 0:\n channel.cursor_ts = datetime_to_ts(channel.cursor)\n\n try:\n raw = self.connection.api_call(\n api_uri,\n channel=_channel['id'],\n oldest=channel.cursor_ts)\n except Exception as e:\n logger.exception(e)\n return\n resp = json.loads(json.dumps(raw))\n old_cursor = channel.cursor_ts\n scrap_counter.labels('slack', channel.name).inc()\n for message in resp['messages']:\n d = message['ts']\n message_date = ts_to_datetime(d) # FIXME: can we safely remove this unused variable ?\n\n if d <= old_cursor:\n continue\n if d > old_cursor:\n old_cursor = d\n if message['type'] == 'message':\n try:\n user = self.get_user(message['user'])\n userName = user['name']\n except:\n userName = message['username']\n msg = \"%s@%s | %s\" % \\\n (userName, channel.name, BeautifulSoup(message['text'], \"html.parser\").text)\n self.enqueue(queue=channel.queue, message=msg)\n read_msg_counter.labels('slack', channel.name).inc()\n channel.cursor_ts = old_cursor", "def _load_selfchats(self) -> Dict[str, List[Dict[str, Any]]]:\n conversations = {}\n for m in self.models:\n model_fp = self.chat_files[m]\n conversations[m] = []\n with open(model_fp) as f_read:\n for line in f_read:\n 
conversations[m].append(json.loads(line.strip()))\n return conversations", "def get_all_chats(self):\n\n with ChatMapper() as mapper:\n return mapper.find_all()", "def get_messages():\n lst_messages = General_chat.query.order_by(General_chat.chatID.asc()).all()\n return lst_messages", "async def load_chats(self) -> None:\n self.users = await self._get_chats(TYPE_USER)\n self.forums = await self._get_chats(TYPE_FORUM)\n self.teams = await self._get_chats(TYPE_TEAM)\n if self._cache:\n self._cache.save(TYPE_USER, self.users)\n self._cache.save(TYPE_FORUM, self.forums)\n self._cache.save(TYPE_TEAM, self.teams)", "def get_messages_by_channel(self, channel):\r\n return self.filter(db_receivers_channels=channel).exclude(db_hide_from_channels=channel)", "def get_channel_messages(\n cl: slack.WebClient,\n channel_id: str,\n oldest: datetime = None,\n limit=100000) -> Optional[List[dict]]:\n\n # to unixtime\n oldest = oldest or datetime.now() - timedelta(days=1)\n oldest = int(time.mktime(oldest.utctimetuple()))\n\n answer = cl.conversations_history(channel=channel_id, oldest=oldest, limit=limit)\n\n # TODO: logging & error handling & \"'has_more': True\" handling\n messages = answer['messages']\n\n return messages", "def collect_new_messages(self):\n new_msgs = self.client_session.fetch_new_messages(\n len(self.chat_messages_of_session))\n for each in new_msgs:\n self.chat_messages_of_session.append(each)", "def get_messages(self, channel_name, days):\n oldest = self.now - days * 86400\n cid = self.slacker.get_channelid(channel_name)\n\n if oldest in self.cache.get(cid, {}):\n self.logger.debug(\"Returning %s cached messages for #%s over %s days\", len(self.cache[cid][oldest]), channel_name, days)\n return self.cache[cid][oldest]\n\n messages = self.slacker.get_messages_in_time_range(oldest, cid)\n self.logger.debug(\"Fetched %s messages for #%s over %s days\", len(messages), channel_name, days)\n\n messages = [x for x in messages if x.get(\"subtype\") is None or x.get(\"subtype\") in self.config.included_subtypes]\n self.logger.debug(\"Filtered down to %s messages based on included_subtypes: %s\", len(messages), \", \".join(self.config.included_subtypes))\n\n if cid not in self.cache:\n self.cache[cid] = {}\n self.cache[cid][oldest] = messages\n\n return messages", "def get_all_chats():\n return pd.read_csv(file_names['CHATS'])", "async def download_all_messages(\n self, conversation, conversation_in_db, event_id=None\n ):\n while True:\n downloaded_messages = []\n new_messages = []\n # Filter out message types that we're not interested in.\n for event in await self.download_message_batch(conversation, event_id):\n if isinstance(event, ChatMessageEvent):\n downloaded_messages.append(event)\n else:\n logger.verbose(\n \"Ignoring unsupported message type (%s) ..\", type(event)\n )\n # Process the messages in reverse chronological order because this\n # is how the Google Hangouts API works and staying as consistent\n # as possible with that should guarantee that we don't cause gaps.\n for event in sorted(\n downloaded_messages, key=lambda e: event.timestamp, reverse=True\n ):\n attributes = dict(\n conversation=conversation_in_db,\n external_id=event.id_,\n html=self.get_message_html(event),\n text=event.text,\n timestamp=event.timestamp,\n )\n # Messages from unknown senders (without unique identification)\n # are stored in the local database without an associated contact.\n if event.user_id.gaia_id not in self.bogus_user_ids:\n attributes[\"sender\"] = self.find_contact_by_external_id(\n 
event.user_id.gaia_id\n )\n created, message = self.get_or_create_message(**attributes)\n if created:\n new_messages.append(message)\n if not new_messages:\n return\n # Continue searching for older messages based on the event id\n # of the oldest message in the set of new messages that we've\n # just downloaded.\n new_messages = sorted(new_messages, key=lambda m: m.timestamp)\n event_id = new_messages[0].external_id\n logger.verbose(\"Searching for new messages older than %s ..\", event_id)\n # Commit every set of newly downloaded chat messages to disk\n # immediately, so that we don't have to download messages more\n # than once when we crash due to rate limiting or other API\n # errors emitted by the Hangouts API.\n self.archive.commit_changes()\n # FIXME Poor man's rate limiting :-).\n logger.info(\"Sleeping for a second ..\")\n time.sleep(1)", "def read_messages(self, entity):\n messages = self.return_message_history()\n if messages != None:\n for i in range(len(messages)-1, -1, -1): #Correcting the output order by reversing it\n message = messages[i]\n try:\n if message.sender != None and isinstance(message, types.Message) and not message.sender.bot: #only reads messages not send by bots\n self.group_checker(message.message) #Checks if group mentions are in messages\n self.msg_count += 1\n self.output += \"{channel} ~~~ {chatid} {date} {messageid} {user} {reply_to} ~~~ {content} \\n\".format(\n channel=self.dialog.name, #Dialog Name\n chatid=self.dialog.entity.id, #Dialog id\n date=message.date, #Message date\n user=message.from_id, #Sender ID\n messageid=message.id,\n reply_to=message.reply_to_msg_id if message.reply_to_msg_id != None else 0,\n content=message.message, #content\n )\n self.client.send_read_acknowledge(entity, message=message) #marks messages of whole entity as read\n except AttributeError as e:\n print(e)\n except errors.rpc_error_list.RpcCallFailError as e: #Telegram internal error\n print(e)\n time.sleep(2)\n self.read_messages(entity)", "def list_messages(self):\r\n \r\n UI = self._input_ui.get()\r\n if self._lastselectedfriend == None:\r\n return \r\n friendname = self._lastselectedfriend\r\n participants = [UI, friendname]\r\n \r\n msg=['download chat history', participants]\r\n encoded = json.dumps(msg) \r\n self._client._s.send(encoded)\r\n\r\n encoded_chat = self._client._s.recv(4096)\r\n unencoded = json.loads(encoded_chat)\r\n if self._current_chat_history != unencoded:\r\n self._current_chat_history = unencoded\r\n self.show_chat()\r\n self._chatdisplay.see(tk.END)", "def fetch_all_channels(self):\n try:\n response = self.client.conversations_list()\n channels = response['channels']\n except SlackApiError as error:\n self.logger.warning(\n f\"slack {self.fetch_all_channels.__name__} request failed and raised error: {error.response['error']}\")\n return channels", "def _fetch(self):\n\t\trequest = self._connection.get('conversations.json')\n\n\t\tif request.status_code != 200:\n\t\t\traise errors.DiaspyError('wrong status code: {0}'.format(request.status_code))\n\t\tmailbox = request.json()\n\t\tself._mailbox = [models.Conversation(self._connection, c['conversation']['id']) for c in mailbox]", "def get_conversations(self):\n url, querystring, headers = hdc.create_api_request()\n\n url = url + \"/\" + self.hdt_id + \"/conversation\"\n\n querystring['OPERATION_NAME'] = \"GET_CONVERSATIONS\"\n del querystring['INPUT_DATA']\n # print querystring\n ticket_conversation_details = hdc.fetch_from_helpdesk(url, querystring, headers)\n\n self.conversations = 
list(ticket_conversation_details)\n return self.conversations", "def load_chatrooms(self, always_join):\n log.msg(\"load_chatrooms() called...\")\n df = self.dbpool.runInteraction(\n botutil.load_chatrooms_from_db, self, always_join\n )\n # Send a presence update, which in the case of the first login will\n # provoke any offline messages to be sent.\n df.addCallback(self.send_presence)\n df.addErrback(botutil.email_error, self, \"load_chatrooms() failure\")", "def retrieve_chat_messages(chat_ids, chat_info, from_gui = False):\n dm_analysis_results = {}\n for dm in chat_ids:\n print(\"Analyzing Direct Message Thread With User: \" + chat_info[dm][0])\n messages = []\n r = requests.get(\"https://api.groupme.com/v3/direct_messages?token=\" + TOKEN + \"&other_user_id=\" + str(dm))\n messages.append(r.json()['response']['direct_messages'][0])\n message_id = messages[0]['id']\n while True:\n r = requests.get(\"https://api.groupme.com/v3/direct_messages?token=\" + TOKEN + \"&other_user_id=\" + str(dm) + \"&before_id=\" + str(message_id))\n try:\n if len(r.json()['response']['direct_messages']) == 0:\n break\n messages += r.json()['response']['direct_messages']\n message_id = messages[-1]['id']\n except ValueError:\n break\n print(\"\\rRetrieved \" + str(len(messages)) + \"/\" + str(chat_info[dm][1]) + \" messages.\", end='')\n dm_analysis_results[dm] = messages\n print(\"\")\n return dm_analysis_results" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Show game review page
def review():
    # This page does not require - and should not require - a logged-in user
    game = None
    uuid = request.args.get("game", None)
    if uuid is not None:
        # Attempt to load the game whose id is in the URL query string
        game = Game.load(uuid)
    if game is None or not game.is_over():
        # The game is not found: abort
        return redirect(url_for("main"))
    try:
        move_number = int(request.args.get("move", "0"))
    except:
        move_number = 0
    if move_number > game.num_moves():
        move_number = game.num_moves()
    elif move_number < 0:
        move_number = 0
    state = game.state_after_move(move_number if move_number == 0 else move_number - 1)
    best_moves = None
    if game.allows_best_moves():
        # Serialize access to the following section
        with _autoplayer_lock:
            # Show best moves if available and it is proper to do so (i.e. the game is finished)
            apl = AutoPlayer(state)
            best_moves = apl.generate_best_moves(19) # 19 is what fits on screen
    player_index = state.player_to_move()
    user = User.current()
    if user and game.has_player(user.id()):
        # Look at the game from the point of view of this player
        user_index = game.player_index(user.id())
    else:
        # This is an outside spectator: look at it from the point of view of
        # player 0, or the human player if player 0 is an autoplayer
        user_index = 1 if game.is_autoplayer(0) else 0
    return render_template("review.html", game = game, state = state,
        player_index = player_index, user_index = user_index,
        move_number = move_number, best_moves = best_moves)
[ "def render_review_page():\n title = 'Review'\n pending = Record.get_all_pending_records()\n return render_template('review.html', page_title=title, pending=pending)", "def display_make_game():\n return render_template('make_games.html')", "def game_page(request, pk):\n owns_game = request.user.game_set.filter(game_id=pk)\n if owns_game.count() == 0:\n return redirect('failed_test')\n else:\n game = request.user.game_set.get(game_id=pk)\n teams = request.user.profile.team_set.all()\n return render(\n request, 'game/game_page.html', {'game': game, 'teams': teams})", "def add_review():\n if request.method == \"POST\":\n submit = {\n \"game_name\": request.form.get(\"game_name\"),\n \"review_description\": request.form.get(\"review_description\"),\n \"created_date\": datetime.today().strftime('%d-%m-%Y'),\n \"updated_date\": datetime.today().strftime('%d-%m-%Y'),\n \"created_by\": session[\"user\"]\n }\n mongo.db.reviews.insert_one(submit)\n flash(\"Review Successfully Added\")\n if get_acc_type() == \"admin\":\n return redirect(url_for(\"admin\", username=get_user()))\n else:\n return redirect(url_for(\"profile\", username=get_user()))\n\n games = mongo.db.games.find().sort(\"name\", 1)\n\n return render_template(\"add_review.html\",\n games=games,\n username=get_user(),\n acc_type=get_acc_type())", "def review_view(request):\n return render(request, 'front/review.html')", "def view_game():\n\n return render_template(\"pages/index.html\")", "def render_game_page():\n\n return render_template('tanks.html')", "def scorecard(request):\n data = Representative.objects.all()\n \n return render(request, 'base/scorecard.html', {'data': data})", "def game_details(id):\n game = Game.query.filter(\n Game.api_id == id).first()\n\n if not game:\n game = add_game_to_db(id)\n\n collection_api_ids = get_collection_api_ids(g.user)\n\n return render_template('game_detail.html', game=game, collection_api_ids=collection_api_ids)", "def add_review(): \n return render_template('addReview.html')", "def test_reviewer_can_view_votes_tab(self):\n self._add_to_reviewers_group()\n response = self.client.get(\n reverse(self.reverse_view_name, args=self.reverse_view_args)\n )\n self.assertContains(response, \"proposal-reviews\")", "def game_master_page(request):\n \"\"\"load game master page\"\"\"\n\n route_list = Routes.objects.all()\n questions = Questions.objects.all()\n games = Gamecode.objects.all()\n return render(request, 'app/game_master_page.html',{\"route_list\":route_list,\"questions\":questions,\"games\":games})", "def show_leaderboard():\n \n season = current_app.config['CURRENT_SEASON']\n region = request.args.get('region', '')\n mode = request.args.get('mode', '')\n \n if not region: \n region = 'pc-krjp'\n if not mode:\n mode = 'solo'\n \n if mode == 'solo': \n mod = SoloStats \n elif mode == 'duo':\n mod = DuoStats\n elif mode == 'squad':\n mod = SquadStats\n else:\n return redirect(url_for('.show_leaderboard'))\n \n leaders = dao.query(mod).\\\n filter_by(season=season).\\\n filter_by(gameType=mode).\\\n join(mod.player).\\\n filter(Player.region==region).\\\n order_by(desc(mod.rating)).\\\n limit(100).all()\n \n return render_template('leaderboard.html', mode=mode,\n region=region,\n leaders=leaders)", "def show_game_outcome(self, winner):\n self.output.show_content(\"\\n\")\n self.output.show_banner()\n if winner is None:\n self.output.show_content(\"Game Over\")\n else:\n self.output.show_content(f\"Game Over: {winner} wins the game!\")\n self.output.show_content(\"Final scores:\")\n 
self.output.show_content(f\"\\t{self.p1.name()}: \".ljust(15) +\n f\"{self.p1_score} rounds\")\n self.output.show_content(f\"\\t{self.p2.name()}: \".ljust(15) +\n f\"{self.p2_score} rounds\")\n self.output.show_content(f\"\\tTies: \".ljust(15) +\n f\"{self.number_ties} rounds\")\n self.output.show_line(\"\\t\")\n self.output.show_content(\"\\tTotal rounds: \".ljust(15) +\n f\"{self.rounds_played}\")\n self.output.show_banner()", "def LeaderBoards():\n if 'user' in session:\n \n Beginner=mongo.db.tblBeginner.find({ \"$query\": {}, \"$orderby\": { 'Moves' : 1, 'Time': 1 }}).limit(10)\n Expert=mongo.db.tblExpert.find({ \"$query\": {}, \"$orderby\": { 'Moves' : 1 , 'Time': 1 }}).limit(10)\n GamesPlayed=mongo.db.tblGamesPlayed.find({ \"$query\": {}, \"$orderby\": { 'Games' : -1 }}).limit(10)\n \n return render_template('LeaderBoards.html', beginnerList=Beginner, expertList=Expert, Games=GamesPlayed, user=session['user'])\n else:\n return render_template('error.html',error = 'Unauthorized Access')", "def show_game(self):\n self.G.show_board() # Call the graph's show_board.", "def review_index(request):\n context = {'reviews': Review.objects.all()}\n return render(request, 'reviews/review_index.html', context)", "def review_submit_view(request):\n return render(request, 'front/review_submit.html')", "def generate_overview(self, player):\r\n if self.players == 1:\r\n if player == 1:\r\n playing = 'Atharva'\r\n else:\r\n playing = 'You'\r\n else:\r\n playing = 'Player {number}'.format(number=player+1)\r\n print(\"#####################################################################\")\r\n print(\r\n f'{playing} Total Runs : {self.runs[player]}\\t\\tWickets: {self.wickets[player]}')\r\n print(\"#####################################################################\\n\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the largest Collatz sequence under num.
def euler14(num: int) -> int:
    longest = 1, 1
    for i in range(num // 2, num):
        c = collatz_sequence_length(i)
        if c > longest[1]:
            longest = i, c
    return longest
[ "def longest_collatz_sequence_below_n(n):\n lookup = dict()\n for i in range(n):\n if i % 100000 == 0:\n print(i)\n collatz_sequence_length(i, lookup)\n\n max_key, max_value = max(lookup.items(), key=(lambda kv: kv[1]))\n return max_key, max_value", "def find_collatz_stopping_time(num):\n validate_integers(num)\n chain_length = 1\n while num != 1:\n if num % 2 == 0:\n num = num // 2\n else:\n num = num*3 + 1\n chain_length += 1\n return chain_length", "def find_largest_prime_factor(num):\n\n largest_prime_factor = 0\n # special case: divide by two as much as you can\n while num % 2 == 0:\n num /= 2\n largest_prime_factor = 2\n div = 3\n # divide till there is no number left\n while div <= num:\n if num % div == 0:\n num /= div\n largest_prime_factor = div\n div += 2\n\n return largest_prime_factor", "def largestChain(self, number):\n\n for i in range(1, number):\n self.chainLength(i)\n sorted_values = sorted(self.cache, key=self.cache.get, reverse=True)\n return (sorted_values[0])", "def largest_pf(num):\n\ti = 2\n\tlargest_div = 1\n\tmax_factor = int(sqrt(num))\n\twhile(num > i):\n\t\tif num % i == 0:\n\t\t\tnum = num/i\n\t\t\ti = 2\n\t\t\tmax_factor = int(sqrt(num))\n\t\telse:\n\t\t\ti += 1\n\treturn i", "def largestMCFinderJit( flow1na, MCna ):\n\n # Select the maximum flow level\n maxFlow = max(MCna[:,2])\n\n mcIDMAX = 0\n nDaughtersMAX = 0\n for mcID in flow1na[:,0]:\n nDaughters = recursiveDaughterCounter( mcID, MCna )\n if nDaughters > nDaughtersMAX:\n mcIDMAX = mcID\n nDaughtersMAX = nDaughters\n\n\n return mcIDMAX", "def find_largest_digit(n):\n\t# Compared with other int, max_int will update to the biggest\n\tmax_int = 0\n\treturn find_largest_digit_helper(n, max_int)", "def collatz_eval (i, j) :\n assert(i > 0)\n assert(j > 0)\n start = min(i, j)\n end = max(i, j)\n max_cycle_length = 1\n # Optimization - if start is less than half of end, then max cycle length is same as end/2, end\n if start < (end >> 1) :\n start = end >> 1 \n for n in range(start, end+1) :\n cycles = collatz_cycles(n)\n if cycles > max_cycle_length: max_cycle_length = cycles \n assert(max_cycle_length > 0)\n return max_cycle_length", "def largest_prime_factor(number):\n prime_factor = 2\n while number >= prime_factor**2:\n if number % prime_factor == 0:\n number //= prime_factor\n else:\n prime_factor += 1\n return number", "def collatz_len(n):\n if n == 1:\n return 1\n else:\n return 1 + collatz_len(collatz_step(n))", "def compute_largest_prime_factor(n):\t\n\tit = 2\n\twhile it < math.sqrt(n):\n\t\tif n % it == 0:\n\t\t\tbreak \n\t\tit += 1\n\n\t# w\n\tif it > math.sqrt(n):\n\t\treturn n\n\telse:\n\t\treturn max(it, compute_largest_prime_factor(n/it))", "def largest_perm(n):\n\t# p = sorted(set(int(''.join(x)) for x in permutations(str(n))))\n\treturn int(''.join(sorted(str(n), reverse=True)))", "def max_of_three(num1,num2,num3):\n\n\tgreatest = num1\n\tif num2 > greatest:\n\t\tgreatest = num2\n\tif num3 > greatest:\n\t\tgreatest = num3\n\n\treturn greatest", "def biggest_number(*args):\n print(max(args))\n return max(args)", "def find_longest_chain(num):\n\n max_number = None\n max_length = 0\n\n for n in range(num, 1, -1):\n length = calc_chain_len(n)\n #print \"DEBUG {}: {} {}\".format(n, length, build_chain(n))\n if length > max_length:\n max_length = length\n max_number = n\n\n return max_number, max_length", "def collatz(n):\n sequence = [n]\n while (n > 1):\n n = collatz_step(n)\n sequence.append(n)\n return sequence", "def max_number(self, table):\n exp = \"\"\"SELECT MAX(num) FROM %s\"\"\" % 
table\n\n # try:\n cur = self.conn.cursor()\n cur.execute(exp)\n\n return cur.fetchone()\n # except Exception:\n # return None", "def max_multiplicand(digit_count: int) -> int:\n return int(pandigit_string[digit_count::-1])", "def fahrentheit_to_celsuis(n):\n return n-32*5/9" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test profile_creation_date and install_date are properly reformatted relative to their previous values
def test_unix_dates(aggregate_data):
    # true values as defined in ms-test-data.json
    # min install_day - pcd
    true_client_days_to_install = {
        1: {"min_install_day": 16890 - 15000},
        2: {"min_install_day": None},
        3: {"min_install_day": 16800 - 15002},
        4: {"min_install_day": None},
    }
    for client_id, value in true_client_days_to_install.items():
        data = aggregate_data.filter(aggregate_data.client_id == client_id).collect()[0]
        first_install = data.first_addon_install_date
        pcd = data.profile_creation_date
        print(first_install, pcd)
        fmt = "%Y%m%d"
        days_to_install = (
            (
                dt.datetime.strptime(first_install, fmt)
                - dt.datetime.strptime(pcd, fmt)
            ).days
            if first_install is not None
            else None
        )
        assert (
            true_client_days_to_install[client_id]["min_install_day"]
            == days_to_install
        )
[ "def test_pretty_datetime(self):\n now = datetime.now()\n nf_core.list.pretty_date(now)\n now_ts = time.mktime(now.timetuple())\n nf_core.list.pretty_date(now_ts)", "def add_date_info(out_dict):\n out_dict.update({'# Run Date': strftime('%Y_%m_%d_%H_%M_%S', localtime())})\n return out_dict", "def install_date(self) -> str:\n return self._install_date", "def _creation_date(self) -> str:\n return maya.parse(self.metadata.get(\"creationDate\")).iso8601().replace(\":\", \"\")", "def test_bolg_creation_date(self):\n\t\tself.assertEqual(str(self.test_bolg['created'].date()), self.testbolg_created)", "def demo_restore_date_created(commcare_user):\n if commcare_user.is_demo_user:\n restore = DemoUserRestore.objects.get(id=commcare_user.demo_restore_id)\n if restore:\n return restore.timestamp_created", "def pytestcase_test_metavision_software_date():\n\n # Get expected output\n commit_command = \"@GIT_COMMAND_GET_COMMIT_DATE_QUOTES_ESCAPED@\"\n expected_output, error, err_code = shell_tools.execute_cmd(commit_command)\n assert err_code == 0, \"******\\nError while executing cmd '{}':{}\\n{}\\n******\".format(\n commit_command, expected_output, error)\n\n # Run app\n cmd = \"./metavision_software_info --date\"\n output, error_code = pytest_tools.run_cmd_setting_mv_log_file(cmd)\n\n # Check app exited without error\n assert error_code == 0, \"******\\nError while executing cmd '{}':{}\\n******\".format(cmd, output)\n\n # Check that we get the expected output\n assert output.rstrip() == expected_output.rstrip()", "def test__build_guild_profile_description(guild_profile, current_date_time):\n DateTimeMock.set_current(current_date_time)\n mocked = vampytest.mock_globals(\n build_guild_profile_description, 5, {'DateTime': DateTimeMock, 'isinstance': is_instance_mock}\n )\n \n return mocked(guild_profile)", "def test__build_user_with_guild_profile_description(user, guild_profile, current_date_time):\n DateTimeMock.set_current(current_date_time)\n mocked = vampytest.mock_globals(\n build_user_with_guild_profile_description, 6, {'DateTime': DateTimeMock, 'isinstance': is_instance_mock}\n )\n \n return mocked(user, guild_profile)", "def test_collect_new_manifest_initial_ingest(self):\n partition_date = datetime.date.today()\n export_time = datetime.datetime.now()\n current_manifests = {}\n bigquery_mapping = {partition_date: export_time}\n expected_bill_date = partition_date.replace(day=1)\n expected_assembly_id = f\"{partition_date}|{export_time}\"\n expected_filename = f\"{expected_bill_date.strftime('%Y%m')}_{partition_date}\"\n downloader = self.downloader\n new_manifests = downloader.collect_new_manifests(current_manifests, bigquery_mapping)\n for manifest_metadata in new_manifests:\n self.assertEqual(manifest_metadata[\"bill_date\"], expected_bill_date)\n self.assertEqual(manifest_metadata[\"assembly_id\"], expected_assembly_id)\n self.assertEqual(manifest_metadata[\"files\"], [expected_filename])", "def test_creation_assert_codex_date_updated(self):\n self.assertEqual(self.information.update_date, self.codex.nested_update_date)", "def test_collect_new_manifest_no_bigquery_update(self):\n partition_date = datetime.date.today()\n export_time = datetime.datetime.now()\n current_manifests = {partition_date: export_time}\n bigquery_mapping = {partition_date: export_time}\n expected_bill_date = partition_date.replace(day=1)\n expected_assembly_id = f\"{partition_date}|{export_time}\"\n expected_filename = f\"{expected_bill_date.strftime('%Y%m')}_{partition_date}\"\n downloader = self.downloader\n 
new_manifests = downloader.collect_new_manifests(current_manifests, bigquery_mapping)\n for manifest_metadata in new_manifests:\n self.assertEqual(manifest_metadata[\"bill_date\"], expected_bill_date)\n self.assertEqual(manifest_metadata[\"assembly_id\"], expected_assembly_id)\n self.assertEqual(manifest_metadata[\"files\"], [expected_filename])", "def test_nextdate_c2(self):", "def test_creation_assert_codex_date_updated(self):\n self.assertEqual(self.page.nested_update_date, self.codex.nested_update_date)", "def test_get_manifest_context_for_date(self):\n self.maxDiff = None\n dh = DateHelper()\n start_date = dh.this_month_start\n invoice_month = start_date.strftime(\"%Y%m\")\n p_uuid = uuid4()\n expected_assembly_id = f\"{p_uuid}:{self.etag}:{invoice_month}\"\n downloader = self.create_gcp_downloader_with_mocked_values(provider_uuid=p_uuid)\n expected_files = create_expected_csv_files(\n dh.this_month_start.date(), downloader.scan_end, invoice_month, self.etag, True\n )\n with patch(\n \"masu.external.downloader.gcp.gcp_report_downloader.GCPReportDownloader._process_manifest_db_record\",\n return_value=2,\n ):\n report_dict = downloader.get_manifest_context_for_date(start_date.date())\n self.assertEqual(report_dict.get(\"manifest_id\"), 2)\n self.assertEqual(report_dict.get(\"files\"), expected_files)\n self.assertEqual(report_dict.get(\"compression\"), UNCOMPRESSED)\n self.assertEqual(report_dict.get(\"assembly_id\"), expected_assembly_id)", "def backup_quality_profile(self, language, qualityProfile, organization):", "def creation_date(context: dict) -> str:\n return datetime.fromtimestamp(os.path.getctime(context['item_path'])).strftime('%Y-%m-%d %H:%M')", "def _config_is_in_new_format(self, config):\r\n return any([profile_data for profile_data in config.values() \\\r\n if \"date_modified\" in profile_data])", "def test_basic_file_name_to_date_full_ungridded(self):\n\n self.assertTrue(\n evaluation.basic_file_name_to_date(\n BASIC_FILE_NAME_FULL_UNGRIDDED\n ) ==\n VALID_DATE_STRING\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the aggregation by channel, locale and app_version.
def test_agg_by_channel_locale_and_version(aggregate_data):
    # true values as defined in ms-test-data.json
    true_values = {
        "normalized_channel": {"release": 1, "beta": 2, "nightly": 1},
        "locale": {"en-US": 2, "de": 1, "ru": 1},
        "app_version": {"57": 2, "56": 1, "58": 1},
    }
    for grouping_field in ("normalized_channel", "locale", "app_version"):
        counts = aggregate_data.groupBy(grouping_field).count().collect()
        for i in counts:
            assert true_values[grouping_field][i[grouping_field]] == i["count"]
[ "def testRetrieveChannelsByGenre(self):\n self.assert_(self.epg.channelsByGenre())", "def test_mongodb_pusher_at_app_creation(self):\n # Create the test app\n self.login(\"testuser\", \"password\")\n url = \"appcomposer/tests_data/relativeExample/i18n.xml\"\n rv = self.flask_app.post(\"/composers/translate/selectlang\", data={\"appname\": \"UTApp\", \"appurl\": url},\n follow_redirects=True)\n assert rv.status_code == 200 # Page found code.\n\n # Ensure that after a short while we have all the bundles in the MongoDB.\n time.sleep(2)\n\n bundles = pusher.mongo_bundles.find({\"spec\": url})\n bundles = {b[\"bundle\"]: b for b in bundles}\n\n print bundles\n\n assert len(bundles) == 2\n\n data = bundles[\"all_ALL_ALL\"][\"data\"]\n data = json.loads(data)\n assert data[\"hello_world\"] == \"Hello World.\"\n\n data = bundles[\"de_ALL_ALL\"][\"data\"]\n data = json.loads(data)\n assert data[\"hello_world\"] == \"Hallo Welt.\"", "def testRetrieveChannel(self):\n self.assert_(self.epg.channels())", "def test_version_groups(self, pokedex):\n query = pokedex.query(versions.VersionGroup).join(\n versions.Generation.version_groups).filter(\n versions.Generation.id_ == 1)\n assert query.count() == 2", "def test_team_builder_config_product_groups_count_get(self):\n pass", "def test_package_versions_with_platform(self):\n self._create_finished_release() # this release should not appear in result\n rid1 = self._create_release(platforms=['specific_platform'])\n pid1 = self._create_package(rid1, name='packageOne', version='1.0.1')\n self._start_package(pid1)\n self._stop_package(pid1)\n\n result = orlo.queries.package_versions(platform='specific_platform').all()\n\n self.assertEqual(len(result), 1)\n self.assertEqual(result[0][0], 'packageOne')", "def test_get_all_variants(gemini_case_obj):\n plugin = GeminiPlugin()\n plugin.add_case(gemini_case_obj)\n\n filters = {}\n result = plugin.variants('643594', filters=filters, count=1000)\n variants = result.variants\n nr_of_variants = result.nr_of_variants\n\n assert nr_of_variants == 14", "def testRetrieveMovieChannels(self):\n self.assert_(self.epg.movieChannels())", "def test_execute_query_curr_month_by_subscription_guid_w_order_by_subscription_guid(self):\n url = \"?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&order_by[subscription_guid]=asc&group_by[subscription_guid]=*\" # noqa: E501\n query_params = self.mocked_query_params(url, AzureCostView)\n handler = AzureReportQueryHandler(query_params)\n query_output = handler.execute_query()\n data = query_output.get(\"data\")\n self.assertIsNotNone(data)\n self.assertIsNotNone(query_output.get(\"total\"))\n total = query_output.get(\"total\")\n current_totals = self.get_totals_costs_by_time_scope(handler, self.this_month_filter)\n expected_cost_total = current_totals.get(\"cost_total\")\n self.assertIsNotNone(expected_cost_total)\n result_cost_total = total.get(\"cost\", {}).get(\"total\", {}).get(\"value\")\n self.assertIsNotNone(result_cost_total)\n self.assertEqual(result_cost_total, expected_cost_total)\n\n cmonth_str = self.dh.this_month_start.strftime(\"%Y-%m\")\n for data_item in data:\n month_val = data_item.get(\"date\")\n month_data = data_item.get(\"subscription_guids\")\n self.assertEqual(month_val, cmonth_str)\n self.assertIsInstance(month_data, list)\n self.assertEqual(len(month_data), 1)\n current = \"0\"\n for month_item in month_data:\n self.assertIsInstance(month_item.get(\"subscription_guid\"), str)\n self.assertIsInstance(month_item.get(\"values\"), 
list)\n self.assertIsNotNone(month_item.get(\"values\")[0].get(\"subscription_guid\"))\n data_point = month_item.get(\"values\")[0].get(\"subscription_guid\")\n if data_point == \"1 Other\":\n continue\n self.assertLess(current, data_point)\n current = data_point", "def testSnapshotCounts_GroupByComponent(self, mockSnapshotCountsQuery):\n request = issues_pb2.IssueSnapshotRequest(timestamp=1531334109,\n project_name='proj', group_by='component',\n query='rutabaga:rutabaga', canned_query=2)\n mc = monorailcontext.MonorailContext(\n self.services, cnxn=self.cnxn, requester='owner@example.com')\n mockSnapshotCountsQuery.return_value = (\n {'component1': 123, 'component2': 987},\n ['rutabaga'],\n True)\n\n response = self.CallWrapped(self.issues_svcr.IssueSnapshot, mc, request)\n\n self.assertEqual(2, len(response.snapshot_count))\n self.assertEqual('component1', response.snapshot_count[0].dimension)\n self.assertEqual(123, response.snapshot_count[0].count)\n self.assertEqual('component2', response.snapshot_count[1].dimension)\n self.assertEqual(987, response.snapshot_count[1].count)\n self.assertEqual(1, len(response.unsupported_field))\n self.assertEqual('rutabaga', response.unsupported_field[0])\n self.assertTrue(response.search_limit_reached)\n mockSnapshotCountsQuery.assert_called_once_with(self.project, 1531334109,\n 'component', label_prefix='', query='rutabaga:rutabaga',\n canned_query='is:open')", "def test_execute_query_by_subscription_guid_by_service(self):\n url = \"?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&group_by[subscription_guid]=*&group_by[service_name]=*\" # noqa: E501\n query_params = self.mocked_query_params(url, AzureCostView)\n handler = AzureReportQueryHandler(query_params)\n query_output = handler.execute_query()\n data = query_output.get(\"data\")\n self.assertIsNotNone(data)\n self.assertIsNotNone(query_output.get(\"total\"))\n total = query_output.get(\"total\")\n current_totals = self.get_totals_costs_by_time_scope(handler, self.this_month_filter)\n expected_cost_total = current_totals.get(\"cost_total\")\n self.assertIsNotNone(expected_cost_total)\n result_cost_total = total.get(\"cost\", {}).get(\"total\", {}).get(\"value\")\n self.assertIsNotNone(result_cost_total)\n self.assertEqual(result_cost_total, expected_cost_total)\n\n cmonth_str = DateHelper().this_month_start.strftime(\"%Y-%m\")\n for data_item in data:\n month_val = data_item.get(\"date\")\n month_data = data_item.get(\"subscription_guids\")\n self.assertEqual(month_val, cmonth_str)\n self.assertIsInstance(month_data, list)\n for month_item in month_data:\n try:\n UUID(month_item.get(\"subscription_guid\"), version=4)\n except ValueError as exc:\n self.fail(exc)\n self.assertIsInstance(month_item.get(\"service_names\"), list)", "def test_get_variants(gemini_case_obj):\n plugin = GeminiPlugin()\n plugin.add_case(gemini_case_obj)\n\n filters = {}\n result = plugin.variants('643594', filters=filters, count=5)\n variants = result.variants\n nr_of_variants = result.nr_of_variants\n\n assert nr_of_variants == 5", "def test_all_versionables_collected(self):\n app = apps.get_app_config('djangocms_versioning')\n page_versionable = VersioningCMSConfig.versioning[0]\n poll_versionable = PollsCMSConfig.versioning[0]\n blog_versionable = BlogpostCMSConfig.versioning[0]\n comment_versionable = BlogpostCMSConfig.versioning[1]\n self.assertListEqual(\n app.cms_extension.versionables,\n [page_versionable, poll_versionable, blog_versionable, comment_versionable]\n )", "def 
test_contribute_and_aggregate(self):\n self.maxDiff = None\n self._create_mock_queues()\n source = DSSSourceRef.for_dss_source('foo_source:/0')\n fqids = [\n DSSBundleFQID(source=source,\n uuid='56a338fe-7554-4b5d-96a2-7df127a7640b',\n version='2018-03-28T15:10:23.074974Z'),\n DSSBundleFQID(source=source,\n uuid='b2216048-7eaa-45f4-8077-5a3fb4204953',\n version='2018-03-29T10:40:41.822717Z')\n ]\n\n # Load canned bundles\n bundles = {\n fqid: self._load_canned_bundle(fqid)\n for fqid in fqids\n }\n\n # Synthesize initial notifications\n notifications = [\n dict(action='add',\n catalog=self.catalog,\n notification=self.client.synthesize_notification(fqid))\n for fqid in fqids\n ]\n\n # Invoke the service once to produce a set of expected entities so we\n # don't need to hard-code them. Keep in mind that this test is not\n # intended to cover the service, only the controller.\n expected_digest = defaultdict(list)\n for fqid, bundle in bundles.items():\n contributions = self.index_service.transform(self.catalog, bundle, delete=False)\n for contribution in contributions:\n assert isinstance(contribution, Contribution)\n # Initially, each entity gets a tally of 1\n expected_digest[contribution.entity.entity_type].append(1)\n\n # Prove that we have two contributions per \"container\" type, for when we\n # test poison tallies and deferrals below. Note that the two project\n # contributions are to the same entity, the bundle contributions are not.\n for entity_type in ['projects', 'bundles']:\n self.assertEqual([1, 1], expected_digest[entity_type])\n\n # Test partitioning and contribution\n for i in range(2):\n mock_plugin = MagicMock()\n notified_fqids = list(map(self._fqid_from_notification, notifications))\n notified_bundles = [bundles[fqid] for fqid in notified_fqids]\n mock_plugin.fetch_bundle.side_effect = notified_bundles\n mock_plugin.resolve_bundle.side_effect = DSSBundleFQID.from_json\n mock_plugin.sources = [source]\n with patch.object(IndexService, 'repository_plugin', return_value=mock_plugin):\n with patch.object(BundlePartition, 'max_partition_size', 4):\n event = list(map(self._mock_sqs_record, notifications))\n self.controller.contribute(event)\n\n # Assert plugin calls by controller\n expected_calls = [call(fqid.to_json()) for fqid in notified_fqids]\n self.assertEqual(expected_calls, mock_plugin.resolve_bundle.mock_calls)\n expected_calls = list(map(call, notified_fqids))\n self.assertEqual(expected_calls, mock_plugin.fetch_bundle.mock_calls)\n\n # Assert partitioned notifications, straight from the retry queue\n notifications = self._read_queue(self._notifications_retry_queue)\n if i == 0:\n # Fingerprint the partitions from the resulting notifications\n partitions = defaultdict(set)\n for n in notifications:\n fqid = self._fqid_from_notification(n)\n partition = BundlePartition.from_json(n['notification']['partition'])\n partitions[fqid].add(partition)\n # Assert that each bundle was partitioned ...\n self.assertEqual(partitions.keys(), set(fqids))\n # ... into two partitions. 
The number of partitions depends on\n # the patched max_partition_size above and the number of\n # entities in the canned bundles.\n self.assertEqual([2] * len(fqids), list(map(len, partitions.values())))\n else:\n # The partitions resulting from the first iteration should not\n # need to be paritioned again\n self.assertEqual([], notifications)\n\n # We got a tally of one for each\n tallies = self._read_queue(self._tallies_queue)\n digest = self._digest_tallies(tallies)\n self.assertEqual(expected_digest, digest)\n\n # Test aggregation\n notifications = map(partial(self._mock_sqs_record), tallies)\n with patch.object(IndexWriter, 'write', side_effect=TransportError):\n try:\n self.controller.aggregate(notifications)\n except TransportError:\n pass\n else:\n self.fail()\n\n self.assertEqual([], self._read_queue(self._tallies_queue))\n\n # Poison the two project and the two bundle tallies, by simulating\n # a number of failed attempts at processing them\n attempts = self.controller.num_batched_aggregation_attempts\n # While 0 is a valid value, the test logic below wouldn't work with it\n self.assertGreater(attempts, 0)\n notifications = [\n self._mock_sqs_record(tally,\n attempts=(attempts + 1\n if tally['entity_type'] in {'bundles', 'projects'}\n else 1))\n for tally in tallies\n ]\n self.controller.aggregate(notifications, retry=True)\n\n tallies = self._read_queue(self._tallies_retry_queue)\n digest = self._digest_tallies(tallies)\n # The two project tallies were consolidated (despite being poisoned) and\n # the resulting tally was deferred\n expected_digest['projects'] = [2]\n # One of the poisoned bundle tallies was referred. Since it was\n # poisoned, all other tallies were deferred\n expected_digest['bundles'] = [1]\n self.assertEqual(expected_digest, digest)\n\n # Aggregate the remaining deferred tallies\n notifications = map(self._mock_sqs_record, tallies)\n self.controller.aggregate(notifications, retry=True)\n\n # All tallies were referred\n self.assertEqual([], self._read_queue(self._tallies_retry_queue))\n self.assertEqual([], self._read_queue(self._tallies_queue))", "def test_aggregate(self):\n cron = Cron(self.internal_api)\n\n # To insulate the expected aggregation stats from changes to the\n # populate script, we'll create a separate cohort and classroom. For\n # larger things we'll rely on the stuff set by the populate script,\n # e.g. 
self.program.\n cohort = self.researcher_api.create('cohort', {\n 'name': 'DGN 2015',\n 'code': 'lion mackerel',\n 'program': self.program.id,\n 'school': self.school.id,\n })\n self.researcher_api.associate('set_owner', self.school_admin, cohort)\n classroom = self.school_admin_api.create('classroom', {\n 'name': \"English 201\",\n 'user': self.school_admin.id,\n 'program': self.program.id,\n 'cohort': cohort.id,\n })\n student_activities = self.school_admin_api.init_activities(\n 'student', self.school_admin.id, self.program.id,\n cohort_id=cohort.id, classroom_id=classroom.id)\n db.get([cohort.key(), classroom.key()])\n db.get([a.key() for a in student_activities])\n\n # To test aggregating across multiple users, we'll need several\n # students\n student_params = {'user_type': 'student', 'classroom': classroom.id}\n\n mystery_finisher = self.public_api.create('user', student_params)\n absentee = self.public_api.create('user', student_params)\n refusee = self.public_api.create('user', student_params)\n expelee = self.public_api.create('user', student_params)\n mr_perfect = self.public_api.create('user', student_params)\n non_finisher = self.public_api.create('user', student_params)\n wrong_name = self.public_api.create('user', student_params)\n\n # This student will be in another classroom, and we won't update her,\n # proving that cohort aggregation re-queries more than just the changed\n # stuff.\n other_classroom = self.school_admin_api.create('classroom', {\n 'name': \"English 202\",\n 'user': self.school_admin.id,\n 'program': self.program.id,\n 'cohort': cohort.id,\n })\n other_student_activities = self.school_admin_api.init_activities(\n 'student', self.school_admin.id, self.program.id,\n cohort_id=cohort.id, classroom_id=other_classroom.id)\n other_student = self.public_api.create(\n 'user', {'user_type': 'student', 'classroom': other_classroom.id})\n\n students = [mystery_finisher, absentee, refusee, expelee, mr_perfect,\n non_finisher, wrong_name]\n student_keys = [s.key() for s in students]\n\n others = [other_student, other_classroom] + other_student_activities\n other_keys = [e.key() for e in others]\n\n ### Aggregate initial state\n\n # Assume and simulate that enough time passes between data recording\n # and cron execution that entities become consistent.\n db.get(student_keys)\n db.get(other_keys)\n\n cron.aggregate()\n\n # Every student have the same aggregation data for both activities\n # because no one has done anything yet. 
So just loop and check against\n # the same reference.\n for s in db.get(student_keys):\n self.assertFalse(s.certified)\n correct_stats = {'progress': None}\n self.assertEqual(s.aggregation_data[1], correct_stats)\n self.assertEqual(s.aggregation_data[2], correct_stats)\n\n # Both activities should be the same also\n a1, a2 = db.get([a.key() for a in student_activities])\n correct_stats = {\n 'total_students': 7,\n 'certified_students': 0,\n 'certified_study_eligible_dict': {\n 'n': 0,\n 'completed': 0,\n 'makeup_eligible': 0,\n 'makeup_ineligible': 0,\n 'uncoded': 0\n },\n }\n self.assertEqual(a1.aggregation_data, correct_stats)\n self.assertEqual(a2.aggregation_data, correct_stats)\n\n # The other activities should look like this (this is the last time\n # we'll have to check it because we won't be changing it any more):\n a1, a2 = db.get([a.key() for a in other_student_activities])\n correct_stats = {\n 'total_students': 1,\n 'certified_students': 0,\n 'certified_study_eligible_dict': {\n 'n': 0,\n 'completed': 0,\n 'makeup_eligible': 0,\n 'makeup_ineligible': 0,\n 'uncoded': 0\n },\n }\n self.assertEqual(a1.aggregation_data, correct_stats)\n self.assertEqual(a2.aggregation_data, correct_stats)\n\n # Check cohort (has our seven plus one other)\n cohort = db.get(cohort.key())\n correct_cohort_stats = {\n 'unscheduled': 2, 'scheduled': 0, 'behind': 0, 'completed': 0,\n 'incomplete_rosters': 2,\n 'total_students': 8,\n 'certified_students': 0,\n 'certified_study_eligible_dict': {\n 'n': 0,\n 'completed': 0,\n 'makeup_eligible': 0,\n 'makeup_ineligible': 0,\n 'uncoded': 0\n },\n }\n self.assertEqual(cohort.aggregation_data[1], correct_cohort_stats)\n self.assertEqual(cohort.aggregation_data[2], correct_cohort_stats)\n\n ### Pretend the school admin just certified some students and aggregate\n ### again.\n\n # NOT changing mystery_finisher proves that the aggregator re-queries\n # for unchanged users associated with the same activity.\n certified_students = [absentee, refusee, expelee, mr_perfect,\n non_finisher]\n for s in certified_students:\n s.certified = True\n db.put(certified_students)\n\n # Assume and simulate that enough time passes between data recording\n # and cron execution that entities become consistent.\n db.get(student_keys)\n\n cron.aggregate()\n\n # Every student should be the same for both activities.\n for s in db.get(student_keys):\n correct_stats = {'progress': None}\n self.assertEqual(s.aggregation_data[1], correct_stats)\n self.assertEqual(s.aggregation_data[2], correct_stats)\n\n # Both activities should be the same also\n a1, a2 = db.get([a.key() for a in student_activities])\n correct_stats = {\n 'total_students': 7,\n 'certified_students': 5,\n 'certified_study_eligible_dict': {\n 'n': 5,\n 'completed': 0,\n 'makeup_eligible': 0,\n 'makeup_ineligible': 0,\n 'uncoded': 5\n },\n }\n self.assertEqual(a1.aggregation_data, correct_stats)\n self.assertEqual(a2.aggregation_data, correct_stats)\n\n # Check cohort\n cohort = db.get(cohort.key())\n correct_cohort_stats = {\n 'unscheduled': 2, 'scheduled': 0, 'behind': 0, 'completed': 0,\n 'incomplete_rosters': 2,\n 'total_students': 8,\n 'certified_students': 5,\n 'certified_study_eligible_dict': {\n 'n': 5,\n 'completed': 0,\n 'makeup_eligible': 0,\n 'makeup_ineligible': 0,\n 'uncoded': 5\n },\n }\n self.assertEqual(cohort.aggregation_data[1], correct_cohort_stats)\n self.assertEqual(cohort.aggregation_data[2], correct_cohort_stats)\n\n ### Simulate the first session, with two students absent and one who\n ### doesn't 
finish. Also schedule the first activity.\n\n absentee.s1_status_code = 'A' # code for absent\n refusee.s1_status_code = 'PR' # code for parent refusal\n expelee.s1_status_code = 'E' # code for expelled\n wrong_name.s1_status_code = 'MWN' # code for merge: wrong name\n db.put([absentee, refusee, expelee, wrong_name])\n\n progress_pds = []\n pd_params = {\n 'variable': 's1__progress',\n 'program': self.program.id,\n 'activity': student_activities[0].id,\n 'activity_ordinal': 1,\n }\n # Progress on activity 1 for those who finished.\n for s in [mr_perfect, mystery_finisher, wrong_name]:\n pd_params['value'] = '100'\n pd_params['scope'] = s.id\n progress_pds.append(Api(s).create('pd', pd_params))\n # Progress on activity 1 for those who didn't finish.\n pd_params['value'] = '50'\n pd_params['scope'] = non_finisher.id\n progress_pds.append(Api(non_finisher).create('pd', pd_params))\n\n a1.scheduled_date = datetime.date.today()\n a1.put()\n\n # Assume and simulate that enough time passes between data recording\n # and cron execution that entities become consistent.\n db.get([pd.key() for pd in progress_pds] +\n [absentee.key(), refusee.key(), expelee.key(), a1.key()])\n\n cron.aggregate()\n\n # Check that user stats are right.\n correct_stats = [\n {'progress': 100}, # mystery_finisher\n {'progress': None}, # absentee\n {'progress': None}, # refusee\n {'progress': None}, # expelee\n {'progress': 100}, # mr_perfect\n {'progress': 50}, # non_finisher\n {'progress': 100}, # wrong_name\n ]\n for index, s in enumerate(students):\n s = db.get(s.key())\n self.assertEqual(s.aggregation_data[1], correct_stats[index])\n\n # Check that activity stats are right.\n a1 = db.get(student_activities[0].key())\n correct_stats = {\n # Total has decreased b/c MWN students are dropped from the counts\n # completely. 
This is because they're not really a person, they're\n # a duplicate representation of a different real person.\n 'total_students': 6,\n 'certified_students': 5,\n 'certified_study_eligible_dict': {\n 'n': 4,\n 'completed': 1,\n 'makeup_eligible': 1,\n 'makeup_ineligible': 1,\n 'uncoded': 1\n },\n }\n self.assertEqual(a1.aggregation_data, correct_stats)\n # Activity 2 shouldn't register any of the progress we've made on\n # activity 1.\n a2 = db.get(student_activities[1].key())\n correct_stats = {\n 'total_students': 6,\n 'certified_students': 5,\n 'certified_study_eligible_dict': {\n 'n': 5,\n 'completed': 0,\n 'makeup_eligible': 0,\n 'makeup_ineligible': 0,\n 'uncoded': 5\n },\n }\n self.assertEqual(a2.aggregation_data, correct_stats)\n\n # Check cohort (again, similar, but with a larger 'all' total).\n cohort = db.get(cohort.key())\n correct_cohort_stats = {\n 1: {\n 'unscheduled': 1, 'scheduled': 1, 'behind': 0, 'completed': 0,\n 'incomplete_rosters': 2,\n 'total_students': 7,\n 'certified_students': 5,\n 'certified_study_eligible_dict': {\n 'n': 4,\n 'completed': 1,\n 'makeup_eligible': 1,\n 'makeup_ineligible': 1,\n 'uncoded': 1\n },\n },\n 2: {\n 'unscheduled': 2, 'scheduled': 0, 'behind': 0, 'completed': 0,\n 'incomplete_rosters': 2,\n 'total_students': 7,\n 'certified_students': 5,\n 'certified_study_eligible_dict': {\n 'n': 5,\n 'completed': 0,\n 'makeup_eligible': 0,\n 'makeup_ineligible': 0,\n 'uncoded': 5\n },\n }\n }\n self.assertEqual(cohort.aggregation_data, correct_cohort_stats)", "def test_versions(self, pokedex):\n query = pokedex.query(versions.Version).join(\n versions.VersionGroup.versions).filter(\n versions.VersionGroup.id_ == 1)\n assert query.count() == 2", "def test_team_builder_config_product_groups_get(self):\n pass", "def test_platform_summary(self):\n\n for _ in range(0, 3):\n self._create_release(platforms=['platformOne', 'platformTwo'])\n for _ in range(0, 2):\n self._create_release(platforms=['platformTwo', 'platformThree'])\n\n result = orlo.queries.platform_summary().all()\n for platform, count in result:\n if platform == 'platformOne':\n self.assertEqual(count, 3)\n elif platform == 'platformTwo':\n self.assertEqual(count, 5)\n elif platform == 'platformThree':\n self.assertEqual(count, 2)\n else:\n raise Exception('Unexpected platform: {}'.format(str(platform)))", "def test_team_builder_config_product_groups_change_stream_get(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function returns the ratio of the total of tags in repository
def perc(tags):
    sum_of_perc=0
    for tag in tags:
        # print(tag)
        if tag in store_tag:
            sum_of_perc += (store_tag[tag]/vocabulary)
    return (sum_of_perc*100)
[ "def vote_percentage(results):\n yes = results.count('yes')\n no = results.count('no')\n return yes/(no + yes)", "def total_votes(self):\n\n return self.up_votes - self.down_votes", "def sum_ratios_to_percentage(ratios):\n return round(sum(ratios) * 100, 2)", "def denominator(self, model, word):\r\n total = 0.0\r\n for key in model[word].keys():\r\n total += model[word][key]\r\n return total", "def vote_percent():\n for vote_amount in candidateVotes: \n votePercent = '{:.3f}'.format(float(vote_amount/TotalVotes)*100)\n candidateVotesPercent.append(votePercent)\n return candidateVotesPercent", "def get_vote_percentage(self):\n try:\n return 100 * self.get_num_votes() / self.voting.get_total_votes()\n except ZeroDivisionError:\n return 0", "def articles_total():", "def fraction(self,outcomelabel):\n d = self.counts\n total = sum(d.values())\n return d[outcomelabel]/total", "def average(t):\n def sum_helper(t):\n total, count = t.label, 1\n for b in t.branches:\n b_total, b_count = sum_helper(b)\n total += b_total\n count += b_count\n return total, count\n total, count = sum_helper(t)\n return total / count", "def _parse_branch_coverage_percentage(self, soup) -> float:\n raise NotImplementedError", "def percent(self, attr):\n total_percent = 0.0\n for obj in self._objects:\n flat, percent, per_level, percent_per_level, percent_base, percent_bonus = Build._get_object_stat(obj, attr)\n total_percent += percent\n\n return total_percent", "def share_factor(self):\n t = self.total_size()\n sumsizes = sum([HBStree.subtree_size(r) for r in self.root_versions])\n return sumsizes / t", "def used_percent(self):\n size = self.size\n\n if size > 0:\n return int(100.0 * self.used / size)\n\n return 0", "def get_total_votes(self):\n return self.total_votes", "def percent(json_dic, tag=fj.TAGS['p']):\n total = len(total_classes(json_dic))\n classes = len(classes_with_tag(json_dic, tag))\n percent = (float(classes) / total) * 100\n return percent", "def getReductionRatio(self) -> retval:\n ...", "def vote_total(self):\n return self.votes.filter(vote=+1).count() - \\\n self.votes.filter(vote=-1).count()", "def get_relevance(self, v1, v2):\n rel = np.sum(v1 * v2) / np.sqrt(np.sum(np.square(v1)) * np.sum(np.square(v2)))\n return rel", "def calculate_total_bags(graph):\n value = 0\n for node in graph:\n value += int(node[\"count\"]) + int(node[\"count\"]) * calculate_total_bags(\n node[\"inside\"]\n )\n return value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns node at kth from end
def kth_from_end(self, k):
    if self._size - k < 0:
        raise AttributeError
    current = self.head
    for i in range(self._size - k - 1):
        current = current._next
    return current
[ "def kth_from_end(self, k):\n current = self.head\n counter = self._size - 1 - k\n if (counter < 0) | (counter >= self._size):\n raise ValueError\n while counter > 0:\n current = current._next\n counter -= 1\n return current.val", "def exc2_return_kth_to_last(ll: LinkedList, k: int) -> Node:\n raise NotImplementedError", "def __getitem__(self, k):\n # To do\n index = 0\n currNode = self._head\n if k >= self._size:\n k = self._size-1\n while index <= k:\n if index == k:\n return currNode._element\n index += 1\n currNode = currNode._next", "def kth_from_last2(linked_list, k):\n\tif linked_list is None:\n\t\traise InsufficientNodes()\n\tif k <= 0:\n\t\traise ValueError('k must be positive')\n\n\t# Count the no. of nodes\n\tcurrent = linked_list\n\tcount = 0\n\twhile current is not None:\n\t\tcurrent = current.next\n\t\tcount += 1\n\n\t# Not enough nodes\n\tif k > count:\n\t\traise InsufficientNodes()\n\n\t# Find the (count-k)th node\n\tcurrent = linked_list\n\tfor i in xrange(count - k):\n\t\tcurrent = current.next\n\n\treturn current.value", "def kth_from_end_val(self, k: int) -> any:\n if type(k) is not int:\n raise Exception('k must be an integer')\n if k < 0:\n raise Exception('k must be greater than 0')\n curr = self.head\n steps = self.ll_len() - k\n if steps < 1:\n raise Exception('k must be less than the length of the list')\n for _ in range(1, steps):\n curr = curr.next\n return curr.val", "def exc2_return_kth_to_last_first(ll: LinkedList, k: int) -> LinkedList:\n if not ll.head or not ll.head.next:\n return ll\n node = ll.head\n for _ in range(k - 1):\n node = node.next\n ll.head = node\n return ll", "def k_from_end_recursive(current, k):\n\n if not current:\n return 0\n\n r = count_to_end(current.next, k)\n if isinstance(r, int):\n n = r + 1\n if n == k:\n return current\n else:\n return n\n else:\n return r", "def find_kth_last(a_list: LinkedList, k: int) -> Any:\n current = a_list.head\n runner = a_list.head\n for _ in range(k):\n runner = runner.next\n if runner is None:\n return None\n\n while runner.next:\n current = current.next\n runner = runner.next\n\n return current.data", "def get(self, k):\n\tnode_list=[]\n\n\tcurrent_node = self.root\n\n\twhile(true):\n\t\tif (current_node==None):\n\t\t\tbreak\n\t\telse:\n\t\t\tif (k==current_node.get_key())\n\t\t\t\t\tnode_list.append(current_node)\n\t\t\t\t\tcurrent_node= current_node.get_left()\n\t\t\telse:\n\t\t\t\tif (k<current_node.get_key()):\n\t\t\t\t\tcurrent_node=current_node.get_left()\n\t\t\t\telif(k>current_node.get_key()):\n\t\t\t\t\tcurrentNode=currentNode.getRight()\n\t\treturn node_list", "def kth_from_last1(linked_list, k):\n\tif linked_list is None:\n\t\traise InsufficientNodes()\n\tif k <= 0:\n\t\traise ValueError('k must be positive')\n\n\t# Advance the fast pointer by k nodes\n\tfast = slow = linked_list\n\tfor i in xrange(k):\n\t\ttry:\n\t\t\tfast = fast.next\n\t\texcept AttributeError:\n\t\t\traise InsufficientNodes()\n\n\t# Advance slow and fast pointers together till the fast\n\t# one hits the end.\n\twhile fast is not None:\n\t\tfast = fast.next\n\t\tslow = slow.next\n\n\treturn slow.value", "def find_kth_last_recursive(a_list: LinkedList, k: int):\n _, data = kth_last_recursion(a_list.head, k)\n return data", "def search(self, k):\n node = self.head\n while node is not None and node.key != k:\n node = node.next\n return node", "def reverseKGroup(head: Node, k: int) -> Node:\n i = k\n current = head\n prev = None\n nxt = None\n\n while i > 0 and current:\n nxt = current.next\n if prev:\n current.next = 
prev\n prev = current\n current = nxt\n i -= 1\n\n if head:\n head.next = reverseKGroup(nxt, k) if nxt else None\n\n return prev", "def find_position(self, k):\n if self.is_empty():\n return None\n else:\n p = self._subtree_search(self.root(), k)\n self._rebalance_access(p)\n return p", "def __getitem__(self, k):\n if not 0 <= k < self.n:\n return IndexError('k is out of bounds!')\n\n return self.A[k] # get element at index k", "def test(n: int, k: int):\n ll = LinkedList()\n for i in range(n):\n ll.insert(i)\n print(\"Before removing {}th node from the end:\".format(k), end=\" \")\n ll.display()\n remove_kth_node_from_end(ll.head, k)\n print(\"After removing {}th node from the end:\".format(k), end=\" \")\n ll.display()\n print()", "def selectk(xs, k):\n elem = xs[len(xs) / 2]\n\n smaller = [x for x in xs if x < elem]\n bigger = [x for x in xs if x > elem]\n\n if len(smaller) == k:\n return elem\n elif len(smaller) > k:\n return selectk(smaller, k)\n else:\n return selectk(bigger, k - len(smaller) - 1)", "def remove_kth_node_from_end(head: Node, k):\n fast, slow = head, head\n i = 0\n # if i is set to 1, then set the condition in the loop to use i<=k\n while i < k:\n # To set the differences between the fast and slow pointer is kth\n # element\n fast = fast.next\n i += 1\n\n if fast is None:\n # Then n = k. To remove the first element from the head.\n # Or remove the nth element from end.\n # to_be_remove = head\n\n head.value = head.next.value\n head.next = head.next.next\n # The code below cannot be used because the global reference still\n # references to head object. Any reference changes to head is not\n # reflected to the global.\n # to_be_remove.next = None\n return\n\n while fast.next is not None:\n # Traverse until one element before the removal node\n fast = fast.next\n slow = slow.next\n\n # the slow.next is the node to be remove\n to_be_remove = slow.next\n slow.next = slow.next.next\n\n # Update the removal node\n to_be_remove.next = None", "def getKmer( s, i, k ):\n return s[i: i + k]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Uses 'oh_cp' to save histograms in a file.
def oh_cp(partition, server, run_index):
    logging.info('Saving histograms from server %s' % (server))
    s = os.system('oh_cp -p%s -s%s -H -1 -O -r %d' % (partition, server, run_index))
    if s != 0:
        logging.warn('Output status of oh_cp was %d' % s)
[ "def making_histogram_files(filename_list,wk_dir):\n desc = {}\n for fname in filename_list:\n print(\"Loading cube for\",fname)\n fname_tmp = os.path.basename(fname)\n hname = os.path.join(wk_dir,\".\".join(fname_tmp.split(\".\")[:-1])+\"_hist.nc\")\n ppndata1=make_hist_maps.read_data_cube(fname)\n ppn_hist_cube=make_hist_maps.make_hist_ppn(ppndata1)\n iris.save(ppn_hist_cube, hname)\n\n desc.update({os.path.relpath(hname,start=wk_dir): {\n \"long_name\": \"iris histogram cubes\",\n \"description\": \"histograms saved individually for model and obs data\"}})\n update_json(\"data\",desc,wk_dir+\"/output.json\")\n return", "def make_histos(infile, det):\n # return cached results, if any\n fname = os.path.basename(infile).replace('.root', '')\n cachefile = 'output/cache/draw_pfsize_{0}_{1}.pkl'.format(fname, det)\n if os.access(cachefile, os.R_OK):\n with open(cachefile, 'rb') as f:\n return pickle.load(f)\n\n # get TTree with PFClusters\n fi = ROOT.TFile(infile)\n tree = fi.Get('ntuplizer/PFClusterTree')\n if not tree:\n raise Exception('TTree not found')\n\n result = [ROOT.TH1D('h', '', 60, 0, 6) for _ in range(6)]\n\n # fill histograms\n for ev in range(tree.GetEntriesFast()):\n if tree.GetEntry(ev) <= 0:\n raise Exception\n\n # barrel vs endcaps\n if det == 'EB':\n if abs(tree.pfEta) > 1.479:\n continue\n else:\n if abs(tree.pfEta) < 1.479:\n continue\n\n # remove \"fakes\"; NOTE: cuts were evaluated with draw_inputs.py\n if tree.pfPhoDeltaR > 0.03 or tree.pfE/tree.mcE < 0.4:\n continue\n\n pfSize = min(len(result), tree.pfSize5x5_ZS)\n ptTrue = tree.mcPt\n\n result[pfSize - 1].Fill(ptTrue)\n\n # save cache\n with open(cachefile, 'wb') as f:\n pickle.dump(result, f)\n\n return result", "def save(self, hist_data):\n object_nm = self.object_nm\n color = self.color\n data_dir = os.path.join(os.path.dirname(__file__),\n '../data/histogram_data')\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n filename = os.path.join(data_dir, object_nm+'_'+color+'.pkl.gz')\n with gzip.open(filename, 'wb') as f:\n pickle.dump(hist_data, f)", "def make(self):\n self.file.cd()\n for histogram in self.histograms:\n histogram.FillTree(self.file.EICTree)\n self.window.cd()\n histogram.Draw()\n self.page(log=histogram.log)", "def displaySaveHisto(I, filename=None):\n if np.max(I) <= 1:\n I = 255 * I\n hist, bins = np.histogram(I.flatten(), 256, range=(0, 255))\n fig = plt.figure()\n plt.bar(bins[:-1], hist, width=1)\n plt.show()\n if filename != None:\n fig.savefig(filename, bbox_inches='tight')", "def FillHistogram(histo, infile, **kwargs):\n append = kwargs.pop(\"append\", False)\n kwargs.update(IOManager._getBinning(histo))\n varexp = kwargs.get(\"varexp\")\n histoname = histo.GetName()\n histotitle = histo.GetTitle()\n histoclass = histo.ClassName()\n if not \":\" in varexp:\n assert histoclass.startswith(\"TH1\")\n elif len(varexp.split(\":\")) == 2:\n assert histoclass.startswith(\"TH2\")\n else:\n raise NotImplementedError\n htmp = IOManager.GetHistogram(infile, **kwargs)\n if append:\n histo.Add(htmp)\n else:\n htmp.Copy(histo)\n del htmp\n histo.SetName(histoname)\n histo.SetTitle(histotitle)", "def all_hists(self, fname='', action=''):\n if hasattr(self.sender(), 'text') and not action:\n action = self.sender().text()\n if 'Save' in action:\n fpath = fname if fname else self.try_browse(title='Select a File Suffix', \n file_type='CSV (*.csv);;all (*)',\n open_func=QFileDialog.getSaveFileName)\n else: fpath = 'notsaving'\n if fpath: # don't do anything if the user cancels\n fdir = 
os.path.dirname(fpath)\n fname = os.path.basename(fpath)\n for i in range(self._a): # fit main windows first\n self.mw[i].display_fit(fit_method='check action')\n for i in range(len(self.rw_inds)): # save re-image windows \n self.rw[i].get_histogram() # since they depend on main windows\n self.rw[i].display_fit(fit_method='check action')\n if 'Save' in action:\n self.rw[i].save_hist_data(\n save_file_name=os.path.join(fdir, self.rw[i].objectName() + fname), \n confirm=False)\n if 'Reset' in action:\n self.rw[i].image_handler.reset_arrays() \n self.rw[i].histo_handler.bf = None\n self.rw[i].hist_canvas.clear()\n self.rw[i].hist1.clear()\n self.rw[i].hist2.clear()\n for i in range(self._a): # then can save and reset main windows\n if 'Save' in action:\n self.mw[i].save_hist_data(\n save_file_name=os.path.join(fdir, self.mw[i].objectName() + fname), \n confirm=False)\n if 'Reset' in action:\n self.mw[i].image_handler.reset_arrays() \n self.mw[i].histo_handler.bf = None\n self.mw[i].hist_canvas.clear()", "def save_correlation_histogram(aggregation):\n plt.hist(get_correlations(aggregation), bins=40)\n output_file_name = '{0}/figures/{1}_correlation_histogram.png'.format(\n REPO_HOME_PATH, aggregation)\n plt.savefig(output_file_name)\n print('Saved {0}'.format(output_file_name))\n plt.clf()", "def generate_hist( cov_values, outputfile, saturation, resolution, title ):\n\t\n\tvalues = []\n\tblocks = [ cov_values[ i : i + resolution ] for i in xrange( 0, len( cov_values ), resolution ) ]\n\tfor block in blocks:\n\t\tvalues.append( min( [ np.mean( block ), saturation ] ) )\n\t\n\tfig, ax = plt.subplots()\n\t\n\tax.set_title( title )\n\tax.hist( values, bins=300, color=\"lime\" )\n\tax.set_xlim( 0, 300 )\n\t\n\tax.set_xlabel( \"sequencing coverage depth\" )\n\tax.set_ylabel( \"number of positions\" )\n\t\n\tfig.savefig( outputfile, dpi=300 )", "def computeHistograms( inFN, cols, binSizes = None, outFNs = None, getio = None ):\n\n cols = tuple( MakeSeq( cols ) )\n binSizesHere = ( .001, ) * len( cols ) if binSizes is None else tuple( MakeSeq( binSizes ) )\n outFNsHere = outFNs\n if outFNsHere is None: outFNsHere = [ AddFileSubdir( 'stats', AddFileSfx( inFN, 'hist', col ) ) for col in cols ]\n\n assert len( cols ) == len( binSizesHere ) == len( outFNsHere )\n if getio: return dict( depends_on = inFN, creates = outFNsHere ) \n # add histogram combiner\n\n hists = [ Histogrammer( binSize = binSize ) for binSize in binSizesHere ]\n z = IDotData( inFN )\n for h, c, outFN in zip( hists, cols, outFNsHere ):\n h.addVals( z[ c ] )\n h.save( outFN )", "def make_histogram(self): # connected to make histogram (btn_histogram)\n print(\"make hist\")\n# self.calculate_images()\n self.intensitys = np.linspace(0,10,10)\n self.intensitys2 = self.intensitys\n try:\n self.intensitys2 = np.concatenate((self.intensitys,\n self.intensitys2))\n except:\n self.intensitys2 = self.intensitys\n self.doit()\n\n self.histo_data = True", "def plot_hist(list_of_data, plot_title, bin_sz):\r\n \r\n from plotly.offline import plot, iplot, init_notebook_mode\r\n import plotly.graph_objs as go\r\n\r\n # This line is necessary for offline mode.\r\n init_notebook_mode(connected=False)\r\n \r\n data = []\r\n \r\n for d in list_of_data:\r\n \r\n trace_tmp = go.Histogram(\r\n x=d,\r\n opacity=0.33,\r\n autobinx=False,\r\n xbins=dict(start=min(d),end=max(d),size=bin_sz) \r\n )\r\n \r\n data.append(trace_tmp)\r\n\r\n layout = go.Layout(title = plot_title, barmode='overlay')\r\n fig = go.Figure(data=data, layout=layout)\r\n\r\n iplot(fig, 
filename='Histograms')", "def extract_histograms(filelist,h_to_save,str1,str2,tit='waveform, Run ',debug=False):\n wf_hist = []\n \n for fi in filelist:\n if debug:\n print('Extract histograms from file ',fi)\n print(' str1 = ',str1, 'str2 = ',str2, ' h_to_save = ',h_to_save)\n tokens = fi.split(str1)\n run = tokens[1].split(str2)[0]\n \n MyFile = TFile(fi) \n gDirectory.pwd()\n dir = gDirectory\n lhist = ldir(dir,MyFile,debug=False)\n \n # lhist is a list of all histograms in a file\n \n for hist in lhist:\n if debug:\n print(' examine histogram ',hist.GetName())\n if h_to_save == hist.GetName() or h_to_save == '*':\n hh = hist.ReadObj()\n if h_to_save == '*':\n title = hh.GetTitle() + '_' + run\n else:\n title = h_to_save + '_' + run\n if debug:\n print(' save histogram ',hist.GetName(), ' with title ',title)\n ev_hist = hh.Clone()\n ev_hist.SetTitle(title)\n ev_hist.SetDirectory(0)\n wf_hist.append((run,ev_hist))\n return wf_hist", "def save(histogram, histo_path):\n log.debug(\"saving histogram to %s\", histo_path.with_suffix(\".npz\"))\n\n # create output directory if it does not exist yet\n if not os.path.exists(histo_path.parent):\n os.mkdir(histo_path.parent)\n np.savez(\n histo_path.with_suffix(\".npz\"),\n yields=histogram[\"yields\"],\n sumw2=histogram[\"sumw2\"],\n bins=histogram[\"bins\"],\n )", "def histogram(self, channels, files):\n\n f = open(os.path.join(self.directory, \"describe.txt\"),\"a\")\n for channel in channels:\n bin_max = self.graphMax([channel], files, level=\"90%\")\n bin_min = self.graphMin([channel], files)\n plt.xlim(bin_min, bin_max)\n\n for file in files:\n data = self.exps[file][channel]\n binsnp = np.linspace(self.graphMin([channel], [file]), self.graphMax([channel], [file]), 50)\n n, bins, patches = plt.hist(data, bins=binsnp, alpha=0.7, label=file)\n f.write(\"{\" + \"{0} {1}\".format(file, channel) + \" : \" + str(self.statistics(data)) + \"}\")\n #(mu_control, sigma_control) = scipy.stats.norm.fit(controlCols[index])\n #y = scipy.stats.norm.pdf(bins, mu_control, sigma_control)\n #plt.plot(bins, y, '--', label='Control Normal PDF', color='blue')\n plt.legend(loc='best')\n title = \"Histogram of {0} for {1}\".format(channel, files)\n plt.title(title)\n plt.ylabel(\"Frequency\")\n plt.ylim(0)\n plt.xlabel(\"Fluorescence Value\")\n plt.yscale(\"linear\")\n plt.xscale(\"linear\")\n plt.savefig(os.path.join(self.directory, title+\".png\"))\n plt.clf()\n f.close()", "def write_multiyear_histogram_file(outfile, histos, ind_names, minmax, \r\n day_range=None, geog_box=None) :\r\n\r\n # create file, dimensions, variables\r\n ofile = create_multiyear_histogram_file(outfile, ind_names, minmax)\r\n\r\n # record the doy range used to generate this data.\r\n if day_range is not None : \r\n ofile.start_day = day_range.start\r\n ofile.end_day = day_range.stop-1\r\n\r\n # record the geographic bounding box used to generate this data\r\n if geog_box is not None : \r\n ofile.min_lon = geog_box[0]\r\n ofile.max_lon = geog_box[1]\r\n ofile.min_lat = geog_box[2]\r\n ofile.max_lat = geog_box[3]\r\n \r\n # store variables\r\n names = [ 'occurrence', 'burned_occurrence', \r\n 'burned_forest', 'burned_forest_occ', \r\n 'burned_not_forest', 'burned_not_forest_occ',\r\n 'burned_other', 'burned_other_occ', 'burned_total'] \r\n # things which count 0.5 deg cells are int32, things which count \r\n # MODIS cells are int64\r\n types = [ np.int32, np.int32,\r\n np.int64, np.int32,\r\n np.int64, np.int32,\r\n np.int64, np.int32, np.int64 ]\r\n for name, hist, t in zip(names, histos, 
types) : \r\n v = ofile.createVariable(name, t, ind_names) \r\n v[:] = hist.H \r\n v.count = hist.count\r\n v.total = hist.total\r\n\r\n # close outfile\r\n ofile.close()", "def __init__(self, filename):\n self.im = Image.open(filename).convert(\"RGB\")\n #split the image into into RGB bands\n self.bands = self.im.split()\n for self.color in range(3):\n self.hist = self.bands[self.color].histogram()\n self.generateHist(\"library/\" + str(self.color) + \"pre.jpg\")", "def save_hue(data, filename):\n fig, ax = plt.subplots()\n x = np.linspace(0, 2*np.pi, 201)\n ax.plot(x, data)\n fig.savefig(filename)\n plt.close()", "def binaryStats(saveLocation='', ECC=True, PB=True, SEMI=True, hist=False, numBins=100, inf='', to_return=False):\n if not saveLocation == '':\n if not os.path.exists(saveLocation):\n os.makedirs(saveLocation)\n columns = [0]\n indicies = [1, 1, 1]\n if ECC:\n columns = columns + [6]\n indicies[1] += 1\n indicies[2] += 1\n if PB:\n columns = columns + [7]\n indicies[2] += 1\n if SEMI:\n columns = columns + [8]\n bevData, meta = ip.bh_data('bev.82', columns, meta_data={}, info=inf)\n stats = {}\n for val in bevData:\n if not val[0] in stats:\n stats[val[0]] = {'ECC' : [] , 'PB' : [] , 'SEMI' : []}\n if ECC:\n stats[val[0]]['ECC'].append(val[indicies[0]])\n if PB:\n stats[val[0]]['PB'].append(val[indicies[1]])\n if SEMI:\n stats[val[0]]['SEMI'].append(val[indicies[2]])\n if to_return:\n return(stats)\n all_keys = list(stats.keys())\n all_keys.sort()\n stats_keys = []\n for key in all_keys:\n non_zero = False\n for sub_key in stats[key]:\n if stats[key][sub_key] != []:\n non_zero = True\n if non_zero:\n stats_keys.append(key)\n plt.figure(1)\n colormap = plt.cm.coolwarm\n plt.gca().set_prop_cycle(cycler('color', [colormap(i) for i in np.linspace(0, 0.9, len(stats))]))\n plt.figure(2)\n plt.gca().set_prop_cycle(cycler('color', [colormap(i) for i in np.linspace(0, 0.9, len(stats))]))\n plt.figure(3)\n plt.hold(True)\n plt.gca().set_prop_cycle(cycler('color', [colormap(i) for i in np.linspace(0, 0.9, len(stats))]))\n for i, key in enumerate(stats_keys):\n temp_AU_list = []\n for item in stats[key]['SEMI']:\n AU = (10.0 ** item) * 0.00465047\n temp_AU_list.append(math.log10(AU))\n stats[key]['SEMI'] = temp_AU_list\n stats[key]['ECC'].sort()\n stats[key]['PB'].sort()\n stats[key]['SEMI'].sort()\n ECC_tot = _buildFraction(stats[key]['ECC'])\n PB_tot = _buildFraction(stats[key]['PB'])\n SEMI_tot = _buildFraction(stats[key]['SEMI'])\n if ECC:\n plt.figure(1)\n ECC_plot, = plt.step(([0] +stats[key]['ECC']),([0] + ECC_tot), where='post')\n if PB:\n plt.figure(2)\n PB_plot, = plt.step(([0] + stats[key]['PB']),([0] + PB_tot), where='post')\n if SEMI:\n plt.figure(3)\n SEMI_plot, = plt.step(([0] + stats[key]['SEMI']),([0] + SEMI_tot), where='post')\n if i==0:\n if ECC:\n ECC_pi = ECC_plot\n if PB:\n PB_pi = PB_plot\n if SEMI:\n SEMI_pi = SEMI_plot\n if i == (len(stats_keys) - 1):\n if ECC:\n ECC_pf = ECC_plot\n if PB:\n PB_pf = PB_plot\n if SEMI:\n SEMI_pf = SEMI_plot\n min_key = stats_keys[0]\n max_key = stats_keys[-1]\n if ECC:\n plt.figure(5)\n Z = [[0,0],[0,0]]\n levels = range(int(min_key), int(max_key), 5)\n CS3 = plt.contourf(Z, levels, cmap=colormap)\n plt.clf()\n plt.figure(1)\n x1, x2, y1, y2 = plt.axis()\n plt.axis([x1, x2, 0, 1.1])\n plt.colorbar(CS3) \n plt.title('Binary Black Hole Eccentricity CDF')\n plt.xlabel('Eccentricity')\n plt.ylabel('Eccentricity Fraction')\n plt.savefig((saveLocation + 'ECC.png'))\n if PB:\n plt.figure(5)\n Z = [[0,0],[0,0]]\n levels = 
range(int(min_key), int(max_key), 5)\n CS3 = plt.contourf(Z, levels, cmap=colormap)\n plt.clf()\n plt.figure(2)\n x1, x2, y1, y2 = plt.axis()\n plt.axis([x1, x2, 0, 1.1])\n plt.colorbar(CS3) \n plt.title('Binary Black Hole Period CDF')\n plt.xlabel('Log_10( Period (days))')\n plt.ylabel('Period Fraction')\n plt.savefig((saveLocation + 'PB.png'))\n\n time = []\n max = []\n mean = []\n median = []\n mode = []\n min = []\n key_list = stats.keys()\n key_list.sort()\n for key in key_list:\n npList = np.asarray(stats[key]['PB'])\n modeList = scipy.stats.mode(npList).mode\n for i in range(len(modeList)):\n time.append(key)\n max.append(np.amax(npList))\n mean.append(np.mean(npList))\n median.append(np.median(npList))\n mode.append(modeList[i])\n min.append(np.amin(npList))\n plt.figure(4)\n plt.plot(time, max, '-')\n plt.plot(time, mean, '-')\n plt.plot(time, median, '-')\n plt.plot(time, mode, '-')\n plt.plot(time, min, '-')\n plt.legend(['Max', 'Mean', 'Median', 'Mode', 'Min'])\n plt.title('Black Hole Binary Statistics Over Time')\n plt.xlabel('Physical Time (MY)')\n plt.ylabel('Log_10( Period (days))')\n plt.savefig((saveLocation + 'bBH_PBStats.png'))\n if SEMI:\n plt.figure(5)\n Z = [[0,0],[0,0]]\n levels = range(int(min_key), int(max_key), 5)\n CS3 = plt.contourf(Z, levels, cmap=colormap)\n plt.clf()\n plt.figure(3)\n x1, x2, y1, y2 = plt.axis()\n plt.axis([x1, x2, 0, 1.1])\n plt.colorbar(CS3) \n plt.title('Binary Black Hole Semi-major Axis CDF')\n plt.xlabel('Log_10( Semi-major Axis (Au))')\n plt.ylabel('Semi-major Axis Fraction')\n plt.savefig((saveLocation + 'SEMI.png'))\n plt.close('all')\n if hist:\n if ECC:\n for time in stats:\n plt.figure()\n n, bins, patches = plt.hist(stats[time]['ECC'], numBins, normed=False, histtype='bar', rwidth=1)\n plt.title('Eccentricity Distribution of Black Hole Binaries: {0} MY'.format(time))\n plt.xlabel('Eccentricity')\n plt.ylabel('N')\n plt.savefig((saveLocation + 'ECC.{0}MY.png'.format(time)))\n plt.close('all')\n if PB:\n for time in stats:\n plt.figure()\n n, bins, patches = plt.hist(stats[time]['PB'], numBins, normed=False, histtype='bar', rwidth=1)\n plt.title('Period Distribution of Black Hole Binaries: {0} MY'.format(time))\n plt.xlabel('Log_10( Period (days))')\n plt.ylabel('N')\n plt.savefig((saveLocation + 'PB.{0}MY.png'.format(time)))\n plt.close('all')\n if SEMI:\n for time in stats:\n plt.figure()\n n, bins, patches = plt.hist(stats[time]['SEMI'], numBins, normed=False, histtype='bar', rwidth=1)\n plt.title('Semi-major Axis Distribution of Black Hole Binaries: {0} MY'.format(time))\n plt.xlabel('Log_10( Semi-major Axis (Au))')\n plt.ylabel('N')\n plt.savefig((saveLocation + 'SEMI.{0}MY.png'.format(time)))\n plt.close('all')\n plt.close('all')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new empty file with the provided name
def new_file(name):
    if os.path.exists(name):
        os.unlink(name)
    return file(name, 'wt')
[ "def createEmptyFile(self, filename):\n pylabs.q.logger.log('creating an empty file with name & path: %s'%filename,9)\n if filename is None:\n raise ArithmeticError('Not enough parameters passed to system.fs.createEmptyFile: %s'%filename)\n try:\n open(filename, \"w\").close()\n pylabs.q.logger.log('Empty file %s has been successfully created'%filename)\n except Exception:\n raise RuntimeError(\"Failed to create an empty file with the specified filename: %s\"%filename)", "def mk_file(filename):\n if not os.path.exists(filename):\n open(filename, 'w').close() # noqa: WPS515", "def create_file(name, content):\n with open(name, 'w') as file:\n file.write(content)", "def _create_filename(self, filename):", "def make_empty_file(file_path: str):\r\n open(file_path, 'a').close()", "def create_file(file_name, size):\n with open(file_name, 'wb') as f:\n if size:\n f.seek(size - 1)\n f.write(b'\\x00')", "def _create_empty_file() -> None:\n FileManager.write_data(data={})", "def create_file():\n with open(now.strftime(\"%Y-%m-%d-%H-%M-%S-%f\") + \".txt\", \"w\") as file:\n file.write(\"\")", "def createFile(dest):\n date = t.localtime(t.time())\n name = '%d.%d.%d' %(date[2],date[1],date[0])\n fullName = dest + name \n\n if not(path.isfile(fullName)):\n f = open(fullName,'w')\n f.write('\\n'*30)\n f.close()\n print name", "def new_file(section, lab, s_name, file_name, grader):\n new_file_boilerplate = (\"Name: {0}\\nSection: {1}\\nLab: {2}\\nGrader: {3}\\n\"\n \"\".format(s_name, section, lab, grader))\n if os.path.isfile(file_name):\n return\n else:\n with open(file_name, 'w') as f:\n f.write(new_file_boilerplate)", "def create_no_file():\n with tempfile.NamedTemporaryFile('w', delete=True) as temp:\n name = temp.name\n return name", "def newBlankFile():\n c = Calibrator()\n name = str(time.time())\n c.save_to(calibrationFilesRoot+name+\".calib.txt\")\n return name", "def create_modified_file(self):\n file_name = os.path.join(self.dir, str(uuid.uuid4()))\n # create the file\n with open(file_name, \"wb\") as file_handler:\n file_handler.write(b\"\\0\")\n\n st = os.stat(file_name)\n access_time = st[ST_ATIME]\n modified_time = st[ST_MTIME]\n\n os.utime(file_name, (access_time, modified_time + (4 * 3600)))", "def initializeNewFile(fname, overwrite, lattice, params, makeActionSrc, extraGroups=[]):\n\n fname = Path(fname)\n if fname.exists():\n if overwrite:\n fname.unlink()\n getLogger(__name__).info(\"Output file %s exists -- overwriting\", fname)\n else:\n getLogger(__name__).error(\"Output file %s exists and not allowed to overwrite\", fname)\n raise RuntimeError(\"Output file exists\")\n\n with h5.File(str(fname), \"w-\") as h5f:\n for group in extraGroups:\n createH5Group(h5f, group)\n\n writeMetadata(fname, lattice, params, makeActionSrc)", "def new_file(self, kind):\n kind = kind.title()\n if kind == \"Folder\":\n filename = f\"{self.location}{os.sep}new_folder\"\n else:\n filename = f\"{self.location}{os.sep}new_file\"\n inc = ''\n while os.path.exists(filename + str(inc)):\n if inc:\n inc = f\"({int(inc[1:-1])+1})\"\n else:\n inc = \"(1)\"\n filename = f\"{filename}{inc}\"\n try:\n if kind == \"Folder\":\n os.makedirs(filename)\n else:\n os.mknod(filename)\n Pub.notify(\"App\", f\"{self.pid}: {kind} - {filename} created\")\n except OSError:\n logger.error(f\"Error creating {filename}\", exc_info=True)\n Pub.notify(\"App\", f\"{self.pid}: Error creating {filename}\")", "def create_file(self, suffix = '', prefix = 'vt_tmp'):\n (fd, name) = tempfile.mkstemp(suffix=suffix,\n prefix=prefix,\n 
dir=self.directory)\n os.close(fd)\n result = basic_modules.File()\n result.name = name\n result.upToDate = True\n self.files[name] = result\n return result", "def createFile(self, sFilename, sMode = 'wb'):\n assert sMode in [ 'wb', 'w', 'wU' ];\n\n # Try raw file first.\n sFile1 = os.path.join(config.g_ksFileAreaRootDir, self.sBaseFilename + '-' + sFilename);\n try:\n if not os.path.exists(os.path.dirname(sFile1)):\n os.makedirs(os.path.dirname(sFile1), 0o755);\n oFile = open(sFile1, sMode);\n except Exception as oXcpt1:\n return str(oXcpt1);\n return oFile;", "def _create_fake_cache_file(self, file_name):\n fobj = open(os.path.join(self.cache_path, file_name), 'w')\n fobj.close()", "def new_file(self, name):\n if self.recording:\n \"\"\"\n Throw an error if in the middle of recording \n \"\"\"\n RuntimeError(\"IN THE MIDDLE OF RECORDING\")\n return\n print name\n self._name = name + \".csv\"\n with open(self._name, \"a\") as f:\n writer = csv.writer(f, delimiter=\",\")\n writer.writerow(self.sensor_names)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove the file with the provided name
def rm_file(name):
    os.remove(name)
[ "def delete(self, name):\n name = self.clean_name(name)\n\n # ensure all data has been flushed from the writer for this file and close it\n if name in self.writers and not self.writers[name].closed:\n self.writers[name].flush()\n self.writers[name].close()\n\n full_path = os.path.join(self.working_folder, name)\n # if the file exists, remove it\n if os.path.exists(full_path):\n os.remove(full_path)", "def deleteFile(fileName):\n os.remove(calibrationFilesRoot+fileName+\".calib.txt\")", "def remove_file(curdir, filename):\n #os.remove(os.path.join(curdir,filename)) TODO: use os.remove if possible\n os.system(\"rm -f %s\" %os.path.join(curdir,filename))", "def del_file(self, path: str) -> None:\n cmd = b''.join([\n ev3.DELETE_FILE,\n str.encode(path) + b'\\x00' # NAME\n ])\n self.send_system_cmd(cmd)", "def delete_file(filename):\n delete_file_v2(filename)", "def delete_file(path, fileName=None):\r\n if fileName:\r\n path = os.path.join(path, fileName)\r\n if os.path.isfile(path):\r\n os.remove(path)", "def unlink(filename):\t\n try:\n os.unlink(filename)\n except:\n os.remove(filename)", "def system_remove_file(filename: str) -> None:\n status = subprocess.run([\"rm\", filename])\n if status.returncode != 0:\n raise SystemProcessError", "def remove_file(fname):\r\n os.remove(fname)\r\n __remove_pyc_pyo(fname)", "def file_remove(self, path):\n if not isinstance(path, basestring):\n raise TypeError(\"path can only be an instance of type basestring\")\n self._call(\"fileRemove\",\n in_p=[path])", "def remove(self, file):\n pass", "def gcs_remove_file(file_name):\n storage_blob = storage.Blob(file_name, storage_bucket)\n\n if storage_blob.exists():\n print('removing {file_name} from gcs://{project}'.format(file_name=file_name, project=gcp['project']))\n storage_blob.delete()\n\n return file_name", "def delete(file):\n\tif exists(file):\n\t\tos.unlink(file)", "def removeData(self, fileName):\n self.removePath(f\"{DATA_DIRNAME}/{fsdecode(fileName)}\")", "def delete_data(file_name=\"data_log.csv\"):\n if os.path.exists(file_name):\n os.remove(file_name)", "def remove_file(file_to_remove):\n if os.path.isfile(file_to_remove):\n if VERBOSE:\n print('\\tRemoving file {}'.format(file_to_remove))\n os.remove(file_to_remove)\n else:\n print('\\tFile {} was not found.'.format(file_to_remove))", "def _nodeFileRemover(self, name):\n node = self.G.node[name]\n f = os.path.abspath(os.path.join(self.dir, self.G.node[name]['file']))\n if (node['ownership'] == 'yes' and os.path.exists(f)):\n self.filesToRemove.add(f)\n deleteImage(f)\n\n for path, ownership in self.G.graph['nodeFilePaths'].iteritems():\n for pathvalue in getPathValues(node, path):\n if pathvalue and len(pathvalue) > 0 and (ownership not in node or node[ownership] == 'yes'):\n f = os.path.abspath(os.path.join(self.dir, pathvalue))\n if (os.path.exists(f)):\n self.filesToRemove.add(f)\n deleteImage(f)\n self.U.append(dict(name=name, action='removeNode', **self.G.node[name]))\n self.G.remove_node(name)", "def delete_local_file(path_to_file):\n os.remove(path_to_file)\n return", "def remove_file(fname):\n try:\n os.remove(fname)\n except OSError, msg:\n string = \\\n'''util.remove_file error (probably the dread Windows...)\nsys.platform = %s\nError Message:\n%s\n''' % (sys.platform, msg)\n print >> sys.stderr, string" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wait for a process to produce output, within a certain timeout. Checks, every seconds, for output in the files ./.out and ./.err, for a maximum of seconds. If any of these files is detected not to be empty, the wait is interrupted.
def wait_for_output(proc_name, timeout=None, sleep_resolution=1): count = 0 logging.debug('Waiting for %s to initialize' % proc_name) while not timeout or count < timeout: if os.path.getsize("./%s.err" % proc_name): logging.warning('There was a problem with the initialization of %s. ' 'Trying to continue...' % proc_name) return -1 if os.path.getsize("./%s.out" % proc_name): logging.debug('%s initialized correctly' % proc_name) return 0 time.sleep(sleep_resolution) count += sleep_resolution logging.warning('Could not confirm the initialization of %s after %.2fs. ' 'Trying to continue...' % (proc_name, count)) return 1
[ "def wait_for_output(self):\n p = select.poll()\n p.register(self.__proc__.stdout, select.POLLIN)\n start_time = time.time() \n\n while True: \n if p.poll(1): #if somthing in stdout\n time.sleep(1) #it not a perfect soluntion\n break", "def check_timeout(self):\n while self.runtime.get() <= self.opts.process_timeout:\n if not any(p.is_alive() for p in self.procs): # Check if any process has exited\n break\n time.sleep(1)\n else:\n self.logger.info(\"Timed out.\")\n self.terminate_early()", "def _wait(self, timeout=None):\n if not self._processes:\n return\n start = time.time()\n while True:\n try:\n if all(not process.is_alive() for process in self._processes):\n # All the workers are dead\n return\n if timeout and time.time() - start >= timeout:\n # Timeout\n return\n time.sleep(0.1)\n except:\n pass", "def wait(self, timeout=None):\n deadline = None if timeout is None else time.time() + timeout\n while True:\n # Because poll is guarded by the lock, we can just use busy-loop with\n # 0.1 secs interval (chosen heuristically).\n result = self.poll()\n if (result is not None or\n (deadline is not None and time.time() >= deadline)):\n # If subprocess is terminated or timed out, return the result.\n return result\n time.sleep(0.1)", "def wait(self, pattern='Done', interval=None,\n epatterns=['error','Error','STACK','Traceback']):\n increasing_interval = False\n if interval is None:\n increasing_interval = True\n interval = 10\n if self.wdir != '':\n stderr = \"%s/%s\"%(self.wdir, self.stderr)\n else:\n stderr = self.stderr\n debug.log(\"\\nWaiting for %s to finish...\"%str(self.name))\n if self.status == 'Executing':\n self.update_timer(-time()) # TIME START\n found = False\n if self.queue is not None:\n # Handling programs running on the compute servers\n # Waiting for error log to be created.\n # Prolonged waiting can be caused by the queue being full, or the\n # server being unavailable.\n debug.log(\" Waiting for the error log to be created (%s)...\"%(\n stderr))\n # Set maximum amount of seconds to wait on the errorlog creation,\n # before assuming queue failure.\n max_queued_time = 10800\n while ( not os.path.exists(stderr)\n and time()+self.timer < max_queued_time\n and time()+self.timer > 0\n ):\n debug.log(\" Waiting... (max wait time left: %s seconds)\"%(\n str(max_queued_time-time()-self.timer)))\n sleep(interval)\n if increasing_interval:\n interval *= 1.1\n \n if os.path.exists(stderr):\n if increasing_interval:\n interval = 10\n # File created looking for pattern\n debug.log('\\nError log created, waiting for program to finish...')\n # calculate max loops left based on set walltime and check interval\n max_time = time() + self.walltime * 60 * 60\n while time() < max_time:\n with open_(stderr) as f:\n for l in f.readlines()[-5:]: # last five lines\n if pattern in l:\n found = True\n max_time = 0\n break\n elif any([ep in l for ep in epatterns]):\n found = False\n max_time = 0\n break\n if max_time > 0:\n debug.log(' Waiting... 
(max wait-time left: %s seconds)'%(\n str(max_time-time())))\n sleep(interval)\n if found:\n debug.log(\" Program finished successfully!\")\n self.status = 'Done'\n else:\n debug.log(\"Error: Program took too long, or finished with error!\")\n if self.verbose:\n debug.print_out(\n \"Technical error occurred!\\n\",\n \"The service was not able to produce a result.\\n\",\n (\"Please check your settings are correct, and the file \"\n \"type matches what you specified.\\n\"),\n (\"Try again, and if the problem persists please notify the\"\n \" technical support.\\n\")\n )\n self.status = 'Failure'\n else:\n debug.log(\n (\"Error: %s still does not exist!\\n\")%(stderr),\n (\"This error might be caused by the cgebase not being \"\n \"available!\")\n )\n if self.verbose:\n debug.print_out(\n \"Technical error occurred!\\n\",\n (\"This error might be caused by the server not being \"\n \"available!\\n\"),\n (\"Try again later, and if the problem persists please notify \"\n \"the technical support.\\n\"),\n \"Sorry for any inconvenience.\\n\"\n )\n self.status = 'Failure'\n if not self.p is None:\n self.p.wait()\n self.p = None\n else:\n # Handling wrappers running on the webserver\n if self.p is None:\n debug.log(\"Program not instanciated!\")\n self.status = 'Failure'\n else:\n ec = self.p.wait()\n if ec != 0:\n debug.log(\"Program failed on execution!\")\n self.status = 'Failure'\n elif os.path.exists(stderr):\n with open_(stderr) as f:\n for l in f.readlines()[-5:]: # last five lines\n if pattern in l:\n found = True\n break\n elif any([ep in l for ep in epatterns]):\n found = False\n break\n if found:\n debug.log(\" Program finished successfully!\")\n self.status = 'Done'\n else:\n debug.log(\"Error: Program failed to finish properly!\")\n if self.verbose:\n debug.print_out(\"Technical error occurred!\\n\",\n \"The service was not able to produce a result.\\n\",\n \"Please check your settings are correct, and the file \"+\n \"type matches what you specified.\", \"Try again, and if \"+\n \"the problem persists please notify the technical \"+\n \"support.\\n\")\n self.status = 'Failure'\n else:\n debug.log((\"Error: %s does not exist!\\n\")%(stderr),\n \"This error might be caused by the cgebase not being \"+\n \"available!\")\n if self.verbose:\n debug.print_out(\"Technical error occurred!\\n\",\n \"This error might be caused by the server not being \"+\n \"available!\\n\", \"Try again later, and if the problem \"+\n \"persists please notify the technical support.\\n\",\n \"Sorry for any inconvenience.\\n\")\n self.status = 'Failure'\n self.p = None\n self.update_timer(time()) # TIME END\n debug.log(\" timed: %s\"%(self.get_time()))\n else:\n debug.log(\" The check-out of the program has been sorted previously.\")", "def monitor_output(path:str, success: str, failure: str, timeout: float) -> bool:\n start = time.time()\n while True:\n with open(path, \"r\") as f:\n now = time.time()\n out = f.read()\n if success in out:\n return True\n elif failure in out:\n return False\n elif now - start > timeout:\n return False", "def targetSshWaitOutput(self):\n if self.targetSsh is not None:\n self.targetSsh.waitOutput()", "def wait(self, forward_err):\n if self.proc is None:\n raise expect_error(\"No child process\")\n c_stdin = self.proc.w_channels[0]\n c_stdout = self.proc.r_channels[1]\n c_stderr = self.proc.r_channels[2]\n if forward_err:\n forward_dict = { self.stdin : (c_stdin,1),\n c_stdout : (self.stdout,0),\n c_stderr : (self.stderr,0) }\n else:\n forward_dict = { self.stdin : (c_stdin,1),\n 
c_stdout : (self.stdout,0) }\n while len(self.processes) > 0 or \\\n c_stdout.is_closed() == 0 or \\\n c_stderr.is_closed() == 0:\n self.expect_({}, forward_dict, 1)\n term_status = self.proc.term_status\n self.proc = None\n return term_status", "def runTimedCmd(timeout, cmd, indata=None, outhandler=None, errhandler=None):\n\n\t## create the process\n\tif type(indata) == type(file(\"/dev/null\")):\n\t\tintype = indata\n\t\tprint \"setting timed cmd input to be a file type\"\n\telse:\n\t\tintype = None\n\n\tif type(outhandler) == type(file(\"/dev/null\")):\n\t\touttype = outhandler\n\telse:\n\t\touttype = subprocess.PIPE\n\n\tif type(errhandler) == type(file(\"/dev/null\")):\n\t\terrtype = errhandler \n\telse:\n\t\terrtype = subprocess.PIPE\n\tproc = subprocess.Popen(cmd, stdin=intype,\n\t\tstderr=errtype, stdout=outtype)\n\n\toutdata=[]\n\terrdata=[]\n\t## Output to monitor\n\tp_out = None\n\tp_err = None\n\tmonitor = []\n\tif (outtype == subprocess.PIPE):\n\t\tp_out = proc.stdout\n\t\tmonitor.append(p_out)\n\t\n\tif (errtype == subprocess.PIPE):\n\t\tp_err = proc.stderr\n\t\tmonitor.append(p_err)\n\n\t## Grab data and append until process ends or times out\n\twith processTimeout(timeout, proc.pid):\n\t\tif (len(monitor)) == 0:\n\t\t\tresultcode = proc.wait()\n\t\telse:\n\t\t\twantmore = True\n\t\t\twhile wantmore:\n\t\t\t\t# Check if subprocess has already exited\n\t\t\t\tif proc.poll() is not None:\n\t\t\t\t\tbreak\n\n\t\t\t\t# See what's ready to be read \n\t\t\t\treadable,writable,exceptional=select.select(monitor,[],monitor)\n\n\t\t\t\tfor s in exceptional:\n\t\t\t\t\twantmore = False\n\n\t\t\t\tfor s in readable:\n\t\t\t\t\tline = s.readline()\n\t\t\t\t\tif s is p_out and len(line) > 0:\n\t\t\t\t\t\tif outhandler is None:\n\t\t\t\t\t\t\toutdata.append(line)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\touthandler(proc.pid, line)\n\t\t\t\t\tif s is p_err and len(line) > 0:\n\t\t\t\t\t\tif errhandler is None:\n\t\t\t\t\t\t\terrdata.append(line)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\terrhandler(proc.pid, line)\t\n\n\tresultcode = proc.wait()\n\n\t## There is a race above where proc.poll() may indicate process\n\t## is finished, but errdata or outdata still has buffered data.\n\t## Grab it here\n\tif p_out is not None:\n\t\tfor line in p_out.readlines():\n\t\t\tif outhandler is None:\n\t\t\t\toutdata.append(line)\n\t\t\telse:\n\t\t\t\touthandler(proc.pid, line)\n\tif p_err is not None:\n\t\tfor line in p_err.readlines():\n\t\t\tif errhandler is None:\n\t\t\t\terrdata.append(line)\n\t\t\telse:\n\t\t\t\terrhandler(proc.pid, line)\n\t\t\t\n\n\treturn resultcode,outdata,errdata", "def wait(self, timeout):\n if not self.waitg.wait(timeout):\n self.fail(\"test did not finish within %s seconds\" % timeout)\n\n failed_to_finish = []\n for t in self.threadv:\n try:\n t.join(1)\n except AssertionError:\n self.failed_event.set()\n failed_to_finish.append(t.name)\n if failed_to_finish:\n self.fail(\"threads did not finish: %s\" % failed_to_finish)\n del self.threadv # avoid cyclic garbage\n\n if self.failed():\n self.testcase.fail('\\n\\n'.join(self.failv))", "def _wait_all(self, timeout=0):\n t_wait = datetime.now()\n self._proc.wait(timeout=timeout or None)\n t_wait = datetime.now() - t_wait\n try:\n # Wait for all processes in the process group to finish\n while not timeout or t_wait.total_seconds() < timeout:\n t_poll = datetime.now()\n os.killpg(self._jobid, 0)\n time.sleep(self._wait_poll_secs)\n t_poll = datetime.now() - t_poll\n t_wait += t_poll\n\n # Final check\n os.killpg(self._jobid, 0)\n raise 
_TimeoutExpired\n except (ProcessLookupError, PermissionError):\n # Ignore also EPERM errors in case this process id is assigned\n # elsewhere and we cannot query its status\n return", "def run_timed_subprocess(cmd, timeout):\n proc = subprocess.Popen(cmd, shell=True, universal_newlines=True,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n preexec_fn=os.setsid, close_fds=True)\n for i in range(timeout):\n if proc.poll() != None:\n break\n time.sleep(1)\n else:\n os.killpg(os.getpgid(proc.pid), signal.SIGKILL)\n logmsg(\"Gradescript Timeout: {}\".format(cmd))\n \n out, err = proc.communicate()\n return out, err", "def wait_tests(self, working_dir: Path, timeout=5):\n\n def is_complete(path: Path):\n \"\"\"Return True if test is complete.\"\"\"\n\n return (path/TestRun.COMPLETE_FN).exists()\n\n runs_dir = working_dir / 'test_runs'\n end_time = time.time() + timeout\n while time.time() < end_time:\n\n completed = [is_complete(test)\n for test in dir_db.select(self.pav_cfg, runs_dir).paths]\n\n if not completed:\n self.fail(\"No tests started.\")\n\n if all(completed):\n break\n else:\n time.sleep(0.1)\n continue\n else:\n raise TimeoutError(\n \"Waiting on tests: {}\"\n .format(test.name for test in dir_db.select(self.pav_cfg,\n runs_dir).paths\n if is_complete(test)))", "def wait_tests(self, timeout=None):\n return self._testingfinished.wait(timeout)", "def wait_for_script(self, proc, timeout):\n rc = None\n if timeout <= 0:\n rc = proc.wait()\n else:\n # TODO: enhance this\n start_time = time.time()\n rc = None\n while True:\n rc = proc.poll()\n if rc is not None:\n break\n if (time.time() - start_time) > timeout:\n #if sys.platform.startswith(\"win\"):\n # subprocess.Popen(\"taskkill /T /PID %i\"%p.pid , shell=True).wait()\n # time.sleep(5)\n proc.kill()\n break\n time.sleep(1)\n\n return rc", "def wait_for(tester: TesterCallable,\n max_checks: int,\n interval: int = 1,\n label: Optional[str] = None):\n kctx = kitipy.get_current_context()\n label = label if label is not None else 'Waiting...'\n for i in range(1, max_checks, interval):\n kctx.echo(message=\"[%d/%d] %s\" % (i, max_checks, label))\n\n result = None\n succeeded = False\n\n try:\n result = tester(kctx)\n except subprocess.CalledProcessError as e:\n succeedded = False\n\n if isinstance(result, bool):\n succeeded = result\n if isinstance(result, subprocess.CompletedProcess):\n succeeded = result.returncode == 0\n\n if succeeded:\n return\n\n time.sleep(interval)\n\n kctx.fail(\"Failed to %s\" % (label.lower()))", "def _parse_output(self, wait_for=None, retain_vmread=False, timeout=60):\n # matched regex\n matched_obj = None\n\n output = ''\n if wait_for:\n # define the timeout limit\n timeout = time() + timeout\n\n buf_output = None\n while time() < timeout:\n # read the current information on terminal and remove\n # unnecessary data\n buf_output = self._format_output(\n self._s3270.ascii(), strip=True)\n\n # try to match collected output with any of the regexes\n # specified\n for match_re in wait_for:\n matched_obj = re.search(match_re, buf_output)\n # regex matched: stop processing\n if matched_obj:\n break\n\n # terminal is full: clear it\n if self._is_output_full(buf_output):\n # append current information to the output\n output += self._cleanup_status_line(buf_output)\n self._s3270.clear()\n # clear buffer so that it won't be appended twice to the\n # collected output when we leave the loop\n buf_output = None\n # machine in halted state: set again to running so that\n # pending output can be consumed\n elif 
self._check_status(buf_output) == 'VM READ':\n output += self._cleanup_status_line(buf_output)\n if not retain_vmread:\n self._s3270.enter()\n # clear buffer so that it won't be appended twice to the\n # collected output when we leave the loop\n buf_output = None\n\n # expected pattern matched: stop waiting\n if matched_obj:\n break\n\n # sleep for a while to avoid cpu consumption\n sleep(0.2)\n\n # leftover output available: append it to collected output\n if buf_output:\n output += self._cleanup_status_line(buf_output)\n\n else:\n # fetch all available output\n output_full = True\n while output_full:\n buf_output = self._format_output(\n self._s3270.ascii(), strip=True)\n\n # machine in halted state: set again to running so that\n # pending output can be consumed\n if self._check_status(buf_output) == 'VM READ':\n output += self._cleanup_status_line(buf_output)\n if not retain_vmread:\n self._s3270.enter()\n continue\n\n # in case output is not full we will leave the loop as all\n # available output was already collected\n output_full = self._is_output_full(buf_output)\n output += self._cleanup_status_line(buf_output)\n # clear screen to consume more output\n self._s3270.clear()\n\n return (output, matched_obj)", "def wait_for_program(program: Program, seconds_to_wait: int = 10) -> None:\n for _ in range(seconds_to_wait):\n ret = subprocess.run([\"wmctrl\", \"-l\"], capture_output=True)\n opened_windows = ret.stdout.decode().splitlines()\n for window in opened_windows:\n if program.display_name in window:\n return\n sleep(1)\n print(\n f\"Waiting for program {program.name} timed out after {seconds_to_wait} seconds\"\n )", "def waitForOutput(self, time):\r\n SimBase.trace(\"Entering ArenaService.waitForOutput()\")\r\n messagesToSend = SimBase.pendingOutput\r\n SimBase.pendingOutput = []\r\n\r\n if not SimBase.isSimulationRunning():\r\n SimBase.trace(\"Exiting ArenaService.waitForOutput()\")\r\n return (False, messagesToSend)\r\n\r\n arenaThread = threading.current_thread()\r\n arenaThread.wakeUpTime += time\r\n arenaThread.block()\r\n\r\n SimBase.trace(\"Exiting ArenaService.waitForOutput()\")\r\n return (True, messagesToSend)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove the directory with the provided name, along with all its contents
def rm_dir(name):
    shutil.rmtree(name)
[ "def delete_tmp_dir(self,name):\r\n\t\tnorm_name = os.path.normpath(name)\r\n\t\trel_path = os.path.relpath(norm_name, os.path.abspath(VDOM_CONFIG[\"TEMP-DIRECTORY\"]))\r\n\t\tif rel_path.find('/')>=0 or rel_path.find('\\\\')>=0:\r\n\t\t\traise VDOM_exception_file_access(\"Provided file name is invalid\")\t\r\n\t\tshutil.rmtree(name)", "def directory_remove(self, path):\n if not isinstance(path, basestring):\n raise TypeError(\"path can only be an instance of type basestring\")\n self._call(\"directoryRemove\",\n in_p=[path])", "def rm_file(name):\n os.remove(name)", "def delete_directory(path: str) -> None:\n shutil.rmtree(path)", "def remove(self, name):\n slab_logger.log(15, 'Removing repo %s' % name)\n if os.path.exists(name):\n os.remove(name)", "def delete(self, name):\n name = self.clean_name(name)\n\n # ensure all data has been flushed from the writer for this file and close it\n if name in self.writers and not self.writers[name].closed:\n self.writers[name].flush()\n self.writers[name].close()\n\n full_path = os.path.join(self.working_folder, name)\n # if the file exists, remove it\n if os.path.exists(full_path):\n os.remove(full_path)", "def purge_metadata_by_name(self, name) -> None:\n meta_dir = self._get_metadata_dir_by_name(name, self._metadata_base_dir)\n logger.debug(f\"purging metadata directory: {meta_dir}\")\n try:\n rm_rf(meta_dir)\n except OSError as e:\n raise ProcessManager.MetadataError(\n f\"failed to purge metadata directory {meta_dir}: {e!r}\"\n )", "def clean_directory(directory):\n\n for filename in os.listdir(directory):\n f = os.path.join(directory, filename)\n if os.path.isfile(f):\n sh.rm(f)", "def remove(self, dir_name):\n # todo: find out how to free memory after deletion\n if self.is_belong(dir_name):\n for d in self.data:\n if d.dir == dir_name:\n del(d.images)\n self.data.remove(d)\n return", "def remove_folder_or_file(self, path):\n url = self._base_url + \"/resources\"\n\n payload = {'path': path}\n r = requests.delete(url, headers=self.base_headers, params=payload)\n self._check_code(r)", "def delete_directory(self, dname):\n pid_ = None\n if self.root_directory:\n pid_ = self.root_directory.gdriveid\n else:\n print('no root directory', dname, self.directory_name_dict.keys())\n\n dn_list = dname.replace(BASE_DIR, 'My Drive').split('/')\n\n if dn_list[0] != 'My Drive':\n dn_list.insert(0, 'My Drive')\n assert dn_list[0] == 'My Drive'\n assert pid_ is not None\n\n for dn_ in dn_list[1:]:\n new_pid_ = None\n for item in self.directory_name_dict.get(dn_, []):\n if item.parentid == pid_:\n new_pid_ = item.gdriveid\n break\n if new_pid_:\n pid_ = new_pid_\n continue\n self.gdrive.delete_file(pid_)\n return ''", "def purge_dir(path):\n for root, dirs, files in os.walk(path, topdown=False): \n # remove all files\n for file in files:\n os.remove(os.path.join(root,file)) \n # remove all dirs\n for dir in dirs:\n os.rmdir(os.path.join(root,dir))\n os.rmdir(path)", "def remove_file(curdir, filename):\n #os.remove(os.path.join(curdir,filename)) TODO: use os.remove if possible\n os.system(\"rm -f %s\" %os.path.join(curdir,filename))", "def clean_up_cache(dir_name=_cache_dir):\n try:\n shutil.rmtree(dir_name)\n except OSError:\n # This would happen if the dir is already deleted\n # note: should be smarter and check the error code in\n # the Exception to make sure that it's a \"file not there\"\n pass\n except Exception, excp:\n # something else went wrong\n warnings.warn('Problem Deleting cache dir')\n # using repr to get the Error type in the warning\n 
warnings.warn(repr(excp))", "def cleanup_directory(directory_to_clean):\n try:\n chromium_utils.RemoveDirectory(directory_to_clean)\n except OSError as e:\n print 'Exception removing %s: %s' % (directory_to_clean, e)", "def recursiveRemove(path):", "def clear_dir(path, skips=None):\n if os.path.isdir(path):\n with os.scandir(path) as path_iter:\n for entry in path_iter:\n if entry.path in skips:\n continue\n try:\n if entry.is_file() or entry.is_symlink():\n os.remove(entry.path)\n else:\n shutil.rmtree(entry.path)\n except PermissionError:\n getlogger(__name__).warning(f\"could not delete path: {entry.path}\")", "def symlink_remove_directory(self, path):\n if not isinstance(path, basestring):\n raise TypeError(\"path can only be an instance of type basestring\")\n self._call(\"symlinkRemoveDirectory\",\n in_p=[path])", "def clean_file_in_dir(dirname, filename):\n for parent, _, filenames in os.walk(dirname):\n for name in filenames:\n if name == filename:\n os.remove(os.path.join(parent, name))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
save img from response
def save_img(img_response):
    time = datetime.datetime.now().strftime("%Y-%m-%d")
    img_name = time + '.jpg'
    with open(os.path.join('download', img_name), 'wb') as f:
        f.write(img_response.content)
[ "def write_image(self, response):\n file_name = \"{}_network.png\".format(self.prefix)\n print(\"Writing : {}\".format(file_name), file=sys.stderr)\n\n with open(file_name, 'wb') as fh:\n fh.write(response.content)", "def OutputImage(self, session):\n session.handler.send_header(\"Content-type\", \"image/png\")\n session.handler.end_headers()\n self.canvas.save(file=session.handler.wfile, format='png')", "def handle_possible_post_image(self, request, url_suffix):\n basename = url_suffix\n if not basename:\n return None\n\n data = request.request_body\n if not data.startswith(IMAGE_DATA_PREFIX):\n logging.error('Unexpected image format for: %s', basename)\n return SimpleResponse(400)\n\n data = data[len(IMAGE_DATA_PREFIX):]\n png = base64.b64decode(data)\n filename = os.path.join(self.screenshot_dir,\n '%s-%s.png' % (request.host, basename))\n if not os.access(self.screenshot_dir, os.W_OK):\n logging.error('Unable to write to: %s', filename)\n return SimpleResponse(400)\n\n with file(filename, 'w') as f:\n f.write(png)\n return SimpleResponse(200)", "def make_image_response(image, kind):\n mimetype = types_map['.' + kind.lower()]\n io = BytesIO()\n image.save(io, kind.upper())\n io.seek(0)\n return send_file(io, mimetype=mimetype, conditional=True)", "def download_save_img(url, id):\n path_img = \"data/images/image_{}.jpg\".format(id)\n urllib.request.urlretrieve(url, path_img)", "def get_image(self):", "def save_postimg_image(session,link):\n # Load image page\n image_page_html = get_url(link)\n if image_page_html is None:\n logging.error(\"Could not load postimg iamge page: \"+repr(link))\n return []\n # Find link to full image\n # <td><textarea onmouseover=\"this.focus()\" onfocus=\"this.select()\" id=\"code_2\" scrolling=\"no\" wrap=\"off\">http://s22.postimg.org/a0wx5kzf5/c1blastfacial_bonus.png</textarea></td>\n # id=\"code_2\" scrolling=\"no\">http://s22.postimg.org/a0wx5kzf5/c1blastfacial_bonus.png</textarea></td>\n # full sized link appears to always be associated with \"code_2\"\n full_image_link_regex = \"\"\"id=\"code_2\"[^><]+>([^><]+)<\"\"\"\n full_image_link_search = re.search(full_image_link_regex, image_page_html, re.IGNORECASE|re.DOTALL)\n if full_image_link_search:\n full_image_link = full_image_link_search.group(1)\n logging.debug(\"full_image_link:\"+repr(full_image_link))\n # Save image and return id\n return download_image_links(session,[full_image_link])\n else:\n logging.error(\"Could not find full image link!\")\n logging.debug(\"locals(): \"+repr(locals()))\n assert(False)# Stop so we know to fix this", "def save_image(self, filename='out.jpg', subdir='agionic/www'):\n try:\n if self.config['VERBOSE']: self.log_msg('HTTP', 'Saving output image to file')\n filepath = os.path.join(self.CURRENT_DIR, subdir, filename)\n cv2.imwrite(filepath, self.output_image)\n except Exception as error:\n self.log_msg('SYS', 'ERROR: %s' % str(error), important=True)", "def do_POST(self):\n\n content_len = int(self.headers.get_all('content-length')[0])\n x = self.rfile.read(content_len)\n \n uuid,img = x.split(b';')\n uuid = (uuid.decode('ascii'))\n \n img = bs.b64decode(img)\n\n params = (uuid,memoryview(img))\n self.c.execute('insert into images values(?, ?)', params)\n self.send_response(200)\n self.end_headers()\n dat = self.c.execute('select * from images;')\n\n self.conn.commit()", "def save_image(self, path, bin_img):\n with open(path, \"wb\") as f:\n self.save_image_fobj(f, bin_img)", "def img(request, key):\n i = get_object_or_404(Image, key=key)\n with open(i.path, 
\"rb\") as f:\n return HttpResponse(f.read(), content_type=\"image/jpeg\")", "def download_img(self, return_url = False):\n\n self.kegg_image = keggapi_get(\n dbentry=self.name, option=\"image\", show_result_image=False\n )\n plt.figure()\n plt.title(self.title)\n plt.imshow(self.kegg_image)\n \n if return_url == True:\n return self.imagelink", "def frame_image(request, id):\n img = Image.objects.get(id=id)\n img_path = \"annotations/data/frames/season_{season:02d}/episode_{episode:03d}/frame_{frame:09d}.jpg\".format(\n season=img.season,\n episode=img.episode,\n frame=img.frame\n )\n with open(img_path, \"rb\") as f:\n img_data = f.read()\n return HttpResponse(img_data, content_type=\"image/jpg\")", "def save_sketch(request):\n if request.method == \"POST\" and request.is_ajax():\n imgstring = str(request.POST.get(\"img\"))\n pngstring = base64.b64decode(imgstring.split(\",\")[1])\n sketch = Sketch()\n sketch.save()\n sketch.image.save(str(sketch.pk) + \".png\", ContentFile(pngstring))\n json = '{\"sketch_id\" : \"%s\"}' % sketch.pk\n print \"new image: %s\" % json\n messages.success(request, \"successfully posted a new sketch!\")\n return HttpResponse(json, mimetype=\"application/json\")\n\n return HttpResponseNotFound(\"invalid save request\")", "def image():\n content_id = bottle.request.query.content_id or None\n if not content_id:\n return\n # TODO a whole bunch of validation and error checking...\n bytes_: bytes = s3_client.get_object(\n Bucket=image_bucket_key, Key=f\"{image_folder_key}{content_id}\"\n )[\"Body\"].read()\n # TODO make the content type dynamic\n bottle.response.set_header(\"Content-type\", \"image/jpeg\")\n return bytes_", "def get_preview_image(observation_id):\n metadata_url = ('http://pds-rings-tools.seti.org/opus/api/image/med/'\n '{}.json'.format(observation_id))\n jsonstr = urlopen(metadata_url).read().decode('utf-8')\n jsonobj = json.loads(jsonstr)['data'][0]\n image_url = jsonobj['path'] + jsonobj['img']\n print('Downloading {}'.format(image_url))\n image_path, msg = urlretrieve(image_url)\n return image_path", "def save_png(self, filename):\n if self.png:\n data = base64.decodebytes(bytes(self.png, 'ascii'))\n with open(filename, 'wb') as f:\n f.write(data)\n else:\n warnings.warn('No png image available! Try auto_save_png() instead?')", "def picture():\n if request.headers['Content-Type'] == 'application/json':\n imagedata, status = vision.analyse(request.json['image'])\n return Response(imagedata, status=status, mimetype='application/json')\n else:\n app.logger.error('Problem with data, request body looked like this (%s)', request.data)\n return Response(\"payload needs to be in JSON-format\", status=400)", "def image():\n if request.files.get('image'):\n fname = 'images/{}.jpg'.format(str(time.time()))\n request.files['image'].save(fname)\n result = img_tasks.read_img_detect_circles(fname)\n print(result)\n current_app.logger.info(\"POST {}\".format(result))\n return jsonify({'success': True}), 200\n else:\n current_app.logger.warning(\"IMAGE POST FAILED\")\n return jsonify({'success': False}), 400" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns system specific FFmpeg static path
def return_static_ffmpeg():
    path = ""
    if platform.system() == "Windows":
        path += os.path.join(
            tempfile.gettempdir(), "Downloads/FFmpeg_static/ffmpeg/bin/ffmpeg.exe"
        )
    elif platform.system() == "Darwin":
        path += os.path.join(
            tempfile.gettempdir(), "Downloads/FFmpeg_static/ffmpeg/bin/ffmpeg"
        )
    else:
        path += os.path.join(
            tempfile.gettempdir(), "Downloads/FFmpeg_static/ffmpeg/ffmpeg"
        )
    return os.path.abspath(path)
[ "def _get_ffmpeg_path():\n ffmpeg_path = os.environ.get('FFMPEG_PATH', None)\n if ffmpeg_path is None:\n # Note: try to infer standard binary name regarding of platform.\n if platform.system() == 'Windows':\n ffmpeg_path = _WINDOWS_BINARY\n else:\n ffmpeg_path = _UNIX_BINARY\n expended = _which(ffmpeg_path)\n if expended is None:\n raise IOError(f'FFMPEG binary ({ffmpeg_path}) not found')\n return expended", "def get_ffmpeg_path() -> Optional[str]:\n # Prefer using ffmpeg if it already exists in PATH.\n try:\n subprocess.call(['ffmpeg', '-v', 'quiet'])\n return 'ffmpeg'\n except OSError:\n pass\n # Failed to invoke ffmpeg from PATH, see if we have a copy from imageio_ffmpeg.\n try:\n # pylint: disable=import-outside-toplevel\n from imageio_ffmpeg import get_ffmpeg_exe\n # pylint: enable=import-outside-toplevel\n subprocess.call([get_ffmpeg_exe(), '-v', 'quiet'])\n return get_ffmpeg_exe()\n # Gracefully handle case where imageio_ffmpeg is not available.\n except ModuleNotFoundError:\n pass\n # Handle case where path might be wrong/non-existent.\n except OSError:\n pass\n # get_ffmpeg_exe may throw a RuntimeError if the executable is not available.\n except RuntimeError:\n pass\n return None", "def return_testvideo_path(fmt=\"av\"):\r\n supported_fmts = {\r\n \"av\": \"BigBuckBunny_4sec.mp4\",\r\n \"vo\": \"BigBuckBunny_4sec_VO.mp4\",\r\n \"ao\": \"BigBuckBunny_4sec_AO.mp4\",\r\n }\r\n req_fmt = fmt if (fmt in supported_fmts) else \"av\"\r\n path = \"{}/Downloads/Test_videos/{}\".format(\r\n tempfile.gettempdir(), supported_fmts[req_fmt]\r\n )\r\n return os.path.abspath(path)", "def test_download_ffmpeg():\r\n try:\r\n import glob, shutil\r\n\r\n found = glob.glob(os.path.join(tempfile.gettempdir(), \"ffmpeg-static*\"))\r\n if found and os.path.isdir(found[0]):\r\n shutil.rmtree(found[0])\r\n except Exception as e:\r\n if not isinstance(e, PermissionError):\r\n pytest.fail(str(e))", "def staticpath(self):\n return abspath('content/app-static', self.name)", "def make_plugin_release_filesystem_location():\n return os.path.join(public_filesystem_location(), make_plugin_release_filename())", "def static_dir(self):\n return os.path.join('front', self.slug, 'static')", "def static_dir():\n tests_dir = os.path.dirname(__file__)\n return os.path.join(tests_dir, 'static')", "def commandline(self): \n return self._ffmpeg_commandline()", "def testing_audio_path():\n return TEST_WAVEFILE_PATH", "def path(cls):\n\n return os.path.join(FileDir.path(), 'videos')", "def get_video_audio_joining_command(audio_path, video_path, output_path):\n return 'ffmpeg -nostats -loglevel error -i {} -i {} -codec copy -shortest {}'.format(\n video_path, audio_path, output_path)", "def video_paths(pattern):\n return glob_with_suffix(pattern, ffmpeg_video_types)", "def find_default_soundfont():\n for dirpath,dirnames,filenames in os.walk('/usr/share'):\n for f in filenames:\n if f.endswith(\".sf2\") or f.endswith(\".sf3\"):\n return \"0:0:%s\"%os.path.join(dirpath,f)\n return \"\"", "def path(snd):\n return os.path.join(snddir, snd)", "def get_static_dirs():\n package = pkg_resources.Requirement.parse (\"bqserver\")\n package_path = pkg_resources.resource_filename(package,'bq')\n return [(package_path, os.path.join(package_path, 'usage', 'public'))]", "def set_skvideo_path(ffmpeg_path=None, libav_path=None):\n if libav_path is not None:\n skvideo.setLibAVPath(libav_path)\n if ffmpeg_path is not None:\n skvideo.setFFmpegPath(ffmpeg_path)", "def static_folder(self) -> str:\n return path.join(\"web\", \"static\")", "def 
load_sound_library(self):\n if not AudioEngine._ffmpeg2_loaded:\n AudioEngine._ffmpeg2_loaded = True\n else:\n return\n import pyglet_ffmpeg2\n pyglet_ffmpeg2.load_ffmpeg()", "def getmedia():\n local('rsync -vraz %(user)s@%(host)s:%(site_dir)smedia .' % env)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns Test video path
def return_testvideo_path(fmt="av"):
    supported_fmts = {
        "av": "BigBuckBunny_4sec.mp4",
        "vo": "BigBuckBunny_4sec_VO.mp4",
        "ao": "BigBuckBunny_4sec_AO.mp4",
    }
    req_fmt = fmt if (fmt in supported_fmts) else "av"
    path = "{}/Downloads/Test_videos/{}".format(
        tempfile.gettempdir(), supported_fmts[req_fmt]
    )
    return os.path.abspath(path)
[ "def video_path(self):\n assert self._video_path\n return self._video_path", "def return_testvideo(level=0):\n\tLevels = ['BigBuckBunny.mp4','20_mbps_hd_hevc_10bit.mkv','50_mbps_hd_h264.mkv','90_mbps_hd_hevc_10bit.mkv','120_mbps_4k_uhd_h264.mkv']\n\tpath = '{}/Downloads/Test_videos/{}'.format(tempfile.gettempdir(), Levels[level])\n\treturn os.path.abspath(path)", "def path(cls):\n\n return os.path.join(FileDir.path(), 'videos')", "def full_video_path(slug):\n p = join(project_dir(slug), FULL_VIDEO_BASENAME)\n return p", "def test_video(self):\n\t\t_task, _prog, _file = mock_handler_request(self.dir, 'theshadowmoose.tumblr.com/post/184562318724/another-test-post-with-video')\n\t\tres = tumblr.handle(_task, _prog)\n\t\tself.assertTrue(res, \"Tumblr video download failed!\")\n\t\tself.assertTrue(_file.exists(), \"Tumblr video was not downloaded! %s\" % res.failure_reason)\n\t\tself.assertTrue(_file.relative().endswith('.mp4'), 'Failed to use .mp4 extension for video file!')", "def test_video_mp4_should_return_true(self):\n\n video_name : str = \"video.mp4\"\n\n is_video : bool = script.is_video(video_name, debug_function = True)\n\n self.assertTrue(is_video)\n ...", "def test_video_vob_should_return_true(self):\n\n video_name : str = \"video.vob\"\n\n is_video : bool = script.is_video(video_name, debug_function = True)\n\n self.assertTrue(is_video)\n ...", "def test_video_mpg_should_return_true(self):\n\n video_name : str = \"video.mpg\"\n\n is_video : bool = script.is_video(video_name, debug_function = True)\n\n self.assertTrue(is_video)\n ...", "def test_generate_thumbnail_with_video_files_is_working_properly(self):\n thumbnail_path = \\\n self.test_media_manager.generate_thumbnail(\n self.test_video_path_mp4\n )\n\n # check if thumbnail is properly generated\n self.assertTrue(os.path.exists(thumbnail_path))\n\n # check if the thumbnail format is the default thumbnail format\n self.assertEqual(\n os.path.splitext(thumbnail_path)[1],\n self.test_media_manager.thumbnail_format\n )", "def test_video_avi_should_return_true(self):\n\n video_name : str = \"video.avi\"\n\n is_video : bool = script.is_video(video_name, debug_function = True)\n\n self.assertTrue(is_video)\n ...", "def get_output_path(video_path, dest_path: str = None):\n video_filename = os.path.split(video_path)[-1]\n\n if dest_path:\n output_folder = os.path.split(dest_path)[0]\n else:\n output_folder = os.path.join(os.path.split(video_path)[0], 'outputs')\n\n if not os.path.isdir(output_folder):\n os.makedirs(output_folder)\n\n output_path = os.path.join(\n output_folder, os.path.splitext(video_filename)[0]+'.mp4'\n )\n\n return output_path", "def test_video_m4v_should_return_true(self):\n\n video_name : str = \"video.m4v\"\n\n is_video : bool = script.is_video(video_name, debug_function = True)\n\n self.assertTrue(is_video)\n ...", "def get_video_parts(video_path):\n parts = video_path.split(os.path.sep)\n filename = parts[2]\n filename_no_ext = filename.split('.')[0]\n classname = parts[1]\n train_or_test = parts[0]\n\n return train_or_test, classname, filename_no_ext, filename", "def test_video_to_frames(self):\n video_variables = list(\n functions.video_to_frames(\"../\" + constant.FRAME_SAVE_PATH, \"../\" + constant.PATH_TO_VIDEO))\n self.assertGreater(video_variables[0], 0)\n self.assertGreater(video_variables[1], 0)\n self.assertGreater(video_variables[2], 0)", "def test_player_folder(window):\n assert window.get_current_player().get_folder() == \"tests/test_media/photos\"", "def 
test_video_name_with_trailing_space(self):\n\n video_name : str = \"video with trailing space.mp4 \"\n\n is_video : bool = script.is_video(video_name, debug_function = True)\n\n self.assertTrue(is_video)\n ...", "def test_video_mov_should_return_true(self):\n\n video_name : str = \"video.mov\"\n\n is_video : bool = script.is_video(video_name, debug_function = True)\n\n self.assertTrue(is_video)\n ...", "def test_video_flv_should_return_true(self):\n\n video_name : str = \"video.flv\"\n\n is_video : bool = script.is_video(video_name, debug_function = True)\n\n self.assertTrue(is_video)\n ...", "def test_video_rmvb_should_return_true(self):\n\n video_name : str = \"video.rmvb\"\n\n is_video : bool = script.is_video(video_name, debug_function = True)\n\n self.assertTrue(is_video)\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns framerate of video (at path provided) using FFmpeg
def getFrameRate(path):
    process = subprocess.Popen(
        [return_static_ffmpeg(), "-i", path],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    stdout, _ = process.communicate()
    output = stdout.decode()
    match_dict = re.search(r"\s(?P<fps>[\d\.]+?)\stbr", output).groupdict()
    return float(match_dict["fps"])
[ "def framerate_of_videofile(self):\n p = self.probe() \n assert 'streams' in p and len(['streams']) > 0\n fps = p['streams'][0]['avg_frame_rate']\n return float(fps) if '/' not in fps else (float(fps.split('/')[0]) / float(fps.split('/')[1])) # fps='30/1' or fps='30.0'", "def get_frame_rate(file: Path, ffprobe: str) -> float:\n cmd = f'ffprobe -of csv=p=0 -select_streams v:0 -show_entries stream=r_frame_rate \"{file}\"'\n print(f'[Info] executing \"{cmd}\"')\n p = sp.Popen(shlex.split(cmd), stdout=sp.PIPE, stderr=sp.PIPE)\n p.wait()\n if p.returncode == 0:\n frame_rate = eval(p.communicate()[0])\n print(f'[Success] frame rate is {frame_rate}')\n return frame_rate\n else:\n print(p.communicate()[1].decode(), file=sys.stderr)\n raise RuntimeError('failed to get the frame rate')", "def get_video_duration(self):\n\n probe = ffmpeg.probe(self.in_path)\n video_info = next(\n s for s in probe['streams'] if s['codec_type'] == 'video')\n\n try:\n duration = int(video_info['duration'].split('.')[0])\n except:\n print(\n \"\"\"ERROR: can't extract duration of the video,\n please specify it by '-e' option.\"\"\")\n sys.exit()\n\n return duration", "def ffmpeg_parse_infos(filename, print_infos=False, check_duration=True):\n\n # open the file in a pipe, provoke an error, read output\n cmd = [get_setting(\"FFMPEG_BINARY\"), \"-i\", filename]\n\n popen_params = {\"bufsize\": 10 ** 5,\n \"stdout\": sp.PIPE,\n \"stderr\": sp.PIPE,\n \"stdin\": DEVNULL}\n\n if os.name == \"nt\":\n popen_params[\"creationflags\"] = 0x08000000\n\n proc = sp.Popen(cmd, **popen_params)\n\n proc.stdout.readline()\n proc.terminate()\n infos = proc.stderr.read().decode('utf8')\n del proc\n\n if print_infos:\n # print the whole info text returned by FFMPEG\n print(infos)\n\n lines = infos.splitlines()\n if \"No such file or directory\" in lines[-1]:\n raise IOError((\"MoviePy error: the file %s could not be found !\\n\"\n \"Please check that you entered the correct \"\n \"path.\") % filename)\n\n result = dict()\n\n # get duration (in seconds)\n result['duration'] = None\n\n if check_duration:\n try:\n keyword = 'Duration: '\n line = [l for l in lines if keyword in l][0]\n match = re.findall(\"([0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9])\", line)[0]\n result['duration'] = cvsecs(match)\n except:\n raise IOError((\"MoviePy error: failed to read the duration of file %s.\\n\"\n \"Here are the file infos returned by ffmpeg:\\n\\n%s\") % (\n filename, infos))\n\n # get the output line that speaks about video\n lines_video = [l for l in lines if ' Video: ' in l]\n\n result['video_found'] = (lines_video != [])\n\n if result['video_found']:\n\n try:\n line = lines_video[0]\n\n # get the size, of the form 460x320 (w x h)\n match = re.search(\" [0-9]*x[0-9]*(,| )\", line)\n s = list(map(int, line[match.start():match.end() - 1].split('x')))\n result['video_size'] = s\n except:\n raise ((\"MoviePy error: failed to read video dimensions in file %s.\\n\"\n \"Here are the file infos returned by ffmpeg:\\n\\n%s\") % (\n filename, infos))\n\n # get the frame rate. Sometimes it's 'tbr', sometimes 'fps', sometimes\n # tbc, and sometimes tbc/2...\n # Current policy: Trust tbr first, then fps. 
If result is near from x*1000/1001\n # where x is 23,24,25,50, replace by x*1000/1001 (very common case for the fps).\n\n try:\n match = re.search(\"( [0-9]*.| )[0-9]* tbr\", line)\n tbr = float(line[match.start():match.end()].split(' ')[1])\n result['video_fps'] = tbr\n\n except:\n match = re.search(\"( [0-9]*.| )[0-9]* fps\", line)\n result['video_fps'] = float(line[match.start():match.end()].split(' ')[1])\n\n # It is known that a fps of 24 is often written as 24000/1001\n # but then ffmpeg nicely rounds it to 23.98, which we hate.\n coef = 1000.0 / 1001.0\n fps = result['video_fps']\n for x in [23, 24, 25, 30, 50]:\n if (fps != x) and abs(fps - x * coef) < .01:\n result['video_fps'] = x * coef\n\n if check_duration:\n result['video_nframes'] = int(result['duration'] * result['video_fps']) + 1\n result['video_duration'] = result['duration']\n else:\n result['video_nframes'] = 1\n result['video_duration'] = None\n # We could have also recomputed the duration from the number\n # of frames, as follows:\n # >>> result['video_duration'] = result['video_nframes'] / result['video_fps']\n\n lines_audio = [l for l in lines if ' Audio: ' in l]\n\n result['audio_found'] = lines_audio != []\n\n if result['audio_found']:\n line = lines_audio[0]\n try:\n match = re.search(\" [0-9]* Hz\", line)\n result['audio_fps'] = int(line[match.start() + 1:match.end()])\n except:\n result['audio_fps'] = 'unknown'\n\n return result", "def duration_in_frames_of_videofile(self):\n return int(np.floor(self.duration_in_seconds_of_videofile()*self.framerate_of_videofile()))", "def get_video_fps(video):\n\n\treturn int(cv.GetCaptureProperty(video, cv.CV_CAP_PROP_FPS))", "def get_video_dims(fname):\n try:\n import pyffmpeg\n except ImportError:\n raise ImportError(\"This function requires pyffmpeg \"\n \"<http://code.google.com/p/pyffmpeg/>\")\n mp = pyffmpeg.FFMpegReader()\n try:\n mp.open(fname)\n tracks = mp.get_tracks()\n for track in tracks:\n if isinstance(track, pyffmpeg.VideoTrack):\n break\n else:\n raise ValueError('no video track found')\n return (track.duration(),) + track.get_orig_size()\n finally:\n mp.close()", "def get_frames(video_path):\n # open a pointer to the video file initialize the width and height of the frame\n vs = cv2.VideoCapture(video_path)\n if not vs.isOpened():\n raise Exception(f'unable to open file {video_path}')\n\n total_frames = vs.get(cv2.CAP_PROP_FRAME_COUNT)\n frame_time = 0\n frame_count = 0\n print(\"total_frames: \", total_frames)\n print(\"FRAME_RATE\", FRAME_RATE)\n\n # loop over the frames of the video\n while True:\n # grab a frame from the video\n\n vs.set(cv2.CAP_PROP_POS_MSEC, frame_time * 1000) # move frame to a timestamp\n frame_time += 1/FRAME_RATE\n\n (_, frame) = vs.read()\n # if the frame is None, then we have reached the end of the video file\n if frame is None:\n break\n\n frame_count += 1\n yield frame_count, frame_time, frame", "def vid_len(path):\n return int(cv2.VideoCapture(path).get(cv2.CAP_PROP_FRAME_COUNT))", "def get_image_size_and_frames_count(path: str) -> Tuple[Tuple[int, int], int]:\n import skvideo.io\n\n vreader = skvideo.io.FFmpegReader(path)\n vlength = vreader.getShape()[0]\n img_height = vreader.getShape()[1]\n img_width = vreader.getShape()[2]\n\n img_size = (img_height, img_width)\n\n return img_size, vlength", "def GetNumFrames(vid_path):\n cap = cv2.VideoCapture(vid_path)\n total_frames = cap.get(7)\n cap.release()\n return int(total_frames)", "def test_input_framerate(c_ffmpeg):\r\n stream = cv2.VideoCapture(return_testvideo_path()) # Open 
stream\r\n test_video_framerate = stream.get(cv2.CAP_PROP_FPS)\r\n output_params = (\r\n {\"-input_framerate\": test_video_framerate}\r\n if (c_ffmpeg != \"wrong_path\")\r\n else {\"-input_framerate\": \"wrong_input\"}\r\n )\r\n writer = WriteGear(\r\n output=\"Output_tif.mp4\", custom_ffmpeg=c_ffmpeg, logging=True, **output_params\r\n ) # Define writer\r\n while True:\r\n (grabbed, frame) = stream.read()\r\n if not grabbed:\r\n break\r\n writer.write(frame)\r\n stream.release()\r\n writer.close()\r\n output_video_framerate = getFrameRate(os.path.abspath(\"Output_tif.mp4\"))\r\n assert test_video_framerate == output_video_framerate\r\n remove_file_safe(\"Output_tif.mp4\")", "def get_video_size(self):\n\n self.logger.info(f'Getting video size for {self.in_path}')\n probe = ffmpeg.probe(self.in_path)\n video_info = next(\n s for s in probe['streams'] if s['codec_type'] == 'video')\n width = int(video_info['width'])\n height = int(video_info['height'])\n return width, height", "def ffmpeg_parse_infos(self,filename, print_infos=False, check_duration=True):\n\n\n # open the file in a pipe, provoke an error, read output\n is_GIF = filename.endswith('.gif')\n cmd = [get_setting(\"FFMPEG_BINARY\"), \"-i\", filename]\n if is_GIF:\n cmd += [\"-f\", \"null\", \"/dev/null\"]\n\n popen_params = {\"bufsize\": 10**5,\n \"stdout\": sp.PIPE,\n \"stderr\": sp.PIPE,\n \"stdin\": DEVNULL}\n\n if os.name == \"nt\":\n popen_params[\"creationflags\"] = 0x08000000\n\n proc = sp.Popen(cmd, **popen_params)\n\n proc.stdout.readline()\n proc.terminate()\n infos = proc.stderr.read().decode('utf8')\n del proc\n\n if print_infos:\n # print the whole info text returned by FFMPEG\n print( infos )\n\n\n lines = infos.splitlines()\n if \"No such file or directory\" in lines[-1]:\n raise IOError((\"MoviePy error: the file %s could not be found !\\n\"\n \"Please check that you entered the correct \"\n \"path.\")%filename)\n\n result = dict()\n\n\n # get duration (in seconds)\n result['duration'] = None\n\n if check_duration:\n try:\n keyword = ('frame=' if is_GIF else 'Duration: ')\n line = [l for l in lines if keyword in l][0]\n match = re.findall(\"([0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9])\", line)[0]\n result['duration'] = cvsecs(match)\n except:\n raise IOError((\"MoviePy error: failed to read the duration of file %s.\\n\"\n \"Here are the file infos returned by ffmpeg:\\n\\n%s\")%(\n filename, infos))\n\n # get the output line that speaks about video\n lines_video = [l for l in lines if ' Video: ' in l and re.search('\\d+x\\d+', l)]\n\n result['video_found'] = ( lines_video != [] )\n\n if result['video_found']:\n\n\n try:\n line = lines_video[0]\n\n # get the size, of the form 460x320 (w x h)\n match = re.search(\" [0-9]*x[0-9]*(,| )\", line)\n s = list(map(int, line[match.start():match.end()-1].split('x')))\n result['video_size'] = s\n except:\n raise IOError((\"MoviePy error: failed to read video dimensions in file %s.\\n\"\n \"Here are the file infos returned by ffmpeg:\\n\\n%s\")%(\n filename, infos))\n\n\n # get the frame rate. Sometimes it's 'tbr', sometimes 'fps', sometimes\n # tbc, and sometimes tbc/2...\n # Current policy: Trust tbr first, then fps. 
If result is near from x*1000/1001\n # where x is 23,24,25,50, replace by x*1000/1001 (very common case for the fps).\n\n try:\n match = re.search(\"( [0-9]*.| )[0-9]* tbr\", line)\n tbr = float(line[match.start():match.end()].split(' ')[1])\n result['video_fps'] = tbr\n\n except:\n match = re.search(\"( [0-9]*.| )[0-9]* fps\", line)\n result['video_fps'] = float(line[match.start():match.end()].split(' ')[1])\n\n\n # It is known that a fps of 24 is often written as 24000/1001\n # but then ffmpeg nicely rounds it to 23.98, which we hate.\n coef = 1000.0/1001.0\n fps = result['video_fps']\n for x in [23,24,25,30,50]:\n if (fps!=x) and abs(fps - x*coef) < .01:\n result['video_fps'] = x*coef\n\n if check_duration:\n result['video_nframes'] = int(result['duration']*result['video_fps'])+1\n result['video_duration'] = result['duration']\n else:\n result['video_nframes'] = 1\n result['video_duration'] = None\n # We could have also recomputed the duration from the number\n # of frames, as follows:\n # >>> result['video_duration'] = result['video_nframes'] / result['video_fps']\n\n\n lines_audio = [l for l in lines if ' Audio: ' in l]\n\n result['audio_found'] = lines_audio != []\n\n if result['audio_found']:\n line = lines_audio[0]\n try:\n match = re.search(\" [0-9]* Hz\", line)\n result['audio_fps'] = int(line[match.start()+1:match.end()-3])\n except:\n result['audio_fps'] = 'unknown'\n\n return result", "def getFrameRate(sound_file):\n\n wr = wave.open(sound_file, 'r')\n nchannels, sampwidth, framerate, nframes, comptype, compname = wr.getparams()\n return framerate", "def ToFrames(self, f):\n if type(f) == type(2.2):\n #return f*29.97\n #framrate is hardcoded at the moment, this needs to fixed\n #The framerate should be found on a per file basis.\n return f*59.94\n elif type(f) == type(\"\"):\n #return ToSeconds(f)*29.97\n return self.ToSeconds(f)*29.97", "def find_framerate(self):\n tracks_tag = self.soup.find_all(\"Tracks\")[0]\n frame_str = tracks_tag.find_all(\"frameRate\")[0].contents[0]\n frame_list = frame_str.split(\"f\")\n self.framerate = float(frame_list[1]) / 1000.0", "def convert_frames_to_video(self, pathIn,pathOut):", "def frames(self):\n f = 0\n if self.isVideo() or self.isAudio():\n if 'nb_frames' in self.__dict__:\n try:\n f = int(self.__dict__['nb_frames'])\n except Exception as e:\n pass\n return f" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Auxiliary test to simply delete old FFmpeg binaries.
def test_download_ffmpeg():
    try:
        import glob, shutil
        found = glob.glob(os.path.join(tempfile.gettempdir(), "ffmpeg-static*"))
        if found and os.path.isdir(found[0]):
            shutil.rmtree(found[0])
    except Exception as e:
        if not isinstance(e, PermissionError):
            pytest.fail(str(e))
[ "def oldLiveCleanup():\n try:\n if(os.path.exists(c.wwwroot+'live')): #there was a live event that didn't stop properly, maybe due to a powerloss\n pxp.encstop() #stop that event and rename the folder according to the event standards\n # TODO: perhaps do some error checking here - if the powerloss occurred, might have to repair the MP4 file: \n # 0 check if the mp4 file is corrupt (one ffmpeg call can tell you that)\n # 1 delete the original mp4, \n # 2 assemble all the .ts segments into one big .TS file \n # 3 convert it to a new mp4\n except Exception as e:\n dbg.prn(dbg.ECN|dbg.ERR,\"[---]oldLiveCleanup:\",e,sys.exc_info()[-1].tb_lineno)", "def test_ffmpeg_binaries_download(paths, os_bit):\r\n file_path = \"\"\r\n try:\r\n file_path = download_ffmpeg_binaries(\r\n path=paths, os_windows=_windows, os_bit=os_bit\r\n )\r\n if file_path:\r\n assert os.path.isfile(file_path), \"FFmpeg download failed!\"\r\n shutil.rmtree(os.path.abspath(os.path.join(file_path, \"../..\")))\r\n except Exception as e:\r\n if paths == \"wrong_test_path\" or os_bit == \"wrong_bit\":\r\n pass\r\n elif isinstance(e, requests.exceptions.Timeout):\r\n logger.exceptions(str(e))\r\n else:\r\n pytest.fail(str(e))", "def test_purge_restored_backup(remove):\n swb.purge_restored_backup(\n backup_path='path/to/my backup.sql.bz2',\n db_path='path/to/my backup.sql')\n remove.assert_any_call('path/to/my backup.sql')\n remove.assert_any_call('path/to/my backup.sql.bz2')", "def clean_previous_result():\r\n delete_files('tmp/decrypted_pdf')\r\n delete_files('tmp/txt')\r\n delete_files('output')\r\n return 1", "def test_purge_restored_backup_silent_fail(remove):\n swb.purge_restored_backup(\n backup_path='path/to/my backup.sql.bz2',\n db_path='path/to/my backup.sql')\n nose.assert_equal(remove.call_count, 2)", "def test_remove_from_bin_file_exists(self):\n _, temp_file_path = tempfile.mkstemp()\n try:\n with open(temp_file_path, \"w+\") as temp_file:\n temp_file.write(\"hobo\")\n self.directory.symlink_to_bin(\"newfile\", temp_file_path)\n self.directory.remove_from_bin(\"newfile\")\n assert not os.path.exists(\n os.path.join(self.directory.bin_path(), \"newfile\")\n )\n os.mkdir(os.path.join(self.directory.bin_path(), \"newfolder\"))\n self.directory.remove_from_bin(\"newfolder\")\n assert not os.path.exists(\n os.path.join(self.directory.bin_path(), \"newfolder\")\n )\n finally:\n os.unlink(temp_file_path)", "def check_ffmpeg():\n try:\n subprocess.call(['ffmpeg'], stderr=subprocess.DEVNULL)\n\n except FileNotFoundError:\n return False\n\n return True", "def test_clean():\n\n for deldir in [\"srcdata\", \"newdata\"]:\n path = Path(deldir)\n for name in path.glob(\"*.json\"):\n name.unlink()\n for name in path.glob(\"*.pickle\"):\n name.unlink()\n path.rmdir()\n\n rmtree(\"sys\")", "def test_delete(self):\n delete_files.delete_raw_from_jpg(self.data['jpg']['path'], self.data['raw']['path'], self.target)\n self.assertFalse(os.path.isfile(os.path.join(self.data['raw']['path'], '3.raw')))", "def clean_temp_files():\r\n delete_files('tmp/decrypted_pdf')\r\n delete_files('tmp/txt')\r\n return 1", "def remove_old_files():\n\n cmd_prefix = [adb_path(), \"shell\", \"run-as\", package_name()]\n ls_cmd = cmd_prefix + [\"ls\", device_app_dir()]\n\n p = subprocess.Popen(ls_cmd, shell=False, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = p.communicate()\n if err:\n print err\n sys.exit(0)\n\n files = [fn for fn in out.split('\\r\\n') if fn]\n print \"Removing:\", files\n for fn in files:\n rm_cmd = cmd_prefix + [\"rm\", 
'-r', os.path.join(device_app_dir(), fn)]\n subprocess.call(rm_cmd)", "def clearVideo():\n files = glob.glob('/home/sewerbot/repo/SeniorDesign/site/backend/data_management/temp/outputtedVideo.mp4')\n remove(files)\n files = glob.glob('/home/sewerbot/repo/SeniorDesign/site/backend/data_management/temp/imageFront.jpg')\n remove(files)", "def delete_quality_junit_xml():\n yield\n if os.path.exists(Env.QUALITY_DIR):\n rmtree(Env.QUALITY_DIR, ignore_errors=True)", "def remove_audio_file_pre_delete_output(sender, instance, using, **kwargs): # pylint: disable=W0613\n if instance.audio_file:\n dirname = os.path.dirname(instance.audio_file.path)\n if os.path.isdir(dirname):\n shutil.rmtree(dirname)", "def test_cli_clean_fastqs_removed(\n populated_compress_fastq_api: MockCompressAPI,\n compression_files: MockCompressionData,\n sample: str,\n):\n spring_file: Path = compression_files.spring_file\n spring_metadata_file: Path = compression_files.spring_metadata_file\n fastq_first: Path = compression_files.fastq_first_file\n fastq_second: Path = compression_files.fastq_second_file\n\n # GIVEN that the SPRING compression files exist\n assert spring_file.exists()\n assert spring_metadata_file.exists()\n\n # GIVEN that the FASTQ files exists\n assert fastq_first.exists()\n assert fastq_second.exists()\n\n # WHEN running the clean command\n populated_compress_fastq_api.clean_fastq(sample)\n\n # THEN assert SPRING files exists\n assert spring_file.exists()\n assert spring_metadata_file.exists()\n\n # THEN assert that the FASTQ files are removed\n assert not fastq_first.exists()\n assert not fastq_second.exists()", "def test_shell_cmd_delete_dump_dir_should_be(self):\n self.backup._set_attributes()\n matching_str = \"rm -rf {}/dump\".format(self.home_dir)\n self.assertTrue(self.backup.executable.shell_cmd_delete_dump_dir() == matching_str)\n return", "def cleanup_files(self):\n os.system(\"rm -r /tmp/kernelpop\")", "def do_clean(number=0):\n number = int(number)\n if number == 0 or number == 1:\n number = 1\n\n files = sorted(os.listdir(\"versions\"))\n size = len(files)\n for i in range(number):\n if size > i:\n files.pop()\n with lcd(\"versions\"):\n for file_name in files:\n local(\"rm -f {}\".format(file_name))\n\n with cd(\"/data/web_static/releases\"):\n all_files = run(\"ls -tr -1\").split(\"\\r\\n\")\n files = [name for name in all_files if \"web_static_\" in name]\n size = len(files)\n for i in range(number):\n if size > i:\n files.pop()\n for file_name in files:\n run(\"rm -rf {}\".format(file_name))", "def test_WriteGear_customFFmpeg(ffmpeg_cmd, logging, output_params):\r\n writer = None\r\n try:\r\n # define writer\r\n writer = WriteGear(\r\n output=\"Output.mp4\",\r\n compression_mode=(True if ffmpeg_cmd != [\"invalid\"] else False),\r\n logging=logging,\r\n **output_params\r\n ) # Define writer\r\n\r\n # execute FFmpeg command\r\n writer.execute_ffmpeg_cmd(ffmpeg_cmd)\r\n writer.close()\r\n # assert audio file is created successfully\r\n if ffmpeg_cmd and isinstance(ffmpeg_cmd, list) and \"-acodec\" in ffmpeg_cmd:\r\n assert os.path.isfile(\"input_audio.aac\")\r\n except Exception as e:\r\n if isinstance(e, AssertionError):\r\n pytest.fail(str(e))\r\n elif isinstance(e, (ValueError, RuntimeError)):\r\n pytest.xfail(\"Test Passed!\")\r\n else:\r\n logger.exception(str(e))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Testing "input_framerate" parameter provided by WriteGear(in Compression Mode)
def test_input_framerate(c_ffmpeg):
    stream = cv2.VideoCapture(return_testvideo_path())  # Open stream
    test_video_framerate = stream.get(cv2.CAP_PROP_FPS)
    output_params = (
        {"-input_framerate": test_video_framerate}
        if (c_ffmpeg != "wrong_path")
        else {"-input_framerate": "wrong_input"}
    )
    writer = WriteGear(
        output="Output_tif.mp4", custom_ffmpeg=c_ffmpeg, logging=True, **output_params
    )  # Define writer
    while True:
        (grabbed, frame) = stream.read()
        if not grabbed:
            break
        writer.write(frame)
    stream.release()
    writer.close()
    output_video_framerate = getFrameRate(os.path.abspath("Output_tif.mp4"))
    assert test_video_framerate == output_video_framerate
    remove_file_safe("Output_tif.mp4")
[ "def check_framerate(params, framerate):\n return params.framerate == framerate", "def test_output_dimensions():\r\n dimensions = (640, 480)\r\n stream = cv2.VideoCapture(return_testvideo_path())\r\n output_params = {}\r\n if platform.system() == \"Windows\":\r\n output_params = {\r\n \"-output_dimensions\": dimensions,\r\n \"-ffmpeg_download_path\": tempfile.gettempdir(),\r\n }\r\n else:\r\n output_params = {\"-output_dimensions\": dimensions}\r\n writer = WriteGear(\r\n output=\"Output_tod.mp4\",\r\n custom_ffmpeg=return_static_ffmpeg(),\r\n logging=True,\r\n **output_params\r\n ) # Define writer\r\n while True:\r\n (grabbed, frame) = stream.read()\r\n if not grabbed:\r\n break\r\n writer.write(frame)\r\n stream.release()\r\n writer.close()\r\n\r\n output = cv2.VideoCapture(os.path.abspath(\"Output_tod.mp4\"))\r\n output_dim = (\r\n output.get(cv2.CAP_PROP_FRAME_WIDTH),\r\n output.get(cv2.CAP_PROP_FRAME_HEIGHT),\r\n )\r\n assert output_dim[0] == 640 and output_dim[1] == 480\r\n output.release()\r\n\r\n remove_file_safe(\"Output_tod.mp4\")", "def calculate_output_fps(self):\n max_interval_fps = 1 / min(\n [decoder.interval for decoder in self.decoders.values()]\n )\n self.output_fps = round(min([max_interval_fps, self.fps]))", "def test_WriteGear_compression(f_name, c_ffmpeg, output_params, result):\r\n try:\r\n stream = cv2.VideoCapture(return_testvideo_path()) # Open stream\r\n writer = WriteGear(output=f_name, compression_mode=True, **output_params)\r\n while True:\r\n (grabbed, frame) = stream.read()\r\n if not grabbed:\r\n break\r\n writer.write(frame)\r\n stream.release()\r\n writer.close()\r\n remove_file_safe(f_name)\r\n except Exception as e:\r\n if result:\r\n pytest.fail(str(e))", "def convert_framerate(fragment, width, nchannels, framerate_in, framerate_out):\n if framerate_in == framerate_out:\n return fragment\n\n new_fragment, _ = audioop.ratecv(fragment, width, nchannels, framerate_in, framerate_out, None)\n return new_fragment", "def fps():\n\n base.setFrameRateMeter(not base.frameRateMeter)\n return 'Toggled framerate meter.'", "def output_fps(self):\n return self._output_fps", "def frame_rate(self):\n return self._frame_rate", "def find_framerate(self):\n tracks_tag = self.soup.find_all(\"Tracks\")[0]\n frame_str = tracks_tag.find_all(\"frameRate\")[0].contents[0]\n frame_list = frame_str.split(\"f\")\n self.framerate = float(frame_list[1]) / 1000.0", "def test_renderer():\n # by default, use the config\n assert (\n CairoRenderer(Camera).video_quality_config[\"frame_width\"]\n == config[\"frame_width\"]\n )\n # init args override config\n assert (\n CairoRenderer(Camera, frame_width=10).video_quality_config[\"frame_width\"] == 10\n )\n\n # if config changes, reflect those changes\n with tempconfig({\"frame_width\": 100}):\n assert CairoRenderer(Camera).video_quality_config[\"frame_width\"] == 100\n # ..init args still override new config\n assert (\n CairoRenderer(Camera, frame_width=10).video_quality_config[\"frame_width\"]\n == 10\n )", "def change_frame_rate(src: Path, dst: Path, new_fps: int, ffmpeg: str, overwrite_exist: Optional[bool] = True) -> None:\n cmd = f'{ffmpeg} {\"-y\" if overwrite_exist else \"\"} -i \"{src}\" -filter:v fps=fps={new_fps} \"{dst}\"'\n print(f'[Info] executing \"{cmd}\"')\n p = sp.Popen(shlex.split(cmd))\n p.wait()\n if p.returncode != 0:\n raise RuntimeError('failed to change the frame rate')\n print(f'[Success] new video file is {dst}')", "def test_timeframes(self):\n pass", "def framerate_of_videofile(self):\n p = self.probe() \n 
assert 'streams' in p and len(['streams']) > 0\n fps = p['streams'][0]['avg_frame_rate']\n return float(fps) if '/' not in fps else (float(fps.split('/')[0]) / float(fps.split('/')[1])) # fps='30/1' or fps='30.0'", "def getFrameRate(path):\r\n process = subprocess.Popen(\r\n [return_static_ffmpeg(), \"-i\", path],\r\n stdout=subprocess.PIPE,\r\n stderr=subprocess.STDOUT,\r\n )\r\n stdout, _ = process.communicate()\r\n output = stdout.decode()\r\n match_dict = re.search(r\"\\s(?P<fps>[\\d\\.]+?)\\stbr\", output).groupdict()\r\n return float(match_dict[\"fps\"])", "def test_rapid_block_mode(ps,\n n_captures=100,\n sample_interval=100e-9, # 100 ns\n sample_duration=2e-3, # 1 ms\n ):\n # Configuration of Picoscope\n ps.setChannel(channel=\"A\", coupling=\"DC\", VRange=1)\n ps.setChannel(channel=\"B\", enabled=False)\n ps.setChannel(channel=\"C\", enabled=False)\n ps.setChannel(channel=\"D\", enabled=False)\n\n ps.setResolution('12')\n ps.setSamplingInterval(sample_interval, sample_duration)\n ps.setSimpleTrigger(\"A\", threshold_V=0.1, timeout_ms=1)\n\n samples_per_segment = ps.memorySegments(n_captures)\n ps.setNoOfCaptures(n_captures)\n\n data = np.zeros((n_captures, samples_per_segment), dtype=np.int16)\n\n # Measurement\n t1 = time.time()\n\n ps.runBlock()\n ps.waitReady()\n\n t2 = time.time()\n print(\"Time to record data to scope: \", str(t2 - t1))\n\n # downSampleMode raw (no downsampling) is 0x80000000. 0 is invalid!\n ps.getDataRawBulk(data=data, downSampleMode=0x80000000)\n\n t3 = time.time()\n print(\"Time to copy to RAM: \", str(t3 - t2))\n\n plt.imshow(data[:, 0:ps.noSamples], aspect='auto', interpolation='none',\n cmap=plt.cm.hot)\n plt.colorbar()\n plt.title(\"rapid block mode\")\n plt.show()\n print(\"Rapid block mode test passed.\")", "def assert_video_frames_appropriate_for_benchmark(config):\n print(\"counting frames in input\")\n frame_count = count_frames(config[\"video_path\"])\n print(\"frames counted: \", frame_count)\n assert (config[\"n_frames\"] * config[\n \"downsample\"]) <= frame_count, \"The provided video must have at least n_frames\"\n return frame_count", "def ToFrames(self, f):\n if type(f) == type(2.2):\n #return f*29.97\n #framrate is hardcoded at the moment, this needs to fixed\n #The framerate should be found on a per file basis.\n return f*59.94\n elif type(f) == type(\"\"):\n #return ToSeconds(f)*29.97\n return self.ToSeconds(f)*29.97", "def log_fps(frames, timediff):\n if timediff < 1 or frames == 0 or (frames % 100) != 0:\n return\n # Print message each 1000 frame if FPS > 100\n if frames > 100 * timediff and (frames % 1000) != 0:\n return\n logger.info('Frame {:6d}: FPS {}'.format(frames, int(frames / timediff)))", "def frames_per_second():\r\n global _time_prev, _fps\r\n time_now = time.time() * 1000.0\r\n dt = time_now - _time_prev\r\n _time_prev = time_now\r\n if dt == 0.0:\r\n return _fps.value\r\n return _fps.update(1000.0 / dt)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Testing "output_dimensions" special parameter provided by WriteGear(in Compression Mode)
def test_output_dimensions():
    dimensions = (640, 480)
    stream = cv2.VideoCapture(return_testvideo_path())
    output_params = {}
    if platform.system() == "Windows":
        output_params = {
            "-output_dimensions": dimensions,
            "-ffmpeg_download_path": tempfile.gettempdir(),
        }
    else:
        output_params = {"-output_dimensions": dimensions}
    writer = WriteGear(
        output="Output_tod.mp4",
        custom_ffmpeg=return_static_ffmpeg(),
        logging=True,
        **output_params
    )  # Define writer
    while True:
        (grabbed, frame) = stream.read()
        if not grabbed:
            break
        writer.write(frame)
    stream.release()
    writer.close()
    output = cv2.VideoCapture(os.path.abspath("Output_tod.mp4"))
    output_dim = (
        output.get(cv2.CAP_PROP_FRAME_WIDTH),
        output.get(cv2.CAP_PROP_FRAME_HEIGHT),
    )
    assert output_dim[0] == 640 and output_dim[1] == 480
    output.release()
    remove_file_safe("Output_tod.mp4")
[ "def testOutputSize(self):\n tools = self.tools\n\n # Rather than mocks, use a special Output object.\n out = tools._out\n out._Output = self._OutputMock\n\n tools.PrepareOutputDir(None)\n fname = tools.GetOutputFilename('fred')\n text_string = 'test of output size'\n tools.WriteFile(fname, text_string)\n\n re_fname = re.compile('fred')\n re_size = re.compile('.*size: (\\d*)')\n\n tools.OutputSize('first', fname, level=cros_output.ERROR)\n self.assertEqual(self._level, cros_output.ERROR)\n self.assertTrue(re_fname.search(self._msg))\n self.assertEqual(self._color, None)\n\n # Check the default level, and that the filename length is given.\n tools.OutputSize('second', fname)\n self.assertEqual(self._level, cros_output.NOTICE)\n self.assertTrue(re_fname.search(self._msg))\n self.assertEqual(self._color, None)\n m = re_size.match(self._msg)\n self.assertEqual(m.group(1), str(len(text_string)))\n\n tools.FinalizeOutputDir()", "def test_tile_size_custom(self):\n gmp = GlobalMercatorProfile(512)\n assert gmp.tile_size == 512", "def _SetInputOutputSizes(hp, input_size, output_size):\n if hp.input_size is None:\n hp.input_size = input_size\n else:\n assert hp.input_size == input_size\n if output_size is not None:\n if hp.output_size is None:\n hp.output_size = output_size\n else:\n assert hp.output_size == output_size", "def test_dagmc_h5m_export_mesh_size(reactor_1):\n\n reactor_1.export_dagmc_h5m(\"dagmc_default.h5m\", min_mesh_size=10, max_mesh_size=20)\n reactor_1.export_dagmc_h5m(\"dagmc_bigger.h5m\", min_mesh_size=2, max_mesh_size=9)\n\n assert Path(\"dagmc_bigger.h5m\").stat().st_size > Path(\"dagmc_default.h5m\").stat().st_size", "def test_get_output_size() -> None:\n train_output_size = (5, 5, 5)\n test_output_size = (7, 7, 7)\n\n model_config = SegmentationModelBase(crop_size=train_output_size,\n test_crop_size=test_output_size,\n should_validate=False)\n assert model_config.get_output_size(execution_mode=ModelExecutionMode.TRAIN) is None\n assert model_config.get_output_size(execution_mode=ModelExecutionMode.TEST) is None\n\n model = IdentityModel()\n model_config.adjust_after_mixed_precision_and_parallel(model)\n assert model_config.get_output_size(execution_mode=ModelExecutionMode.TRAIN) == train_output_size\n assert model_config.get_output_size(execution_mode=ModelExecutionMode.TEST) == test_output_size", "def test_tile_size(self):\n gmp = GlobalMercatorProfile()\n assert gmp.tile_size == 256", "def write_dims(dimensions_dict, nc):\n\n for dimension in list(dimensions_dict.values()):\n dim_size = 0\n if dimension == 'num_characters':\n dim_size = 4\n nc.createDimension(dimension, dim_size)", "def output_dim(self):\n return self._output_dim", "def _check_output_shape_proportions(self) -> bool:\n output_shapes = [self._get_ngraph_output_shape(output) for output in self.output_layers]\n\n if len(output_shapes) == 1:\n return NetworkMetaData._is_yolo_shape(output_shapes[0])\n\n zipped_shapes = zip(*output_shapes)\n for shape in zipped_shapes:\n if any(dim % min(shape) for dim in shape):\n return False\n\n return True", "def test_extract_grid_size(self):\n parser = Parser()\n for k, v in self.GRID_SIZES.items():\n assert parser._extract_grid_size(k) == v", "def test_window_size_in_tiles():\n assert pylink_config.WINDOW_SIZE_IN_TILES == (16, 15)", "def testCopyConstructorDimensions(self, outputFilename):\n v = volumeFromFile(inputFile_ushort)\n n = volumeFromInstance(v, outputFilename)\n ns = n.sizes[0:3]\n vs = v.sizes[0:3]\n n.closeVolume()\n v.closeVolume()\n assert ns == vs", "def 
test_shape(setup):\n expected = (1, 36, 82, 130)\n assert setup[\"z\"].shape == expected", "def get_batch_size_1_output_voxels(outputs, b):\n b_1_outputs = {}\n for field in standard_fields.get_output_voxel_fields():\n if field in outputs and outputs[field] is not None:\n b_1_outputs[field] = outputs[field][b]\n return b_1_outputs", "def getDepthMapSize(*args, **kwargs):\n \n pass", "def visualize_dimension(algorithm, protein, dimension, size=1):\n if dimension == \"2d\":\n visualize_csv(algorithm, protein, size)\n elif dimension == \"3d\":\n visualize3d_csv(algorithm, protein)\n else:\n print(\"can only visualize in 2d or 3d\")\n exit(1)", "def output_dims(self, qargs=None):\n if self._num_qubits:\n num = self._num_qubits if qargs is None else len(qargs)\n return num * (2, )\n if qargs is None:\n return self._output_dims\n return tuple(self._output_dims[i] for i in qargs)", "def test_write_volume_5d_contig_rgb_empty():\n shape = (2, 3, 256, 64, 96, 3)\n with TempFileName('volume_5d_contig_rgb_empty') as fname:\n with TiffWriter(fname) as tif:\n tif.save(shape=shape, dtype='uint8', tile=(256, 64, 96))\n assert_valid(fname)\n with TiffFile(fname) as tif:\n assert len(tif.pages) == 6\n page = tif.pages[0]\n assert page.is_sgi\n assert page.is_tiled\n assert page.is_contiguous\n assert page.planarconfig == CONTIG\n assert page.photometric == RGB\n assert page.imagewidth == 96\n assert page.imagelength == 64\n assert page.imagedepth == 256\n assert page.tilewidth == 96\n assert page.tilelength == 64\n assert page.tiledepth == 256\n assert page.samplesperpixel == 3\n # self.assertEqual(page.tags['TileOffsets'].value, (352,))\n assert page.tags['TileByteCounts'].value == (4718592,)\n series = tif.series[0]\n assert len(series._pages) == 1\n assert len(series.pages) == 6\n assert series.offset is not None\n assert series.shape == shape\n image = tif.asarray()\n assert_array_equal(image.shape, shape)\n assert__str__(tif)", "def test_output_data():\n output_params = dict(\n type=\"geodetic\",\n format=\"GeoJSON\",\n path=\"my/output/directory\",\n schema=dict(properties=dict(id=\"int\"), geometry=\"Polygon\"),\n pixelbuffer=0,\n metatiling=1\n )\n output = geojson.OutputData(output_params)\n assert output.path == \"my/output/directory\"\n assert output.file_extension == \".geojson\"\n assert isinstance(output_params, dict)\n # TODO output.read()\n # TODO output.write() --> also malformed data\n # TODO ouput.tiles_exist()\n # TODO ouput.get_path()\n # TODO output.prepare_path()\n # TODO output.open() --> InputTile" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Testing WriteGear CompressionMode (FFmpeg) with different parameters
def test_WriteGear_compression(f_name, c_ffmpeg, output_params, result):
    try:
        stream = cv2.VideoCapture(return_testvideo_path())  # Open stream
        writer = WriteGear(output=f_name, compression_mode=True, **output_params)
        while True:
            (grabbed, frame) = stream.read()
            if not grabbed:
                break
            writer.write(frame)
        stream.release()
        writer.close()
        remove_file_safe(f_name)
    except Exception as e:
        if result:
            pytest.fail(str(e))
[ "def test_WriteGear_customFFmpeg(ffmpeg_cmd, logging, output_params):\r\n writer = None\r\n try:\r\n # define writer\r\n writer = WriteGear(\r\n output=\"Output.mp4\",\r\n compression_mode=(True if ffmpeg_cmd != [\"invalid\"] else False),\r\n logging=logging,\r\n **output_params\r\n ) # Define writer\r\n\r\n # execute FFmpeg command\r\n writer.execute_ffmpeg_cmd(ffmpeg_cmd)\r\n writer.close()\r\n # assert audio file is created successfully\r\n if ffmpeg_cmd and isinstance(ffmpeg_cmd, list) and \"-acodec\" in ffmpeg_cmd:\r\n assert os.path.isfile(\"input_audio.aac\")\r\n except Exception as e:\r\n if isinstance(e, AssertionError):\r\n pytest.fail(str(e))\r\n elif isinstance(e, (ValueError, RuntimeError)):\r\n pytest.xfail(\"Test Passed!\")\r\n else:\r\n logger.exception(str(e))", "def test_input_framerate(c_ffmpeg):\r\n stream = cv2.VideoCapture(return_testvideo_path()) # Open stream\r\n test_video_framerate = stream.get(cv2.CAP_PROP_FPS)\r\n output_params = (\r\n {\"-input_framerate\": test_video_framerate}\r\n if (c_ffmpeg != \"wrong_path\")\r\n else {\"-input_framerate\": \"wrong_input\"}\r\n )\r\n writer = WriteGear(\r\n output=\"Output_tif.mp4\", custom_ffmpeg=c_ffmpeg, logging=True, **output_params\r\n ) # Define writer\r\n while True:\r\n (grabbed, frame) = stream.read()\r\n if not grabbed:\r\n break\r\n writer.write(frame)\r\n stream.release()\r\n writer.close()\r\n output_video_framerate = getFrameRate(os.path.abspath(\"Output_tif.mp4\"))\r\n assert test_video_framerate == output_video_framerate\r\n remove_file_safe(\"Output_tif.mp4\")", "def test_compression_suite(self):\n self.app = self.make_app(argv = ['production', 'decompress', 'J.Doe_00_01', '--debug', '--force', '--fastq', '-n'])\n handler.register(ProductionController)\n self._run_app()\n l1 = self.app._output_data[\"stderr\"].getvalue()\n self.app = self.make_app(argv = ['production', 'decompress', 'J.Doe_00_01', '-f', FLOWCELL, '--debug', '--force', '--fastq', '-n'])\n handler.register(ProductionController)\n self._run_app()\n l2 = self.app._output_data[\"stderr\"].getvalue()\n self.assertTrue(len(l1) > len(l2))\n os.chdir(filedir)", "def test_output_dimensions():\r\n dimensions = (640, 480)\r\n stream = cv2.VideoCapture(return_testvideo_path())\r\n output_params = {}\r\n if platform.system() == \"Windows\":\r\n output_params = {\r\n \"-output_dimensions\": dimensions,\r\n \"-ffmpeg_download_path\": tempfile.gettempdir(),\r\n }\r\n else:\r\n output_params = {\"-output_dimensions\": dimensions}\r\n writer = WriteGear(\r\n output=\"Output_tod.mp4\",\r\n custom_ffmpeg=return_static_ffmpeg(),\r\n logging=True,\r\n **output_params\r\n ) # Define writer\r\n while True:\r\n (grabbed, frame) = stream.read()\r\n if not grabbed:\r\n break\r\n writer.write(frame)\r\n stream.release()\r\n writer.close()\r\n\r\n output = cv2.VideoCapture(os.path.abspath(\"Output_tod.mp4\"))\r\n output_dim = (\r\n output.get(cv2.CAP_PROP_FRAME_WIDTH),\r\n output.get(cv2.CAP_PROP_FRAME_HEIGHT),\r\n )\r\n assert output_dim[0] == 640 and output_dim[1] == 480\r\n output.release()\r\n\r\n remove_file_safe(\"Output_tod.mp4\")", "def test_write_compress_deflate_level():\n data = WRITE_DATA\n with TempFileName('compress_deflate_level') as fname:\n imwrite(fname, data, compress=9)\n assert_valid(fname)\n with TiffFile(fname) as tif:\n assert len(tif.pages) == 1\n page = tif.pages[0]\n assert not page.is_contiguous\n assert page.compression == ADOBE_DEFLATE\n assert page.planarconfig == SEPARATE\n assert page.photometric == RGB\n assert page.imagewidth == 
301\n assert page.imagelength == 219\n assert page.samplesperpixel == 3\n image = tif.asarray()\n assert_array_equal(data, image)\n assert__str__(tif)", "def test_prepare_output_data_effective_compression(self):\n # Use dictionary to share data between threads\n thread_dict = {}\n original_compress = zlib.compress\n\n def my_compress(data):\n thread_dict['compress'] = threading.current_thread()\n return original_compress(data)\n\n self.mock_object(zlib, 'compress', side_effect=my_compress)\n\n service = google_dr.GoogleBackupDriver(self.ctxt)\n # Set up buffer of 128 zeroed bytes\n fake_data = b'\\0' * 128\n\n result = service._prepare_output_data(fake_data)\n\n self.assertEqual('zlib', result[0])\n self.assertGreater(len(fake_data), len(result[1]))\n self.assertNotEqual(threading.current_thread(),\n thread_dict['compress'])", "def test_prepare_output_data_effective_compression(self):\n # Use dictionary to share data between threads\n thread_dict = {}\n original_compress = zlib.compress\n\n def my_compress(data):\n thread_dict['compress'] = threading.current_thread()\n return original_compress(data)\n\n self.mock_object(zlib, 'compress', side_effect=my_compress)\n\n service = swift_dr.SwiftBackupDriver(self.ctxt)\n # Set up buffer of 128 zeroed bytes\n fake_data = b'\\0' * 128\n\n result = service._prepare_output_data(fake_data)\n\n self.assertEqual('zlib', result[0])\n self.assertGreater(len(fake_data), len(result[1]))\n self.assertNotEqual(threading.current_thread(),\n thread_dict['compress'])", "def FFMPEGCommand(file_, videoFrameRate):\n command = 'ffmpeg -y -i \"'+file_+'\" -map 0:1 -c:a copy -t 300 -map_metadata -1 audio0.mp4 -map 0:1 -c:a ac3 -t 300 -map_metadata -1 audio1.mp4 -map 0:1 -c:a aac -ac 2 -ab 320k -t 300 -map_metadata -1 audio2.mp4 -map 0:0 -c:v libx265 -x265-params \"profile=main:level=4.1:keyint=' + str(4*int(videoFrameRate[:-1])) + ':min-keyint=' + str(4*int(videoFrameRate[:-1])) + ':scenecut=0\" -crf 18 -sc_threshold 0 -flags +cgop -movflags faststart -pix_fmt yuv420p -t 300 -map_metadata -1 video1080hevc.mp4 -map 0:0 -c:v libx265 -x265-params \"profile=main:level=4.1:keyint=' + str(4*int(videoFrameRate[:-1])) + ':min-keyint=' + str(4*int(videoFrameRate[:-1])) + ':scenecut=0\" -crf 18 -sc_threshold 0 -flags +cgop -movflags faststart -pix_fmt yuv420p -t 300 -map_metadata -1 -vf \"scale=min(1280\\,iw):trunc(ow/a/2)*2\" video720hevc.mp4 -map 0:0 -c:v libx265 -x265-params \"profile=main:level=4.1:keyint=' + str(4*int(videoFrameRate[:-1])) + ':min-keyint=' + str(4*int(videoFrameRate[:-1])) + ':scenecut=0\" -crf 18 -sc_threshold 0 -flags +cgop -movflags faststart -pix_fmt yuv420p -t 300 -map_metadata -1 -vf \"scale=min(480\\,iw):trunc(ow/a/2)*2\" video480hevc.mp4 -map 0:0 -c:v libx264 -x264opts keyint=' + str(4*int(videoFrameRate[:-1])) + ':min-keyint=' + str(4*int(videoFrameRate[:-1])) + ':no-scenecut -sc_threshold 0 -flags +cgop -profile:v baseline -level 4.1 -crf 18 -maxrate 4000k -bufsize 4000k -vf \"scale=min(1280\\,iw):trunc(ow/a/2)*2\" -movflags faststart -pix_fmt yuv420p -t 300 -map_metadata -1 video720.mp4 -map 0:0 -c:v libx264 -x264opts keyint=' + str(4*int(videoFrameRate[:-1])) + ':min-keyint=' + str(4*int(videoFrameRate[:-1])) + ':no-scenecut -sc_threshold 0 -flags +cgop -profile:v baseline -level 4.1 -crf 18 -maxrate 1500k -bufsize 1500k -vf \"scale=min(854\\,iw):trunc(ow/a/2)*2\" -movflags faststart -pix_fmt yuv420p -t 300 -map_metadata -1 video480.mp4'\n status = os.system(command)\n return status, command", "def test_write_compress_webp():\n data = 
WRITE_DATA.astype('uint8').reshape((219, 301, 3))\n with TempFileName('compress_webp') as fname:\n imwrite(fname, data, compress=('WEBP', -1))\n assert_valid(fname)\n with TiffFile(fname) as tif:\n assert len(tif.pages) == 1\n page = tif.pages[0]\n assert not page.is_contiguous\n assert page.compression == WEBP\n assert page.photometric == RGB\n assert page.imagewidth == 301\n assert page.imagelength == 219\n assert page.samplesperpixel == 3\n image = tif.asarray()\n assert_array_equal(data, image)\n assert__str__(tif)", "def testCompressionPartial(self):\n output_length = 40\n output, _, exhausted = compression.CompressStream(\n self.stream,\n output_length,\n 9)\n # Ensure the requested read size is <= the compressed buffer size.\n self.assertLessEqual(output_length, output.length)\n # Ensure the input stream was not exhausted.\n self.assertFalse(exhausted)", "def headercompression(is_on: bool = True):\n if is_on is True:\n is_on = \"on\"\n elif is_on is False:\n is_on = \"off\"\n else:\n raise ValueError(\"is_on neither True nor False\")\n return _run_speedify_cmd([\"headercompression\", is_on])", "def compress_with_ffmpeg_h264(data_path, output_path, crf=23,\n **kwargs):\n files = os.listdir(data_path)\n bar = progressbar.ProgressBar(max_value=len(files))\n bar.start()\n os.makedirs(join(output_path), exist_ok=True)\n\n for i, file in enumerate(files):\n bar.update(i)\n Popen('ffmpeg -loglevel 0 -hide_banner -i ' + join(data_path, file)\n + ' -c:v libx264 -preset slow -crf ' + str(crf) +\n ' -c:a copy ' + join(output_path, file.split('.')[0]) + '.avi',\n shell=True).wait()\n bar.finish()", "def make_synchronized_video_gray_piezo(self):\n tdTomato_file=self.tdTomato_registered_path\n GCaMP_file=self.gcamp_registered_path\n\n n_of_z = self.n_of_z\n frames_per_second = self.frames_per_second\n min_range1 = self.min_range1\n max_range1 = self.max_range1\n min_range2 = self.min_range2\n max_range2 = self.max_range2\n\n #Get tdTomato images\n with open(tdTomato_file, \"rb\") as f:\n tdTomato_Filtered=pickle.load(f)\n #Get GCaMP images\n with open(GCaMP_file, \"rb\") as f:\n GCaMP_Filtered=pickle.load(f)\n\n #Number of frames should be the same for tdTomato and GCaMP.\n total_frames=tdTomato_Filtered.shape[0]\n x_size=tdTomato_Filtered.shape[2]#number of columns\n y_size=tdTomato_Filtered.shape[1]#number of rows\n\n #Make a video with the tdTomato signal + GCaMP signal + prep image\n video_name = (tdTomato_file+\"synchronized_video_gray.avi\")\n #Image width will be 2 * imaging_width\n #Final \"0\" necessary for gray scale image\n video = cv2.VideoWriter(video_name,cv2.VideoWriter_fourcc(*'mp4v'),frames_per_second,(x_size*2,y_size),0)\n\n\n #For making video, all numbers below min_range1 will be treated as 0.\n #all numbers above max_range1 will be treated as max_range1 value.\n #Then normalize the image to be between 0 to 255.\n tdTomato_Filtered[tdTomato_Filtered<=min_range1]=0\n tdTomato_Filtered[tdTomato_Filtered>=max_range1]=max_range1\n range_adjusted_tdTomato=(tdTomato_Filtered/max_range1)*255\n\n #For GCaMP\n GCaMP_Filtered[GCaMP_Filtered<=min_range2]=0\n GCaMP_Filtered[GCaMP_Filtered>=max_range2]=max_range2\n range_adjusted_GCaMP=(GCaMP_Filtered/max_range2)*255\n\n #Initialize the frame\n frame_original=np.zeros((y_size,x_size*2))\n\n for video_frame in range(total_frames):\n #Insert images in the right location.\n frame_original[:,0:x_size]=range_adjusted_tdTomato[video_frame,:,:]\n frame_original[:,x_size:x_size*2]=range_adjusted_GCaMP[video_frame,:,:]\n\n 
frame=np.uint8(frame_original)\n\n video.write(frame)\n\n video.release()", "def __send_gzip_setting(self):\n self.__send_bool(self.gzip)", "def test_write_compress_lzma():\n data = WRITE_DATA\n with TempFileName('compress_lzma') as fname:\n imwrite(fname, data, compress='LZMA')\n assert_valid(fname)\n with TiffFile(fname) as tif:\n assert len(tif.pages) == 1\n page = tif.pages[0]\n assert not page.is_contiguous\n assert page.compression == LZMA\n assert page.planarconfig == SEPARATE\n assert page.photometric == RGB\n assert page.imagewidth == 301\n assert page.imagelength == 219\n assert page.samplesperpixel == 3\n assert page.rowsperstrip == 108\n assert len(page.dataoffsets) == 9\n image = tif.asarray()\n assert_array_equal(data, image)\n assert__str__(tif)", "def test_write(self):\n\n # Dimensions, for testing purposes\n H = 480\n W = 640\n writer = cv2.VideoWriter(self.path_vid_out, cv2.VideoWriter_fourcc(*\"MJPG\"), 30, (W, H))\n for frame in tqdm.tqdm(range(400)):\n this_frame = np.random.randint(0, 255, (H, W, 3)).astype('uint8')\n writer.write(this_frame)\n writer.release()\n logging.debug(\"Wrote test video to {}\".format(self.path_vid_out))", "def setCompression(self, compmethod: 'SbName', level: 'float const'=0.5) -> \"SbBool\":\n return _coin.SoOutput_setCompression(self, compmethod, level)", "def create_op(output_buffer): \n frame_num = 1 # Initialize frame number to order frames \n # Define output video properties\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter()\n out.open(\"OUTPUT_mp.avi\", fourcc, 20, (512, 256))\n\n try:\n while True:\n if len(output_buffer) == 0:\n continue\n else:\n # print(\"length of output buffer: \", len(output_buffer)) \n output_buffer.sort() # Sort the common list such that the frame with least frame number is at the first position (like in priority queue) \n if output_buffer[0][0] == frame_num: # If the frame number at first position corresponds to current frame number, take it otherwise skip \n frame = output_buffer.pop(0)[1] \n out.write(frame)\n frame_num += 1 \n print(\"Time elapsed so far is: \", time.time() - start)\n \n except:\n # Release resources\n out.release()", "def test_ape (archive, compression, cmd, verbosity, interactive):\n return [cmd, archive, '-v']" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Testing WriteGear CompressionMode (FFmpeg) custom FFmpeg pipeline by separating audio from video
def test_WriteGear_customFFmpeg(ffmpeg_cmd, logging, output_params):
    writer = None
    try:
        # define writer
        writer = WriteGear(
            output="Output.mp4",
            compression_mode=(True if ffmpeg_cmd != ["invalid"] else False),
            logging=logging,
            **output_params
        )  # Define writer
        # execute FFmpeg command
        writer.execute_ffmpeg_cmd(ffmpeg_cmd)
        writer.close()
        # assert audio file is created successfully
        if ffmpeg_cmd and isinstance(ffmpeg_cmd, list) and "-acodec" in ffmpeg_cmd:
            assert os.path.isfile("input_audio.aac")
    except Exception as e:
        if isinstance(e, AssertionError):
            pytest.fail(str(e))
        elif isinstance(e, (ValueError, RuntimeError)):
            pytest.xfail("Test Passed!")
        else:
            logger.exception(str(e))
[ "def test_WriteGear_compression(f_name, c_ffmpeg, output_params, result):\r\n try:\r\n stream = cv2.VideoCapture(return_testvideo_path()) # Open stream\r\n writer = WriteGear(output=f_name, compression_mode=True, **output_params)\r\n while True:\r\n (grabbed, frame) = stream.read()\r\n if not grabbed:\r\n break\r\n writer.write(frame)\r\n stream.release()\r\n writer.close()\r\n remove_file_safe(f_name)\r\n except Exception as e:\r\n if result:\r\n pytest.fail(str(e))", "def constructPipeline(self):\r\n self.pipeline = gst.Pipeline(\"pipeline\")\r\n\r\n self.audiosrc = gst.element_factory_make(\"filesrc\")\r\n self.audiosrc.set_property(\"location\", self.audioInLocation)\r\n\r\n self.videosrc = gst.element_factory_make(\"filesrc\")\r\n self.videosrc.set_property(\"location\", self.videoInLocation)\r\n\r\n self.filesink = gst.element_factory_make(\"filesink\")\r\n self.filesink.set_property(\"location\", self.outFileLocation)\r\n\r\n self.decodebin_1 = gst.element_factory_make(\"decodebin\")\r\n self.decodebin_2 = gst.element_factory_make(\"decodebin\")\r\n\r\n self.audioconvert = gst.element_factory_make(\"audioconvert\")\r\n self.audio_encoder= gst.element_factory_make(\"lame\")\r\n\r\n self.video_encoder = gst.element_factory_make(\"ffenc_mpeg4\")\r\n self.muxer = gst.element_factory_make(\"ffmux_mp4\")\r\n self.queue = gst.element_factory_make(\"queue\")\r\n\r\n # As a precaution add video capability filter\r\n # in the video processing pipeline.\r\n videocap = gst.Caps(\"video/x-raw-yuv\")\r\n self.capsFilter = gst.element_factory_make(\"capsfilter\")\r\n self.capsFilter.set_property(\"caps\", videocap)\r\n # Converts the video from one colorspace to another\r\n self.colorSpace = gst.element_factory_make(\"ffmpegcolorspace\")\r\n\r\n self.pipeline.add( self.videosrc,\r\n self.decodebin_2,\r\n self.capsFilter,\r\n self.colorSpace,\r\n self.video_encoder,\r\n self.muxer,\r\n self.filesink)\r\n\r\n self.pipeline.add(self.audiosrc,\r\n self.decodebin_1,\r\n self.audioconvert,\r\n self.audio_encoder,\r\n self.queue)\r\n\r\n # Link audio elements\r\n gst.element_link_many(self.audiosrc, self.decodebin_1)\r\n gst.element_link_many( self.audioconvert, self.audio_encoder,\r\n self.queue, self.muxer)\r\n #Link video elements\r\n gst.element_link_many(self.videosrc, self.decodebin_2)\r\n gst.element_link_many(self.capsFilter,\r\n self.colorSpace,\r\n self.video_encoder,\r\n self.muxer,\r\n self.filesink)", "def compress_with_ffmpeg_h264(data_path, output_path, crf=23,\n **kwargs):\n files = os.listdir(data_path)\n bar = progressbar.ProgressBar(max_value=len(files))\n bar.start()\n os.makedirs(join(output_path), exist_ok=True)\n\n for i, file in enumerate(files):\n bar.update(i)\n Popen('ffmpeg -loglevel 0 -hide_banner -i ' + join(data_path, file)\n + ' -c:v libx264 -preset slow -crf ' + str(crf) +\n ' -c:a copy ' + join(output_path, file.split('.')[0]) + '.avi',\n shell=True).wait()\n bar.finish()", "def ffmpeg_merge_video_audio(\n videofile,\n audiofile,\n outputfile,\n video_codec=\"copy\",\n audio_codec=\"copy\",\n logger=\"bar\",\n):\n cmd = [\n FFMPEG_BINARY,\n \"-y\",\n \"-i\",\n audiofile,\n \"-i\",\n videofile,\n \"-vcodec\",\n video_codec,\n \"-acodec\",\n audio_codec,\n outputfile,\n ]\n\n subprocess_call(cmd, logger=logger)", "def split_video_ffmpeg(\n input_video_path: str,\n scene_list: Iterable[TimecodePair],\n output_file_template: str = '$VIDEO_NAME-Scene-$SCENE_NUMBER.mp4',\n video_name: Optional[str] = None,\n arg_override: str = DEFAULT_FFMPEG_ARGS,\n show_progress: bool = 
False,\n show_output: bool = False,\n suppress_output=None,\n hide_progress=None,\n):\n # Handle backwards compatibility with v0.5 API.\n if isinstance(input_video_path, list):\n logger.error('Using a list of paths is deprecated. Pass a single path instead.')\n if len(input_video_path) > 1:\n raise ValueError('Concatenating multiple input videos is not supported.')\n input_video_path = input_video_path[0]\n if suppress_output is not None:\n logger.error('suppress_output is deprecated, use show_output instead.')\n show_output = not suppress_output\n if hide_progress is not None:\n logger.error('hide_progress is deprecated, use show_progress instead.')\n show_progress = not hide_progress\n\n if not scene_list:\n return 0\n\n logger.info('Splitting input video using ffmpeg, output path template:\\n %s',\n output_file_template)\n\n if video_name is None:\n video_name = get_file_name(input_video_path, include_extension=False)\n\n arg_override = arg_override.replace('\\\\\"', '\"')\n\n ret_val = 0\n arg_override = arg_override.split(' ')\n scene_num_format = '%0'\n scene_num_format += str(max(3, math.floor(math.log(len(scene_list), 10)) + 1)) + 'd'\n\n try:\n progress_bar = None\n total_frames = scene_list[-1][1].get_frames() - scene_list[0][0].get_frames()\n if show_progress:\n progress_bar = tqdm(total=total_frames, unit='frame', miniters=1, dynamic_ncols=True)\n processing_start_time = time.time()\n for i, (start_time, end_time) in enumerate(scene_list):\n duration = (end_time - start_time)\n # Format output filename with template variable\n output_file_template_iter = Template(output_file_template).safe_substitute(\n VIDEO_NAME=video_name,\n SCENE_NUMBER=scene_num_format % (i + 1),\n START_TIME=str(start_time.get_timecode().replace(\":\", \";\")),\n END_TIME=str(end_time.get_timecode().replace(\":\", \";\")),\n START_FRAME=str(start_time.get_frames()),\n END_FRAME=str(end_time.get_frames()))\n\n # Gracefully handle case where FFMPEG_PATH might be unset.\n call_list = [FFMPEG_PATH if FFMPEG_PATH is not None else 'ffmpeg']\n if not show_output:\n call_list += ['-v', 'quiet']\n elif i > 0:\n # Only show ffmpeg output for the first call, which will display any\n # errors if it fails, and then break the loop. 
We only show error messages\n # for the remaining calls.\n call_list += ['-v', 'error']\n call_list += [\n '-nostdin', '-y', '-ss',\n str(start_time.get_seconds()), '-i', input_video_path, '-t',\n str(duration.get_seconds())\n ]\n call_list += arg_override\n call_list += ['-sn']\n call_list += [output_file_template_iter]\n ret_val = invoke_command(call_list)\n if show_output and i == 0 and len(scene_list) > 1:\n logger.info(\n 'Output from ffmpeg for Scene 1 shown above, splitting remaining scenes...')\n if ret_val != 0:\n # TODO(v0.6.2): Capture stdout/stderr and display it on any failed calls.\n logger.error('Error splitting video (ffmpeg returned %d).', ret_val)\n break\n if progress_bar:\n progress_bar.update(duration.get_frames())\n\n if progress_bar:\n progress_bar.close()\n if show_output:\n logger.info('Average processing speed %.2f frames/sec.',\n float(total_frames) / (time.time() - processing_start_time))\n\n except CommandTooLong:\n logger.error(COMMAND_TOO_LONG_STRING)\n except OSError:\n logger.error('ffmpeg could not be found on the system.'\n ' Please install ffmpeg to enable video output support.')\n return ret_val", "def split_video_ffmpeg(input_video_paths, shot_list, output_dir,\n output_file_template=\"${OUTPUT_DIR}/shot_${SHOT_NUMBER}.mp4\",\n compress_output=False,\n hide_progress=False,\n suppress_output=False):\n\n os.makedirs(output_dir, exist_ok=True)\n if not input_video_paths or not shot_list:\n return\n\n logging.info(\n 'Splitting input video%s using ffmpeg, output path template:\\n %s',\n 's' if len(input_video_paths) > 1 else '', output_file_template)\n if len(input_video_paths) > 1:\n # TODO: Add support for splitting multiple/appended input videos.\n # https://trac.ffmpeg.org/wiki/Concatenate#samecodec\n # Requires generating a temporary file list for ffmpeg.\n logging.error(\n 'Sorry, splitting multiple appended/concatenated input videos with'\n ' ffmpeg is not supported yet. This feature will be added to a future'\n ' version of ShotDetect. In the meantime, you can try using the'\n ' -c / --copy option with the split-video to use mkvmerge, which'\n ' generates less accurate output, but supports multiple input videos.')\n raise NotImplementedError()\n\n ret_val = None\n filename_template = Template(output_file_template)\n shot_num_format = '%0'\n shot_num_format += str(max(4, math.floor(math.log(len(shot_list), 10)) + 1)) + 'd'\n try:\n progress_bar = None\n total_frames = shot_list[-1][1].get_frames() - shot_list[0][0].get_frames()\n if tqdm and not hide_progress:\n progress_bar = tqdm(total=total_frames, unit='frame', miniters=1, desc=\"Split Video\")\n processing_start_time = time.time()\n for i, (start_time, end_time) in enumerate(shot_list):\n end_time = end_time.__sub__(1) # Fix the last frame of a shot to be 1 less than the first frame of the next shot\n duration = (end_time - start_time)\n # an alternative way to do it\n # duration = (end_time.get_frames()-1)/end_time.framerate - (start_time.get_frames())/start_time.framerate\n # duration_frame = end_time.get_frames()-1 - start_time.get_frames()\n call_list = ['ffmpeg']\n if suppress_output:\n call_list += ['-v', 'quiet']\n elif i > 0:\n # Only show ffmpeg output for the first call, which will display any\n # errors if it fails, and then break the loop. 
We only show error messages\n # for the remaining calls.\n call_list += ['-v', 'error']\n call_list += [\n '-y',\n '-ss',\n start_time.get_timecode(),\n '-i',\n input_video_paths[0]]\n if compress_output:\n call_list += '[-crf 21]' # compress\n call_list += ['-map_chapters', '-1'] # remove meta stream\n call_list += [\n '-strict',\n '-2',\n '-t',\n duration.get_timecode(),\n '-sn',\n filename_template.safe_substitute(\n OUTPUT_DIR=output_dir,\n SHOT_NUMBER=shot_num_format % (i))\n ]\n ret_val = subprocess.call(call_list)\n if not suppress_output and i == 0 and len(shot_list) > 1:\n logging.info(\n 'Output from ffmpeg for shot 1 shown above, splitting remaining shots...')\n if ret_val != 0:\n break\n if progress_bar:\n progress_bar.update(duration.get_frames()+1) # to compensate the missing one frame caused above\n if progress_bar:\n print('')\n logging.info('Average processing speed %.2f frames/sec.',\n float(total_frames) / (time.time() - processing_start_time))\n except OSError:\n logging.error('ffmpeg could not be found on the system.'\n ' Please install ffmpeg to enable video output support.')\n if ret_val is not None and ret_val != 0:\n logging.error('Error splitting video (ffmpeg returned %d).', ret_val)", "def main():\n global __args\n\n __args = parse_args()\n if int(__args.debug_mode) > -1:\n cv2.imwrite('output.jpg', np.zeros((32, 32, 3)))\n\n input_data = __args.pipeline_input\n output_dest = __args.pipeline_output\n\n if input_data.split('.')[-1] == 'mp4':\n vin = VideoFileClip(input_data)\n\n if __args.start is not None:\n vin = vin.set_start(float(__args.start))\n if __args.end is not None:\n vin = vin.set_end(float(__args.end))\n\n vout = vin.fl_image(pipeline)\n vout.write_videofile(output_dest, audio=False)\n\n elif input_data.split('.')[-1] in ['jpg', 'png']:\n img = cv2.imread(input_data)\n\n if os.path.isfile(output_dest):\n raise Exception('{} exists and is a file, not a directory.'.format(output_dest))\n\n if not os.path.isdir(output_dest):\n os.mkdir(output_dest)\n\n pipeline(img, dest=output_dest, fname=os.path.basename(input_data))\n\n else:\n raise Exception('Invalid input media format. 
Supported types are mp4, jpg or png.')", "def FFMPEGCommand(file_, videoFrameRate):\n command = 'ffmpeg -y -i \"'+file_+'\" -map 0:1 -c:a copy -t 300 -map_metadata -1 audio0.mp4 -map 0:1 -c:a ac3 -t 300 -map_metadata -1 audio1.mp4 -map 0:1 -c:a aac -ac 2 -ab 320k -t 300 -map_metadata -1 audio2.mp4 -map 0:0 -c:v libx265 -x265-params \"profile=main:level=4.1:keyint=' + str(4*int(videoFrameRate[:-1])) + ':min-keyint=' + str(4*int(videoFrameRate[:-1])) + ':scenecut=0\" -crf 18 -sc_threshold 0 -flags +cgop -movflags faststart -pix_fmt yuv420p -t 300 -map_metadata -1 video1080hevc.mp4 -map 0:0 -c:v libx265 -x265-params \"profile=main:level=4.1:keyint=' + str(4*int(videoFrameRate[:-1])) + ':min-keyint=' + str(4*int(videoFrameRate[:-1])) + ':scenecut=0\" -crf 18 -sc_threshold 0 -flags +cgop -movflags faststart -pix_fmt yuv420p -t 300 -map_metadata -1 -vf \"scale=min(1280\\,iw):trunc(ow/a/2)*2\" video720hevc.mp4 -map 0:0 -c:v libx265 -x265-params \"profile=main:level=4.1:keyint=' + str(4*int(videoFrameRate[:-1])) + ':min-keyint=' + str(4*int(videoFrameRate[:-1])) + ':scenecut=0\" -crf 18 -sc_threshold 0 -flags +cgop -movflags faststart -pix_fmt yuv420p -t 300 -map_metadata -1 -vf \"scale=min(480\\,iw):trunc(ow/a/2)*2\" video480hevc.mp4 -map 0:0 -c:v libx264 -x264opts keyint=' + str(4*int(videoFrameRate[:-1])) + ':min-keyint=' + str(4*int(videoFrameRate[:-1])) + ':no-scenecut -sc_threshold 0 -flags +cgop -profile:v baseline -level 4.1 -crf 18 -maxrate 4000k -bufsize 4000k -vf \"scale=min(1280\\,iw):trunc(ow/a/2)*2\" -movflags faststart -pix_fmt yuv420p -t 300 -map_metadata -1 video720.mp4 -map 0:0 -c:v libx264 -x264opts keyint=' + str(4*int(videoFrameRate[:-1])) + ':min-keyint=' + str(4*int(videoFrameRate[:-1])) + ':no-scenecut -sc_threshold 0 -flags +cgop -profile:v baseline -level 4.1 -crf 18 -maxrate 1500k -bufsize 1500k -vf \"scale=min(854\\,iw):trunc(ow/a/2)*2\" -movflags faststart -pix_fmt yuv420p -t 300 -map_metadata -1 video480.mp4'\n status = os.system(command)\n return status, command", "def export_ffmpeg(clip, export_dir, foreground):\r\n\r\n import subprocess\r\n import platform\r\n\r\n from libwiretapPythonClientAPI import (\r\n WireTapServerHandle,\r\n WireTapNodeHandle,\r\n )\r\n\r\n # Specify output dimensions here\r\n # width = 1920\r\n # height = 1080\r\n # Could alternatively use source dimensions\r\n width = clip.width #* float(scale_factor)\r\n height = clip.height # * float(scale_factor)\r\n\r\n # Specify output video codec and options here\r\n output_vcodec = str(\"-codec:v libx264 -preset slow -crf \") + str(crf_entry) + str(\" -x264-params ref=4:qpmin=4 -vf \\\"scale=(iw)/\") + str(scale_factor) + str(\"\\\":(-ih)/\") + str(scale_factor) + str(\",format=yuv420p\")\r\n output_vcodec_is_rgb = False\r\n\r\n # Specify output audio codec and options here\r\n output_acodec = str(\"-codec:a aac -b:a \")+ str(audio_bitrate_entry) + str(\" -strict -2\")\r\n\r\n print()\r\n print(\"Export with ffmpeg\")\r\n\r\n # Render clip (in foreground) if needed and commit library before extracting\r\n # clip information like the node id because they could change upon render\r\n # and commit.\r\n #\r\n # In a real workflow, you might want to consider copying the clip so it does\r\n # not get altered by the user while the export is ongoing.\r\n #\r\n clip.render()\r\n clip.commit()\r\n\r\n # Extract metadata from source clip\r\n storage_id = clip.get_wiretap_storage_id()\r\n print(\"Clip server: \" + storage_id)\r\n\r\n node_id = clip.get_wiretap_node_id()\r\n print(\"Clip Id: \" + 
node_id)\r\n\r\n clipname = clip.name.get_value()\r\n print(\"Clip Name: \" + clipname)\r\n\r\n tc = \"%s\" % clip.start_time\r\n print(\"Timecode (Flame): \" + tc)\r\n\r\n fps = clip.frame_rate\r\n print(\"Frame rate: %s\" % clip.frame_rate)\r\n drop = fps.find(\" DF\") > 0\r\n\r\n tc = tc[:2] + \":\" + tc[3:5] + \":\" + tc[6:8] + (\";\" if drop else \":\") + tc[9:]\r\n print(\"Timecode (FFmpeg): \" + tc)\r\n\r\n fps = fps[0 : fps.find(\"fps\") - 1]\r\n print(\"Clip fps: \" + fps)\r\n\r\n print(\"Clip depth: %d\" % clip.bit_depth)\r\n need_16_bpc = clip.bit_depth > 8\r\n\r\n audio_pipe_name = os.path.join(export_dir, \"%s.audio.pipe\" % clipname)\r\n print(\"Audio pipe: \" + audio_pipe_name)\r\n\r\n output_file = os.path.join(export_dir, \"%s.mp4\" % clipname)\r\n if os.path.exists(output_file):\r\n if not show_confirm_dialog(\r\n \"%s already exists. Do want to overwrite?\" % output_file, \"Overwrite?\"\r\n ):\r\n return\r\n print(\"Output file: \" + output_file)\r\n print()\r\n\r\n # Prepare read_frame command\r\n read_frame_cmd = (\r\n \"/opt/Autodesk/io/bin/read_frame -S '%s' -n '%s' -N -1 -W %d -H %d -b %d\"\r\n % (storage_id, node_id, width, height, 48 if need_16_bpc else 24)\r\n )\r\n\r\n # Prepare transfer of clip's color metadata to ffmpeg\r\n color_space = (\r\n \"-color_primaries %d -color_trc %d -colorspace %d \"\r\n \"-movflags write_colr \"\r\n % (\r\n clip.colour_primaries,\r\n clip.transfer_characteristics,\r\n 0 if output_vcodec_is_rgb else clip.matrix_coefficients,\r\n )\r\n )\r\n\r\n # Prepare ffmpeg command: will get its audio input from audio pipe above\r\n # and its video input from stdin (piped read_frame command)\r\n ffmpeg_cmd = (\r\n \"/usr/local/bin/ffmpeg \"\r\n +\r\n # Video input options\r\n \"-f rawvideo -pix_fmt %s -s %dx%d -r '%s' -i - \"\r\n +\r\n # Audio input options\r\n \"-ar 48000 -f s16le -ac 2 -i '%s' \"\r\n +\r\n # Video output options\r\n output_vcodec\r\n + \" \"\r\n +\r\n # Audio output options\r\n output_acodec\r\n + \" \"\r\n +\r\n # Metadata output options\r\n color_space\r\n + \"-timecode '%s' \"\r\n + \"-metadata:s:v:0 reel_name='%s' \"\r\n + \"-metadata title='%s' \"\r\n +\r\n # Output file\r\n \"-y '%s'\"\r\n ) % (\r\n \"rgb48le\" if need_16_bpc else \"rgb24\",\r\n width,\r\n height,\r\n fps,\r\n audio_pipe_name,\r\n tc,\r\n clip.tape_name,\r\n clipname,\r\n output_file,\r\n )\r\n\r\n # Prepare audio command\r\n read_audio_cmd = (\r\n \"/opt/Autodesk/io/bin/read_audio -S \" + storage_id + \" -n \" + node_id\r\n )\r\n\r\n # Run the commands\r\n if foreground:\r\n run_ffmpeg(audio_pipe_name, read_audio_cmd, read_frame_cmd, ffmpeg_cmd)\r\n\r\n else:\r\n # Background execution is performed via Backburner cmdjob.\r\n # Everything is packaged in a python command line for cmdjob consumption.\r\n\r\n cmdjob = (\r\n \"/opt/Autodesk/backburner/cmdjob \"\r\n + \"-jobName:'FFmpeg - \"\r\n + clipname\r\n + \"' \"\r\n + \"-description:'\"\r\n + clipname\r\n + \" -> \"\r\n + output_file\r\n + \"' \"\r\n + \"-servers:\"\r\n + platform.node().split(\".\")[0]\r\n + ' /usr/bin/python -c \"'\r\n + \"import sys; import os;\"\r\n + \"sys.path.insert(1,'\"\r\n + os.path.dirname(os.path.realpath(__file__))\r\n + \"');\"\r\n + \"import mp4_gui;\"\r\n + \"mp4_gui.run_ffmpeg('\"\r\n + audio_pipe_name\r\n + \"','\"\r\n + read_audio_cmd.replace(\"'\", \"\\\\'\")\r\n + \"','\"\r\n + read_frame_cmd.replace(\"'\", \"\\\\'\")\r\n + \"','\"\r\n + ffmpeg_cmd.replace(\"'\", \"\\\\'\")\r\n + \"')\"\r\n + '\"'\r\n )\r\n\r\n print(cmdjob)\r\n print()\r\n try:\r\n cmdjob_args = 
shlex.split(cmdjob)\r\n subprocess.check_call(cmdjob_args)\r\n except Exception as err:\r\n logging.error(traceback.format_exc())\r\n raise\r\n\r\n # Invalidate exported clip in WTG\r\n try:\r\n server = WireTapServerHandle(\"localhost:Gateway\")\r\n node = WireTapNodeHandle(server, output_file + \"@CLIP\")\r\n if not node.setMetaData(\"Invalidate\", \"\"):\r\n print(\"Unable to set meta data: \" + node.lastError())\r\n finally:\r\n # Must destroy WireTapServerHandle and WireTapNodeHandle before\r\n # uninitializing the Wiretap Client API.\r\n #\r\n node = None\r\n server = None", "def extract_audio(input_vid, audio_params):\n cmd = f'{FFMPEG} -i {join(os.getcwd(),input_vid)} -vn {audio_params} {join(os.getcwd(),\".temp\",\"audio.mkv\")}'\n Popen(cmd, shell=True).wait()", "def concatenate_video(input_video):\n with open(f'{os.getcwd()}/.temp/concat.txt', 'w') as f:\n\n for root, firs, files in os.walk(join(os.getcwd(), '.temp', 'encode')):\n for file in sorted(files):\n f.write(f\"file '{join(root, file)}'\\n\")\n\n concat = join(os.getcwd(), \".temp\", \"concat.txt\")\n audio = join(os.getcwd(), \".temp\", \"audio.mkv\")\n output = f'{input_video.split(\".\")[0]}_av1.webm'\n cmd = f'{FFMPEG} -f concat -safe 0 -i {concat} -i {audio} -c copy -y {output}'\n Popen(cmd, shell=True).wait()", "def get_video_audio_joining_command(audio_path, video_path, output_path):\n return 'ffmpeg -nostats -loglevel error -i {} -i {} -codec copy -shortest {}'.format(\n video_path, audio_path, output_path)", "def create_op(output_buffer): \n frame_num = 1 # Initialize frame number to order frames \n # Define output video properties\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter()\n out.open(\"OUTPUT_mp.avi\", fourcc, 20, (512, 256))\n\n try:\n while True:\n if len(output_buffer) == 0:\n continue\n else:\n # print(\"length of output buffer: \", len(output_buffer)) \n output_buffer.sort() # Sort the common list such that the frame with least frame number is at the first position (like in priority queue) \n if output_buffer[0][0] == frame_num: # If the frame number at first position corresponds to current frame number, take it otherwise skip \n frame = output_buffer.pop(0)[1] \n out.write(frame)\n frame_num += 1 \n print(\"Time elapsed so far is: \", time.time() - start)\n \n except:\n # Release resources\n out.release()", "def convert_frames_to_video(self, pathIn,pathOut):", "def main(args=None):\n args = parser.parse_args(args)\n data = collections.OrderedDict(\n gpus=detect_gpus(),\n hwaccels=detect_hwaccels(cmd=args.ffmpeg),\n codecs=detect_codecs(cmd=args.ffmpeg))\n json.dump(data, sys.stdout, indent=2)", "def prepare_test_frames(self, idx):\n results = copy.deepcopy(self.video_infos[idx])\n results = self.sample_clip(results)\n results['modality'] = self.modality\n results['start_index'] = self.start_index\n return self.pipeline(results)", "def extract_frames():\r\n # extract frames from training videos\r\n class_dirs = glob.glob('./train/*/')\r\n for class_dir in class_dirs:\r\n class_name = os.path.basename(os.path.normpath(class_dir))\r\n dest = './frames/train/' + class_name + '/'\r\n if not os.path.exists(dest):\r\n os.makedirs(dest)\r\n video_files = glob.glob(class_dir + '*.avi')\r\n for index, video_file in enumerate(video_files):\r\n duration = getLength(video_file)\r\n frame_dest_name = dest + 'video' + str(index) + 'frame' + '-%03d.jpg'\r\n try:\r\n\t\t# EXTRACTING FRAMES HERE\r\n ret = subprocess.call([\"ffmpeg\", \"-i\", video_file, \"-filter:v\", FRAMERATE_PROPERTY + 
str(float(FRAMES_PER_VIDEO)/float(duration)), \"-vframes\", FRAMES_PER_VIDEO, frame_dest_name])\r\n if ret != 0:\r\n LogFile.write(\"Failed to extract frames from %s \\n\" % video_file)\r\n except Exception as e:\r\n LogFile.write(\"Failed to extract frames from %s: \\n %s \\n\" % (video_file, e))\r\n print(e)\r\n \r\n #extract frames from testing videos\r\n class_dirs = glob.glob('./test/*/')\r\n for class_dir in class_dirs:\r\n class_name = os.path.basename(os.path.normpath(class_dir))\r\n dest = './frames/test/' + class_name + '/'\r\n if not os.path.exists(dest):\r\n os.makedirs(dest)\r\n video_files = glob.glob(class_dir + '*.avi')\r\n for index, video_file in enumerate(video_files):\r\n duration = getLength(video_file)\r\n frame_dest_name = dest + 'video' + str(index) + 'frame' + '-%03d.jpg'\r\n try:\r\n\t\t# EXTRACTING FRAMES HERE\r\n ret = subprocess.call([\"ffmpeg\", \"-i\", video_file, \"-filter:v\", FRAMERATE_PROPERTY + str(float(FRAMES_PER_VIDEO)/float(duration)), \"-vframes\", FRAMES_PER_VIDEO, frame_dest_name])\r\n if ret != 0:\r\n LogFile.write(\"Failed to extract frames from %s \\n\" % video_file)\r\n except Exception as e:\r\n LogFile.write(\"Failed to extract frames from %s: \\n %s \\n\" % (video_file, e))\r\n print(e)\r\n \r\n \"\"\"\r\n vid_class = re.sub(r'\\s+', '', capwords(vid_class))\r\n class_files = glob.glob('./ytdl/*/' + vid_class + '/' '*.avi')\r\n for video in class_files:\r\n x, data_folder, train_or_test, classname, filename = video.split('/')\r\n filename_no_ext = re.sub(r'\\.\\w{3}', '', filename)\r\n\r\n src = video\r\n dest_folder = re.split('\\d{1,3}.avi', video)[0]\r\n dest = dest_folder + filename_no_ext + '-%03d.jpg'\r\n try:\r\n\t\t# EXTRACTING FRAMES HERE\r\n ret = subprocess.call([\"ffmpeg\", \"-i\", src, \"-filter:v\", FRAME_RATE, \"-vframes\", FRAMES_PER_VIDEO, dest])\r\n if ret == 0:\r\n nb_frames = get_nb_frames_for_video(video)\r\n DataFile.write(\"%s, %s, %s, %s \\n\" % (train_or_test, classname, filename_no_ext, nb_frames))\r\n else:\r\n LogFile.write(\"Failed to extract frames from %s \\n\" % video)\r\n except Exception as e:\r\n LogFile.write(\"Failed to extract frames from %s: \\n %s \\n\" % (video, e))\r\n print(e)\r\n \"\"\"", "def apply(fx, video_in, video_out, is_complex=False, verbose=False):\n if isinstance(video_in, (list, tuple)):\n video_in = \" \".join(map(lambda v: '-i {}'.format(v), video_in))\n else:\n video_in = '-i {}'.format(video_in)\n\n if isinstance(fx, (list, tuple)):\n fx = \",\".join(fx) if len(fx) > 1 else str(fx[0])\n\n args = '{} {} \"{}\" {}'.format(video_in,\n __COMPLEX if is_complex else __SIMPLE,\n fx, video_out)\n ffmpeg(args, verbose)", "def mp4_to_m3u8(src, dst, resolution=\"SD\", output_file_name=None,net_name = \"\"):\n \n\n resolutions = {'SD':'720:576', 'HD':'1280:720', 'FHD':'1920:1080'}\n dimension = resolutions.get(resolution, 'SD')\n\n file_name = generate_file_name(file_name=output_file_name, resolution=resolution)\n dst = create_dir(dst, resolution)\n full_dst = set_path(dst, file_name)\n \n src = fix_whitespace_in_path(src)\n full_dst = fix_whitespace_in_path(full_dst)\n #print(f\"ffmpeg -i {src} -vf scale={dimension} -bsf:v h264_mp4toannexb -hls_time 60 -hls_list_size 0 -c:a aac -strict -2 -f hls {full_dst}\")\n #print(\"\\n\")\n\n #return os.system(f\"ffmpeg -i {src} -vf scale={dimension} -bsf:v h264_mp4toannexb -hls_time 60 -hls_list_size 0 -c:a aac -strict -2 -f hls {full_dst}\") #ffmpeg命令行\n \n #print(f\"ffmpeg -i {src} -vf scale={dimension} -bsf:v h264_mp4toannexb -hls_time 60 
-hls_list_size 0 -c:a aac -strict -2 -f hls -hls_base_url https://link.hzer0.com/man/{net_name}/{resolution}/ {full_dst}\")\n \n return os.system(f\"ffmpeg -i {src} -vf scale={dimension} -bsf:v h264_mp4toannexb -hls_time 10 -hls_list_size 0 -c:a aac -strict -2 -f hls -hls_base_url https://link.hzer0.com/man/{net_name}/{resolution}/ {full_dst}\") #ffmpeg命令行\n #return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
activation function applies the activation (square) function to every element in the input array/matrix
def _activ_fun(self,x):
    if len(x.shape) == 1:
        s1 = x.shape[0]
        squared = np.zeros((s1))
        for i in range(s1):
            self._evaluator.square(x[i])
            self._evaluator.relinearize(x[i],self._ev_key)
    else:
        s1 = x.shape[1]
        s2 = x.shape[2]
        s3 = x.shape[3]
        for i in range(s1):
            for j in range(s2):
                for k in range(s3):
                    self._evaluator.square(x[0,i,j,k])
                    self._evaluator.relinearize(x[0,i,j,k],self._ev_key)
    return x
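For orientation, here is a minimal plaintext sketch of the same square activation, assuming ordinary NumPy arrays in place of the SEAL-style ciphertext objects and evaluator used above; the helper name and sample shapes are illustrative assumptions only.

import numpy as np

def square_activation(x):
    # Plaintext analogue of _activ_fun: square every element, standing in for
    # evaluator.square + relinearize; works for both the flat vector case and
    # the (1, H, W, C) feature-map case.
    return np.square(x)

# Example: a (1, 2, 2, 3) feature map squared element-wise.
features = np.arange(12, dtype=float).reshape(1, 2, 2, 3)
activated = square_activation(features)
assert activated.shape == features.shape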
[ "def calc_activation(self, inp):\n inp_rightform = ny.matrix( inp ).T\n self.a = [inp_rightform]\n tmp = ny.dot( self.weights_layer[0], inp_rightform ) + self.bias[0]\n tmp = self.activation_function(tmp)\n\n self.a.append(tmp)\n\n for i in range(self.number_hidden_layers-1):\n\n tmp = ny.dot( self.weights_layer[i+1], tmp ) + self.bias[i+1]\n tmp = self.activation_function(tmp)\n self.a.append(tmp)\n\n tmp = ny.dot( self.weights_layer[self.number_hidden_layers], tmp )+self.bias[self.number_hidden_layers]\n tmp = self.activation_function(tmp)\n\n self.a.append(tmp)\n #eventuell muss shape von tmp angepasst werden", "def _apply_activation(self, r):\n if self.activation is None:\n \"\"\"\n No activation function was chosen.\n \"\"\"\n return r\n if self.activation == 'tanh':\n \"\"\"\n Compute tanh values for each sets of scores in x.\n \"\"\"\n return np.tanh(r)\n if self.activation == 'sigmoid':\n \"\"\"\n Compute sigmoid values for each sets of scores in x.\n \"\"\"\n return expit(r) # 1 / (1 + np.exp(-r))\n if self.activation == 'softmax':\n \"\"\"\n Compute softmax values for each sets of scores in x.\n \"\"\"\n # exps = np.exp(r - np.max(r))\n return softmax(r) # exps / np.sum(exps)\n return r", "def sigmoid(input: Matrix):\n\n # Instantiate output as a matrix same dimensions as input\n # output = [ [0 for i in range(len(input))] for j in range(len(input[0])) ] \n output = Matrix(input.size())\n\n # Perform sigmoid on all elements in input matrix\n for x in range(input.height()):\n for y in range(input.width()):\n output[x][y] = 1 / (1 + math.exp(-1 * input[x][y])) \n\n return output", "def sigmoid_array(x): \n\treturn 1 / (1 + np.exp(-x))", "def _activation(func, data):\n if func == LeakyReLU:\n return func(data, slope=0.01)\n return func(data)", "def activation_maps(self, input):\n for layer in self._model.layers:\n output = K.function([layer.get_input_at(0), K.learning_phase()],\n [layer.get_output_at(0)])\n input = output([input, 0])[0]\n\n return input.squeeze()", "def apply_layer(y_in,w,b,activation):\n # to understand the following line, watch the beginning of lecture 2\n z=np.dot(y_in,w)+b # batch processing: y_in is of shape [batchsize,num_neurons_in]\n if activation=='sigmoid':\n return(1/(1+np.exp(-z)))\n elif activation=='jump':\n return(np.array(z>0,dtype='float'))\n elif activation=='linear':\n return(z)\n elif activation=='reLU':\n return((z>0)*z)", "def linear_activate(inputs, weights, biases):\n return(np.matmul(inputs, weights.T) + biases.T)", "def append_activation(function):\n if function is None or function is keras.activations.linear:\n # Identity: https://github.com/keras-team/keras/blob/bd024a1fc1cd6d88e8bc5da148968ff5e079caeb/keras/activations.py#L187\n pass\n elif function is keras.activations.relu:\n syrenn_layers.append(pysyrenn.ReluLayer())\n else:\n print(function)\n raise NotImplementedError", "def inplace_sigmoid(X): \n \n Y = X.copy()\n X *= 0\n X += 1 / (1 + np.exp(1) ** -Y)", "def sigmoid(X):\n\n pass", "def sigmoid(data):\n for i in range(len(data)):\n data[i] = 1 / (1 + np.exp(-data[i]))", "def sigmoid(x):\n\t\n\t# Returning sigmoided array.\n\treturn 1 / (1 + np.exp(-x))", "def feedforward(self, data):\n activations = data\n for i in range(2, self.L + 1):\n activations = sigmoid((self.weights[i] @ activations) + self.biases[i])\n return activations", "def execute(self, inputs):\n\t\trun = np.vectorize(lambda x: x.execute(inputs))\n\t\tself.output = np.array(list(self.activation(run(self.neurons).reshape(self.shape))))\n\t\treturn self.output", 
"def eval(self, input):\n\n ## Add bias to input\n input = np.array(input) if type(input) != np.array else input\n input = np.concatenate((input, [-1]))\n input = input.reshape((1, input.size))\\\n\n ## Regression Activations\n if self.activation_type == \"linear\":\n return self.forward(input)[0,0]\n else:\n return 1 if self.forward(input)[0,0] > 0.5 else 0", "def activation_functions(self, activation_functions):\n\n self._activation_functions = activation_functions", "def evalulate(self, x):\n if self.bias_bool:\n return self.activation(self._layer_func(x) + self.bias)\n else:\n return self.activation(self._layer_func(x))", "def grad_activation_out(z_i, activation_function_output):\n if activation_function_output == \"linear\":\n return np.ones_like(z_i)\n elif activation_function_output == \"sigmoid\":\n exp_expression = np.exp(-z_i)\n return exp_expression / ((1 + exp_expression) ** 2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
meanpool2 takes (1,height,width,channels) input and performs meanpooling on each of the channel matrices separately and gives a (1,height/2,width/2,channels) output
def _meanpool2(self,x):
    dt = np.dtype('O')
    retval = np.zeros((1,int(x.shape[1]/2),int(x.shape[2]/2),x.shape[3]),dtype = dt)
    for chan in range(x.shape[3]):
        ii,jj,i,j=0,0,0,0
        while i < x.shape[1]:
            j,jj=0,0
            while j < x.shape[2]:
                res = self._Ciphertext()
                advals = [x[0,i,j,chan],x[0,i+1,j,chan],x[0,i,j+1,chan],x[0,i+1,j+1,chan]]
                self._evaluator.add_many(advals,res)
                self._evaluator.multiply_plain(res,self._encoder.encode(0.25))
                retval[0,ii,jj,chan] = res
                jj+=1
                j+=2
            ii+=1
            i+=2
    return retval

"""fully_connect takes an array of length n input and multiplies with an (n x m) matrix to give an array of length m output

Args:
    x: [n]
    y: [n,m]

Returns:
    z: [m]
"""
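A plaintext sketch of the same 2x2 mean pooling, assuming an ordinary NumPy array of shape (1, H, W, C) with even H and W; the helper name and sample shapes are assumptions for illustration.

import numpy as np

def meanpool2_plain(x):
    # Average non-overlapping 2x2 windows per channel:
    # (1, H, W, C) -> (1, H/2, W/2, C), matching the stride-2 loops above.
    n, h, w, c = x.shape
    return x.reshape(n, h // 2, 2, w // 2, 2, c).mean(axis=(2, 4))

# Example: a (1, 4, 4, 2) feature map pools down to (1, 2, 2, 2).
features = np.arange(32, dtype=float).reshape(1, 4, 4, 2)
assert meanpool2_plain(features).shape == (1, 2, 2, 2)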
[ "def avg_pool_2d(x, size=(2, 2), stride=(2, 2), name='avg_pooling', padding='VALID'):\n size_x, size_y = size\n stride_x, stride_y = stride\n o = tf.nn.avg_pool(x, ksize=[1, size_x, size_y, 1], strides=[1, stride_x, stride_y, 1], padding=padding,\n name=name)\n print(\"After \" + 'avgpool' + \" output a shape of :\" + str(o.get_shape()))\n\n return o", "def mean_allcnnc():\n from backpack.core.layers import Flatten\n return nn.Sequential(nn.AvgPool2d(kernel_size=(6, 6)), Flatten())", "def pool2d_layer(input_, pool='avg'):\n if pool == 'avg':\n x = tf.nn.avg_pool(input_, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='SAME')\n else:\n x = tf.nn.max_pool(input_, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='SAME')\n return x", "def forward(self, x):\n return F.avg_pool2d(x, (x.shape[2], x.shape[3]))", "def pool(images, kernel_shape, stride, mode='max'):\n # getting information\n m, ih, iw, ic = images.shape\n kh, kw = kernel_shape\n sh, sw = stride\n\n # getting shape of convolutional matrix\n new_h = int(((ih - kh) / sh) + 1)\n new_w = int(((iw - kw) / sw) + 1)\n conv = np.zeros((m, new_h, new_w, ic))\n\n for i in range(new_h):\n for j in range(new_w):\n part = images[:, (i * sh): (i * sh) + kh,\n (j * sw): (j * sw) + kw]\n # here we get the new matrix of matrices\n if mode == 'max':\n result = np.max(part, axis=1)\n result = np.max(result, axis=1)\n if mode == 'avg':\n result = np.mean(part, axis=1)\n result = np.mean(result, axis=1)\n conv[:, i, j] = result\n return conv", "def adaptive_concat_pool2d(x, output_size):\n\n return torch.cat([F.adaptive_max_pool2d(x, output_size),\n F.adaptive_avg_pool2d(x, output_size)], dim=1)", "def image_pool(img, binsize, binstep=None, fun='mean'):\n Ndim = img.ndim\n if not isinstance(binsize, (list, tuple)):\n binsize = np.repeat(binsize, Ndim)\n\n if binstep is None:\n binstep = binsize\n elif not isinstance(binstep, (list, tuple)):\n binstep = np.repeat(binstep, Ndim)\n\n if fun == \"mean\":\n fun = np.nanmean\n elif fun == \"max\":\n fun = np.nanmax\n elif fun == \"min\":\n fun = np.nanmin\n\n for dim in range(Ndim):\n if binsize[dim] != 1:\n dims = np.array(img.shape)\n argdims = np.arange(img.ndim)\n argdims[0], argdims[dim] = argdims[dim], argdims[0]\n img = img.transpose(argdims)\n img = [fun(np.take(img, np.arange(int(i*binstep[dim]), int(i*binstep[dim]+binsize[dim])), 0), 0) for i in np.arange(dims[dim]//binstep[dim])]\n img = np.array(img).transpose(argdims)\n return img", "def pool(x):\n\n _, nin1, nin2, _ = x.shape.as_list()\n\n k = 2\n s = 2\n if src_args[\"pool_pad\"] == \"valid\":\n p = 0\n else:\n p = k\n\n def calc_dim(x): return int(((x + 2 * p - k) / s) + 1)\n\n if (calc_dim(nin1) < 1) or (calc_dim(nin2) < 1):\n return x, False\n\n if src_args[\"pool\"] == \"max\":\n return keras.layers.MaxPool2D(padding=src_args[\"pool_pad\"])(x), True\n elif src_args[\"pool\"] == \"avg\":\n return keras.layers.AvgPool2D(padding=src_args[\"pool_pad\"])(x), True", "def pool_average(self, x, p):\n if p > 1:\n if self.sampling is 'equiangular':\n N, M, F = x.get_shape()\n N, M, F = int(N), int(M), int(F)\n# print(M, (M/self.ratio)**0.5, (M*self.ratio)**0.5)\n x = tf.reshape(x,[N,int((M/self.ratio)**0.5), int((M*self.ratio)**0.5), F])\n x = tf.nn.avg_pool(x, ksize=[1,p**0.5,p**0.5,1], strides=[1,p**0.5,p**0.5,1], padding='SAME')\n return tf.reshape(x, [N, -1, F])\n elif self.sampling is 'icosahedron':\n return x[:, :p, :]\n else:\n x = tf.expand_dims(x, 3) # N x M x F x 1\n x = tf.nn.avg_pool(x, ksize=[1,p,1,1], strides=[1,p,1,1], 
padding='SAME')\n return tf.squeeze(x, [3]) # N x M/p x F\n else:\n return x", "def _avg2(image):\n \n # Convert 32-bit pixels to prevent overflow during averaging\n image = image.astype(np.uint32)\n \n # Get the height and width of each image to the nearest even number\n y_max = image.shape[0] - image.shape[0] % 2\n x_max = image.shape[1] - image.shape[1] % 2\n \n # Perform averaging\n avg_img = np.zeros(np.ceil([image.shape[0]/2,image.shape[1]/2,image.shape[2]]).astype(np.uint32))\n for z in range(4):\n avg_img[0:int(y_max/2),0:int(x_max/2),z]= (image[0:y_max-1:2,0:x_max-1:2,z] + \\\n image[1:y_max:2,0:x_max-1:2,z] + \\\n image[0:y_max-1:2,1:x_max:2,z] + \\\n image[1:y_max:2,1:x_max:2,z]) / 4\n \n # The next if statements handle edge cases if the height or width of the image has an\n # odd number of pixels\n if y_max != image.shape[0]:\n for z in range(3):\n avg_img[-1,:int(x_max/2),z] = (image[-1,0:x_max-1:2,z] + \\\n image[-1,1:x_max:2,z]) / 2\n if x_max != image.shape[1]:\n for z in range(4):\n avg_img[:int(y_max/2),-1,z] = (image[0:y_max-1:2,-1,z] + \\\n image[1:y_max:2,-1,z]) / 2\n if y_max != image.shape[0] and x_max != image.shape[1]:\n for z in range(4):\n avg_img[-1,-1,z] = image[-1,-1,z]\n return avg_img", "def _avgpool2d_reshape_reduce(x, pool_size: Tuple[int, int], *args):\n del args\n pool_height = tf.Dimension(pool_size[0])\n pool_width = tf.Dimension(pool_size[1])\n n, c, h, w = x.shape\n return (\n x.reshape([n, c, h // pool_height, pool_height, w // pool_width, pool_width])\n .reduce_sum(axis=3)\n .reduce_sum(axis=4)\n )", "def test_invalid_avg_pool2d(count_include_pad, pool_shape, padding):\n ifm_shape = [1, 4, 4, 3]\n strides = [2, 2]\n\n def get_graph():\n x = relay.var(\"x\", shape=ifm_shape, dtype=\"int8\")\n x = relay.cast(x, dtype=\"int32\")\n x = relay.nn.avg_pool2d(\n x,\n pool_shape,\n strides,\n padding=padding,\n layout=\"NHWC\",\n count_include_pad=count_include_pad,\n )\n x = relay.cast(x, dtype=\"int8\")\n func = relay.Function(relay.analysis.free_vars(x), x)\n return tvm.IRModule.from_expr(func)\n\n mod = relay.transform.InferType()(get_graph())\n partitioned_mod = ethosu.partition_for_ethosu(mod)\n assert tvm.ir.structural_equal(mod, partitioned_mod)", "def avgpoolflatten(): #%t\n return nn.Sequential(Reduce(\"b c h w -> b c\", \"mean\")) # combine avg pool + view", "def avg_pool_prediction(self, topN=0.005, min_imgs=10, output='softmax'):\n predictions_dict = {}\n labels_dict = {}\n cases_dict = {}\n for mode in self.modes:\n preds = np.empty(0) \n labels = np.empty(0) \n cases = []\n if self.save_histograms:\n set_ = self.test_set if mode=='test' else 'train'\n histogram_subdir = os.path.join(self.histogram_dir, set_)\n Path(histogram_subdir).mkdir(parents=True, exist_ok=True) \n case_list = list( self.output_dict[mode].keys() )\n for case in case_list:\n try:\n output_data = self.output_dict[mode][case][output][:,1]\n if self.save_histograms:\n save_histogram(output_data, case, histogram_subdir)\n output_data = np.sort(output_data)[::-1]\n if isinstance(topN, int):\n n_imgs = topN\n else:\n n_imgs = int(np.max((len(output_data) * topN, min_imgs)))\n if self.verbose:\n print(case, n_imgs)\n pred = np.mean(output_data[:n_imgs]).reshape(1)\n preds = np.concatenate((preds, pred))\n label = np.array(get_value_from_filename(case)).reshape(1)\n labels = np.concatenate((labels, label)) \n cases.append(case)\n except KeyError as e:\n print(case, e)\n predictions_dict[mode] = preds\n labels_dict[mode] = labels\n cases_dict[mode] = cases\n return 
predictions_dict, labels_dict, cases_dict", "def poolMean(inObj):\n\n inObj.gs()", "def forward_max_pool(data, pool_width, pool_height):\n input_channels, input_width, input_height = data.shape\n \n output = np.zeros((input_channels, input_width // pool_width, input_height // pool_height))\n\n for x in range(0, input_width, pool_width):\n for y in range(0, input_height, pool_height):\n\n output[:, x // pool_width, y // pool_height] = np.amax(data[:, x:(x + pool_width), y:(y + pool_height)], axis=(1, 2))\n\n return output", "def _make_conv_pool_block(\n cls,\n in_channels: int,\n out_channels: int,\n kernel_size: int,\n activation: nn.Module,\n pool_size: int,\n ) -> nn.Module:\n return nn.Sequential(\n nn.ConstantPad1d((0, kernel_size - 1), 0),\n nn.Conv1d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size\n ),\n activation,\n nn.MaxPool1d(kernel_size=pool_size)\n )", "def default_maxunpool_indices(output_shape, kernel_size, batch_size, channels, device):\n\n ph = kernel_size[0]\n pw = kernel_size[1]\n h = output_shape[0]\n w = output_shape[1]\n ih = output_shape[0] // 2\n iw = output_shape[1] // 2\n h_v = torch.arange(ih, dtype=torch.int64, device=device) * pw * ph * iw\n w_v = torch.arange(iw, dtype=torch.int64, device=device) * pw\n h_v = torch.transpose(h_v.unsqueeze(0), 1,0)\n return (h_v + w_v).expand(batch_size, channels, -1, -1)", "def compute_mean_image(index_cluster,clusters,data):\n\n l = int(np.sqrt(len(data[0])))\n M = np.zeros((l,l))\n c=0\n\n for index in clusters:\n if(index==index_cluster):\n c+=1\n\n for i,index in enumerate(clusters):\n if(index==index_cluster):\n M += compute_image(data[i])/c\n \n return(M)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorate a parser for debugging
def logparser(nametext):
    def lp(p):
        '''Wrap parser p so that each attempt also prints nametext, the next ten
        characters of the input and the current index, then returns p's result
        unchanged.
        '''
        @Parser
        def inner_parser(text, index):
            res = p(text, index)
            print(f"{nametext} {text[index:(index+10)]} {index}")
            return res
        return inner_parser
    return lp
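A self-contained sketch of the same decorate-for-debugging idea on plain callables, so it runs without the combinator library implied by @Parser; the trace name, toy digit parser and sample input are all illustrative assumptions.

def trace(name):
    # Wrap a parser-like function (text, index) -> result and print what it
    # sees before delegating, mirroring the logparser above.
    def wrap(parse_fn):
        def traced(text, index):
            print(f"{name} {text[index:index + 10]} {index}")
            return parse_fn(text, index)
        return traced
    return wrap

@trace("digits")
def digits(text, index):
    # Toy parser: consume leading digits starting at index.
    end = index
    while end < len(text) and text[end].isdigit():
        end += 1
    return text[index:end], end

print(digits("123+456", 0))  # logs the attempt, then prints ('123', 3)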
[ "def build_parser(self, parser: ArgumentParser):", "def __init__(self, formatter, verbose=0):\r\n sgmllib.SGMLParser.__init__(self, verbose)\r\n self.formatter = formatter", "def _parse_line(self):\r\n #if self.debug: print '\\t ' + str(self._current_node)\r\n\r\n # PyParser setParseAction's actually execute during parsing,\r\n # So we need closures in order to change the current scope\r\n\r\n \r\n def depth_from_indentation(function):\r\n \"\"\" Set the depth as the start of the match \"\"\"\r\n def wrap(start, values):\r\n #print 'Depth %d | %d %s' %(self._depth, start, values)\r\n #self._depth = start\r\n self._current_node = function(values)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap\r\n \r\n def depth_from_match(function):\r\n \"\"\" Set the depth as the start of the match \"\"\"\r\n def wrap(start, values):\r\n #print 'Depth %d | %d %s' %(self._depth, start, values)\r\n #print self._current_node\r\n self._depth = start\r\n self._current_node = function(values)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap \r\n\r\n def depth_from_nemo_tag(function):\r\n \"\"\" Start of the match is where the nemo tag is. Pass the other values to the wrapped function \"\"\"\r\n def wrap(start, values):\r\n # print 'Depth %d | %d %s' %(self._depth, start, values)\r\n self._depth = start\r\n tokens = values[1]\r\n self._current_node = function(tokens)\r\n #print self._current_node\r\n return ''\r\n\r\n return wrap\r\n\r\n\r\n\r\n # Match HTML\r\n from pyparsing import NotAny, MatchFirst\r\n html = restOfLine\r\n html.setParseAction(depth_from_indentation(self._add_html_node))\r\n\r\n # Match Mako control tags\r\n nemo_tag = Literal('%')\r\n\r\n begin = Keyword('for') | Keyword('if') | Keyword('while')\r\n middle = Keyword('else') | Keyword('elif')\r\n end = Keyword('endfor') | Keyword('endif') | Keyword('endwhile')\r\n control = nemo_tag + (begin | middle | end)\r\n\r\n begin.setParseAction(depth_from_indentation(self._add_nesting_mako_control_node) )\r\n middle.setParseAction(depth_from_indentation(self._add_mako_middle_node))\r\n end.setParseAction(depth_from_indentation(self._add_mako_control_leaf))\r\n\r\n # Match Nemo tags\r\n argument_name = Word(alphas,alphanums+\"_-:\")\r\n argument_value = quotedString\r\n regular_argument = argument_name + Literal('=') + argument_value\r\n\r\n class_name = Literal('.').setParseAction(lambda x: 'class=')\r\n id_name = Literal('#').setParseAction(lambda x: 'id=')\r\n special_argument = (class_name | id_name) + argument_value\r\n argument = Combine(special_argument) | Combine(regular_argument)\r\n\r\n # Match single Nemo statement (Part of a multi-line)\r\n inline_nemo_html = Word(alphas) + Group(ZeroOrMore(argument))\r\n inline_nemo_html.setParseAction(depth_from_match(self._add_nemo_node))\r\n\r\n # Match first nemo tag on the line (the one that may begin a multi-statement expression) \r\n nemo_html = nemo_tag + Group(Word(alphanums+\"_-:\") + Group(ZeroOrMore(argument)))\r\n nemo_html.setParseAction(depth_from_nemo_tag(self._add_nemo_node))\r\n\r\n # Match a multi-statement expression. Nemo statements are seperated by |. 
Anything after || is treated as html\r\n separator = Literal('|').suppress()\r\n html_separator = Literal('||') # | Literal('|>')\r\n nemo_list = nemo_html + ZeroOrMore( separator + inline_nemo_html )\r\n inline_html = html.copy()\r\n inline_html.setParseAction(depth_from_match(self._add_inline_html_node))\r\n nemo_multi = nemo_list + Optional(html_separator + inline_html)\r\n\r\n # Match empty Nemo statement\r\n empty = nemo_tag + Empty()\r\n empty.setParseAction(depth_from_indentation(self._add_blank_nemo_node))\r\n\r\n # Match unused Mako tags\r\n mako_tags = Literal('<%') | Literal('%>') | Literal('%CLOSETEXT') | Literal('</%')\r\n mako = mako_tags\r\n mako_tags.setParseAction(depth_from_indentation(self._add_html_node))\r\n\r\n # Matches General\r\n nemo = (control | nemo_multi | empty)\r\n line = mako_tags | nemo | html\r\n\r\n # Depth Calculation (deprecated?)\r\n self._depth = len(self._c) - len(self._c.strip())\r\n\r\n #try:\r\n line.parseString(self._c)\r\n\r\n #except ParseException:\r\n # Finally if we couldn't match, then handle it as HTML\r\n #add_html_node(self._c)\r", "def forward_decl():\n\n @Parser\n def f(tokens, s):\n raise NotImplementedError('you must define() a forward_decl somewhere')\n\n return f", "def run_parser(self, parser: ArgumentParser):", "def forward_decl():\n\n @Parser\n def f(_tokens, _s):\n raise NotImplementedError(\"you must define() a forward_decl somewhere\")\n\n f.name = \"forward_decl()\"\n return f", "def __init__(self, orig_formatter=None):\n self.orig_formatter = orig_formatter", "def DefineParser(name, prefix, struct_dict, pattern):\n struct_dict[name] = MakeParser(name, prefix, struct_dict, pattern)", "def _DebugParseFileEntry(self):\n pdb.post_mortem()", "def skipForParser(parser, msg: str):\n def testdecorator(testfunc):\n def testwrapper(self, *args, **kwargs):\n if self.logfile.logname == parser:\n self.skipTest(msg)\n else:\n testfunc(self, *args, **kwargs)\n return testwrapper\n return testdecorator", "def parse_and_print_lark(source_code: str) -> None:\n from vyper_parser.cst import parse_python\n\n pretty_print_lark(parse_python(source_code))", "def with_forward_decls(suspension):\n\n @Parser\n def f(tokens, s):\n return suspension().run(tokens, s)\n\n return f", "def create_parser_impl(self, common, handler: ParserHandler) -> BaseParser:", "def apply_kwargs_parser(parser):\n def inner_decorator(handler):\n def wrapped(**kwargs):\n parser_result = parser(**kwargs)\n kwargs.update(parser_result)\n handler(**kwargs)\n return wrapped\n return inner_decorator", "def lineParser( line, context, lineByLine=True ):\n global alreadyCaught\n global preFilterHandler\n global postFilterHandler\n\n linestack = []\n done = False\n \n while not done:\n\tlinestack.append(line)\n reo = rex.search(line)\n if reo:\n gotns=reo.group(4)\n gottag=reo.group(5)\n\t gotargv=reo.group(7)\n\n\t # In case the argument list contains tags as in: @foo(@arg@)@\n\t # Process the argument list before calling the replacement function.\n\t if gotargv:\n\t\tgotargv = lineParser(gotargv, context)\n\t \n\n\t # If we got a scoped tag, lookup the correct context first.\n\t if gotns:\n\t\tif ( context.name == 'gotns'):\n\t\t curctx = context\n\t\telse:\n\t\t tmpctx = context.getContext(gotns)\n if tmpctx:\n curctx = tmpctx\n else:\n print \"No such context [ %s ]\" % gotns\n gottag = \"UNDEFINED\"\n curctx = context\n\t else:\n\t\tcurctx = context\n\n try:\n newtext = curctx.replace(gottag, gotargv)\n # Only replace 1 occurrence at a time.\n # This is potentially fragile 
because it allows\n # the possibility that the wrong tag is replaced\n # if there are multiple matches on a line. I'm\n # assuming their is a well defined order on the\n # matches.\n\t\tstart = reo.start(2) - 1\n line = line[:start] + rex.sub( newtext, line[start:], 1 )\n except ValueError, (tag):\n\t\tif alreadyCaught:\n\t\t print \" from text: %s\" % linestack.pop()\n\t\telse:\n\t\t print \" in text: %s\" % linestack.pop()\n\t\t\talreadyCaught = True\n\t\tlinestack.reverse()\n\t\tremaining = len(linestack)\n\t\tfor l in linestack:\n\t\t\tremaining = remaining - 1\n\t\t\tif not remaining:\n\t\t \tprint \" from original text: %s\" % (l)\n\t\t\telse:\n\t\t\t print \" from previous text: %s\" % (l)\n raise ValueError, (tag, line)\n\t except InvalidReplacement, e:\n print \" Invalid replacement text: \", e\n raise Exception(\"\\n\\n###################################################################################################\\nCaught exception while parsing @%s@ in line:\\n%s\\n##################################################################################################\\n\\n\" % (gottag, line[:80]))\n\t except UnknownContext, e:\n\t\tprint e\n\t\traise Exception(\"\\n\\n###################################################################################################\\nGenerated UnknownContext Exception [ %s ] while parsing @%s@ in line:\\n##################################################################################################\\n\\n\" % (e, gottag)) \n\t except Exception, e:\n\t\tprint e\n raise Exception(\"\\n\\n###################################################################################################\\nCaught exception while parsing @%s@ in line:\\n%s\\n##################################################################################################\\n\\n\" % (gottag, line[:80]))\n else:\n done = True\n \n # Finally replace @@@ with @.\n reo = atx.search(line)\n if reo:\n line = atx.sub('@', line)\n\n\n if postFilterHandler != None:\n\tpostHandler(line)\n \n return line", "def test_hensonclidirective_sets_parser(test_directive):\n test_directive.prepare_autoprogram()\n assert test_directive.arguments == ('henson.cli:parser',)", "def __init__(self):\n self.parser_modules = [parser_modules.__dict__.get(mod) for mod in dir(parser_modules)\n if isinstance(parser_modules.__dict__.get(mod), types.ModuleType)\n and mod.startswith(\"look_for_\")]", "def __parser(self, text):\n buffer = self.view.buffer\n \n # Snippet components\n fields = Tree()\n mirrors = []\n stop = None\n \n root_init = fields.add(None, None) #empty root\n root = root_init\n \n # Cursor\n insert = buffer.get_insert()\n insert_iter = buffer.get_iter_at_mark(insert)\n \n # Indentation stuff\n pref_manager = tf.app.preferences_manager\n spaces = pref_manager.get_value(\"indentation/use_spaces\")\n tab_width = self.view.get_tab_width()\n indent = self.document.get_indentation(insert_iter)\n \n # Create a mark at start of snippet\n begin_bound_mark = buffer.create_mark(None, insert_iter, True)\n \n # Parsing text\n i = 0\n stack = []\n while (i<len(text)):\n char = text[i]\n \n # Verifying escape char \"\\\"\n if char == \"\\\\\":\n self.view.buffer.insert_at_cursor(text[i+1])\n i += 2\n continue \n \n # Look for a snippet special component \"${}\"\n if char == '$' and (i+1) < len(text) and text[i+1] == '{':\n \n if text[i+2] == '0':\n # STOP\n stop_iter = buffer.get_iter_at_mark(buffer.get_insert())\n stop = buffer.create_mark(None, stop_iter, True)\n\n i += 3\n elif text[i+2] == \"%\":\n # MIRROR\n 
mirror_iter = buffer.get_iter_at_mark(buffer.get_insert())\n begin_mark = buffer.create_mark(None, mirror_iter, True)\n end_mark = buffer.create_mark(None, mirror_iter, True)\n \n #begin_mark.set_visible(True)\n \n # Get mirror number\n j = i+3\n num = []\n\n while char != '}' and char != '/':\n char = text[j]\n num.append(char)\n j += 1\n\n mirror_num = int(\"\".join(num[:-1]))\n i = j-1\n \n if char == '/':\n k = i\n brace_count = 1\n \n while True:\n \n if text[k] == '{':\n brace_count += 1\n elif text[k] == '}':\n brace_count -= 1\n \n if brace_count == 0:\n break\n \n k += 1\n \n regexp = text[i+1:k].split('/')\n i = k\n \n m = SnippetMirror(self.view, mirror_num, \n (begin_mark, end_mark))\n \n m.regexp = (regexp[0], regexp[1])\n \n else:\n m = SnippetMirror(self.view, mirror_num, \n (begin_mark, end_mark))\n mirrors.append(m)\n else:\n # FIELD\n j = i+2\n num = []\n \n char = text[j]\n while char != ':':\n num.append(char)\n j+=1\n char = text[j]\n\n num = int(\"\".join(num))\n \n field_iter = buffer.get_iter_at_mark(buffer.get_insert())\n begin_mark = buffer.create_mark(None, field_iter, True)\n #begin_mark.set_visible(True)\n \n f = SnippetField(self.view, num, (begin_mark,))\n \n root = fields.add(f, root)\n stack.append(root)\n\n i = j\n \n elif char == '}':\n if len(stack) > 0:\n node = stack.pop()\n \n if len(stack) == 0:\n root = root_init\n \n bm = node.elem.marks[0]\n end_iter = buffer.get_iter_at_mark(buffer.get_insert())\n em = buffer.create_mark(None, end_iter, True)\n #em.set_visible(True)\n node.elem.marks = (bm, em)\n\n elif len(stack) == 0:\n root = root_init\n self.view.buffer.insert_at_cursor(char)\n else:\n root = stack[-1]\n\n elif char == '\\t':\n if spaces:\n self.view.buffer.insert_at_cursor(\" \" * tab_width)\n else:\n self.view.buffer.insert_at_cursor(char)\n elif char == '\\n':\n # LINE BREAK\n buffer.insert_at_cursor(\"\\n\")\n buffer.insert_at_cursor(indent)\n else:\n self.view.buffer.insert_at_cursor(char)\n \n i+=1\n \n #Not well-formed snippet\n if len(stack) > 0:\n fields.pre_order(self.__disconnect_field_signal)\n return\n \n # Change stop gravity\n if stop != None:\n stop_iter = buffer.get_iter_at_mark(stop)\n buffer.delete_mark(stop)\n stop = buffer.create_mark(None, stop_iter, False)\n #stop.set_visible(True)\n \n # Change mirrors gravity\n for i in range(len(mirrors)):\n m = mirrors[i].marks[1]\n n = mirrors[i].marks[0]\n m_iter = buffer.get_iter_at_mark(m)\n buffer.delete_mark(m)\n new_m = buffer.create_mark(None, m_iter, False)\n #new_m.set_visible(True)\n mirrors[i].marks = (n, new_m)\n \n # Change fields gravity\n fields.pre_order(self.__fields_change_gravity)\n \n # Change begin bound gravity\n m = begin_bound_mark\n m_iter = buffer.get_iter_at_mark(m)\n buffer.delete_mark(m)\n begin_bound_mark = buffer.create_mark(None, m_iter, False)\n #begin_bound_mark.set_visible(True)\n \n # Create end bound mark\n insert_iter = buffer.get_iter_at_mark(insert)\n end_bound_mark = buffer.create_mark(None, insert_iter, False)\n #end_bound_mark.set_visible(True)\n \n# print \"root: \", fields.root\n# print \"root's children: \", fields.root.children\n \n bounds = (begin_bound_mark, end_bound_mark)\n snippet = Snippet(self.document, fields, mirrors, stop, bounds)\n self.push_snippet(snippet)\n \n if len(snippet.fields.root.children) > 0:\n buffer.place_cursor(buffer.get_iter_at_mark(begin_bound_mark))\n self.next_field()\n else:\n self.pop_snippet()", "def parse_and_print_python(source_code: str) -> None:\n pretty_print_ast(python_ast.parse(source_code))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
parse a hex number
def hexnumber():
    return regex(r'0x[0-9a-fA-F]+').parsecmap(st(s.NUMBER))
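For comparison, a regex-only sketch of hex-number parsing that does not depend on the combinator library or the s.NUMBER token type; the function name and (value, new_index) return convention are assumptions.

import re

HEX = re.compile(r"0x[0-9a-fA-F]+")

def parse_hex_number(text, index=0):
    # Match a 0x-prefixed hex literal at index and return (value, new_index),
    # or None if no hex number starts there.
    m = HEX.match(text, index)
    if m is None:
        return None
    return int(m.group(0), 16), m.end()

assert parse_hex_number("0xff + 1") == (255, 4)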
[ "def parse_hex_digit(self):\n if self.raw:\n return self.parse_one(self.b_hex_digits)\n else:\n return self.parse_one(self.u_hex_digits)", "def _parse_as_hex(value):\n # First, remove any whitespace\n value = value.replace(\" \", \"\").replace(\"\\t\", \"\").replace(\"\\r\", \"\").replace(\"\\n\", \"\")\n\n # Strip leading 0x or 0X\n if not (value.startswith('0x') or value.startswith('0X')):\n value = value[2:]\n\n if len(value) % 2 != 00:\n raise ValueError(\"Payload size not evenly divisible by two\")\n\n if HEX_RE.match(value) is None:\n raise ValueError(\"Payload contains non-hexadecimal characters\")\n\n try:\n return binascii.a2b_hex(value)\n except TypeError:\n raise ValueError(\"Not a valid input sequence\")", "def parse_hex(self, pbs):\r\n buf = ''\r\n \r\n while pbs.has_next():\r\n char = pbs.peek()\r\n \r\n if esapi.codecs.push_back_string.is_hex_digit(char):\r\n # If char is a hex digit than add it on and keep going\r\n buf += char\r\n pbs.next()\r\n elif char == ';':\r\n # if the character is a semi-colon, eat it and quit\r\n pbs.next()\r\n break\r\n else:\r\n # malformed, just quit\r\n pbs.reset()\r\n return None\r\n try:\r\n i = int(buf, 16)\r\n return unichr(i)\r\n except ValueError:\r\n # Throw an exception for a malformed entity?\r\n return None", "def parse_nmea_checksum(nmea_line):\n return int(nmea_line[-2:], 16) # checksum hex digits as int", "def eval_hex_string_to_number(string_number :str) -> Optional[int]:\n is_hex = re.compile(HEX_REGEX_MATCH)\n is_there_match = is_hex.search(string_number)\n if is_there_match:\n return int(is_there_match.group(\"hex\"),16)\n return None", "def hex_str_to_int(input_str):\n try:\n val_to_return = int(input_str, 16)\n except Exception as e:\n val_to_return = 0\n print(e)\n return val_to_return", "def decode_hex(_hex):\n return binascii.a2b_hex(_hex)", "def unpack_hex(pdu_hex):\n return pdu.unpack_pdu(binascii.a2b_hex(hexclean(pdu_hex)))", "def testHex2Int(self):\n\n self.assertEqual(hex2Int(0xa), 10)", "def is_hex_number(s):\n return s.translate(hex_trans_table) == ''", "def parse_rgb_hex(hex_color: str) -> ColorTriplet:\n assert len(hex_color) == 6, \"must be 6 characters\"\n color = ColorTriplet(\n int(hex_color[0:2], 16), int(hex_color[2:4], 16), int(hex_color[4:6], 16)\n )\n return color", "def from_hex(cls, hexstring):\n try:\n hexstring = _HEX_COLOR_RE.match(hexstring).group(1)\n except AttributeError:\n raise ValueError(\n \"'{}' is not a valid hexadecimal\" \"color value.\".format(hexstring)\n )\n if len(hexstring) == 3:\n components = [(2 * s) for s in hexstring]\n else:\n components = hexstring[0:2], hexstring[2:4], hexstring[4:6]\n return cls(*(int(s, 16) for s in components))", "def _hex_to_digit(toconvert):\n return \"\".join([str(int(i, 16)).zfill(2) for i in toconvert])", "def parse_color_string(color_string):\n\n if len(color_string) == 3:\n r = int(color_string[0], 16) * 17\n g = int(color_string[1], 16) * 17\n b = int(color_string[2], 16) * 17\n elif len(color_string) == 6:\n r = int(color_string[0:2], 16)\n g = int(color_string[2:4], 16)\n b = int(color_string[4:6], 16)\n else:\n ValueError('Color string must be either 3 or 6 hexadecimal digits long')\n\n return r, g, b", "def _hex_validate(hex_value):\r\n hex_value.strip('#')\r\n if len(hex_value) not in [6, 8]:\r\n raise ValueError(\"hexadecimal value expected to have 6 or 8 characters ('FFFFFF' or 'FFFFFFFF')\")\r\n if len(hex_value) == 6:\r\n hex_value += 'ff'\r\n return hex_value", "def parse_piece(c):\n return piece_number(int(c, 16))", "def estHex(s: 
str):\n try:\n int(s, 16)\n return True\n except ValueError:\n return False", "def hexDecode(hex_str):\n\n return hex_str.decode('hex')", "def parse_num(line):\n\treturn int(line.strip())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
parse a character that cannot be part of a keyword
def nonkwchar():
    return regex("[^a-zA-Z0-9_]")
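The same character class expressed as a plain predicate, useful for checking where a keyword ends without the parser wrapper; the sample characters are illustrative.

import re

NON_KEYWORD_CHAR = re.compile(r"[^a-zA-Z0-9_]")

def is_non_keyword_char(ch):
    # True for a single character that could not appear inside a keyword.
    return bool(NON_KEYWORD_CHAR.fullmatch(ch))

assert is_non_keyword_char("+") and is_non_keyword_char(" ")
assert not is_non_keyword_char("a") and not is_non_keyword_char("_")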
[ "def exclude(letter):\n return letter in string.punctuation or letter in string.whitespace", "def keyword(name):\n return Regex(name + r\"\\b\", re.U)", "def test_character_invalid():\n TestScanner._run(**{\n 'name': 'Invalid Character',\n 'expressions': {\n 'invalid': ['fo']\n },\n 'DFA': {}\n })", "def test_character_invalid_type():\n TestScanner._run(**{\n 'name': 'Invalid Character',\n 'expressions': {\n 'invalid': [True]\n },\n 'DFA': {}\n })", "def remove_character(self, parsing_key, string):\n\n return self.parsing_rules[parsing_key].sub(\"\", string)", "def require_atom(self):\n if self.is_char():\n result = self._re_escape(self.the_char)\n self.next_char()\n elif self.the_char == \"(\":\n # a regular expression\n self.next_char()\n result = \"(%s)\" % self.require_reg_exp()\n if self.the_char != \")\":\n raise RegularExpressionError(\"Expected ) at [%i]\" % self.pos)\n self.next_char()\n else:\n cclass = self.require_char_class()\n result = to_text(cclass)\n return result", "def accumulate_keyword_or_id(self, char):\n if char.isalnum():\n self.accum += char\n return self.accumulate_keyword_or_id\n\n token_str = self.done_accumulating()\n if token_str in tokens.keywords:\n self.tokens.append(tokens.Keyword(token_str))\n else:\n self.tokens.append(tokens.Identifier(token_str))\n\n return self.new_token(char)", "def is_a_miss(char):\n return char == '-'", "def unexpected_character_message(char: str) -> str:\n if char < \" \" and char not in \"\\t\\n\\r\":\n return f\"Cannot contain the invalid character {print_char(char)}.\"\n if char == \"'\":\n return (\n \"Unexpected single quote character ('),\"\n ' did you mean to use a double quote (\")?'\n )\n return f\"Cannot parse the unexpected character {print_char(char)}.\"", "def check_term(text, term, idx):\n if text[idx-1].isalnum():\n return False\n if text[idx+len(term)].isalnum():\n return False\n return True", "def not_letter(character: str) -> bool:\n return character not in LETTERS", "def processOutsideQuotes( char ):\r\n if char == '\"':\r\n return processInsideQuotes\r\n elif char in string.whitespace:\r\n words.append( \"\".join( chars ) )\r\n __empty( chars )\r\n eatWhitespace()\r\n return processOutsideQuotes\r\n else: \r\n chars.append( char )\r\n return processOutsideQuotes", "def delimiter(tok):\n return tok | failure(\"Expected delimiter: '{0}'\".format(tok))", "def __is_valid_char_name(char):\n return char.isalpha() or char.isnumeric() or char in Project.VALID_NAME_SPECIAL_CHARS", "def get_bad_character(text):\n bad_characters = set()\n\n for c in text:\n if c not in allowed:\n bad_characters.add(c)\n\n return bad_characters", "def __notBlank(self, s):\n return re.search(\"\\w+\", s)", "def preprocess_word(word):\r\n word = word.strip()\r\n # not an alphabet word\r\n if not word.isalpha():\r\n raise ValueError(f\"The word '{word}' is not a correct single word\")\r\n return word.lower()", "def remove_reserved_chars(word):\n return \"\".join(i for i in word if i not in r'\\/:*?\"<>|')", "def ignore_punctuation_outside(word):\n\tfirst_char = word[0]\n\tlast_char = word[-1]\n\tif not first_char.isalpha() and not last_char.isalpha():\n\t\treturn word[1:-1]\n\telif not first_char.isalpha() and last_char.isalpha():\n\t\treturn word[1:]\n\telif first_char.isalpha() and not last_char.isalpha():\n\t\treturn word[:-1]\n\telse:\n\t\treturn word" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
parse a named argument
def named_argument(): iden = yield named_arg_var_name() yield string(":") yield singlelinespaces() value = yield operand return s.Construct(s.NAMED_ARGUMENT, iden, value)
[ "def parse(name):\n\n pass", "def _parse_argument(argument_ast: dict) -> \"ArgumentNode\":\n return ArgumentNode(\n name=_parse_name(argument_ast[\"name\"]),\n value=_parse_value(argument_ast[\"value\"]),\n location=_parse_location(argument_ast[\"loc\"]),\n )", "def parse_string(parser, header, arg_name, arg_value):\n env = get_environment_var(arg_name)\n arg_value = env if env else arg_value\n if parser.has_option(header, arg_name):\n return parser.get(header, arg_name)\n return arg_value", "def test_string_argument_parsing():\n arguments = [\n {\n \"name\": \"firstname\",\n \"type\": \"str\",\n \"default\": \"Allysa P. Hacker\",\n },\n ]\n parser = reading.build_template_argparser(arguments)\n values = parser.parse_args([\"--firstname\", \"john\"])\n assert values.firstname == \"john\"", "def _match_param(self, arg: str) -> Tuple[\"Param\", str, str, str]:\n param_name, param_type, param_value = parse_potential_argument(\n arg, self.prefix\n )\n # parse -arg as -a rg only applicable with prefix auto and -\n # When we didn't match any argument-like\n # with allow_attached=False\n # Or we matched but it is not defined\n name_with_attached: str = None\n if not param_type and self.prefix == \"auto\":\n # then -a1 will be put in param_value, as if `a1` is a name,\n # it should be --a1\n name_with_attached = (\n param_value\n if (\n param_name is None\n and param_value\n and param_value[:1] == \"-\"\n and param_value[1:2] != \"-\"\n )\n else None\n )\n\n elif not param_type and len(self.prefix) <= 1:\n # say prefix = '+'\n # then `a1` for `+a1` will be put as param_name, since\n # there is no restriction on name length\n name_with_attached = (\n self.prefix + param_name\n if param_name and param_name[:1] != self.prefix\n else None\n )\n\n # we cannot find a parameter with param_name\n # check if there is any value attached\n if name_with_attached and not self.get_param(param_name):\n param_name2, param_type2, param_value2 = parse_potential_argument(\n name_with_attached, self.prefix, allow_attached=True\n )\n # Use them only if we found a param_name2 and\n # arbitrary: not previous param_name found\n # otherwise: parameter with param_name2 exists\n if param_name2 is not None and (\n (self.arbitrary and param_name is None)\n or self.get_param(param_name2)\n ):\n param_name, param_type, param_value = (\n param_name2,\n param_type2,\n param_value2,\n )\n\n # create the parameter for arbitrary\n if (\n self.arbitrary\n and param_name is not None\n and not self.get_param(param_name)\n ):\n self.add_param(param_name, type=param_type)\n\n param: \"Param\" = self.get_param(param_name)\n if not param:\n return None, param_name, param_type, param_value\n\n param_maybe_overwritten: \"Param\" = param.overwrite_type(param_type)\n if param_maybe_overwritten is not param:\n self._set_param(param_maybe_overwritten)\n param = param_maybe_overwritten\n\n param.hit = True\n if param_value is not None:\n param.push(param_value)\n return param, param_name, param_type, param_value", "def parse_argument(self, argument: str, is_label_or_jump: bool = False):\n is_reference = '@' in argument\n\n if is_reference:\n argument = argument[1:]\n\n if Register(argument) in LANGUAGE_REGISTERS:\n arg_type = (\n OperationArgumentType.RegisterPointer\n if is_reference\n else OperationArgumentType.Register\n )\n arg_word = LANGUAGE_REGISTERS.index(Register(argument))\n\n elif is_inplace(argument):\n arg_type = OperationArgumentType.InPlaceValue\n arg_word = int(argument)\n\n elif is_label_or_jump:\n arg_type = 
OperationArgumentType.Label\n\n if argument in self.labels_table:\n label_index = self.labels_table[argument]\n else:\n label_index = max(self.labels_table.values() or [0, ]) + 1\n self.labels_table[argument] = label_index\n\n arg_word = label_index\n\n else:\n raise BadOperationArgument(argument)\n\n return OperationArgument(\n arg_type=arg_type,\n arg_word=arg_word\n )", "def getarg(self, parname):\n # list of strings that should parse to boolean true\n # we need to handle booleans separately, because bool(\"False\")\n # evaluates to True\n booltrue = ['yes','true','1','t']\n\n parname = parname.lower() # so we don't have to worry about case\n\n # Get paramlist index and check if parameter is valid\n try:\n ind = [par[0].lower() for par in self.paramlist].index(parname)\n except ValueError:\n msg = 'GetArg: There is no parameter named %s' % parname\n self.log.error(msg)\n raise KeyError(msg)\n parnameraw = self.paramlist[ind][0] # ParName in original Case\n default = self.paramlist[ind][1]\n # get from arguments if possible\n if self.arglist.has_key(parname):\n # assumes that: if value is not default, then set on command line\n # by the user.\n if self.arglist[parname] != self.parser.get_default(parnameraw):\n ret = self.arglist[parnameraw]\n self.log.debug('GetArg: from command line, done (%s=%s)'\n % (parnameraw, repr(ret)) )\n return ret\n # make temporary config entry with lowercase key names\n conftmp = {}\n if self.config.has_key(self.name): # skip if no step entry in config\n for keyname in self.config[self.name].keys():\n conftmp[keyname.lower()] = self.config[self.name][keyname]\n # get from config if possible\n if conftmp.has_key(parname):\n value = conftmp[parname]\n # If default is a sequence:\n if isinstance(default,(tuple,list)):\n # Get type for list elements\n # (if default is empty, convert to string)\n if len(default) > 0:\n outtype = type(default[0])\n else:\n outtype = str\n ret = []\n # Convert elements in list\n # Note: if the keyword only has one item in the list and there\n # is no trailing comma, configobj will read it as a string\n # instead of a 1-element list. 
We force to list here.\n if isinstance(value,str):\n value = [value]\n for i in xrange(len(value)):\n # Check if it's boolean\n if outtype == bool:\n if value[i].lower() in booltrue:\n ret.append(True)\n else: # default to False\n ret.append(False)\n # Not boolean - just convert to type\n else:\n ret.append(outtype(value[i]))\n # convert to tuple\n self.log.debug('GetArg: from config file, done (%s=%s)' % (parname,repr(type(default)(ret))))\n return type(default)(ret)\n # Default is not a sequence\n else:\n # Check if it's boolean\n if isinstance(default,bool) and not isinstance(value,bool):\n if value.lower() in booltrue:\n self.log.debug('GetArg: from config file, done (%s=True)' % parname)\n return True\n else:\n self.log.debug('GetArg: from config file, done (%s=False)' % parname)\n return False\n # Not boolean - just convert to type\n else:\n self.log.debug('GetArg: from config file, done (%s=%s)' % (parname,repr(type(default)(value))))\n return type(default)(value)\n # get default from parameter list\n ret = self.paramlist[ind][1]\n # return parameter\n self.log.debug('GetArg: from param list, done (%s=%s)' % (parname,repr(ret)))\n return ret", "def _parse_args(self):\n self._verify(self.args + list(self.kwargs))\n\n self.name = self.args[0]\n self.nodes = self.args[1:1+self.num_nodes]\n self.value = self._parse_values(self.args[1+self.num_nodes:])\n self.kwargs = self._parse_pairs(self.kwargs)\n # for key, value in self.kwargs.items():\n # setattr(self, key, value)", "def test_arg() -> None:\n parser = arg_parser()\n parsed = parser.parse_args(\n [\"--test\", \"test_name\", \"-n\", \"52\", \"--tool\", \"cwltool\", \"-j\", \"4\"]\n )\n assert parsed.test == \"test_name\"\n assert parsed.n == \"52\"\n assert parsed.tool == \"cwltool\"\n assert parsed.j == 4", "def _read_arg(arg):\n\n # If arg is None, just return it back\n if arg is None:\n arg_out = arg\n\n else:\n # If len(arg) == 1 and arg[0] is a valid filepath, read it as a grp file\n if len(arg) == 1 and os.path.exists(arg[0]):\n arg_out = read_grp(arg[0])\n else:\n arg_out = arg\n\n # Make sure that arg_out is a list of strings\n assert isinstance(arg_out, list), \"arg_out must be a list.\"\n assert type(arg_out[0]) == str, \"arg_out must be a list of strings.\"\n\n return arg_out", "def parse_arguments():\n\n parser = argparse.ArgumentParser(description=\"Expand vector tags to scalar tags.\")\n\n parser.add_argument(\"meshfile\",\n type=str,\n help=\"Provide a path to the mesh file.\"\n )\n parser.add_argument(\"-d\", \"--dirname\",\n type=str,\n help=\"Provide a name for the main directory.\"\n )\n parser.add_argument(\"-o\", \"--overwrite\",\n action=\"store_true\",\n help=\"Indicate whether to overwrite a directory from a previous expansion.\"\n )\n parser.add_argument(\"-e\", \"--element\",\n type=str.lower,\n default=\"hex\",\n choices=elements.keys(),\n help=\"Provide the type of MOAB element on which to expand tags.\"\n )\n\n args = parser.parse_args()\n\n # Determine the default name of the directory if none was supplied by the user.\n if args.dirname is None:\n args.dirname = '.'.join(args.meshfile.split(\"/\")[-1].split(\".\")[:-1])\n\n return args", "def pair_parse(arg, location_dict):\n equal_index = find_equal(arg)\n if equal_index == -1:\n logging.error(\"cannot find [=] in argument [%s] of -xattr\", arg)\n return -1\n\n if equal_index == 0:\n logging.error(\"no name pattern before [=] in argument [%s] of -xattr\", arg)\n return -1\n\n if equal_index == len(arg) - 1:\n logging.error(\"no value pattern after [=] 
in argument [%s] of -xattr\", arg)\n return -1\n\n name = arg[0:equal_index]\n # Remove the escape \\\\ or \\=\n name = name.replace(\"\\\\\\\\\", \"\\\\\").replace(\"\\\\=\", \"=\")\n if name not in CLOWNFISH_LOCATION_KEYS:\n logging.error(\"invalid key [%s], expected one of %s\",\n name, CLOWNFISH_LOCATION_KEYS)\n return -1\n\n value = arg[equal_index + 1:]\n # Remove the escape \\\\ or \\=\n value = value.replace(\"\\\\\\\\\", \"\\\\\").replace(\"\\\\=\", \"=\")\n location_dict[name] = value\n return 0", "def getarg(self, arg_name: str) -> Optional[str]:\n return next((val for arg, val in self._args.items() if arg == arg_name), None)", "def extract_arg(args, index, name, required=True):\n try:\n return args[index]\n except IndexError:\n if required:\n error('Arg required at position {}: {}'.format(index, name))", "def getArg(name, kwarg):\n\n if len(kwarg) != 1:\n raise TypeError(\"%s takes exactly 1 keyword argument\" % name)\n return kwarg.iteritems().next()", "def test_arguments_string_parsing_with_long_and_short_names_in_root_parsing_manager(root_config_parsing_manager):\n root_config_parsing_manager.add_argument('c', 'coco')\n root_config_parsing_manager.add_argument('d', 'xx', argument_type=int)\n\n check_parse_cli_result(root_config_parsing_manager, '-c 1', {'coco': '1'})\n\n check_parse_cli_result(root_config_parsing_manager, '-d 555', {'xx': 555})", "def parseArgs (args):\n result = {}\n \n for arg in args:\n try:\n (var, val) = string.split (arg, '=', 1)\n except:\n raise (SyntaxError, '%s is in the wrond format' % (arg))\n \n if (var[:2] != '--'):\n raise (SyntaxError, 'variable names must start with a ' +\n 'double dash (%s)' % (var))\n \n result[var[2:]] = val\n return (result)", "async def get_argument(self, name, default=None):\n data = dict(await self.request.post())\n if data:\n result = data.get(name, default)\n else:\n result = self.request.rel_url.query.get(name, default)\n if result is None:\n raise MissingArgumentError(name)\n return result", "def test_parsing_of_arguments_string_with_subgroup_parser_with_long_and_short_arguments_names_in_root_parsing_manager():\n parser_manager = RootConfigParsingManager()\n parser_manager.add_subgroup(name='sub')\n subparser = SubgroupConfigParsingManager('titi')\n subparser.add_argument('a', 'aaa', is_flag=True, action=store_true, default_value=False)\n subparser.add_argument('c', 'ttt', is_flag=False, action=store_val, argument_type=int)\n subparser.add_argument('n', 'name')\n parser_manager.add_subgroup_parser('sub', subparser)\n check_parse_cli_result(parser_manager, '--sub titi -a --name tutu -c 15',\n {'sub': {'tutu': {'aaa': True, 'type': 'titi', 'ttt': 15}}})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }