| query (string, 9-9.05k chars) | document (string, 10-222k chars) | negatives (list, 19-20 items) | metadata (dict) |
|---|---|---|---|
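Each row below pairs a natural-language query with the code it describes (the document) and a list of non-matching snippets (the negatives); the metadata column marks a (query, document, negatives) triplet objective. A minimal sketch of consuming one such row for triplet-style training is shown here, under the assumption that the rows are loadable with the Hugging Face `datasets` library; the dataset id `org/code-retrieval-triplets` is a placeholder, not the real name.

```python
from datasets import load_dataset

# Placeholder dataset id (substitute the real one).
rows = load_dataset("org/code-retrieval-triplets", split="train")

def iter_triplets(row):
    """Yield (anchor, positive, negative) tuples from one row.

    The anchor is the natural-language query, the positive is the matching
    code document, and each entry in `negatives` supplies one negative.
    """
    for negative in row["negatives"]:
        yield row["query"], row["document"], negative

first_row = rows[0]
triplets = list(iter_triplets(first_row))
print(len(triplets))  # one triplet per negative, i.e. 19 or 20
```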
Retrieve the content of the subtitle download.
|
def getSubtitleContent(cls, version_sub_stage):
    url = version_sub_stage.version_code
    subtitle_page = cls._my_perform_request(url)
    subtitle_url = Utils.getregexresults(
        SUBSCENE_REGEX.SUBTITLE_URL_PARSER,
        subtitle_page)
    # If for some reason we failed.
    if not subtitle_url:
        WriteDebug("Failed getting the subtitle url in page: %s" % url)
        return None
    # regex results returned by Utils.getregexresults are returned as list.
    subtitle_url = subtitle_url[0]
    return Utils.DownloadSubAsBytesIO(
        SUBSCENE_PAGES.DOMAIN.replace('www.', ''),
        subtitle_url,
        cls._build_referer(url))
|
[
"def download_subtitle(self, subtitles, filename):\n sub = subtitles[0]\n return self.download_subtitle_by_id(sub.get_id(), sub.get_download_url(), filename)",
"def download_subtitle_by_id(self, identifier, url, filename):\n working_dir = os.getcwd()\n is_tmp = False\n result_filename = None\n is_auto_filenamed = False\n # request the file\n self.__debug(\"downloading subtitle form %s\" % (url))\n request = self.__get(url)\n if not self.has_errors():\n meta = request.info()\n # construct the filename\n extension = self.__get_file_extension(meta)\n extension_len = len(extension)\n if filename is None or len(filename) == 0:\n filename = identifier + extension\n is_auto_filenamed = True\n elif os.path.isdir(filename):\n filename = os.path.join(filename, identifier) + extension\n elif len(filename) < extension_len or not filename[len(filename)-extension_len:] == extension:\n filename += extension\n self.__debug(\"compressed filename: %s\" % (filename))\n # creates a temporary directory for the zip file\n if not os.sep in filename:\n is_tmp = True\n tmp_dir = self.__create_tmp_directory()\n self.__debug(\"created temporary directory %s\" % (tmp_dir))\n filename = os.path.join(tmp_dir, filename)\n else:\n working_dir = os.path.dirname(filename)\n file_size = int(meta.getheaders(\"Content-Length\")[0])\n self.__debug(u\"%s bytes to be retrieved\" % (file_size))\n # open the file (write)\n fp = open(filename, 'wb')\n file_size_dl = 0\n block_sz = 8192\n while True:\n # read a 8k block\n _buffer = request.read(block_sz)\n if not _buffer:\n break\n # status\n file_size_dl += len(_buffer)\n fp.write(_buffer)\n status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl * 100. / file_size)\n status = status + chr(8)*(len(status)+1)\n self.__debug(status)\n # close the file\n fp.close()\n # try to extract the subtitle\n if extension == \".zip\":\n result_filename = self.__check_zip_for_single_sub(filename, not is_auto_filenamed)\n else:\n result_filename = filename\n # move the file if in a temporary directory\n if is_tmp:\n dest_file = os.path.join(working_dir, os.path.basename(filename))\n self.__debug(\"moving file %s to %s\" % (result_filename, dest_file))\n os.rename(result_filename, dest_file)\n self.__debug(\"removing temporary directory..\")\n shutil.rmtree(tmp_dir)\n result_filename = dest_file\n return result_filename",
"def subtitle(self):\n return self._moment.get(\"subtitle\")",
"def subtitles(self):\n # type: () -> string_types\n return self._subtitles",
"def dl_sub(page):\n # start_time = time.time()\n soup = scrape_page(page)\n div = soup.find(\"div\", {\"class\": \"download\"})\n down_link = \"https://subscene.com\" + div.find(\"a\").get(\"href\")\n r = requests.get(down_link, stream=True)\n filelist = []\n for found_sub in re.findall(\n \"filename=(.+)\", r.headers[\"content-disposition\"]\n ):\n with open(found_sub.replace(\"-\", \" \"), \"wb\") as f:\n for chunk in r.iter_content(chunk_size=150):\n if chunk:\n f.write(chunk)\n filelist = zip_extractor(found_sub.replace(\"-\", \" \"))\n print(\n \"Subtitle ({}) - Downloaded\\nList of files zipped: {}\".format(\n found_sub.replace(\"-\", \" \").capitalize(), filelist\n )\n )\n return filelist\n # print(\"--- download_sub took %s seconds ---\" % (time.time() - start_time))",
"def scrape_subtitle_id(movie_id):\n url = urls.movie_page(movie_id)\n tree = lxml.html.fromstring(requests.get(url).content)\n\n # Links to the subtitle files are stored in a container\n links = tree.xpath('//*[@id=\"moviehash\"]/a/@href')\n\n return filter(lambda x: x is not None, map(_extract_id, links))",
"def get_content(url):\n host = 'https://www.ted.com'\n html = get_html(url)\n\n soup = BeautifulSoup(html, 'html.parser')\n desc = soup.find('p', {'class': \"talk-description\"}).contents[0].strip()\n location = soup.find('div', {'class': \"player-hero__meta\"}).strong.contents[0].strip()\n filmed_time = soup.find('div', {'class': \"player-hero__meta\"}).findAll('span')[1].contents[-1].strip()\n subtitle_url = host + soup.find('div', {'class': \"talk-more\"}).a.get('href', '')\n subtitle = get_subtitle(subtitle_url)\n author = soup.find('div', {'class': \"talk-speaker__details media__message\"})\n author_info = {'name': author.find('div', {'class': \"talk-speaker__name h10\"}).a.contents[0].strip(),\n 'url': host + author.find('div', {'class': \"talk-speaker__name h10\"}).a.get('href', ''),\n 'position': author.find('div', {'class': \"talk-speaker__description\"}).contents[0].strip(),\n 'desc': author.find('div', {'class': \"talk-speaker__bio\"}).contents[0].strip()}\n return author_info, desc, subtitle, location, filmed_time",
"def fetch( self ) :\n self.client.staticwiki( self )\n return self.text",
"def get_response_content(response, decode=True):\n contents = \"\"\n if response.streaming:\n actual_content = BytesIO()\n for content in response.streaming_content:\n actual_content.write(content)\n contents = actual_content.getvalue()\n actual_content.close()\n else:\n contents = response.content\n\n if decode:\n return contents.decode(\"utf-8\")\n return contents",
"def getSubtitleUrl(json_obj):\n url = None\n for subtitle in json_obj[\"video\"][\"subtitles\"]:\n if subtitle[\"url\"].endswith(\".wsrt\"):\n url = subtitle[\"url\"]\n else:\n if len(subtitle[\"url\"]) > 0:\n common.log(\"Skipping unknown subtitle: \" + subtitle[\"url\"])\n return url",
"def _parse_subtitle(html_chunk):\n subtitle = html_chunk.match(\n [\"div\", {\"class\": \"comment\"}],\n \"h2\",\n [\"span\", {\"class\": \"gray\"}],\n )\n\n return get_first_content(subtitle)",
"def test_get_subtitle(self) -> None:\r\n scratch_view: sublime.View = sublime.active_window().new_file()\r\n\r\n entity: Tab = Tab(scratch_view)\r\n\r\n self.assertEquals(\"untitled\", entity.get_subtitle())",
"def getFeedSubtitle(self, feedId):\n\t\ttry:\n\t\t\treturn self.remove.getRegex(self.getFeed(feedId=feedId).feed.subtitle)\n\t\texcept Exception:\n\t\t\tprint \"The ID %r doesn't exist.\" % feedId",
"def fetch_url_content(self, url):\n response = requests.get(url)\n response.raise_for_status()\n return response.content",
"def subtitles(strict=True):\n # max_value settings are just to avoid overflowing TIMEDELTA_MAX_DAYS by\n # using arbitrary low enough numbers.\n #\n # We also skip subs with start time >= end time, so we split them into two\n # groups to avoid overlap.\n start_timestamp_strategy = timedeltas(min_value=0, max_value=500000)\n end_timestamp_strategy = timedeltas(min_value=500001, max_value=999999)\n\n # \\r is not legal inside Subtitle.content, it should have already been\n # normalised to \\n.\n content_strategy = st.text(min_size=1).filter(lambda x: \"\\r\" not in x)\n proprietary_strategy = st.text().filter(\n lambda x: all(eol not in x for eol in \"\\r\\n\")\n )\n\n if strict:\n content_strategy = content_strategy.filter(is_strictly_legal_content)\n\n subtitle_strategy = st.builds(\n srt.Subtitle,\n index=st.integers(min_value=0),\n start=start_timestamp_strategy,\n end=end_timestamp_strategy,\n proprietary=proprietary_strategy,\n content=content_strategy,\n )\n\n return subtitle_strategy",
"def get_original_text(self):\n found_url = False\n url = \"\"\n if self._original_text:\n return self._original_text\n\n base_url = self._repository_uri + \"/\" + self.book.get_text_file_dir_path()\n\n valid_extensions = (\"-0.txt\", \"-8.txt\", \".txt\")\n if self._repository_uri.startswith(\"file://\"):\n valid_extensions = (\"-0.txt\", \"-8.txt\", \".txt\", \"-0.zip\", \"-8.zip\", \".zip\")\n for extension in valid_extensions:\n url = base_url + extension\n try:\n found_url = url_exists(url)\n except: # aleph is not reliable, just use gutenberg directly for now\n url = re.sub(self._repository_uri, \"http://www.gutenberg.org/files\", url)\n id = self.book.get_book_id_number()\n url = re.sub(self.book.get_text_file_dir_path(), f\"{id}/{id}\", url)\n\n found_url = url_exists(url)\n\n if found_url:\n break\n\n\n # TODO: once search does not find audio editions anymore uncomment this:\n # if not found_url:\n # raise Warning(\n # \"Could not find the text file for {} {}.\".format(\n # book.get_author(),\n # book.get_title()\n # )\n # )\n # TODO: once search does not find audio anymore editions remove this:\n if not found_url:\n return None\n\n try:\n raw_file_path = download_files(url, self._temporary_dir / self.book.get_text_file_name(), self.book._title)\n if raw_file_path.endswith(\".zip\"):\n self._original_text = self.unarchive_book(raw_file_path)\n path = pathlib.Path(raw_file_path)\n path.unlink()\n\n else:\n with open(raw_file_path, \"r\", encoding=\"utf8\", errors='ignore') as book_text_file:\n self._original_text = book_text_file.read()\n path = pathlib.Path(raw_file_path)\n path.unlink()\n\n except Exception as ex:\n raise ex\n\n return self._original_text",
"def content(self):\n return self.contents[self.content_tabs.currentIndex()].toPlainText()",
"def fetch(self, url):\n\n response = self.s.get(url)\n print(\"Getting content from %s, length: %d\" % (url,\n len(response.content)))\n return response",
"def parse_string(cls, filecontent):\n try:\n return cls(pysubs2.SSAFile.from_string(filecontent))\n\n except Exception as error:\n raise SubtitleParseError(\n \"Error when parsing subtitle content: {}\".format(error)\n ) from error"
] |
{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
Create a ConvertImage task.
|
def __init__(self, *args, **kwargs):
    super(ConvertImageTask, self).__init__(*args, **kwargs)
    self.setMetadata('dispatch.split', True)
|
[
"def convert_image(*args, **kwargs): # real signature unknown; restored from __doc__\n pass",
"def export_task(self, img, cont):\r\n return self._tasks_manager.create(\"export\", img=img, cont=cont)",
"def import_task(self, img, cont, img_format=None, img_name=None):\r\n return self._tasks_manager.create(\"import\", img=img, cont=cont,\r\n img_format=img_format, img_name=img_name)",
"def prepare_image(self, agent, image_info, metadata, files):\n return",
"def transform_image(*args, **kwargs): # real signature unknown; restored from __doc__\n pass",
"def create_output_image(img, instances):\n pass",
"def image_transform(filename, ops):\n\n args = ['convert', filename]\n\n basename = ntpath.basename(filename)\n name, ext = os.path.splitext(basename)\n ext = ext.strip('.')\n\n if 'w' in ops and 'h' in ops:\n resize = (ops['w'], ops['h'])\n resize = size_with_dpr(resize, ops)\n new_size = '{}x{}'.format(*resize)\n args.extend(['-resize', new_size])\n if 'fit' in ops:\n if ops['fit'] == 'clip':\n #\n # same behavior as `bounds` for compatibility, may be removed later\n # https://github.com/caffeinetv/snappy/issues/5\n #\n #\n pass\n elif ops['fit'] == 'crop':\n #\n # `^` is used to resize the image based on the smallest fitting dimension\n # then `-extents` crops exactly the image to the size specified from the center\n #\n args[-1] += '^'\n args.extend(['-gravity', 'center', '-extent', new_size])\n elif ops['fit'] == 'bounds':\n #\n # by default `-resize` will fit the image into the requested size\n # and keep the original aspect ratio\n #\n pass\n else:\n #\n # ignore the aspect ratio and distort the image so it always\n # generates an image exactly the size specified\n #\n args[-1] += '!'\n\n #\n # if only `w` or `h` is provided, then we scale the target side\n # to the specified value, and keep the aspect ratio.\n # `fit` does not apply when only one side is given.\n #\n elif 'w' in ops:\n resize = size_with_dpr((ops['w'],), ops)\n new_size = '{}x'.format(*resize)\n args.extend(['-resize', new_size])\n elif 'h' in ops:\n resize = size_with_dpr((ops['h'],), ops)\n new_size = 'x{}'.format(*resize)\n args.extend(['-resize', new_size])\n\n #\n # if `dpr` is provided with no resize, then we just scale the image\n #\n elif 'dpr' in ops:\n scale_factor = '{}%'.format(float(ops['dpr']) * 100)\n args.extend(['-scale', scale_factor])\n\n if 'fm' in ops:\n #\n # just use the format as filename extension,\n # then IM will handle conversion automatically\n #\n ext = ops['fm']\n\n if 'auto' in ops and ops['auto'] == 'compress':\n #\n # removes any image profile attached to the image\n #\n args.append('-strip')\n\n #\n # this will overide any existing `q` operation\n #\n if is_lossy(ext, ops) and 'q' not in ops:\n new_ops = copy(ops)\n new_ops.update({'q': AGRESSIVE_QUALITY_RATE})\n return image_transform(filename, new_ops)\n\n if is_lossy(ext, ops):\n if 'q' in ops:\n q = str(ops['q'])\n args.extend(['-quality', q])\n else:\n args.extend(['-quality', str(DEFAULT_QUALITY_RATE)])\n\n code, path = tempfile.mkstemp()\n output = path + '.' + ext\n args.append(output)\n LOG.debug('args: {}'.format(args))\n im_result = subprocess.check_output(args)\n LOG.debug('IM output: {}'.format(im_result.decode()))\n\n return output",
"def test_api_task_create(self):\n temp_image = utils.TempImageFile()\n from app import models\n models.ImageToResize.run_task = False\n with self.assertNumQueries(1):\n response = self.client.post(reverse('task-create'),\n data={'image': temp_image.file},\n format='multipart')\n if response.status_code != status.HTTP_201_CREATED:\n log.error(response.content)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"def new_image():\n names = list(task.argv)\n if not names:\n if Project.prompt and task.arg_prompt is not None:\n name = prompt(task.arg_prompt)\n if name is not None and name.strip():\n names.append(name)\n\n for name in names:\n container_dir = os.path.join(Project.docker_dir, Project.docker_containers_dir, name)\n mkdir_p(container_dir)\n # populate container dir with Dockerfile and .dockerignore\n dockerfile = os.path.join(container_dir, 'Dockerfile')\n dockerignore = os.path.join(container_dir, '.dockerignore')\n shutil.copyfile(DOCKERFILE_TEMPLATE, dockerfile)\n touch(dockerignore)\n info(\"Created folder for new image at: {dir}\".format(dir=container_dir))",
"def fancyConvert(image):",
"def send2asset(img, coords, desc, assetId, scale = 30):\n task2asset = ee.batch.Export.image.toAsset(\n image = img,\n assetId = assetId,\n description = desc,\n maxPixels = 1e10,\n region = coords,\n scale = scale\n )\n return task2asset",
"def create_new_image(self):\n logging.info('Starting image \\'' + self.name + '\\' creation')",
"def generate_image(self) -> None:",
"def testResizeImage(self):\n crawler = FsPath.createFromPath(self.__sourcePath)\n resizeTask = Task.create('resizeImage')\n resizeTask.add(crawler, self.__targetPath)\n resizeTask.setOption(\"width\", \"480\")\n resizeTask.setOption(\"height\", \"270\")\n for convertToRGBA in [False, True]:\n with self.subTest(convertToRGBA=convertToRGBA):\n resizeTask.setOption(\"convertToRGBA\", convertToRGBA)\n result = resizeTask.output()\n self.assertEqual(len(result), 1)\n crawler = result[0]\n self.assertEqual(crawler.var(\"width\"), 480)\n self.assertEqual(crawler.var(\"height\"), 270)\n checkTask = Task.create('checksum')\n checkTask.add(crawler, self.__testPath)\n checkTask.output()",
"def exec_command(kwargs):\n kwargs[\"img\"] = call_kw(generate_fractal, kwargs)\n call_kw(img2output, kwargs)",
"def execute(self, image):\n\t\tself.output = image\n\t\treturn image",
"def make_task(\n name: str = '',\n run_name: str = '',\n install_script: str = '',\n instance_type: str = '',\n image_name: str = '',\n disk_size: int = 0,\n preemptible: Union[None, bool] = None,\n job: Job = None,\n task: backend.Task = None,\n create_resources=True,\n) -> Task:\n\n assert not preemptible, \"Not implemented\"\n\n def log(*_args):\n if task:\n task.log(*_args)\n else:\n util.log(*_args)\n\n # if name not specified, use name which is the same across script invocations for given image/instance-type\n name = maybe_create_name(name, instance_type, image_name)\n run_name = maybe_create_run_name(run_name, name)\n if run_name and job:\n assert run_name == job.run_.name, \"Provided Run object and run_name, but run_.name is {run_.name} while run_name is {run_name}\"\n\n if job is None:\n run_: backend.Run = backend.Run(run_name)\n else:\n run_ = job.run_\n\n if not instance_type:\n instance_type = os.environ.get('NCLUSTER_INSTANCE', 't3.micro')\n log(\"Using instance \" + instance_type)\n\n set_aws_environment()\n if create_resources:\n maybe_create_resources(task=task)\n else:\n pass\n\n placement_group = ''\n if u.instance_supports_placement_groups(instance_type):\n placement_group = run_.aws_placement_group_name\n # log(f\"Launching into placement group {placement_group}\")\n u.maybe_create_placement_group(placement_group)\n\n if not image_name:\n image_name = os.environ.get('NCLUSTER_IMAGE',\n 'amzn2-ami-hvm-2.0.20180622.1-x86_64-gp2')\n log(\"Using image \" + image_name)\n\n if preemptible is None:\n preemptible = os.environ.get('NCLUSTER_PREEMPTIBLE', False)\n preemptible = bool(preemptible)\n if preemptible:\n log(\"Using preemptible instances\")\n\n image = u.lookup_image(image_name)\n keypair = u.get_keypair()\n security_group = u.get_security_group()\n # subnet = u.get_subnet()\n ec2 = u.get_ec2_resource()\n\n instance = u.lookup_instance(name, instance_type,\n image_name)\n maybe_start_instance(instance)\n maybe_wait_for_initializing_instance(instance)\n\n # create the instance if not present\n if instance:\n log(f\"Reusing {instance}\")\n else:\n log(f\"Allocating {instance_type} for task {name}\")\n args = {'ImageId': image.id,\n 'InstanceType': instance_type,\n 'MinCount': 1,\n 'MaxCount': 1,\n 'SecurityGroupIds': [security_group.id],\n 'KeyName': keypair.name}\n\n args['TagSpecifications'] = [{\n 'ResourceType': 'instance',\n 'Tags': [{\n 'Key': 'Name',\n 'Value': name\n }]\n }]\n\n # args['NetworkInterfaces'] = [{'SubnetId': subnet.id,\n # 'DeviceIndex': 0,\n # 'AssociatePublicIpAddress': True,\n # 'Groups': [security_group.id]}]\n # placement_specs = {'AvailabilityZone': u.get_zone()}\n placement_specs = {}\n if placement_group:\n placement_specs['GroupName'] = placement_group\n\n args['Placement'] = placement_specs\n args['Monitoring'] = {'Enabled': True}\n\n if disk_size:\n assert disk_size > 0\n ebs = {\n 'VolumeSize': disk_size,\n 'VolumeType': 'gp2',\n }\n\n args['BlockDeviceMappings'] = [{\n 'DeviceName': '/dev/sda1',\n 'Ebs': ebs\n }]\n\n # Use high throughput disk (0.065/iops-month = about $1/hour)\n if 'NCLUSTER_AWS_FAST_ROOTDISK' in os.environ:\n assert not disk_size, f\"Specified both disk_size {disk_size} and $NCLUSTER_AWS_FAST_ROOTDISK, they are incompatible as $NCLUSTER_AWS_FAST_ROOTDISK hardwired disk size\"\n\n ebs = {\n 'VolumeSize': 500,\n 'VolumeType': 'io1',\n 'Iops': 11500\n }\n\n args['BlockDeviceMappings'] = [{\n 'DeviceName': '/dev/sda1',\n 'Ebs': ebs\n }]\n\n instances = []\n try:\n instances = ec2.create_instances(**args)\n except 
Exception as e:\n log(f\"Instance creation for {name} failed with ({e})\")\n log(\n \"You can change availability zone using export NCLUSTER_ZONE=...\")\n log(\"Terminating\")\n os.kill(os.getpid(),\n signal.SIGINT) # sys.exit() doesn't work inside thread\n\n assert instances, f\"ec2.create_instances returned {instances}\"\n log(f\"Allocated {len(instances)} instances\")\n instance = instances[0]\n\n task = Task(name, instance, # propagate optional args\n install_script=install_script,\n image_name=image_name,\n instance_type=instance_type)\n\n # have internal task/job/run hierarchy, in case of single task\n # manually initialize it\n if job is None:\n job = Job(name=name, run_=run_, tasks=[task])\n\n run_.jobs.append(job)\n\n return task",
"def create(IMGSIZE=...) -> retval:\n ...",
"def process_creator_task(task, capture_directory, args):\n cprint(\"[info] Processing task: \" + task[\"name\"], \"green\")\n task_timestamp = time.strftime(\"%Y-%m-%d_%H-%M-%S\")\n\n if \"configuration\" in task:\n for host_configuration in task[\"configuration\"]:\n host_configure(host_configuration[\"ip\"], host_configuration[\"command\"], task_timestamp,\n args.output_directory, args.username, args.password)\n\n tshark_process = start_tshark(task, args.interface, capture_directory, task_timestamp)\n run_command(task, task_timestamp, args.output_directory)\n time.sleep(args.delay)\n\t\t\n tshark_process.terminate()\n move_files(capture_directory, args.output_directory)\n cprint(\"[info] Finished task: \" + task[\"name\"], \"green\")"
] |
{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
This route gets the heartbeat for a token. The heartbeat is the object that contains data for proving existence of a file (for example, Swizzle or Merkle objects). It is provided for nodes that need to recover their heartbeat. The heartbeat does not contain any private information, so having someone else's heartbeat does not help you.
|
def api_downstream_heartbeat(token):
    with HttpHandler(app.mongo_logger) as handler:
        handler.context['token'] = token
        handler.context['remote_addr'] = request.remote_addr
        db_token = Token.query.filter(Token.token == token).first()
        if (db_token is None):
            raise NotFoundError('Nonexistent token.')
        beat = app.heartbeat
        pub_beat = beat.get_public()
        response = dict(token=db_token.token,
                        type=type(beat).__name__,
                        heartbeat=pub_beat.todict())
        if (app.mongo_logger is not None):
            app.mongo_logger.log_event('heartbeat',
                                       {'context': handler.context,
                                        'response': response})
        return jsonify(response)
    return handler.response
|
[
"def _new_heartbeat_frame():\n return frame.Heartbeat()",
"async def _start_heartbeat(self, ctx, arg=None):\n self.heartbeat.start()",
"def send_heartbeat(self):\n self._heartbeats += 1\n self._missed_heartbeats += 1\n self.send('~h~%d' % self._heartbeats)",
"def _send_heartbeat(self):\n LOGGER.debug('Sending heartbeat frame')\n self._send_heartbeat_frame()\n self._start_send_timer()",
"def _get_heartbeat_message(self) -> dict:\n return dict(\n category=\"event\",\n producer_snd=0,\n data=[\n dict(\n csc=\"Heartbeat\",\n salindex=0,\n data=dict(\n stream=dict(\n csc=self.remote.salinfo.name,\n salindex=self.remote.salinfo.index,\n lost=0,\n last_heartbeat_timestamp=-1,\n max_lost_heartbeats=self.heartbeat_max_lost,\n )\n ),\n ),\n ],\n )",
"def backup_heartbeat(self):\n return self.data.get('backup_heartbeat')",
"def on_heartbeat(self):\n raise NotImplementedError()",
"def get_last_heartbeat():\n with open(HEARTBEAT_LOG, 'r') as f:\n return f.readlines()[-1].rstrip()",
"def heartbeat_thread(self):\n while True:\n self.send_message(msg.Heartbeat())\n time.sleep(0.5)",
"def _send_heartbeat_frame(self):\n LOGGER.debug('Sending heartbeat frame')\n self._connection._send_frame( # pylint: disable=W0212\n self._new_heartbeat_frame())\n self._heartbeat_frames_sent += 1",
"def gen_heartbeat_test(self, heartbeat_packet):\n _, heartbeat_value = heartbeat_packet\n return self.gen_test(\n test_name=\"hearbeat_test\",\n raw_packet_value=heartbeat_value,\n msg=\"frame_type=[8] channel=[0] payload_size=[0] msg=[]\",\n )",
"def _add_heartbeat_message_to_redis(self, hb_interval):\n now = datetime.now().strftime(\"%Y%M%d%H%M%S\")\n key_to_add = \"{}:{}:{}\".format(self.traptor_type,\n self.traptor_id,\n 'heartbeat')\n message = now\n if self.heartbeat_conn.setex(key_to_add, int(hb_interval * 1.5), message):\n theLogMsg = 'heartbeat_message_sent_success'\n self.logger.info(theLogMsg, extra=logExtra())",
"def check_heartbeat(self):\n if (not self._t2.is_alive()):\n print(\"client heartbeat thread not started, starting\")\n self._t2.start()\n else:\n print(\"client thread already started\")\n print(\"heartbeat is \", self._server_healthy)\n return self._server_healthy",
"def run_heartbeat(self, sentinel=False):\n\n self.bind_heatbeat = self.driver.heartbeat_bind()\n heartbeat_at = self.driver.get_heartbeat(\n interval=self.heartbeat_interval\n )\n while True:\n idle_time = heartbeat_at + (self.heartbeat_interval * 3)\n if self.bind_heatbeat and self.driver.bind_check(\n bind=self.bind_heatbeat\n ):\n (\n identity,\n _,\n control,\n _,\n data,\n _,\n _,\n _,\n ) = self.driver.socket_recv(socket=self.bind_heatbeat)\n if control in [\n self.driver.heartbeat_ready,\n self.driver.heartbeat_notice,\n ]:\n self.log.debug(\n \"Received Heartbeat from [ %s ], client online\",\n identity.decode(),\n )\n expire = self.driver.get_expiry(\n heartbeat_interval=self.heartbeat_interval,\n interval=self.heartbeat_liveness,\n )\n worker_metadata = {\"time\": expire}\n try:\n loaded_data = json.loads(data.decode())\n except Exception:\n pass\n else:\n worker_metadata.update(loaded_data)\n\n self.workers[identity] = worker_metadata\n heartbeat_at = self.driver.get_heartbeat(\n interval=self.heartbeat_interval\n )\n self.driver.socket_send(\n socket=self.bind_heatbeat,\n identity=identity,\n control=self.driver.heartbeat_notice,\n info=struct.pack(\"<f\", expire),\n )\n self.log.debug(\n \"Sent Heartbeat to [ %s ]\", identity.decode()\n )\n\n # Send heartbeats to idle workers if it's time\n elif time.time() > idle_time:\n for worker in list(self.workers.keys()):\n self.log.warning(\n \"Sending idle worker [ %s ] a heartbeat\", worker\n )\n self.driver.socket_send(\n socket=self.bind_heatbeat,\n identity=worker,\n control=self.driver.heartbeat_notice,\n command=b\"reset\",\n info=struct.pack(\n \"<f\",\n self.driver.get_expiry(\n heartbeat_interval=self.heartbeat_interval,\n interval=self.heartbeat_liveness,\n ),\n ),\n )\n if time.time() > idle_time + 3:\n self.log.warning(\"Removing dead worker %s\", worker)\n self.workers.pop(worker)\n else:\n self.log.debug(\"Items after prune %s\", self.workers.prune())\n\n if sentinel:\n break",
"def worker_heartbeat(event: Event):\n if \"worker\" in cast(str, event.get(\"hostname\")):\n request(\"POST\", \"workers/heartbeat\", json=event)",
"def receive_heartbeats():\n global max_delay, slaves_rtt, heartbeat_slaves\n while True:\n (data, addr) = heartbeat_sock.recvfrom(1024)\n receive_time = time.time()\n heartbeat_lock.acquire()\n if addr[0] not in heartbeat_slaves:\n print \"%s Slave Connected.\" % (addr[0])\n start_thread(send_heartbeats, (addr[0],))\n slave_ips.append(addr[0])\n heartbeat_slaves[addr[0]] = -1\n if pickled_data != -1:\n data_sock.sendto(pickled_data, (addr[0], SEND_DATA_PORT)) # Send current song configuration materials\n heartbeat_lock.release()\n continue\n elif receive_time > heartbeat_slaves[addr[0]]:\n print \"%s heartbeat period is over 1 second: %s expected, %s arrival\" % (addr[0], heartbeat_slaves[addr[0]], receive_time)\n new_rtt = float(receive_time - (heartbeat_slaves[addr[0]] - 1.0))/2.0\n if addr[0] in slaves_rtt:\n alpha = 0.125\n slaves_rtt[addr[0]] = (alpha * slaves_rtt[addr[0]]) + ((1 - alpha) * new_rtt)\n new_rtt = slaves_rtt[addr[0]]\n else:\n slaves_rtt[addr[0]] = new_rtt\n if new_rtt > max_delay:\n max_delay = new_rtt\n else:\n max_delay = max(slaves_rtt.values())\n heartbeat_slaves[addr[0]] = -1\n heartbeat_lock.release()",
"async def test_when_receiving_a_heartbeat_request():\n out_queue = TeeQueue()\n pace_maker = PaceMaker(out_queue, None)\n\n heartbeat_id = uuid.uuid4()\n\n await pace_maker.handle_request(\n InboundMessage(heartbeat_id, TcpCommand.HeartbeatRequest, bytes())\n )\n\n response = await out_queue.get()\n assert response == OutboundMessage(\n heartbeat_id, TcpCommand.HeartbeatResponse, bytes()\n )",
"def test_heartbeat_thread_forgotten():\n web_id = 555\n payload = {'name': 'Chipy3', 'owner': 'Mr. Hurlburt', 'local_ip': '2.2.2.2'}\n response_payload = payload.copy()\n response_payload['id'] = web_id\n heartbeat_manager = app.HeartBeatManager(id=web_id, payload=payload)\n\n responses.add(responses.PUT, app.ROVERCODE_WEB_REG_URL+str(web_id)+\"/\",\n json=None, status=404,\n content_type='application/json')\n\n responses.add(responses.POST, app.ROVERCODE_WEB_REG_URL,\n json=response_payload, status=200,\n content_type='application/json')\n\n result = heartbeat_manager.thread_func(run_once=True)\n assert result.status_code == 200\n assert heartbeat_manager.web_id == web_id",
"async def _beat(self):\n data = {\n 'op' : self.HEARTBEAT,\n 'd' : int(time_now()*1000),\n }\n \n await self.send_as_json(data)"
] |
{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
Calls next() on hash_iterable until at most bufsz hashes have been retrieved, at which point it queries the database and retrieves all the contracts associated with those hashes. It then yields each contract associated with the hashes in hash_iterable, or None if no contract was found for the specified hash. Yields a list [contract, hash_iterable_item].
|
def get_contract_iter(hash_iterable, key=None, bufsz=100):
    done = False
    while (not done):
        count = 0
        map = dict()
        try:
            while (count < bufsz):
                item = next(hash_iterable)
                if (key is None):
                    # item is id
                    id = int(item)
                else:
                    id = int(item[key])
                map[id] = [None, item]
                count += 1
        except StopIteration:
            done = True
        except:
            print(traceback.format_exc())
            done = True
        if (count == 0):
            return
        contracts = Contract.query.filter(Contract.id.in_(map.keys())).all()
        for c in contracts:
            map[c.id][0] = c
        for pair in map.values():
            yield pair
|
[
"async def get_blocks_by_hash(self, header_hashes: List[bytes32]) -> List[FullBlock]:\n\n if len(header_hashes) == 0:\n return []\n\n formatted_str = (\n f'SELECT header_hash, block from full_blocks WHERE header_hash in ({\"?,\" * (len(header_hashes) - 1)}?)'\n )\n all_blocks: Dict[bytes32, FullBlock] = {}\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(formatted_str, header_hashes) as cursor:\n for row in await cursor.fetchall():\n header_hash = bytes32(row[0])\n full_block: FullBlock = decompress(row[1])\n all_blocks[header_hash] = full_block\n self.block_cache.put(header_hash, full_block)\n ret: List[FullBlock] = []\n for hh in header_hashes:\n if hh not in all_blocks:\n raise ValueError(f\"Header hash {hh} not in the blockchain\")\n ret.append(all_blocks[hh])\n return ret",
"async def get_block_bytes_by_hash(self, header_hashes: List[bytes32]) -> List[bytes]:\n\n if len(header_hashes) == 0:\n return []\n\n assert len(header_hashes) < self.db_wrapper.host_parameter_limit\n formatted_str = (\n f'SELECT header_hash, block from full_blocks WHERE header_hash in ({\"?,\" * (len(header_hashes) - 1)}?)'\n )\n all_blocks: Dict[bytes32, bytes] = {}\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(formatted_str, header_hashes) as cursor:\n for row in await cursor.fetchall():\n header_hash = bytes32(row[0])\n all_blocks[header_hash] = decompress_blob(row[1])\n\n ret: List[bytes] = []\n for hh in header_hashes:\n block = all_blocks.get(hh)\n if block is not None:\n ret.append(block)\n else:\n raise ValueError(f\"Header hash {hh} not in the blockchain\")\n return ret",
"def get_hashes_from_recent_block_hashes(\n recent_block_hashes: Sequence[Hash32],\n current_block_slot_number: int,\n from_slot: int,\n to_slot: int,\n epoch_length: int) -> Iterable[Hash32]:\n for slot in range(from_slot, to_slot + 1):\n yield get_block_hash(\n recent_block_hashes,\n current_block_slot_number,\n slot,\n epoch_length,\n )",
"async def get_block_records_by_hash(self, header_hashes: List[bytes32]) -> List[BlockRecord]:\n if len(header_hashes) == 0:\n return []\n\n all_blocks: Dict[bytes32, BlockRecord] = {}\n async with self.db_wrapper.reader_no_transaction() as conn:\n async with conn.execute(\n \"SELECT header_hash,block_record,plot_info.plot_filter_info \"\n \"FROM full_blocks LEFT JOIN plot_info USING(header_hash) \"\n f'WHERE header_hash in ({\"?,\" * (len(header_hashes) - 1)}?)',\n header_hashes,\n ) as cursor:\n for row in await cursor.fetchall():\n block_rec_db: BlockRecordDB = BlockRecordDB.from_bytes(row[1])\n if row[2] is None:\n # since we're adding this field lazily, it may not be\n # set. If so, fall back to the slow path\n plot_filter_info = await self.get_plot_filter_info(block_rec_db.header_hash)\n else:\n plot_filter_info = PlotFilterInfo.from_bytes(row[2])\n\n all_blocks[block_rec_db.header_hash] = block_rec_db.to_block_record(\n plot_filter_info.pos_ss_cc_challenge_hash,\n plot_filter_info.cc_sp_hash,\n )\n\n ret: List[BlockRecord] = []\n for hh in header_hashes:\n if hh not in all_blocks:\n raise ValueError(f\"Header hash {hh} not in the blockchain\")\n ret.append(all_blocks[hh])\n return ret",
"def iter_chunks(self) -> Iterable[Hashable]:\n pass",
"def cursor_iterator(cursor, batchsize=100000):\n while True:\n results = cursor.fetchmany(batchsize)\n if not results:\n break\n for result in results:\n yield result",
"def get_iterator(self, tx_storage: 'TransactionStorage') -> Iterator[BaseTransaction]:\n iterator: Iterator[BaseTransaction]\n # XXX: this is to mark if the chosen iterator will yield partial transactions\n iterator_covers_partial: bool\n if self.topological_order:\n iterator = tx_storage.topological_iterator()\n iterator_covers_partial = False\n else:\n iterator = tx_storage.get_all_transactions()\n iterator_covers_partial = True\n for tx in iterator:\n if self.matches(tx):\n yield tx\n if self.include_partial and not iterator_covers_partial:\n # if partial transactions are needed and were not already covered, we use get_all_transactions, which\n # includes partial transactions, to yield them, skipping all that aren't partial\n for tx in tx_storage.get_all_transactions():\n tx_meta = tx.get_metadata()\n if tx_meta.validation.is_fully_connected():\n continue\n if self.matches(tx):\n yield tx",
"def results(self):\r\n while 1:\r\n row = self.cursor.fetchone()\r\n if not row: break\r\n yield self.decode_row(row)",
"def find_peers_for_blob(self, blob_hash, timeout=None, filter_self=True):\n if blob_hash not in self.peers:\n self.peers[blob_hash] = [(self.dht_node.externalIP, self.dht_node.peerPort)]\n bin_hash = binascii.unhexlify(blob_hash)\n finished_deferred = self.dht_node.iterativeFindValue(bin_hash, exclude=self.peers[blob_hash])\n timeout = timeout or conf.settings['peer_search_timeout']\n if timeout:\n finished_deferred.addTimeout(timeout, self.dht_node.clock)\n try:\n peer_list = yield finished_deferred\n except defer.TimeoutError:\n log.debug(\"DHT timed out while looking peers for blob %s after %s seconds\",\n blob_hash, timeout)\n peer_list = []\n\n peers = set(peer_list)\n results = []\n for node_id, host, port in peers:\n if filter_self and (host, port) == (self.dht_node.externalIP, self.dht_node.peerPort):\n continue\n results.append(self.peer_manager.get_peer(host, port))\n self.peers[blob_hash].append((host, port))\n defer.returnValue(results)",
"async def hscan_iter(self, name, match=None, count=None):\n cursor = '0'\n while cursor != 0:\n cursor, data = await self.hscan(name, cursor=cursor,\n match=match, count=count)\n for item in data.items():\n yield item",
"def _fetch(self) -> Iterator: # pragma: no cover\n raise NotImplementedError",
"def get_objects(self, query):\n for ref in self.get_keys(query):\n try:\n yield ref, self._cim.logical_data_store.get_object_buffer(ref)\n except IndexKeyNotFoundError:\n self.w(\"Expected object not found in object store: %s\", ref)\n continue",
"def get_block_hashes_from_hash(self, block_hash, max_num):\n block = self.get_block(block_hash)\n if block is None:\n return []\n hashes = []\n hashes.append(block.hash)\n for i in range(max_num - 1): # already have one block added to the hashes list\n block = self.get_block(block.parentHash)\n if block is None:\n break\n hashes.append(block.hash)\n if block.height == 0:\n break\n return hashes",
"def getContentIterator(self, storage=None):\n if not self.hasContent():\n raise RuntimeError('item has no content yet')\n encryptorFactory = crypto.getSymmetricEncryptionAlgorithm(\n self.oldEncryptionMethod)\n decryptor = encryptorFactory.getDecryptor(\n self.oldEncryptionKey, self.oldContentEncryptionIV)\n\n compressorFactory = compression.getCompressionAlgorithm(\n self.oldCompressionMethod)\n decompressor = compressorFactory.getDecompressor()\n\n hasher = crypto.getHashAlgorithm(self.oldHashMethod)\n\n if storage is None:\n storage = self.storage\n\n with storage.open('rb') as f:\n encryptedChunck = True\n while encryptedChunck:\n encryptedChunck = f.read(io.DEFAULT_BUFFER_SIZE)\n decryptor.putEncrypted(encryptedChunck)\n while decryptor.hasMore():\n compressedChunck = decryptor.getDecrypted()\n decompressor.putCompressed(compressedChunck)\n while decompressor.hasMore():\n plainTextChunck = decompressor.getDecompressed()\n hasher.update(plainTextChunck)\n yield plainTextChunck\n decryptor.finish()\n while decryptor.hasMore():\n compressedChunck = decryptor.getDecrypted()\n decompressor.putCompressed(compressedChunck)\n # leave it in decompressor's buffer, it was in decryptors buffer\n # before\n decompressor.finish()\n while decompressor.hasMore():\n plainTextChunck = decompressor.getDecompressed()\n hasher.update(plainTextChunck)\n yield plainTextChunck\n\n if not hasher.hexdigest() == self.decryptedContentHash:\n raise ValueError(\"Item storage was corrupted\")",
"def query(self, query_expression, filter_expression=None):\n query_kwargs = {}\n\n query_kwargs[\"KeyConditionExpression\"] = query_expression\n if filter_expression is not None:\n query_kwargs[\"FilterExpression\"] = filter_expression\n\n results = self._boto3_table.query(\n **query_kwargs,\n **self.data_model._get_kwargs(),\n )\n items = [self.data_model.deserialize(item) for item in results[\"Items\"]]\n yield from items\n\n while \"LastEvaluatedKey\" in results:\n results = self._boto3_table.query(\n ExclusiveStartKey=results[\"LastEvaluatedKey\"],\n **query_kwargs,\n **self.data_model._get_kwargs(),\n )\n items = [self.data_model.deserialize(item) for item in results[\"Items\"]]\n yield from items",
"def hash_iterator(self):\n return self.fixed_statistics.keys()",
"def deterministic_tile_iterator() -> Iterator[Tile]:\n while True:\n for _, tile in load_tiles_from_json():\n yield tile",
"async def scan_iter(self, match=None, count=None):\n cursor = '0'\n while cursor != 0:\n cursor, data = await self.scan(cursor=cursor, match=match, count=count)\n for item in data:\n yield item",
"def iterkeys(self):\n self.proto.iterinit()\n try:\n while True:\n yield self.proto.iternext()\n except TyrantError:\n pass"
] |
{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
Special print function that does not add spaces; it just writes directly to stdout. Required by all the functions below so that we don't end up with spaces after every command.
|
def myPrint(str):
    sys.stdout.write(str)
    return str
|
[
"def standout_print(info):\n sys.stdout.write(str(info))\n sys.stdout.write(\"\\n\")",
"def _print(data):\n sys.stdout.buffer.write(data)",
"def printout(string):\r\n print(string)",
"def print(self, *args, **kwargs) -> str:\n self.console.print(*args, highlight=False, **kwargs)\n return self.console.export_text(clear=True, styles=False)",
"def _raw(self, txt):\n logging.info(\"%s - out - %s\" % (self.name, txt)) \n sys.stdout.write(txt)\n sys.stdout.write('\\n')",
"def print_cmd(self):\n \n return self.__print_cmd",
"def printNow(output):\n print(output)",
"def print_out(line):\n print(line)\n sys.stdout.flush()",
"def print(self, *args, sep=' ', end='\\n', file=None): # known special case of print\n pass",
"def prints(self, data, base=None):\r\n return self.write(self._process(data, base))",
"def print(self, *args):\n print(*args, file=self.output_file)",
"def _write(self, message=None):\n if message is not None:\n stdout('%s\\n' % message)\n else:\n stdout('\\n')",
"def output(msg):\r\n sys.stdout.write(msg+\"\\n\")",
"def print_console(stmt=\"\"):\n print(indent + stmt)",
"def print_and_exec(cmd):\n print(cmd)\n os.system(cmd)",
"def w(text=''):\n if printing:\n print(text)\n else:\n _handle.write(text + '\\n')",
"def print_flush(s):\n print s\n sys.stdout.flush()",
"def special_print(*args):\n args = \"\".join(str(a) for a in args)\n indent = special_trace.level * special_trace.indent\n print indent + args.replace(\"\\n\", \"\\n\" + indent)",
"def println(self, data, base=None):\r\n return self.write(self._process(data, base)+\"\\r\\n\")",
"def test_puts_with_user_output_on():\n s = \"string!\"\n output.user = True\n puts(s, show_prefix=False)\n eq_(sys.stdout.getvalue(), s + \"\\n\")"
] |
{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
Set the foreground color using DOSish 016. Colors are out of order, but that's OK; live with it!
|
def fg(clr):
    if clr < 8:
        return myPrint ("%s[%im" % (C_ESC,clr+30))
    else:
        return myPrint ("%s[1,%im" % (C_ESC,clr-8+30))
|
[
"def text_foreground_color(self, color): # Sub-section .6\n command = 'FFE7{0}'.format(self._to_16_bit_rgb(color))\n reply = self._send_command(command, 2)\n return self._from_16_bit_rgb(reply)",
"def setConsoleColor(hex_color=\"\",counter=0):\r\n if len(hex_color) != 7:\r\n hex_color = MpGlobal.Window.style_dict[\"theme_very_dark\"].name()\r\n \r\n MpGlobal.Window.txt_main.setStyleSheet(\"background: \"+hex_color+\";\")\r\n\r\n if counter > 0:\r\n MpGlobal.Console_State_Counter = counter;",
"def DefaultColorCoding():\n print((\"\\033[49m \\033[39m \"), end=' ') #set to default color coding, suppress newline",
"def set_text_color(self, foreground_color, background_color):\n\n raise NotImplementedError()",
"def foreground_color(self, new_foreground_color):\n self._palette[2] = new_foreground_color",
"def set_background_colors(self) -> None:\n self._window_all.bkgd(\" \", curses.color_pair(m_color_pair.ColorPair.BLACK_N_WHITE.value))",
"def foreground_color(self, foreground_color):\n\n self.container['foreground_color'] = foreground_color",
"def foreground_color(self):\n return self._palette[2]",
"def set_iterm_tab_color(color):\n return \"\"\"\n \\033]6;1;bg;red;brightness;%s\\a\n \\033]6;1;bg;green;brightness;%s\\a\n \\033]6;1;bg;blue;brightness;%s\\a\n \"\"\" % (*util.hex_to_rgb(color),)",
"def set_bright_foreground(self, status):\n if status:\n self.bright_foreground = True\n else:\n self.bright_foreground = False",
"def set_foreground_color(self, color):\n if type(color) == int:\n self.foreground = color\n return True\n if self.color_table.has_key(color):\n self.foreground = self.color_table[color]\n return True\n self.foreground = None\n return False",
"def color_guide():\n print('\\n')\n print('\\u001b[1mStandard Colors\\u001b[0m\\n')\n for j in range(0, 8):\n code = str(j)\n print(f\"\\u001b[48;5;{code}m {code.center(8)}\", end='')\n print(\"\\u001b[0m\")\n\n print('\\n')\n print('\\u001b[1mHigh-Intensity Colors\\u001b[0m\\n')\n for j in range(8, 16):\n code = str(j)\n print(f\"\\u001b[48;5;{code}m {code.center(8)}\", end='')\n print(\"\\u001b[0m\")\n\n print('\\n')\n print('\\u001b[1mColors\\u001b[0m\\n')\n for m in range(0, 6):\n for n in range(0, 36):\n code = str(m * 36 + (n + 16))\n print(f\"\\u001b[48;5;{code}m {code.ljust(3)}\", end='')\n print(\"\\u001b[0m\")\n\n print('\\n')\n print('\\u001b[1mGrayscale colors\\u001b[0m\\n')\n for j in range(232, 256):\n code = str(j)\n print(f\"\\u001b[48;5;{code}m {code.ljust(5)}\", end='')\n print(\"\\u001b[0m\")",
"def _get_fg(attr):\n _cl = attr & curses.foreground\n return None if _cl is 256 else _cl",
"def foreground_color(self):\n return self.container['foreground_color']",
"def setColors(self, fg=None, bg=None):\n if self.console._lockColors is self:\n self.console._lockColors = None\n if fg is not None:\n self._fgcolor = _formatColor(fg)\n if bg is not None:\n self._bgcolor = _formatColor(bg)",
"def _default_bgfun(self):\n self.__screen.fill(BLACK)",
"def set_text_attr(color):\n\t SetConsoleTextAttribute(stdout_handle, color)",
"def red(t):\n return \"\\033[1;7;31m {} \\033[0m\".format(t) if tty() else t",
"def fg(s, color=''):\n return \"^fg(%s)%s^fg()\" % (color, s)",
"def colorfran(self,):\r\n self.objeto_varios.color(self.random,self.color)"
] |
{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
Set the background color using DOSish 07 (cannot use high-color backgrounds); colors are not in DOS order.
|
def bg(clr):
    return myPrint ("%s[%im" % (C_ESC,clr+40))
|
[
"def set_background_colors(self) -> None:\n self._window_all.bkgd(\" \", curses.color_pair(m_color_pair.ColorPair.BLACK_N_WHITE.value))",
"def background(self, color):\r\n doc.bg_color = color",
"def setConsoleColor(hex_color=\"\",counter=0):\r\n if len(hex_color) != 7:\r\n hex_color = MpGlobal.Window.style_dict[\"theme_very_dark\"].name()\r\n \r\n MpGlobal.Window.txt_main.setStyleSheet(\"background: \"+hex_color+\";\")\r\n\r\n if counter > 0:\r\n MpGlobal.Console_State_Counter = counter;",
"def set_background(self):\r\n\r\n\t\tself.bg = self.get_background() if len(self) > 0 else np.array([0.25] * 4) # (A,C,G,T). Default is equal background if not overwritten by MEME (See OneMotif)\r",
"def set_bg_color (self):\n debug (\"In MayaViTkGUI::set_bg_color ()\")\n col = Common.config.bg_color\n rw_col = self.renwin.get_renderer ().GetBackground ()\n if rw_col != col:\n col = rw_col\n cur_col = \"#%02x%02x%02x\"% (col[0]*255, col[1]*255, col[2]*255)\n new_color = tkColorChooser.askcolor (title=\"Background color\",\n initialcolor=cur_col)\n if new_color[1] != None:\n col = Common.tk_2_vtk_color (new_color[0])\n Common.config.bg_color = col\n Common.state.busy ()\n self.renwin.set_background (col)\n self.renwin.Render ()\n Common.state.idle ()",
"def change_background(self):\n red = hex(random.randint(150, 255))[2:]\n green = hex(random.randint(150, 255))[2:]\n blue = hex(random.randint(150, 255))[2:]\n self.canvas.configure(bg=\"#{0}{1}{2}\".format(red, green, blue))",
"def color_change(screen):\n Display.display_background(screen, Display.get_random_color())",
"def background(self):\r\n\t\tGlobal.canvas.fill(0x0)",
"def set_window_background_color(r, g, b):\n glClearColor(r, g, b, 1.0)",
"def set_bgcolor(self, color):\n self.bgcolor = color\n self.textsurf = self.generate_surface()",
"def background(self, r, g, b):\n self.screen.fill((r, g, b))",
"def _default_bgfun(self):\n self.__screen.fill(BLACK)",
"def Style(window):\n ##Set the background color.\n window.SetBackgroundColour(BACKGROUND_COLOR)\n return",
"def change_text_background_color(rgb=(-1, -1, -1), hex=\"\"):\n\n # First, parse out the arguments to determine which source to use\n rgb_given = (rgb != (-1, -1, -1) and len(rgb) == 3)\n hex_given = (hex != \"\")\n\n # Next, ensure that the values given are valid\n if rgb_given:\n for val in rgb:\n if (val < -1 or val > 255):\n rgb_given = False\n if hex_given:\n if len(hex) != 7 or hex[0] != \"#\":\n hex_given = False\n for i in hex[1:].lower():\n if i not in \"0123456789abcdef\":\n hex_given = False\n\n # The boolean checks are correct at this point\n\n # Complete the operation given which sources are available\n esc = \"\\033[\"\n if rgb_given:\n r, g, b = map(lambda x: x if x != -1 else 0, rgb)\n esc += \"48;2;{};{};{}m\".format(r, g, b)\n elif hex_given:\n r, g, b = map(lambda i: int(hex[i:i+2], 16), (1, 3, 5))\n esc += \"48;2;{};{};{}m\".format(r, g, b)\n else: # Revert to default then\n esc += \"49m\"\n\n # Print out the escape sequence to set the new background color\n print(esc, end=\"\", flush=True)",
"def set_error_bg_color(self):\n\n value = self.GetValue()\n ctrl = wx.TextCtrl(self.GetParent())\n self._bg = ctrl.GetBackgroundColour().GetRGB()\n bg = Color('red')\n bg.mix(Color.from_wxbgr(ctrl.GetBackgroundColour().GetRGBA()), 0.5, in_place=True)\n self._invalidBackgroundColour = wx.Colour(bg.to_wxbgr(alpha=False))\n ctrl.Destroy()\n self.SetParameters()\n self.SetValue(value)",
"def DefaultColorCoding():\n print((\"\\033[49m \\033[39m \"), end=' ') #set to default color coding, suppress newline",
"def draw_background(self):\r\n\t\tself.app.background(0,0,0)",
"def test_set_background_1(self):\n pix_ops = px.PixelOperations()\n self.assertTrue(np.allclose(pix_ops.get_background(), px._white1, rtol=1e-05, atol=1e-08))",
"def set_highlight(self):\r\n vim.command(\"highlight DbgCurrent term=reverse ctermfg=White ctermbg=Red gui=reverse\")\r\n vim.command(\"highlight DbgBreakPt term=reverse ctermfg=White ctermbg=Green gui=reverse\")",
"def set_background(image_path):\n backgroundCommand = \"feh -q --bg-fill ~/Pictures/\" + image_path\n subprocess.run(backgroundCommand, shell=True)"
] |
{
  "objective": {
    "paired": [],
    "self": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
Generates data for the first test case. There are 3 columns corresponding to data1, data2 and data3, all of which are of type string.
|
def exampleCase1(self):
    data = [['data1', 'data2', 'data3']]
    for _ in range(10000000):
        data.append([self.randomText() for x in range(3)])
    self.writeCSV(1, data)
|
[
"def test_dataset(source='dict'):\n \n def static(): \n row={'name':'Ivan Krsti\\xc4\\x87', 'id':1234,\n 'badge_line1':'laptop.org', 'badge_line2':'Sprint Leader: OLPC',\n 'key_note':True, 'speaker':True, 'vendor':True,\n 'session_chair':True, 'sponsor':True,\n 't_shirt_size':'L' }\n return [row]\n\n def first_in_db():\n # row = db().select(db.expense_item.ALL)[0]\n # row = db(db.expense_form.person==auth.user.id).select()[0]\n row = db().select(db.expense_item.ALL)[0]\n return [row]\n\n ds = {'dict': static(),\n 'first': first_in_db()}[source]\n\n return ds",
"def _generate_data(self):\n pass",
"def sample_data(data_type, num_contexts=None):\n if data_type == '2linear':\n # Create linear dataset\n num_actions = 2\n context_dim = 10\n noise_stds = [0.01 * (i + 1) for i in range(num_actions)]\n dataset, _, opt_linear = sample_linear_data(num_contexts, context_dim,\n num_actions, sigma=noise_stds)\n opt_rewards, opt_actions = opt_linear\n return dataset, opt_rewards, opt_actions, num_actions, context_dim, None\n if data_type == 'linear':\n # Create linear dataset\n num_actions = 8\n context_dim = 10\n noise_stds = [0.01 * (i + 1) for i in range(num_actions)]\n dataset, _, opt_linear = sample_linear_data(num_contexts, context_dim,\n num_actions, sigma=noise_stds)\n opt_rewards, opt_actions = opt_linear\n return dataset, opt_rewards, opt_actions, num_actions, context_dim, None\n elif data_type == 'sparse_linear':\n # Create sparse linear dataset\n num_actions = 7\n context_dim = 10\n noise_stds = [0.01 * (i + 1) for i in range(num_actions)]\n num_nnz_dims = int(context_dim / 3.0)\n dataset, _, opt_sparse_linear = sample_sparse_linear_data(\n num_contexts, context_dim, num_actions, num_nnz_dims, sigma=noise_stds)\n opt_rewards, opt_actions = opt_sparse_linear\n return dataset, opt_rewards, opt_actions, num_actions, context_dim, None\n elif data_type == 'mushroom':\n # Create mushroom dataset\n num_actions = 2\n context_dim = 117\n file_name = FLAGS.mushroom_data\n dataset, opt_mushroom = sample_mushroom_data(file_name, num_contexts)\n opt_rewards, opt_actions = opt_mushroom\n return dataset, opt_rewards, opt_actions, num_actions, context_dim,None\n elif data_type == 'financial':\n num_actions = 8\n context_dim = 21\n num_contexts = min(3713, num_contexts)\n noise_stds = [0.01 * (i + 1) for i in range(num_actions)]\n file_name = FLAGS.financial_data\n dataset, opt_financial = sample_stock_data(file_name, context_dim,\n num_actions, num_contexts,\n noise_stds, shuffle_rows=True)\n opt_rewards, opt_actions = opt_financial\n return dataset, opt_rewards, opt_actions, num_actions, context_dim,None\n elif data_type == 'jester':\n num_actions = 8\n context_dim = 32\n num_contexts = min(19181, num_contexts)\n file_name = FLAGS.jester_data\n dataset, opt_jester = sample_jester_data(file_name, context_dim,\n num_actions, num_contexts,\n shuffle_rows=True,\n shuffle_cols=True)\n opt_rewards, opt_actions = opt_jester\n return dataset, opt_rewards, opt_actions, num_actions, context_dim,None\n elif data_type == 'statlog':\n file_name = FLAGS.statlog_data\n num_actions = 7\n num_contexts = min(43500, num_contexts)\n sampled_vals = sample_statlog_data(file_name, num_contexts,\n shuffle_rows=True)\n contexts, rewards, (opt_rewards, opt_actions) = sampled_vals\n dataset = np.hstack((contexts, rewards))\n context_dim = contexts.shape[1]\n return dataset, opt_rewards, opt_actions, num_actions, context_dim,None\n elif data_type == 'adult':\n file_name = FLAGS.adult_data\n num_actions = 2\n num_contexts = min(45222, num_contexts)\n sampled_vals = sample_adult_data(file_name, num_contexts,\n shuffle_rows=True)\n contexts, rewards, (opt_rewards, opt_actions) = sampled_vals\n dataset = np.hstack((contexts, rewards))\n context_dim = contexts.shape[1]\n return dataset, opt_rewards, opt_actions, num_actions, context_dim,None\n elif data_type == 'covertype':\n file_name = FLAGS.covertype_data\n num_actions = 7\n num_contexts = min(150000, num_contexts)\n sampled_vals = sample_covertype_data(file_name, num_contexts,\n shuffle_rows=True)\n contexts, rewards, (opt_rewards, opt_actions) = sampled_vals\n dataset = 
np.hstack((contexts, rewards))\n context_dim = contexts.shape[1] #54\n return dataset, opt_rewards, opt_actions, num_actions, context_dim,None\n elif data_type == 'census':\n file_name = FLAGS.census_data\n num_actions = 9\n num_contexts = min(150000, num_contexts)\n sampled_vals = sample_census_data(file_name, num_contexts,\n shuffle_rows=True)\n contexts, rewards, (opt_rewards, opt_actions) = sampled_vals\n dataset = np.hstack((contexts, rewards))\n context_dim = contexts.shape[1]\n return dataset, opt_rewards, opt_actions, num_actions, context_dim,None\n elif data_type == 'wheel':\n delta = 0.5\n num_actions = 5\n context_dim = 2\n mean_v = [0.1,0.1,0.1,0.1,0.2]\n std_v = [0.1, 0.1, 0.1, 0.1, 0.1]\n mu_large = 0.4\n std_large = 0.1\n dataset, opt_wheel = sample_wheel_bandit_data(num_contexts, delta,\n mean_v, std_v,\n mu_large, std_large)\n opt_rewards, opt_actions = opt_wheel\n\n return dataset, opt_rewards, opt_actions, num_actions, context_dim,None\n elif data_type == 'wheel2':\n delta = 0.7\n num_actions = 2\n context_dim = 2\n mean_v = [0.0, 1]\n std_v = [0.1, 0.1]\n mu_large = 2\n std_large = 0.1\n dataset, opt_wheel = sample_wheel2_bandit_data(num_contexts, delta,\n mean_v, std_v,\n mu_large, std_large)\n opt_rewards, opt_actions = opt_wheel\n\n return dataset, opt_rewards, opt_actions, num_actions, context_dim,None\n elif data_type == 'eeg': #Epileptic\n file_name = FLAGS.eeg_data\n num_actions = 5\n num_contexts = min(11500, num_contexts)\n sampled_vals = sample_eeg_data(file_name, num_contexts,\n shuffle_rows=True)\n contexts, rewards, (opt_rewards, opt_actions) = sampled_vals\n dataset = np.hstack((contexts, rewards))\n context_dim = contexts.shape[1]\n return dataset, opt_rewards, opt_actions, num_actions, context_dim,None\n elif data_type == 'diabetic':\n file_name = FLAGS.diabetic_data\n num_actions = 3\n num_contexts = min(100000, num_contexts)\n sampled_vals = sample_diabetic_data(file_name, num_contexts,\n shuffle_rows=True)\n contexts, rewards, (opt_rewards, opt_actions) = sampled_vals\n dataset = np.hstack((contexts, rewards))\n context_dim = contexts.shape[1]\n return dataset, opt_rewards, opt_actions, num_actions, context_dim,None\n elif data_type == 'phone':\n file_name = FLAGS.phone_data\n num_actions = 6\n num_contexts = min(7767, num_contexts)\n sampled_vals = sample_phone_data(file_name, num_contexts,\n shuffle_rows=True)\n contexts, rewards, (opt_rewards, opt_actions) = sampled_vals\n dataset = np.hstack((contexts, rewards))\n context_dim = contexts.shape[1]\n return dataset, opt_rewards, opt_actions, num_actions, context_dim,None\n elif data_type == 'aps': #scania\n file_name = FLAGS.aps_data\n num_actions = 2\n num_contexts = min(76000, num_contexts)\n sampled_vals = sample_aps_data(file_name, num_contexts,\n shuffle_rows=True)\n contexts, rewards, (opt_rewards, opt_actions) = sampled_vals\n dataset = np.hstack((contexts, rewards))\n context_dim = contexts.shape[1]\n return dataset, opt_rewards, opt_actions, num_actions, context_dim,None\n elif data_type == 'txt':\n file_name = [FLAGS.positive_data_file,FLAGS.negative_data_file]\n num_actions = 2\n num_contexts = min(10000, num_contexts)\n sampled_vals = sample_txt_data(file_name, num_contexts,\n shuffle_rows=True)\n contexts, rewards, (opt_rewards, opt_actions),vocab_processor = sampled_vals\n dataset = np.hstack((contexts, rewards))\n context_dim = contexts.shape[1]\n return dataset, opt_rewards, opt_actions, num_actions, context_dim,vocab_processor\n elif data_type == 'amazon':\n file_name = 
FLAGS.amazon_data_file\n num_actions = 5\n num_contexts = min(10000, num_contexts)\n sampled_vals = sample_amazon_data(file_name, num_contexts,shuffle_rows=True)\n contexts, rewards, (opt_rewards, opt_actions),vocab_processor = sampled_vals\n dataset = np.hstack((contexts, rewards))\n context_dim = contexts.shape[1]\n return dataset, opt_rewards, opt_actions, num_actions, context_dim,vocab_processor",
"def create_dataset_specification_and_records(self):\n\n # We chose the 5 smallest alphabets (i.e. those with the least characters)\n # out of the 'background' set of alphabets that are intended for train/val\n # We keep the 'evaluation' set of alphabets for testing exclusively\n # The chosen alphabets have 14, 14, 16, 17, and 20 characters, respectively.\n validation_alphabets = [\n 'Blackfoot_(Canadian_Aboriginal_Syllabics)',\n 'Ojibwe_(Canadian_Aboriginal_Syllabics)',\n 'Inuktitut_(Canadian_Aboriginal_Syllabics)', 'Tagalog',\n 'Alphabet_of_the_Magi'\n ]\n\n training_alphabets = []\n data_path_trainval = os.path.join(self.data_root, 'images_background')\n for alphabet_name in sorted(tf.io.gfile.listdir(data_path_trainval)):\n if alphabet_name not in validation_alphabets:\n training_alphabets.append(alphabet_name)\n assert len(training_alphabets) + len(validation_alphabets) == 30\n\n data_path_test = os.path.join(self.data_root, 'images_evaluation')\n test_alphabets = sorted(tf.io.gfile.listdir(data_path_test))\n assert len(test_alphabets) == 20\n\n self.parse_split_data(learning_spec.Split.TRAIN, training_alphabets,\n data_path_trainval)\n self.parse_split_data(learning_spec.Split.VALID, validation_alphabets,\n data_path_trainval)\n self.parse_split_data(learning_spec.Split.TEST, test_alphabets,\n data_path_test)",
"def exampleCase2(self):\n\t\t \n\t\tdata = [['date', 'data']]\n\t\tdate_1 = datetime.datetime(2015, 8, 1)\n\t\tdate_2 = datetime.datetime(2017, 8, 1)\n\n\t\tfor _ in range(1800000):\n\t\t\tdata.append([date_1, self.randomText()])\n\t\t\t\n\t\tfor _ in range(1800000, 2000000):\n\t\t\tdata.append([date_2, self.randomText()])\n\n\t\tself.writeCSV(2, data)",
"def exampleCase3(self):\n\t\tdata = [['name', 'phone']]\n\n\t\tfor _ in range(10000):\n\t\t\tdata.append([self.randomText(), self.randomPhoneNumber()])\n\t\t\n\t\tself.writeCSV(3, data)",
"def test_data_str_1(self):\n var1 = variables.Variable(name='test1', pre_transform=lambda x: str(x[1]))\n var2 = variables.Variable(name='test2', pre_transform=lambda x: str(x[2]))\n var3 = variables.Variable(name='test3', pre_transform=lambda x: str(x[0]))\n\n model_vars = variables.ModelVariables(independent=[var2, var3], dependent=[var1], schema=[var1, var2, var3])\n output = model_vars.data_str([100, 200, 300])\n expected = '200\t300\t100'\n\n self.assertEqual(output, expected)",
"def fixture_student1_problem1():\n return \"z1234567\",\"help me\"",
"def get_dataset():\n dataset = DatasetGenerator({\n 'num_rows': 100,\n 'output': 'list',\n 'schema': {'name': 'faker.name',\n 'phone_number': 'faker.phone_number',\n 'group_id': range(2, 5),\n 'called_by': ['robo', 'associate', 'manager']},\n 'start_time': datetime(2017, 1, 1, 23, 22),\n 'end_time': datetime(2017, 7, 1, 22, 14),\n 'increments': 'hours'})\n dataset.generate()\n yield from dataset.to_output()",
"def create_simple_dataset():\r\n # first dataset class distribution\r\n mean = [2, 1]\r\n cov = [[1, 0], [0, 3]]\r\n data1 = np.random.multivariate_normal(mean, cov, 5000)\r\n # Second dataset class distribution\r\n mean = [5, 8]\r\n cov = [[3, 1], [1, 3]]\r\n data2 = np.random.multivariate_normal(mean, cov, 5000)\r\n\r\n x = np.concatenate((data1, data2))\r\n\r\n # creating the labels\r\n y1 = np.zeros((5000, 1))\r\n y2 = np.ones((5000, 1))\r\n y = np.concatenate((y1, y2))\r\n\r\n # splitting to train and test set (0.85, 0.20)\r\n mask = np.random.rand(10000) < 0.85\r\n x_train = x[mask]\r\n y_train = y[mask]\r\n mask = np.logical_not(mask)\r\n x_test = x[mask]\r\n y_test = y[mask]\r\n\r\n return x_train, y_train, x_test, y_test",
"def _creat_testdata(file):\n # Creating testdata and visualizing\n filedir = \"__testfiles__/\" + file\n sol.run(filedir, \"__output__\")\n vis.run(\"__output__\")\n plt.pause(5)\n # Saving testdata if approved\n check = input(\"Should this data be used as testdata [y/n]? \")\n if check == \"y\":\n newdirwf = \"__unittestfiles__/\" + file.split(\".\")[0] + \"_wf.dat\"\n newdirenergy = \"__unittestfiles__/\" + file.split(\".\")[0]\\\n + \"_energy.dat\"\n testdatawf = np.loadtxt(\"__output__/wavefuncs.dat\")\n np.savetxt(newdirwf, testdatawf)\n testdataenergy = np.loadtxt(\"__output__/energies.dat\")\n np.savetxt(newdirenergy, testdataenergy)\n plt.close('all')\n if check == \"n\":\n plt.close('all')",
"def add_test_data():\n add_furniture(\"invoice_file.csv\", \"Elisa Miles\", \"LR04\", \"Leather Sofa\", \"25.00\")\n add_furniture(\"invoice_file.csv\", \"Edward Data\", \"KT78\", \"Kitchen Table\", \"10.00\")\n add_furniture(\"invoice_file.csv\", \"Alex Gonzales\", \"BR02\", \"Queen Mattress\", \"17.00\")",
"def create_dataset_specification_and_records(self):\n\n splits = self.get_splits()\n # Get the names of the classes assigned to each split.\n train_classes = splits['train']\n valid_classes = splits['valid']\n test_classes = splits['test']\n\n self.parse_split_data(learning_spec.Split.TRAIN, train_classes)\n self.parse_split_data(learning_spec.Split.VALID, valid_classes)\n self.parse_split_data(learning_spec.Split.TEST, test_classes)",
"def ticket_gen(n): \n \n #This is the different words we'll be using for various generated data sets\n faker = Faker()\n df = []\n meta = {}\n activ = {}\n key = []\n users = ['user', 'admin', 'customer']\n activities = ['note', 'admin', 'customer']\n category = ['Phone', 'computer', 'jackets']\n product = ['mobile', 'tigimon', 'tamigotcha']\n status = ['open', 'closed', 'resolved','waiting for customer', 'waiting for third party', \"pending\"]\n descr_act = []\n activity = []\n meta_data = []\n for n in range(n):\n \n \n #key.append(n)\n #this creates the intial metadata\n \n #Time is set from the time_origin, which is when the first ticket is made,\n #and then added onto at random intervals \n \n time_origin = faker.date_time_this_century()\n \n activities = randrange(10)\n work_type = randrange(1,4)\n \n \n desc_act_mwap ={'key':n,\n 'performed_at': faker.date_time_this_century(),\n 'ticket_id': randrange(80,1000),\n 'performer_type': faker.words(1, users, True)[0],\n 'performer_id': randrange(80,10000) }\n descr_act.append(desc_act_mwap)\n\n\n activity_next = time_origin\n for x in range(activities):\n #depending on the total activities, this creates what information is in those activities\n activity_next = activity_next + timedelta(days=randrange(10))\n\n if work_type == 1:\n #if the ticket type is note, as per example\n\n act_data_note = {'key':n,'performed_at': activity_next,\n 'id': randrange(80,1000),\n 'category': faker.words(1, category, True)[0],\n 'type': randrange(20)}\n activity.append(act_data_note)\n\n x+=1\n activity_next = activity_next + timedelta(days=randrange(3))\n\n else:\n #if the ticket type is shipping, as per example\n\n act_data_shipping = {\n 'key':n,\n 'performed_at': time_origin,\n 'shipping_address': faker.address(),\n 'shipping_date': activity_next,\n 'category': faker.words(1, users, True)[0],\n 'priority': randrange(5),\n 'status': faker.words(1, status, True)[0],\n 'contacted_customer': faker.pybool(),\n 'source': randrange(5),\n 'agent_id': randrange(2000),\n 'requester': randrange(2000),\n 'product': faker.words(1, product, True)[0]}\n activity.append(act_data_shipping)\n activity_next = activity_next + timedelta(days=randrange(3))\n\n\n\n x+=1\n \n meta_data_make = {'key':n,'start_at': time_origin, 'end_at': activity_next,\n 'activities_count': randrange(100) }\n meta_data.append(meta_data_make)\n \n time_origin = activity_next + timedelta(days=randrange(10))\n\n \n #when this loop has finished, we append the generated data to the dataframe.\n # df.append({'metadata': {'key':{n},'start_at': {time_origin}, 'end_at': {activity_next},\n # 'activities_count': {randrange(100)} },\n # 'activities_data':[descr_act,activity]})\n act_df = pd.DataFrame(activity)#.set_index(['key'])\n desc_df = pd.DataFrame(descr_act)#.set_index(['key'])\n meta_df = pd.DataFrame(meta_data)#.set_index(['key'])\n return act_df,desc_df,meta_df",
"def test_csvdata(db, specialization, slu1, slu2, student, grade_slu1, grade_slu2):\n\n specialization.unit_count = 2\n spc_list = [specialization]\n unit_list = [slu1, slu2]\n object_list = [\n {\n \"user\": student,\n \"grades\": [grade_slu1, grade_slu2],\n \"submission_date\": datetime(year=2021, month=8, day=15),\n \"total_score\": 38,\n }\n ]\n text = csvdata(spc_list, unit_list, object_list)\n assert (\n text == \"username,slack_id,submission_date,total_score,S01-SLU01,S01-SLU02\\r\\n\"\n \"test_student,U12J14XV12Z,2021-08-15 00:00:00,38,18,20\\r\\n\"\n )",
"def createDatasetHTRU2(trainFilename, testFilename, otherFilename, positiveFraction):\n \n logging.debug('Loading...')\n data = pd.read_csv('HTRU_2.csv', header=None)\n data.columns = ['feature_' + str(c) for c in data.columns[:-1]] + ['target']\n \n logging.debug('Adding features...')\n logging.debug(' log features...')\n originalFeatures = [c for c in data if c != 'target']\n for f in originalFeatures:\n epsilon = 1e-2\n lower = data[f].min()\n if lower < epsilon:\n shift = abs(lower) + epsilon\n else:\n shift = 0\n data['log_' + f] = np.log(data[f] + shift)\n \n logging.debug(' product features...')\n originalFeatures = [c for c in data if c != 'target']\n for f1 in originalFeatures:\n for f2 in originalFeatures:\n if f1 != f2:\n col = '{}_times_{}'.format(f1, f2)\n data[col] = data[f1] * data[f2]\n \n logging.debug('Resampling train/test sets...')\n data_0 = shuffle(data.loc[data.target == 0], random_state = 314159)\n data_1 = shuffle(data.loc[data.target == 1], random_state = 314159)\n data_1_test = data_1.iloc[:250]\n data_1_train = data_1.iloc[250:]\n class_0_train_length = int(len(data_1_train)*(1/positiveFraction - 1))\n data_0_train = data_0.iloc[:class_0_train_length]\n data_0_test = data_0.iloc[class_0_train_length:]\n train = shuffle(pd.concat((data_0_train, data_1_train), axis=0))\n test = shuffle(pd.concat((data_0_test, data_1_test), axis=0))\n \n logging.debug('Scaling...')\n features = [c for c in train.columns if c != 'target']\n scaler = StandardScaler().fit(train[features])\n train[features] = scaler.transform(train[features])\n test[features] = scaler.transform(test[features])\n \n logging.debug('Saving...')\n train.to_csv(trainFilename, index=False)\n test.to_csv(testFilename, index=False)\n pickle.dump({'scaler': scaler}, open(otherFilename, 'wb'))\n logging.debug(' Done')",
"def fixture_student2_problem2():\n return \"z7654321\",\"i am dumb\"",
"def _produce_train_dataset(self):\r\n pass",
"def test_create_data_split(self) -> None:\n\n # Validate data splits for the full data set\n train_test_split = random.uniform(0.1, 0.9)\n full_data_dict = self.data_loader.create_data_split((0, \"hx\"),\n train_test_split=train_test_split)\n train_x, train_y = full_data_dict[\"train_x\"], full_data_dict[\"train_y\"]\n test_x, test_y = full_data_dict[\"test_x\"], full_data_dict[\"test_y\"]\n self._validate_data_split(train_x, train_y,\n test_x, test_y,\n self.num_labels,\n train_test_split)\n\n # Validate data splits for a partial data set\n cutoff = random.randrange(5, self.num_labels - 1)\n partial_data_dict = self.data_loader.create_data_split(\n (0, \"hx\"), train_test_split=train_test_split, data_subset_size=cutoff\n )\n partial_train_x, partial_train_y = partial_data_dict[\"train_x\"], partial_data_dict[\"train_y\"]\n partial_test_x, partial_test_y = partial_data_dict[\"test_x\"], partial_data_dict[\"test_y\"]\n self._validate_data_split(\n partial_train_x, partial_train_y, partial_test_x, partial_test_y,\n size=cutoff, data_split=train_test_split\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generates data for the second test case. There are 2 columns, date and data, both written out as strings in the CSV; the date values themselves are python datetime.datetime objects.
|
def exampleCase2(self):
    data = [['date', 'data']]
    date_1 = datetime.datetime(2015, 8, 1)
    date_2 = datetime.datetime(2017, 8, 1)
    for _ in range(1800000):
        data.append([date_1, self.randomText()])
    for _ in range(1800000, 2000000):
        data.append([date_2, self.randomText()])
    self.writeCSV(2, data)
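The generator above calls helper methods that are not included in this record. A minimal sketch of what they might look like as methods of the same test-data class, assuming randomText returns a short random lowercase string and writeCSV dumps the rows with the standard csv module (the names and signatures come from the calls above; the bodies and the output filename are assumptions):

import csv
import random
import string

def randomText(self, length=10):
    # hypothetical helper: a short random lowercase string
    return ''.join(random.choices(string.ascii_lowercase, k=length))

def writeCSV(self, case_number, data):
    # hypothetical helper: write the generated rows to exampleCase<case_number>.csv
    with open('exampleCase{}.csv'.format(case_number), 'w', newline='') as f:
        csv.writer(f).writerows(data)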
|
[
"def generate_testdata(field_type, driver):\n\n # Test data for 'date' data type\n if field_type == \"date\":\n return [\n (\"2018-03-25\", datetime.date(2018, 3, 25)),\n (datetime.date(2018, 3, 25), datetime.date(2018, 3, 25)),\n ]\n\n # Test data for 'datetime' data type\n if field_type == \"datetime\":\n return [\n (\"2018-03-25T22:49:05\", datetime.datetime(2018, 3, 25, 22, 49, 5)),\n (\n datetime.datetime(2018, 3, 25, 22, 49, 5),\n datetime.datetime(2018, 3, 25, 22, 49, 5),\n ),\n (\n \"2018-03-25T22:49:05.23\",\n datetime.datetime(2018, 3, 25, 22, 49, 5, 230000),\n ),\n (\n datetime.datetime(2018, 3, 25, 22, 49, 5, 230000),\n datetime.datetime(2018, 3, 25, 22, 49, 5, 230000),\n ),\n (\n \"2018-03-25T22:49:05.123456\",\n datetime.datetime(2018, 3, 25, 22, 49, 5, 123000),\n ),\n (\n datetime.datetime(2018, 3, 25, 22, 49, 5, 123456),\n datetime.datetime(2018, 3, 25, 22, 49, 5, 123000),\n ),\n (\n \"2018-03-25T22:49:05+01:30\",\n datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(90)),\n ),\n (\n \"2018-03-25T22:49:05-01:30\",\n datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-90)),\n ),\n (\n datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(90)),\n datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(90)),\n ),\n (\n datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-90)),\n datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-90)),\n ),\n (\n datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(\n timezone(\"Europe/Zurich\")\n ),\n datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(\n timezone(\"Europe/Zurich\")\n ),\n ),\n (\n datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(\n timezone(\"US/Mountain\")\n ),\n datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(\n timezone(\"US/Mountain\")\n ),\n ),\n (\n datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(60 * 24 - 15)),\n datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(60 * 24 - 15)),\n ),\n (\n datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-60 * 24 + 15)),\n datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-60 * 24 + 15)),\n ),\n (\n \"2018-03-25T22:49:05-23:45\",\n datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-60 * 24 + 15)),\n ),\n (\n \"2018-03-25T22:49:05+23:45\",\n datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(60 * 24 - 15)),\n ),\n ]\n\n # Test data for 'time' data type\n elif field_type == \"time\":\n return [\n (\"22:49:05\", datetime.time(22, 49, 5)),\n (datetime.time(22, 49, 5), datetime.time(22, 49, 5)),\n (\"22:49:05.23\", datetime.time(22, 49, 5, 230000)),\n (datetime.time(22, 49, 5, 230000), datetime.time(22, 49, 5, 230000)),\n (\"22:49:05.123456\", datetime.time(22, 49, 5, 123000)),\n (datetime.time(22, 49, 5, 123456), datetime.time(22, 49, 5, 123000)),\n (\"22:49:05+01:30\", datetime.time(22, 49, 5, tzinfo=TZ(90))),\n (\"22:49:05-01:30\", datetime.time(22, 49, 5, tzinfo=TZ(-90))),\n (\n datetime.time(22, 49, 5, tzinfo=TZ(90)),\n datetime.time(22, 49, 5, tzinfo=TZ(90)),\n ),\n (\n datetime.time(22, 49, 5, tzinfo=TZ(-90)),\n datetime.time(22, 49, 5, tzinfo=TZ(-90)),\n ),\n (\n datetime.time(22, 49, 5, tzinfo=TZ(60 * 24 - 15)),\n datetime.time(22, 49, 5, tzinfo=TZ(60 * 24 - 15)),\n ),\n (\n datetime.time(22, 49, 5, tzinfo=TZ(-60 * 24 + 15)),\n datetime.time(22, 49, 5, tzinfo=TZ(-60 * 24 + 15)),\n ),\n (\"22:49:05-23:45\", datetime.time(22, 49, 5, tzinfo=TZ(-60 * 24 + 15))),\n (\"22:49:05+23:45\", datetime.time(22, 49, 5, tzinfo=TZ(60 * 24 - 15))),\n ]",
"def data(ignore_date=False):",
"def test_random_first_last_name_dob(input_value):\r\n first_name = input_value[0]\r\n last_name = input_value[1]\r\n address = input_value[2]\r\n type_char = input_value[3]\r\n result = [create_csv.random_first_last_name_dob(type_c) for type_c in type_char]\r\n print(result)\r\n assert len(result) == 4,'Number of column generated is not correct'\r\n assert result[0] in first_name ,f\"Generated value doesn't present in the {first_name} set\"\r\n assert result[1] in last_name, f\"Generated value doesn't present in the {last_name} set\"\r\n assert result[2] in address, f\"Generated value doesn't present in the {address} set\"\r\n assert isinstance(result[3],datetime.date), 'date is not generated as datetime format'",
"def test_insert_daily_data1(self) -> None:\n self._prepare_tables(\n insert_symbol=True, insert_exchange=True, insert_trade_symbol=True\n )\n self._writer.insert_daily_data(\n trade_symbol_id=self._trade_symbol_id,\n date=\"2021-01-01\",\n open_val=10.0,\n high_val=15,\n low_val=9,\n close_val=12.5,\n volume_val=1000,\n )\n self._check_saved_data(table=\"KibotDailyData\")",
"def generate_data(cls, live_env: ExecEnv,\n sim_env: ExecEnv,\n **kwargs) -> 'SingleRunData':\n\n # Extract parameters\n run: StrategyRun = kwargs['run']\n symbol_day: SymbolDay = kwargs['symbol_day']\n\n json_array = []\n\n # Calculate run info\n profit = None if run.sell_price is None else (run.sell_price - run.buy_price) / run.buy_price\n\n # Create a data point for each second\n moment = datetime.combine(symbol_day.day_date, OPEN_TIME)\n while moment < datetime.combine(symbol_day.day_date, CLOSE_TIME):\n # Ensure valid data is present for the second\n candle = symbol_day.get_candle_at_sec(moment)\n if candle is None:\n moment = moment + timedelta(seconds=1)\n continue\n\n # Create a data point for the second\n data_point = {\n \"price\": str(candle.open),\n \"start_time\": run.start_time.strftime(DATE_TIME_FORMAT),\n \"buy_time\": None if run.buy_time is None else run.buy_time.strftime(DATE_TIME_FORMAT),\n \"end_time\": None if run.end_time is None else run.end_time.strftime(DATE_TIME_FORMAT),\n \"buy_price\": None if run.buy_price is None else str(run.buy_price),\n \"sell_price\": None if run.sell_price is None else str(run.sell_price),\n \"profit\": str(profit)\n }\n\n # Save and move on to the next second\n json_array.append(data_point)\n moment = moment + timedelta(seconds=1)\n\n return SingleRunData(\n run_date=symbol_day.day_date,\n profit=profit,\n run_and_price_data=json.dumps(json_array))",
"def test_data_timeseries(self):\n data = [0, 1, 2, 3]\n timestamps1 = [0.0, 0.1, 0.2, 0.3]\n timestamps2 = [1.0, 1.1, 1.2, 1.3]\n ts1 = TimeSeries(\n name=\"test_ts1\", data=data, unit=\"grams\", timestamps=timestamps1\n )\n ts2 = TimeSeries(\n name=\"test_ts2\", data=ts1, unit=\"grams\", timestamps=timestamps2\n )\n self.assertEqual(ts2.data, data)\n self.assertEqual(ts1.num_samples, ts2.num_samples)\n self.assertEqual(ts1.data_link, set([ts2]))",
"def make_test_data(connection, cursor, num_employees, num_departments, num_cycles, num_expenses_per_day):\n\tprint 'make_test_data: num_departments=%d, num_employees=%d, num_cycles=%d, num_expenses_per_day=%d' \\\n\t % (num_departments, num_employees, num_cycles, num_expenses_per_day)\n\tprint ' (should give expenses of %d * n for department n)' % (num_employees * num_cycles * num_expenses_per_day)\n\t\n\t# Functions to generate values for each field\n\tfirst_name = 'Darren'\n\tdef get_name(employee_num):\n\t\treturn 'Smith.%03d' % employee_num\n\tdef get_date(day_num, fraction_of_day):\n\t\td = day_num % 28\n\t\tm = (day_num//28)%12\n\t\ty = 2000 + day_num//28//12\n\t\tseconds = int(24*60*60*fraction_of_day)\n\t\ts = seconds % 60\n\t\tn = (seconds//60) % 60\n\t\th = seconds//60//60\n\t\treturn '%04d-%02d-%02d %2d:%2d:%2d' % (y, m+1, d+1, h, n, s)\n\tdef get_cost(employee_num, department_num):\n\t\treturn department_num\n\tdef get_department(department_num):\n\t\treturn 'department %03d' % department_num\n\tdef get_description(employee_num, department_num, department_change_num):\n\t\treturn 'expense %03d:%03d for employee %03d' % (department_change_num, department_num, employee_num)\n\t\n\t# Create the employees\n\tdepartment_change_num = 0\n\tfor employee_num in range(num_employees): \n\t\tadd_employee(connection, cursor, first_name, get_name(employee_num), get_department(0))\n\t\n\t# Cycle each employee's department through all available num_cycles times\n\tfor c in range(num_cycles):\n\t\tfor department_num in range(0, num_departments): \n\t\t\tfor employee_num in range(num_employees): \n\t\t\t\tchange_department(cursor, first_name, get_name(employee_num), get_department(department_num), get_date(department_change_num, 0.0))\n\t\t\t\tfor expense_num in range(num_expenses_per_day):\n\t\t\t\t\tadd_expense(cursor, first_name, get_name(employee_num), get_date(department_change_num, (expense_num+1)/(num_expenses_per_day+2)), \n\t\t\t\t\t\t\t\tget_cost(employee_num, department_num), get_description(employee_num,department_num,department_change_num))\n\t\t\tdepartment_change_num += 1",
"def test_nextdate_c2(self):",
"def read_data(first_file, second_file):\n series1 = read_csv(first_file, squeeze=True, header=None)\n series2 = read_csv(second_file, squeeze=True, header=None)\n if series1.dtype == 'float64':\n return DataFrame({\"Actual\": series1, \"Expected\": series2})\n else:\n return DataFrame({\"Actual\": series2, \"Expected\": series1})",
"def ddtt(n_n):\n f_0 = 'Data0.csv' # Open the first file Data0.csv\n data0 = pd.read_csv(\n f_0, header=0) # Save the information of this first file\n y_y = data0['date'] # We keep the dates where we have records of\n # this first file AS INITIALIZATION of YY\n\n for i in range(n_n):\n\n f_i = 'Data' + str(\n i) + '.csv' # We give the names of the files that we are going to read\n\n datai = pd.read_csv(f_i, header=0) # We load the file data fi\n y_i = datai[['date',\n 'close']] # We select the dates and the closing prices\n # of the assets on those dates\n\n y_y = pd.merge(y_y, y_i,\n on=['date']) # We perform the equivalent operation of an\n # inner join with the data 'tables' y_i, considering only the fields\n #'date' and 'close'. The inner join is calculated on the 'date' field\n # of the tables.\n nmp = y_y.to_numpy() # We convert our resulting dataframe to array\n nmp0 = nmp[:, 0] # We store the dates in where the price was recorded\n nmp1 = np.transpose(nmp[:,\n 1:]) # We transpose the prices of the nmp array\n # without the record dates\n return nmp0, nmp1 # return the vector containing the dates where there\n # were records (nmp0), and the array of prices per day (nmp1)",
"def create_final_dataset(self):\r\n print('\\nCreating final dataset...')\r\n self.secondary_df_output = pd.read_json(path.data_secondary_most_recent)\r\n self.secondary_df_output.sort_index(inplace=True) \r\n self.final_df_output = self.secondary_df_output\r\n self.make_features()\r\n self.label_output()\r\n new_cols = ['Dates', 'Recession', 'Recession_in_6mo',\r\n 'Recession_in_12mo', 'Recession_in_24mo',\r\n 'Recession_within_6mo', 'Recession_within_12mo',\r\n 'Recession_within_24mo', 'Payrolls_3mo_pct_chg_annualized',\r\n 'Payrolls_12mo_pct_chg', 'Payrolls_3mo_vs_12mo',\r\n 'Unemployment_Rate', 'Unemployment_Rate_12mo_chg',\r\n 'Real_Fed_Funds_Rate', 'Real_Fed_Funds_Rate_12mo_chg',\r\n 'CPI_3mo_pct_chg_annualized', 'CPI_12mo_pct_chg',\r\n 'CPI_3mo_vs_12mo', '10Y_Treasury_Rate_12mo_chg',\r\n '3M_Treasury_Rate_12mo_chg', '3M_10Y_Treasury_Spread',\r\n '3M_10Y_Treasury_Spread_12mo_chg',\r\n '5Y_10Y_Treasury_Spread', 'S&P_500_3mo_chg',\r\n 'S&P_500_12mo_chg', 'S&P_500_3mo_vs_12mo',\r\n 'IPI_3mo_pct_chg_annualized', 'IPI_12mo_pct_chg',\r\n 'IPI_3mo_vs_12mo']\r\n self.final_df_output = self.final_df_output[new_cols]\r\n print('Finished creating final dataset!')\r\n print('\\t|--Saving final dataset to {}'.format(path.data_final))\r\n self.final_df_output.to_json(path.data_final)\r\n print('\\nFinal dataset saved to {}'.format(path.data_final))",
"def test_insert_bulk_daily_data1(self) -> None:\n self._prepare_tables(\n insert_symbol=True, insert_exchange=True, insert_trade_symbol=True\n )\n df = pd.DataFrame(\n {\n \"trade_symbol_id\": [self._trade_symbol_id] * 3,\n \"date\": [\"2021-01-01\", \"2021-01-02\", \"2021-01-03\"],\n \"open\": [10.0] * 3,\n \"high\": [15] * 3,\n \"low\": [9] * 3,\n \"close\": [12.5] * 3,\n \"volume\": [1000] * 3,\n }\n )\n self._writer.insert_bulk_daily_data(df=df)\n self._check_saved_data(table=\"KibotDailyData\")",
"def _get_data_post2006(date):\r\n \r\n # build the url based on date & create data container\r\n url = '{}/{}/{}/'.format(BASE_URL, date.year, str(date).replace('-','_'))\r\n data = dict(Air_Temp = [], Barometric_Press = [], Wind_Speed = [])\r\n\r\n print('Fetching online data for {}'.format(date)) \r\n for key in data.keys():\r\n try:\r\n data[key] = request.urlopen('{}{}'.format(url, key)).read().decode(encoding='utf_8').split('\\r\\n')\r\n except:\r\n raise ValueError(date) # error accessing website\r\n else:\r\n data[key].pop() # remove last item which will be an empty string \r\n\r\n # verify lengths of 3 files are equal\r\n lengths = []\r\n for k in data.keys():\r\n lengths.append(len(data[k]))\r\n if lengths[1:] != lengths[:-1]:\r\n raise ValueError(date) # file lengths do not match\r\n \r\n for i in range(len(data['Air_Temp'])):\r\n \r\n # verify timestamps are equal for every related entry in 3 files\r\n timestamps = []\r\n for k in data.keys():\r\n timestamps.append(data[k][i].split()[1])\r\n if timestamps[1:] != timestamps[:-1]:\r\n raise ValueError(date) # timestamps for fields do not line up\r\n \r\n yield dict(Date = data['Air_Temp'][i].split()[0],\r\n Time = data['Air_Temp'][i].split()[1],\r\n Status = 'PARTIAL' if date == date.today() else 'COMPLETE', # assume data from today is incomplete\r\n Air_Temp = data['Air_Temp'][i].split()[2],\r\n Barometric_Press = data['Barometric_Press'][i].split()[2],\r\n Wind_Speed = data['Wind_Speed'][i].split()[2])",
"def _generate_data(self):\n pass",
"def get_model_data_per_date(date):",
"def test_custom_csv_get_data_team(self):\n jan = datetime.datetime(2015, 1, 1, 0, 0, 0)\n jan_ts = calendar.timegm(jan.timetuple())\n december = datetime.datetime(2015, 12, 31, 23, 59, 0)\n dec_ts = calendar.timegm(december.timetuple())\n custom_time = dubwebdb.CTimes(d_format=\"%Y-%m\",\n start_time=jan_ts,\n end_time=dec_ts)\n one_team = dubwebdb.Ids(prv_id=None, team_id=[\"1\"],\n project_id=None, div_id=None)\n csv_data = dubwebdb.get_data_budget_team(custom_time, one_team)\n for series in csv_data:\n self.assertEqual(len(series), 14)",
"def test_cust_csv_get_data_team(self):\n decone = datetime.datetime(2015, 12, 1, 0, 0, 0)\n decone_ts = calendar.timegm(decone.timetuple())\n december = datetime.datetime(2015, 12, 31, 23, 59, 0)\n dec_ts = calendar.timegm(december.timetuple())\n custom_time = dubwebdb.CTimes(d_format=\"%Y-%m\",\n start_time=decone_ts,\n end_time=dec_ts)\n one_prv = dubwebdb.Ids(prv_id=[\"1\"], team_id=None,\n project_id=None, div_id=None)\n csv_data = dubwebdb.get_data_budget_team(custom_time, one_prv)\n for series in csv_data:\n self.assertEqual(len(series), 3)",
"def test_dataset(source='dict'):\n \n def static(): \n row={'name':'Ivan Krsti\\xc4\\x87', 'id':1234,\n 'badge_line1':'laptop.org', 'badge_line2':'Sprint Leader: OLPC',\n 'key_note':True, 'speaker':True, 'vendor':True,\n 'session_chair':True, 'sponsor':True,\n 't_shirt_size':'L' }\n return [row]\n\n def first_in_db():\n # row = db().select(db.expense_item.ALL)[0]\n # row = db(db.expense_form.person==auth.user.id).select()[0]\n row = db().select(db.expense_item.ALL)[0]\n return [row]\n\n ds = {'dict': static(),\n 'first': first_in_db()}[source]\n\n return ds",
"def testTimeseriesData(self):\n\n from pthelma import timeseries\n # check uploading\n f = open(\"enhydris/hcore/tests/tsdata.hts\", \"r\")\n\n file_dict = {'data': SimpleUploadedFile(f.name, f.read())}\n post_dict = {'gentity': self.station.pk, 'variable': self.var.pk,\n 'unit_of_measurement': self.unit.pk,\n 'time_zone': self.tz.pk\n }\n form = TimeseriesDataForm(post_dict, file_dict, instance=self.ts)\n\n self.assertEqual(form.is_valid(), True)\n ts = form.save()\n\n ts.save()\n pts = timeseries.Timeseries(ts.id)\n pts.read_from_db(dj_connection)\n self.assertEqual(len(pts.items()), 12872)\n\n #check downloading\n url = \"/timeseries/d/%d/download/\" % self.ts.pk\n response = self.client.get(url)\n if settings.ENHYDRIS_TSDATA_AVAILABLE_FOR_ANONYMOUS_USERS:\n self.assertEqual(response.status_code, 200)\n else:\n self.assertEqual(response.status_code, 302)\n self.assertEquals(self.client.login(username='test',\n password='test'), True)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n # check fiLe\n lines = response.content.splitlines()\n linecount = len(lines)\n headerlinecount = sum([1 for x in takewhile(lambda x: x != '',\n lines)]) + 1\n datalinecount = linecount - headerlinecount\n\n self.assertEqual(datalinecount, 12872)\n\n self.client.logout()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generates data for the third test case. There are 2 columns corresponding to name and phone, which are of type string and integer, respectively.
|
def exampleCase3(self):
    data = [['name', 'phone']]
    for _ in range(10000):
        data.append([self.randomText(), self.randomPhoneNumber()])
    self.writeCSV(3, data)
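exampleCase3 additionally assumes a randomPhoneNumber helper. One possible sketch, purely illustrative, returning a 10-digit integer to match the integer phone column described in the query:

import random

def randomPhoneNumber(self):
    # hypothetical helper: a random 10-digit number as an integer
    return random.randint(10**9, 10**10 - 1)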
|
[
"def add_test_data():\n add_furniture(\"invoice_file.csv\", \"Elisa Miles\", \"LR04\", \"Leather Sofa\", \"25.00\")\n add_furniture(\"invoice_file.csv\", \"Edward Data\", \"KT78\", \"Kitchen Table\", \"10.00\")\n add_furniture(\"invoice_file.csv\", \"Alex Gonzales\", \"BR02\", \"Queen Mattress\", \"17.00\")",
"def _buildPhoneTypes(self, plexus):\n # get the phone type factory\n factory = plexus.datastore.schema.crm.phoneType.pyre_immutable\n # build the records\n yield factory(type='cell', description='the phone number is a mobile phone')\n yield factory(type='voice', description='the phone number is a land line')\n yield factory(type='fax', description='the phone number is a fax')\n yield factory(type='pager', description='the phone number is a pager')\n # all done\n return",
"def exampleCase1(self):\n\t\tdata = [['data1', 'data2', 'data3']]\n\t\tfor _ in range(10000000):\n\t\t\tdata.append([self.randomText() for x in range(3)])\n\t\t\n\t\tself.writeCSV(1, data)",
"def ticket_gen(n): \n \n #This is the different words we'll be using for various generated data sets\n faker = Faker()\n df = []\n meta = {}\n activ = {}\n key = []\n users = ['user', 'admin', 'customer']\n activities = ['note', 'admin', 'customer']\n category = ['Phone', 'computer', 'jackets']\n product = ['mobile', 'tigimon', 'tamigotcha']\n status = ['open', 'closed', 'resolved','waiting for customer', 'waiting for third party', \"pending\"]\n descr_act = []\n activity = []\n meta_data = []\n for n in range(n):\n \n \n #key.append(n)\n #this creates the intial metadata\n \n #Time is set from the time_origin, which is when the first ticket is made,\n #and then added onto at random intervals \n \n time_origin = faker.date_time_this_century()\n \n activities = randrange(10)\n work_type = randrange(1,4)\n \n \n desc_act_mwap ={'key':n,\n 'performed_at': faker.date_time_this_century(),\n 'ticket_id': randrange(80,1000),\n 'performer_type': faker.words(1, users, True)[0],\n 'performer_id': randrange(80,10000) }\n descr_act.append(desc_act_mwap)\n\n\n activity_next = time_origin\n for x in range(activities):\n #depending on the total activities, this creates what information is in those activities\n activity_next = activity_next + timedelta(days=randrange(10))\n\n if work_type == 1:\n #if the ticket type is note, as per example\n\n act_data_note = {'key':n,'performed_at': activity_next,\n 'id': randrange(80,1000),\n 'category': faker.words(1, category, True)[0],\n 'type': randrange(20)}\n activity.append(act_data_note)\n\n x+=1\n activity_next = activity_next + timedelta(days=randrange(3))\n\n else:\n #if the ticket type is shipping, as per example\n\n act_data_shipping = {\n 'key':n,\n 'performed_at': time_origin,\n 'shipping_address': faker.address(),\n 'shipping_date': activity_next,\n 'category': faker.words(1, users, True)[0],\n 'priority': randrange(5),\n 'status': faker.words(1, status, True)[0],\n 'contacted_customer': faker.pybool(),\n 'source': randrange(5),\n 'agent_id': randrange(2000),\n 'requester': randrange(2000),\n 'product': faker.words(1, product, True)[0]}\n activity.append(act_data_shipping)\n activity_next = activity_next + timedelta(days=randrange(3))\n\n\n\n x+=1\n \n meta_data_make = {'key':n,'start_at': time_origin, 'end_at': activity_next,\n 'activities_count': randrange(100) }\n meta_data.append(meta_data_make)\n \n time_origin = activity_next + timedelta(days=randrange(10))\n\n \n #when this loop has finished, we append the generated data to the dataframe.\n # df.append({'metadata': {'key':{n},'start_at': {time_origin}, 'end_at': {activity_next},\n # 'activities_count': {randrange(100)} },\n # 'activities_data':[descr_act,activity]})\n act_df = pd.DataFrame(activity)#.set_index(['key'])\n desc_df = pd.DataFrame(descr_act)#.set_index(['key'])\n meta_df = pd.DataFrame(meta_data)#.set_index(['key'])\n return act_df,desc_df,meta_df",
"def create_dataset_specification_and_records(self):\n\n # We chose the 5 smallest alphabets (i.e. those with the least characters)\n # out of the 'background' set of alphabets that are intended for train/val\n # We keep the 'evaluation' set of alphabets for testing exclusively\n # The chosen alphabets have 14, 14, 16, 17, and 20 characters, respectively.\n validation_alphabets = [\n 'Blackfoot_(Canadian_Aboriginal_Syllabics)',\n 'Ojibwe_(Canadian_Aboriginal_Syllabics)',\n 'Inuktitut_(Canadian_Aboriginal_Syllabics)', 'Tagalog',\n 'Alphabet_of_the_Magi'\n ]\n\n training_alphabets = []\n data_path_trainval = os.path.join(self.data_root, 'images_background')\n for alphabet_name in sorted(tf.io.gfile.listdir(data_path_trainval)):\n if alphabet_name not in validation_alphabets:\n training_alphabets.append(alphabet_name)\n assert len(training_alphabets) + len(validation_alphabets) == 30\n\n data_path_test = os.path.join(self.data_root, 'images_evaluation')\n test_alphabets = sorted(tf.io.gfile.listdir(data_path_test))\n assert len(test_alphabets) == 20\n\n self.parse_split_data(learning_spec.Split.TRAIN, training_alphabets,\n data_path_trainval)\n self.parse_split_data(learning_spec.Split.VALID, validation_alphabets,\n data_path_trainval)\n self.parse_split_data(learning_spec.Split.TEST, test_alphabets,\n data_path_test)",
"def genReadViewPyTest(self, aClass):\n output = \"\"\n funcName = \"\"\n if aClass.className().find(\"multirow\") != -1:\n rowName = aClass.className()\n tabelName = aClass.multitablename + 'MultiTable'\n objName = aClass.multitablename\n\n funcName = 'readTestFor'+tabelName\n output += '\\n\\ndef '+funcName+'():'\n output += '\\n \"\"\" Generated Test For Read Operation to '+tabelName+' Table \"\"\"'\n output += '\\n table = dbsclient.'+tabelName+'()'\n output += '\\n aRow = dbsclient.'+rowName+'()'\n\n paramDict = self.getParamDict(aClass)\n for aparam in paramDict.keys() :\n paramType = paramDict[aparam]\n #paramMap = localmap[paramType]\n if paramType == \"STRING\" :\n output += '\\n setStrValue(aRow, \"'+aparam+'\", \"test_value_'+aparam+'\")'\n if paramType == \"CHARACTER\" :\n output += '\\n setChrValue(aRow, \"'+aparam+'\", \"y\")'\n if paramType == \"INTEGER\" :\n output += '\\n setIntValue(aRow, \"'+aparam+'\", \"1234\")'\n output += '\\n\\n client.READAPI(aRow, table)'\n output += '\\n\\n noOfRows = table.getNoOfRows()'\n output += '\\n print \"no of Rows \",noOfRows'\n output += '\\n for j in range(noOfRows) :'\n for aparam in paramDict.keys() : \n output += '\\n print \"'+aparam+'\", getStrValue(table, \"'+aparam+'\", j)' \n \n return output, funcName",
"def get_dataset():\n dataset = DatasetGenerator({\n 'num_rows': 100,\n 'output': 'list',\n 'schema': {'name': 'faker.name',\n 'phone_number': 'faker.phone_number',\n 'group_id': range(2, 5),\n 'called_by': ['robo', 'associate', 'manager']},\n 'start_time': datetime(2017, 1, 1, 23, 22),\n 'end_time': datetime(2017, 7, 1, 22, 14),\n 'increments': 'hours'})\n dataset.generate()\n yield from dataset.to_output()",
"def create_dataset_specification_and_records(self):\n\n splits = self.get_splits()\n # Get the names of the classes assigned to each split.\n train_classes = splits['train']\n valid_classes = splits['valid']\n test_classes = splits['test']\n\n self.parse_split_data(learning_spec.Split.TRAIN, train_classes)\n self.parse_split_data(learning_spec.Split.VALID, valid_classes)\n self.parse_split_data(learning_spec.Split.TEST, test_classes)",
"def test_dataset(source='dict'):\n \n def static(): \n row={'name':'Ivan Krsti\\xc4\\x87', 'id':1234,\n 'badge_line1':'laptop.org', 'badge_line2':'Sprint Leader: OLPC',\n 'key_note':True, 'speaker':True, 'vendor':True,\n 'session_chair':True, 'sponsor':True,\n 't_shirt_size':'L' }\n return [row]\n\n def first_in_db():\n # row = db().select(db.expense_item.ALL)[0]\n # row = db(db.expense_form.person==auth.user.id).select()[0]\n row = db().select(db.expense_item.ALL)[0]\n return [row]\n\n ds = {'dict': static(),\n 'first': first_in_db()}[source]\n\n return ds",
"def make_test_data(connection, cursor, num_employees, num_departments, num_cycles, num_expenses_per_day):\n\tprint 'make_test_data: num_departments=%d, num_employees=%d, num_cycles=%d, num_expenses_per_day=%d' \\\n\t % (num_departments, num_employees, num_cycles, num_expenses_per_day)\n\tprint ' (should give expenses of %d * n for department n)' % (num_employees * num_cycles * num_expenses_per_day)\n\t\n\t# Functions to generate values for each field\n\tfirst_name = 'Darren'\n\tdef get_name(employee_num):\n\t\treturn 'Smith.%03d' % employee_num\n\tdef get_date(day_num, fraction_of_day):\n\t\td = day_num % 28\n\t\tm = (day_num//28)%12\n\t\ty = 2000 + day_num//28//12\n\t\tseconds = int(24*60*60*fraction_of_day)\n\t\ts = seconds % 60\n\t\tn = (seconds//60) % 60\n\t\th = seconds//60//60\n\t\treturn '%04d-%02d-%02d %2d:%2d:%2d' % (y, m+1, d+1, h, n, s)\n\tdef get_cost(employee_num, department_num):\n\t\treturn department_num\n\tdef get_department(department_num):\n\t\treturn 'department %03d' % department_num\n\tdef get_description(employee_num, department_num, department_change_num):\n\t\treturn 'expense %03d:%03d for employee %03d' % (department_change_num, department_num, employee_num)\n\t\n\t# Create the employees\n\tdepartment_change_num = 0\n\tfor employee_num in range(num_employees): \n\t\tadd_employee(connection, cursor, first_name, get_name(employee_num), get_department(0))\n\t\n\t# Cycle each employee's department through all available num_cycles times\n\tfor c in range(num_cycles):\n\t\tfor department_num in range(0, num_departments): \n\t\t\tfor employee_num in range(num_employees): \n\t\t\t\tchange_department(cursor, first_name, get_name(employee_num), get_department(department_num), get_date(department_change_num, 0.0))\n\t\t\t\tfor expense_num in range(num_expenses_per_day):\n\t\t\t\t\tadd_expense(cursor, first_name, get_name(employee_num), get_date(department_change_num, (expense_num+1)/(num_expenses_per_day+2)), \n\t\t\t\t\t\t\t\tget_cost(employee_num, department_num), get_description(employee_num,department_num,department_change_num))\n\t\t\tdepartment_change_num += 1",
"def generate(cls,patient_file_name=RI_PATIENTS_FILE):\n\n # Open the patient data file for writing generated data\n f = open(PATIENTS_FILE,'w')\n top = True # Starting at the top of the file (need to write header here...)\n\n # Open the raw data file and read in the first (header) record\n pats = csv.reader(file(patient_file_name,'U'),dialect='excel-tab')\n header = pats.next() \n\n # Read in patient data:\n for pat in pats: \n p=dict((zip(header,pat))) # create patient from header and row values \n # Add synthetic data\n patient_name = rndName(p['GENDER'])\n p['fname']=patient_name[0]\n p['initial']=patient_name[1]\n p['lname']=patient_name[2]\n # Add random day of year to year of birth to get dob value\n # Make it for the prior year so vists, tests come after birth\n p['dob']=rndDate(int(p['YOB'])-1).isoformat()\n # Map raw GENDER to SMART encoding values\n # (For the moment, SMART only handles 'male' and 'female'...)\n gender = 'male' if p['GENDER']=='M' else 'female'\n p['gender'] = gender\n p['email'] = toEmail(patient_name)\n # Finally, add a random address:\n adr = rndAddress()\n p = dict(p.items() + adr.items())\n p['home'] = '' if randint(0,1) else rndTelephone()\n p['cell'] = '' if randint(0,1) else rndTelephone()\n \n # Write out the new patient data file:\n # Start with the header (writing only once at the top of the file):\n if top:\n head = p.keys()\n print >>f, \"\\t\".join(head)\n top = False\n # Then write out the row:\n print >>f, \"\\t\".join([ p[field] for field in head])\n f.close()",
"def test_random_first_last_name_dob(input_value):\r\n first_name = input_value[0]\r\n last_name = input_value[1]\r\n address = input_value[2]\r\n type_char = input_value[3]\r\n result = [create_csv.random_first_last_name_dob(type_c) for type_c in type_char]\r\n print(result)\r\n assert len(result) == 4,'Number of column generated is not correct'\r\n assert result[0] in first_name ,f\"Generated value doesn't present in the {first_name} set\"\r\n assert result[1] in last_name, f\"Generated value doesn't present in the {last_name} set\"\r\n assert result[2] in address, f\"Generated value doesn't present in the {address} set\"\r\n assert isinstance(result[3],datetime.date), 'date is not generated as datetime format'",
"def generate_numbers(scenarii, filename, nbr_of_test):\n file_is_benchmark = False\n inside_scenario = False\n inside_title = False\n\n nbr_of_scenarii = len(scenarii)\n nbr_of_scenarii_read = 0\n\n nbr_of_fields_in_a_scenario = 6\n\n nbr_of_test_read = 0\n\n # tables\n cols_count = nbr_of_scenarii\n rows_count = nbr_of_test\n tests_table = [['' for x in range(cols_count)] for x in range(rows_count)]\n number_table = [['' for x in range(cols_count)] for x in range(rows_count)]\n\n with open(filename) as f:\n for index, line in enumerate(f):\n\n stripped_line = line.strip()\n\n empty_line = stripped_line == ''\n\n if not file_is_benchmark:\n if stripped_line.startswith(BENCHMARK_REPORT_TITLE):\n file_is_benchmark = True\n inside_title = True\n continue\n\n if inside_title:\n #print \"inside title\"\n # each title is followed by an underline\n inside_title = False\n continue\n\n if inside_scenario:\n\n #print \"inside scenario\"\n #print stripped_line\n\n data = stripped_line.split('|')\n assert len(data) == nbr_of_fields_in_a_scenario\n\n tests_table[nbr_of_test_read][nbr_of_scenarii_read] = data[0].strip()\n number_table[nbr_of_test_read][nbr_of_scenarii_read] = data[5].strip()\n\n nbr_of_test_read += 1\n\n if nbr_of_test_read == nbr_of_test:\n nbr_of_test_read = 0\n inside_scenario = False\n nbr_of_scenarii_read += 1\n continue\n\n # find out if we are in a title or in a scenario\n if not empty_line:\n scenario_title = stripped_line.split('|')\n if len(scenario_title) == nbr_of_fields_in_a_scenario:\n inside_title = True\n inside_scenario = True\n continue\n\n if stripped_line.startswith(BENCHMARK_END):\n #print \"end of file\"\n break\n\n if empty_line:\n #print \"empty line\"\n # line is empty: reset markers\n inside_scenario = False\n inside_title = False\n\n return file_is_benchmark and nbr_of_scenarii_read == nbr_of_scenarii, tests_table, number_table",
"def _generate_data(self):\n pass",
"def exampleCase2(self):\n\t\t \n\t\tdata = [['date', 'data']]\n\t\tdate_1 = datetime.datetime(2015, 8, 1)\n\t\tdate_2 = datetime.datetime(2017, 8, 1)\n\n\t\tfor _ in range(1800000):\n\t\t\tdata.append([date_1, self.randomText()])\n\t\t\t\n\t\tfor _ in range(1800000, 2000000):\n\t\t\tdata.append([date_2, self.randomText()])\n\n\t\tself.writeCSV(2, data)",
"def create_dataset_specification_and_records(self):\n raise NotImplementedError('Must be implemented in each sub-class.')",
"def test_csvdata(db, specialization, slu1, slu2, student, grade_slu1, grade_slu2):\n\n specialization.unit_count = 2\n spc_list = [specialization]\n unit_list = [slu1, slu2]\n object_list = [\n {\n \"user\": student,\n \"grades\": [grade_slu1, grade_slu2],\n \"submission_date\": datetime(year=2021, month=8, day=15),\n \"total_score\": 38,\n }\n ]\n text = csvdata(spc_list, unit_list, object_list)\n assert (\n text == \"username,slack_id,submission_date,total_score,S01-SLU01,S01-SLU02\\r\\n\"\n \"test_student,U12J14XV12Z,2021-08-15 00:00:00,38,18,20\\r\\n\"\n )",
"def starter_data():\n\t#Developer User Created and Added to dBase\n\tnew_user = User(user_id=999, name='dev', email='developer@placeholder.com', username='dev', fname='Dev', lname='Eveloper', created_at=datetime.now(), timezone=\"-8\")\n\tdb.session.add(new_user)\n\tdb.session.commit()\n\t#Two placeholder contacts are created associated with developer user and added\n\tcontact1 = Contact(user_id = 999, name='Sneezy', email='Sneezy@placeholder.com', phone='5555555569')\n\tcontact2 = Contact(user_id = 999, name='Dopey', email='Dopey@placeholder.com', phone='5555555519')\n\tdb.session.add_all([contact1, contact2])\n\tdb.session.commit()\n\treturn",
"def default_create_test_data(self, db_name):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
For "count" times we take new successors and choose to go there or not depending on their fitness value.
|
def exec(self, count):
    # Simulated annealing loop: assumes the class provides T0, alpha, length,
    # colors, state, schedule() and fitness(), and that copy, math and
    # random (imported as rnd) are available at module level.
    hist_t = []
    hist_fit = []
    for t in range(count):
        T = self.schedule(self.T0, self.alpha, t)
        hist_t.append(T)
        hist_fit.append(self.fitness(self.state))
        if T == 0:
            # temperature exhausted; return the same tuple shape as below
            return self.state, hist_t, hist_fit
        # build a successor by recoloring one random vertex with a different color
        successor = copy.deepcopy(self.state)
        rand = rnd.randint(0, self.length - 1)
        new_color = rnd.randint(1, self.colors)
        while new_color == self.state[rand]:
            new_color = rnd.randint(1, self.colors)
        successor[rand] = new_color
        e1 = self.fitness(self.state)
        e2 = self.fitness(successor)
        delta_e = (e2 - e1) * 1.0
        if delta_e >= 0:
            # equal or better fitness: always accept
            self.state = successor
        else:
            # worse fitness: accept with probability exp(delta_e / T)
            prob = math.exp(delta_e / T)
            unif = rnd.uniform(0, 1)
            if unif <= prob:
                self.state = successor
    return self.state, hist_t, hist_fit
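The exec method relies on attributes and helpers (T0, alpha, length, colors, state, schedule, fitness) that the enclosing class is assumed to provide. A minimal, hypothetical skeleton of such a class for graph coloring, with a usage line at the end; everything outside exec itself is an assumption for illustration, not part of the original record:

import copy
import math
import random as rnd

class GraphColoringSA:
    def __init__(self, graph, colors, T0=100.0, alpha=0.95):
        self.graph = graph                  # adjacency list: vertex -> neighbours
        self.length = len(graph)            # number of vertices
        self.colors = colors
        self.T0, self.alpha = T0, alpha
        self.state = [rnd.randint(1, colors) for _ in range(self.length)]

    def schedule(self, T0, alpha, t):
        # geometric cooling; the temperature approaches but never reaches 0
        return T0 * (alpha ** t)

    def fitness(self, state):
        # negative count of conflicting edges, so higher fitness is better
        return -sum(1 for u, nbrs in self.graph.items()
                    for v in nbrs if v > u and state[u] == state[v])

    # exec(self, count) as defined above would be a method of this class

# usage sketch (graph and parameter values are made up):
graph = {0: [1, 2], 1: [0, 2, 3], 2: [0, 1], 3: [1]}
solver = GraphColoringSA(graph, colors=3)
state, hist_t, hist_fit = solver.exec(count=1000)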
|
[
"def next_generation(self, survivors=5, chance_of_mutation=0.01):\n\n self.sort_by_fitness()\n # add fitness of current generation to self.fitness_history:\n self.fitness_history.append(self.individuals['Fitness'])\n # assign fittest individuals to next generation\n new_individuals = self.individuals.iloc[0:survivors, :]\n # calculate probability of individual getting selected for reproduction:\n p = np.array(self.individuals.index)\n p = self.size - p\n p = p/p.sum()\n for i in range(survivors, self.size):\n [first_parent, second_parent] = np.random.choice(self.individuals['Individual'], p=p, size=2)\n first_parent = first_parent.zip()\n second_parent = second_parent.zip()\n # set new individual as first parent and change weights and biases to values from second parent randomly\n new_individual = first_parent\n choice = np.random.choice([True, False], size=len(first_parent))\n new_individual[choice] = second_parent[choice]\n # mutate:\n mutation_array = JanniksNN(input_size=self.individuals['Individual'][0].input_size,\n hidden_layers=self.individuals['Individual'][0].layers,\n output_size=self.individuals['Individual'][0].output_size).initialize().zip()\n choice = np.random.choice([True, False], size=len(first_parent),\n p=[chance_of_mutation, 1-chance_of_mutation])\n new_individual[choice] = mutation_array[choice]\n new_individual = JanniksNN(input_size=self.individuals['Individual'][0].input_size,\n hidden_layers=self.individuals['Individual'][0].layers,\n output_size=self.individuals['Individual'][0].output_size).unzip(new_individual)\n new_individuals = new_individuals.append(pd.DataFrame(index=[i], columns=['Individual', 'Fitness'],\n data=[[new_individual, np.nan]]))\n\n self.individuals = new_individuals\n self.generation += 1",
"def generational_succession(self):\n self.population = self.children\n self.rate_population()",
"def selection_tournament(self):\n for i in range(len(self.population)):\n self.population[i].perform_calculations()\n fitness_vals = \",\".join([str(i.fitness) for i in self.population])\n logger.info(f\"New fitness values: {fitness_vals}\")\n\n new_generation = []\n # Currently only 1v1 selection\n for i in range(len(self.population)):\n # Make sure that we don't select current individual\n individual1 = random.randint(0, len(self.population)-1)\n while individual1 == i:\n individual1 = random.randint(0, len(self.population)-1)\n\n # WE WANT TO ADD LOWER FITNESS. LOWER IS MORE STABLE\n if self.population[i].fitness <= self.population[individual1].fitness:\n new_generation.append(copy.deepcopy(self.population[i]))\n\n else:\n new_generation.append(copy.deepcopy(self.population[individual1]))\n\n self.population = new_generation",
"def fitness(score, nbCouleur=6):\n return (nbCouleur * score[0] + score[1])",
"def select(self) -> None:\n\n # Calculate the fitness of each individual in the population\n self.fitness_values = {}\n for individual in self.population:\n self.fitness_values[individual] = self.fitness_calculator(individual)\n\n # This can be improved, but for now, we will just sort the dictionary to find survivors\n self.s_values = sorted(self.fitness_values.values())[:self.survival]\n\n self.survivors = {k: v for k, v in self.fitness_values.items()\n if v in self.s_values and v != np.Infinity}\n\n # Sort the survivor dictionary to speed favor best survivors in mating\n self.survivors = dict(sorted(self.survivors.items(),\n key=lambda item: item[1]))",
"def prob_estimation(n):\n truecount = 0\n for i in range(n):\n test = gen_rand_23()\n if has_duplicates(test):\n truecount += 1\n return truecount",
"def roulette_selection(self):\n #.....calc all the probabilities\n prob= []\n for ind in self.population:\n prob.append(self.fitfunc(ind.value))\n\n istore=[]\n for i in range(len(self.population)):\n istore.append(0)\n\n for i in range(self.nindv):\n ptot= 0.0\n for ii in range(len(self.population)):\n if istore[ii] == 1: continue\n ptot += prob[ii]\n prnd= random()*ptot\n #print i,ptot\n ptot= 0.0\n for ii in range(len(self.population)):\n if istore[ii] == 1: continue\n ptot= ptot +prob[ii]\n #print ii,prnd,ptot\n if prnd < ptot:\n istore[ii]= 1\n break\n\n while istore.count(0) > 0:\n idx= istore.index(0)\n del self.population[idx]\n del istore[idx]\n\n if len(self.population) != self.nindv:\n print \"{0:*>20}: len(self.population != self.nindv) !!!\".format(' Error')\n print len(self.population), self.nindv\n exit()",
"def step(parents: be.Population, fitness: be.Fitness) -> tuple:\n recombination_schema = 'edge-3' # Other possible options are: 'pmx', 'order' or 'cycle'\n mutation_schema = 'inversion' # Other possible options are: 'swap', 'insert' or 'scramble'\n mutation_probability = 0.3 \n mutation_possible_events = 3\n ranking_selection_schema = 'tournament' # Other possible options for ranking selection are: 'sus' or 'roulette'\n tournament_k = 2\n tournament_w = 1\n tournament_replacement = False\n elite_size = 0.1 # Select the 10% of the best individuals for the next generation\n annihilation_size = 0.1 # Remove the 10% of the least-fitted individuals\n\n # -- ALGORITHM STEPS -- #\n\n # Generate offspring (offspring size == parents size)\n offspring = be.recombination(population=parents, n=len(parents), schema=recombination_schema)\n\n # Mutate offspring\n be.mutation(population=offspring, probability=mutation_probability,\n possible_events=mutation_possible_events, schema=mutation_schema)\n\n # Evaluate offspring fitness\n be.evaluate(population=offspring, fitness_function=fitness)\n\n # Merge offspring and parents\n parents_offspring = be.merge_populations(parents, offspring)\n\n # Select elite population\n elite = be.survivor_selection(population=parents_offspring, schema='elitism', select=elite_size)\n\n # Annihilate least-fitted individuals\n parents_offspring = be.survivor_selection(\n population=parents_offspring, schema='annihilation', annihilate=annihilation_size)\n\n # Apply ranking selection (by selecting a population with a similar size to the parents minus the size of the elite)\n next_generation = be.ranking_selection(\n population=parents_offspring, n=len(parents) - len(elite), schema=ranking_selection_schema,\n w=tournament_w, k=tournament_k, replacement=tournament_replacement)\n\n # Adding the elite to the next generation population\n next_generation = be.merge_populations(next_generation, elite)\n\n # Create the population report\n report.create_report(population=next_generation, population_name='Basic GA population', increment_generation=True)\n\n # If we only wanted to return the first solution found, we could return an EarlyStopping object, which will indicate\n # to the algorithm that the execution is finished\n for individual in next_generation:\n if individual.fitness[0] == np.inf:\n return next_generation, be.EarlyStopping(individual)\n\n return next_generation, None",
"def selection(self):\n while len(self.population):\n individual, opponent = self.population[0], self.population[random.randint(1, len(self.population)-1)]\n self.new_generation.append(individual) if individual.fitness > opponent.fitness else self.new_generation.append(opponent)\n self.population.remove(individual)\n self.population.remove(opponent)",
"def mutation_v1(self, rate=0.002):\n # next_pop = self.crossover_a()\n # next_pop = self.crossover_b()\n # print('here4')\n # TODO: more randomised mutation for crossover b\n for i in self.population:\n # print(i)\n temp = random.randint(1, 1000)\n if temp <= 50:\n # change in mutation method: instead of switch, change value itself using random\n temp_3 = random.randint(0, 15)\n i.phenotype[temp_3] = random.uniform(-1, 1)\n # conduct mutation\n # temp_2 = random.random()\n # # if value below 0.5 then swap genetic bits, else swap learned bits\n # if temp_2 < 0.5:\n # i.phenotype[0], i.phenotype[1] = i.phenotype[1], i.phenotype[0]\n # else:\n # i.phenotype[2], i.phenotype[3] = i.phenotype[3], i.phenotype[2]\n\n # now final version of next_pop has been created\n # creating Individual objects now\n final_next_pop = []\n for i in self.population:\n final_next_pop.append(i)\n\n next_gen = NewPopulation(final_next_pop)\n # for i in next_gen.population:\n # i.calculate_fitness()\n # print('ind', i.fitness)\n return next_gen",
"def evolve(self):\n while True:\n self.population.sort(key = lambda candidate: candidate.fitness())\n\n if self.population[0].fitness() == 0:\n return(self.generation)\n\n if self.generation == self.max_generations:\n return(False)\n\n self.generation += 1\n\n rnd1 = int(random.random() * random.random() * (self.size -1))\n rnd2 = int(random.random() * random.random() * (self.size -1))\n parent1 = self.population[rnd1]\n parent2 = self.population[rnd2]\n child = parent1.crossover(parent2)\n child.mutate()\n\n if child.fitness() < self.population[-1].fitness():\n self.population[-1] = child",
"def test(numTrials):\n # Your Code Here\n n = 100\n yes = 0\n for i in range(numTrials):\n africa = 0\n europe = 0\n samerica = 0\n asia = 0\n for i in range(n):\n rand = random.random()\n if rand < 0.25:\n africa += 1\n if rand < 0.5 and rand > 0.25:\n europe += 1\n if rand < 0.75 and rand > 0.5:\n samerica += 1\n if rand > 0.75:\n asia += 1\n #print africa, samerica, asia, europe\n if asia >= 30 or africa >= 30 or europe >= 30 or samerica >= 30:\n yes += 1\n prob = float(yes)/float(numTrials)\n return prob",
"def evaluate_population(self):\n \"\"\" YOUR CODE HERE!\"\"\"\n min_fitness = np.inf\n for gene in self.gene_pool:\n self.args[\"pacman\"] = gene\n out = runGames(**self.args)\n fitness = 0\n div = 0\n for o in out:\n fitness += o.state.getScore()\n div += 1\n fitness /= div\n if min_fitness > fitness:\n min_fitness = fitness\n gene.fitness = fitness\n\n self.gene_pool.sort(key=lambda x: x.fitness)\n return fitness",
"def random_live(self):\n #random assignment of fittnes \n for chrom in self.chromosomes:\n chrom.strength = random.random()\n self.chromosomes.sort(key=lambda chromosomes: chromosomes.strength, reverse = True)\n\n self.bestChromosomes = self.chromosomes[0:2]",
"def supp_random_learning(training_idx, budget):\n supp_idx=[]\n while len(supp_idx)<budget:\n supp_idx.append(random.choice(training_idx))\n supp_idx=list(set(supp_idx))\n supp_idx=sorted(supp_idx)\n return {'novo_idx':supp_idx,'type':'random'}",
"def policy_iteration(state_count, gamma, theta, get_available_actions, get_transitions):\n V = state_count*[0] # init all state value estimates to 0\n pi = state_count*[0]\n nA = len(get_available_actions(0))\n prob_pi_act = [[1/nA, 1/nA, 1/nA, 1/nA]]*state_count\n # init with a policy with first avail action for each state\n for s in range(state_count):\n avail_actions = get_available_actions(s)\n pi[s] = avail_actions[0]\n while True:\n while True:\n delta = 0\n for i in range(1,state_count):\n tt = 0\n avail_actions = get_available_actions(i)\n for act in avail_actions:\n next_state, reward, prob = get_transitions(state=i,action=act)[0]\n tt += prob_pi_act[i][avail_actions.index(act)]*prob*(reward + gamma*V[next_state])\n delta = max(delta,abs(tt - V[i]))\n V[i] = tt\n if delta < theta:\n break\n \n policy_stable = True\n for i in range(1,state_count):\n old_action = pi[i]\n tmp = -9999\n avail_actions = get_available_actions(i)\n for act in avail_actions:\n next_state, reward, prob = get_transitions(state=i,action=act)[0]\n _tmp = prob*(reward + gamma*V[next_state])\n if tmp < _tmp:\n tmp = _tmp\n pi[i] = act\n if old_action != pi[i]:\n policy_stable = False\n __tmp = [0]*nA\n __tmp[avail_actions.index(pi[i])] = 1\n prob_pi_act[i] = __tmp\n if policy_stable:\n break\n\n # insert code here to iterate using policy evaluation and policy improvement (see Policy Iteration algorithm)\n return (V, pi) # return both the final value function and the final policy",
"def makeMatchSet(self, state_phenotype, exploreIter):\r\n #Initial values----------------------------------\r\n state = state_phenotype[0]\r\n phenotype = state_phenotype[1]\r\n doCovering = True # Covering check: Twofold (1)checks that a match is present, and (2) that at least one match dictates the correct phenotype.\r\n setNumerositySum = 0\r\n #-------------------------------------------------------\r\n # MATCHING\r\n #-------------------------------------------------------\r\n cons.timer.startTimeMatching()\r\n for i in range(len(self.popSet)): # Go through the population\r\n cl = self.popSet[i] # One classifier at a time\r\n epochCompleted = False\r\n epochCompleted = cl.updateEpochStatus(exploreIter) # Note whether this classifier has seen all training data at this point.\r\n# if epochCompleted:\r\n# self.ECPopSize += cl.numerosity #Epoch Complete - Micro Pop Size\r\n# self.ENCPopSize -= cl.numerosity #Epoch Not Complete - Micro Pop Size\r\n #Fitness Update------------------------------\r\n if not cl.epochComplete and (exploreIter - cl.lastMatch) >= cons.noMatchUpdate:\r\n cl.briefUpdateFitness(exploreIter)\r\n\r\n if cl.match(state): # Check for match\r\n cl.lastMatch = exploreIter # Experimental::::: for brief fitness update.\r\n self.matchSet.append(i) # If match - add classifier to match set\r\n setNumerositySum += cl.numerosity # Increment the set numerosity sum\r\n #Covering Check--------------------------------------------------------\r\n if cons.env.formatData.discretePhenotype: # Discrete phenotype\r\n if cl.phenotype == phenotype: # Check for phenotype coverage\r\n doCovering = False\r\n else: #ContinuousCode #########################\r\n if float(cl.phenotype[0]) <= float(phenotype) <= float(cl.phenotype[1]): # Check for phenotype coverage\r\n doCovering = False\r\n\r\n cons.timer.stopTimeMatching()\r\n #-------------------------------------------------------\r\n # COVERING\r\n #-------------------------------------------------------\r\n while doCovering:\r\n cons.timer.startTimeCovering()\r\n newCl = Classifier(setNumerositySum+1,exploreIter, state, phenotype)\r\n self.addCoveredClassifierToPopulation(newCl)\r\n self.matchSet.append(len(self.popSet)-1) # Add covered classifier to matchset\r\n doCovering = False\r\n cons.timer.stopTimeCovering()",
"def choose_parents(population):\n chosen_index = []\n total = sum([get_fitness(chromosom) for chromosom in population])\n while len(chosen_index) != 5:\n partial = randint(0,total)\n for i in range(len(population)):\n partial += get_fitness(population[i])\n if partial > total and i not in chosen_index:\n chosen_index.append(i)\n break\n return chosen_index",
"def runGA(self, time, state, numberOfActions, maxPopSize, doGASubsumption, selection):\r\n \r\n # Don't do a GA if the theta_GA threshold is not reached, yet\r\n if self.getSize()==0 or (time-self.getTimeStampAverage()) < cons.theta_GA:\r\n return\r\n \r\n self.setTimeStamps(time)\r\n \r\n if selection == 0:\r\n fitSum =self.getFitnessSum()\r\n # Select two XClassifiers with roulette Wheel Selection\r\n cl1P = self.selectXClassifierRW(fitSum)\r\n cl2P = self.selectXClassifierRW(fitSum)\r\n else:\r\n cl1P = self.selectXClassifierT()\r\n cl2P = self.selectXClassifierT()\r\n \r\n cl1 = XClassifier(cl1P)\r\n cl2 = XClassifier(cl2P)\r\n \r\n changed = cl1.twoPointCrossover(cl2)\r\n if changed:\r\n cl1.setPrediction((cl1.getPrediction() + cl2.getPrediction())/2.0)\r\n cl1.setPredictionError(cons.predictionErrorReduction * (cl1.getPredictionError() + cl2.getPredictionError())/2.0)\r\n cl1.setFitness(cons.fitnessReduction * (cl1.getFitness() + cl2.getFitness())/2.0)\r\n cl2.setPrediction(cl1.getPrediction())\r\n cl2.setPredictionError(cl1.getPredictionError())\r\n cl2.setFitness(cl1.getFitness())\r\n else: # ensure that the fitness discount is still applied for having run the GA at all\r\n cl1.setFitness(cons.fitnessReduction * cl1.getFitness())\r\n cl2.setFitness(cons.fitnessReduction * cl2.getFitness())\r\n \r\n cl1.applyMutation(state, numberOfActions)\r\n cl2.applyMutation(state, numberOfActions)\r\n \r\n self.insertDiscoveredXClassifiers(cl1, cl2, cl1P, cl2P, maxPopSize, doGASubsumption)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Fits the given training data on random forest and logistic regression classifiers, carries out hyperparameter optimization on both, and returns the scoring results along with the best model.
|
def prediction(X_train, y_train):
assert X_train.shape[0] == y_train.shape[0], "data sets not the same size"
results_dict = {}
# set scoring
scoring = ['f1', 'accuracy'] # use f1 scoring because of class imbalance
# baseline model
print("Running baseline")
dummy_model = DummyClassifier(strategy='prior')
scores = cross_validate(dummy_model, X_train, y_train, return_train_score=True, scoring=scoring)
store_results("Baseline", scores, results_dict)
# model 1 Random Forest
print("Running model 1")
rf_model = make_pipeline(RandomForestClassifier())
scores = cross_validate(rf_model, X_train, y_train, return_train_score=True, scoring=scoring)
# scores
store_results("Random Forest", scores, results_dict)
# model 2 Logistic Regression
print("Running model 2")
logreg_pipeline = make_pipeline(LogisticRegression(max_iter=600, class_weight="balanced"))
scores = cross_validate(logreg_pipeline, X_train, y_train, return_train_score=True, scoring=scoring)
store_results("Logistic Regression", scores, results_dict)
    results_dict = pd.DataFrame(results_dict)
print(results_dict)
# hyperparameter optimization on best models
print("Optimizing hyperparameters for model 1")
param_dist = {
"n_estimators": scipy.stats.randint(low=10, high=300),
"max_depth": scipy.stats.randint(low=1, high=5000)
}
random_search = RandomizedSearchCV(RandomForestClassifier(), param_dist, n_iter=5, cv=5, random_state=120, scoring=scoring[0])
random_search.fit(X_train, y_train)
best_score_rf = random_search.best_score_
    best_est_rf = random_search.best_estimator_  # keep the fitted estimator itself, not a DataFrame wrapper
best_cv_rf = random_search.cv_results_
hyperparam_df = pd.DataFrame(best_cv_rf)[['mean_test_score', 'params']]
hyperparam_df['model'] = 'RandomForest'
print("Optimizing hyperparameters for model 2")
param_dist = {
"class_weight": ["balanced", "none"],
"C": scipy.stats.randint(low=0, high=1000)
}
random_search = RandomizedSearchCV(LogisticRegression(max_iter=600), param_dist, n_iter=5, cv=5, random_state=120, scoring=scoring[0])
random_search.fit(X_train, y_train)
best_cv_logr = random_search.cv_results_
best_hp_log = random_search.best_estimator_
log_reg_df = pd.DataFrame(best_cv_logr)[['mean_test_score', 'params']]
log_reg_df['model'] = 'LogisticRegression'
# Compile results of hyperparameter optimization
    hyperparam_df = pd.concat([hyperparam_df, log_reg_df]).sort_values('mean_test_score', ascending=False).reset_index(drop=True)
column_test_name = "mean " + scoring[0] +" score"
hyperparam_df = hyperparam_df.rename(columns={'mean_test_score': column_test_name})
# Pick best classifier
    if hyperparam_df["model"][0] == 'RandomForest':
        best_model = best_est_rf
    else:
        best_model = best_hp_log
return hyperparam_df, best_model, results_dict
|
[
"def model_fit(df, features_to_use, random_state, **kwargs):\r\n\r\n # read in boosted tree paramters\r\n lr, n_est, max_depth = get_params(**kwargs['get_params'])\r\n\r\n\r\n ## fit model on historical player data\r\n try:\r\n X = df[features_to_use]\r\n y = df['HOF_A']\r\n except:\r\n logger.error(\"features to use or target variable is not in the dataframe\")\r\n\r\n model = GradientBoostingClassifier(learning_rate = lr, n_estimators = n_est,\r\n max_depth = max_depth, random_state=random_state)\r\n\r\n model.fit(X,y)\r\n\r\n ## feature importance\r\n\r\n importance = pd.DataFrame(index=features_to_use)\r\n importance['feature_importance'] = model.feature_importances_\r\n importance = importance.sort_values(by='feature_importance', ascending=False)\r\n logger.info(\"%s is the most important variable in predicting a player's HOF probability\", importance.index[0])\r\n\r\n ## evaluate model performance by running multiple reps of cross validation to obtain an F-1 score on the minority class (HOF)\r\n\r\n # read in CV paramters\r\n n_splits, n_repeats = get_cv_params(**kwargs['get_cv_params'])\r\n\r\n # define scorer function: F-1 score on minority class\r\n myscore = make_scorer(f1_score, average='macro',labels=[1])\r\n # run K-fold cv and obtain scores\r\n cv = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=random_state)\r\n\r\n scores = cross_validate(model, X, y, scoring=myscore, cv=cv,\r\n return_train_score=False)\r\n # take average score across all fits in CV\r\n f_score = np.mean(scores['test_score'])\r\n\r\n logger.info(\"the cross validation f1-score is %s\", f_score)\r\n\r\n return model, importance, f_score",
"def evaluate(self, train_data, test_data):\n tot_time = time.time()\n\n LGMSimVars.per_metric_optValues = config.MLConf.opt_values[self.encoding.lower()]\n assert (os.path.isfile(os.path.join(config.default_data_path, train_data))), \\\n f'{train_data} dataset does not exist'\n assert (os.path.isfile(os.path.join(config.default_data_path, test_data))), \\\n f'{test_data} dataset does not exist'\n\n f = Features()\n pt = hyperparam_tuning.ParamTuning()\n\n start_time = time.time()\n f.load_data(os.path.join(config.default_data_path, train_data), self.encoding)\n fX_train, y_train = f.build()\n print(\"Loaded train dataset and build features for {} setup; {} sec.\".format(\n config.MLConf.classification_method, time.time() - start_time))\n\n start_time = time.time()\n f.load_data(os.path.join(config.default_data_path, test_data), self.encoding)\n fX_test, y_test = f.build()\n print(\"Loaded test dataset and build features; {} sec\".format(time.time() - start_time))\n\n for clf in config.MLConf.clf_custom_params:\n print('Method {}'.format(clf))\n print('=======', end='')\n print(len(clf) * '=')\n\n start_time = time.time()\n # 1st phase: train each classifier on the whole train dataset (no folds)\n estimator = pt.clf_names[clf][0](**config.MLConf.clf_custom_params[clf])\n estimator = pt.trainClassifier(fX_train, y_train, estimator)\n print(\"Finished training model on dataset; {} sec.\".format(time.time() - start_time))\n\n start_time = time.time()\n # 2nd phase: test each classifier on the test dataset\n metrics = pt.testClassifier(fX_test, y_test, estimator)\n self._print_stats({'classifier': clf, **metrics, 'time': start_time})\n\n print(\"The whole process took {} sec.\\n\".format(time.time() - tot_time))",
"def fit_and_score(models, X_train, X_test, y_train, y_test):\n # Random seed for reproducible results\n np.random.seed(42)\n # Make a list to keep model scores\n model_scores = {}\n # Loop through models\n for name, model in models.items():\n # Fit the model to the data\n model.fit(X_train, y_train)\n # Evaluate the model and append its score to model_scores\n model_scores[name] = model.score(X_test, y_test)\n return model_scores",
"def grid_search_lr(sampling, X_test, y_test, X_train, y_train):\n lr = LogisticRegression(solver='liblinear')\n pipeline = Pipeline(steps=[['sampling', sampling],\n ['classifier', lr]])\n param_grid_ = {'C': [\n 0.1, 1, 10, 100, 200, 500, 1000], 'penalty': ['l1', 'l2']}\n param_grid_clf = {'classifier__C': [\n 0.1, 1, 10, 100, 200, 500, 1000], 'classifier__penalty': ['l1', 'l2']}\n if sampling is None:\n estimator = lr\n param_grid = param_grid_\n else:\n estimator = pipeline\n param_grid = param_grid_clf\n # Fitting grid search to the train data with 5 folds\n gridsearch = GridSearchCV(estimator=estimator,\n param_grid=param_grid,\n cv=StratifiedKFold(\n n_splits=5, random_state=1, shuffle=True),\n scoring='f1')\n gridsearch.fit(X_train, y_train)\n y_pred = gridsearch.predict(X_test)\n y_pred_proba = gridsearch.predict_proba(X_test)[:, 1]\n print(\"Best: %f using %s\" %\n (gridsearch.best_score_, gridsearch.best_params_))\n conf_matrix = confusion_matrix(y_test, y_pred)\n # Calculating and printing the f1 score\n f1_train = gridsearch.best_score_\n f1_test = f1_score(y_test, y_pred)\n print('The f1 score for the testing data:', f1_test)\n auprc = average_precision_score(y_test, y_pred_proba)\n return f1_train, f1_test, auprc, conf_matrix, gridsearch",
"def find_best_overall():\n imgs, gts = gi.load_all_images('data/training/')\n X, Y = gi.produce_XY(imgs, gts)\n\n find_best_LogisticRegression(X, Y)\n find_best_BayesianRidge(X, Y)\n find_best_Ridge(X, Y)",
"def grid_search_rf(sampling, X_test, y_test, X_train, y_train):\n rf = RandomForestClassifier()\n pipeline = Pipeline(steps=[['sampling', sampling],\n ['classifier', rf]])\n param_grid_ = {\n 'max_depth': [2, 5, 10, 20],\n 'n_estimators': [10, 100, 1000],\n }\n param_grid_clf = {\n 'classifier__max_depth': [2, 5, 10, 20],\n 'classifier__n_estimators': [10, 100, 1000],\n # 'classifier__criterion': ['gini', 'entropy']\n }\n if sampling is None:\n estimator = rf\n param_grid = param_grid_\n else:\n estimator = pipeline\n param_grid = param_grid_clf\n # Fitting grid search to the train data with 5 folds\n gridsearch = GridSearchCV(estimator=estimator,\n param_grid=param_grid,\n cv=StratifiedKFold(\n n_splits=5, random_state=1, shuffle=True),\n scoring='f1')\n gridsearch.fit(X_train, y_train)\n y_pred = gridsearch.predict(X_test)\n y_pred_proba = gridsearch.predict_proba(X_test)[:, 1]\n print(\"Best: %f using %s\" %\n (gridsearch.best_score_, gridsearch.best_params_))\n conf_matrix = confusion_matrix(y_test, y_pred)\n # Calculating and printing the f1 score\n f1_train = gridsearch.best_score_\n f1_test = f1_score(y_test, y_pred)\n print('The f1 score for the testing data:', f1_test)\n auprc = average_precision_score(y_test, y_pred_proba)\n return f1_train, f1_test, auprc, conf_matrix, gridsearch",
"def random_forest_classifier(self, X_train, y_train, X_test, y_test):\n time_start = time.time()\n \n rf_model = self.randomforest.fit(X_train, y_train)\n \n preds = rf_model.predict(X_test)\n preds_proba = rf_model.predict_proba(X_test)\n score = rf_model.score(X_test, y_test)\n \n time_elapsed = time.time() - time_start\n \n # Update feature_importances list to keep track of each trial's results\n self.feature_importances.append(self.randomforest.feature_importances_)\n \n return {'Score' : score, 'Time' : time_elapsed, 'Pred': preds, 'Pred_Proba':preds_proba, 'True':y_test}",
"def evaluate(X, Y, hyperparams):\n\n# from scikits.learn.cross_val import LeaveOneOut\n# loo = LeaveOneOut(len(Y))\n from scikits.learn.cross_val import KFold\n K = 5\n# print >> sys.stderr, \"Using 10-fold cross-validation\"\n loo = KFold(len(Y), K)\n# print loo\n\n all_y_test = []\n all_y_test_predict = []\n\n nlltotal = 0.\n for train, test in loo:\n trainidx = [idx for idx in range(len(train)) if train[idx]]\n testidx = [idx for idx in range(len(test)) if test[idx]]\n X_train, X_test, y_train, y_test = X[trainidx], X[testidx], Y[trainidx], Y[testidx]\n# print \"train\", X_train.shape, y_train.shape\n# print \"test\", X_test.shape, y_test.shape\n\n if len(frozenset(y_train)) == 1:\n # Skip training on this LOO set if there is only one y-value in the training set\n continue\n\n clf = fit_classifier(X_train, y_train, hyperparams)\n\n# print \"target\", y_test\n## print \"predict\", clf.predict(X_test)\n# print \"predict\", clf.predict_proba(X_test)\n## print \"df\", clf.decision_function(X_test)\n## print \"score\", clf.score(X_test, y_test)\n\n# y_test_predict = clf.predict_proba(X_test)\n y_test_predict = clf.predict(X_test)\n# print y_test_predict\n\n all_y_test.append(y_test)\n all_y_test_predict.append(y_test_predict)\n\n## print clf.best_estimator\n# print precision_score(y_test, y_test_predict)\n# print recall_score(y_test, y_test_predict)\n# print classification_report(y_test, y_test_predict)\n#\n#\n# assert y_test.shape == (1,)\n# assert y_test_predict.shape == (1,)\n# if y_test_predict[0] >= 1.:\n## print >> sys.stderr, \"WHA? y_test_predict[0] %f >= 1. !!!\" % y_test_predict[0]\n# y_test_predict[0] = 1-1e-9\n# elif y_test_predict[0] <= 0.:\n## print >> sys.stderr, \"WHA? y_test_predict[0] %f <= 0. !!!\" % y_test_predict[0]\n# y_test_predict[0] = 1e-9\n#\n# if y_test[0] == 1:\n# probtarget = y_test_predict[0]\n# else:\n# assert y_test[0] == 0\n# probtarget = 1-y_test_predict[0]\n## print \"probtarget\", probtarget\n## print y_test[0], y_test_predict[0], repr(probtarget)\n# nll = -math.log(probtarget)\n## print \"nll\", nll\n## print\n#\n# nlltotal += nll\n# nlltotal /= len(Y)\n## print \"nlltotal %f (alpha=%f, n_iter=%d)\" % (nlltotal, alpha, n_iter)\n# return nlltotal\n\n y_test = numpy.hstack(all_y_test)\n y_test_predict = numpy.hstack(all_y_test_predict)\n assert y_test.ndim == 1\n assert y_test_predict.ndim == 1\n assert Y.shape == y_test.shape\n assert y_test.shape == y_test_predict.shape\n# import plot\n# print \"precision_recall_fscore_support\", scikits.learn.metrics.precision_recall_fscore_support(y_test, y_test_predict)\n f1 = f1_score(y_test, y_test_predict)\n# print \"\\tf1 = %0.3f when evaluating with %s\" % (f1, hyperparams)\n# sys.stdout.flush()\n# precision, recall, thresholds = scikits.learn.metrics.precision_recall_curve(y_test, y_test_predict)\n# plot.plot_precision_recall(precision, recall)\n# print \"confusion_matrix\", scikits.learn.metrics.confusion_matrix(y_test, y_test_predict)\n# print \"roc_curve\", scikits.learn.metrics.roc_curve(y_test, y_test_predict)\n# fpr, tpr, thresholds = scikits.learn.metrics.roc_curve(y_test, y_test_predict)\n# print \"auc\", scikits.learn.metrics.auc(fpr, tpr)\n# plot.plot_roc(fpr, tpr)\n return f1",
"def _train_all(names, classifiers,\r\n X, y, X_train, X_test, y_train, y_test,\r\n stats=True, predict=\"\"):\r\n ## ignore numpy warnings\r\n from warnings import filterwarnings\r\n filterwarnings('ignore')\r\n ## cycle around each classifier\r\n classes = {1:\"LIKELY\", -1:\"UNLIKELY\"}\r\n score = {1:0, -1:0}\r\n trusts = {}\r\n predictions = {}\r\n for name, classifier in zip(names, classifiers):\r\n ## train each classifier\r\n classifier.fit(X_train, y_train)\r\n if stats == True:\r\n _get_statistics(name, classifier, X, y, X_test, y_test)\r\n if predict != \"\":\r\n ## Make prediction\r\n prediction = classifier.predict(predict)[0]\r\n\r\n ## Increment counter for relevant score\r\n score[prediction] += 1\r\n predictions.update({name:prediction})\r\n \"\"\"\r\n reveal expected true negatives, false positives,\r\n false negatives, true positives\r\n \"\"\"\r\n tn, fp, fn, tp = c_m(y_test, classifier.predict(X_test)).ravel()\r\n ## trust is the amount of time that the prediction was correct\r\n trust_score = tp/(tp + fp) if prediction == 1 else tn/(tn + fn)\r\n trust_score = round((trust_score * 100), 2)\r\n trusts.update({name:trust_score})\r\n if predict != \"\":\r\n scores = pd.DataFrame({'Recurrence':predictions,\r\n 'Confidence':trusts})\r\n pred_weight = scores.Recurrence * scores.Confidence\r\n weights = pd.DataFrame({'Weights':pred_weight})\r\n scores['Recurrence'] = scores['Recurrence'].apply(lambda x: classes[x])\r\n print(scores)\r\n classification = 1 if weights.Weights.mean() > 0 else -1\r\n print(f\"\\nRecurrence judged {classes[classification]} at \\\r\n{round(abs(weights.Weights.mean()),2)} % confidence\")\r\n print(f\"Poll of classifiers results:\")\r\n for index in score:print(f\"{classes[index]}: \\t\\t{score[index]}\")",
"def fit(self , X , y):\n if self.train_or_test == 'test':\n X_train , X , y_train , y = train_test_split(X , y ,test_size= self.split)\n self.estimator.fit(X_train , y_train)\n elif self.train_or_test == 'train':\n self.estimator.fit(X , y)\n else:\n raise ValueError('The train_or_test parameter can only be \"train\" or \"test\"')\n\n explainer = shap.TreeExplainer(self.estimator , feature_perturbation = \"tree_path_dependent\")\n self.feature_importances_ = np.abs(explainer.shap_values(X)).mean(axis=0) \n return",
"def train_and_test():\n\ttrain_data, test_data, test_users, test_movies = get_train_data()\n\tprint \"loaded train & test data\"\n\tcf = collaborative_filtering(train_data)\n\t# evaluate the collaborative filtering model by printing the rmse value for the test data\n\tprint cf.score(test_data)",
"def runRegressors(trainingData, trainingActions, testData, testActions):\n\n regressorRF = RandomForestRegressor(bootstrap=True, verbose=0, criterion=\"mse\", max_features=\"auto\", oob_score=True, max_samples=0.1)\n regressorBR = BaggingRegressor(bootstrap=True, n_estimators=20)\n regressorGB = GradientBoostingRegressor(max_depth=4, loss=\"lad\", learning_rate=0.1)\n regressorGB2 = GradientBoostingRegressor(max_depth=5, loss=\"quantile\", learning_rate=0.1, alpha=0.6)\n regressorXG = XGBRFRegressor() \n\n regressorRF.fit(trainingData, trainingActions)\n regressorBR.fit(trainingData, trainingActions)\n regressorGB.fit(trainingData, trainingActions)\n regressorGB2.fit(trainingData, trainingActions)\n regressorXG.fit(trainingData, trainingActions)\n\n score = []\n score.append(round(regressorRF.score(testData, testActions), 2))\n score.append(round(regressorBR.score(testData, testActions), 2))\n score.append(round(regressorGB.score(testData, testActions), 2))\n score.append(round(regressorGB2.score(testData, testActions), 2))\n score.append(round(regressorXG.score(testData, testActions), 2))\n\n predictionsRF = regressorRF.predict(testData)\n predictionsBR = regressorBR.predict(testData)\n predictionsGB = regressorGB.predict(testData)\n predictionsGB2 = regressorGB2.predict(testData)\n predicitionsXG = regressorXG.predict(testData)\n\n print(\"-\")\n\n predictionsRF = np.rint(predictionsRF)\n predictionsBR = np.rint(predictionsBR)\n predictionsGB = np.rint(predictionsGB)\n predictionsGB2 = np.rint(predictionsGB2)\n predicitionsXG = np.rint(predicitionsXG)\n\n accuracy = []\n accuracy.append(round(accuracy_score(testActions, predictionsRF), 2))\n accuracy.append(round(accuracy_score(testActions, predictionsBR), 2))\n accuracy.append(round(accuracy_score(testActions, predictionsGB), 2))\n accuracy.append(round(accuracy_score(testActions, predictionsGB2), 2))\n accuracy.append(round(accuracy_score(testActions, predicitionsXG), 2))\n\n confuse = confusion_matrix(testActions, np.array(predictionsBR).astype(int))\n\n print(confuse)\n models = [regressorRF, regressorBR, regressorGB, regressorGB2, regressorXG]\n print(\"\")\n return models, score, accuracy",
"def stack_models_and_evaluate_accuracy(train_probas, val_probas, test_probas, y_train, y_val, y_test):\n logreg = LogisticRegression()\n rfc = RandomForestClassifier(n_estimators=200, max_depth=20)\n\n print(\"Stacking using Random Forest:\")\n print(\"-----------------------------\")\n stacking_accuracy_logreg, train_stack_logreg_pred, val_stack_logreg_pred, test_stack_logreg_pred = \\\n stack_models(train_probas, val_probas, y_train, y_val, logreg, test_probas, y_test)\n print(\"train, validation and test accuracy scores:\", stacking_accuracy_logreg)\n\n print(\"Stacking using Logistic Regression:\")\n print(\"-----------------------------------\")\n stacking_accuracy_rfc, train_stack_rfc_pred, val_stack_rfc_pred, test_stack_rfc_pred = \\\n stack_models(train_probas, val_probas, y_train, y_val, rfc, test_probas, y_test)\n print(\"train, validation and test accuracy scores:\", stacking_accuracy_rfc)\n\n np.save(os.path.join('models', 'train_stack_logreg_pred.npy'), train_stack_logreg_pred)\n np.save(os.path.join('models', 'val_stack_logreg_pred.npy'), val_stack_logreg_pred)\n np.save(os.path.join('models', 'test_stack_logreg_pred.npy'), test_stack_logreg_pred)\n\n np.save(os.path.join('models', 'train_stack_rfc_pred.npy'), train_stack_logreg_pred)\n np.save(os.path.join('models', 'val_stack_rfc_pred.npy'), val_stack_logreg_pred)\n np.save(os.path.join('models', 'test_stack_rfc_pred.npy'), test_stack_logreg_pred)",
"def fit_model(X, y):\n \n # Create a decision tree regressor object\n regressor = DecisionTreeRegressor()\n\n # Set up the parameters we wish to tune\n parameters = {'max_depth':(1,2,3,4,5,6,7,8,9,10)}\n\n # Make an appropriate scoring function\n scoring_function = metrics.make_scorer(performance_metric, greater_is_better=False)\n\n # Make the GridSearchCV object\n reg = grid_search.GridSearchCV(regressor,parameters,scoring_function)\n\n # Fit the learner to the data to obtain the optimal model with tuned parameters\n reg.fit(X, y)\n\n # Return the optimal model\n return reg.best_estimator_",
"def train_and_evaluate(self, x_train, y_train, x_test, y_test):\n try:\n classifier = self.classifier_class(random_state=self.classifier_state, **self.classifier_parameters)\n except TypeError:\n classifier = self.classifier_class(**self.classifier_parameters)\n if self.data_balancer_class is not None:\n self.data_balancer = self.data_balancer_class(random_state=self.data_balancer_state)\n self.train_and_evaluate_fold(x_train, y_train, x_test, y_test, classifier, 0, data_balancer=self.data_balancer)\n\n # Error rates\n avg_metric_dict = self.ml_stats.calculate_average_results()\n\n return avg_metric_dict",
"def to_train_random_forest_classifier(X_train, X_test, y_train, y_test):\r\n\r\n # RandomForest generation with hyperparameters\r\n rfc = RandomForestClassifier(random_state=0)\r\n param_grid = { 'n_estimators': [5, 7], 'max_features': ['auto', 'sqrt', 'log2'], 'max_depth': [4, 5, 6, 7, 8], 'criterion': ['gini', 'entropy'], \"min_samples_split\": [2, 3, 10], \"min_samples_leaf\": [1, 3, 10], \"bootstrap\": [True, False] }\r\n clf = GridSearchCV(estimator=rfc, param_grid=param_grid, cv=5)\r\n clf.fit(X_train, y_train)\r\n y_pred = clf.predict(X_test)\r\n print(clf)\r\n\r\n return y_test, y_pred",
"def train_random_forest(X_train, y_train, X_test_vecs, X_test_strs, y_test):\n\trandomforest_clf = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0)\n\trandomforest_clf.fit(X_train, y_train)\n\tpredictions = predict(randomforest_clf, X_test_vecs, X_test_strs)\n\treturn precision_recall_fscore_support(y_test, predictions, average='binary')",
"def train_model(x_train, y_train, x_test, y_test, hyper_dict, hyperparam_table):\n\n hyperparam_table += [hyper_dict]\n try:\n clf = hyper_dict['model'](class_weight=hyper_dict['class_weight'], random_state=hyper_dict['random_state'])\n except:\n clf = hyper_dict['model']().set_params(**hyper_dict['params'])\n \n clf.fit(x_train, y_train)\n\n predictions_test = clf.predict(x_test)\n\n score = clf.score(x_test, y_test)\n hyperparam_table[-1]['test_score'] = score\n training_score = clf.score(x_train, y_train)\n hyperparam_table[-1]['train_score'] = training_score\n\n tn, fp, fn, tp = confusion_matrix(y_test, predictions_test).ravel()\n hyperparam_table[-1]['tn'] = tn\n hyperparam_table[-1]['fp'] = fp\n hyperparam_table[-1]['fn'] = fn\n hyperparam_table[-1]['tp'] = tp\n\n f1 = f1_score(y_test, predictions_test)\n hyperparam_table[-1]['f1_score'] = f1\n precision = precision_score(y_test, predictions_test)\n hyperparam_table[-1]['precision'] = precision\n recall = recall_score(y_test, predictions_test)\n hyperparam_table[-1]['recall'] = recall\n try:\n hyperparam_table[-1]['feature_importances'] = clf.coef_\n except:\n try:\n hyperparam_table[-1]['feature_importances'] = clf.feature_importances_\n except:\n hyperparam_table[-1]['feature_importances'] = None\n return clf, hyperparam_table",
"def fit_model(X, y):\n \n cv_sets = ShuffleSplit(X.shape[0], n_iter = 10, test_size = 0.20, random_state = 0)\n\n regressor = DecisionTreeRegressor()\n\n params = {'max_depth' : [1,2,3,4,5,6,7,8,9,10]}\n\n scoring_fnc = make_scorer(performance_metric)\n\n grid = grid_search.GridSearchCV(regressor, params, scoring=scoring_fnc, cv=cv_sets)\n\n grid = grid.fit(X, y)\n\n return grid.best_estimator_"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Samples the given frame.
|
def sample(self, frame):
frames = self.frame_stack(frame)
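    # drop the innermost entry; the current frame is handled separately below as the leaf node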
if frames:
frames.pop()
parent_stats = self.stats
for f in frames:
parent_stats = parent_stats.ensure_child(f.f_code, void)
stats = parent_stats.ensure_child(frame.f_code, RecordingStatistics)
stats.own_hits += 1
|
[
"def from_frame_and_timestamp(active_frame, timestamp_ms):\n stack_trace = []\n frame = active_frame\n while frame is not None:\n code = frame.f_code\n stack_trace.append((code, frame.f_lineno))\n frame = frame.f_back\n\n return ProfileSample(stack_trace, timestamp_ms)",
"def sample(self, size):",
"def sample(self, element: Any) -> None:\n self._sample_count += 1\n now = self.time()\n sample_diff = now - self._last_sample_sec\n\n if self._sample_count <= 10 or sample_diff >= self._sample_every_sec:\n self._samples.append(element)\n self._last_sample_sec = now",
"def sampled_frame_size_test(self, sampled_frame_size_test):\n\n self._sampled_frame_size_test = sampled_frame_size_test",
"def _sample(self,ep_ind,frame_ind, num=1, stride=1): \n ep = self.episodes[ep_ind]\n frames_until_end = len(ep.xs) - frame_ind \n frames_needed_from_start_ind = num*stride\n if frames_until_end < frames_needed_from_start_ind:\n assert False, \"end of episode reached and we didn't get all da frames yet!\"\n \n trans = make_empty_transition(self.args)\n for _ in range(num - 1):\n frame_kwargs = {k:ep._asdict()[k][frame_ind] for k in ep._asdict().keys() if k is not \"state_param_dict\" }\n append_to_trans(trans, **frame_kwargs)\n if \"state_param_dict\" in ep._fields:\n param_dict = {k:ep.state_param_dict[k][frame_ind] for k in ep.state_param_dict.keys()}\n append_to_trans_param_dict(trans,param_dict)\n frame_ind += stride\n \n \n frame = ep.xs[frame_ind]\n append_to_trans(trans,xs=frame)\n if \"state_param_dict\" in ep._fields:\n param_dict = {k:ep.state_param_dict[k][frame_ind] for k in ep.state_param_dict.keys()}\n append_to_trans_param_dict(trans,param_dict)\n return trans",
"def sample_extract(src, shots=18, shot_duration=5):\r\n return core.std.SelectEvery(src, src.num_frames//shots, range(0,round(src.fps*shot_duration))).std.AssumeFPS(src)",
"def PlaySample(self, *args):\n return _wiimote.wiimote_PlaySample(self, *args)",
"def _audio_sample_callback(self, in_data, frame_count, time_info, status):\n # time_start = time.time()\n # self._raw_audio_sample = np.frombuffer(in_data, dtype=np.float32)\n raw_sample = np.frombuffer(in_data, dtype=np.float32)\n\n in_sample_len = len(raw_sample)\n out_sample_len = MIC_RATE // self._config[\"sample_rate\"]\n\n if in_sample_len != out_sample_len:\n # Simple resampling\n processed_audio_sample = self.resampler.process(\n raw_sample,\n # MIC_RATE / self._stream.samplerate\n out_sample_len / in_sample_len\n # end_of_input=True\n )\n else:\n processed_audio_sample = raw_sample\n\n if len(processed_audio_sample) != out_sample_len:\n _LOGGER.warning(\n f\"Discarded malformed audio frame - {len(processed_audio_sample)} samples, expected {out_sample_len}\"\n )\n return\n\n # handle delaying the audio with the queue\n if self.delay_queue:\n try:\n self.delay_queue.put_nowait(processed_audio_sample)\n except queue.Full:\n self._raw_audio_sample = self.delay_queue.get_nowait()\n self.delay_queue.put_nowait(processed_audio_sample)\n self.pre_process_audio()\n self._invalidate_caches()\n self._invoke_callbacks()\n else:\n self._raw_audio_sample = processed_audio_sample\n self.pre_process_audio()\n self._invalidate_caches()\n self._invoke_callbacks()\n\n # print(f\"Core Audio Processing Latency {round(time.time()-time_start, 3)} s\")\n # return self._raw_audio_sample",
"def sample(self):\r\n # if the couter is bigger than bufferSize, which means the replaybuffer is full\r\n if self.Counter > self.bufferSize:\r\n rang = self.bufferSize\r\n else:\r\n rang = self.Counter\r\n indexs = np.random.choice(rang,size = self.batchSize)\r\n samples = self.buffer[indexs,:]\r\n return samples",
"def run(self):\n\n next_sample_time_seconds = self.time_fxn()\n sample_number = 0\n\n # Keep sampling until this thread is explicitly stopped.\n while not self.should_stop():\n # Take a sample of the main request thread's frame stack...\n self.profile.take_sample(sample_number)\n sample_number += 1\n\n # ...then sleep and let it do some more work.\n next_sample_time_seconds += (\n 1.0 / InspectingThread.SAMPLES_PER_SECOND)\n seconds_to_sleep = (\n next_sample_time_seconds - self.time_fxn())\n if seconds_to_sleep > 0:\n time.sleep(seconds_to_sleep)\n\n # Always take a sample at the end.\n self.profile.take_sample(sample_number, force_memory=True)",
"def frame_callback(self, frame):\n # Publish at the reduced rate.\n if not self.count % self.period:\n self.image_publisher.publish(frame)\n self.count += 1",
"def packet_get_samples_per_frame(cls, data: bytes) -> int:\n return _lib.opus_packet_get_samples_per_frame(data, cls.SAMPLING_RATE)",
"def _ProfilingSampleMemory(self):\n if not self._memory_profiler:\n return\n\n self._profiling_sample += 1\n\n if self._profiling_sample >= self._profiling_sample_rate:\n self._memory_profiler.Sample()\n self._profiling_sample = 0",
"def draw_samples(dataframe):\n return dbc.Card([\n dbc.CardBody([\n html.H4('Sample tweets'),\n html.Br(),\n draw_random(dataframe),\n html.Br(),\n draw_random(dataframe),\n html.Br(),\n draw_random(dataframe),\n html.Br(),\n draw_random(dataframe),\n ])\n ])",
"def run_frame(self, frame: Frame) -> Any: # can return anything!!!\n assert type(frame) == Frame\n self.push_frame(frame)\n while True:\n byteName, arguments, opoffset = self.parse_byte_and_args()\n if log.isEnabledFor(logging.INFO):\n self.log(byteName, arguments, opoffset)\n\n # When unwinding the block stack, we need to keep track of why we\n # are doing it.\n why = self.dispatch(byteName, arguments)\n\n if why != 'yield':\n while why and frame.block_stack:\n # Deal with any block management we need to do.\n why = self.manage_block_stack(why)\n\n if why:\n break\n\n # TODO: handle generator exception state\n\n self.pop_frame()\n\n # assert self.return_value is None, (self.return_value, type(self.return_value))\n return self.return_value",
"def record_frame(self, frame=None):\n if frame is None:\n frame = self.frame\n\n if frame.shape[0:2] != (self.recorder_height, self.recorder_width):\n frame = cv2.resize(frame,\n (self.recorder_height, self.recorder_width))\n if len(frame.shape) == 2:\n self.recording.write(cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR))\n else:\n self.recording.write(frame)",
"def sample(self, start, stop, step):\n self.data = self.data[start:stop:step]",
"def recognize(self, frame) -> retval:\n ...",
"def sampleImage(fastSample=bool, resolution=int):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
clears the requests record.
|
def clear_record():
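    # clear the shared list in place so existing references keep pointing at it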
requests_header_record[:] = []
return "request record cleared"
|
[
"def clear_request(self):\n self.request_data.clear()",
"def clear_requests(self) -> None:\n with self._lock:\n self._requests.clear()",
"def clear_data():\n try:\n db.all_requests.remove()\n return {\"msg\": \"complete\"}\n except:\n return {\"msg\": \"error\"}",
"def clearrequests(self):\n for k in list(self.wallet.receive_requests.keys()):\n self.wallet.remove_payment_request(k, self.config)",
"def clear(self):\n self.headers = odict()\n self.body = \"\"",
"def clear_records(self, thread=None):\n appenlight_storage = get_local_storage()\n appenlight_storage.logs = []",
"def delete_request():",
"def clear (self):\n self.__statuses.clear()",
"def clear_completed_records(self):\n connection = self.start_connection()\n cursor = connection.cursor()\n cursor.execute(\"DELETE FROM completed\")\n self.commit_close_connection(connection)",
"def clear(self):\n del self.results\n self.results = list()",
"def clear(self):\r\n self.queue = []",
"def clear(self):\n self.data = []\n self.updateData()",
"def _clear(self):\n self.info_buffer.clear()\n self.debug_buffer.clear()\n self.warning_buffer.clear()\n self.error_buffer.clear()",
"async def clear_all(self) -> None:\n await self._connection.execute(\n delete(self._mutation_use_case_invocation_record_table),\n )",
"def clear(self):\n self.models = {}\n self.model_ids = []",
"def clear(self):\n self.msg_store = ''",
"def clear(self) -> None:\n removed = self.storage.clear_entry_data()\n self.storage.clear_last_post_time()\n logging.info(f\"Removed {removed} entries\")",
"def clearDataCounters(self):\n # retcode\n\n retcode = self.set_dataReceived(0)\n if retcode != YAPI.SUCCESS:\n return retcode\n retcode = self.set_dataSent(0)\n return retcode",
"def clear(self):\n self._connection.clearServer()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Clamp the value of each RGB component to the range of 0 to 255
|
def rgb_clamp(vals):
return tuple(_adjusted_round(max(0, min(255, c))) for c in vals)
|
[
"def brighten(val, minval):\n return minval + (255 - minval) * val // 255",
"def scale_down_rgb(rgb: ndarray):\n return rgb/255.0",
"def rgb_bound(rgb_value):\n\n # upper bound\n if rgb_value > 255:\n rgb_value = 255\n # lower bound\n elif rgb_value < 0:\n rgb_value = 0\n return rgb_value",
"def imgs_scale_to_255(imgs):\n return np.array(\n [np.divide(((i - i.min()) * 255),\n (i.max() - i.min())) for i in imgs]).astype(int)",
"def value_normalization(v, v_min=500, v_max=800):\r\n if v < v_min:\r\n return 255\r\n elif v > v_max:\r\n return 255\r\n else:\r\n return int(255 * (v-v_min) / (v_max - v_min))",
"def get_grayscale_range(N, min_val, max_val):\n colors_int = np.linspace(min_val, max_val, N).astype(int)[::-1]\n colors = []\n for color_int in colors_int:\n hex_code = hex(color_int).replace(\"0x\", \"\")\n if len(hex_code) == 1:\n hex_code = \"0\"+hex_code\n color = \"#\"+\"\".join([hex_code]*3)\n colors.append(color)\n\n return colors",
"def clamp(n, smallest: int = 0, largest: int = 255) -> int:\n return int(max(smallest, min(n, largest)))",
"def colorUpdate(self):\n if self.value ==0:\n self.color = [255,255,255]\n return\n k = 0\n V = self.value\n while V>0:\n k += 18\n V //= 2\n self.color = [k,255-k,0]",
"def clamp01(val):\n return clamp(val, 0.0, 1.0)",
"def scale_pixels(data):\n data /= 255",
"def rgb_unscaler(\n X: ArrayLike,\n data_range: Sequence,\n):\n rgb_range = [0, 255]\n X_scaled = element_wise_unscaler(X, data_range=data_range, feature_range=rgb_range)\n return X_scaled",
"def rgb_scaler(\n X: ArrayLike,\n data_range: Optional[Sequence] = None,\n):\n rgb_range = [0, 255]\n X_scaled = element_wise_scaler(X, data_range=data_range, feature_range=rgb_range)\n X_scaled = np.round(X_scaled).astype(int)\n return X_scaled",
"def hue2clr(hue):\n num = len(hue)\n\n #print 'hue=',hue\n #print 'hue.shape=',hue.shape\n\n rgb = n.zeros([hue.shape[0],3])\n\n #print 'rgb =', rgb\n\n\n for k in range(0,num):\n\n\tif (hue[k] >= 0) & (hue[k] < 0.167):\n\n\t rgb[k,0] = 1\n\t rgb[k,1] = hue[k]/0.167\n\n\telif (hue[k]>= 0.167) & (hue[k] < 0.333):\n\n\t rgb[k,0] = 1-(hue[k]-0.167)/0.167\n\t rgb[k,1] = 1\n\n\telif (hue[k] >= 0.333) & (hue[k] < 0.500):\n\n\t rgb[k,1] = 1\n\t rgb[k,2] = (hue[k]-0.333)/0.167\n\n\telif (hue[k] >= 0.500) & (hue[k] < 0.667):\n\n\t rgb[k,1] = 1-(hue[k]-0.500)/0.167\n\t rgb[k,2] = 1\n\n\telif (hue[k] >= 0.667) & (hue[k] < 0.883):\n\n\t rgb[k,0] = (hue[k]-0.667)/0.167\n\t rgb[k,2] = 1\n\n\telif (hue[k] >= 0.883) & (hue[k] <= 1):\n\n\t rgb[k,0] = 1\n\t rgb[k,2] = 1-(hue[k]-0.883)/0.167\n\n\t#print 'k=',k\n\t#print 'rgb=',rgb\n return rgb",
"def check_color(c_tuple):\n for i in range(len(c_tuple)):\n if c_tuple[i]>255:\n c_tuple[i] = 255\n elif c_tuple[i]<0:\n c_tuple[i] = 0\n return c_tuple",
"def __call__(self, value):\n pos = int(((value - self.value_min) / self.value_range) * self.num_val_1)\n\n return self.color_scale[pos]",
"def normalize(img,max_=255.0):\n img -= img.min()\n img = (img*max_/img.max()).astype('uint8')\n return img",
"def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n #remaps from -1 to 1 to 0 to 255 so it can be a color\n return int(color_code)",
"def norm_rgb(colors):\n return [tuple([c/float(255) for c in rgb]) for rgb in colors]",
"def color2float(C):\n\n c = C.clone()\n if (C.dtype != torch.float) or (torch.max(c).item() > 1):\n c = torch.clamp(torch.div(C.to(dtype=torch.float), 255), min=0, max=1)\n return c"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generate packet initializer values
|
def gen_params():
return {
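        # the same 64-bit values, packed once as a big-endian and once as a little-endian sub-packet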
u'be': SubPacketBE(int64=0x7eaddeaddeaddead, uint64=0xdeaddeaddeaddead),
u'le': SubPacketLE(int64=0x7eaddeaddeaddead, uint64=0xdeaddeaddeaddead)
}
|
[
"def make_packet(packet_num):",
"def generate_initialisation_vector():\n initialisation_vector = Random.new().read(AES.block_size)\n return (initialisation_vector, int(binascii.hexlify(initialisation_vector), 16))",
"def _create_state_init_parameters(self):\n self.init_ws, self.init_bs, self.init_norms = [], [], []\n # shallow copy of the state shapes:\n state_shapes = list(self.rnn_pre_attention.state_shape)\n if self.rnn_post_attention:\n state_shapes += self.rnn_post_attention.state_shape\n for state_idx, (_, init_num_hidden) in enumerate(state_shapes):\n self.init_ws.append(mx.sym.Variable(\"%senc2decinit_%d_weight\" % (self.prefix, state_idx)))\n self.init_bs.append(mx.sym.Variable(\"%senc2decinit_%d_bias\" % (self.prefix, state_idx)))\n if self.config.layer_normalization:\n self.init_norms.append(layers.LayerNormalization(prefix=\"%senc2decinit_%d_norm\" % (self.prefix,\n state_idx)))",
"def write_init_packet(self):\n byte_opcode = struct.pack('B', self._opcodes['Initialize DFU Parameters'])\n init_packet_code = struct.pack(\n 'B', self._init_packet_codes['Receive Init Packet'])\n response_opcode_init = self.write_read_opcode(byte_opcode+init_packet_code, 0)\n # self.get_init_packet([0, 0, 0, 0, 0])\n self.write_char(self._dfu_Packet_char, data=self._init_packet)\n init_complete_packet_code = struct.pack(\n 'B', self._init_packet_codes['Init Packet Complete'])\n response_opcode_init_complete = self.write_read_opcode(\n byte_opcode+init_complete_packet_code, 3)",
"def generate_random_data():\n\n src_ip = ParamVector.random_ip()\n dst_ip = ParamVector.random_ip()\n src_port = ParamVector.random_port()\n dst_port = ParamVector.random_port()\n size_low = randint(ParamVector.SIZE_RANDOM_LOW_MIN, ParamVector.SIZE_RANDOM_LOW_MAX)\n size_high = size_low + randint(ParamVector.SIZE_RANDOM_HIGH_MIN, ParamVector.SIZE_RANDOM_HIGH_MAX)\n ttl = randint(ParamVector.TTL_THRESH_MIN, ParamVector.TTL_THRESH_MAX)\n protoc = get_random_protocol()\n seq_low = randint(ParamVector.SEQ_THRESH_MIN, ParamVector.SEQ_THRESH_MAX)\n seq_high = seq_low + randint(ParamVector.SEQ_THRESH_MIN, ParamVector.SEQ_THRESH_MAX)\n\n weight_func = lambda: uniform(0, ParamVector.WEIGHT_MAX_VAL)\n\n weights = {ParamVector.DST_IP: weight_func(),\n ParamVector.SRC_IP: weight_func(),\n ParamVector.DST_PORT: weight_func(),\n ParamVector.SRC_PORT: weight_func(),\n ParamVector.SIZE: weight_func(),\n ParamVector.TTL: weight_func(),\n ParamVector.PROTOCOL: weight_func(),\n ParamVector.SEQ: weight_func()}\n\n sum_weights = sum(weights.values())\n\n # normalizing the values:\n\n for key in weights.keys():\n weights[key] = weights[key] / sum_weights\n return ParamVector(ip_src_set={src_ip},\n ip_dst_set={dst_ip},\n port_src_set={src_port},\n port_dst_set={dst_port},\n sizes_lower_bound=size_low,\n sizes_upper_bound=size_high,\n ttl_lower_bound=ttl,\n protocol_set={protoc},\n seq_lower_bound=seq_low,\n seq_upper_bound=seq_high,\n weight_of=weights,\n malicious_threshold=random()\n )",
"def initialize(self, init_values: Dict[int, int]):\n self._instruction_pointer = 0\n self._memory = [val for val in self._code]\n for index, value in init_values.items():\n self._memory[index] = value",
"def __init__(self, packet_type):\n self._data = {}\n self._field_to_count = {}\n self._fmt = \"\"\n self._type_num = packet_type if isinstance(packet_type, int) else schema_to_type_num[packet_type]\n\n def initialize_fmt_and_fields():\n packet_path = os.path.join(\n os.path.dirname(__file__), schemas_path + type_num_to_schema[self._type_num] + \".json\"\n )\n\n packet_str = open(packet_path).read()\n packet_schema = json.loads(packet_str)\n # Assumes packet schema has been validated\n\n for field in packet_schema:\n # count = 1\n # if \"count\" in field:\n count = field[\"count\"] if \"count\" in field else 1\n\n self._field_to_count[field[\"name\"]] = count\n for _ in range(count):\n self._fmt += type_name_to_fmt[field[\"type\"]]\n\n initialize_fmt_and_fields()",
"def _gen_authorization_data(\n self,\n spn: str,\n domain: str,\n ) -> bytes:\n # fmt: off\n\n # Build the AD Negotiation data set.\n # AdETypeNegotiation -> RC4_HMAC_NT\n # 0x17 -> RC4_HMAC_NT\n # Since we can't build an ASN.1 sequence without a#,\n # we are just going to hard code this sequence\n # \n # 300f a004 0202 0081 a107 0405 3003 0201\n # 17\n # ----------------------------------------\n # 30 0f - SEQ\n # a0 04 02 02 00 81 - INT: 0x81\n # a1 07 04 05 - OCT STRING\n # 30 03 02 01 17 - SEQ -> INT: 0x17\n neg_type = b\"\\x30\\x03\\x02\\x01\\x17\"\n negotiation_type_data_seq = AuthorizationDataSequence()\n negotiation_type_data_seq[\"ad-type\"] = 0x81\n negotiation_type_data_seq[\"ad-data\"] = neg_type\n\n # Build the Restriction Types data set\n # \n # 303f a004 0202 008d a137 0435 3033 3031\n # a003 0201 00a1 2a04 2800 0000 0000 1000\n # 00f3 cd6a f91c c2b1 32fd fbf1 6349 7585\n # 5e62 4ba4 9675 639e 351a 919e 3028 b9e0\n # 00\n # ----------------------------------------\n # 30 3f - SEQ\n # a0 04 02 02 00 8d - INT: 0x8D\n # a1 37 04 35 - OCT STRING\n # 30 33 - SEQ\n # 30 31 - SEQ\n # a0 03 02 01 00 - INT: 0x00\n # a1 2a 04 28 - OCT STRING\n # 00 00 00 00 00 10 00 00... - re_data\n re_data = b\"\\x00\\x00\\x00\\x00\\x00\\x10\\x00\\x00\" + urandom(32)\n restriction_data = AuthorizationData()\n restriction_data[0][\"ad-type\"] = 0 # const\n restriction_data[0][\"ad-data\"] = re_data\n\n kerb_auth_data_token_restrictions = AuthorizationDataSequence()\n kerb_auth_data_token_restrictions[\"ad-type\"] = 0x8D # 141\n kerb_auth_data_token_restrictions[\"ad-data\"] = encoder.encode(restriction_data)\n\n # Build the KerbLocal data set\n # \n # 301a a004 0202 008e a112 0410 bc20 16eb\n # a5f8 8b2a df78 2b94 7456 bd72\n # ----------------------------------------\n # 30 1a - SEQ\n # a0 04 02 02 00 8e - INT: 0x8E\n # a1 12 04 10 - OCT STRING\n # bc 20 16 eb a5 f8 8b 2a... - urandom()\n kerb_local_data = AuthorizationDataSequence()\n kerb_local_data[\"ad-type\"] = 0x8E\n kerb_local_data[\"ad-data\"] = urandom(16)\n\n # Build the KerbApOptions data set\n # ChannelBindingSupported\n # \n # 300e a004 0202 008f a106 0404 0040 0000\n # ----------------------------------------\n # 30 0e - SEQ\n # a0 04 02 02 00 8f - INT: 0x8F\n # a1 06 04 04 - OCT STRING\n # 00 40 00 00 - \\x00\\x40\\x00\\x00\n binding_data = AuthorizationDataSequence()\n binding_data[\"ad-type\"] = 0x8F\n binding_data[\"ad-data\"] = b\"\\x00\\x40\\x00\\x00\"\n\n # Build the KerbServiceTarget data set\n # \n # 304a a004 0202 0090 a142 0440 6800 6f00\n # 7300 7400 2f00 7300 7400 7300 2e00 6300\n # 6f00 6d00 7000 6100 6e00 7900 2e00 6300\n # 6f00 6d00 4000 6300 6f00 6d00 7000 6100\n # 6e00 7900 2e00 6300 6f00 6d00\n # ----------------------------------------\n # 30 4a - SEQ\n # a0 04 02 02 00 90 - INT: 0x90\n # a1 42 04 40 68 - OCT STRING\n # 00 6f 00 73 00 74 00 2f 00... - spn@domain -> UTF-16LE (null padded)\n kerb_service_target_data = AuthorizationDataSequence()\n kerb_service_target_data[\"ad-type\"] = 0x90\n kerb_service_target_data[\"ad-data\"] = f\"{spn}@{domain}\".encode(\"utf-16le\")\n\n # Now, wrap the above data sets in a sequence (top down).\n # Since we can't build an ASN.1 sequence without a#, we\n # are just going to hard code the sequence and manually\n # calculate the data length.\n # \n # 3081 XX ....\n # ----------------------------------------\n # 30 81 -- SEQ\n # XX -- LEN\n # .. .. 
-- COMBINED DATA\n auth_data = (\n encoder.encode(negotiation_type_data_seq)\n + encoder.encode(kerb_auth_data_token_restrictions)\n + encoder.encode(kerb_local_data)\n + encoder.encode(binding_data)\n + encoder.encode(kerb_service_target_data)\n )\n auth_data_len = hex(len(auth_data))[2:] # Strip `0x`\n auth_data_len = unhexlify(auth_data_len) # Convert to `\\x`\n authorization_data = b\"\\x30\\x81\" + auth_data_len + auth_data\n\n # fmt: on\n return authorization_data",
"def make_initialization_vector():\r\n return Random.new().read(AES.block_size)",
"def test_init_values(self):\n # set an additional value for test\n self.protocol._param_dict.add(\"foo\", r'foo=(.*)',\n lambda match : int(match.group(1)),\n lambda x : str(x),\n direct_access=True,\n startup_param=True,\n default_value=10)\n self.protocol._param_dict.add(\"bar\", r'bar=(.*)',\n lambda match : int(match.group(1)),\n lambda x : str(x),\n direct_access=False,\n startup_param=True,\n default_value=0)\n self.protocol._param_dict.add(\"baz\", r'baz=(.*)',\n lambda match : int(match.group(1)),\n lambda x : str(x),\n direct_access=True,\n default_value=20)\n self.protocol._param_dict.add(\"bat\", r'bat=(.*)',\n lambda match : int(match.group(1)),\n lambda x : str(x),\n startup_param=False,\n default_value=20)\n self.protocol._param_dict.add(\"qux\", r'qux=(.*)',\n lambda match : int(match.group(1)),\n lambda x : str(x),\n startup_param=True)\n self.protocol._param_dict.add(\"rok\", r'rok=(.*)',\n lambda match : int(match.group(1)),\n lambda x : str(x))\n self.protocol._param_dict.update(\"qux=6666\")\n \n # mark init params\n self.assertRaises(InstrumentParameterException,\n self.protocol.set_init_params, [])\n self.protocol.set_init_params({DriverConfigKey.PARAMETERS: {\"foo\": 1111, \"baz\":2222}})\n \n # get new startup config\n self.assertRaises(InstrumentProtocolException, self.protocol.get_startup_config)\n self.protocol.set_init_params({DriverConfigKey.PARAMETERS: {\"foo\": 1111, \"baz\":2222, \"bat\": 11, \"qux\": 22}})\n result = self.protocol.get_startup_config()\n \n self.assertEquals(len(result), 5)\n self.assertEquals(result[\"foo\"], 1111) # init param\n self.assertEquals(result[\"bar\"], 0) # init param with default value\n self.assertEquals(result[\"baz\"], 2222) # non-init param, but value specified\n self.assertEquals(result[\"bat\"], 11) # set param\n self.assertEquals(result[\"qux\"], 22) # set param\n self.assertIsNone(result.get(\"rok\")) # defined in paramdict, no config",
"def __init_decoder_params_fc(self):\n self.dec_params_fc = list(reversed(\n [self.__inshape[-1]]+self.layer_cfg[:-1]))",
"def initData():\n global timeBinary\n global specBinary\n global nrbdBinary\n\n rawData = dataArray(sampleSize)\n\n timeBinary = packTimeData(rawData)\n specBinary = packSpecData(rawData)\n nrbdBinary = packNrbdData(rawData)",
"def init_vars(self):\n pass",
"def TypeInitializer(self) -> _n_5_t_19:",
"def populate_registers(init_keyword: str) -> tuple:\n xyz = \"\"\n for char in init_keyword:\n xyz += bin(ord(char))[2:].zfill(8)\n \n \n if len(xyz) < 64:\n xyz = xyz.ljust(64,\"0\")\n \n x = xyz[:19]\n y = xyz[19:41]\n z = xyz[41:]\n \n print(len(z))\n return (x , y, z)",
"def _constructPacket(self):\n for idx, intf in enumerate(self.ptf_ports):\n udp_sport = random.randint(0, 65535)\n udp_dport = random.randint(0, 65535)\n src_port = self.ptf_ports[intf][0]\n src_ip = self.ptf_ports[intf][2]\n vlan_id = self.ptf_ports[intf][3]\n pkt = testutils.simple_udp_packet(eth_dst=self.dut_mac,\n eth_src=self.ptfadapter.dataplane.get_mac(0, src_port),\n ip_dst=self.dst_ip,\n ip_src=src_ip,\n ip_tos=self.dscp << 2,\n udp_sport=udp_sport,\n udp_dport=udp_dport,\n ip_ttl=64\n )\n self.pkts.append(pkt)\n tmp_pkt = testutils.simple_udp_packet(eth_dst=self.arp_entry[self.dst_ip],\n eth_src=self.dut_mac,\n ip_dst=self.dst_ip,\n ip_src=src_ip,\n ip_tos=self.dscp << 2,\n udp_sport=udp_sport,\n udp_dport=udp_dport,\n ip_ttl=63\n )\n tmp_pkt = mask.Mask(tmp_pkt)\n tmp_pkt.set_do_not_care_scapy(packet.IP, \"chksum\")\n self.exp_pkts.append(tmp_pkt)\n # if inft is a sub interface, tuple be like (\"Eth0.10\", \"Eth0\")\n # if inft is a general interface, tuple be like (\"Eth0\", \"Eth0\")\n self.pkt_map[str(pkt)] = (intf, get_intf_by_sub_intf(intf, vlan_id), pkt)",
"def _build_parsed_values(self):\n try:\n unpack_string = '<4s14s2s4H2s12s4sh2s'\n sync, serial_num, config, board_frequency, pic_version, hw_revision, recorder_size, status, spare, fw_version, cksum, _ = \\\n struct.unpack(unpack_string, self.raw_data)\n\n if not validate_checksum('<23H', self.raw_data, -4):\n log.warn(\"_parse_read_hw_config: Bad read hw response from instrument (%r)\", self.raw_data)\n self.contents[DataParticleKey.QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED\n\n config = NortekProtocolParameterDict.convert_bytes_to_bit_field(config)\n status = NortekProtocolParameterDict.convert_bytes_to_bit_field(status)\n recorder_installed = config[-1]\n compass_installed = config[-2]\n velocity_range = status[-1]\n\n except Exception:\n log.error('Error creating particle hardware config, raw data: %r', self.raw_data)\n raise SampleException\n\n result = [{DataParticleKey.VALUE_ID: NortekHardwareConfigDataParticleKey.SERIAL_NUM,\n DataParticleKey.VALUE: serial_num},\n {DataParticleKey.VALUE_ID: NortekHardwareConfigDataParticleKey.RECORDER_INSTALLED,\n DataParticleKey.VALUE: recorder_installed},\n {DataParticleKey.VALUE_ID: NortekHardwareConfigDataParticleKey.COMPASS_INSTALLED,\n DataParticleKey.VALUE: compass_installed},\n {DataParticleKey.VALUE_ID: NortekHardwareConfigDataParticleKey.BOARD_FREQUENCY,\n DataParticleKey.VALUE: board_frequency},\n {DataParticleKey.VALUE_ID: NortekHardwareConfigDataParticleKey.PIC_VERSION,\n DataParticleKey.VALUE: pic_version},\n {DataParticleKey.VALUE_ID: NortekHardwareConfigDataParticleKey.HW_REVISION,\n DataParticleKey.VALUE: hw_revision},\n {DataParticleKey.VALUE_ID: NortekHardwareConfigDataParticleKey.RECORDER_SIZE,\n DataParticleKey.VALUE: recorder_size},\n {DataParticleKey.VALUE_ID: NortekHardwareConfigDataParticleKey.VELOCITY_RANGE,\n DataParticleKey.VALUE: velocity_range},\n {DataParticleKey.VALUE_ID: NortekHardwareConfigDataParticleKey.FW_VERSION,\n DataParticleKey.VALUE: fw_version}]\n\n log.debug('NortekHardwareConfigDataParticle: particle=%r', result)\n return result",
"def __init__(self, py_dict=None):\n super(VxlanNetworkEntrySchema, self).__init__()\n # values of the attributes are the strings which stdout uses to dump data\n # Because stdout uses stuff like Connection-ID we cannot have same attributes\n # as an attribute cannot have '-' in it\n\n self.networkName = \"VXLAN network\"\n self.multicastIP = \"Multicast IP\"\n self.controlPlane = \"Control plane\"\n self.controller = \"Controller\"",
"def initialize_1nucl():\n bases = defaultdict(dict)\n bases['A'] = ['A', 'A']\n bases['T'] = ['T', 'T']\n bases['G'] = ['G', 'G']\n bases['C'] = ['C', 'C']\n bases['N'] = ['N', 'N']\n bases['W'] = ['A', 'T']\n bases['R'] = ['A', 'G']\n bases['M'] = ['A', 'C']\n bases['K'] = ['G', 'T']\n bases['Y'] = ['C', 'T']\n bases['S'] = ['C', 'G']\n return bases"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Helper function that gets called for response change and response add. It opens a new courselab and then uploads the grading files.
|
def _open_and_upload(self, obj):
for problem in obj.problems.all().filter(autograde_problem=True):
# open (make) the courselab on tango server with the callback
# _upload_ps_files
tango.open(problem, obj)
            # upload the grader libraries
for lib in models.GraderLib.objects.all():
f = lib.lib_upload
tango.upload(problem, obj, f.name.split("/")[-1], f.read())
# upload the grading script
grading = problem.grade_script
grading_name = grading.name.split("/")[-1]
tango.upload(problem, obj, grading_name, grading.read())
# upload the makefile that will run the grading script
            makefile = 'autograde:\n\t@python3 ' + grading_name  # make recipes must start with a tab, not spaces
tango.upload(problem, obj, vrfy.settings.MAKEFILE_NAME, makefile)
# upload all the other files
for psfile in models.ProblemSolutionFile.objects.filter(
problem=problem):
f = psfile.file_upload
tango.upload(problem, obj, f.name.split("/")[-1], f.read())
|
[
"def upload_course(request):\n status = 0\n form = json.loads(request.POST.get(\"updateForm\"))\n img_info = form[\"imgInfo\"]\n profile_url = None\n for img in img_info:\n if img[\"start\"] == 0:\n profile_url = img[\"id\"]\n break\n try:\n profile = PictureTemp.objects.get(pk=profile_url).position\n audio = AudioTemp.objects.get(pk=form[\"audioInfo\"][\"id\"]).position\n except (PictureTemp.DoesNotExist, AudioTemp.DoesNotExist) as e:\n print(e)\n status = 1\n return JsonResponse({\"status\": status})\n course = Course(\n course_name=form[\"courseTitle\"],\n description=form[\"courseDescription\"],\n content=form[\"courseContain\"], price=form[\"price\"],\n message_on=form[\"messageOn\"],\n burnt_time=int(form[\"destroyTime\"]) * HOUR,\n audio_url=File(audio, audio.name.split(\"/\")[-1]),\n profile_url=File(profile, profile.name.split(\"/\")[-1]),\n perpercentage=int(float(form[\"percentage\"]) * 10000)\n )\n audio.close()\n profile.close()\n course.save()\n status = _insert_pictrue_(img_info, course)\n\n log_user_id = request.user.id\n log_object_id = course.course_id\n log = AdminOperationRecord.objects.create(\n admin_id=log_user_id,\n operation=Operation.objects.get(pk=1),\n object=log_object_id\n )\n log.save()\n return JsonResponse({\"status\": status})",
"def create_lesson():\n\n ### SAVE LESSON TO DATABASE ###\n # Set up default lesson data dict\n lesson_data = {\n 'title': 'Untitled', \n 'author_id': session['user_id'],\n 'overview': '', \n 'imgUrl': None,\n 'public': False,\n }\n\n ### UPLOAD PHOTO TO CLOUDINARY AND ATTACH URL ###\n if 'lesson-pic' not in request.files:\n lesson_data['imgUrl'] = \"/static/img/placeholder.png\"\n else: \n my_file = request.files['lesson-pic']\n result = cloudinary.uploader.upload(my_file, api_key=CLOUD_KEY, \n api_secret=CLOUD_SECRET,\n cloud_name='hackbright')\n lesson_data['imgUrl'] = result['secure_url']\n \n ### SAVE LESSON TO DATABASE ###\n lesson_data['title'] = request.form['title']\n lesson_data['overview'] = request.form['overview']\n db_lesson = crud.create_lesson(lesson_data)\n\n ### CREATE DB ASSOCIATION BETWEEN TAGS AND LESSON ###\n tags = request.form['tags'].split(',') # eg. '6th,science'\n # Right now, setting up new tag with id of \"tag\"\n for tag in tags:\n if tag in SUBJECTS: \n db_tag = crud.get_tag_by_name(tag)\n elif tag in GRADES: \n db_tag = crud.get_tag_by_name(tag)\n crud.assign_tag_to_lesson(db_tag, db_lesson)\n\n ### CREATE DB ASSOCIATION BETWEEN COMPONENTS AND LESSON ###\n if request.form['component-ids']:\n component_ids = request.form['component-ids'].split(',') # e.g. '30,31'\n for comp_id in component_ids:\n db_comp = crud.get_comp_by_id(int(comp_id))\n crud.assign_comp(db_comp, db_lesson)\n \n try: \n return {'success': True, 'lesson_id': db_lesson.lesson_id}\n except: \n print('Except something done broke')\n return {'success': False}",
"def add_teaching_file():\n # Validate the request body contains JSON\n if not request.is_json:\n return jsonify(\"Error\", \"No json data received\"), 400\n # Parse the JSON into a Python dictionary\n req = request.get_json(silent=True)\n return update_json_file(data=req,\n schema_name=\"set_teaching\",\n target_file_path=\"/data/jsons/teaching.json\",\n current_user_id=current_user.get_id(),\n full_file=False)",
"def upload(self, request, files, parties=None):\n # Find the transaction id .. this will be a unique guid generated by eDivorce thats passed to Efiling Hub. We\n # will tie it to the session.\n\n transaction_id = self._get_transaction(request)\n bce_id = self._get_bceid(request)\n\n # if bce_id is None .. we basically have an anonymous user so raise an error\n if bce_id is None:\n raise PermissionDenied()\n\n response = self._get_api(request, f'{self.api_base_url}/submission/documents', transaction_id, bce_id,\n headers={}, files=files)\n if response.status_code == 200:\n response = json.loads(response.text)\n\n if \"submissionId\" in response and response['submissionId'] != \"\":\n # get the redirect url\n headers = {\n 'Content-Type': 'application/json'\n }\n package_data = self._format_package(request, files, parties=parties)\n url = f\"{self.api_base_url}/submission/{response['submissionId']}/generateUrl\"\n response = self._get_api(request, url, transaction_id, bce_id, headers=headers,\n data=json.dumps(package_data))\n\n if response.status_code == 200:\n response = json.loads(response.text)\n return response['efilingUrl'], 'success'\n\n response = json.loads(response.text)\n\n return None, f\"{response['error']} - {response['message']}\"\n\n return None, f'{response.status_code} - {response.text}'",
"def downloadSubmission(self, submission, student, directory, group_memberships={}):\n\n #self.prettyPrint(submission)\n\n attachment = submission['attachments'][0]\n filename = attachment['filename']\n exten = os.path.splitext(filename)[1] # get filename extension\n import datetime\n utc_dt = datetime.datetime.strptime(submission['submitted_at'], '%Y-%m-%dT%H:%M:%SZ')\n utc_dt = utc_dt.replace(tzinfo=datetime.timezone.utc)\n\n # Create a new metadata record to save. \n metadataNew = {\n # Put submission time in local time zone since everything\n # else is in UTC. This isn't used for anything other than\n # making it easy for someone to look at the file and\n # understand the submission time. tz=None to convert to\n # local time zone requires Python 3.3 or higher.\n \"localSubmissionTime\":str(utc_dt.astimezone(tz=None)),\n\n # Include the submission and student information:\n \"canvasSubmission\":submission,\n \"canvasStudent\":student }\n\n # Figure out if the name of the downloaded file/subdirectory\n # should be based on their username or group name (if there is\n # a set of groups associated with this assignment.)\n login = student['login_id']\n if student['login_id'] in group_memberships:\n (group, usersInGroup) = group_memberships[student['login_id']]\n metadataNew['canvasGroup'] = group\n metadataNew['canvasStudentsInGroup'] = usersInGroup\n login = group['name']\n\n # Look for an existing metadata file. When we just download\n # the submitted file (pre-extraction), We will have just the\n # submitted file and the login name with \".AUTOGRADE.json\"\n # appended to it. After we extract, the submissions may go\n # into a directory.\n metadataFile = None;\n metadataFiles = [ os.path.join(directory,login+\".AUTOGRADE.json\"),\n os.path.join(directory,login,\"AUTOGRADE.json\") ]\n for mdf in metadataFiles:\n if os.path.exists(mdf):\n metadataFile = mdf\n\n # Check if we need to download file based on metadata\n metadataCache = {}\n if metadataFile:\n with open(metadataFile,\"r\") as f:\n metadataCache = json.load(f)\n\n # Gather metadata and make assumptions if metadata file is missing:\n if \"locked\" not in metadataCache:\n print(\"%-12s Assuming cached copy is unlocked.\" % login)\n locked = metadataCache.get(\"locked\", 0)\n\n if 'canvasSubmission' not in metadataCache or \\\n 'attempt' not in metadataCache['canvasSubmission']:\n print(\"%-12s Assuming cached submission is attempt 0\" % login)\n cachedAttempt = 0\n else:\n cachedAttempt = metadataCache['canvasSubmission']['attempt']\n newAttempt = metadataNew['canvasSubmission']['attempt']\n metadataNew['canvasSubmission']['late']\n\n # Update our cached information to contain current grade\n # entered into Canvas. If we are getting a newer submission\n # than what we already have, then we'll update the metadata\n # when we get the new submission.\n if newAttempt == cachedAttempt and os.path.exists(metadataFile):\n metadataCache['canvasSubmission'] = metadataNew['canvasSubmission']\n with open(metadataFile, \"w\") as f:\n json.dump(metadataCache, f, indent=4)\n \n # Determine if we should download the submission or not\n if locked:\n print(\"%-12s skipping download because submission is locked to attempt %d.\" % (login, cachedAttempt))\n return\n if newAttempt == cachedAttempt:\n print(\"%-12s We already have downloaded attempt %2d. Skipping download.\" % (login, newAttempt))\n return\n if newAttempt < cachedAttempt:\n print(\"%-12s WARNING: You requested attempt %2d; directory contains newer attempt %2d; SKIPPING DOWNLOAD. 
To force a download, erase the student directory and rerun. Or, rerun and request to dowload that students' specific attempt.\" % (login, newAttempt, cachedAttempt))\n return\n\n archiveFile = os.path.join(directory,login+exten)\n\n # Delete existing archive if it exists.\n toDelete = metadataFiles\n toDelete.append(archiveFile)\n for f in toDelete:\n if os.path.exists(archiveFile):\n os.unlink(archiveFile)\n # Download the file\n print(\"%-12s downloading attempt %d submitted %s (replacing attempt %d)\" % (login, newAttempt, \n self.prettyDate(utc_dt, datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)), cachedAttempt))\n try:\n urllib.request.urlretrieve(attachment['url'], directory+\"/\"+login+exten)\n except:\n print(\"ERROR: Failed to download \"+attachment['url'])\n import traceback\n traceback.print_exc()\n pass\n\n # Write the new metadata out to a file\n metadataNew['locked']=0\n with open(metadataFiles[0], \"w\") as f:\n json.dump(metadataNew, f, indent=4)",
"def addtourguide():\r\n\r\n if request.method == 'POST':\r\n try:\r\n MD.TourGuide.create_tg(\r\n fname=request.form['firstname'],\r\n lname=request.form['lastname'],\r\n email=request.form['email'],\r\n phone=request.form['phone'],\r\n address=request.form['address'],\r\n age=request.form['age'],\r\n gender=request.form['gender'],\r\n salary=request.form['salary'])\r\n except MD.IntegrityError:\r\n flash(\"Tour Guide Exists Try again\", 'danger')\r\n return redirect(url_for(\"addtourguide\"))\r\n\r\n \"\"\"uploading tourguide avatar image\"\"\"\r\n if 'file' not in request.files:\r\n flash(' No file part', 'danger')\r\n return redirect(request.url)\r\n file = request.files['file']\r\n # if user does not select file, browser also\r\n # submit an empty part without filename\r\n if file.filename == ' ':\r\n flash(' No selected image', 'danger')\r\n return redirect(request.url)\r\n if file and allowed_file(file.filename):\r\n filename = secure_filename(file.filename)\r\n destin = os.path.join(\r\n adminapp.config['UPLOAD_FOLDER']+\"/tourguides/\",\r\n str(request.form['firstname'])+\".jpg\")\r\n file.save(destin)\r\n flash(\"TourGuide Saved Successfully\", 'success')\r\n return redirect(url_for('addtourguide'))\r\n return render_template(\"admin/add_new_tourguide.html\")",
"def lacop_form():\n form = lacopForm(request.form)\n image_file = os.path.join(app.config['UPLOAD_FOLDER'], 'LacOp_img.png')\n lesson_file = os.path.join(app.config['UPLOAD_FOLDER'], 'LacOP_Web_Lab.docx')\n form_warning = ''\n\n if request.method == \"POST\" and form.validate():\n form_data = build_mut_list()\n if \"plasmist=present\" in request.form:\n cell = RunLO(form_data[0], form_data[1], int(request.form.get('ALLO')), int(request.form.get('LI')),\n int(request.form.get('LO')), int(request.form.get('GLU')))\n else:\n cell = RunLO(form_data[0], [], int(request.form.get('ALLO')), int(request.form.get('LI')),\n int(request.form.get('LO')), int(request.form.get('GLU')))\n concentration_dict = cell.archiveConditions\n\n try:\n graph = pygal.XY(x_title='Psuedo Seconds', y_title='Molecular Units')\n graph.title = \"Operon Graph\" # generateTitle(form_data[0])\n graph.x_labels = range(0, 401, 20)\n for item in concentration_dict:\n graph_points = []\n for index in range(len(concentration_dict[item])):\n graph_points.append((index, concentration_dict[item][index]))\n if item == \"allo\":\n item = 'Allolactose'\n if item == \"lacIn\":\n item = 'Lactose Intracellular'\n if item == \"lacOut\":\n item = 'Lactose Extracellular'\n if item == 'gulcose + galactose':\n item = 'Glucose Inside Cell'\n if item == 'perm':\n item = 'Permease'\n if item == 'bgal':\n item = 'Beta-Galactasidase'\n graph.add(item, graph_points)\n graph_data = graph.render_data_uri()\n legend_data = format_legend(form_data[0], cell.archiveConditions, cell.plasmid_data)\n return render_template('lacop.html',\n graph_data=graph_data,\n form=form,\n user_image=image_file,\n is_valid=form_warning,\n legend=legend_data,\n lesson_file=lesson_file)\n except Exception, e:\n return (str(e))\n else:\n graph = pygal.Line()\n graph.title = 'Operon Graph'\n graph.x_labels = range(200)\n graph_data = graph.render_data_uri()\n plasmid_legend = {\"Promoter\": None,\n \"Operator\": None,\n \"Repressor\": None,\n \"Permease\": None,\n \"Beta-Galactasidase\": None}\n return render_template('lacop.html',\n graph_data=graph_data,\n form=form,\n user_image=image_file,\n is_valid=form_warning,\n legend={\"Sugar Concentration\": [], \"Lactose Operon\": [], \"Plasmid\": plasmid_legend},\n lesson_file=lesson_file)",
"def upload_sample_file():\n if not session.get('username') is None:\n if request.method == 'POST':\n # If the log file is already present in the file system and the user chose to skip\n if 'Sample' in request.form:\n print(request.form['Sample'])\n session['samplefile'] = os.path.join(app.config['STORAGE_PATH'], session.get('username') + '_' + request.form['Sample'])\n success, error_str, clus_dict, cluster_labels, log = check_sample_list(os.path.join(app.config['STORAGE_PATH'], session.get(\"username\") + \".xes\"),session['samplefile'])\n\n if not success:\n flash(error_str)\n if log is None or clus_dict is None:\n return redirect('/log')\n\n return render_template('thresholds.html', cluster_labels=cluster_labels)\n # check if the post request has the file part\n if not request.files:\n flash('No file selected for uploading.')\n return redirect(request.url)\n file = request.files['file']\n print(request.form.keys)\n if file.filename == '':\n flash('No file selected for uploading.')\n return redirect(request.url)\n if file and allowed_sample_file(file.filename):\n name = app.config['STORAGE_PATH'] + session.get('username') + \".txt\"\n with open(name, \"r\") as f:\n data = f.readlines()\n alreadyIn = False\n for d in data:\n if d == file.filename + '\\n':\n alreadyIn = True\n flash(\"This filename has already been used. The old file was overwritten\")\n if not alreadyIn:\n with open(name, \"a\") as f:\n f.write(file.filename + '\\n')\n\n file.save(os.path.join(app.config['STORAGE_PATH'], session.get('username') + '_' + file.filename))\n # flash('File successfully uploaded')\n success, error_str, clus_dict, cluster_labels, log = check_sample_list(os.path.join(app.config['STORAGE_PATH'], session.get(\"username\") + \".xes\"), os.path.join(app.config['STORAGE_PATH'], session.get('username') + '_' + file.filename))\n session['samplefile'] = os.path.join(app.config['STORAGE_PATH'], session.get('username') + '_' + file.filename)\n if not success:\n flash(error_str)\n if log is None or clus_dict is None:\n return redirect('/log')\n\n return render_template('thresholds.html', cluster_labels=cluster_labels)\n else:\n flash('The only allowed file type is csv.')\n return redirect(request.url)\n elif request.method == 'GET':\n names = find_samples(app.config['STORAGE_PATH'] + session.get('username')+ \".txt\")\n if names:\n return render_template('sample.html', samples=names)\n else:\n return render_template('sample.html')\n else:\n flash('Please enter a Projectname first.')\n return redirect('/')",
"def upload_file(org_id, absolute_file_path):\n\n \"\"\"\n example\n org_id = \"673573045\"\n absolute_file_path = \"/Users/user_name/Documents/Leads.zip\"\n \"\"\"\n\n # Get instance of BulkWriteOperations Class\n bulk_write_operations = BulkWriteOperations()\n\n # Get instance of FileBodyWrapper class that will contain the request file\n file_body_wrapper = FileBodyWrapper()\n\n \"\"\"\n StreamWrapper can be initialized in any of the following ways\n \n * param 1 -> fileName \n * param 2 -> Read Stream.\n \"\"\"\n # stream_wrapper = StreamWrapper(stream=open(absolute_file_path, 'rb'))\n\n \"\"\"\n * param 1 -> fileName\n * param 2 -> Read Stream\n * param 3 -> Absolute File Path of the file to be attached\n \"\"\"\n\n stream_wrapper = StreamWrapper(file_path=absolute_file_path)\n\n # Set file to the FileBodyWrapper instance\n file_body_wrapper.set_file(stream_wrapper)\n\n # Get instance of HeaderMap Class\n header_instance = HeaderMap()\n\n # Possible parameters for upload_file operation\n header_instance.add(UploadFileHeader.feature, \"bulk-write\")\n\n header_instance.add(UploadFileHeader.x_crm_org, org_id)\n\n # Call upload_file method that takes FileBodyWrapper instance and header_instance as parameter\n response = bulk_write_operations.upload_file(file_body_wrapper, header_instance)\n\n if response is not None:\n # Get the status code from response\n print('Status Code: ' + str(response.get_status_code()))\n\n # Get object from response\n response_object = response.get_object()\n\n if response_object is not None:\n\n # Check if expected ActionWrapper instance is received.\n if isinstance(response_object, SuccessResponse):\n\n # Get the Status\n print(\"Status: \" + response_object.get_status().get_value())\n\n # Get the Code\n print(\"Code: \" + response_object.get_code().get_value())\n\n print(\"Details\")\n\n # Get the details dict\n details = response_object.get_details()\n\n for key, value in details.items():\n print(key + ' : ' + str(value))\n\n # Get the Message\n print(\"Message: \" + response_object.get_message().get_value())\n\n # Check if the request returned an exception\n elif isinstance(response_object, APIException):\n\n if response_object.get_status() is not None:\n # Get the Status\n print(\"Status: \" + response_object.get_status().get_value())\n\n if response_object.get_code() is not None:\n # Get the Code\n print(\"Code: \" + response_object.get_code().get_value())\n\n print(\"Details\")\n\n # Get the details dict\n details = response_object.get_details()\n\n if details is not None:\n for key, value in details.items():\n print(key + ' : ' + str(value))\n\n if response_object.get_error_message() is not None:\n # Get the ErrorMessage\n print(\"Error Message: \" + response_object.get_error_message().get_value())\n\n # Get the ErrorCode\n print('Error Code: ' + str(response_object.get_error_code()))\n\n if response_object.get_x_error() is not None:\n # Get the XError\n print('XError: ' + response_object.get_x_error().get_value())\n\n if response_object.get_info() is not None:\n # Get the Info\n print(\"Info: \" + response_object.get_info().get_value())\n\n if response_object.get_x_info() is not None:\n # Get the XInfo\n print(\"XInfo: \" + response_object.get_x_info().get_value())\n\n if response_object.get_message() is not None:\n # Get the Message\n print(\"Message: \" + response_object.get_message().get_value())\n\n print('HttpStatus: ' + response_object.get_http_status())",
"def set_teaching_file():\n # Validate the request body contains JSON\n if not request.is_json:\n return jsonify(\"Error\", \"No json data received\"), 400\n # Parse the JSON into a Python dictionary\n data = request.get_json(silent=True)\n return update_json_file(data=data,\n schema_name=\"set_teaching\",\n target_file_path=\"/data/jsons/teaching.json\",\n current_user_id=current_user.get_id(),\n full_file=True)",
"def createAssignmentMethod(request):\n try:\n courseId = ndb.Key(urlsafe=getattr(request, 'courseId'))\n course = courseId.get()\n if course is None:\n raise Exception(\"Invalid courseId\")\n except Exception, E:\n print str(E)\n return Response(response=1, description=str(E))\n try:\n uploaderId = ndb.Key(urlsafe=getattr(request, 'uploaderId'))\n uploader = uploaderId.get()\n if uploader is None:\n raise Exception(\"Invalid uploaderId\")\n except Exception, E:\n print str(E)\n return Response(response=1, description=str(E))\n\n newAssignment = Assignment()\n\n # storing the details\n setattr(newAssignment, 'assignmentTitle', getattr(request, 'assignmentTitle'))\n setattr(newAssignment, 'assignmentDesc', getattr(request, 'assignmentDesc'))\n setattr(newAssignment, 'dueDate', getattr(request, 'dueDate'))\n setattr(newAssignment, 'dueTime', getattr(request, 'dueTime'))\n setattr(newAssignment, 'urlList', getattr(request, 'urlList'))\n\n dateUploaded = str(datetime.datetime.now() + datetime.timedelta(hours=5, minutes=30))\n setattr(newAssignment, 'courseId', courseId)\n setattr(newAssignment, 'uploaderId', uploaderId)\n setattr(newAssignment, 'dateUploaded', dateUploaded)\n assignmentId = newAssignment.put()\n\n # adding assignmentId to course.assignmentIds\n course.assignmentIds.append(assignmentId)\n course.put()\n\n # deleting the cached value\n memcache.delete(courseId.urlsafe())\n\n # Sending Notification to subscribed profiles\n title = course.courseName\n notificationText = \"New assignment added!\"\n createNotification(course.studentIds, 'Campus Connect',\n notificationText, 'assignment',\n assignmentId.urlsafe(), courseId.urlsafe())\n sendNotification(topicName=courseId.urlsafe(), id=assignmentId.urlsafe(), title=title,\n text=notificationText, type='assignment')\n return Response(response=0, description=\"OK\", key=assignmentId.urlsafe())",
"def project_file(slug,user_id):\n project = Campaign.query.filter_by(slug=slug).first_or_404()\n user = User.query.filter_by(id=user_id).first_or_404()\n cu = CampaignUsers.query.filter(CampaignUsers.campaign_id==project.id,CampaignUsers.user_id==user.id).first_or_404()\n\n #Ensure you can only see your own data, unless you're an admin or supervisor.\n if (user.id != current_user.id) and (not current_user.is_supervisor) and (not current_user.is_admin):\n abort(403)\n\n #Fetch the userdata directory and store it in the userdata variable. This\n #directory is where all the files will be uploaded.\n userdata = project.userdata(user.id)\n\n #GET or HEAD requests serve data\n if request.method==\"HEAD\" or request.method==\"GET\":\n filename=request.args.get(\"filename\",\"\")\n if filename != \"\":\n as_attachment = True if not filename.lower().endswith((\".png\",\".jpg\",\".jpeg\")) else False\n return send_from_directory(os.path.join(userdata,\"attachments\"), filename, as_attachment=as_attachment)\n\n #POST requests store data\n if request.method==\"POST\":\n try:\n print \"import excel parser\"\n from utils import excel_parser\n print \"import excp ok!\"\n f = request.files['file']\n if f:\n print \"f found\"\n upload_file=os.path.join(userdata,\"attachments\",f.filename)\n print \"file ing as %s\"%(upload_file)\n f.save(upload_file)\n print \"file saved as %s\"%(upload_file)\n if f.filename.endswith((\".xls\",\".xlsx\")):\n #spatialite_file=project.features_database(user_id)\n\n observations=excel_parser(upload_file)\n for observation_layer in observations:\n title = observations[observation_layer]['title']\n points = observations[observation_layer]['observations']\n #first delete any other observationlayers with this name\n #deleted=ObservationLayer.query.filter_by(user_id=user_id,campaign_id=project.id,name=title).delete()\n safe_name = observation_layer\n deleted = ObservationLayer.query.filter_by(user_id=user_id,campaign_id=project.id,safe_name=safe_name).delete()\n #flash(\"Deleted %d existing layer\"%(deleted))\n #db.session.delete(observationlayers)\n db.session.commit()\n\n\n\n layer = ObservationLayer(user.id, project.id, title, safe_name)\n for point in points:\n layer.observations.append(Observation(point))\n\n db.session.add(layer)\n db.session.commit()\n\n\n #print observations\n #project.basemap_update(user_id)\n\n\n cu.update_lastactivity()\n\n flash(\"Upload and processing of file <code>%s</code> completed.\"%(f.filename),\"ok\")\n except Exception as e:\n flash(\"An error occurred during the upload. Hint: %s\"%(e),\"error\")\n return jsonify(status=\"OK\",message=\"File uploaded and processed!\"),200",
"def newFile(self):\r\n # Upload the file to the server\r\n file_info = self.uploadFile()\r\n\r\n if not file_info['id']:\r\n log.error(\"*** createProject.newFile: Failed to create file.\")\r\n return self.json({ 'success' : False })\r\n\r\n # Save an attachment record to the database\r\n attachment_id = mProject.createAttachment(self.db,\r\n media_id=file_info['id'],\r\n media_type=file_info['type'],\r\n title=file_info['name'])\r\n\r\n if attachment_id is None:\r\n log.error((\"*** createProject.newFile: Failed insert row for file \"\r\n \"with info %s into the attachments table.\" % file_info))\r\n return self.json({ 'success' : False })\r\n\r\n return self.json({\r\n 'id' : attachment_id,\r\n 'media_id' : file_info['id'],\r\n 'media_type' : file_info['type'],\r\n 'title' : file_info['name'],\r\n 'small_thumb_url' : mProject.getAttachmentThumbUrl(file_info['type'], file_info['id'], 'small'),\r\n 'medium_thumb_url' : mProject.getAttachmentThumbUrl(file_info['type'], file_info['id'], 'medium'),\r\n 'large_thumb_url' : mProject.getAttachmentThumbUrl(file_info['type'], file_info['id'], 'large'),\r\n 'success' : (file_info['id'] != None)\r\n #TODO add url\r\n })",
"def _submit(self, script):",
"def collection_post(self):\n if not self.validate_agreement_document('add'):\n return\n document = upload_file(self.request)\n self.context.documents.append(document)\n if save_tender(self.request):\n self.LOGGER.info('Created tender agreement document {}'.format(document.id),\n extra=context_unpack(self.request,\n {'MESSAGE_ID': 'tender_agreement_document_create'},\n {'document_id': document.id}))\n self.request.response.status = 201\n document_route = self.request.matched_route.name.replace(\"collection_\", \"\")\n self.request.response.headers['Location'] = \\\n self.request.current_route_url(_route_name=document_route, document_id=document.id, _query={})\n return {'data': document.serialize(\"view\")}",
"def submit(request):\n if not request.user.is_authenticated():\n return jingo.render(request, 'demos/submit_noauth.html', {})\n\n if request.method != \"POST\":\n initial = {}\n if 'tags' in request.GET:\n initial['challenge_tags'] = parse_tags(request.GET['tags'])\n form = SubmissionNewForm(initial=initial, request_user=request.user)\n else:\n form = SubmissionNewForm(\n request.POST, request.FILES, request_user=request.user)\n if form.is_valid():\n new_sub = form.save(commit=False)\n new_sub.creator = request.user\n new_sub.save()\n form.save_m2m()\n\n # TODO: Process in a cronjob?\n new_sub.process_demo_package()\n _invalidate_submission_listing_helper_cache()\n\n return HttpResponseRedirect(reverse(\n 'demos.views.detail', args=(new_sub.slug,)))\n\n return jingo.render(request, 'demos/submit.html', {'form': form})",
"def _run_submission_and_add_to_leaderboard(self, competition_zip_path, submission_zip_path, expected_submission_output, has_solutions=True, timeout=999):\n self.login(username=self.user.username, password='test')\n\n self.get(reverse('competitions:upload'))\n self.find('input[ref=\"file_input\"]').send_keys(os.path.join(self.test_files_dir, competition_zip_path))\n\n assert self.element_is_visible('div .ui.success.message')\n\n competition = self.user.competitions.first()\n comp_url = reverse(\"competitions:detail\", kwargs={\"pk\": competition.id})\n submission_full_path = os.path.join(self.test_files_dir, submission_zip_path)\n self.find(f'a[href=\"{comp_url}\"]').click()\n self.assert_current_url(comp_url)\n\n # This clicks the page before it loads fully, delay it a bit...\n self.wait(1)\n self.find('.item[data-tab=\"participate-tab\"]').click()\n\n self.circleci_screenshot(\"set_submission_file_name.png\")\n self.find('input[ref=\"file_input\"]').send_keys(submission_full_path)\n self.circleci_screenshot(name='uploading_submission.png')\n\n # The accordion shows \"Running submission.zip\"\n assert self.find_text_in_class('.submission-output-container .title', f\"Running {submission_zip_path}\", timeout=timeout)\n\n # Inside the accordion the output is being streamed\n self.find('.submission-output-container .title').click()\n assert self.find_text_in_class('.submission_output', expected_submission_output, timeout=timeout)\n\n # The submission table lists our submission!\n assert self.find('submission-manager#user-submission-table table tbody tr:nth-of-type(1) td:nth-of-type(2)').text == submission_zip_path\n\n # Check that md5 information was stored correctly\n submission_md5 = md5(f\"./src/tests/functional{submission_full_path}\")\n assert Submission.objects.filter(md5=submission_md5).exists()\n if has_solutions:\n assert Solution.objects.filter(md5=submission_md5).exists()\n\n # Get the submission ID for later comparison\n submission_id = int(self.find('submission-manager#user-submission-table table tbody tr:nth-of-type(1) td:nth-of-type(1)').text)\n\n # Add the submission to the leaderboard and go to results tab\n self.find('submission-manager#user-submission-table table tbody tr:nth-of-type(1) td:nth-of-type(5) span[data-tooltip=\"Add to Leaderboard\"]').click()\n self.find('.item[data-tab=\"results-tab\"]').click()\n\n # The leaderboard table lists our submission\n prediction_score = Submission.objects.get(pk=submission_id).scores.first().score\n assert Decimal(self.find('leaderboards table tbody tr:nth-of-type(1) td:nth-of-type(3)').text) == prediction_score",
"def file_upload(self):\r\n # INIT DATA\r\n data = {}\r\n\r\n # VESSEL ID\r\n vessel_id = request.args.get('vessel_id')\r\n\r\n # # GET DATA\r\n token = request.headers.get('token')\r\n userid = request.headers.get('userid')\r\n\r\n # CHECK TOKEN\r\n token_validation = self.validate_token(token, userid)\r\n\r\n if not token_validation:\r\n data[\"alert\"] = \"Invalid Token\"\r\n data['status'] = 'Failed'\r\n\r\n # RETURN ALERT\r\n return self.return_data(data)\r\n\r\n # RH_<VesselIMO>_<ImageID>\r\n parameters = self.couch_query.get_complete_values(\r\n vessel_id,\r\n \"PARAMETERS\"\r\n )\r\n\r\n # VESSEL IMO\r\n vessel_imo = parameters['PARAMETERS']['INFO']['IMO']\r\n\r\n file_upload = []\r\n filenames = request.files.getlist('upfile')\r\n for filename in filenames:\r\n\r\n try:\r\n\r\n file_name = filename.filename\r\n # ext = file_name.split(\".\")[-1]\r\n\r\n # if not self.allowed_file_type(file_name):\r\n\r\n # data[\"alert\"] = \"File Type Not Allowed!\"\r\n # data['status'] = 'Failed'\r\n # return self.return_data(data)\r\n\r\n except ImportError:\r\n\r\n data[\"alert\"] = \"No image!\"\r\n data['status'] = 'Failed'\r\n\r\n # RETURN ALERT\r\n return self.return_data(data)\r\n\r\n file_name = self.rename_file(vessel_id, file_name)\r\n\r\n vimg_data = {}\r\n vimg_data['vessel_id'] = vessel_id\r\n vimg_data['vessel_imo'] = vessel_imo\r\n vimg_data['file_name'] = file_name\r\n vimg_data['status'] = \"active\"\r\n vimg_data['created_on'] = time.time()\r\n\r\n # ADD FILE TO VESSEL FILE TABLE\r\n self.postgres.insert('vessel_file', vimg_data, 'vessel_file_id')\r\n\r\n # FILE NAME\r\n # file_name_upload = str(vessel_file_id) + \".\" + ext\r\n # upload_file = 'VesselFiles/' + \"RH_\" + vessel_imo + \"_\" + file_name_upload\r\n upload_file = 'VesselFiles/' + vessel_imo +\"/\" + file_name\r\n body = request.files['upfile']\r\n\r\n # SAVE TO S3\r\n url = \"\"\r\n if self.aws3.save_file(upload_file, body):\r\n url = self.aws3.get_url(upload_file)\r\n\r\n file_upload.append({\r\n \"filename\": file_name,\r\n \"url\": url\r\n })\r\n\r\n data[\"status\"] = \"ok\"\r\n data[\"data\"] = file_upload\r\n\r\n # RETURN\r\n return self.return_data(data)",
"def upload_grades(api: markusapi.Markus,\n assignment_id: int,\n gf_file: TextIO,\n criteria: Dict[str, Tuple[str, float]],\n complete=True):\n\n gbook = gb.GradeBook.load_gf_file(gf_file, 'utorid', True)\n\n groups = api.get_groups(assignment_id)\n\n for group in groups:\n group_id, utorid = group['id'], group['group_name']\n upload_grade(api, assignment_id, criteria,\n utorid, gbook, None, group_id,\n complete)",
"def add_to_research_file():\n # Validate the request body contains JSON\n if not request.is_json:\n return jsonify(\"Error\", \"No json data received\"), 400\n # Parse the JSON into a Python dictionary\n req = request.get_json(silent=True)\n return update_json_file(data=req,\n schema_name=\"set_research_file\",\n target_file_path=\"/data/jsons/research.json\",\n current_user_id=current_user.get_id(),\n full_file=False)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Decorator that closes the object on socket.error.
|
def socket_exception(func):
def read(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except socket.error:
logger.debug('ignoring socket exception', exc_info=True)
self.close()
return read
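A minimal usage sketch for the decorator, assuming only that the decorated class exposes the close() method the wrapper relies on; the Connection class here is hypothetical.

# Hypothetical example class; socket_exception and logger come from the module above.
import socket

class Connection(object):
    def __init__(self, sock):
        self.sock = sock
        self.closed = False

    def close(self):
        self.closed = True
        self.sock.close()

    @socket_exception
    def read(self, size=4096):
        # on socket.error the wrapper logs, calls self.close(), and returns None
        return self.sock.recv(size)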
|
[
"def socket_close(self, socket_name):\n msg = \"socket_close(\\\"{}\\\")\".format(socket_name)\n self._add_line_to_program(msg)",
"def close(self):\n try:\n self.client_socket.close()\n except Exception as e:\n pass",
"def close(self):\n _osutil.unlink_silent(self._path)\n self.realsocket.close()",
"def convert_stream_closed_error(obj, exc):\n if exc.real_error is not None:\n # The stream was closed because of an underlying OS error\n exc = exc.real_error\n if ssl and isinstance(exc, ssl.SSLError):\n if \"UNKNOWN_CA\" in exc.reason:\n raise FatalCommClosedError(\n \"in %s: %s: %s\" % (obj, exc.__class__.__name__, exc)\n )\n raise CommClosedError(\"in %s: %s: %s\" % (obj, exc.__class__.__name__, exc))\n else:\n raise CommClosedError(\"in %s: %s\" % (obj, exc))",
"def Close_Light_Socket(self):\n try:\n self.light_socket.close()\n print \"Successfully closed Light Socket\\n\"\n return\n except Exception as e:\n print \"Failure to close light socket. Exception of type %s and args = \\n\"%type(e).__name__, e.args \n return",
"def on_error(self):\n self.log.info('Network error: disconnected from %s' % (self.address,))\n # Inform upstream Network of error\n self.hooks.error()\n self.socket = None\n #AsyncDelayed(self.connect, 10)()",
"def Close_Shutter_Socket(self):\n try:\n self.shutter_socket.close()\n print \"Successfully closed Shutter Socket\\n\"\n return\n except Exception as e:\n print \"Failure to close shutter socket. Exception of type %s and args = \\n\"%type(e).__name__, e.args \n return",
"def exit(client_socket):\r\n client_socket.close()",
"def _do_close(self):\n if self._sock:\n call_hooks(\"modbus_rtu_over_tcp.RtuOverTcpMaster.before_close\", (self, ))\n self._sock.close()\n call_hooks(\"modbus_rtu_over_tcp.RtuOverTcpMaster.after_close\", (self, ))\n self._sock = None",
"def drop_socket(self) -> None:\n with suppress(ZMQError):\n if self.socket and not self.socket.closed:\n self.socket.close(0)\n self.socket = None",
"def close(self):\r\n\r\n if self in IOHandlers()._handler_pool.values():\r\n IOHandlers().unregister(self)\r\n\r\n self.sock.close()\r\n self.connected = False",
"def kill_socket(sock):\n if sock is None:\n return\n try:\n sock.shutdown(socket.SHUT_RDWR)\n except OSError:\n pass\n finally:\n sock.close()",
"def stopSocket():\n Client.socket.close()\n reactor.stop()",
"def connection_lost(self, _: Any) -> None:\n self.transport.close()",
"def close(self):\n try:\n self.close_impl()\n IpHandler.kill_socket(self.socket)\n finally:\n self.socket = None\n self.connected = IpHandler.CLOSED",
"def _safe_close(self, sess: session.Session):\n # pylint: disable=broad-except\n try:\n sess.close()\n except Exception:\n # Intentionally not logging to avoid user complaints that\n # they get cryptic errors. We really do not care that Close\n # fails.\n pass\n # pylint: enable=broad-except",
"def io_error_handle(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n \"\"\"A wrapper function.\"\"\"\n try:\n return func(*args, **kwargs)\n except (OSError, IOError) as err:\n print('{0}.{1} I/O error: {2}'.format(\n func.__module__, func.__name__, err))\n raise\n return wrapper",
"def check_closed(f):\n\n def g(self, *args, **kwargs):\n if self.closed:\n raise Error(f'{self.__class__.__name__} already closed')\n return f(self, *args, **kwargs)\n return g",
"def log_and_raise(self, http_status_code=None, reason=None):\n self._close_response_socket()\n super().log_and_raise(http_status_code, reason)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Performs select on the open connections.
|
def _select(self):
readable = [self.tsocket.handle.fileno(), self._read.fileno()]
writable = []
remaining = []
for i, connection in list(self.clients.items()):
if connection.is_readable():
readable.append(connection.fileno())
if connection.remaining or connection.received:
remaining.append(connection.fileno())
if connection.is_writeable():
writable.append(connection.fileno())
if connection.is_closed():
del self.clients[i]
if remaining:
return remaining, [], [], False
else:
return select.select(readable, writable, readable) + (True,)
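A rough sketch of how a serve loop inside the same server class might consume the four-tuple (readable, writable, exceptional, selected) returned by this method; the loop and the handler names below are assumptions for illustration only.

# Hypothetical consumer of _select(); handle_read/handle_write are assumed names.
def serve(self):
    while not self._stop:
        rset, wset, xset, selected = self._select()
        # when selected is False, rset holds connections that still have
        # buffered (remaining/received) data and no OS-level select was done
        for fileno in rset:
            self.handle_read(fileno)
        for fileno in wset:
            self.handle_write(fileno)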
|
[
"def _select(self):\n readable = []\n for sock in self.serverTransport:\n readable.append(sock.handle.fileno())\n writable = []\n print(\"33339999\")\n try:\n res = select.select(readable, writable, readable)\n except Exception as e:\n res = None\n print(\"333399991%s\" % [res, e])\n else:\n print(\"333399992%s\" % [res])\n return res",
"def doSelect(nfds: 'int', readfds: 'void *', writefds: 'void *', exceptfds: 'void *', usertimeout: 'timeval *') -> \"int\":\n return _coin.SoDB_doSelect(nfds, readfds, writefds, exceptfds, usertimeout)",
"def SoDB_doSelect(nfds: 'int', readfds: 'void *', writefds: 'void *', exceptfds: 'void *', usertimeout: 'timeval *') -> \"int\":\n return _coin.SoDB_doSelect(nfds, readfds, writefds, exceptfds, usertimeout)",
"def selectAllConn():\n with ar_qui.ar_undoChunkOpen('selectAll'):\n ar_selection.ar_selectAll()",
"def handle_incoming_connection(self):\n # The select function is used to poll the socket and check whether\n # there is an incoming connection to accept, preventing the read\n # from blocking the thread while waiting for a request\n if len(select.select([self.server], [], [], 0)[0]) > 0:\n conn, addr = self.server.accept()\n print(conn)\n self.open_sockets[addr[0]] = conn\n self.incoming_sockets[addr[0]] = conn",
"def _wait(self, conn):\n\n while 1:\n state = conn.poll()\n if state == psycopg2.extensions.POLL_OK:\n break\n elif state == psycopg2.extensions.POLL_WRITE:\n select.select([], [conn.fileno()], [])\n elif state == psycopg2.extensions.POLL_READ:\n select.select([conn.fileno()], [], [])\n else:\n raise psycopg2.OperationalError(\"poll() returned %s from _wait function\" % state)",
"def doSelect(self, nfds: 'int', readfds: 'void *', writefds: 'void *', exceptfds: 'void *', userTimeOut: 'timeval *') -> \"int\":\n return _coin.SoSensorManager_doSelect(self, nfds, readfds, writefds, exceptfds, userTimeOut)",
"def run(self):\n with selectors.DefaultSelector() as self.sel:\n for conn in self.connections:\n self.sel.register(\n conn['conn'],\n conn['events'],\n self._process,\n )\n self._main_loop()",
"def handle_request(self):\n\t\t# Support people who used socket.settimeout() to escape\n\t\t# handle_request before self.timeout was available.\n\t\ttimeout = self.socket.gettimeout()\n\t\tif timeout is None:\n\t\t\ttimeout = self.timeout\n\t\telif self.timeout is not None:\n\t\t\ttimeout = min(timeout, self.timeout)\n\t\tfd_sets = select.select([self], [], [], timeout)\n\t\tif not fd_sets[0]:\n\t\t\tself.handle_timeout()\n\t\t\treturn\n\t\tself._handle_request_noblock()",
"def cli_loop_select(self, timeout):\n parsed_some = True # requires thight loop, as it may be sending messages core<->cmd\n while parsed_some:\n parsed_some = False\n self.debug(\"Checking if data ready: %s // to %s\"%(repr(self.filenos()), timeout) )\n for n, clients_ready in enumerate(select.select(self.filenos(),[],[], timeout)):\n # self.debug(\"Clients ready[%s]: \"%n, clients_ready)\n for c in clients_ready:\n # self.debug(\"Data ready at %s\"%repr(c))\n parsed_some |= c.recv_and_parse()\n # self.debug(\"parsed_more\", parsed_some)\n timeout=0.1\n # self.debug(\"User input\", parsed_some)",
"def accept_incoming_connection(self):\n sock, addr = self.listen_sock.accept()\n log.debug('accepted connection from %s', str(addr))\n sock.setblocking(False)\n events = selectors.EVENT_READ | selectors.EVENT_WRITE\n self.selector.register(sock, events)\n self.connections[addr] = Connection(sock)",
"async def wait_until_connections_change(self) -> None:\n ...",
"def patch_win_selector():\n import select\n from selectors import SelectSelector\n\n def _select(self, r, w, _, timeout=None):\n try:\n r, w, x = select.select(r, w, w, timeout)\n except OSError as e:\n if hasattr(e, 'winerror') and e.winerror == 10038:\n # descriptors may already be closed\n return [], [], []\n raise\n else:\n return r, w + x, []\n\n SelectSelector._select = _select",
"def get_connection_poller():\r\n if hasattr(select, \"epoll\"):\r\n return select.epoll()\r\n else:\r\n return _Select()",
"def select(read_streams, timeout=0):\n\n write_streams = []\n exception_streams = []\n\n try:\n return builtin_select.select(\n read_streams,\n write_streams,\n exception_streams,\n timeout,\n )[0]\n except builtin_select.error as e:\n # POSIX signals interrupt select()\n if e[0] == errno.EINTR:\n return []\n else:\n raise e",
"def poll(self):\r\n\r\n connections = self.__list_of_sockets()\r\n read, write, error = select.select( connections+[self.__server], connections, connections, 0 )\r\n\r\n messages, connected, disconnected = [], [], []\r\n\r\n # ====== process all the connections that had errors ======\r\n for conn in error:\r\n print( \"error\", conn )\r\n\r\n # ====== process all the connections that we are able to send data to ===\r\n for uid, data in self.__connections.items():\r\n if data['socket'] in write: # if this is a socket that is ready to get some data\r\n while data['sendbuffer'] != []: # while we have some data to send\r\n msg = data['sendbuffer'][0]\r\n\r\n try:\r\n data['socket'].send( \"{}\\n\".format(msg).encode('utf8') )\r\n data['sendbuffer'].pop(0)\r\n except:\r\n break\r\n\r\n # ====== process all the connections that are trying to send us data ===\r\n for conn in read:\r\n if conn is self.__server: # new client connecting\r\n c, addr = conn.accept()\r\n \r\n self.__connections[self.__uid] = {'socket':c, 'address':addr, 'sendbuffer':[], 'recvbuffer':\"\"} # add to list of open self.__connections\r\n connected.append(self.__uid)\r\n self.__uid += 1\r\n\r\n else:\r\n msgbytes = conn.recv(1024)\r\n\r\n for uid, data in self.__connections.items():\r\n if data['socket'] == conn:\r\n if not msgbytes: # treat empty message as a disconnection\r\n disconnected.append( uid )\r\n\r\n else:\r\n \"\"\" for everything else only consider a message complete once a newline character has been recieved \"\"\"\r\n data['recvbuffer'] += msgbytes.decode('utf8')\r\n\r\n msgs = data['recvbuffer'].split('\\n')\r\n for msg in msgs[:-1]:\r\n messages.append( (uid,msg) )\r\n\r\n data['recvbuffer'] = msgs[-1]\r\n\r\n break\r\n\r\n # ====== remove any clients that have disconnected from the connections store ===\r\n for uid in disconnected:\r\n self.__connections[uid][\"socket\"].close()\r\n self.__connections.pop(uid)\r\n\r\n return messages, connected, disconnected",
"def _socket_read_thread_proc(self):\n def _win_select():\n while len(self._socket_dict) == 0:\n time.sleep(.001)\n if not self._is_running:\n return []\n return self._socket_selector.select()\n\n def _nix_select():\n return self._socket_selector.select()\n\n if sys.platform == 'win32':\n select_func = _win_select\n else:\n select_func = _nix_select\n\n buffer = bytearray(8192)\n buff_view = memoryview(buffer)\n buff_view[0:2] = CMD_DATA_PACKET\n while self._is_running:\n # TODO: On windows I can't do this when no sockets are registered\n events = select_func()\n for key, event in events:\n if event & selectors.EVENT_READ:\n try:\n bytes_read = key.fileobj.recv_into(buff_view[6:])\n except EOFError:\n # This socket has been closed, disconnect it\n self.disconnect_socket(key.data)\n continue\n length = bytes_read + 6 # add header to length\n len_bytes = pack('>H', length)\n id_bytes = pack('>H', key.data)\n buff_view[2:4] = len_bytes\n buff_view[4:6] = id_bytes\n try:\n self._handle.bulkWrite(self._out_endpoint, buff_view[:length])\n except usb1.USBError as err:\n eprint(\"Error writing data: %s\" % err)",
"def check_writable(socket):\n\n _, writable, _ = select([], [socket], [], 60)\n return writable == [socket]",
"def recv(self):\n \n ls = [self._sck_listen] + list(self._scks.keys())\n rr, wr, er = select.select(ls, [], ls)\n \n for r in er:\n if r == self._sck_listen:\n print(\"error in the bound socket. quitting.\")\n exit(0)\n print(\"error in socket {0} with id {1}.\".format(\n r, self._scks[r]['session_id']\n ))\n del self._scks[r]\n\n for r in rr:\n if r == self._sck_listen:\n # New client.\n client, addr = r.accept()\n self._scks[client] = dict([\n ('buffer', '')\n , ('pkt-length', 0)\n , ('session_id', -1)\n ])\n # TODO: Do we want to return something here?\n print(\"client connected.\")\n continue\n\n print(\"clients: {0}\".format(\", \".join([ str(s) for s in self._scks.keys() ])))\n\n client_data = self._scks[r]\n try:\n tmp = r.recv(1024)\n except socket.error as e:\n print(\"client socket error: {0}\".format(str(e)))\n del self._scks[r]\n continue\n if tmp == '':\n print(\"client disconnected.\")\n session_id = self._scks[r]['session_id']\n if len([ x for x in self._scks.values() \\\n if x['session_id'] == session_id ]) < 2:\n self._db.del_client(session_id)\n del self._scks[r]\n continue\n client_data['buffer'] += tmp\n\n #print(\"data from client {0}: \\\"{1}\\\".\".format(r, tmp))\n print(\"begin check.\")\n if client_data['pkt-length'] == 0:\n if len(client_data['buffer']) >= 4:\n # Packet length.\n print(\"read packet length.\")\n client_data['pkt-length'] = struct.unpack('<I'\n , client_data['buffer'][:4])[0]\n client_data['buffer'] = client_data['buffer'][4:]\n else:\n print(\"not enough bytes for packet length.\")\n # Not enough bytes for a packet length.\n continue\n if len(client_data['buffer']) < client_data['pkt-length']:\n # Not enough bytes for a packet.\n print(\"packet length known ({0}), not enough bytes for packet.\".format(client_data['pkt-length']))\n continue\n\n # Alright, we have a packet. Take it from the buffer.\n length = client_data['pkt-length']\n packet = client_data['buffer'][:length]\n client_data['buffer'] = client_data['buffer'][length:]\n client_data['pkt-length'] = 0\n\n self._last_session_id = client_data['session_id']\n self._last_socket = r\n\n return (client_data[\"session_id\"], packet)\n\n # Okey, we didn't find any this round.\n return self.recv()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Cancel a scheduled event or kill a process. This method takes one argument, which is the return value from sched() or process(). In either case, it's an opaque object to the user, which can be either an event or a process. If it's an event, the previously scheduled function will no longer be invoked at the expected time once the event is cancelled. Note that the method has no effect if the event has already happened. If the argument is a process, this is the same as killing the process with the kill() method.
|
def cancel(self, o):
if o is None:
errmsg = "simulator.cancel(o=None) requires event or process."
log.error(errmsg)
raise ValueError(errmsg)
elif isinstance(o, _Event):
try:
self._eventlist.cancel(o)
except Exception:
# the event is not in the event list; that's OK
#log.debug("[r%d] simulator '%s' cancel non-active event from now=%g" %
# (self._simulus.comm_rank, self.name[-4:], self.now, self.now))
pass
else:
#log.debug("[r%d] simulator '%s' cancel event at time=%g from now=%g" %
# (self._simulus.comm_rank, self.name[-4:], o.time, self.now))
self._runtime["cancelled_events"] += 1
elif isinstance(o, _Process):
self.kill(o)
else:
errmsg = "simulator.cancel(o=%r) not an event or process" % o
log.error(errmsg)
raise TypeError(errmsg)
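A small usage sketch, assuming the simulus package this simulator code appears to belong to (simulator(), sched(), cancel() and run() are taken to exist with these signatures).

# Hedged example: schedule a function, then cancel the returned event handle.
import simulus

sim = simulus.simulator()

def hello():
    print("hello at", sim.now)

e = sim.sched(hello, offset=10)   # opaque event handle from sched()
sim.cancel(e)                     # hello() will no longer fire at time 10
sim.run()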
|
[
"def cancel(self):\n assert self.running\n\n self._cancelled = True\n\n # in this section we callback on processes's deferreds, it's\n # callbacks need to know that conversion is cancelled\n self.stop_running_processes()\n self.reset_tasks_queue()\n\n self.stop_scheduler()",
"def cancel_capture_process(capture_name, cm):\n #kill scheduler process\n try:\n scheduler_pid = capture_scheduler_pids[capture_name]\n os.kill(scheduler_pid, signal.SIGTERM)\n except:\n print(\"Scheduled Capture process not found when trying to kill.\", capture_name, file=sys.stderr)\n #remove record from utility db\n cancel_capture(capture_name, cm)",
"def cancel_call(token):\n try:\n Scheduler.ourScheduledCalls.remove(token)\n except ValueError as ve:\n # pass\n Scheduler.ourLogger.error(\"Cancel call exception: %s\", ve)",
"def cancel(self, future: RuntimeFuture) -> None:\n ...",
"def cancel():\n\t\traise NotImplementedError()",
"def cancel_job(self, command):\n pass",
"def cancel(self):\n raise TypeError('Cannot cancel %s suspend' % self.__class__.__name__)",
"def event_cancelled(self, event, reason):\n pass",
"def cancel(self):\n self.cancelled = True\n cb, self._cancel_cb = self._cancel_cb, None\n if cb: cb()",
"def cancel(self):\n if self._jobid == -1:\n return\n\n self._term_all()\n\n # Set the time limit to the grace period and let wait() do the final\n # killing\n self._time_limit = (0, 0, self.cancel_grace_period)\n self.wait()",
"def cancel_job(self, app):\n raise NotImplementedError(\n \"Abstract method `LRMS.cancel_job()` called \"\n \"- this should have been defined in a derived class.\")",
"def cancel(self) -> None:\n if not self.called:\n canceller = self._canceller\n if canceller:\n canceller(self)\n else:\n # Arrange to eat the callback that will eventually be fired\n # since there was no real canceller.\n self._suppressAlreadyCalled = True\n if not self.called:\n # There was no canceller, or the canceller didn't call\n # callback or errback.\n self.errback(Failure(CancelledError()))\n elif isinstance(self.result, Deferred):\n # Waiting for another deferred -- cancel it instead.\n self.result.cancel()",
"def _cancel_timer(self) -> None:\n if self._timer_unsub is not None:\n self._timer_unsub()\n self._timer_unsub = None",
"def cancel(self, event):\r\n self.queue.remove(event)\r\n heapq.heapify(self.queue)",
"def _unschedule(self, func):\n clock = ScheduledEvent.clock\n clock.queue.remove(func)\n clock.scheduled_funcs[func] -= 1",
"def cancel_scheduled_actions(self):\n for process in self.scheduled_actions:\n process.kill()\n\n self.update_status_bar(\"All scheduled actions canceled.\")",
"def stop(self, app_handle: AppHandle) -> None:\n scheduler, scheduler_backend, app_id = self._scheduler_app_id(app_handle)\n with log_event(\"stop\", scheduler_backend, app_id):\n status = self.status(app_handle)\n if status is not None and not status.is_terminal():\n scheduler.cancel(app_id)",
"def cancel(self):\n if self._jobid == -1:\n return\n\n os_ext.run_command('scancel %s' % self._jobid,\n check=True, timeout=settings.job_submit_timeout)\n self._is_cancelling = True\n self.wait()",
"def do_cancel(self):\n return self.case_cancel()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Reschedule an event. One can change the time of a scheduled event using this method. When rescheduled, the previously scheduled function will be invoked at the new designated time. If the event has already happened, this method has no effect. This method takes at least one argument, which is the return value from sched(). Additionally, one can provide either an 'offset' time from now or an absolute time 'until', but not both. If both 'offset' and 'until' are omitted, the event is rescheduled for the current time. The time should never be earlier than the current time. This method returns the same event if it has been successfully rescheduled; otherwise, it returns None.
|
def resched(self, e, offset=None, until=None):
if not isinstance(e, _Event):
errmsg = "simulator.resched(e=%r) not an event" % e
log.error(errmsg)
raise TypeError(errmsg)
# figure out the event time
if until == None and offset == None:
# if both are missing, it's now!
e.time = self.now
elif until != None and offset != None:
errmsg = "simulator.resched(until=%r, offset=%r) duplicate specification" % (until, offset)
log.error(errmsg)
raise ValueError(errmsg)
elif offset != None:
if offset < 0:
errmsg = "simulator.resched(offset=%r) negative offset" % offset
log.error(errmsg)
raise ValueError(errmsg)
e.time = self.now + offset
elif until < self.now:
errmsg = "simulator.resched(until=%r) earlier than now (%r)" % (until, self.now)
log.error(errmsg)
raise ValueError(errmsg)
else: e.time = until
try:
self._eventlist.update(e)
#log.debug("[r%d] simulator '%s' reschedule event to time=%g from now=%g" %
# (self._simulus.comm_rank, self.name[-4:], e.time, self.now))
return e
except Exception:
# the event already happened as it's not in the event list
#log.debug("[r%d] simulator '%s' reschedule non-active event from now=%g" %
# (self._simulus.comm_rank, self.name[-4:], self.now))
return None
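And a matching sketch for resched(), under the same simulus assumptions as above.

# Hedged example: push a scheduled event back to a later time.
import simulus

sim = simulus.simulator()

def ping():
    print("ping at", sim.now)

e = sim.sched(ping, offset=5)
if sim.resched(e, until=20) is not None:   # fires at time 20 instead of 5
    sim.run()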
|
[
"def reschedule(self, schedtime: 'SbTime') -> \"void\":\n return _coin.SoTimerSensor_reschedule(self, schedtime)",
"def schedule_relative(self, duetime, action, state=None):\n\n scheduler = self\n seconds = GEventScheduler.normalize(duetime)\n if seconds == 0:\n return scheduler.schedule(action, state)\n\n disposable = SingleAssignmentDisposable()\n\n def interval():\n disposable.disposable = action(scheduler, state)\n\n log.debug(\"timeout: %s\", seconds)\n timer = [gevent.spawn_later(seconds, interval)]\n\n def dispose():\n # nonlocal timer\n timer[0].kill()\n\n return CompositeDisposable(disposable, Disposable(dispose))",
"def sched_reschedule(self, interval: int) -> None:\n if self.sched_ticket is not self.sched_queue.heap[0]:\n raise RuntimeError(\n \"Reschedule failed because this wasn't the active actor.\"\n \" Make sure this function wasn't called twice.\"\n )\n self.sched_ticket = self.__new_ticket(self.sched_queue.time + interval)\n heapq.heapreplace(self.sched_queue.heap, self.sched_ticket)",
"def schedule_relative(self, duetime, action, state=None):\n\n scheduler = self\n seconds = self.to_relative(duetime)/1000.0\n if not seconds:\n return scheduler.schedule(action, state)\n\n disposable = SingleAssignmentDisposable()\n\n def interval():\n disposable.disposable = action(scheduler, state)\n\n log.debug(\"timeout: %s\", seconds)\n timer = [gevent.spawn_later(seconds, interval)]\n\n def dispose():\n # nonlocal timer\n timer[0].kill()\n\n return CompositeDisposable(disposable, Disposable.create(dispose))",
"def schedule_one_task(start_time, function):\n now= time.localtime()\n if start_time > now: \n governor.enterabs(time.mktime(start_time), 1, function, ())",
"def _reschedule(self, session, delay=300.0):\n logger.debug('Reschedule the lookup in %s seconds' % delay)\n self._scheduler.schedule_task(self._start_process, delay=delay)",
"def reschedule(self):\n\n if self.recurrence_time and not self.recurrence_count:\n # persistent reoccuring task\n self.run_at = time() + self.recurrence_time\n return True\n elif self.recurrence_time and self.recurrence_count > 1:\n # no persistent reoccuring task\n self.run_at = time() + self.recurrence_time\n self.recurrence_count -= 1\n return True\n else:\n # one off task\n return False",
"async def _reschedule_reminder(self, reminder: dict) -> None:\n log.trace(f\"Cancelling old task #{reminder['id']}\")\n self.scheduler.cancel(reminder[\"id\"])\n\n log.trace(f\"Scheduling new task #{reminder['id']}\")\n self.schedule_reminder(reminder)",
"def create_scheduled_event(\n self,\n event_id,\n schedule_type,\n repeat,\n time_or_interval,\n interval_type,\n repeat_period,\n action,\n timeout=200,\n ):\n\n value = bytearray()\n value.append(event_id)\n value.append(schedule_type)\n value.append(repeat)\n value.append((time_or_interval >> 24) & 0xFF)\n value.append((time_or_interval >> 16) & 0xFF)\n value.append((time_or_interval >> 8) & 0xFF)\n value.append(time_or_interval & 0xFF)\n value.append(interval_type)\n value.append(repeat_period)\n value.append(action)\n\n command.create_set_command(\n command.PROTOCOL_COMMAND_CREATE_SCHEDULED_EVENT, value, 10\n )\n command.send_command()\n delay_ms(timeout)\n raw = command.receive_command(COMMAND_SIZE_FOR_UINT8)\n\n result = raw[PROTOCOL_HEADER_SIZE]\n return result",
"def schedule_recursive_with_relative(self, duetime, action):\n\n def action1(_action, this=None):\n def func(dt):\n this(_action, dt)\n _action(func)\n return self.schedule_recursive_with_relative_and_state(duetime, action1, state=action)",
"def schedule_recursive_with_absolute(self, duetime, action):\n\n def action1(_action, this=None):\n def func(dt):\n this(_action, dt)\n _action(func)\n return self.schedule_recursive_with_absolute_and_state(duetime=duetime,\n action=action1,\n state=action)",
"def reschedule(res):\n if self.running:\n self.logger.debug('Reschedule immediately')\n self.schedule_tasks()",
"def reevaluate_schedule(self, nexttime): \n future = self.vm_scheduler.get_future_reschedulable_leases()\n for l in future:\n # We can only reschedule leases in the following four states\n if l.get_state() in (Lease.STATE_PREPARING, Lease.STATE_READY, Lease.STATE_SCHEDULED, Lease.STATE_SUSPENDED_SCHEDULED):\n # For each reschedulable lease already scheduled in the\n # future, we cancel the lease's preparantion and\n # the last scheduled VM.\n vmrr = l.get_last_vmrr()\n self.preparation_scheduler.cancel_preparation(l)\n self.vm_scheduler.cancel_vm(vmrr)\n l.remove_vmrr(vmrr)\n if l.get_state() in (Lease.STATE_READY, Lease.STATE_SCHEDULED, Lease.STATE_PREPARING):\n l.set_state(Lease.STATE_PENDING)\n elif l.get_state() == Lease.STATE_SUSPENDED_SCHEDULED:\n l.set_state(Lease.STATE_SUSPENDED_PENDING)\n\n # At this point, the lease just looks like a regular\n # pending lease that can be handed off directly to the\n # __schedule_lease method.\n # TODO: We should do exception handling here. However,\n # since we can only reschedule best-effort leases that were\n # originally schedule in the future, the scheduling function \n # should always be able to schedule the lease (worst-case \n # scenario is that it simply replicates the previous schedule)\n self.__schedule_lease(l, nexttime)",
"def modify_time(cal: Calendar, modify: dict) -> Calendar:\n\n if (\"time\" in modify) and (\"shift\" in modify[\"time\"]):\n shift = modify[\"time\"][\"shift\"]\n\n year = 0 if not (\"year\" in shift) else shift[\"year\"]\n month = 0 if not (\"month\" in shift) else shift[\"month\"]\n day = 0 if not (\"day\" in shift) else shift[\"day\"]\n hour = 0 if not (\"hour\" in shift) else shift[\"hour\"]\n minute = 0 if not (\"minute\" in shift) else shift[\"minute\"]\n\n for event in cal.events:\n event.end = event.end.shift(years=year, months=month, days=day, hours=hour, minutes=minute)\n event.begin = event.begin.shift(years=year, months=month, days=day, hours=hour, minutes=minute)\n\n return cal",
"def registerDailyAt(*args):\n try:\n if len(args) == 1:\n inHr, inMin, inSec = args[0]\n elif len(args) == 3:\n inHr, inMin, inSec = args[0:3]\n else:\n raise TypeError(\"registerDailyAt expects a tuple (hr, min, sec) \" \\\n \"or three arguments!\")\n if inHr < 0 or inHr > 23:\n raise TypeError(\"Invalid hour value\")\n if inMin < 0 or inMin > 59:\n raise TypeError(\"Invalid minute value\")\n if inSec < 0 or inSec > 59:\n raise TypeError(\"Invalid seconds value\")\n except:\n (etype, value, tb) = sys.exc_info()\n raise TypeError(value)\n\n def decorator(func):\n def _wrapper():\n # Call back every hour until the target is within 1hr, then\n # try and call back exactly\n left = min(_wrapper._target-time.time(),3600)\n if left <= 0:\n # Time has expired, run immediately\n wfunc = _wrapper._func;\n log_info(\"%s is being executed now as requested\" % \\\n (wfunc.__name__))\n try:\n wfunc()\n except:\n (etype, value, tb) = sys.exc_info()\n log_error(\"Scheduled function '%s' failed!\" % value, \\\n (etype, value, tb))\n # Reschedule tommorrow\n _wrapper._target += (60*60*24) # 60s * 60m * 24hr\n log_debug(\"%s rescheduled for %s\" % \\\n (wfunc.__name__, time.ctime(_wrapper._target)))\n # Check again in an hour\n reactor.callLater(3600, _wrapper)\n else:\n reactor.callLater(left, _wrapper)\n\n # Store function in the wrapper function\n _wrapper._func = func\n\n # Calculate target time today\n (y,m,d,h,minute,s,a,b,c)=time.localtime()\n _wrapper._target = time.mktime((y, m, d, inHr, inMin, inSec, 0, 0, -1))\n if _wrapper._target < time.time():\n # Already passed today, target tommorrow\n _wrapper._target += (60*60*24) # 60s * 60m * 24hr\n log_info(\"%s scheduled for %s\" % \\\n (func.__name__, time.ctime(_wrapper._target)))\n # Call wrapper to schedule\n _wrapper()\n # Done\n return _wrapper\n return decorator",
"def reschedule(self, interval: int) -> None:\n if self.ticket is None:\n # Actor has died during their own turn.\n assert not self.fighter.alive\n return\n self.ticket = self.scheduler.reschedule(self.ticket, interval)",
"def schedule_event(delay, func, repeat=False):\n def thunk():\n #\n # Pygame 1.9 update\n #\n # event = Event(USEREVENT, time = timestamp())\n ts = Scheduler.timestamp()\n event = Event(USEREVENT, {'time': ts})\n CoreUtilities.add_modifiers(event)\n func(event)\n\n Scheduler.schedule_call(delay, thunk, repeat)",
"async def test_set_reschedule_deadline_hours_before_new_time():\n cog, mock_bot, tournament = init_mocks()\n hours = 2\n assert tournament.reschedule_deadline_hours_before_new_time != hours\n await cog.set_reschedule_deadline_hours_before_new_time(cog, tosurnament_mock.CtxMock(mock_bot), hours=hours)\n mock_bot.session.update.assert_called_once_with(\n tosurnament_mock.Matcher(Tournament(reschedule_deadline_hours_before_new_time=hours))\n )",
"def next(self) -> None:\n next_ticket = self.peek()\n self.time, _, actor, _ = next_ticket\n actor.sched_on_turn(next_ticket)\n if actor.sched_ticket is next_ticket:\n raise RuntimeError(\n f\"Schedulable object {actor} did not update its schedule.\"\n \"\\nTo reschedule this object call `self.sched_reschedule`\"\n \"\\nOr if done set `self.sched_ticket = None`\"\n \" to remove it from the schedule.\"\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the currently running process, or None if we are not in a process context.
|
def cur_process(self):
assert self._theproc is None or \
self._theproc.state == _Process.STATE_RUNNING
return self._theproc
|
[
"def _as_process(self):\n pid = self.pid\n if not pid:\n raise self.NotStarted()\n return psutil.Process(pid)",
"def get_instance(self):\n if not self.is_server_active():\n self._log('The TCPServer instance is not running!')\n return self._process",
"def me(self):\r\n myPid = os.getpid()\r\n myProcess = psutil.Process(myPid)\r\n return self.parseProcess(myProcess)",
"def get_process(self, pid):\r\n with self._lock:\r\n return self._get_pid(pid)",
"def get_process_id() -> int:\n return os.getpid()",
"def current():\n\n return Context.__current_context",
"def get_parent_pid(self):\n if not self.h_process:\n self.open()\n\n NT_SUCCESS = lambda val: val >= 0\n\n pbi = (c_int * 6)()\n size = c_int()\n\n # Set return value to signed 32bit integer.\n NTDLL.NtQueryInformationProcess.restype = c_int\n\n ret = NTDLL.NtQueryInformationProcess(self.h_process,\n 0,\n byref(pbi),\n sizeof(pbi),\n byref(size))\n\n if NT_SUCCESS(ret) and size.value == sizeof(pbi):\n return pbi[5]\n\n return None",
"def get_current_task():\n try:\n if hasattr(asyncio, \"current_task\"):\n # Python 3.7 and up\n return asyncio.current_task()\n else:\n # Python 3.6\n return asyncio.Task.current_task()\n except RuntimeError:\n return None",
"def getDefaultProcess():\n global _defaultProcess\n if _defaultProcess is None:\n raise TypeError(\"There is no default process define, use setDefaultProcess to set\")\n return _defaultProcess",
"def get_current_context():\n\n click_core_ctx = click.get_current_context()\n if click_core_ctx:\n return click_core_ctx.find_object(Context) or click_core_ctx.ensure_object(Context)\n\n return None",
"def get_worker(self):\n \n if self.parent:\n return self.parent.get_worker()\n else:\n return None",
"def getpid(self):\n if self._driver_process:\n if self.poll():\n return self._driver_process.pid\n else:\n log.warn(\"Driver process found, but poll failed for pid %s\" % self._driver_process.pid)\n else:\n return None",
"def get_current_status(self) -> Optional[EngineStatus]:\n current_id = self._engine_store.current_run_id\n if current_id is not None:\n return self._engine_store.engine.state_view.commands.get_status()\n\n return None",
"def get_current_script():\n \n return Script.script_stack[-1]",
"def current_window(self) -> Optional[wrappers.Window]:\n result = xlib.get_window_property(\n display=self.dpy, window=self.root, property=self.atom[\"_NET_ACTIVE_WINDOW\"], type=self.atom[\"WINDOW\"]\n )\n return None if not result else self.create_window(window_id=cast(List[xlib.Window], result)[0])",
"def status(self):\r\n if not self._pprocess:\r\n return\r\n return self._pprocess.status",
"def get_worker_id() -> Optional[int]:\n proc_name = mp.current_process().name\n if \"PoolWorker\" in proc_name:\n worker_id = int(proc_name[(proc_name.find('-') + 1):])\n return worker_id\n return None",
"def get_current_worker_info():\n return core.rpc_get_current_worker_info()",
"def isMainProcess():\n import multiprocessing\n return not type(multiprocessing.current_process()) == multiprocessing.Process"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check whether the given process has terminated.
|
def terminated(self, p):
if not isinstance(p, _Process):
errmsg = "simulator.terminated(p=%r) not a process" % p
log.error(errmsg)
raise TypeError(errmsg)
return p.state == _Process.STATE_TERMINATED
|
[
"def is_process_alive(pid):\n try:\n os.kill(pid, 0)\n except OSError:\n # no such process or process is already dead\n return False\n else:\n return True",
"def is_process_running(pid: int):\n try:\n os.kill(pid, 0)\n return True\n except OSError as ex:\n if ex.errno == errno.ESRCH:\n return False\n else:\n raise",
"def is_terminated(self): # -> Union[bool, None]\n\n if self._process:\n return self.returncode == self.PROCESS_IS_TERMINATED_CODE\n return None",
"def check_alive(self):\n returncode = self._process.poll()\n if returncode is not None:\n raise RuntimeError(\"%s unexpectedly quit\" % self._name)",
"def kill_process(process):\n while True:\n process.terminate()\n if process.is_alive() == False:\n break",
"def reap_process(pid):\n if pid_is_dead(pid):\n return True\n\n try:\n is_dead, _ = os.waitpid(pid, os.WNOHANG)\n except OSError, err:\n if err.errno == errno.ECHILD:\n return False # No child processes.\n raise\n return is_dead",
"def check_timeout(self):\n while self.runtime.get() <= self.opts.process_timeout:\n if not any(p.is_alive() for p in self.procs): # Check if any process has exited\n break\n time.sleep(1)\n else:\n self.logger.info(\"Timed out.\")\n self.terminate_early()",
"def check_process_exit(*args) -> \"int\":\n return _ida_pro.check_process_exit(*args)",
"def _is_running(process):\n with hide('output'):\n s = run('ps auwx')\n for x in s.split('\\n'):\n if re.search(process, x):\n print '%s running' % process\n return True\n\n return False",
"def is_finished(self): # -> Union[bool, None]\n if self._process:\n return self._process.returncode is not None\n return None",
"def is_parent_process_alive():\n parent_pid = os.getppid()\n if psutil is None:\n try:\n os.kill(parent_pid, 0)\n except OSError:\n return False\n else:\n return True\n else:\n try:\n return psutil.pid_exists(parent_pid)\n except (AttributeError, KeyboardInterrupt, Exception):\n return False",
"def check_if_process_is_running(process_name: str) -> bool:\n\n # Iterate over the all the running process\n for proc in psutil.process_iter():\n try:\n # Check if process name contains the given name string.\n if process_name.lower() in proc.name().lower():\n return True\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n return False",
"def is_stopped(self):\n\n stopped = False\n if self.process:\n poll = self.process.poll()\n if poll is not None:\n stopped = True\n if poll < 0:\n self.logger.info(\"You talking to me?\", -poll)\n return stopped",
"def is_running(self):\n # return False if the process is not started yet\n if not self._proc:\n return False\n # return False if there is a return code from the main process\n return self._proc.poll() is None",
"def wait_for_pid_exit(pid: int):\n wait_until(lambda: not psutil.pid_exists(pid))",
"def _check_process_is_running(self, name: str):\n for proc in psutil.process_iter():\n try:\n if name.lower() in proc.name().lower():\n return True\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n return False",
"def check_running(self):\n all_running = True\n\n to_delete = []\n for process in self.__processes:\n code = process.poll()\n if code is not None:\n # Process exited.\n logger.error(\"Process %d exited unexpectedly with return code %d\" % \\\n (process.pid, code))\n\n all_running = False\n to_delete.append(process)\n\n for process in to_delete:\n self.__processes.remove(process)\n\n return all_running",
"def stop_process(pid: int, block: bool=True) -> bool:\n if not pid:\n return False\n\n proc = psutil.Process(pid)\n proc.terminate()\n if block:\n proc.wait()\n return True",
"def verify_valid_gdb_subprocess(gdb_process: Popen) -> None:\n if not gdb_process:\n raise NoGdbProcessError('gdb process is not attached')\n\n elif gdb_process.poll() is not None:\n raise NoGdbProcessError(\n 'gdb process has already finished with return code: %s'\n % str(gdb_process.poll())\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the priority of a process. A process may be provided as the only argument; if it is omitted, the current process is assumed.
|
def get_priority(self, p=None):
if p is not None:
# get priority of another process
if not isinstance(p, _Process):
errmsg = "simulator.get_priority(p=%r) not a process" % p
log.error(errmsg)
raise TypeError(errmsg)
else:
# get the priority of the current process
p = self.cur_process()
if p is None:
errmsg = "simulator.get_priority() outside process context"
log.error(errmsg)
raise RuntimeError(errmsg)
return p.get_priority()
|
[
"def getPriorityCode(priority):\n\treturn getProcessPriorityCodes()[priority]",
"def getProcessPriorityCodes():\n\tpriorities = {}\n\tif onPosix():\n\t\t# -20 to 20, -20 being highest priority\n\t\tpriorities[-2] = 18\n\t\tpriorities[-1] = 9\n\t\tpriorities[0] = 0\n\t\tpriorities[1] = -9\n\t\tpriorities[2] = -18\n\t\tpriorities[3] = -20\n\telse:\n\t\tpriorities[-2] = psutil.IDLE_PRIORITY_CLASS\n\t\tpriorities[-1] = psutil.BELOW_NORMAL_PRIORITY_CLASS\n\t\tpriorities[0] = psutil.NORMAL_PRIORITY_CLASS\n\t\tpriorities[1] = psutil.ABOVE_NORMAL_PRIORITY_CLASS\n\t\tpriorities[2] = psutil.HIGH_PRIORITY_CLASS\n\t\tpriorities[3] = psutil.REALTIME_PRIORITY_CLASS\n\treturn priorities",
"def job_priority(conf):\n # type: (dict) -> int\n pri = _kv_read(conf, 'priority', 0)\n if pri < -1000 or pri > 1000:\n raise ValueError('job priority is invalid: {}'.format(pri))\n return pri",
"def vrrp_priority(self):\n return self.data.get('vrrp_priority')",
"def rule_priority(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"rule_priority\")",
"def rule_priority(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"rule_priority\")",
"def process_set_priority(process, priority):\r\n priority_mapping = {'idle': 0, 'low': 0,\r\n 'below_normal': 1,\r\n 'normal': 2,\r\n 'above_normal': 3,\r\n 'high': 4,\r\n 'realtime': 5} # Use with caution, may make the system unstable\r\n return _audll.AU3_ProcessSetPriority(unicode(process), priority_mapping[priority])",
"def set_current_process_priority(priority=PROCESS_NORMAL_PRIORITY_CLASS):\n \n # Get our pid\n pid = os.getpid()\n \n # Get process handle\n handle = get_process_handle(pid)\n\n # Try to change the priority\n status = _set_process_priority(handle, priority)\n\n # Close Process Handle\n _close_handle(handle)\n\n # Return the status of this call\n if status == 0:\n return False\n else:\n return True",
"def boot_priority(self):\n ret = self._get_attr(\"bootPriority\")\n return ret",
"def scale_set_priority(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"scale_set_priority\")",
"def get_process(self, pid):\r\n with self._lock:\r\n return self._get_pid(pid)",
"def priority(self):\n return self._group_assignment_data.get('priority')",
"def get_process_pid(process_name):\n\tfor proc in psutil.process_iter():\n\t\tif proc.name() == process_name:\n\t\t\treturn proc.pid",
"def getVpgPriority(self, vpgid):\n\n return requests.get(self.zvmip + self.endPoint + '/' + vpgid + '/priority', headers=self.headerwithkey, verify=False)",
"def GetPriority(self, fname):\n priority_list = ['-elf', '-unknown-linux-gnu', '-linux',\n '-none-linux-gnueabi', '-uclinux', '-none-eabi',\n '-gentoo-linux-gnu', '-linux-gnueabi', '-le-linux', '-uclinux']\n for prio in range(len(priority_list)):\n if priority_list[prio] in fname:\n return PRIORITY_CALC + prio\n return PRIORITY_CALC + prio",
"def set_priority(self, pid=None, priority=0):\n\t\tpriority_classes=[win32process.IDLE_PRIORITY_CLASS,\n\t\t\t\t\t\t win32process.BELOW_NORMAL_PRIORITY_CLASS,\n\t\t\t\t\t\t win32process.NORMAL_PRIORITY_CLASS,\n\t\t\t\t\t\t win32process.ABOVE_NORMAL_PRIORITY_CLASS,\n\t\t\t\t\t\t win32process.HIGH_PRIORITY_CLASS,\n\t\t\t\t\t\t win32process.REALTIME_PRIORITY_CLASS]\n\t\tif pid == None:\n\t\t\tpid=self.pid\n\t\thandle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)\n\t\twin32process.SetPriorityClass(handle, priority_classes[priority])",
"def setPriority(self, pid = None, priority = 2):\n\n import win32api, win32process, win32con\n\n priorityClasses = [win32process.IDLE_PRIORITY_CLASS,\n win32process.BELOW_NORMAL_PRIORITY_CLASS,\n win32process.NORMAL_PRIORITY_CLASS,\n win32process.ABOVE_NORMAL_PRIORITY_CLASS,\n win32process.HIGH_PRIORITY_CLASS,\n win32process.REALTIME_PRIORITY_CLASS]\n\n pid = win32api.GetCurrentProcessId()\n handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)\n win32process.SetPriorityClass(handle, priorityClasses[priority])\n win32process.SetProcessAffinityMask(handle, 1)",
"def priority(priority):\n if isinstance(priority, int):\n return priority\n elif str(priority) == priority:\n if priority not in PRIORITY_NAMES:\n raise ValueError(\"Unknown priority: %r\" % priority)\n return PRIORITY_NAMES[priority]\n else:\n raise TypeError(\n \"Priority not an integer or a valid string: {0}\".format(\n priority))",
"def getPriority(self) -> \"uint32_t\":\n return _coin.SoDelayQueueSensor_getPriority(self)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create and return a trap for interprocess communication.
|
def trap(self):
return Trap(self)
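
A usage sketch, for illustration only: the code above confirms that trap() returns a Trap and (from the event-processing code later in this dump) that a trap is fired with trigger(); the sim.process, sim.wait, sim.sleep, and sim.run calls below are assumed names for the surrounding simulator API and may differ in the actual library.

# Hedged usage sketch; process creation, wait, sleep, and run are assumed API names.
import simulus

sim = simulus.simulator()
barrier = sim.trap()              # shown above: trap() returns a Trap

def waiter():
    sim.wait(barrier)             # assumed: block this process until the trap fires
    print("released at", sim.now)

def releaser():
    sim.sleep(5)                  # assumed: advance this process by 5 time units
    barrier.trigger()             # a trap is fired with trigger()

sim.process(waiter)               # assumed process-creation API
sim.process(releaser)
sim.run()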
|
[
"def traps(self, argv):\n from pycopia import asyncio\n from pycopia.SNMP import traps\n traps.get_dispatcher(self._trap_handler)\n asyncio.start_sigio()",
"def mkTunnel(id):\n logging.debugv(\"functions/linux.py->mkTunnel(id)\", [id])\n logging.info(\"Creating tunnel with id %s\" % id)\n cmd = [locations.OPENVPN, '--config', locations.VPNTEMPLATE, '--dev', 'tap'+str(id), \\\n '--writepid', locations.OPENVPNPID]\n logging.debug(\" \".join(cmd))\n pid = os.fork()\n logging.debug(\"WATCHME PID: %s\" % str(pid))\n if pid == 0:\n os.setsid()\n fd = plock(locations.LOCKFILE)\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n buffer = \"\"\n while p.poll() == None:\n out = p.stdout.read(1)\n if out:\n if out != \"\\n\":\n buffer += out\n else:\n logging.debug(buffer[25:])\n regex = \".*\" + locations.OPENVPN_INIT_RDY + \".*\"\n if tools.chkReg(regex, buffer[25:]):\n punlock(fd, locations.LOCKFILE)\n logging.debug(\"SYSEXIT\")\n import sys\n sys.exit()\n buffer = \"\"\n if p.poll() > 0:\n logging.error(\"%s died with error code %s, see log for details\" % (cmd[0], p.poll()))\n else:\n logging.debug(\"%s died with error code %s\" % (cmd[0], p.poll()))\n import sys\n sys.exit(0)\n else:\n logging.debug(\"Parent waiting for child...\")\n while os.path.exists(locations.LOCKFILE):\n time.sleep(1)\n logging.debug(\"Parent continuing...\")\n os.wait()",
"def __init__(self, python=sys.executable):\r\n cmd = '%s -u -c \"exec input()\"' % python\r\n super(PopenGateway, self).__init__(cmd)",
"def registerTrap( \n\t\tself, trapHandler,\n\t):\n\t\tspecifics = self._trapRegistry.get( trapHandler.genericType)\n\t\tif specifics is None:\n\t\t\tself._trapRegistry[ trapHandler.genericType ] = specifics = {}\n\t\tmanagers = specifics.get( trapHandler.specificType )\n\t\tif managers is None:\n\t\t\tspecifics[ trapHandler.specificType ] = managers = {}\n\t\tmanagers[ trapHandler.managerIP ] = trapHandler\n\t\treturn trapHandler",
"def trap_all(self):\n try:\n self.trap_out()\n except OutputTrapError:\n pass\n try:\n self.trap_err()\n except OutputTrapError:\n pass",
"def test_on_sigterm():\n # None is acceptable\n on_sigterm(None)\n\n def mock_sigterm_handler():\n pass\n\n on_sigterm(mock_sigterm_handler)\n\n # Non-callable is not\n with pytest.raises(ValueError):\n on_sigterm('non-callable')\n\n # TODO(jacobperron): implement a functional test by using subprocess.Popen",
"def expect(c, subroutine, pty=True):\n c.run(\"python signaling.py {0}\".format(subroutine), pty=pty)",
"def _turn_sigterm_into_systemexit():\n\n def handle_term(signo, frame):\n raise SystemExit\n signal.signal(signal.SIGTERM, handle_term)",
"def user32_CreateInteractionContext(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"interactionContext\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)",
"def sendTrap(\n\t\tself, genericType=6, specificType=0, \n\t\tpdus=None,\n\t):\n\t\tfor (generic,specific,values) in self.findTrapHandlers(\n\t\t\tgenericType, specificType\n\t\t):\n\t\t\tfor handler in values.values():\n\t\t\t\t# XXX need to be able to add more data!\n\t\t\t\thandler.send( \n\t\t\t\t\tself, \n\t\t\t\t\tgenericType=genericType, \n\t\t\t\t\tspecificType=specificType, \n\t\t\t\t\tpdus=pdus \n\t\t\t\t)",
"def spawn(mycommand,env={},raw_exit_code=False,opt_name=None,fd_pipes=None,returnpid=False,\\\r\n\t uid=None,gid=None,groups=None,umask=None,logfile=None,path_lookup=True,\\\r\n\t selinux_context=None, raise_signals=False, func_call=False):\r\n\r\n\r\n\tmyc=''\r\n\tif not func_call:\r\n\t\tif type(mycommand)==types.StringType:\r\n\t\t\tmycommand=mycommand.split()\r\n\t\tmyc = mycommand[0]\r\n\t\tif not os.access(myc, os.X_OK):\r\n\t\t\tif not path_lookup:\r\n\t\t\t\treturn None\r\n\t\t\tmyc = find_binary(myc)\r\n\t\t\tif myc == None:\r\n\t\t\t\treturn None\r\n mypid=[]\r\n\tif logfile:\r\n\t\tpr,pw=os.pipe()\r\n\t\tmypid.extend(spawn(('tee','-i','-a',logfile),returnpid=True,fd_pipes={0:pr,1:1,2:2}))\r\n\t\tretval=os.waitpid(mypid[-1],os.WNOHANG)[1]\r\n\t\tif retval != 0:\r\n\t\t\t# he's dead jim.\r\n\t\t\tif raw_exit_code:\r\n\t\t\t\treturn retval\r\n\t\t\treturn process_exit_code(retval)\r\n\r\n\t\tif fd_pipes == None:\r\n\t\t\tfd_pipes={}\r\n\t\t\tfd_pipes[0] = 0\r\n\t\tfd_pipes[1]=pw\r\n\t\tfd_pipes[2]=pw\r\n\r\n\tif not opt_name:\r\n\t\topt_name = mycommand[0]\r\n\tmyargs=[opt_name]\r\n\tmyargs.extend(mycommand[1:])\r\n\tglobal spawned_pids\r\n\tmypid.append(os.fork())\r\n\tif mypid[-1] != 0:\r\n\t\t#log the bugger.\r\n\t\tspawned_pids.extend(mypid)\r\n\r\n\tif mypid[-1] == 0:\r\n\t\tif func_call:\r\n\t\t\tspawned_pids = []\r\n\r\n\t\t# this may look ugly, but basically it moves file descriptors around to ensure no\r\n\t\t# handles that are needed are accidentally closed during the final dup2 calls.\r\n\t\ttrg_fd=[]\r\n\t\tif type(fd_pipes)==types.DictType:\r\n\t\t\tsrc_fd=[]\r\n\t\t\tk=fd_pipes.keys()\r\n\t\t\tk.sort()\r\n\r\n\t\t\t#build list of which fds will be where, and where they are at currently\r\n\t\t\tfor x in k:\r\n\t\t\t\ttrg_fd.append(x)\r\n\t\t\t\tsrc_fd.append(fd_pipes[x])\r\n\r\n\t\t\t# run through said list dup'ing descriptors so that they won't be waxed\r\n\t\t\t# by other dup calls.\r\n\t\t\tfor x in range(0,len(trg_fd)):\r\n\t\t\t\tif trg_fd[x] == src_fd[x]:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tif trg_fd[x] in src_fd[x+1:]:\r\n\t\t\t\t\tnew=os.dup2(trg_fd[x],max(src_fd) + 1)\r\n\t\t\t\t\tos.close(trg_fd[x])\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\twhile True:\r\n\t\t\t\t\t\t\tsrc_fd[s.index(trg_fd[x])]=new\r\n\t\t\t\t\texcept SystemExit, e:\r\n\t\t\t\t\t\traise\r\n\t\t\t\t\texcept:\r\n\t\t\t\t\t\tpass\r\n\r\n\t\t\t# transfer the fds to their final pre-exec position.\r\n\t\t\tfor x in range(0,len(trg_fd)):\r\n\t\t\t\tif trg_fd[x] != src_fd[x]:\r\n\t\t\t\t\tos.dup2(src_fd[x], trg_fd[x])\r\n\t\telse:\r\n\t\t\ttrg_fd=[0,1,2]\r\n\r\n\t\t# wax all open descriptors that weren't requested be left open.\r\n\t\tfor x in range(0,max_fd_limit):\r\n\t\t\tif x not in trg_fd:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tos.close(x)\r\n except SystemExit, e:\r\n raise\r\n except:\r\n pass\r\n\r\n # note this order must be preserved- can't change gid/groups if you change uid first.\r\n if selinux_capable and selinux_context:\r\n import selinux\r\n selinux.setexec(selinux_context)\r\n if gid:\r\n os.setgid(gid)\r\n if groups:\r\n os.setgroups(groups)\r\n if uid:\r\n os.setuid(uid)\r\n if umask:\r\n os.umask(umask)\r\n\r\n try:\r\n #print \"execing\", myc, myargs\r\n if func_call:\r\n # either use a passed in func for interpretting the results, or return if no exception.\r\n # note the passed in list, and dict are expanded.\r\n if len(mycommand) == 4:\r\n os._exit(mycommand[3](mycommand[0](*mycommand[1],**mycommand[2])))\r\n try:\r\n mycommand[0](*mycommand[1],**mycommand[2])\r\n except Exception,e:\r\n print 
\"caught exception\",e,\" in forked func\",mycommand[0]\r\n sys.exit(0)\r\n\r\n\t\t\t#os.execvp(myc,myargs)\r\n os.execve(myc,myargs,env)\r\n except SystemExit, e:\r\n raise\r\n except Exception, e:\r\n if not func_call:\r\n raise MetroError, str(e)+\":\\n \"+myc+\" \"+string.join(myargs)\r\n print \"func call failed\"\r\n\r\n # If the execve fails, we need to report it, and exit\r\n # *carefully* --- report error here\r\n os._exit(1)\r\n sys.exit(1)\r\n return # should never get reached\r\n\r\n # if we were logging, kill the pipes.\r\n if logfile:\r\n os.close(pr)\r\n os.close(pw)\r\n\r\n if returnpid:\r\n return mypid\r\n\r\n # loop through pids (typically one, unless logging), either waiting on their death, or waxing them\r\n # if the main pid (mycommand) returned badly.\r\n while len(mypid):\r\n\t\ttry:\r\n \tretval=os.waitpid(mypid[-1],0)[1]\r\n\t\texcept KeyboardInterrupt:\r\n\t\t\tprint \"Keyboard interrupt detected, aborting script...\"\r\n\t\t\tos.kill(mypid[-1],signal.SIGINT)\r\n\t\t\tcontinue\r\n if retval != 0:\r\n cleanup(mypid[0:-1],block_exceptions=False)\r\n # at this point we've killed all other kid pids generated via this call.\r\n # return now.\r\n if raw_exit_code:\r\n return retval\r\n return process_exit_code(retval,throw_signals=raise_signals)\r\n else:\r\n mypid.pop(-1)\r\n cleanup(mypid)\r\n return 0",
"def process_get(self, pid):\n if not isinstance(pid, baseinteger):\n raise TypeError(\"pid can only be an instance of type baseinteger\")\n guest_process = self._call(\"processGet\",\n in_p=[pid])\n guest_process = IGuestProcess(guest_process)\n return guest_process",
"def send_signal(self, signum):\n # If the launcher returned a comm_port value, then use that to send the signal,\n # else, defer to the superclass - which will use a remote shell to issue kill.\n # Note that if the target process is running as a different user than the REMOTE_USER,\n # using anything other than the socket-based signal (via signal_addr) will not work.\n\n if self.comm_port > 0:\n signal_request = dict()\n signal_request['signum'] = signum\n\n try:\n self._send_listener_request(signal_request)\n\n if signum > 0: # Polling (signum == 0) is too frequent\n self.log.debug(\"Signal ({}) sent via gateway communication port.\".format(signum))\n return None\n except Exception as e:\n if isinstance(e, OSError) and e.errno == errno.ECONNREFUSED: # Return False since there's no process.\n return False\n\n self.log.warning(\"An unexpected exception occurred sending signal ({}) for KernelID '{}': {}\"\n .format(signum, self.kernel_id, str(e)))\n\n return super(RemoteProcessProxy, self).send_signal(signum)",
"def make_process(self, loop, id, on_exit):\r\n return self.config.make_process(loop, id, self.name, env=self.env,\r\n on_exit=on_exit)",
"def monkey_patch():\r\n gevent.monkey.patch_all()\r\n gevent_zeromq.monkey_patch()\r\n # Patch signal module for gevent compatability.\r\n # Courtesy of http://code.google.com/p/gevent/issues/detail?id=49\r\n import signal\r\n _orig_signal = signal.signal\r\n def gevent_signal_wrapper(signum,*args,**kwds):\r\n handler = signal.getsignal(signum)\r\n if callable(handler):\r\n handler(signum,None)\r\n def gevent_signal(signum,handler):\r\n _orig_signal(signum,handler)\r\n return gevent.hub.signal(signum,gevent_signal_wrapper,signum)\r\n signal.signal = gevent_signal",
"def send_signal(self, signum):\n # if we have a local process, use its method, else determine if the ip is local or remote and issue\n # the appropriate version to signal the process.\n result = None\n if self.local_proc:\n if self.pgid > 0 and hasattr(os, \"killpg\"):\n try:\n os.killpg(self.pgid, signum)\n return result\n except OSError:\n pass\n result = self.local_proc.send_signal(signum)\n else:\n if self.ip and self.pid > 0:\n if BaseProcessProxyABC.ip_is_local(self.ip):\n result = self.local_signal(signum)\n else:\n result = self.remote_signal(signum)\n return result",
"def __init__(self):\n self._sigint_caught = False\n self._sigint_response = None\n signal.signal(signal.SIGINT, self._sigint_handler)",
"def waitSyscall(self, process=None):\n signum = SIGTRAP\n if self.use_sysgood:\n signum |= 0x80\n if process:\n return self.waitSignals(signum, pid=process.pid)\n else:\n return self.waitSignals(signum)",
"def csrsrv_CsrCreateProcess(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hProcess\", \"hThread\", \"ClientId\", \"NtSession\", \"Flags\", \"DebugCid\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a semaphore for interprocess communication.
|
def semaphore(self, initval=0, qdis=QDIS.FIFO):
if initval < 0:
errmsg = "simulator.semaphore(initval=%r) negative init value" % initval
log.error(errmsg)
raise ValueError(errmsg)
if qdis < QDIS.FIFO or qdis > QDIS.PRIORITY:
errmsg = "simulator.semaphore(qdis=%r) unknown queuing discipline" % qdis
log.error(errmsg)
raise ValueError(errmsg)
return Semaphore(self, initval, qdis)
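
For illustration only: the code above confirms the semaphore(initval, qdis) signature and the QDIS.FIFO/QDIS.PRIORITY constants; the wait()/signal() calls below are assumed names for the semaphore's P/V operations, and sim.process/sim.sleep/sim.run are assumed simulator calls, so the sketch may not match the real API exactly.

# Hedged usage sketch; wait(), signal(), process(), sleep(), and run() are assumed names.
import simulus

sim = simulus.simulator()
sem = sim.semaphore(initval=1)    # signature shown above: initval, qdis

def worker(name):
    sem.wait()                    # assumed: P operation, may block the process
    print(name, "enters at", sim.now)
    sim.sleep(2)                  # assumed: hold the resource for 2 time units
    sem.signal()                  # assumed: V operation, releases one waiter

sim.process(worker, "a")          # assumed process-creation API
sim.process(worker, "b")
sim.run()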
|
[
"def __init__(self, *args):\n _ida_pro.__qsemaphore_t_swiginit(self, _ida_pro.new___qsemaphore_t(*args))",
"def acquire(self, obj_id=None, i=None):\r\n if not isinstance(obj_id,Process):\r\n raise Exception(\"semaphore requires items added to be of type 'Process'\")\r\n self.sem_dict[int(i)].add(obj_id)\r\n self.val[i]-=1",
"def access_limited_resource(process_id: int) -> None:\n\n with Semaphore(value=2):\n pprint(f\"Process {process_id} Entering critical section\", process_id)\n use_protected_resource()\n pprint(f\"Process {process_id} Leaving critical section\", process_id)",
"def test_semaphore_named_slash():\n with throttle(b\"[semaphores]\\n\\\"/&\\\"=1\") as url:\n client = Client(url)\n assert 1 == client.remainder(\"/&\")\n with lock(url, \"/&\"):\n assert 0 == client.remainder(\"/&\")\n # assert 1 == client.remainder(\"/\")",
"def test_semaphore_exception(self):\n lockfile = join('/tmp', sha1(settings.SECRET_KEY).hexdigest() +'.semaphore')\n try:\n with mutex(max_wait=0, lockfile=lockfile):\n with mutex(max_wait=0, lockfile=lockfile):\n pass\n except SemaphoreException:\n pass\n else:\n self.fail('Should have triggered SemaphoreException')\n self.assertFalse(os.path.exists(lockfile))",
"def allocate_lock(): # real signature unknown; restored from __doc__\n pass",
"def create_producer(self, service):\n concurrency = self.concurrency\n pool_size = self.pool_size\n callback = self.create_callback()\n processor = self.app.services.create_processor(service)\n counter = self.app.counters['pool_overflow']\n task = self.create_task(processor)\n consume = self.create_consumer()\n acceptors = self.app.acceptors\n loop = self.app.loop\n delay = self.app.hub.callback\n Request = self.Request\n\n def stop_accepting():\n if concurrency.reached or pool_size > concurrency:\n return\n logger.info('Stop registered acceptors,'\n ' current concurrency: %d...', int(concurrency))\n counter.add()\n concurrency.reached.set()\n acceptors.stop_accepting()\n\n def inner_producer(connection, message_buffer, request_id):\n \"\"\"Enqueue given request to thread pool.\"\"\"\n request = Request(loop=loop,\n connection=connection,\n message_buffer=message_buffer,\n request_id=request_id,\n service=service)\n curried_task = partial(task, request)\n consume(curried_task, partial(callback, request))\n if not concurrency.reached and pool_size <= concurrency:\n delay(stop_accepting)\n\n return inner_producer",
"def __init__(self, name, acquired=False):\n self.name = name\n self.acquired = acquired\n ret = _CreateMutex(None, False, name)\n if not ret:\n raise ctypes.WinError()\n self.handle = ret\n if acquired:\n self.acquire()",
"def wait_acquire():\n\n token = None\n\n while token is None:\n # make sure we're observing load and memory maximums\n if not JobServer._check_conditions():\n time.sleep(0.01)\n continue\n\n # try to get a job token\n token = JobServer._acquire()\n\n return token",
"def _ensure_initialized_acquire_ready_semaphore(\n wrapped: Callable, instance: GoPro, args: Any, kwargs: Any\n) -> Callable:\n if instance._maintain_ble:\n logger.debug(f\"{wrapped.__name__} acquiring semaphore\")\n with instance._ready:\n logger.debug(f\"{wrapped.__name__} has the semaphore\")\n ret = wrapped(*args, **kwargs)\n else:\n ret = wrapped(*args, **kwargs)\n if instance._maintain_ble:\n logger.debug(f\"{wrapped.__name__} released the semaphore\")\n return ret",
"def acquire():\n\n token = None\n\n # make sure we're observing load and memory maximums\n if JobServer._check_conditions():\n # try to get a job token\n token = JobServer._acquire()\n\n return token",
"def set_concurrency_limit(self, limit):\n try:\n # if the limit value is an integer then it's a local semaphore\n self.concurrency_type = CONCURRENCY_TYPE_LOCAL_SEMAPHORE\n self.concurrency_semaphore = threading.Semaphore(int(limit))\n logging.debug(f\"concurrency limit for {self.hunt_type} set to local limit {limit}\")\n except ValueError:\n # otherwise it's the name of a network semaphore\n self.concurrency_type = CONCURRENCY_TYPE_NETWORK_SEMAPHORE\n self.concurrency_semaphore = limit\n logging.debug(f\"concurrency limit for {self.hunt_type} set to \"\n f\"network semaphore {self.concurrency_semaphore}\")",
"def init():\n global init_procs\n try:\n # compatible with Python 2.6+\n current_proc = threading.current_thread()\n except AttributeError:\n # compatible with Python 2.5- \n current_proc = threading.currentThread() \n\n run = True\n try:\n if current_proc.id != None:\n run = False\n except AttributeError:\n pass\n\n if run:\n if not current_proc in init_procs:\n init_procs.append(current_proc)\n\n current_proc.id = uuid.uuid1().hex + \".__INIT__\"\n current_proc.id = current_proc.id.encode()\n current_proc.fn = None\n current_proc.state = FAIL\n current_proc.result_ch_idx = None\n current_proc.result_msg = None\n current_proc.ack = False\n\n current_proc.sequence_number = 1\n\n # Protect against early termination of mother-processes leaving childs in an invalid state\n current_proc.spawned = []\n\n # Protect against early termination of channelhomes leaving channel references in an invalid state\n current_proc.registeredChanHomeList = []\n current_proc.registeredChanConnectList = []\n\n # Protect against early termination of processes leaving channelhomes in an invalid state\n current_proc.activeChanList = []\n current_proc.closedChanList = []\n\n current_proc.cond = threading.Condition()\n dispatch = SocketDispatcher().getThread()\n current_proc.addr = dispatch.server_addr\n dispatch.registerProcess(current_proc.id, RemoteLock(current_proc))\n\n def wait_ack():\n current_proc.cond.acquire()\n while not current_proc.ack:\n current_proc.cond.wait()\n # Got ack, resetting\n current_proc.ack= False\n current_proc.cond.release()\n\n def wait():\n current_proc.cond.acquire()\n while current_proc.state == READY:\n current_proc.cond.wait()\n current_proc.cond.release()\n\n current_proc.wait = wait\n current_proc.wait_ack = wait_ack",
"def test_error_on_leasing_unknown_semaphore():\n with throttle(b\"[semaphores]\") as url:\n with pytest.raises(Exception, match=r\"Unknown semaphore\"):\n with lock(url, \"Unknown\"):\n pass",
"def mod_sem(molecule: Ligand) -> Ligand:\n\n append_to_log(\"Starting mod_Seminario method\")\n\n mod_sem = ModSeminario(molecule)\n\n mod_sem.modified_seminario_method()\n if molecule.enable_symmetry:\n mod_sem.symmetrise_bonded_parameters()\n\n append_to_log(\"Finishing Mod_Seminario method\")\n\n return molecule",
"def create_new_sem(sem_type):\r\n\r\n current_inst = current_instruction\r\n current_sched = current_schedule\r\n current_evt = current_event\r\n\r\n if sem_type == 'Instruction':\r\n current_inst = parse.add_new_DrugAdmin()\r\n current_sched = None\r\n current_evt = None\r\n elif sem_type == 'Schedule':\r\n if not current_inst:\r\n # E.g., we are at the start of the sig.\r\n current_inst = parse.add_new_DrugAdmin()\r\n current_sched = current_inst.add_new_schedule()\r\n current_evt = None\r\n elif sem_type == 'AdminEvent':\r\n if not current_inst:\r\n current_inst = parse.add_new_DrugAdmin()\r\n if not current_sched:\r\n current_sched = current_inst.add_new_schedule()\r\n current_evt = current_sched.add_new_AdminEvent()\r\n\r\n return (current_inst, current_sched, current_evt)",
"def __init__(self, name, flag=posix_ipc.O_CREAT, mode=0600, destroy=0):\n # name example: /g_115_acct_proc_queue\n self.destroy = destroy\n self.name = name\n self.mq = posix_ipc.MessageQueue( name, flag, mode )",
"def beforeRun(self):\n self.bufferSemaphore = threading.Semaphore(value=1)\n self.sendSemaphore = threading.Semaphore(value=0)\n threading.Thread(target=self.encode).start()",
"def __init__(self, *args):\n this = _wali.new_SemElemPtrPair(*args)\n try: self.this.append(this)\n except: self.this = this"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Run simulation up to the given time 'until' (by processing all events with timestamps less than 'until'), and if 'updating_until' is true, update the simulation clock to 'until' after processing all the events.
|
def _run(self, upper, updating_until):
# this is the main event loop of the simulator!
while len(self._eventlist) > 0:
t = self._eventlist.get_min()
if t >= upper: break
self._process_one_event()
# after all the events, make sure we don't wind back the clock
# if upper (set by either 'until' or 'offset') has been
# explicitly specified by the user
if updating_until:
self._eventlist.last = upper
self.now = upper
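
A minimal standalone sketch of the same "run until" pattern, using only the standard library (this toy class is not the real simulator; it only mirrors the loop above: process every event with a timestamp strictly less than the upper bound, then optionally advance the clock to that bound).

import heapq
import itertools

class MiniSim:
    """Toy event loop illustrating the 'run until' logic above."""
    def __init__(self):
        self.now = 0.0
        self._events = []                 # heap of (time, seq, func)
        self._seq = itertools.count()     # tie-breaker so callables never get compared

    def sched(self, time, func):
        heapq.heappush(self._events, (time, next(self._seq), func))

    def run(self, until, updating_until=True):
        # process every event whose timestamp is strictly less than 'until'
        while self._events and self._events[0][0] < until:
            t, _, func = heapq.heappop(self._events)
            self.now = t
            func()
        # then, if asked, advance the clock to 'until' (never wind it back)
        if updating_until:
            self.now = until

sim = MiniSim()
sim.sched(3.0, lambda: print("event at t=3"))
sim.run(until=10.0)
print(sim.now)   # 10.0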
|
[
"def _cron(self):\n while True:\n self.check_update()\n sleep(60)",
"def update_until(self, then):\n self._model.run(to=then)",
"def resched(self, e, offset=None, until=None):\n\n if not isinstance(e, _Event):\n errmsg = \"simulator.resched(e=%r) not an event\" % e\n log.error(errmsg)\n raise TypeError(errmsg)\n\n # figure out the event time\n if until == None and offset == None:\n # if both are missing, it's now!\n e.time = self.now\n elif until != None and offset != None:\n errmsg = \"simulator.resched(until=%r, offset=%r) duplicate specification\" % (until, offset)\n log.error(errmsg)\n raise ValueError(errmsg)\n elif offset != None:\n if offset < 0:\n errmsg = \"simulator.resched(offset=%r) negative offset\" % offset\n log.error(errmsg)\n raise ValueError(errmsg)\n e.time = self.now + offset\n elif until < self.now:\n errmsg = \"simulator.resched(until=%r) earlier than now (%r)\" % (until, self.now)\n log.error(errmsg)\n raise ValueError(errmsg)\n else: e.time = until\n\n try:\n self._eventlist.update(e)\n #log.debug(\"[r%d] simulator '%s' reschedule event to time=%g from now=%g\" %\n # (self._simulus.comm_rank, self.name[-4:], e.time, self.now))\n return e\n except Exception:\n # the event already happened as it's not in the event list\n #log.debug(\"[r%d] simulator '%s' reschedule non-active event from now=%g\" %\n # (self._simulus.comm_rank, self.name[-4:], self.now))\n return None",
"def run(self):\n schedule.every().day.at(\"13:02\").do(self.fn)\n while True:\n schedule.run_pending()\n time.sleep(1)",
"def waitUntil(hour):\n ctime = datetime.now()\n if hour == 0:\n 'If the time is tommorow, add one onto the day'\n ctime = datetime(ctime.year, ctime.month, ctime.day + 1, hour, 0, 0)\n else:\n ctime = datetime(ctime.year, ctime.month, ctime.day, hour, 0, 0)\n \n while ctime > datetime.now():\n 'Sleeps untill the time is greater, waiting 30 seconds at a time'\n sleep(30)",
"def step(self, upto=None):\n raw2percent = lambda l: ((l-3563.0)/3518)*100\n #percent2raw = lambda l: int((l/100.0)*3518 + 3563.0)\n percent2raw = lambda l: (l/100.0)*3518 + 3563.0\n \n if upto is None:\n upto = time.time()\n deltaT = upto - self.simState['simtime'] \n if deltaT < 0:\n #This happens when we receive an update packet dated from before the\n # current simulation time. In that case, we return to let the\n # changes be applied, and then the simulation will step to the\n # current time, and all will be well. In the worst case, changes\n # should only be 100 ms old.\n return\n consts = self.config['sim_constants']\n \n #We're doing a simple model that's linear over time. Non-linear models\n # may require iterating over fractions deltaT to be accurate.\n # Numerical techniques from Sage or SciPy may be required for advanced\n # models.\n\n level_percent = raw2percent(self.simState['LevelRawInputReg'])\n\n if self.simState['PumpRunCmd']:\n level_change = deltaT * self.pump_on_derivative(\n self.pump_on_inverse(level_percent))\n else:\n level_change = deltaT * self.pump_off_derivative(\n self.pump_off_inverse(level_percent))\n\n level_percent = level_percent + level_change \n if level_percent <= consts['minimum_level']:\n level_percent = consts['minimum_level']\n elif level_percent > consts['maximum_level']:\n level_percent = consts['maximum_level']\n\n self.simState['LevelRawInputReg'] = percent2raw(level_percent)\n self.simState['simtime'] = upto",
"def sleep_until(when):\n\n if type(when) == str:\n when = iso8601_as_datetime(when)\n\n if type(when) != datetime.datetime:\n raise ValueError(\"Passed wrong type; must be string or datetime\")\n\n how_long = time_until_seconds(when)\n time.sleep(how_long)",
"def simulate(self):\n self.hours += 1\n for person in self.people:\n person.update()\n self.update_infections_fast()\n for method in self.on_update_methods:\n method(self)",
"def runUntil(eventSource, suppressErrors=False, idle=sleep):",
"def simulate(self, running_time):\n # repeat until the end of the simulation time:\n while self.now <= running_time:\n # get the first event e from the events\n event = self.events.pop(0)\n # t = time of e\n self.now = event[0]\n event = event[1]\n self.reporter.report(event)\n\n # if e is an arrival event:\n if event.event_type == EventType.CASE_ARRIVAL:\n # add new task\n self.unassigned_tasks[event.task.id] = event.task\n self.reporter.report(Event(EventType.TASK_ACTIVATE, self.now, event.task))\n self.busy_cases[event.task.case_id] = [event.task.id]\n # generate a new planning event to start planning now for the new task\n self.events.append((self.now, Event(EventType.PLAN_TASKS, self.now, None, nr_tasks=len(self.unassigned_tasks), nr_resources=len(self.available_resources))))\n # generate a new arrival event for the first task of the next case\n (t, task) = self.problem.next_case()\n self.events.append((t, Event(EventType.CASE_ARRIVAL, t, task)))\n self.events.sort()\n\n # if e is a start event:\n elif event.event_type == EventType.START_TASK:\n # create a complete event for task\n t = self.now + self.problem.processing_time(event.task, event.resource)\n self.events.append((t, Event(EventType.COMPLETE_TASK, t, event.task, event.resource)))\n self.events.sort()\n # set resource to busy\n del self.reserved_resources[event.resource]\n self.busy_resources[event.resource] = (event.task, self.now)\n\n # if e is a complete event:\n elif event.event_type == EventType.COMPLETE_TASK:\n # set resource to available, if it is still desired, otherwise set it to away\n del self.busy_resources[event.resource]\n if self.working_nr_resources() <= self.desired_nr_resources():\n self.available_resources.add(event.resource)\n else:\n self.away_resources.append(event.resource)\n self.away_resources_weights.append(self.problem.resource_weights[self.problem.resources.index(event.resource)])\n # remove task from assigned tasks\n del self.assigned_tasks[event.task.id]\n self.busy_cases[event.task.case_id].remove(event.task.id)\n # generate unassigned tasks for each next task\n for next_task in event.task.next_tasks:\n self.unassigned_tasks[next_task.id] = next_task\n self.reporter.report(Event(EventType.TASK_ACTIVATE, self.now, next_task))\n self.busy_cases[event.task.case_id].append(next_task.id)\n if len(self.busy_cases[event.task.case_id]) == 0:\n self.events.append((self.now, Event(EventType.COMPLETE_CASE, self.now, event.task)))\n # generate a new planning event to start planning now for the newly available resource and next tasks\n self.events.append((self.now, Event(EventType.PLAN_TASKS, self.now, None, nr_tasks=len(self.unassigned_tasks), nr_resources=len(self.available_resources))))\n self.events.sort()\n\n # if e is a schedule resources event: move resources between available/away,\n # depending to how many resources should be available according to the schedule.\n elif event.event_type == EventType.SCHEDULE_RESOURCES:\n assert self.working_nr_resources() + len(self.away_resources) == len(self.problem.resources) # the number of resources must be constant\n assert len(self.problem.resources) == len(self.problem.resource_weights) # each resource must have a resource weight\n assert len(self.away_resources) == len(self.away_resources_weights) # each away resource must have a resource weight\n if len(self.away_resources) > 0: # for each away resource, the resource weight must be taken from the problem resource weights\n i = random.randrange(len(self.away_resources))\n assert 
self.away_resources_weights[i] == self.problem.resource_weights[self.problem.resources.index(self.away_resources[i])]\n required_resources = self.desired_nr_resources() - self.working_nr_resources()\n if required_resources > 0:\n # if there are not enough resources working\n # randomly select away resources to work, as many as required\n for i in range(required_resources):\n random_resource = random.choices(self.away_resources, self.away_resources_weights)[0]\n # remove them from away and add them to available resources\n away_resource_i = self.away_resources.index(random_resource)\n del self.away_resources[away_resource_i]\n del self.away_resources_weights[away_resource_i]\n self.available_resources.add(random_resource)\n # generate a new planning event to put them to work\n self.events.append((self.now, Event(EventType.PLAN_TASKS, self.now, None, nr_tasks=len(self.unassigned_tasks), nr_resources=len(self.available_resources))))\n self.events.sort()\n elif required_resources < 0:\n # if there are too many resources working\n # remove as many as possible, i.e. min(available_resources, -required_resources)\n nr_resources_to_remove = min(len(self.available_resources), -required_resources)\n resources_to_remove = random.sample(self.available_resources, nr_resources_to_remove)\n for r in resources_to_remove:\n # remove them from the available resources\n self.available_resources.remove(r)\n # add them to the away resources\n self.away_resources.append(r)\n self.away_resources_weights.append(self.problem.resource_weights[self.problem.resources.index(r)])\n # plan the next resource schedule event\n self.events.append((self.now+1, Event(EventType.SCHEDULE_RESOURCES, self.now+1, None)))\n\n # if e is a planning event: do assignment\n elif event.event_type == EventType.PLAN_TASKS:\n # there only is an assignment if there are free resources and tasks\n if len(self.unassigned_tasks) > 0 and len(self.available_resources) > 0:\n assignments = self.planner.assign(self)\n # for each newly assigned task:\n for (task, resource, moment) in assignments:\n # create start event for task\n self.events.append((moment, Event(EventType.START_TASK, moment, task, resource)))\n self.reporter.report(Event(EventType.TASK_PLANNED, self.now, task))\n # assign task\n del self.unassigned_tasks[task.id]\n self.assigned_tasks[task.id] = (task, resource, moment)\n # reserve resource\n self.available_resources.remove(resource)\n self.reserved_resources[resource] = (event.task, moment)\n self.events.sort()",
"def run(self,t):\r\n while t>0:\r\n update(min(t,dt))\r\n t -= dt",
"def data_updater():\n # This is a daemon thread so no need to explicitly\n # poll for any shutdown events.\n sleep_time = 0\n while True:\n interval = wallet['update_info']['interval']\n if time.time() > sleep_time + interval or \\\n wallet['update_info']['in_need']:\n do_update()\n sleep_time = time.time()\n time.sleep(1)",
"def run(self, steps_per_update=1):\n def loop(sim):\n sim.run(steps_per_update)\n self.loop(loop)",
"def process_events_until(self, time):\n while True:\n if self.__queue.empty():\n break\n event = self.__queue.get()\n if event.time() <= time:\n self.__current_time = event.time()\n event.process()\n else:\n self.__queue.put(event) # Put it back (not yet processed)\n break",
"def naptime(self):\n _config = self.config['misc']\n try:\n # local or UTC?\n tz = pytz.utc if _config['nap_time_frame'] == 'UTC' else None\n now = datetime.datetime.now(tz)\n\n if _config['nap_at_night']:\n\n last_midnight = datetime.datetime(now.year, now.month, now.day, tzinfo=tz)\n next_midnight = datetime.datetime(now.year, now.month, now.day, tzinfo=tz) \\\n + datetime.timedelta(days=1)\n\n hm_start = list(map(int, _config['nap_time_start'].split(':')))\n hm_stop = list(map(int, _config['nap_time_stop'].split(':')))\n\n if hm_stop[0] < hm_start[0]:\n h_before_midnight = 24 - (hm_start[0] + hm_start[1] / 60.0)\n h_after_midnight = hm_stop[0] + hm_stop[1] / 60.0\n\n # print((next_midnight - now).total_seconds() / 3600.0, h_before_midnight)\n # print((now - last_midnight).total_seconds() / 3600.0, h_after_midnight)\n\n if (next_midnight - now).total_seconds() / 3600.0 < h_before_midnight:\n sleep_until = next_midnight + datetime.timedelta(hours=h_after_midnight)\n print('sleep until:', sleep_until)\n elif (now - last_midnight).total_seconds() / 3600.0 < h_after_midnight:\n sleep_until = last_midnight + datetime.timedelta(hours=h_after_midnight)\n print('sleep until:', sleep_until)\n else:\n sleep_until = now + datetime.timedelta(minutes=_config['loop_interval'])\n print('sleep until:', sleep_until)\n\n else:\n h_after_midnight_start = hm_start[0] + hm_start[1] / 60.0\n h_after_midnight_stop = hm_stop[0] + hm_stop[1] / 60.0\n\n if (last_midnight + datetime.timedelta(hours=h_after_midnight_start) <\n now < last_midnight + datetime.timedelta(hours=h_after_midnight_stop)):\n sleep_until = last_midnight + datetime.timedelta(hours=h_after_midnight_stop)\n print('sleep until:', sleep_until)\n else:\n sleep_until = now + datetime.timedelta(minutes=_config['loop_interval'])\n print('sleep until:', sleep_until)\n\n return (sleep_until - now).total_seconds()\n\n else:\n # sleep for loop_interval minutes otherwise (return seconds)\n return _config['loop_interval'] * 60.0\n\n except Exception as _e:\n traceback.print_exc()\n self.logger.error('Failed to take a nap, taking a pill to fall asleep for an hour.')\n self.logger.error(_e)\n return 3600.0",
"def _do_updates(self):\n while self._clock.is_lagging():\n for action in self._actions.get_actions(UpdateAction):\n action.execute(self._actors, self._actions, self._clock, self)\n self._clock.catch_up()",
"def do_something_every_hour():\n sleep(5)",
"def run() -> None:\n while True:\n now = datetime.datetime.now(pytz.timezone('US/Pacific'))\n next_run = datetime.datetime(now.year, now.month, now.day, RUN_HOUR, RUN_MINUTE, 0)\n delta = next_run - now.replace(tzinfo=None)\n\n if (delta.seconds // 60) < 60: # Within hour\n if delta.seconds < 60: # Within minute\n load_config_data()\n try:\n pass_checks(now)\n chore_post(now)\n except SpamException:\n seconds_since_last_post = int(now.timestamp()) - config_data['last_ran']\n debug_post(\"ABORTING - Spam: Chore-Bot ran {} seconds ago. (< 64800)\".format(seconds_since_last_post))\n except WeekendException:\n debug_post(\"ABORTING - Weekend: {}\".format(now.strftime('%A')))\n\n log(\"Sleeping for 1 minute\")\n time.sleep(60)\n else:\n log(\"Sleeping for 30 seconds\")\n time.sleep(30)\n else:\n log(\"Sleeping for 1 hour\")\n time.sleep(3600)",
"async def run(self):\n last_update = await self._get_last_update()\n if not last_update or last_update['created_at'].date() != datetime.datetime.utcnow().date():\n await self._update_prices()\n else:\n self._schedule_next_update()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Process one event on the event list, assuming there is at least one event on the event list.
|
def _process_one_event(self):
e = self._eventlist.delete_min()
self.now = e.time
#log.debug("[r%d] simulator '%s' execute event at time %g" %
# (self._simulus.comm_rank, self.name[-4:], self.now))
self._runtime["executed_events"] += 1
# trigger the trap if the event already has a trap; this is a
# memory-saving mechanism: only those events that the user is
# explicitly interested in (used in the simulator's wait()
# method) are attached with a trap
if e.trap is not None:
e.trap.trigger()
if isinstance(e, _DirectEvent):
if e.repeat_intv is not None:
# note that a renewed event is not trappable
e = e.renew(e.time+e.repeat_intv)
#log.debug("[r%d] simulator '%s' schedule repeated event at time=%g from now=%g" %
# (self._simulus.comm_rank, self.name[-4:], e.time, self.now))
self._runtime["scheduled_events"] += 1
self._eventlist.insert(e)
e.func(*e.args, **e.kwargs)
elif isinstance(e, _ProcessEvent):
e.proc.activate()
else:
errmsg = "unknown event type: " + str(e)
log.error(errmsg)
raise RuntimeError(errmsg)
# processes are run only from the main loop!!
while len(self._readyq) > 0:
p = self._readyq.popleft()
if p.state == _Process.STATE_RUNNING:
self._theproc = p
#log.debug("[r%d] simulator '%s' context switch at time %g" %
# (self._simulus.comm_rank, self.name[-4:], self.now))
self._runtime["process_contexts"] += 1
p.run()
else:
# process is killed while in the ready queue
assert p.state == _Process.STATE_TERMINATED
self._theproc = None
|
[
"async def process_events(self, events: List[EventData]):\n pass",
"def process_events(self):\n pass",
"def _ProcessEvent(self, event):\n if FLAGS.debug_events:\n self._logger.debug('Processing event: %s' % event)\n if isinstance(event, kbevent.QuitEvent):\n self._logger.info('got quit event, quitting')\n self.Quit()\n return\n callbacks = self._GetCallbacksForEvent(event)\n for cb in callbacks:\n cb(event)",
"def process_events(self):\n# print \"in FuncEventQueue.process_events()\"\n while self.events:\n event = self.events.pop(0)\n\n print 'processing event, type = ' + str(event['type'])\n\n\n # TODO - remove\n if event['type'] == 'CLIENT_LEFT':\n client_id = event['data']['clientID']\n player = self.blakes7.players.client_id_to_player(client_id)\n\n # TODO - remove player or set status to inactive,\n # if has a user/pass\n if player:\n player['clientID'] = None\n player['clientData'] = None\n player['status'] = 'INACTIVE'\n\n # remove from clients list\n self.blakes7.players.client_left(client_id)\n\n # send broadcast message of new player list\n self.blakes7.players.broadcast_player_list()",
"def handle_event(event):\n for (_, states) in get_registered().items():\n data = states.get(event, None)\n if data is None:\n continue\n handle_single(data)",
"def process_all_events(self):\n while not self.__queue.empty():\n ev = self.get()\n ev.process()",
"def getNextEvent(self):\n event = self.eventList.pop(0)\n return event",
"def parse_events(self, events):\n for event in events:\n self.handle_event_list(event, self.generic_events)\n if self.subscriber in self.subscription_events:\n self.handle_event_list(event, self.subscription_events[self.subscriber])",
"def test_event_single(sock_dir):\n with eventpublisher_process(str(sock_dir)):\n with salt.utils.event.MasterEvent(str(sock_dir), listen=True) as me:\n me.fire_event({\"data\": \"foo1\"}, \"evt1\")\n evt1 = me.get_event(tag=\"evt1\")\n _assert_got_event(evt1, {\"data\": \"foo1\"})",
"def parse_event(self, event):",
"def dispatch(self, event):\n if self._dispatch_map is not None:\n lst = self._dispatch_map.get(type(event), None)\n if lst is None:\n raise ValueError(\"unknown event type: %s\" % type(event))\n for l_ in lst:\n l_(event)",
"def get_next_event(self) -> Optional[Tuple[Set, int, int]]:\n if self.has_more_events():\n return self._queue.popleft()\n else:\n return None",
"def processEvent(self, event: 'SoEvent') -> \"SbBool\":\n return _coin.SoEventManager_processEvent(self, event)",
"def next_event(self):\n logger.debug('next_event:')\n return next(self.__sse_client)",
"def _read_event(self):\n for event in self._fanotify.read_events():\n self._put_event(event)",
"def test_event_single_no_block(sock_dir):\n with eventpublisher_process(str(sock_dir)):\n with salt.utils.event.MasterEvent(str(sock_dir), listen=True) as me:\n start = time.time()\n finish = start + 5\n evt1 = me.get_event(wait=0, tag=\"evt1\", no_block=True)\n # We should get None and way before the 5 seconds wait since it's\n # non-blocking, otherwise it would wait for an event which we\n # didn't even send\n assert evt1 is None, None\n assert start < finish\n me.fire_event({\"data\": \"foo1\"}, \"evt1\")\n evt1 = me.get_event(wait=0, tag=\"evt1\")\n _assert_got_event(evt1, {\"data\": \"foo1\"})",
"def eventDispatcher(self):\n event = self.nextEvent()\n if event < 0:\n return\n else:\n log.msg(\"processing event: %s\" % event)\n fields = event.split(separator)\n try:\n sequence = int(fields[0])\n except:\n return\n\n try:\n msgType = fields[1]\n except:\n return\n\n fromId = int(fields[2]) if len(fields) >= 3 else None\n toId = int(fields[3]) if len(fields) >= 4 else None\n self.currEvent += 1\n\n if msgType == 'F':\n self.followers[toId].append(fromId)\n if toId in self.users:\n self.users[toId].transport.write(event + '\\n')\n log.msg(\"%d following %d\" % (fromId, toId))\n elif msgType == 'U':\n if fromId in self.followers[toId]:\n self.followers[toId].remove(fromId)\n log.msg(\"%d unfollowing %d\" % (fromId, toId))\n elif msgType == 'B':\n for user in self.users:\n user.transport.write(event + '\\n')\n log.msg(\"broadcasting %s to %s\" % (event, user))\n elif msgType == 'P':\n if toId in self.users:\n self.users[toId].transport.write(event + '\\n')\n log.msg(\"%s sending private message to %s\" % (fromId, toId))\n elif msgType == 'S':\n for follower in self.followers[fromId]:\n if follower in self.users:\n self.users[follower].transport.write(event + '\\n')\n log.msg(\"sending status %s to user %s\" %\n (event, self.users[follower]))",
"def _push_next_event(self):\n\n if self._event_queue:\n # retrieve next event on queue and push into stack\n self._push_event(self._event_queue.pop(0))\n else:\n self.delegate.event_queue_did_empty(self)",
"def next_event(self) -> Union[Part, PartData, Events]:\n try:\n return self.events_queue.popleft()\n except IndexError:\n if self.state is not States.FINISHED:\n return Events.NEED_DATA\n else:\n return Events.FINISHED"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the pseudorandom number generator attached to this simulator. It's a random.Random instance (Mersenne twister).
|
def rng(self):
if self._rng is None:
u = uuid.uuid3(self._simulus.namespace, self.name)
self._rng = random.Random(int(u.int/2**32))
return self._rng
|
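A runnable stand-in sketch of the lazy, per-simulator generator pattern shown above; the _FakeSim class and every name in it are illustrative inventions rather than the library's API, and the seeding only loosely mirrors the uuid3-based scheme.

import random
import uuid

class _FakeSim:
    # stand-in with the same lazy-caching contract as the method above; all names are assumptions
    def __init__(self, name):
        self.name = name
        self._rng = None

    def rng(self):
        if self._rng is None:
            # derive a stable seed from the simulator name, loosely mirroring the uuid3 scheme
            u = uuid.uuid3(uuid.NAMESPACE_DNS, self.name)
            self._rng = random.Random(u.int // 2**32)
        return self._rng

sim = _FakeSim('net0')
r = sim.rng()
print(r.random())        # deterministic across runs for a given simulator name
assert sim.rng() is r    # repeated calls return the same cached generator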
[
"def rng(self):\n if self._rng is None:\n # One-time initialization from backend-neutral seed int.\n self._rng = fastmath.random.get_prng(self._rng_seed_int)\n return self._rng",
"def get_rng(self, instance: Instance, seed: Optional[int] = None) -> Random:\n assert instance.id is not None\n # If seed exists, use it as part of the random seed\n return Random(instance.id if seed is None else str(seed) + instance.id)",
"def rand():\r\n global rand_seed\r\n rand_seed = (MULTIPLIER * rand_seed + INCREMENT)\r\n return (rand_seed >> 16) & 0x7FFF",
"def get_random(self):\n index = randrange(self.size)\n return self.individuals[index]",
"def get_prng(seed=None):\n if seed is not None and not (isinstance(seed, numbers.Integral) and seed >= 0):\n raise ValueError('Seed must be a non-negative integer or omitted, not {}'.format(seed))\n\n prng = np.random.RandomState()\n seed = create_seed(seed)\n seed = _int_list_from_bigint(hash_seed(seed))\n prng.seed(seed)\n return prng",
"def routine_rng():\n global current_time\n current_time += randint(40, 1000)\n\n seed = current_time\n rng = MT19937(seed)\n\n current_time += randint(40, 1000)\n return seed, rng.extract_number()",
"def get_rng(random_state):\n if random_state is None:\n return np.random.mtrand._rand\n elif isinstance(random_state, (numbers.Integral, np.integer)):\n return np.random.RandomState(random_state)\n if isinstance(random_state, np.random.RandomState):\n return random_state\n raise ValueError('Wrong random state. Expecting None, an int or a numpy '\n 'RandomState instance, got a '\n '{}'.format(type(random_state)))",
"def random_number():\n return random.getrandbits(32)",
"def get_rng_state():\n return default_generator.get_state()",
"def getGenerator(self):\n return self._generator",
"def random():\n return PrivateKey(secrets.token_bytes(32))",
"def __get_next_random(self, rand_seq):\n if rand_seq is not None:\n return rand_seq.pop(0)\n else:\n return random.random()",
"def get_generator(self):\n return self._get_series(self.root.generator)",
"def _init_random_number_generators(seed=None):\n # Seed Python random (None as seed is okay), then use it to seed the others.\n random.seed(seed)\n if seed is None:\n seed = random.randint(0, 2**31 - 1)\n logging.info('using seed %d', seed)\n np.random.seed(seed)\n tf.random.set_seed(seed)\n return jax_random.get_prng(seed)",
"def get_seed(self):\n return self.rseed",
"def random_gen(size=32):\n\t# with open(\"/dev/urandom\", 'rb') as f:\n\t# \treturn int.from_bytes(f.read(4), 'big')\n\trandom_num = SystemRandom().getrandbits(size)\n\treturn random_num",
"def get_seed(self):\n return self._current_seed",
"def get_number(self):\n return self.random_number",
"def preferred_rng(self):\n return self._preferred_rng"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Print the list of all future events currently on the event list. This is an expensive operation and should be used responsibly, possibly just for debugging purposes.
|
def show_calendar(self):
print("list of all future events (num=%d) at time %g on simulator '%s':" %
(len(self._eventlist), self.now, self.name if self.name else ''))
for e in sorted(self._eventlist.pqueue.values()):
print(" %s" % e)
|
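A minimal, self-contained sketch of the same debugging dump over a toy event list; the _Calendar class below is an illustrative stand-in, not the simulator's real event-list type.

import heapq

class _Calendar:
    # toy stand-in for the simulator's event list; all names here are assumptions
    def __init__(self, name='demo'):
        self.name = name
        self.now = 0.0
        self._events = []

    def sched(self, time, what):
        heapq.heappush(self._events, (time, what))

    def show_calendar(self):
        # expensive on purpose: copies and sorts every pending event just to print it
        print("list of all future events (num=%d) at time %g on simulator '%s':" %
              (len(self._events), self.now, self.name))
        for t, what in sorted(self._events):
            print("  %g: %s" % (t, what))

cal = _Calendar()
cal.sched(5.0, 'timeout')
cal.sched(2.5, 'packet-arrival')
cal.show_calendar()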
[
"def print_possible_events():\n print(\"Registered Events:\")\n print(_BasicEvent.get_possible_events())\n print(\"******************************\")",
"def print_event_handlers(self):\n self.__scheduler.print_event_handlers()",
"async def dump_events(self) -> str:\n\n try:\n return json.dumps(list(map(self.event_asdict, await self.events())), indent=2)\n except ClientError:\n return \"\"",
"def _print_daily_task_list(task_list: list):\n \n filtered = filter(remove_completed, task_list)\n filtered_list = list(filtered)\n for i in range(len(filtered_list)):\n item = filtered_list[i]\n name = item['name']\n print(f'{i+1}. {name}')\n \n print(\"\")",
"def printEventInfo(self):\n\n print self.eventType + ' - ' + conDateNumToDateStr(self.numDate)",
"def print_history(self):\n for item in self.get_history():\n print item",
"def print_event(self):\n indexes = self.events_list.selectedIndexes()\n if len(indexes) <= 0:\n return # Bail out, user hasn't selected anything in the list\n\n # Only one item can be selected at a time anyway, so we just default to index 0\n selected_index = indexes[0].row()\n\n print_dialog = QPrintDialog()\n if print_dialog.exec_() == 0:\n return # User pressed cancel.\n\n printer = print_dialog.printer()\n\n self.print(printer, self.case.event_to_html(self.case.events[selected_index]))",
"def print_history(self, **print_kwargs):\n print(\" History:\", **print_kwargs, end='\\n')\n if 'end' in print_kwargs:\n del print_kwargs['end']\n for event in self.history:\n print(f\"{event} \", **print_kwargs, end='')\n print('', **print_kwargs, end='\\n')",
"def print_queue_flow_list(self):\n result = []\n for val in self.queue:\n result.append(str(val[1].get_key()).ljust(4))\n print \"[debug] PIFO_Queue: print_queue_flow_list \".ljust(80) + str(result)",
"def event_list():\n return render_template(\"event_list.html\", user=current_user)",
"def command_show(calendar):\n sorted_keys = sorted(calendar.keys(), reverse=True)\n allevents_str = \"\"\n for key in sorted_keys:\n sorted_items = sorted(calendar[key], key = lambda i : i['start'])\n i = 0\n one_day_items = \"\"\n for event in sorted_items:\n if i == 0:\n if event['start'] < 10 and event['end'] < 10:\n one_day_items += f\"\\n{key} : \\n \" \\\n f\"start : 0{event['start']}:00,\\n \" \\\n f\"end : 0{event['end']}:00,\\n \" \\\n f\"title : {event['title']}\"\n elif event['start'] < 10 and event['end'] >= 10:\n one_day_items += f\"\\n{key} : \\n \" \\\n f\"start : 0{event['start']}:00,\\n \" \\\n f\"end : {event['end']}:00,\\n \" \\\n f\"title : {event['title']}\"\n elif event['start'] >= 10 and event['end'] < 10:\n one_day_items += f\"\\n{key} : \\n \" \\\n f\"start : {event['start']}:00,\\n \" \\\n f\"end : 0{event['end']}:00,\\n \" \\\n f\"title : {event['title']}\"\n else:\n one_day_items += f\"\\n{key} : \\n \" \\\n f\"start : {event['start']}:00,\\n \" \\\n f\"end : {event['end']}:00,\\n \" \\\n f\"title : {event['title']}\"\n else:\n if event['start'] < 10 and event['end'] < 10:\n one_day_items += f\"\\n\\n \" \\\n f\"start : 0{event['start']}:00,\\n \" \\\n f\"end : 0{event['end']}:00,\\n \" \\\n f\"title : {event['title']}\"\n elif event['start'] < 10 and event['end'] >= 10:\n one_day_items += f\"\\n\\n \" \\\n f\"start : 0{event['start']}:00,\\n \" \\\n f\"end : {event['end']}:00,\\n \" \\\n f\"title : {event['title']}\"\n elif event['start'] >= 10 and event['end'] < 10:\n one_day_items += f\"\\n\\n \" \\\n f\"start : {event['start']}:00,\\n \" \\\n f\"end : 0{event['end']}:00,\\n \" \\\n f\"title : {event['title']}\"\n else:\n one_day_items += f\"\\n\\n \" \\\n f\"start : {event['start']}:00,\\n \" \\\n f\"end : {event['end']}:00,\\n \" \\\n f\"title : {event['title']}\"\n i += 1\n allevents_str += one_day_items\n\n return allevents_str",
"def list_event_types():\n print('\\nValid event types:')\n for etype in EVENT_TYPES:\n print(' {0}'.format(etype))",
"def events():\n for el in _list_events():\n yield Event(el)",
"def show_tasks(self):\n print('\\nCompleted to following tasks:')\n for step in self.tasks:\n print('\\t{0}'.format(step))",
"def display_agenda(self, agenda):\n for appt in agenda:\n if appt.begin.date() == self.date:\n print(\"DISPLAYING: \" + str(appt))\n self.display_appt(appt)",
"def list(self, irc, msg, args):\n L = self.events.items()\n if L:\n L.sort()\n for (i, (name, command)) in enumerate(L):\n L[i] = format('%s: %q', name, command['command'])\n irc.reply(format('%L', L))\n else:\n irc.reply('There are currently no scheduled commands.')",
"def main():\r\n credentials = get_credentials()\r\n http = credentials.authorize(httplib2.Http())\r\n service = discovery.build('calendar', 'v3', http=http)\r\n\r\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\r\n print('Getting the upcoming 10 events')\r\n eventsResult = service.events().list(\r\n calendarId='primary', timeMin=now, maxResults=1000, singleEvents=True,\r\n orderBy='startTime').execute()\r\n events = eventsResult.get('items', [])\r\n retrieved = service.events().list(calendarId=CALENDAR_ID).execute()\r\n\r\n items = retrieved['items']\r\n\r\n for item in items:\r\n process_item(service, item)\r\n\r\n for i in seen:\r\n print(\"'{}': '',\\n\".format(i))\r\n\r\n return service.colors().get()",
"def get_concurrent_events_list_menu():\n events_list = services.events.overlapping_events(g.event).order_by(\n models.Event.start.desc())\n\n return [{'url': url_for('dashboard.concurrent_events',\n event_id=event.id), 'text': event.name, 'visible': True,\n 'active': get_event() == event}\n for event in events_list]",
"def print_queue(self):\n print self.queue"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Print a report on the simulator's runtime performance.
|
def show_runtime_report(self, prefix=''):
t = time.time()-self._runtime["start_clock"]
print('%s*********** simulator performance metrics ***********' % prefix)
print('%ssimulator name: %s' % (prefix, self.name))
print('%ssimulation time: %g' % (prefix, self.now-self.init_time))
print('%sexecution time: %g' % (prefix, t))
print('%ssimulation to real time ratio: %g' % (prefix, (self.now-self.init_time)/t))
print('%sscheduled events: %d (rate=%g)' %
(prefix, self._runtime["scheduled_events"], self._runtime["scheduled_events"]/t))
print('%sexecuted events: %d (rate=%g)' %
(prefix, self._runtime["executed_events"], self._runtime["executed_events"]/t))
print('%scancelled events: %d' % (prefix, self._runtime["cancelled_events"]))
print('%screated processes: %d' % (prefix, self._runtime["initiated_processes"]))
print('%sfinished processes: %d' % (prefix, self._runtime["terminated_processes"]))
print('%scancelled processes: %d' % (prefix, self._runtime["cancelled_processes"]))
print('%sprocess context switches: %d' % (prefix, self._runtime["process_contexts"]))
|
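A short sketch of how the per-second rates in the report are derived: wall-clock time since start_clock divides the raw counters. The field names match the _runtime dict used above; everything else is illustrative filler.

import time

runtime = {'start_clock': time.time(), 'scheduled_events': 0, 'executed_events': 0}

time.sleep(0.01)                  # stand-in for actually running a simulation
runtime['scheduled_events'] += 42
runtime['executed_events'] += 40

t = time.time() - runtime['start_clock']
print('execution time: %g' % t)
print('scheduled events: %d (rate=%g)' %
      (runtime['scheduled_events'], runtime['scheduled_events'] / t))
print('executed events: %d (rate=%g)' %
      (runtime['executed_events'], runtime['executed_events'] / t))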
[
"def print_report(self):\n print self.__report_str()",
"def print_report():\n print_days_percent_errors()\n print \"\"\n print_popular_authors()\n print \"\"\n print_popular_articles()\n print \"\"",
"def report(self, log):\n max_own_cpu = self.get_max_own_cpu()\n #if max_own_cpu == 0.0:\n # return\n log.blank()\n log('Overview of CPU time usage.')\n log.hline()\n log('Label Total Own')\n log.hline()\n bar_width = log.width-33\n for label, timer in sorted(self.parts.items()):\n if max_own_cpu > 0:\n cpu_bar = \"W\"*int(timer.own.cpu/max_own_cpu*bar_width)\n else:\n cpu_bar = \"\"\n log('%14s %8.1f %8.1f %s' % (\n label.ljust(14),\n timer.total.cpu, timer.own.cpu, cpu_bar.ljust(bar_width),\n ))\n log.hline()\n ru = resource.getrusage(resource.RUSAGE_SELF)\n log.deflist([\n ('CPU user time', '% 10.2f' % ru.ru_utime),\n ('CPU system time', '% 10.2f' % ru.ru_stime),\n ('Page swaps', '% 10i' % ru.ru_nswap),\n ])\n log.hline()",
"def print_performances(self): # pragma: no cover\n result = sorted(self.times.items(), key=lambda item: item[1], reverse=True)\n print()\n print(\"Elapsed times by features (in seconds)\")\n print(\"--------------------------------------\")\n for (name, seconds) in result:\n print(f\"{seconds:8.4f}\\t {name}\")\n print()",
"def displayCpuReport(self):\n cpuReport = self.getTopCpu()\n self.pprint.white('\\tTop CPU Consuming Processes : ')\n self.displayReport(cpuReport)\n print('')",
"def TimeReport(self) -> None:\n if self.dry_run:\n logging.info('Total: %d bytes', self.num_bytes)\n else:\n end_time = time.time()\n dt = end_time - self.start_time\n rate = self.num_bytes / 1024.0 / dt\n logging.info('Total: %d KB/s (%d bytes in %.3fs)', rate, self.num_bytes,\n dt)",
"def print_statistics(self):\n pass",
"def show_performance(self):\n NetworkPerformance(self.network, self.deadline)",
"def do_print_report(self, arg):\n print(self.last_fit_report)",
"def print_perf_info(self, result_list, output_file=None):\n pass",
"def trace_print():\n global snapshot\n global oldmem\n\n snapshot2 = tracemalloc.take_snapshot()\n snapshot2 = snapshot2.filter_traces(\n (\n tracemalloc.Filter(False, \"<frozen importlib._bootstrap>\"),\n tracemalloc.Filter(False, \"<unknown>\"),\n tracemalloc.Filter(False, tracemalloc.__file__),\n )\n )\n\n if snapshot is not None:\n thismem = PROCESS.memory_info().rss / 1024**2\n diff = thismem - oldmem\n print(\n \"===================== Begin Trace (TOTAL MEM={:1.4e} MB... [{:+1.4e} MB]):\".format(\n thismem, diff\n )\n )\n top_stats = snapshot2.compare_to(snapshot, \"lineno\", cumulative=True)\n for stat in top_stats[:4]:\n print(stat)\n print(\"End Trace ===========================================\")\n oldmem = thismem\n\n snapshot = snapshot2",
"def displayMemReport(self):\n memReport = self.getTopMem()\n self.pprint.white('\\tTop Memory Consuming Processes : ')\n self.displayReport(memReport)\n print('')",
"def print_result(self, allocations, non_executables):\n\n print \"\\nAllocations\"\n for i, a in enumerate(allocations):\n print \"Machine %i (%ds):\" % (i, a[self._TOT_DUR])\n if a[self._TEST_SET]:\n for t in a[self._TEST_SET]:\n print \"%s (%ss);\" % (t.title, t.duration),\n print\n else:\n print \"<Empty>\"\n if non_executables:\n print \"Non-Executable Tests:\"\n for t in non_executables:\n print t",
"def show_report(report):\n print()\n for line in report:\n print(line)\n print()",
"def _DisplayResults(self):\n print\n print '=' * 78\n print 'DIAGNOSTIC RESULTS'.center(78)\n print '=' * 78\n\n if 'latency' in self.results:\n print\n print '-' * 78\n print 'Latency'.center(78)\n print '-' * 78\n print ('Operation Size Trials Mean (ms) Std Dev (ms) '\n 'Median (ms) 90th % (ms)')\n print ('========= ========= ====== ========= ============ '\n '=========== ===========')\n for key in sorted(self.results['latency']):\n trials = sorted(self.results['latency'][key])\n op, numbytes = key.split('_')\n numbytes = int(numbytes)\n if op == 'METADATA':\n print 'Metadata'.rjust(9), '',\n print MakeHumanReadable(numbytes).rjust(9), '',\n self._DisplayStats(trials)\n if op == 'DOWNLOAD':\n print 'Download'.rjust(9), '',\n print MakeHumanReadable(numbytes).rjust(9), '',\n self._DisplayStats(trials)\n if op == 'UPLOAD':\n print 'Upload'.rjust(9), '',\n print MakeHumanReadable(numbytes).rjust(9), '',\n self._DisplayStats(trials)\n if op == 'DELETE':\n print 'Delete'.rjust(9), '',\n print MakeHumanReadable(numbytes).rjust(9), '',\n self._DisplayStats(trials)\n\n if 'write_throughput' in self.results:\n print\n print '-' * 78\n print 'Write Throughput'.center(78)\n print '-' * 78\n write_thru = self.results['write_throughput']\n print 'Copied a %s file %d times for a total transfer size of %s.' % (\n MakeHumanReadable(write_thru['file_size']),\n write_thru['num_copies'],\n MakeHumanReadable(write_thru['total_bytes_copied']))\n print 'Write throughput: %s/s.' % (\n MakeBitsHumanReadable(write_thru['bytes_per_second'] * 8))\n\n if 'read_throughput' in self.results:\n print\n print '-' * 78\n print 'Read Throughput'.center(78)\n print '-' * 78\n read_thru = self.results['read_throughput']\n print 'Copied a %s file %d times for a total transfer size of %s.' % (\n MakeHumanReadable(read_thru['file_size']),\n read_thru['num_times'],\n MakeHumanReadable(read_thru['total_bytes_copied']))\n print 'Read throughput: %s/s.' 
% (\n MakeBitsHumanReadable(read_thru['bytes_per_second'] * 8))\n\n if 'listing' in self.results:\n print\n print '-' * 78\n print 'Listing'.center(78)\n print '-' * 78\n\n listing = self.results['listing']\n insert = listing['insert']\n delete = listing['delete']\n print 'After inserting %s objects:' % listing['num_files']\n print (' Total time for objects to appear: %.2g seconds' %\n insert['time_took'])\n print ' Number of listing calls made: %s' % insert['num_listing_calls']\n print (' Individual listing call latencies: [%s]' %\n ', '.join('%.2gs' % lat for lat in insert['list_latencies']))\n print (' Files reflected after each call: [%s]' %\n ', '.join(map(str, insert['files_seen_after_listing'])))\n\n print 'After deleting %s objects:' % listing['num_files']\n print (' Total time for objects to appear: %.2g seconds' %\n delete['time_took'])\n print ' Number of listing calls made: %s' % delete['num_listing_calls']\n print (' Individual listing call latencies: [%s]' %\n ', '.join('%.2gs' % lat for lat in delete['list_latencies']))\n print (' Files reflected after each call: [%s]' %\n ', '.join(map(str, delete['files_seen_after_listing'])))\n\n if 'sysinfo' in self.results:\n print\n print '-' * 78\n print 'System Information'.center(78)\n print '-' * 78\n info = self.results['sysinfo']\n print 'IP Address: \\n %s' % info['ip_address']\n print 'Temporary Directory: \\n %s' % info['tempdir']\n print 'Bucket URI: \\n %s' % self.results['bucket_uri']\n print 'gsutil Version: \\n %s' % self.results.get('gsutil_version',\n 'Unknown')\n print 'boto Version: \\n %s' % self.results.get('boto_version', 'Unknown')\n\n if 'gmt_timestamp' in info:\n ts_string = info['gmt_timestamp']\n timetuple = None\n try:\n # Convert RFC 2822 string to Linux timestamp.\n timetuple = time.strptime(ts_string, '%a, %d %b %Y %H:%M:%S +0000')\n except ValueError:\n pass\n\n if timetuple:\n # Converts the GMT time tuple to local Linux timestamp.\n localtime = calendar.timegm(timetuple)\n localdt = datetime.datetime.fromtimestamp(localtime)\n print 'Measurement time: \\n %s' % localdt.strftime(\n '%Y-%m-%d %I:%M:%S %p %Z')\n\n print 'Google Server: \\n %s' % info['googserv_route']\n print ('Google Server IP Addresses: \\n %s' %\n ('\\n '.join(info['googserv_ips'])))\n print ('Google Server Hostnames: \\n %s' %\n ('\\n '.join(info['googserv_hostnames'])))\n print 'Google DNS thinks your IP is: \\n %s' % info['dns_o-o_ip']\n print 'CPU Count: \\n %s' % info['cpu_count']\n print 'CPU Load Average: \\n %s' % info['load_avg']\n try:\n print ('Total Memory: \\n %s' %\n MakeHumanReadable(info['meminfo']['mem_total']))\n # Free memory is really MemFree + Buffers + Cached.\n print 'Free Memory: \\n %s' % MakeHumanReadable(\n info['meminfo']['mem_free'] +\n info['meminfo']['mem_buffers'] +\n info['meminfo']['mem_cached'])\n except TypeError:\n pass\n\n if 'netstat_end' in info and 'netstat_start' in info:\n netstat_after = info['netstat_end']\n netstat_before = info['netstat_start']\n for tcp_type in ('sent', 'received', 'retransmit'):\n try:\n delta = (netstat_after['tcp_%s' % tcp_type] -\n netstat_before['tcp_%s' % tcp_type])\n print 'TCP segments %s during test:\\n %d' % (tcp_type, delta)\n except TypeError:\n pass\n else:\n print ('TCP segment counts not available because \"netstat\" was not '\n 'found during test runs')\n\n if 'disk_counters_end' in info and 'disk_counters_start' in info:\n print 'Disk Counter Deltas:\\n',\n disk_after = info['disk_counters_end']\n disk_before = info['disk_counters_start']\n print '', 
'disk'.rjust(6),\n for colname in ['reads', 'writes', 'rbytes', 'wbytes', 'rtime',\n 'wtime']:\n print colname.rjust(8),\n print\n for diskname in sorted(disk_after):\n before = disk_before[diskname]\n after = disk_after[diskname]\n (reads1, writes1, rbytes1, wbytes1, rtime1, wtime1) = before\n (reads2, writes2, rbytes2, wbytes2, rtime2, wtime2) = after\n print '', diskname.rjust(6),\n deltas = [reads2-reads1, writes2-writes1, rbytes2-rbytes1,\n wbytes2-wbytes1, rtime2-rtime1, wtime2-wtime1]\n for delta in deltas:\n print str(delta).rjust(8),\n print\n\n if 'tcp_proc_values' in info:\n print 'TCP /proc values:\\n',\n for item in info['tcp_proc_values'].iteritems():\n print ' %s = %s' % item\n\n if 'boto_https_enabled' in info:\n print 'Boto HTTPS Enabled: \\n %s' % info['boto_https_enabled']\n\n if 'using_proxy' in info:\n print 'Requests routed through proxy: \\n %s' % info['using_proxy']\n\n if 'google_host_dns_latency' in info:\n print ('Latency of the DNS lookup for Google Storage server (ms): '\n '\\n %.1f' % (info['google_host_dns_latency'] * 1000.0))\n\n if 'google_host_connect_latencies' in info:\n print 'Latencies connecting to Google Storage server IPs (ms):'\n for ip, latency in info['google_host_connect_latencies'].iteritems():\n print ' %s = %.1f' % (ip, latency * 1000.0)\n\n if 'proxy_dns_latency' in info:\n print ('Latency of the DNS lookup for the configured proxy (ms): '\n '\\n %.1f' % (info['proxy_dns_latency'] * 1000.0))\n\n if 'proxy_host_connect_latency' in info:\n print ('Latency connecting to the configured proxy (ms): \\n %.1f' %\n (info['proxy_host_connect_latency'] * 1000.0))\n\n if 'request_errors' in self.results and 'total_requests' in self.results:\n print\n print '-' * 78\n print 'In-Process HTTP Statistics'.center(78)\n print '-' * 78\n total = int(self.results['total_requests'])\n numerrors = int(self.results['request_errors'])\n numbreaks = int(self.results['connection_breaks'])\n availability = (((total - numerrors) / float(total)) * 100\n if total > 0 else 100)\n print 'Total HTTP requests made: %d' % total\n print 'HTTP 5xx errors: %d' % numerrors\n print 'HTTP connections broken: %d' % numbreaks\n print 'Availability: %.7g%%' % availability\n if 'error_responses_by_code' in self.results:\n sorted_codes = sorted(\n self.results['error_responses_by_code'].iteritems())\n if sorted_codes:\n print 'Error responses by code:'\n print '\\n'.join(' %s: %s' % c for c in sorted_codes)\n\n if self.output_file:\n with open(self.output_file, 'w') as f:\n json.dump(self.results, f, indent=2)\n print\n print \"Output file written to '%s'.\" % self.output_file\n\n print",
"def print_model_performance(duration, best_acc, best_model):\n best_acc = best_acc*100\n print('Training and validation complete in: {:8}\\n'\n 'Best validation Accuracy: {:2.2f}%\\n'\n 'Learned model saved: {:16}\\n'.format(\n time_format(duration), best_acc, best_model))",
"def show_speedtest():\n return _run_speedify_cmd([\"show\", \"speedtest\"])",
"def print_stats():\r\n\tprint()\r\n\r\n\tall_fn_names = [k for k in _total_times.keys() if k not in _disabled_names]\r\n\r\n\tmax_name_width = max([len(k) for k in all_fn_names] + [4])\r\n\tif max_name_width % 2 == 1: max_name_width += 1\r\n\tformat_str = ' {:>%d} | {:>10.4f} ' % max_name_width\r\n\r\n\theader = (' {:^%d} | {:^10} ' % max_name_width).format('Name', 'Time (ms)')\r\n\tprint(header)\r\n\r\n\tsep_idx = header.find('|')\r\n\tsep_text = ('-' * sep_idx) + '+' + '-' * (len(header)-sep_idx-1)\r\n\tprint(sep_text)\r\n\r\n\tfor name in all_fn_names:\r\n\t\tprint(format_str.format(name, _total_times[name]*1000))\r\n\t\r\n\tprint(sep_text)\r\n\tprint(format_str.format('Total', total_time()*1000))\r\n\tprint()",
"def print_results(self):\n avg_rew = sum(self.rew_list)/self.num_ep\n print (\"Score over time: \" + str(avg_rew))\n print (\"Final Q-Table: \")\n print (self.Q)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Suppress both stdout and stderr outputs.
|
def suppress_output():
if sys.version_info >= (3, 5):
from contextlib import redirect_stderr, redirect_stdout
else:
class _RedirectStream(object):
_stream = None
def __init__(self, new_target):
self._new_target = new_target
self._old_targets = []
def __enter__(self):
self._old_targets.append(getattr(sys, self._stream))
setattr(sys, self._stream, self._new_target)
return self._new_target
def __exit__(self, exctype, excinst, exctb):
setattr(sys, self._stream, self._old_targets.pop())
class redirect_stdout(_RedirectStream):
_stream = "stdout"
class redirect_stderr(_RedirectStream):
_stream = "stderr"
with TemporaryFile("wt") as file:
with redirect_stdout(file):
with redirect_stderr(file):
yield
|
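The generator above is evidently meant to be wrapped with contextlib.contextmanager and relies on sys and tempfile.TemporaryFile being imported elsewhere; those pieces are assumptions here. A trimmed, Python-3-only sketch of the same idea with a usage demo:

import sys
from contextlib import contextmanager, redirect_stderr, redirect_stdout
from tempfile import TemporaryFile

@contextmanager
def suppress_output():
    # send both streams into a throwaway temporary file for the duration of the block
    with TemporaryFile('wt') as sink, redirect_stdout(sink), redirect_stderr(sink):
        yield

with suppress_output():
    print('you should not see this')
    print('nor this', file=sys.stderr)
print('back to normal output')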
[
"def suppress_stderr():\n with open(os.devnull, 'w') as fnull:\n with redirect_stderr(fnull):\n yield None",
"def silence_stderr():\n class Devnull(object):\n def write(self, _): pass\n\n def flush(self): pass\n\n orig_stderr = sys.stderr\n sys.stderr = Devnull()\n try:\n yield\n finally:\n sys.stderr = orig_stderr",
"def suppress_stdout():\n with open(os.devnull, \"w\") as devnull:\n old_stdout = sys.stdout\n sys.stdout = devnull\n try: \n yield\n finally:\n sys.stdout = old_stdout",
"def suppress_stdout(func):\n def wrapper(*args, **kwargs):\n with open(os.devnull, 'w') as devnull:\n with contextlib.redirect_stdout(devnull):\n func(*args, **kwargs)\n \n return wrapper",
"def restord_stderr():\n sys.stderr = sys.__stderr__",
"def set_stdout_stderr():\n\n class Writer(object):\n def write(self, msg):\n log.debug(msg)\n if verbose:\n chunk_send(msg)\n\n def flush(self):\n pass\n\n orig_stds = sys.stdout, sys.stderr\n w = Writer()\n sys.stdout = w\n sys.stderr = w\n\n def cleanup():\n \"\"\"\n Restores stdout and stderr\n \"\"\"\n sys.stdout = orig_stds[0]\n sys.stderr = orig_stds[1]\n client_sock.close()\n\n return cleanup",
"def test_with_nostderr_print(self, ):\n with redirect.with_nostderr():\n sys.stdout.write('dummyprint')\n self.assertEqual(sys.stderr.getvalue(), '',\n msg=(\"Failed: `with_nostdout' expect sys.stdout ''.\\n\"\n \"but we got {}\".format(sys.stderr.getvalue())))",
"def _run_subprocess_ignore_stderr(command):\n with open(os.devnull, \"w\") as devnull:\n output = subprocess.check_output(command, stderr=devnull)\n return output",
"def redirect_stdout_stderr():\n stdout_logger = logging.getLogger('STDOUT')\n sys.stdout = StreamToLogger(stdout_logger, logging.INFO)\n\n stderr_logger = logging.getLogger('STDERR')\n sys.stderr = StreamToLogger(stderr_logger, logging.ERROR)",
"def test_with_nostdout_print(self, ):\n with redirect.with_nostdout():\n print('dummyprint')\n self.assertEqual(sys.stdout.getvalue(), '',\n msg=(\"Failed: `with_nostdout' expect sys.stdout ''.\\n\"\n \"but we got {}\".format(sys.stdout.getvalue())))",
"def silent_execute( string, return_stderr=True):\r\n if sys.platform.startswith('win') or return_stderr:\r\n return os.system(string + \" > \" + os.devnull)\r\n else:\r\n return os.system('%s >%s 2>%s' % (string, os.devnull,\r\n os.devnull))",
"def use_stderr(self):\n return getattr(self, '_use_stderr', False)",
"def set_no_print(self):\n self._quiet = True",
"def test_error_includes_stdout_if_given_and_hidden(self):\n # Mostly to catch regression bug(s)\n stdout = \"this is my stdout\"\n with hide('stdout'):\n error(\"error message\", func=utils.abort, stdout=stdout)\n assert_contains(stdout, sys.stdout.getvalue())",
"def silence(error=Exception):\r\n return contextlib.suppress(error)",
"def RestoreStdOut():\n sys.stdout = PrintMocker.old_stdout",
"def enable_print():\n if NOPRINT:\n sys.stdout = sys.__stdout__",
"def block_print():\n if NOPRINT:\n sys.stdout = open(os.devnull, \"w\")",
"def disable_error_and_warning_recording(self):\n self._disable_err_warn_rec = True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get all tweets from a profile and write them to a txt file. Leave filter=True to remove RTs, links and mentions. Twitter only allows access to a user's most recent 3240 tweets with this method. keys = [consumer_key,consumer_secret,access_key,access_secret]
|
def get_all_tweets(screen_name,keys=keys,filter=True):
consumer_key,consumer_secret,access_key,access_secret = keys
    #regex patterns used to filter out retweets, links and mentions
rt = r'^RT'
link = r'https?:\/\/([\w\.-]+)\/([\w\.-]+)'
mention = r'^\@'
#authorize twitter, initialize tweepy
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
#initialize a list to hold all the tweepy Tweets
alltweets = []
#make initial request for most recent tweets (200 is the maximum allowed count)
new_tweets = api.user_timeline(screen_name = screen_name,count=200,tweet_mode='extended')
#save most recent tweets
alltweets.extend(new_tweets)
#save the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
#keep grabbing tweets until there are no tweets left to grab
while len(new_tweets) > 0:
print("getting tweets before {}".format(oldest))
        #all subsequent requests use the max_id param to prevent duplicates
new_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest,tweet_mode='extended')
#save most recent tweets
alltweets.extend(new_tweets)
#update the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
print("...{} tweets downloaded so far".format(len(alltweets)))
    #extract the tweet text, optionally filtering out RTs and mentions
if filter:
outtweets = [tweet.full_text for tweet in alltweets if not re.match(rt, tweet.full_text) and not re.match(mention, tweet.full_text)]
preproc = [re.sub(link, "", tweet)+"\n" for tweet in outtweets][::-1]
    else:
        outtweets = [tweet.full_text for tweet in alltweets]
        preproc = [tweet + "\n" for tweet in outtweets][::-1]
    #write the txt file
with open('tweets/{}_tweets.txt'.format(screen_name), 'w', encoding='utf-8') as f:
f.writelines(preproc)
print('tweets/{}_tweets.txt was successfully created.'.format(screen_name))
|
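An illustrative call, not runnable as-is: it needs tweepy installed, real Twitter API credentials in place of the placeholders below, network access, and an existing tweets/ directory (the function opens tweets/<name>_tweets.txt without creating the folder).

keys = ['CONSUMER_KEY', 'CONSUMER_SECRET', 'ACCESS_KEY', 'ACCESS_SECRET']  # placeholders
get_all_tweets('jack', keys=keys, filter=True)
# expected result: tweets/jack_tweets.txt, oldest tweet first, with RTs and
# @-replies dropped and bare links stripped from the remaining text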
[
"def get_all_tweets(screen_name):\n #Twitter only allows access to a users most recent 3240 tweets with this method\n \n #authorize twitter, initialize tweepy\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_key, access_secret)\n api = tweepy.API(auth)\n \n #initialize a list to hold all the tweepy Tweets\n alltweets = [] \n \n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(screen_name = screen_name, count=200, include_rts = True)\n \n #only do this for users that have actually tweeted\n if len(new_tweets) > 0:\n #save most recent tweets\n alltweets.extend(new_tweets)\n \n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n \n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n \n #all subsiquent requests use the max_id param to prevent duplicates\n new_tweets = api.user_timeline(screen_name = screen_name,count=200, max_id=oldest, include_rts = True)\n \n #save most recent tweets\n alltweets.extend(new_tweets)\n \n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n \n print \"...%s tweets downloaded so far\" % (len(alltweets))\n \n # Save tweets for user in a json file\n fname = \"tweets/\"+str(screen_name)+\".jsonl\"\n with open(fname, 'w') as f:\n for status in alltweets:\n f.write(json.dumps(status._json)+\"\\n\")\n \n #close the file\n print \"Done with \" + str(screen_name)\n time.sleep(60)\n print \"Sleeping for one minute\"",
"def main():\n #Tokens for authentication\n client_key = ''\n client_secret = ''\n resource_owner_key = ''\n resource_owner_secret = ''\n token = {\n \"client_key\": client_key, \n \"client_secret\": client_secret,\n \"resource_owner_key\": resource_owner_key, \n \"resource_owner_secret\": resource_owner_secret\n } \n i = 0\n tweets = twitter.public_stream(token)\n fp1 = open(\"Streamtweets.txt\",\"w\")\n while True:\n for tweet in tweets:\n print \"writing\"\n fp1.write(tweet)\n fp1.write(\"\\n\\n\")\n i = i + 1\n if i == 1000: \n break\n if i == 1000:\n break\n fp1.close()\n print \"Exiting!!!\"\n raise SystemExit()",
"def filter(self, user_id=None, count=20, tweet_timeline=None):\n try:\n timeline = None\n if tweet_timeline is not None:\n timeline = tweet_timeline\n else:\n timeline = self.get_tweets(user_id=user_id, count=count)\n\n if timeline is None:\n raise Exception(\"Error: Could not fetch the tweets\")\n\n filtered_tweets = {}\n for tweets in timeline:\n tweet = tweets._json\n if (\n tweet[\"retweet_count\"] > self.min_retweet\n and tweet[\"retweet_count\"] < self.max_retweet\n ):\n temp_tweet = {\n \"text\": tweet[\"text\"],\n \"retweet_count\": tweet[\"retweet_count\"],\n }\n filtered_tweets[tweet[\"id\"]] = temp_tweet\n\n print(\n \"Status: Filtered {} tweets with the given criteria\".format(\n len(filtered_tweets)\n )\n )\n return filtered_tweets\n\n except Exception as e:\n print(str(e))\n sys.exit(0)",
"def do_tweets(self):\n http = httplib2.Http()\n if self.callsign:\n url = \"http://search.twitter.com/search.json?q=%s+from:%s\" % (urllib.quote('#' + self.callsign), urllib.quote(self.master))\n else:\n url = \"http://search.twitter.com/search.json?q=from:%s\" % (urllib.quote(self.master))\n resp, content = http.request(url, \"GET\")\n d = json.loads(content)\n for j in d['results']:\n if j['id_str'] == self.lasttweeted:\n return\n else:\n self.tweet_out(j['id_str'])",
"def _filter_tweet(self, tweet):\n if \"extended_tweet\" in tweet.keys():\n tweet[\"text\"] = tweet[\"extended_tweet\"][\"full_text\"]\n elif \"retweeted_status\" in tweet.keys() and \"full_text\" in tweet[\"retweeted_status\"].keys():\n tweet[\"text\"] = \"RT \" + tweet[\"retweeted_status\"][\"full_text\"]\n\n filtered_data = self._extract(tweet, TwitterFetcher.tweet_fields)\n filtered_data[\"user\"] = self._extract(tweet[\"user\"], TwitterFetcher.user_fields)\n filtered_data[\"CC\"] = self._get_location(tweet[\"user\"][\"location\"])\n filtered_data[\"social\"] = {\"topic\": self.topic, \"topic_id\": self.topic_id, \"user_id\": self.user_id}\n filtered_data[\"source\"] = self._get_source(tweet[\"source\"])\n self.redis.publish(f'twitter:stream', json.dumps(filtered_data))\n self._initialize_results(filtered_data)\n return filtered_data",
"def tweets(self):\n tweet=[] # creating a list to add all of the tweets text to\n for json_file in self.data:\n tweet.append(json_file[\"text\"])# adding the text of the tweets to the list\n return tweet # returning the list of tweets so that I can use this function tweets and apply it",
"def filter(self, user_id=None, count=20, tweet_timeline=None):\n try:\n timeline = None\n if tweet_timeline is not None:\n timeline = tweet_timeline\n else:\n timeline = self.get_tweets(user_id=user_id, count=count)\n\n if timeline is None:\n raise Exception(\"Error: Could not fetch the tweets\")\n\n filtered_tweets = {}\n for tweets in timeline:\n tweet = tweets._json\n\n if (\n tweet[\"favorite_count\"] > self.min_favorite\n and tweet[\"favorite_count\"] < self.max_favorite\n ):\n\n temp_tweet = {\n \"text\": tweet[\"text\"],\n \"favorite_count\": tweet[\"favorite_count\"],\n }\n filtered_tweets[tweet[\"id\"]] = temp_tweet\n\n print(\n \"Status: Filtered {} tweets with the given criteria\".format(\n len(filtered_tweets)\n )\n )\n return filtered_tweets\n\n except Exception as e:\n print(str(e))\n sys.exit(0)",
"def userTweets(username):\n api = twitter.Api(consumer_key=key,consumer_secret=secret,access_token_key=access_key,access_token_secret=access_secret)\n user_tweet = api.GetUserTimeline(screen_name=username)\n for tweet in user_tweet:\n util.safe_print(tweet.GetText())",
"def put_tweets(self):\n\t\tscreen_name = self.screen_name\n\t\tself.get_user_retweets()\n\t\tself.retweet_df[\"date\"] = pd.to_datetime(self.retweet_df['created_at']).dt.date\n\t\tself.retweet_df = self.retweet_df[self.retweet_df[\"date\"] >= self.__START_DATE]\n\t\tself.retweet_df = self.retweet_df.drop(\"date\",axis=1)\n\t\twrite_to_file(self.file_path,self.retweet_df,self.screen_name)\n\t\tprint(\"--- done for {} ---\".format(screen_name))",
"def get_user_tweets(self):\n tweets = []\n for status in tweepy.Cursor(self.api.user_timeline).items():\n tweets.append(status)\n return tweets",
"def read_user_tweets(dir_path):\n tweet_dict = {}\n words = []\n tokenize_dict = {}\n user_tweets = \"\"\n i = 0\n cachedStopWords = stopwords.words(\"english\")\n# print(cachedStopWords) #print stop words\n# loop over the user files\n for filename in os.listdir(dir_path):\n #skip files if it's not xml \n if filename.endswith(\".xml\"): \n dom = ElementTree.parse(dir_path+filename) \n tweets = dom.find('documents')\n #loop over tweet of one user \n for tweet in tweets:\n #concantanate tweets of one user by new line \n user_tweets = user_tweets + \"\\n\" + (tweet.text).lower()\n #remove punctiation and numbers\n user_tweets = re.sub(r'[^\\w\\s]','', user_tweets)\n user_tweets = re.sub(r'[0-9]','', user_tweets)\n #cut '.xml' from file name to get user value as the same as in txt file\n filename = filename[:-4]\n #lowercase the text\n tweet_dict[filename] = user_tweets.lower()\n #tokenize user tweets\n tokenize = word_tokenize(user_tweets)\n tokenize = [word for word in tokenize if not (word.startswith('http') or word.startswith('amp') or word.startswith('xx')) ]\n tokenize_dict[filename] = tokenize\n i += 1\n if i % 100 == 0:\n print(i)\n words += [word for word in tokenize_dict[filename] if word not in cachedStopWords]\n user_tweets = \"\"\n \n return tweet_dict, words",
"def save_tweet(self,tweet):\n with open(self.tweets_file, \"ab\") as output:\n output.write(tweet['id']+','+tweet['created']+','+tweet['text']+','+tweet['retweet_count']+','+tweet['favorite_count']+','+tweet['lang']+','+tweet['country']+','+tweet['city']+','+tweet['province']+'\\n')\n self.file_line_counter+=1\n self.on_save_tweet()",
"def getNewerTweets():\n recent = True\n Searcher.set_twitter_connection(login, TWITTER_CREDENTIALS)\n Searcher.run(search_terms, limit, recent, REST)",
"def get_friends_tweets(self):\n tweets = []\n for friend in self.friends:\n for tweet in tweepy.Cursor(self.API.user_timeline).items():\n tweets.append(tweet._json)\n print(tweets,\"\\n\")\n \n return tweets",
"def crawl_twitter_content(options):\n content_file_writer = FileWriter(100000, \"twitter_content\", options.output) \n with open(options.input, \"r\") as input_f:\n for user_name in input_f:\n try:\n user_name = user_name.strip()\n pre_tweets, last_tweet_time = crawl_content_withapi(user_name.strip(), options)\n tweet_list = trans_json_to_tweet(pre_tweets)\n logging.info(\"Get {} Tweets From Api\".format(str(len(tweet_list))))\n write_content_to_file(content_file_writer, tweet_list, user_name)\n\n if options.all and len(tweet_list) >= 3200:\n logging.info(\"Start Crawl Status Not Use Api!\")\n new_tweet_list = crawl_content_noapi(user_name.strip(), last_tweet_time)\n write_content_to_file(content_file_writer, new_tweet_list, user_name)\n logging.info(\"Get {} Tweets From No Api\".format(str(len(new_tweet_list))))\n tweet_list.append(new_tweet_list)\n\n if options.comment:\n status_id_list = get_status_id_list(tweet_list)\n logging.info(\"Start Crawl Comment\" + str(len(status_id_list)))\n crawl_comments(options, user_name.strip(), status_id_list, content_file_writer)\n\n except Exception as e:\n print \"Have Exception %s\" % e\n\n content_file_writer.close()",
"def get_tweets_to_csv(text_query,tweet_items):\n tweet_list=[]\n for tweet in tweepy.Cursor(api.search, q=text_query,lang=\"en\",tweet_mode=\"extended\",result_type=\"recent\",).items(tweet_items):\n if (not tweet.retweeted) and ('RT @' not in tweet.full_text):\n tweet_list.append((tweet.user.location,tweet.place,tweet.created_at,tweet.id,tweet.retweet_count,tweet.favorite_count,tweet.user.followers_count,tweet.full_text))\n tweetsdf = pd.DataFrame(tweet_list,columns=['UserLocation','Place','Datetime', 'TweetId','RetweetCount','FavoriteCount','followers','Text'])\n tweetsdf.to_csv('data/raw/tweets_raw.csv'.format())",
"def get_tweets(auth):\n url = 'https://stream.twitter.com/1.1/statuses/filter.json'\n query_data = [('language', 'en'), ('locations', '-130,-20,100,50'), ('track', '#')]\n query_url = url + '?' + '&'.join([str(t[0]) + '='+str(t[1]) for t in query_data])\n res = requests.get(query_url, auth=auth, stream=True)\n print(query_url, res)\n return res",
"def get_tweets(self, account, number=MAX_TWEETS, since_id=None, max_id=None):\n import twitter\n\n all_tweets = []\n while number > 0:\n try:\n tweets = self.api.GetUserTimeline(\n screen_name=account,\n include_rts=False,\n exclude_replies=True,\n count=min(number, CHUNK_SIZE),\n since_id=since_id,\n max_id=max_id,\n )\n except twitter.TwitterError as e:\n raise plugin.PluginError(f'Unable to fetch timeline {account} for {e}')\n\n if not tweets:\n break\n\n all_tweets += tweets\n number -= len(tweets)\n max_id = tweets[-1].id - 1\n\n return all_tweets",
"def get_tweets(self):\n try:\n self.response = get(self.query_url, auth=AUTH, stream=True)\n print(self.query_url, self.response)\n return self.response\n except exceptions.HTTPError as e:\n print(\"Response error:\", e)\n exit(1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/changelog[/{revision}] Show information about multiple changesets. If the optional ``revision`` URL argument is absent, information about all changesets starting at ``tip`` will be rendered. If the ``revision`` argument is present, changesets will be shown starting from the specified revision. If ``revision`` is absent, the ``rev`` query string argument may be defined. This will perform a search for changesets. The argument for ``rev`` can be a single revision, a revision set, or a literal keyword to search for in changeset data (equivalent to
|
def changelog(web, shortlog=False):
query = b''
if b'node' in web.req.qsparams:
ctx = webutil.changectx(web.repo, web.req)
symrev = webutil.symrevorshortnode(web.req, ctx)
elif b'rev' in web.req.qsparams:
return _search(web)
else:
ctx = web.repo[b'tip']
symrev = b'tip'
def changelist(maxcount):
revs = []
if pos != -1:
revs = web.repo.changelog.revs(pos, 0)
for entry in webutil.changelistentries(web, revs, maxcount, parity):
yield entry
if shortlog:
revcount = web.maxshortchanges
else:
revcount = web.maxchanges
if b'revcount' in web.req.qsparams:
try:
revcount = int(web.req.qsparams.get(b'revcount', revcount))
revcount = max(revcount, 1)
web.tmpl.defaults[b'sessionvars'][b'revcount'] = revcount
except ValueError:
pass
lessvars = copy.copy(web.tmpl.defaults[b'sessionvars'])
lessvars[b'revcount'] = max(revcount // 2, 1)
morevars = copy.copy(web.tmpl.defaults[b'sessionvars'])
morevars[b'revcount'] = revcount * 2
count = len(web.repo)
pos = ctx.rev()
parity = paritygen(web.stripecount)
changenav = webutil.revnav(web.repo).gen(pos, revcount, count)
entries = list(changelist(revcount + 1))
latestentry = entries[:1]
if len(entries) > revcount:
nextentry = entries[-1:]
entries = entries[:-1]
else:
nextentry = []
return web.sendtemplate(
b'shortlog' if shortlog else b'changelog',
changenav=changenav,
node=ctx.hex(),
rev=pos,
symrev=symrev,
changesets=count,
entries=templateutil.mappinglist(entries),
latestentry=templateutil.mappinglist(latestentry),
nextentry=templateutil.mappinglist(nextentry),
archives=web.archivelist(b'tip'),
revcount=revcount,
morevars=morevars,
lessvars=lessvars,
query=query,
)
|
[
"def revision_list(self, args):\n\n messages = []\n def canceler(cancel_args):\n if cancel_args[0].lower() in ['revision','rev']:\n return RevisionCommand(parent=self.parent, ctx=self.ctx, args=cancel_args, guild=self.guild, user=self.user, channel=self.channel).run()\n else:\n self.parent.args = cancel_args\n self.parent.command = self.parent.args[0]\n return self.parent.get_messages()\n\n def formatter(item, item_num, page_num, page_size):\n return item.get_short_string(self.user)\n\n messages.extend(Dialog({\n 'svc': revision_svc,\n 'user': self.user,\n 'title': 'Revision List',\n 'command': 'revision ' + (' '.join(args)),\n 'type': 'view',\n 'type_name': 'REVISION',\n 'page_size': 1,\n 'getter': {\n 'method': Revision.get_by_page,\n 'params': {'params': {'archived': False}}\n },\n 'formatter': formatter,\n 'cancel': canceler\n }).open())\n return messages",
"def get_commit_history(self, revisions):\n log_fields = {\n 'commit_id': b'%H',\n 'parent_id': b'%P',\n 'author_name': b'%an',\n 'author_email': b'%ae',\n 'author_date': b'%ad',\n 'committer_name': b'%cn',\n 'committer_email': b'%ce',\n 'committer_date': b'%cd',\n 'commit_message': b'%B',\n }\n\n # 0x1f is the ASCII field separator. It is a non-printable character\n # that should not appear in any field in `git log`.\n log_format = b'%x1f'.join(six.itervalues(log_fields))\n\n log_entries = execute(\n [\n self.git,\n b'log',\n b'-z',\n b'--reverse',\n b'--pretty=format:%s' % log_format,\n b'--date=iso8601-strict',\n b'%s..%s' % (revisions['base'].encode('utf-8'),\n revisions['tip'].encode('utf-8')),\n ],\n ignore_errors=True,\n none_on_ignored_error=True,\n results_unicode=True)\n\n if not log_entries:\n return None\n\n history = []\n field_names = six.viewkeys(log_fields)\n\n for log_entry in log_entries.split(self._NUL):\n fields = log_entry.split(self._FIELD_SEP)\n entry = dict(zip(field_names, fields))\n\n parents = entry['parent_id'].split()\n\n if len(parents) > 1:\n raise SCMError(\n 'The Git SCMClient only supports posting commit histories '\n 'that are entirely linear.')\n elif len(parents) == 0:\n raise SCMError(\n 'The Git SCMClient only supports posting commits that '\n 'have exactly one parent.')\n\n history.append(entry)\n\n return history",
"async def revisions(self, revids, prop=\"ids|timestamp|flags|comment|user\"):\n kwargs = {\n \"prop\": \"revisions\",\n \"rvprop\": prop,\n \"revids\": \"|\".join(map(str, revids)),\n }\n\n revisions = []\n res = await self.get(\"query\", **kwargs)\n pages = res.get(\"query\", {}).get(\"pages\", {}).values()\n for page in pages:\n for revision in page.get(\"revisions\", ()):\n revision[\"pageid\"] = page.get(\"pageid\")\n revision[\"pagetitle\"] = page.get(\"title\")\n revision[\"timestamp\"] = parse_timestamp(revision[\"timestamp\"])\n revisions.append(revision)\n return revisions",
"def test_defects_revision_history():\n rally = Rally(server=RALLY, user=RALLY_USER, password=RALLY_PSWD)\n response = rally.get('Defect', fetch=True, limit=10)\n \n defect1 = response.next()\n defect2 = response.next()\n assert defect1.oid != defect2.oid\n\n d1_revs = defect1.RevisionHistory.Revisions\n d2_revs = defect2.RevisionHistory.Revisions\n\n assert type(d1_revs) == list\n assert type(d2_revs) == list\n\n d1_rev1 = d1_revs.pop() # now the revs are in stack order, newest first, original the last\n d2_rev1 = d2_revs.pop() # ditto\n\n assert d1_rev1.RevisionNumber == 0\n assert d2_rev1.RevisionNumber == 0\n\n assert d1_rev1.Description != \"\" and len(d1_rev1.Description) > 0\n assert d2_rev1.Description != \"\" and len(d2_rev1.Description) > 0\n\n assert d1_rev1._hydrated == True\n assert d2_rev1._hydrated == True",
"def run_hg_log(revset):\n result = sudo(\"hg log --template '{rev}:{node|short} {branch} {desc|firstline}\\\\n' -r '%s'\" % revset)\n return result.split('\\n') if result else []",
"def grab_changesets(self, path, url, changesets):\n raise NotImplementedError",
"def revisions(self, revisions):\n\n self._revisions = revisions",
"def get_reviews_by_repository_id_changeset_revision( app, repository_id, changeset_revision ):\n sa_session = app.model.context.current\n return sa_session.query( app.model.RepositoryReview ) \\\n .filter( and_( app.model.RepositoryReview.repository_id == app.security.decode_id( repository_id ),\n app.model.RepositoryReview.changeset_revision == changeset_revision ) ) \\\n .all()",
"def populate(self, revisions):\n \n # reset first\n self.reset()\n \n # for each revision\n for i, r in enumerate(revisions):\n data_found = False\n # check nodes for data\n for n in r.nodes:\n if n.text_data is not None:\n data_found = True\n break\n # add the revision if there was a node with data, we don't show\n # revisions without data to view\n if data_found:\n # revision lookup, ComboBox text to index\n self.values[\"Revision: \" + str(r.revision_number)] = i\n self.comboBox.Append(\"Revision: \" + str(r.revision_number))",
"def BuildRequestsQuery(revision=None, branch_name=None, starttime=None, \n endtime=None, changeid_all=False):\n b = meta.scheduler_db_meta.tables['builds']\n br = meta.scheduler_db_meta.tables['buildrequests']\n bs = meta.scheduler_db_meta.tables['buildsets']\n s = meta.scheduler_db_meta.tables['sourcestamps']\n sch = meta.scheduler_db_meta.tables['sourcestamp_changes']\n c = meta.scheduler_db_meta.tables['changes']\n\n q = outerjoin(br, b, b.c.brid == br.c.id).join(\n bs, bs.c.id == br.c.buildsetid).join(\n s, s.c.id == bs.c.sourcestampid).outerjoin(\n sch, sch.c.sourcestampid == s.c.id).outerjoin(\n c, c.c.changeid == sch.c.changeid\n ).select().with_only_columns([\n b.c.id.label('bid'),\n b.c.finish_time,\n b.c.number,\n b.c.start_time,\n br.c.id.label('brid'),\n br.c.buildsetid,\n br.c.buildername,\n br.c.claimed_at,\n br.c.claimed_by_name,\n br.c.complete,\n br.c.complete_at,\n br.c.results,\n br.c.submitted_at,\n bs.c.reason,\n c.c.author,\n c.c.category,\n c.c.changeid,\n c.c.comments,\n c.c.project,\n c.c.repository,\n c.c.revision.label('changes_revision'),\n c.c.revlink,\n c.c.when_timestamp,\n s.c.branch,\n s.c.revision,\n s.c.id.label('ssid'),\n ])\n\n if revision:\n if not isinstance(revision, list):\n revision = [revision]\n revmatcher = [s.c.revision.like(rev + '%') for rev in revision if rev]\n if revmatcher: \n q = q.where(or_(*revmatcher))\n if branch_name:\n q = q.where(s.c.branch.like(branch_name + '%'))\n if starttime:\n q = q.where(or_(c.c.when_timestamp >= starttime, \n br.c.submitted_at >= starttime))\n if endtime:\n q = q.where(or_(c.c.when_timestamp < endtime, \n br.c.submitted_at < endtime))\n\n # some build requests might have multiple builds or changeids\n if not changeid_all:\n q = q.group_by(br.c.id, b.c.id)\n # else:\n # q = q.group_by(br.c.id, b.c.id, c.c.changeid)\n\n return q",
"def changeset(web):\n ctx = webutil.changectx(web.repo, web.req)\n\n return web.sendtemplate(b'changeset', **webutil.changesetentry(web, ctx))",
"def get_changelogs(self, entity=None):\r\n return self._changelog_manager.list(entity=entity)",
"def log_revision(self, cvs_rev, svn_revnum):\n\n for (symbol_id, cvs_symbol_id,) in cvs_rev.opened_symbols:\n self._log_opening(symbol_id, cvs_symbol_id, svn_revnum)\n\n for (symbol_id, cvs_symbol_id) in cvs_rev.closed_symbols:\n self._log_closing(symbol_id, cvs_symbol_id, svn_revnum)",
"def RenderChangelogHTML(self, tweak_data):\n element = \"\"\n try:\n for version in tweak_data['changelog'][::-1]:\n element += DepictionGenerator.ChangelogEntry(self, version['version'], version['changes'])\n return element\n except Exception:\n return \"This package has no changelog.\"",
"def changes(argv):\r\n\t\tcallBuilder()\r\n\t\tshow.info(\"The overview file is in %(TARGETDIR)s.\", OPTIONS)",
"def parse_revision_spec(self, revisions=[]):\n n_revs = len(revisions)\n result = {}\n\n if n_revs == 0:\n # No revisions were passed in. Start with HEAD, and find the\n # tracking branch automatically.\n head_ref = self._rev_parse(self.get_head_ref())[0]\n parent_branch = self._get_parent_branch()\n remote = self._find_remote(parent_branch)\n parent_ref = self._rev_parse(parent_branch)[0]\n\n merge_base = self._rev_list_youngest_remote_ancestor(\n parent_ref, remote)\n\n result = {\n 'base': parent_ref,\n 'tip': head_ref,\n 'commit_id': head_ref,\n }\n\n if parent_ref != merge_base:\n result['parent_base'] = merge_base\n\n # Since the user asked us to operate on HEAD, warn them about a\n # dirty working directory.\n if (self.has_pending_changes() and\n not self.config.get('SUPPRESS_CLIENT_WARNINGS', False)):\n logging.warning('Your working directory is not clean. Any '\n 'changes which have not been committed '\n 'to a branch will not be included in your '\n 'review request.')\n\n elif n_revs == 1 or n_revs == 2:\n # Let `git rev-parse` sort things out.\n parsed = self._rev_parse(revisions)\n\n n_parsed_revs = len(parsed)\n assert n_parsed_revs <= 3\n\n if n_parsed_revs == 1:\n # Single revision. Extract the parent of that revision to use\n # as the base.\n parent = self._rev_parse('%s^' % parsed[0])[0]\n result = {\n 'base': parent,\n 'tip': parsed[0],\n 'commit_id': parsed[0],\n }\n elif n_parsed_revs == 2:\n if parsed[1].startswith('^'):\n # Passed in revisions were probably formatted as\n # \"base..tip\". The rev-parse output includes all ancestors\n # of the first part, and none of the ancestors of the\n # second. Basically, the second part is the base (after\n # stripping the ^ prefix) and the first is the tip.\n result = {\n 'base': parsed[1][1:],\n 'tip': parsed[0],\n }\n else:\n # First revision is base, second is tip\n result = {\n 'base': parsed[0],\n 'tip': parsed[1],\n }\n elif n_parsed_revs == 3 and parsed[2].startswith('^'):\n # Revision spec is diff-since-merge. Find the merge-base of the\n # two revs to use as base.\n merge_base = self._execute([self.git, 'merge-base', parsed[0],\n parsed[1]]).strip()\n result = {\n 'base': merge_base,\n 'tip': parsed[0],\n }\n else:\n raise InvalidRevisionSpecError(\n 'Unexpected result while parsing revision spec')\n\n parent_branch = self._get_parent_branch()\n remote = self._find_remote(parent_branch)\n parent_base = self._rev_list_youngest_remote_ancestor(\n result['base'], remote)\n\n if parent_base != result['base']:\n result['parent_base'] = parent_base\n else:\n raise TooManyRevisionsError\n\n return result",
"def parse_revision_spec(self, revisions=[]):\n n_revs = len(revisions)\n result = {}\n\n if n_revs == 0:\n # No revisions were passed in--start with HEAD, and find the\n # submit branch automatically.\n result['tip'] = self._get_revno()\n result['base'] = self._get_revno('ancestor:')\n elif n_revs == 1 or n_revs == 2:\n # If there's a single argument, try splitting it on '..'\n if n_revs == 1:\n revisions = self.REVISION_SEPARATOR_REGEX.split(revisions[0])\n n_revs = len(revisions)\n\n if n_revs == 1:\n # Single revision. Extract the parent of that revision to use\n # as the base.\n result['base'] = self._get_revno('before:' + revisions[0])\n result['tip'] = self._get_revno(revisions[0])\n elif n_revs == 2:\n # Two revisions.\n result['base'] = self._get_revno(revisions[0])\n result['tip'] = self._get_revno(revisions[1])\n else:\n raise TooManyRevisionsError\n\n # XXX: I tried to automatically find the parent diff revision here,\n # but I really don't understand the difference between submit\n # branch, parent branch, bound branches, etc. If there's some way\n # to know what to diff against, we could use\n # 'bzr missing --mine-only --my-revision=(base) --line'\n # to see if we need a parent diff.\n else:\n raise TooManyRevisionsError\n\n if self.options.parent_branch:\n result['parent_base'] = result['base']\n result['base'] = self._get_revno(\n 'ancestor:%s' % self.options.parent_branch)\n\n return result",
"def getRevisionLog(url, revision):\r\n svn_log = subprocess2.check_output(\r\n ['svn', 'log', url, '-r', str(revision)],\r\n universal_newlines=True).splitlines(True)\r\n # Don't include the header lines and the trailing \"---...\" line.\r\n return ''.join(svn_log[3:-1])",
"def changes(self, tv_id, start_date=None, end_date=None, page=1):\n params = \"page=%s\" % page\n if start_date:\n params += \"&start_date=%s\" % start_date\n if end_date:\n params += \"&end_date=%s\" % end_date\n return self._request_obj(\n self._urls[\"changes\"] % tv_id,\n params=params,\n key=\"changes\"\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/shortlog Show basic information about a set of changesets. This accepts the same parameters as the ``changelog`` handler. The only difference is the ``shortlog`` template will be rendered instead of the ``changelog`` template.
|
def shortlog(web):
return changelog(web, shortlog=True)
|
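A hedged client-side sketch of exercising both handlers over HTTP. It assumes the repository is being served locally with ``hg serve -p 8000``; the URL shapes follow the descriptions above, and nothing else here comes from the handlers themselves.

from urllib.request import urlopen

base = 'http://localhost:8000'
for path in ('/shortlog?revcount=5',      # compact listing, same parameters as changelog
             '/changelog/tip',            # full listing starting from an explicit revision
             '/changelog?rev=bugfix'):    # keyword/revset search via the rev argument
    with urlopen(base + path) as resp:
        print(path, '->', resp.status, len(resp.read()), 'bytes')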
[
"def changelog(web, shortlog=False):\n\n query = b''\n if b'node' in web.req.qsparams:\n ctx = webutil.changectx(web.repo, web.req)\n symrev = webutil.symrevorshortnode(web.req, ctx)\n elif b'rev' in web.req.qsparams:\n return _search(web)\n else:\n ctx = web.repo[b'tip']\n symrev = b'tip'\n\n def changelist(maxcount):\n revs = []\n if pos != -1:\n revs = web.repo.changelog.revs(pos, 0)\n\n for entry in webutil.changelistentries(web, revs, maxcount, parity):\n yield entry\n\n if shortlog:\n revcount = web.maxshortchanges\n else:\n revcount = web.maxchanges\n\n if b'revcount' in web.req.qsparams:\n try:\n revcount = int(web.req.qsparams.get(b'revcount', revcount))\n revcount = max(revcount, 1)\n web.tmpl.defaults[b'sessionvars'][b'revcount'] = revcount\n except ValueError:\n pass\n\n lessvars = copy.copy(web.tmpl.defaults[b'sessionvars'])\n lessvars[b'revcount'] = max(revcount // 2, 1)\n morevars = copy.copy(web.tmpl.defaults[b'sessionvars'])\n morevars[b'revcount'] = revcount * 2\n\n count = len(web.repo)\n pos = ctx.rev()\n parity = paritygen(web.stripecount)\n\n changenav = webutil.revnav(web.repo).gen(pos, revcount, count)\n\n entries = list(changelist(revcount + 1))\n latestentry = entries[:1]\n if len(entries) > revcount:\n nextentry = entries[-1:]\n entries = entries[:-1]\n else:\n nextentry = []\n\n return web.sendtemplate(\n b'shortlog' if shortlog else b'changelog',\n changenav=changenav,\n node=ctx.hex(),\n rev=pos,\n symrev=symrev,\n changesets=count,\n entries=templateutil.mappinglist(entries),\n latestentry=templateutil.mappinglist(latestentry),\n nextentry=templateutil.mappinglist(nextentry),\n archives=web.archivelist(b'tip'),\n revcount=revcount,\n morevars=morevars,\n lessvars=lessvars,\n query=query,\n )",
"def changes(argv):\r\n\t\tcallBuilder()\r\n\t\tshow.info(\"The overview file is in %(TARGETDIR)s.\", OPTIONS)",
"def print_short_help():\n\n print(CommandlineTool.create_short_help())",
"def short_changeset(self):\n return self.changeset[:8]",
"def setShort(*args, **kwargs):\n \n pass",
"def describe_change_set(self, change_set_name):\n self.logger.debug(\n \"%s - Describing Change Set '%s'\", self.stack.name, change_set_name\n )\n return self.connection_manager.call(\n service=\"cloudformation\",\n command=\"describe_change_set\",\n kwargs={\n \"ChangeSetName\": change_set_name,\n \"StackName\": self.stack.external_name,\n },\n )",
"def SetToolShortHelp(*args, **kwargs):\n return _aui.AuiToolBar_SetToolShortHelp(*args, **kwargs)",
"def create_short_help():\n\n result = ''\n result += CommandlineTool._title + \"\\n\"\n result += CommandlineTool._cp_string + \"\\n\"\n have_options = ' [options..]' if len(CommandlineTool._short_opt) > 0 or len(\n CommandlineTool._long_opt) > 1 else ''\n have_commands = ' <command>' if len(\n CommandlineTool._commands) > 0 else ''\n cmd = (sys.argv[0]).split('/')[-1]\n result += \"Syntax: \" + sys.argv[0] + have_options + have_commands + \\\n \"\\n\" if CommandlineTool._trn == None else CommandlineTool._trn.msg(\n 'htk_help_syntax', cmd) + \"\\n\"\n result += \"For list of all available commands and options type {h}\" + cmd + \\\n \" help{e}\" if CommandlineTool._trn == None else CommandlineTool._trn.msg(\n 'htk_help_on_help', cmd)\n\n # apply decorations\n result = CommandlineTool.parse_shell_text(result)\n return result",
"def changeset(web):\n ctx = webutil.changectx(web.repo, web.req)\n\n return web.sendtemplate(b'changeset', **webutil.changesetentry(web, ctx))",
"def info(ctx, show_all):\r\n ts, tier = _get_config_and_tier(ctx.obj.tier_name)\r\n\r\n #hd = ['template_name', '']\r\n #print \"template code\", [t().template_name for t in templater.export]\r\n\r\n _list_stacks(tier, show_all)",
"def setGenericShort(*args, **kwargs):\n \n pass",
"def run_hg_log(revset):\n result = sudo(\"hg log --template '{rev}:{node|short} {branch} {desc|firstline}\\\\n' -r '%s'\" % revset)\n return result.split('\\n') if result else []",
"def GetToolShortHelp(*args, **kwargs):\n return _aui.AuiToolBar_GetToolShortHelp(*args, **kwargs)",
"def help_doc(self):\n # ignore option that cant be modified on cmd line\n if not (self.short or self.long):\n return []\n\n text = []\n opt_str = self.help_param()\n # TODO It should always display option's default value\n opt_help = self.help % {'default': self.default}\n opt_choices = self.help_choices()\n opt_config = 'config: {}'.format(self.name)\n opt_env = ', environ: {}'.format(self.env_var) if self.env_var else ''\n\n desc = f'{opt_help} {opt_choices} ({opt_config}{opt_env})'\n text.append(self._print_2_columns(opt_str, desc))\n # print bool inverse option\n if self.inverse:\n opt_str = '--%s' % self.inverse\n opt_help = 'opposite of --%s' % self.long\n text.append(self._print_2_columns(opt_str, opt_help))\n return text",
"def main(milestone, log_level, verbose, check_only, allow_closed):\n log_level = \"DEBUG\" if verbose else log_level\n logging.basicConfig(level=log_level)\n\n token = os.environ.get(\"GH_TOKEN\", None)\n if not token:\n print(\"error: GH_TOKEN is not set\", file=sys.stderr)\n sys.exit(1)\n\n items = get_milestone_items(milestone, token, allow_closed)\n if not items:\n print(f\"error: no such milestone: {milestone}\", file=sys.stderr)\n sys.exit(1)\n\n problems = check_milestone_items(items)\n for problem in problems:\n print(problem, file=sys.stderr)\n\n if len(problems) > 0:\n sys.exit(2)\n elif check_only:\n sys.exit(0)\n\n CHANGELOG = REPO_ROOT / \"docs\" / \"CHANGELOG\"\n\n with open(CHANGELOG) as f:\n old_changelog = f.read()\n\n out = open(CHANGELOG, mode=\"w\")\n\n out.write(f\"{datetime.date.today()} {milestone:>8}:\\n\")\n out.write(\"--------------------\\n\")\n grouping = lambda item: get_label_type(item) or \"none\"\n items = sorted(items, key=grouping)\n for group_type, group in groupby(items, grouping):\n if group_type == \"bug\":\n out.write(\" * bugfixes:\\n\")\n elif group_type == \"feature\":\n out.write(\" * features:\\n\")\n elif group_type == \"task\":\n out.write(\" * tasks:\\n\")\n elif group_type == \"none\":\n continue\n for item in group:\n out.write(f\" - {description(item)}\\n\")\n out.write(\"\\n\")\n out.write(old_changelog)",
"def view_config_changes():",
"def htmlhelp(argv):\r\n\t\tcallBuilder()\r\n\t\tshow.info(\"Build finished; now you can run HTML Help Workshop with the .hhp project file in %(TARGETDIR)s.\", OPTIONS)",
"def showhelp():\n\tusage()",
"def api_help():\n docs = [{'name': route.__name__, 'value': route.__doc__}\n for route in [aliases, connections, databases, fetch, now, reports]]\n return render_template('help.html', docs=docs, version=__version__, url_root=request.url_root)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/changeset[/{revision}] Show information about a single changeset. A URL path argument is the changeset identifier to show. See ``hg help revisions`` for possible values. If not defined, the ``tip`` changeset will be shown. The ``changeset`` template is rendered. Contents of the ``changesettag``, ``changesetbookmark``, ``filenodelink``, ``filenolink``, and the many templates related to diffs may all be used to produce the output.
|
def changeset(web):
    ctx = webutil.changectx(web.repo, web.req)

    return web.sendtemplate(b'changeset', **webutil.changesetentry(web, ctx))
|
[
"def grab_changesets(self, path, url, changesets):\n raise NotImplementedError",
"def changelog(web, shortlog=False):\n\n query = b''\n if b'node' in web.req.qsparams:\n ctx = webutil.changectx(web.repo, web.req)\n symrev = webutil.symrevorshortnode(web.req, ctx)\n elif b'rev' in web.req.qsparams:\n return _search(web)\n else:\n ctx = web.repo[b'tip']\n symrev = b'tip'\n\n def changelist(maxcount):\n revs = []\n if pos != -1:\n revs = web.repo.changelog.revs(pos, 0)\n\n for entry in webutil.changelistentries(web, revs, maxcount, parity):\n yield entry\n\n if shortlog:\n revcount = web.maxshortchanges\n else:\n revcount = web.maxchanges\n\n if b'revcount' in web.req.qsparams:\n try:\n revcount = int(web.req.qsparams.get(b'revcount', revcount))\n revcount = max(revcount, 1)\n web.tmpl.defaults[b'sessionvars'][b'revcount'] = revcount\n except ValueError:\n pass\n\n lessvars = copy.copy(web.tmpl.defaults[b'sessionvars'])\n lessvars[b'revcount'] = max(revcount // 2, 1)\n morevars = copy.copy(web.tmpl.defaults[b'sessionvars'])\n morevars[b'revcount'] = revcount * 2\n\n count = len(web.repo)\n pos = ctx.rev()\n parity = paritygen(web.stripecount)\n\n changenav = webutil.revnav(web.repo).gen(pos, revcount, count)\n\n entries = list(changelist(revcount + 1))\n latestentry = entries[:1]\n if len(entries) > revcount:\n nextentry = entries[-1:]\n entries = entries[:-1]\n else:\n nextentry = []\n\n return web.sendtemplate(\n b'shortlog' if shortlog else b'changelog',\n changenav=changenav,\n node=ctx.hex(),\n rev=pos,\n symrev=symrev,\n changesets=count,\n entries=templateutil.mappinglist(entries),\n latestentry=templateutil.mappinglist(latestentry),\n nextentry=templateutil.mappinglist(nextentry),\n archives=web.archivelist(b'tip'),\n revcount=revcount,\n morevars=morevars,\n lessvars=lessvars,\n query=query,\n )",
"def get_changeset_details_from_osm(changeset_id):\n url = os.path.join(\n config.OSM_API_BASE_URL,\n 'changeset',\n changeset_id,\n 'download')\n response = requests.get(url)\n return analyze_changeset(ET.fromstring(response.content))",
"def describe_change_set(self, change_set_name):\n self.logger.debug(\n \"%s - Describing Change Set '%s'\", self.stack.name, change_set_name\n )\n return self.connection_manager.call(\n service=\"cloudformation\",\n command=\"describe_change_set\",\n kwargs={\n \"ChangeSetName\": change_set_name,\n \"StackName\": self.stack.external_name,\n },\n )",
"def run_hg_log(revset):\n result = sudo(\"hg log --template '{rev}:{node|short} {branch} {desc|firstline}\\\\n' -r '%s'\" % revset)\n return result.split('\\n') if result else []",
"def changes(argv):\r\n\t\tcallBuilder()\r\n\t\tshow.info(\"The overview file is in %(TARGETDIR)s.\", OPTIONS)",
"def get_changeset_branches(self, changeset):\n raise NotImplementedError(\"Abstract method\")",
"def get_changeset(changeset_id: str | int) -> dict:\n try:\n return get_elm(\"changeset\", changeset_id)\n except ValueError:\n raise ValueError(f\"Changeset `{changeset_id}` not found\")",
"def resource_show(args):\n _show_patch(args.hash)",
"def make_ticket_comment(self, repos, changeset):\n revstring = str(changeset.rev)\n if repos.reponame:\n revstring += '/' + repos.reponame\n return \"\"\"\\\nIn [%s]:\n{{{\n#!CommitTicketReference repository=\"%s\" revision=\"%s\"\n%s\n}}}\"\"\" % (revstring, repos.reponame, changeset.rev, changeset.message.strip())",
"def suggest_changes(self, req, page):\n page_id, contents = self.get_page_source(page)\n\n return Response(render_template(req, 'edit.html', self.globalcontext, dict(\n contents=contents,\n pagename=page,\n doctitle=self.globalcontext['titles'].get(page_id+'.rst') or 'this page',\n submiturl=relative_uri('/@edit/'+page+'/', '/@submit/'+page),\n )))",
"def shortlog(web):\n return changelog(web, shortlog=True)",
"def RenderChangelogHTML(self, tweak_data):\n element = \"\"\n try:\n for version in tweak_data['changelog'][::-1]:\n element += DepictionGenerator.ChangelogEntry(self, version['version'], version['changes'])\n return element\n except Exception:\n return \"This package has no changelog.\"",
"def changed_files(revset, filter_re=None):\n require('code_dir')\n\n with cd(env.code_dir):\n result = run(\"hg status --rev '%s'\" % revset, quiet=True).splitlines()\n\n if filter_re:\n regex = re.compile(filter_re)\n result = filter(lambda filename: regex.search(filename), result)\n\n return result",
"def short_changeset(self):\n return self.changeset[:8]",
"def test_get_url_on_diff_viewer_revision(self):\n request = RequestFactory().request()\n request.resolver_match = Mock()\n request.resolver_match.url_name = 'view-diff-revision'\n\n self.assertEqual(self.action.get_url({'request': request}),\n 'raw/')",
"def revisionfiles_info(unrestricted=False):",
"def changelogrevision(self, nodeorrev):\n text, sidedata = self._revisiondata(nodeorrev)\n return changelogrevision(\n text, sidedata, self._copiesstorage == b'changeset-sidedata'\n )",
"def get_git_changeset():\n repo_dir = os.path.dirname(\n os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))\n )\n )\n git_log = subprocess.run(\n ['git', 'log', '--pretty=format:%ct', '--quiet', '-1', 'HEAD'],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=repo_dir,\n )\n timestamp = git_log.stdout\n try:\n timestamp = datetime.utcfromtimestamp(int(timestamp))\n except ValueError:\n return None\n return timestamp.strftime('%Y%m%d%H%M%S')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/branches Show information about branches. All known branches are contained in the output, even closed branches. No arguments are accepted. The ``branches`` template is rendered.
|
def branches(web):
    entries = webutil.branchentries(web.repo, web.stripecount)
    latestentry = webutil.branchentries(web.repo, web.stripecount, 1)

    return web.sendtemplate(
        b'branches',
        node=hex(web.repo.changelog.tip()),
        entries=entries,
        latestentry=latestentry,
    )
|
[
"def _gitlab_list_branches(self) -> typing.Set[str]:\n response = requests.Session().get(\n f\"{IGitt.GitLab.BASE_URL}/projects/{quote_plus(self.slug)}/repository/branches\",\n params={'private_token': self.token},\n )\n\n response.raise_for_status()\n # TODO: pagination?\n return response.json()",
"def branches(self, match=None):\n args = ['--list', '--remote']\n if match:\n args.append(match)\n result = self.git('branch', *args)\n branches = [branch.strip() for branch in result.stdout.readlines()]\n return [branch.decode() for branch in branches]",
"def branches(self):\n branches_response = self.client.GET(self.name, Client.BRANCHES)\n for branch_name in branches_response:\n yield Branch(self, branch_name)",
"def _github_list_branches(self) -> typing.Set[str]:\n response = requests.Session().get(\n f'{IGitt.GitHub.BASE_URL}/repos/{self.slug}/branches',\n headers={f'Authorization': f'token {self.token}'},\n )\n\n response.raise_for_status()\n # TODO: pagination?\n return response.json()",
"def list_all_branches(bank, city):\n try:\n branches = (db.session.query(Branches, Banks).\n filter(Branches.bank_id == Banks.id).\n filter(Branches.city == city).\n filter(Banks.name == bank).\n all())\n results = util.prepare_results(branches)\n if results:\n return render_template(\"branch_details.html\", data=results)\n # no results matched\n return '<p>No results matched your query!</p>', 404\n except Exception as fault:\n logger.error(fault, exc_info=True)\n logger.error(\"Something went wrong while listing branches. Error: %s\", str(fault))\n return \"Uh-OH! Something went wrong, we are looking into it.\"",
"def branches(self):\n return self._named_trees('branch')",
"def all_git_branches(self, project_id, **kwargs):\n\n all_params = ['project_id']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method all_git_branches\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'project_id' is set\n if ('project_id' not in params) or (params['project_id'] is None):\n raise ValueError(\"Missing the required parameter `project_id` when calling `all_git_branches`\")\n\n resource_path = '/projects/{project_id}/git_branches'.replace('{format}', 'json')\n path_params = {}\n if 'project_id' in params:\n path_params['project_id'] = params['project_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[GitBranch]',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get_branches(self) -> List[\"Branch\"]:\n results = self.gitea.requests_get(\n Repository.REPO_BRANCHES % (self.owner.username, self.name)\n )\n return [Branch.parse_response(self.gitea, result) for result in results]",
"def branches(self):\n results = []\n\n try:\n root_dirents = self.client.list(\n self.normalize_path('/'),\n dirent_fields=SVN_DIRENT_CREATED_REV,\n recurse=False)[1:]\n except ClientError as e:\n raise SCMError(e)\n\n root_entries = SortedDict()\n for dirent, unused in root_dirents:\n name = dirent['path'].split('/')[-1]\n rev = six.text_type(dirent['created_rev'].number)\n root_entries[name] = rev\n\n if 'trunk' in root_entries:\n # Looks like the standard layout. Adds trunks and any branches\n results.append(\n Branch('trunk', root_entries['trunk'], True))\n\n try:\n branches = self.client.list(\n self.normalize_path('branches'),\n dirent_fields=SVN_DIRENT_CREATED_REV)[1:]\n for branch, unused in branches:\n results.append(Branch(\n branch['path'].split('/')[-1],\n six.text_type(branch['created_rev'].number)))\n except ClientError:\n # It's possible there aren't any branches. Ignore errors for\n # this part.\n pass\n else:\n # If the repository doesn't use the standard layout, just use a\n # listing of the root directory as the \"branches\". This probably\n # corresponds to a list of projects instead of branches, but it\n # will at least give people a useful result.\n default = True\n for name, rev in six.iteritems(root_entries):\n results.append(Branch(name, rev, default))\n default = False\n\n return results",
"def getAllBranches(server,repo):\n branches=[]\n url=server+\"/repos/\"+repo+\"/branches\"\n res=conn.get(url)\n dicres=json.loads(res.text)\n for branch in dicres:\n branches.append((branch.get(\"name\"),branch.get(\"commit\").get(\"sha\")))\n return branches",
"def tutorial_branches(config, url):\n if url.startswith('file://') or url == '':\n # no branches for file urls or missing urls\n return []\n try:\n jsontxt = urllib2.urlopen(url)\n except urllib2.HTTPError:\n return []\n branches = json.load(jsontxt)\n jsontxt.close()\n return [b['name'] for b in branches]",
"def set_branches(self, branches):\n self.branches = branches",
"def branches(self) -> Dict[str, str]:\n return dict(self._branches)",
"def get_branches(self):\n results = []\n\n try:\n root_dirents = self.client.list_dir('/')\n except Exception as e:\n raise self.normalize_error(e)\n\n default = True\n if 'trunk' in root_dirents:\n # Looks like the standard layout. Adds trunk and any branches.\n trunk = root_dirents['trunk']\n results.append(self._create_branch_from_dirent(\n 'trunk', trunk, default=True))\n default = False\n\n if 'branches' in root_dirents:\n try:\n dirents = self.client.list_dir('branches')\n\n results += [\n self._create_branch_from_dirent(name, dirents[name])\n for name in sorted(six.iterkeys(dirents))\n ]\n except Exception as e:\n raise self.normalize_error(e)\n\n # Add anything else from the root of the repository. This is a\n # catch-all for repositories which do not use the standard layout, and\n # for those that do, will include any additional top-level directories\n # that people may have.\n for name in sorted(six.iterkeys(root_dirents)):\n if name not in ('trunk', 'branches'):\n results.append(self._create_branch_from_dirent(\n name, root_dirents[name], default))\n default = False\n\n return results",
"def set_branches(self, branches: Dict[str, str]) -> None:\n self._branches = branches",
"def get():\r\n validated_msg = validate_token(request.headers, 'M')\r\n if validated_msg is not None:\r\n return validated_msg\r\n\r\n user_id = request.headers['user_id']\r\n hospitals = Hospital.query.filter(Hospital.management_group == user_id).all()\r\n if len(hospitals) > 0:\r\n branch_list = []\r\n for hospital in hospitals:\r\n curr_branch = {'id': hospital.id}\r\n curr_branch.update({'Address': hospital.location})\r\n branch_list.append(curr_branch)\r\n return {'message': {'num_branches': len(branch_list), 'branches': branch_list}}\r\n else:\r\n return {'message': 'No branches has been created yet'}",
"def repository_branches_url(namespace, repository):\n return _BASE_URL_V1 % ('repositories/%s/%s/branches' % (namespace, repository))",
"def get_list_of_branches(src_repo, target_branch,\n merge_st, regex=\"\"):\n merge_param_dict = {'merged': \"--merged\",\n 'unmerged': \"--no-merged\",\n 'both': \"\"}\n cmd_str = (GIT + \" branch --list \" + regex + \" -r \"\n + merge_param_dict[merge_st] + \" \" + src_repo\n + \"/\" + target_branch)\n return run_cmd(cmd_str, splitter='__ALL_WHITE_SPACES__')",
"def get_branches(self, active=False, closed=False):\n raise NotImplementedError(\"Abstract method\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/diff/{revision}/{path} Show how a file changed in a particular commit. The ``filediff`` template is rendered. This handler is registered under both the ``/diff`` and ``/filediff`` paths. ``/diff`` is used in modern code.
|
def filediff(web):
    fctx, ctx = None, None
    try:
        fctx = webutil.filectx(web.repo, web.req)
    except LookupError:
        ctx = webutil.changectx(web.repo, web.req)
        path = webutil.cleanpath(web.repo, web.req.qsparams[b'file'])
        if path not in ctx.files():
            raise

    if fctx is not None:
        path = fctx.path()
        ctx = fctx.changectx()
    basectx = ctx.p1()

    style = web.config(b'web', b'style')
    if b'style' in web.req.qsparams:
        style = web.req.qsparams[b'style']

    diffs = webutil.diffs(web, ctx, basectx, [path], style)
    if fctx is not None:
        rename = webutil.renamelink(fctx)
        ctx = fctx
    else:
        rename = templateutil.mappinglist([])
        ctx = ctx

    return web.sendtemplate(
        b'filediff',
        file=path,
        symrev=webutil.symrevorshortnode(web.req, ctx),
        rename=rename,
        diff=diffs,
        **pycompat.strkwargs(webutil.commonentry(web.repo, ctx))
    )
|
[
"def git_diff(filepath, since):\n html_diff = None\n commits = git_commits(filepath, since)\n if commits:\n cmd = ('git', '--no-pager', 'diff', commits[-1]+'^', '--',\n filepath)\n stdout, stderr = execute(cmd)\n\n if stdout:\n html_diff = highlight(stdout, lexers.DiffLexer(),\n HtmlFormatter())\n\n # print(' '.join(cmd))\n # print(diff)\n # print('\\n')\n\n return html_diff",
"def _hg_diff_path_change(diff, path):\n if diff == None:\n return None\n INIT = 0\n INDIFF = 1\n # small state machine makes sure we never touch anything inside the actual diff\n state = INIT\n result = \"\"\n s_list = [line for line in diff.split(os.linesep)]\n for line in s_list:\n newline = line\n if line.startswith(\"diff\"):\n state = INIT\n if state == INIT:\n if line.startswith(\"@@\"):\n state = INDIFF\n else:\n if line.startswith(\"---\") and not line.startswith(\"--- /dev/null\"):\n newline = \"--- \" + path + line[5:]\n if line.startswith(\"+++\") and not line.startswith(\"+++ /dev/null\"):\n newline = \"+++ \" + path + line[5:]\n if line.startswith(\"diff --git\"):\n # first replacing b in case path starts with a/\n newline = string.replace(line, \" b/\", \" \" + path + \"/\", 1)\n newline = string.replace(newline, \" a/\", \" \" + path + \"/\", 1)\n result += newline + '\\n'\n return result",
"def parse_diff_revision(self, filename, revision, *args, **kwargs):\n assert isinstance(filename, bytes), (\n 'filename must be a byte string, not %s' % type(filename))\n assert isinstance(revision, bytes), (\n 'revision must be a byte string, not %s' % type(revision))\n\n # Some diffs have additional tabs between the parts of the file\n # revisions\n revision = revision.strip()\n\n if self.working_copy_re.match(revision):\n return filename, HEAD\n\n # \"(revision )\" is generated by a few weird tools (like IntelliJ). If\n # in the +++ line of the diff, it means HEAD, and in the --- line, it\n # means PRE_CREATION. Since the more important use case is parsing the\n # source revision, we treat it as a new file. See bugs 1937 and 2632.\n if revision == b'(revision )':\n return filename, PRE_CREATION\n\n # Binary diffs don't provide revision information, so we set a fake\n # \"(unknown)\" in the SVNDiffParser. This will never actually appear\n # in SVN diffs.\n if revision == b'(unknown)':\n return filename, UNKNOWN\n\n m = self.revision_re.match(revision)\n\n if not m:\n raise SCMError('Unable to parse diff revision header \"%s\"'\n % revision.decode('utf-8'))\n\n relocated_file = m.group(2)\n revision = m.group(4)\n\n # group(3) holds the revision string in braces, like '(revision 4)'\n # group(4) only matches the revision number, which might by None when\n # 'nonexistent' is given as the revision string\n if revision in (None, b'0'):\n revision = PRE_CREATION\n\n if relocated_file:\n if not relocated_file.startswith(b'...'):\n raise SCMError('Unable to parse SVN relocated path \"%s\"'\n % relocated_file.decode('utf-8'))\n\n filename = b'%s/%s' % (relocated_file[4:], filename)\n\n return filename, revision",
"def gitdiff():\n if len(sys.argv) < 2:\n _usage_diffgit()\n sys.exit(1)\n\n #diffprog = sys.argv[1]\n filenames = sys.argv[1:]\n old_files = []\n for filename in filenames:\n failure, output = commands.getstatusoutput('git log %s' % filename)\n if not failure:\n commits = re.findall(r'^commit\\s+(.+)$', output,\n flags=re.MULTILINE)\n dates = re.findall(r'^Date:\\s+(.+)\\d\\d:\\d\\d:\\d\\d .+$', output,\n flags=re.MULTILINE)\n system('git checkout %s %s' % (commits[1], filename))\n old_filename = '__' + dates[1].replace(' ', '_') + filename\n shutil.copy(filename, old_filename)\n system('git checkout %s %s' % (commits[0], filename))\n old_files.append(old_filename)\n print 'doconce diff', old_filename, filename\n #pydiff(filenames, old_files)",
"def revision_diff(self,src_ext,revision):\n repo_base = \"%s/%s\" % (self.base_url,src_ext)\n if self.verbose:\n self.log.info(\"(%s)\\n%s\" % (inspect.stack()[0][3],revision))\n try:\n revision_diff = self.client.diff(self.workspace, repo_base,\n revision1=pysvn.Revision(pysvn.opt_revision_kind.number,int(revision) -1),\n revision2=pysvn.Revision(pysvn.opt_revision_kind.number,int(revision)))\n return revision_diff\n except Exception as e:\n self.log.error(e)\n return \"\"",
"def _render_comment_diff(self, req, ticket, data, cnum):\n req.perm(ticket.resource).require('TICKET_VIEW')\n new_version = int(req.args.get('version', 1))\n old_version = int(req.args.get('old_version', new_version))\n if old_version > new_version:\n old_version, new_version = new_version, old_version\n elif old_version == new_version:\n old_version = new_version - 1\n\n history = {}\n for change in self._get_comment_history(req, ticket, cnum):\n history[change['version']] = change\n\n def version_info(version):\n path = _(\"Ticket #%(num)s, comment %(cnum)d\",\n num=ticket.id, cnum=cnum)\n if version:\n rev = _(\"Version %(num)s\", num=version)\n shortrev = 'v%d' % version\n else:\n rev, shortrev = _(\"Initial Version\"), _(\"initial\")\n return {'path': path, 'rev': rev, 'shortrev': shortrev}\n\n diff_style, diff_options, diff_data = get_diff_options(req)\n diff_context = 3\n for option in diff_options:\n if option.startswith('-U'):\n diff_context = int(option[2:])\n break\n if diff_context < 0:\n diff_context = None\n\n def get_text(version):\n try:\n text = history[version]['value']\n return text.splitlines() if text else []\n except KeyError:\n raise ResourceNotFound(_(\"No version %(version)d for comment \"\n \"%(cnum)d on ticket #%(ticket)s\",\n version=version, cnum=cnum,\n ticket=ticket.id))\n\n old_text = get_text(old_version)\n new_text = get_text(new_version)\n diffs = diff_blocks(old_text, new_text, context=diff_context,\n ignore_blank_lines='-B' in diff_options,\n ignore_case='-i' in diff_options,\n ignore_space_changes='-b' in diff_options)\n\n changes = [{'diffs': diffs, 'props': [],\n 'new': version_info(new_version),\n 'old': version_info(old_version)}]\n\n # -- prev/up/next links\n prev_version = old_version\n next_version = None\n if new_version < len(history) - 1:\n next_version = new_version + 1\n\n if prev_version:\n url = req.href.ticket(ticket.id, cnum=cnum, action='comment-diff',\n version=prev_version)\n add_link(req, 'prev', url, _(\"Version %(num)s\", num=prev_version))\n add_link(req, 'up', req.href.ticket(ticket.id, cnum=cnum,\n action='comment-history'),\n _(\"Ticket Comment History\"))\n if next_version:\n url = req.href.ticket(ticket.id, cnum=cnum, action='comment-diff',\n version=next_version)\n add_link(req, 'next', url, _(\"Version %(num)s\", num=next_version))\n\n prevnext_nav(req, _(\"Previous Change\"), _(\"Next Change\"),\n _(\"Ticket Comment History\"))\n add_stylesheet(req, 'common/css/diff.css')\n add_script(req, 'common/js/diff.js')\n\n data.update({\n 'title': _(\"Ticket Comment Diff\"),\n 'resource': ticket.resource,\n 'name': _(\"Ticket #%(num)s, comment %(cnum)d\",\n num=ticket.id, cnum=cnum),\n 'url': self._make_comment_url(req, ticket, cnum),\n 'old_url': self._make_comment_url(req, ticket, cnum, old_version),\n 'new_url': self._make_comment_url(req, ticket, cnum, new_version),\n 'diff_url': req.href.ticket(ticket.id, cnum=cnum,\n action='comment-diff',\n version=new_version),\n 'diff_action': 'comment-diff', 'diff_args': [('cnum', cnum)],\n 'old_version': old_version, 'new_version': new_version,\n 'changes': changes, 'diff': diff_data,\n 'num_changes': new_version - old_version,\n 'change': history[new_version],\n 'ticket': ticket, 'cnum': cnum,\n 'longcol': '', 'shortcol': ''\n })\n\n return 'diff_view.html', data, None",
"def snippet_diff(request, template_name='libpaste/snippet_diff.html'):\n if request.GET.get('a') and request.GET.get('a').isdigit() \\\n and request.GET.get('b') and request.GET.get('b').isdigit():\n try:\n fileA = Snippet.objects.get(pk=int(request.GET.get('a')))\n fileB = Snippet.objects.get(pk=int(request.GET.get('b')))\n except ObjectDoesNotExist:\n return HttpResponseBadRequest('Selected file(s) does not exist.')\n else:\n return HttpResponseBadRequest('You must select two snippets.')\n\n class DiffText(object):\n pass\n\n diff = DiffText()\n\n if fileA.content != fileB.content:\n d = difflib.unified_diff(\n fileA.content.splitlines(),\n fileB.content.splitlines(),\n 'Original',\n 'Current',\n lineterm=''\n )\n\n diff.content = '\\n'.join(d).strip()\n diff.lexer = 'diff'\n else:\n diff.content = _('No changes were made between this two files.')\n diff.lexer = 'text'\n\n return render(request, template_name, {\n 'snippet': diff,\n 'fileA': fileA,\n 'fileB': fileB,\n 'page': 'snippet_diff',\n })",
"def test_diff_viewer_filter_by_change_type(repo_with_diffs: Tuple[Repo, Commit, Commit]):\n repo, previous_head, new_head = repo_with_diffs\n with DiffViewer(previous_head, new_head) as viewer:\n # we added 1 file, we expect the added() generator to return only 1 diff\n diffs = list(viewer.added())\n assert len(diffs) == 1\n paths = diff_paths(diffs)\n assert Path(\"other/gbac.rego\") in paths\n\n # we modified 1 file, we expect the modified() generator to return only 1 diff\n diffs = list(viewer.modified())\n assert len(diffs) == 1\n paths = diff_paths(diffs)\n assert Path(\"mylist.txt\") in paths\n\n # we deleted 1 file, we expect the deleted() generator to return only 1 diff\n diffs = list(viewer.deleted())\n assert len(diffs) == 1\n paths = diff_paths(diffs)\n assert Path(\"other/data.json\") in paths\n\n # we renamed 1 file, we expect the renamed() generator to return only 1 diff\n diffs = list(viewer.renamed())\n assert len(diffs) == 1\n paths = diff_paths(diffs)\n assert len(paths) == 2 # both old and new file name\n assert Path(\"ignored.json\") in paths\n assert Path(\"ignored2.json\") in paths",
"def diff_file(\n self, new_commit, new_path, old_commit=None, old_path=None, shallow=None\n ):\n\n if old_commit is not None and old_path is not None:\n old_file = pfs_proto.File(commit=commit_from(old_commit), path=old_path)\n else:\n old_file = None\n\n return self._req(\n Service.PFS,\n \"DiffFile\",\n new_file=pfs_proto.File(commit=commit_from(new_commit), path=new_path),\n old_file=old_file,\n shallow=shallow,\n )",
"def do_diff(self, cmd, repository_info=None):\n diff = execute(cmd, split_lines=True)\n diff = self.handle_renames(diff)\n diff = self.convert_to_absolute_paths(diff, repository_info)\n\n return ''.join(diff)",
"def diff_between_revisions(self, revision_range, args, repository_info):\n if self.options.repository_url:\n revisions = revision_range.split(':')\n if len(revisions) < 1:\n return None\n elif len(revisions) == 1:\n revisions.append('HEAD')\n\n # if a new path was supplied at the command line, set it\n files = []\n if len(args) == 1:\n repository_info.set_base_path(args[0])\n elif len(args) > 1:\n files = args\n\n url = repository_info.path + repository_info.base_path\n\n new_url = url + '@' + revisions[1]\n\n # When the source revision is zero, assume the user wants to\n # upload a diff containing all the files in ``base_path`` as new\n # files. If the base path within the repository is added to both\n # the old and new URLs, the ``svn diff`` command will error out\n # since the base_path didn't exist at revision zero. To avoid\n # that error, use the repository's root URL as the source for\n # the diff.\n if revisions[0] == \"0\":\n url = repository_info.path\n\n old_url = url + '@' + revisions[0]\n\n return (self.do_diff([\"svn\", \"diff\", \"--diff-cmd=diff\", old_url,\n new_url] + files,\n repository_info), None)\n # Otherwise, perform the revision range diff using a working copy\n else:\n return (self.do_diff([\"svn\", \"diff\", \"--diff-cmd=diff\", \"-r\",\n revision_range],\n repository_info), None)",
"def svn_file_invoke_rev_handler(*args) -> \"svn_txdelta_window_handler_t *, void **\":\n return _delta.svn_file_invoke_rev_handler(*args)",
"def diff(self, base=\"commit\"):\n if base == \"commit\":\n base = None\n if base == \"dependencies\":\n branch = self.git.current_branch()\n try:\n self.gather(self.trac.dependencies())\n self.git.diff(\"%s..%s\"%(HEAD,branch))\n finally:\n self.git.checkout(branch)\n else:\n self.git.execute(\"diff\", base)",
"def _render_diff(self, req, ticket, data, text_fields):\n new_version = int(req.args.get('version', 1))\n old_version = int(req.args.get('old_version', new_version))\n if old_version > new_version:\n old_version, new_version = new_version, old_version\n\n # get the list of versions having a description change\n history = self._get_history(req, ticket)\n changes = {}\n descriptions = []\n old_idx = new_idx = -1 # indexes in descriptions\n for change in history:\n version = change['version']\n changes[version] = change\n if any(f in text_fields for f in change['fields']):\n if old_version and version <= old_version:\n old_idx = len(descriptions)\n if new_idx == -1 and new_version and version >= new_version:\n new_idx = len(descriptions)\n descriptions.append((version, change))\n\n # determine precisely old and new versions\n if old_version == new_version:\n if new_idx >= 0:\n old_idx = new_idx - 1\n if old_idx >= 0:\n old_version, old_change = descriptions[old_idx]\n else:\n old_version, old_change = 0, None\n num_changes = new_idx - old_idx\n if new_idx >= 0:\n new_version, new_change = descriptions[new_idx]\n else:\n raise TracError(_(\"No differences to show\"))\n\n tnew = ticket.resource(version=new_version)\n told = ticket.resource(version=old_version)\n\n req.perm(tnew).require('TICKET_VIEW')\n req.perm(told).require('TICKET_VIEW')\n\n # determine prev and next versions\n prev_version = old_version\n next_version = None\n if new_idx < len(descriptions) - 1:\n next_version = descriptions[new_idx+1][0]\n\n # -- old properties (old_ticket) and new properties (new_ticket)\n\n # assume a linear sequence of change numbers, starting at 1, with gaps\n def replay_changes(values, old_values, from_version, to_version):\n for version in range(from_version, to_version+1):\n if version in changes:\n for k, v in changes[version]['fields'].iteritems():\n values[k] = v['new']\n if old_values is not None and k not in old_values:\n old_values[k] = v['old']\n\n old_ticket = {}\n if old_version:\n replay_changes(old_ticket, None, 1, old_version)\n\n new_ticket = dict(old_ticket)\n replay_changes(new_ticket, old_ticket, old_version+1, new_version)\n\n field_labels = TicketSystem(self.env).get_ticket_field_labels()\n\n changes = []\n\n def version_info(t, field=None):\n path = _(\"Ticket #%(id)s\", id=ticket.id)\n # TODO: field info should probably be part of the Resource as well\n if field:\n path = tag(path, Markup(' – '),\n field_labels.get(field, field.capitalize()))\n if t.version:\n rev = _(\"Version %(num)s\", num=t.version)\n shortrev = 'v%d' % t.version\n else:\n rev, shortrev = _(\"Initial Version\"), _(\"initial\")\n return {'path': path, 'rev': rev, 'shortrev': shortrev,\n 'href': get_resource_url(self.env, t, req.href)}\n\n # -- prop changes\n props = []\n for k, v in new_ticket.iteritems():\n if k not in text_fields:\n old, new = old_ticket[k], new_ticket[k]\n if old != new:\n label = field_labels.get(k, k.capitalize())\n prop = {'name': label, 'field': k,\n 'old': {'name': label, 'value': old},\n 'new': {'name': label, 'value': new}}\n rendered = self._render_property_diff(req, ticket, k,\n old, new, tnew)\n if rendered:\n prop['diff'] = tag.li(\n tag_(\"Property %(label)s %(rendered)s\",\n label=tag.strong(label), rendered=rendered))\n props.append(prop)\n changes.append({'props': props, 'diffs': [],\n 'new': version_info(tnew),\n 'old': version_info(told)})\n\n # -- text diffs\n diff_style, diff_options, diff_data = get_diff_options(req)\n diff_context = 3\n for option in diff_options:\n if 
option.startswith('-U'):\n diff_context = int(option[2:])\n break\n if diff_context < 0:\n diff_context = None\n\n for field in text_fields:\n old_text = old_ticket.get(field)\n old_text = old_text.splitlines() if old_text else []\n new_text = new_ticket.get(field)\n new_text = new_text.splitlines() if new_text else []\n diffs = diff_blocks(old_text, new_text, context=diff_context,\n ignore_blank_lines='-B' in diff_options,\n ignore_case='-i' in diff_options,\n ignore_space_changes='-b' in diff_options)\n\n changes.append({'diffs': diffs, 'props': [], 'field': field,\n 'new': version_info(tnew, field),\n 'old': version_info(told, field)})\n\n # -- prev/up/next links\n if prev_version:\n add_link(req, 'prev', get_resource_url(self.env, ticket.resource,\n req.href, action='diff',\n version=prev_version),\n _(\"Version %(num)s\", num=prev_version))\n add_link(req, 'up', get_resource_url(self.env, ticket.resource,\n req.href, action='history'),\n _(\"Ticket History\"))\n if next_version:\n add_link(req, 'next', get_resource_url(self.env, ticket.resource,\n req.href, action='diff',\n version=next_version),\n _(\"Version %(num)s\", num=next_version))\n\n prevnext_nav(req, _(\"Previous Change\"), _(\"Next Change\"),\n _(\"Ticket History\"))\n add_stylesheet(req, 'common/css/diff.css')\n add_script(req, 'common/js/diff.js')\n\n data.update({\n 'title': _(\"Ticket Diff\"),\n 'resource': ticket.resource,\n 'old_version': old_version, 'new_version': new_version,\n 'changes': changes, 'diff': diff_data,\n 'num_changes': num_changes, 'change': new_change,\n 'old_ticket': old_ticket, 'new_ticket': new_ticket,\n 'longcol': '', 'shortcol': ''\n })\n\n return 'diff_view.html', data, None",
"def make_svn_diff(self, merge_base, diff_lines):\n rev = self._execute([self.git, 'svn', 'find-rev', merge_base]).strip()\n\n if not rev:\n return None\n\n diff_data = b''\n original_file = b''\n filename = b''\n newfile = False\n\n for i, line in enumerate(diff_lines):\n if line.startswith(b'diff '):\n # Grab the filename and then filter this out.\n # This will be in the format of:\n #\n # diff --git a/path/to/file b/path/to/file\n info = line.split(b' ')\n diff_data += b'Index: %s\\n' % info[2]\n diff_data += b'=' * 67\n diff_data += b'\\n'\n elif line.startswith(b'index '):\n # Filter this out.\n pass\n elif line.strip() == b'--- /dev/null':\n # New file\n newfile = True\n elif (line.startswith(b'--- ') and i + 1 < len(diff_lines) and\n diff_lines[i + 1].startswith(b'+++ ')):\n newfile = False\n original_file = line[4:].strip()\n diff_data += b'--- %s\\t(revision %s)\\n' % (original_file, rev)\n elif line.startswith(b'+++ '):\n filename = line[4:].strip()\n if newfile:\n diff_data += b'--- %s\\t(revision 0)\\n' % filename\n diff_data += b'+++ %s\\t(revision 0)\\n' % filename\n else:\n # We already printed the \"--- \" line.\n diff_data += b'+++ %s\\t(working copy)\\n' % original_file\n elif (line.startswith(b'new file mode') or\n line.startswith(b'deleted file mode')):\n # Filter this out.\n pass\n elif line.startswith(b'Binary files '):\n # Add the following so that we know binary files were\n # added/changed.\n diff_data += b'Cannot display: file marked as a binary type.\\n'\n diff_data += b'svn:mime-type = application/octet-stream\\n'\n else:\n diff_data += line\n\n return diff_data",
"def test_get_with_file_diff_id(self):\n review_request = self.create_review_request(publish=True,\n create_repository=True)\n review = self.create_review(review_request, publish=True)\n diffset1 = self.create_diffset(review_request)\n\n # A comment.\n filediff1 = self.create_filediff(diffset1)\n comment1 = self.create_diff_comment(review, filediff1)\n\n # A comment linked to the same diffset but different file diff.\n filediff2 = self.create_filediff(diffset1)\n self.create_diff_comment(review, filediff2)\n\n # A comment from a different diffset and file diff.\n diffset2 = self.create_diffset(review_request, revision=2)\n filediff3 = self.create_filediff(diffset2)\n self.create_diff_comment(review, filediff3)\n\n # Testing that only comments from the given file diff are returned.\n rsp = self.api_get(get_root_diff_comment_list_url(), {\n 'file-diff-id': filediff1.id,\n }, expected_mimetype=review_diff_comment_list_mimetype)\n rsp_items = rsp[self.resource.list_result_key]\n self.assertEqual(rsp['stat'], 'ok')\n self.assertEqual(rsp['total_results'], 1)\n self.compare_item(rsp_items[0], comment1)\n\n # Testing that no comments are returned when the given file diff\n # does not exist.\n rsp = self.api_get(get_root_diff_comment_list_url(), {\n 'file-diff-id': -1,\n }, expected_mimetype=review_diff_comment_list_mimetype)\n rsp_items = rsp[self.resource.list_result_key]\n self.assertEqual(rsp['stat'], 'ok')\n self.assertEqual(rsp['total_results'], 0)",
"def test_base_filediff_and_interfilediff(self):\n repository = self.create_repository(tool_name='Git')\n review_request = self.create_review_request(repository=repository,\n create_with_history=True)\n review_request.target_people.add(review_request.submitter)\n\n diffset = self.create_diffset(review_request, draft=True)\n diffset_commits = [\n self.create_diffcommit(diffset=diffset, commit_id='r1',\n parent_id='r0'),\n self.create_diffcommit(diffset=diffset, commit_id='r2',\n parent_id='r1'),\n ]\n\n filediff = diffset_commits[1].files.get()\n base_filediff = diffset_commits[0].files.get()\n\n diffset.finalize_commit_series(\n cumulative_diff=self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,\n validation_info=None,\n validate=False,\n save=True)\n review_request.publish(user=review_request.submitter)\n\n interdiffset = self.create_diffset(review_request, draft=True)\n interdiffset_commit = self.create_diffcommit(\n diffset=interdiffset, commit_id='r1', parent_id='r0')\n\n interdiffset.finalize_commit_series(\n cumulative_diff=self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,\n validation_info=None,\n validate=False,\n save=True)\n review_request.publish(user=review_request.submitter)\n\n interfilediff = interdiffset_commit.files.get()\n\n rsp = self.client.get(\n local_site_reverse(\n 'view-diff-fragment',\n kwargs={\n 'review_request_id': review_request.display_id,\n 'revision': diffset.revision,\n 'interdiff_revision': interdiffset.revision,\n 'filediff_id': filediff.pk,\n 'interfilediff_id': interfilediff.pk,\n }),\n data={'base-filediff-id': base_filediff.pk})\n\n self.assertEqual(rsp.status_code, 500)\n self.assertIn(\n b'Cannot generate an interdiff when base FileDiff ID is '\n b'specified.',\n rsp.content)",
"def test_diff_viewer_affected_paths(repo_with_diffs: Tuple[Repo, Commit, Commit]):\n repo, previous_head, new_head = repo_with_diffs\n with DiffViewer(previous_head, new_head) as viewer:\n paths = viewer.affected_paths()\n # we touched 4 files, 1 is a rename so it has two paths (old and new)\n assert len(paths) == 5\n assert Path(\"other/gbac.rego\") in paths\n assert Path(\"mylist.txt\") in paths\n assert Path(\"other/data.json\") in paths\n assert Path(\"ignored.json\") in paths\n assert Path(\"ignored2.json\") in paths",
"def test_action_hooks_diff_viewer_hook(self):\n SandboxDiffViewerActionTestHook(extension=self.extension)\n\n context = Context({'comment': 'this is a comment'})\n\n template = Template(\n '{% load reviewtags %}'\n '{% review_request_actions %}')\n\n template.render(context)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/comparison/{revision}/{path} Show a comparison between the old and new versions of a file from changes made on a particular revision. This is similar to the ``diff`` handler. However, this form features a split or side-by-side diff rather than a unified diff. The ``context`` query string argument can be used to control the lines of context in the diff. The ``filecomparison`` template is rendered.
|
def comparison(web):
    ctx = webutil.changectx(web.repo, web.req)
    if b'file' not in web.req.qsparams:
        raise ErrorResponse(HTTP_NOT_FOUND, b'file not given')
    path = webutil.cleanpath(web.repo, web.req.qsparams[b'file'])

    parsecontext = lambda v: v == b'full' and -1 or int(v)
    if b'context' in web.req.qsparams:
        context = parsecontext(web.req.qsparams[b'context'])
    else:
        context = parsecontext(web.config(b'web', b'comparisoncontext'))

    def filelines(f):
        if f.isbinary():
            mt = pycompat.sysbytes(
                mimetypes.guess_type(pycompat.fsdecode(f.path()))[0]
                or r'application/octet-stream'
            )
            return [_(b'(binary file %s, hash: %s)') % (mt, hex(f.filenode()))]
        return f.data().splitlines()

    fctx = None
    parent = ctx.p1()
    leftrev = parent.rev()
    leftnode = parent.node()
    rightrev = ctx.rev()
    rightnode = scmutil.binnode(ctx)
    if path in ctx:
        fctx = ctx[path]
        rightlines = filelines(fctx)
        if path not in parent:
            leftlines = ()
        else:
            pfctx = parent[path]
            leftlines = filelines(pfctx)
    else:
        rightlines = ()
        pfctx = ctx.p1()[path]
        leftlines = filelines(pfctx)

    comparison = webutil.compare(context, leftlines, rightlines)
    if fctx is not None:
        rename = webutil.renamelink(fctx)
        ctx = fctx
    else:
        rename = templateutil.mappinglist([])
        ctx = ctx

    return web.sendtemplate(
        b'filecomparison',
        file=path,
        symrev=webutil.symrevorshortnode(web.req, ctx),
        rename=rename,
        leftrev=leftrev,
        leftnode=hex(leftnode),
        rightrev=rightrev,
        rightnode=hex(rightnode),
        comparison=comparison,
        **pycompat.strkwargs(webutil.commonentry(web.repo, ctx))
    )
|
[
"def filediff(web):\n fctx, ctx = None, None\n try:\n fctx = webutil.filectx(web.repo, web.req)\n except LookupError:\n ctx = webutil.changectx(web.repo, web.req)\n path = webutil.cleanpath(web.repo, web.req.qsparams[b'file'])\n if path not in ctx.files():\n raise\n\n if fctx is not None:\n path = fctx.path()\n ctx = fctx.changectx()\n basectx = ctx.p1()\n\n style = web.config(b'web', b'style')\n if b'style' in web.req.qsparams:\n style = web.req.qsparams[b'style']\n\n diffs = webutil.diffs(web, ctx, basectx, [path], style)\n if fctx is not None:\n rename = webutil.renamelink(fctx)\n ctx = fctx\n else:\n rename = templateutil.mappinglist([])\n ctx = ctx\n\n return web.sendtemplate(\n b'filediff',\n file=path,\n symrev=webutil.symrevorshortnode(web.req, ctx),\n rename=rename,\n diff=diffs,\n **pycompat.strkwargs(webutil.commonentry(web.repo, ctx))\n )",
"def git_diff(filepath, since):\n html_diff = None\n commits = git_commits(filepath, since)\n if commits:\n cmd = ('git', '--no-pager', 'diff', commits[-1]+'^', '--',\n filepath)\n stdout, stderr = execute(cmd)\n\n if stdout:\n html_diff = highlight(stdout, lexers.DiffLexer(),\n HtmlFormatter())\n\n # print(' '.join(cmd))\n # print(diff)\n # print('\\n')\n\n return html_diff",
"def snippet_diff(request, template_name='libpaste/snippet_diff.html'):\n if request.GET.get('a') and request.GET.get('a').isdigit() \\\n and request.GET.get('b') and request.GET.get('b').isdigit():\n try:\n fileA = Snippet.objects.get(pk=int(request.GET.get('a')))\n fileB = Snippet.objects.get(pk=int(request.GET.get('b')))\n except ObjectDoesNotExist:\n return HttpResponseBadRequest('Selected file(s) does not exist.')\n else:\n return HttpResponseBadRequest('You must select two snippets.')\n\n class DiffText(object):\n pass\n\n diff = DiffText()\n\n if fileA.content != fileB.content:\n d = difflib.unified_diff(\n fileA.content.splitlines(),\n fileB.content.splitlines(),\n 'Original',\n 'Current',\n lineterm=''\n )\n\n diff.content = '\\n'.join(d).strip()\n diff.lexer = 'diff'\n else:\n diff.content = _('No changes were made between this two files.')\n diff.lexer = 'text'\n\n return render(request, template_name, {\n 'snippet': diff,\n 'fileA': fileA,\n 'fileB': fileB,\n 'page': 'snippet_diff',\n })",
"def revision_diff(self,src_ext,revision):\n repo_base = \"%s/%s\" % (self.base_url,src_ext)\n if self.verbose:\n self.log.info(\"(%s)\\n%s\" % (inspect.stack()[0][3],revision))\n try:\n revision_diff = self.client.diff(self.workspace, repo_base,\n revision1=pysvn.Revision(pysvn.opt_revision_kind.number,int(revision) -1),\n revision2=pysvn.Revision(pysvn.opt_revision_kind.number,int(revision)))\n return revision_diff\n except Exception as e:\n self.log.error(e)\n return \"\"",
"def gitdiff():\n if len(sys.argv) < 2:\n _usage_diffgit()\n sys.exit(1)\n\n #diffprog = sys.argv[1]\n filenames = sys.argv[1:]\n old_files = []\n for filename in filenames:\n failure, output = commands.getstatusoutput('git log %s' % filename)\n if not failure:\n commits = re.findall(r'^commit\\s+(.+)$', output,\n flags=re.MULTILINE)\n dates = re.findall(r'^Date:\\s+(.+)\\d\\d:\\d\\d:\\d\\d .+$', output,\n flags=re.MULTILINE)\n system('git checkout %s %s' % (commits[1], filename))\n old_filename = '__' + dates[1].replace(' ', '_') + filename\n shutil.copy(filename, old_filename)\n system('git checkout %s %s' % (commits[0], filename))\n old_files.append(old_filename)\n print 'doconce diff', old_filename, filename\n #pydiff(filenames, old_files)",
"def main(argv=None):\n\n if argv is None:\n argv = sys.argv # [1:]\n\n parser = OptionParser(usage=help_message)\n\n parser.add_option(\n '-r', '--recursive',\n action='store_true',\n dest='recursive',\n default=False,\n help='Recursively compare any subdirectories found.')\n\n parser.add_option(\n '-i', '--include',\n action='append',\n dest='include',\n default=include_default,\n metavar='PAT',\n help='Include files that match the pattern PAT.')\n\n parser.add_option(\n '-x', '--exclude',\n action='append',\n dest='exclude',\n default=exclude_default,\n metavar='PAT',\n help='Exclude files that match the pattern PAT.')\n\n parser.add_option(\n '-f', '--format',\n dest='format',\n default='npz',\n metavar='FORMAT',\n help='File format: npz [default: %default]')\n\n parser.add_option(\n '-V', '--variables',\n dest='variables',\n default=None,\n metavar='VAR',\n help='Compare variables VAR, [default: compare all variables]')\n\n parser.add_option(\n '-v',\n action='count',\n dest='verbosity',\n default=0,\n help='Verbose mode, [default: %default]')\n\n parser.add_option(\n '--show-file',\n dest='show_file',\n action='store_true',\n default=False,\n help='show most differing file, [default: %default]')\n\n (options, args) = parser.parse_args()\n if options.include == []:\n options.include = ['*']\n\n if len(args) != 2:\n parser.error(\"incorrect number of arguments\")\n\n # do the diff\n try:\n diff(args[0], args[1], options)\n return 0\n except:\n raise\n return 1",
"def diff(argv):\n usage = \"%(prog)s diff [options] LABEL1 LABEL2\"\n description = dedent(\"Show the differences, if any, between two records.\")\n parser = ArgumentParser(usage=usage,\n description=description)\n parser.add_argument('label1')\n parser.add_argument('label2')\n parser.add_argument('-i', '--ignore', action=\"append\",\n help=\"a regular expression pattern for filenames to ignore when evaluating differences in output data. To supply multiple patterns, use the -i option multiple times.\")\n parser.add_argument('-l', '--long', action=\"store_const\", const=\"long\",\n dest=\"mode\", default=\"short\",\n help=\"prints full information for each record\"),\n args = parser.parse_args(argv)\n if args.ignore is None:\n args.ignore = []\n\n project = load_project()\n print(project.show_diff(args.label1, args.label2, mode=args.mode,\n ignore_filenames=args.ignore))",
"def _render_diff(self, req, ticket, data, text_fields):\n new_version = int(req.args.get('version', 1))\n old_version = int(req.args.get('old_version', new_version))\n if old_version > new_version:\n old_version, new_version = new_version, old_version\n\n # get the list of versions having a description change\n history = self._get_history(req, ticket)\n changes = {}\n descriptions = []\n old_idx = new_idx = -1 # indexes in descriptions\n for change in history:\n version = change['version']\n changes[version] = change\n if any(f in text_fields for f in change['fields']):\n if old_version and version <= old_version:\n old_idx = len(descriptions)\n if new_idx == -1 and new_version and version >= new_version:\n new_idx = len(descriptions)\n descriptions.append((version, change))\n\n # determine precisely old and new versions\n if old_version == new_version:\n if new_idx >= 0:\n old_idx = new_idx - 1\n if old_idx >= 0:\n old_version, old_change = descriptions[old_idx]\n else:\n old_version, old_change = 0, None\n num_changes = new_idx - old_idx\n if new_idx >= 0:\n new_version, new_change = descriptions[new_idx]\n else:\n raise TracError(_(\"No differences to show\"))\n\n tnew = ticket.resource(version=new_version)\n told = ticket.resource(version=old_version)\n\n req.perm(tnew).require('TICKET_VIEW')\n req.perm(told).require('TICKET_VIEW')\n\n # determine prev and next versions\n prev_version = old_version\n next_version = None\n if new_idx < len(descriptions) - 1:\n next_version = descriptions[new_idx+1][0]\n\n # -- old properties (old_ticket) and new properties (new_ticket)\n\n # assume a linear sequence of change numbers, starting at 1, with gaps\n def replay_changes(values, old_values, from_version, to_version):\n for version in range(from_version, to_version+1):\n if version in changes:\n for k, v in changes[version]['fields'].iteritems():\n values[k] = v['new']\n if old_values is not None and k not in old_values:\n old_values[k] = v['old']\n\n old_ticket = {}\n if old_version:\n replay_changes(old_ticket, None, 1, old_version)\n\n new_ticket = dict(old_ticket)\n replay_changes(new_ticket, old_ticket, old_version+1, new_version)\n\n field_labels = TicketSystem(self.env).get_ticket_field_labels()\n\n changes = []\n\n def version_info(t, field=None):\n path = _(\"Ticket #%(id)s\", id=ticket.id)\n # TODO: field info should probably be part of the Resource as well\n if field:\n path = tag(path, Markup(' – '),\n field_labels.get(field, field.capitalize()))\n if t.version:\n rev = _(\"Version %(num)s\", num=t.version)\n shortrev = 'v%d' % t.version\n else:\n rev, shortrev = _(\"Initial Version\"), _(\"initial\")\n return {'path': path, 'rev': rev, 'shortrev': shortrev,\n 'href': get_resource_url(self.env, t, req.href)}\n\n # -- prop changes\n props = []\n for k, v in new_ticket.iteritems():\n if k not in text_fields:\n old, new = old_ticket[k], new_ticket[k]\n if old != new:\n label = field_labels.get(k, k.capitalize())\n prop = {'name': label, 'field': k,\n 'old': {'name': label, 'value': old},\n 'new': {'name': label, 'value': new}}\n rendered = self._render_property_diff(req, ticket, k,\n old, new, tnew)\n if rendered:\n prop['diff'] = tag.li(\n tag_(\"Property %(label)s %(rendered)s\",\n label=tag.strong(label), rendered=rendered))\n props.append(prop)\n changes.append({'props': props, 'diffs': [],\n 'new': version_info(tnew),\n 'old': version_info(told)})\n\n # -- text diffs\n diff_style, diff_options, diff_data = get_diff_options(req)\n diff_context = 3\n for option in diff_options:\n if 
option.startswith('-U'):\n diff_context = int(option[2:])\n break\n if diff_context < 0:\n diff_context = None\n\n for field in text_fields:\n old_text = old_ticket.get(field)\n old_text = old_text.splitlines() if old_text else []\n new_text = new_ticket.get(field)\n new_text = new_text.splitlines() if new_text else []\n diffs = diff_blocks(old_text, new_text, context=diff_context,\n ignore_blank_lines='-B' in diff_options,\n ignore_case='-i' in diff_options,\n ignore_space_changes='-b' in diff_options)\n\n changes.append({'diffs': diffs, 'props': [], 'field': field,\n 'new': version_info(tnew, field),\n 'old': version_info(told, field)})\n\n # -- prev/up/next links\n if prev_version:\n add_link(req, 'prev', get_resource_url(self.env, ticket.resource,\n req.href, action='diff',\n version=prev_version),\n _(\"Version %(num)s\", num=prev_version))\n add_link(req, 'up', get_resource_url(self.env, ticket.resource,\n req.href, action='history'),\n _(\"Ticket History\"))\n if next_version:\n add_link(req, 'next', get_resource_url(self.env, ticket.resource,\n req.href, action='diff',\n version=next_version),\n _(\"Version %(num)s\", num=next_version))\n\n prevnext_nav(req, _(\"Previous Change\"), _(\"Next Change\"),\n _(\"Ticket History\"))\n add_stylesheet(req, 'common/css/diff.css')\n add_script(req, 'common/js/diff.js')\n\n data.update({\n 'title': _(\"Ticket Diff\"),\n 'resource': ticket.resource,\n 'old_version': old_version, 'new_version': new_version,\n 'changes': changes, 'diff': diff_data,\n 'num_changes': num_changes, 'change': new_change,\n 'old_ticket': old_ticket, 'new_ticket': new_ticket,\n 'longcol': '', 'shortcol': ''\n })\n\n return 'diff_view.html', data, None",
"def test_base_filediff_and_interfilediff(self):\n repository = self.create_repository(tool_name='Git')\n review_request = self.create_review_request(repository=repository,\n create_with_history=True)\n review_request.target_people.add(review_request.submitter)\n\n diffset = self.create_diffset(review_request, draft=True)\n diffset_commits = [\n self.create_diffcommit(diffset=diffset, commit_id='r1',\n parent_id='r0'),\n self.create_diffcommit(diffset=diffset, commit_id='r2',\n parent_id='r1'),\n ]\n\n filediff = diffset_commits[1].files.get()\n base_filediff = diffset_commits[0].files.get()\n\n diffset.finalize_commit_series(\n cumulative_diff=self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,\n validation_info=None,\n validate=False,\n save=True)\n review_request.publish(user=review_request.submitter)\n\n interdiffset = self.create_diffset(review_request, draft=True)\n interdiffset_commit = self.create_diffcommit(\n diffset=interdiffset, commit_id='r1', parent_id='r0')\n\n interdiffset.finalize_commit_series(\n cumulative_diff=self.DEFAULT_GIT_FILEDIFF_DATA_DIFF,\n validation_info=None,\n validate=False,\n save=True)\n review_request.publish(user=review_request.submitter)\n\n interfilediff = interdiffset_commit.files.get()\n\n rsp = self.client.get(\n local_site_reverse(\n 'view-diff-fragment',\n kwargs={\n 'review_request_id': review_request.display_id,\n 'revision': diffset.revision,\n 'interdiff_revision': interdiffset.revision,\n 'filediff_id': filediff.pk,\n 'interfilediff_id': interfilediff.pk,\n }),\n data={'base-filediff-id': base_filediff.pk})\n\n self.assertEqual(rsp.status_code, 500)\n self.assertIn(\n b'Cannot generate an interdiff when base FileDiff ID is '\n b'specified.',\n rsp.content)",
"def test_base_filediff_and_interfilediff(self):\n\t\trepository = self.create_repository(tool_name=\"Git\")\n\t\treview_request = self.create_review_request(repository=repository, create_with_history=True)\n\t\treview_request.target_people = [review_request.submitter]\n\t\tdiffset = self.create_diffset(review_request, draft=True)\n\t\tdiffset_commits = [self.create_diffcommit(diffset=diffset, commit_id=\"r1\", parent_id=\"r0\"), self.create_diffcommit(diffset=diffset, commit_id=\"r2\", parent_id=\"r1\")]\n\t\tfilediff = diffset_commits[1].files.get()\n\t\tbase_filediff = diffset_commits[0].files.get()\n\t\tdiffset.finalize_commit_series(cumulative_diff=self.DEFAULT_GIT_FILEDIFF_DATA_DIFF, validation_info=None, validate=False, save=True)\n\t\treview_request.publish(user=review_request.submitter)\n\t\tinterdiffset = self.create_diffset(review_request, draft=True)\n\t\tinterdiffset_commit = self.create_diffcommit(diffset=interdiffset, commit_id=\"r1\", parent_id=\"r0\")\n\t\tinterdiffset.finalize_commit_series(cumulative_diff=self.DEFAULT_GIT_FILEDIFF_DATA_DIFF, validation_info=None, validate=False, save=True)\n\t\treview_request.publish(user=review_request.submitter)\n\t\tinterfilediff = interdiffset_commit.files.get()\n\t\trsp = self.client.get(local_site_reverse(\"view-diff-fragment\", kwargs={\"review_request_id\": review_request.display_id, \"revision\": diffset.revision, \"interdiff_revision\": interdiffset.revision, \"filediff_id\": filediff.pk, \"interfilediff_id\": interfilediff.pk}), data={\"base-filediff-id\": base_filediff.pk})\n\t\tself.assertEqual(rsp.status_code, 500)\n\t\tself.assertIn(b\"Cannot generate an interdiff when base FileDiff ID is \" b\"specified.\", rsp.content)",
"def parse_diff_revision(self, filename, revision, *args, **kwargs):\n assert isinstance(filename, bytes), (\n 'filename must be a byte string, not %s' % type(filename))\n assert isinstance(revision, bytes), (\n 'revision must be a byte string, not %s' % type(revision))\n\n # Some diffs have additional tabs between the parts of the file\n # revisions\n revision = revision.strip()\n\n if self.working_copy_re.match(revision):\n return filename, HEAD\n\n # \"(revision )\" is generated by a few weird tools (like IntelliJ). If\n # in the +++ line of the diff, it means HEAD, and in the --- line, it\n # means PRE_CREATION. Since the more important use case is parsing the\n # source revision, we treat it as a new file. See bugs 1937 and 2632.\n if revision == b'(revision )':\n return filename, PRE_CREATION\n\n # Binary diffs don't provide revision information, so we set a fake\n # \"(unknown)\" in the SVNDiffParser. This will never actually appear\n # in SVN diffs.\n if revision == b'(unknown)':\n return filename, UNKNOWN\n\n m = self.revision_re.match(revision)\n\n if not m:\n raise SCMError('Unable to parse diff revision header \"%s\"'\n % revision.decode('utf-8'))\n\n relocated_file = m.group(2)\n revision = m.group(4)\n\n # group(3) holds the revision string in braces, like '(revision 4)'\n # group(4) only matches the revision number, which might by None when\n # 'nonexistent' is given as the revision string\n if revision in (None, b'0'):\n revision = PRE_CREATION\n\n if relocated_file:\n if not relocated_file.startswith(b'...'):\n raise SCMError('Unable to parse SVN relocated path \"%s\"'\n % relocated_file.decode('utf-8'))\n\n filename = b'%s/%s' % (relocated_file[4:], filename)\n\n return filename, revision",
"def LaTeXDiff(target=None, source=None, env=None):\n print 'SOURCES :', source\n print 'TARGET :', target\n \n do_rev = env['DOREV'] \n if do_rev:\n #\n # The command below seems not to work with me :-(\n # svn diff -r [REV] $SOURCEFILE | patch -R -p0 -o $TARGET\n # What follows is more secure but involved :-(\n #\n repo_dir = env['REPODIR'] \n #\n #--------------- Get version ---------------\n print 'DDCR: git_version'\n #\n #--------------- retrieve file from SVN and do latexdiff ---------------\n return None",
"def create_visual_diff_through_html_files(file1, file2, encoding=\"utf8\", page=None,\n browser=False, notebook=False, context_size=None,\n inline_view=False):\n diff = create_visual_diff_through_html(file1, file2, notebook=notebook,\n context_size=context_size, inline_view=inline_view)\n if page is not None:\n with open(page, \"w\", encoding=\"utf8\") as f:\n f.write(diff)\n if browser: # pragma: no cover\n if page is None:\n raise AttributeError(\"browser is True, page must be True\")\n import webbrowser\n webbrowser.open(page)\n return None\n return diff",
"def diff_file(\n self, new_commit, new_path, old_commit=None, old_path=None, shallow=None\n ):\n\n if old_commit is not None and old_path is not None:\n old_file = pfs_proto.File(commit=commit_from(old_commit), path=old_path)\n else:\n old_file = None\n\n return self._req(\n Service.PFS,\n \"DiffFile\",\n new_file=pfs_proto.File(commit=commit_from(new_commit), path=new_path),\n old_file=old_file,\n shallow=shallow,\n )",
"def run_diff(self, src, dst, deref=False):\n deref_str = \"\"\n if not deref:\n deref_str = \"--no-dereference\"\n\n cmd = \"diff -r {} '{}' '{}'\".format(\n deref_str, src, dst)\n self.execute_cmd(cmd)",
"def wrapped_diff(filenode_old, filenode_new, cut_off_limit=None,\n ignore_whitespace=True, line_context=3,\n enable_comments=False):\n\n if filenode_old is None:\n filenode_old = FileNode(filenode_new.path, '', EmptyChangeset())\n\n if filenode_old.is_binary or filenode_new.is_binary:\n diff = wrap_to_table(_('Binary file'))\n stats = (0, 0)\n size = 0\n\n elif cut_off_limit != -1 and (cut_off_limit is None or\n (filenode_old.size < cut_off_limit and filenode_new.size < cut_off_limit)):\n\n f_gitdiff = get_gitdiff(filenode_old, filenode_new,\n ignore_whitespace=ignore_whitespace,\n context=line_context)\n diff_processor = DiffProcessor(f_gitdiff, format='gitdiff')\n\n diff = diff_processor.as_html(enable_comments=enable_comments)\n stats = diff_processor.stat()\n size = len(diff or '')\n else:\n diff = wrap_to_table(_('Changeset was too big and was cut off, use '\n 'diff menu to display this diff'))\n stats = (0, 0)\n size = 0\n if not diff:\n submodules = filter(lambda o: isinstance(o, SubModuleNode),\n [filenode_new, filenode_old])\n if submodules:\n diff = wrap_to_table(escape('Submodule %r' % submodules[0]))\n else:\n diff = wrap_to_table(_('No changes detected'))\n\n cs1 = filenode_old.changeset.raw_id\n cs2 = filenode_new.changeset.raw_id\n\n return size, cs1, cs2, diff, stats",
"def list_changed_files(self, compared, *paths):\n\n self.update_index_refresh()\n if not isinstance(compared, (list, tuple)):\n compared = [ compared ]\n\n if len(compared) == 2:\n str = self._getoutput(\"diff-tree -r --name-only\",\n compared[0], compared[1], *paths)\n elif len(compared) == 1:\n str = self._getoutput(\"diff-index --ignore-submodules -r --name-only\",\n compared[0], *paths)\n else:\n raise self.Error(\"compared does not contain 1 or 2 elements\")\n \n if str:\n return str.split('\\n')\n return []",
"def diff_between_revisions(self, revision_range, args, repository_info):\n if self.options.repository_url:\n revisions = revision_range.split(':')\n if len(revisions) < 1:\n return None\n elif len(revisions) == 1:\n revisions.append('HEAD')\n\n # if a new path was supplied at the command line, set it\n files = []\n if len(args) == 1:\n repository_info.set_base_path(args[0])\n elif len(args) > 1:\n files = args\n\n url = repository_info.path + repository_info.base_path\n\n new_url = url + '@' + revisions[1]\n\n # When the source revision is zero, assume the user wants to\n # upload a diff containing all the files in ``base_path`` as new\n # files. If the base path within the repository is added to both\n # the old and new URLs, the ``svn diff`` command will error out\n # since the base_path didn't exist at revision zero. To avoid\n # that error, use the repository's root URL as the source for\n # the diff.\n if revisions[0] == \"0\":\n url = repository_info.path\n\n old_url = url + '@' + revisions[0]\n\n return (self.do_diff([\"svn\", \"diff\", \"--diff-cmd=diff\", old_url,\n new_url] + files,\n repository_info), None)\n # Otherwise, perform the revision range diff using a working copy\n else:\n return (self.do_diff([\"svn\", \"diff\", \"--diff-cmd=diff\", \"-r\",\n revision_range],\n repository_info), None)",
"def test_should_render_on_diff_viewer_revision(self):\n request = RequestFactory().request()\n request.resolver_match = Mock()\n request.resolver_match.url_name = 'view-diff-revision'\n\n review_request = self.create_review_request()\n\n self.assertTrue(self.action.should_render({\n 'request': request,\n 'review_request': review_request,\n }))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/annotate/{revision}/{path} Show changeset information for each line in a file. The ``ignorews``, ``ignorewsamount``, ``ignorewseol``, and ``ignoreblanklines`` query string arguments have the same meaning as their ``[annotate]`` config equivalents. Their values are interpreted with the hgrc boolean parsing logic, e.g. ``0`` and ``false`` are false while ``1`` and ``true`` are true. If an argument is not defined, the server default settings are used. The ``fileannotate`` template is rendered.
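As a rough illustration of how a client could pass these query string arguments per request, the sketch below uses the ``requests`` library; the host name, repository path, and file path are made-up placeholders, not part of the handler that follows.

import requests

# Hypothetical annotate request that overrides the whitespace handling
# for this call only; the server falls back to its defaults otherwise.
resp = requests.get(
    "https://hg.example.org/repo/annotate/tip/setup.py",  # placeholder URL
    params={
        "ignorews": "1",          # treat all whitespace changes as irrelevant
        "ignoreblanklines": "0",  # but keep blank-line changes attributed
    },
)
resp.raise_for_status()
print(resp.text)  # the rendered ``fileannotate`` template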
|
def annotate(web):
fctx = webutil.filectx(web.repo, web.req)
f = fctx.path()
parity = paritygen(web.stripecount)
ishead = fctx.filenode() in fctx.filelog().heads()
# parents() is called once per line and several lines likely belong to
# the same revision, so it is worth caching.
# TODO there are still redundant operations within basefilectx.parents()
# and from the fctx.annotate() call itself that could be cached.
parentscache = {}
def parents(context, f):
rev = f.rev()
if rev not in parentscache:
parentscache[rev] = []
for p in f.parents():
entry = {
b'node': p.hex(),
b'rev': p.rev(),
}
parentscache[rev].append(entry)
for p in parentscache[rev]:
yield p
def annotate(context):
if fctx.isbinary():
mt = pycompat.sysbytes(
mimetypes.guess_type(pycompat.fsdecode(fctx.path()))[0]
or r'application/octet-stream'
)
lines = [
dagop.annotateline(
fctx=fctx.filectx(fctx.filerev()),
lineno=1,
text=b'(binary:%s)' % mt,
)
]
else:
lines = webutil.annotate(web.req, fctx, web.repo.ui)
previousrev = None
blockparitygen = paritygen(1)
for lineno, aline in enumerate(lines):
f = aline.fctx
rev = f.rev()
if rev != previousrev:
blockhead = True
blockparity = next(blockparitygen)
else:
blockhead = None
previousrev = rev
yield {
b"parity": next(parity),
b"node": f.hex(),
b"rev": rev,
b"author": f.user(),
b"parents": templateutil.mappinggenerator(parents, args=(f,)),
b"desc": f.description(),
b"extra": f.extra(),
b"file": f.path(),
b"blockhead": blockhead,
b"blockparity": blockparity,
b"targetline": aline.lineno,
b"line": aline.text,
b"lineno": lineno + 1,
b"lineid": b"l%d" % (lineno + 1),
b"linenumber": b"% 6d" % (lineno + 1),
b"revdate": f.date(),
}
diffopts = webutil.difffeatureopts(web.req, web.repo.ui, b'annotate')
diffopts = {k: getattr(diffopts, k) for k in diffopts.defaults}
return web.sendtemplate(
b'fileannotate',
file=f,
annotate=templateutil.mappinggenerator(annotate),
path=webutil.up(f),
symrev=webutil.symrevorshortnode(web.req, fctx),
rename=webutil.renamelink(fctx),
permissions=fctx.manifest().flags(f),
ishead=int(ishead),
diffopts=templateutil.hybriddict(diffopts),
**pycompat.strkwargs(webutil.commonentry(web.repo, fctx))
)
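The per-revision parents cache in the handler above is plain memoization keyed on the revision number, so each revision's parents are looked up once no matter how many annotated lines point at it. A minimal, Mercurial-independent sketch of the same pattern (the helper name and the toy parent function are invented for illustration):

def make_cached_parents(lookup_parents):
    # Compute parents once per revision and reuse the result for every
    # annotated line that belongs to that revision.
    cache = {}

    def parents(rev):
        if rev not in cache:
            cache[rev] = list(lookup_parents(rev))
        return cache[rev]

    return parents

parents = make_cached_parents(lambda rev: [rev - 1] if rev > 0 else [])
assert parents(5) == [4]
assert parents(5) == [4]  # second call is served from the cache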
|
[
"def annotate(self):\n for line in self.line_map:\n if line.is_tier_line:\n line.annotations = self._extract_annots(line.tier, line.onset,\n line.offset, line.content,\n line.index)\n self.annotated = True",
"def annotate( # pylint: disable=too-many-locals,too-many-branches\n input_file: str,\n region: Optional[Tuple[str, int, int]],\n pipeline_config: Optional[list[AnnotatorInfo]],\n grr_definition: Optional[dict],\n out_file_path: str,\n reannotate: Optional[str] = None\n) -> None:\n # flake8: noqa: C901\n grr = build_genomic_resource_repository(definition=grr_definition)\n\n pipeline = build_annotation_pipeline(\n pipeline_config=pipeline_config,\n grr_repository=grr)\n\n if reannotate:\n pipeline_old = build_annotation_pipeline(\n pipeline_config_file=reannotate,\n grr_repository=grr\n )\n pipeline_new = pipeline\n pipeline = ReannotationPipeline(pipeline_new, pipeline_old)\n\n # cache pipeline\n resources: set[str] = set()\n for annotator in pipeline.annotators:\n resources = resources | {res.get_id() for res in annotator.resources}\n cache_resources(grr, list(resources))\n\n with closing(VariantFile(input_file)) as in_file:\n update_header(in_file, pipeline)\n with pipeline.open(), closing(VariantFile(\n out_file_path, \"w\", header=in_file.header\n )) as out_file:\n annotation_attributes = pipeline.get_attributes()\n\n if region is None:\n in_file_iter = in_file.fetch()\n else:\n in_file_iter = in_file.fetch(*region)\n\n for vcf_var in in_file_iter:\n # pylint: disable=use-list-literal\n buffers: List[List] = [list() for _ in annotation_attributes]\n\n if vcf_var.ref is None:\n logger.warning(\n \"vcf variant without reference: %s %s\",\n vcf_var.chrom, vcf_var.pos\n )\n continue\n\n if vcf_var.alts is None:\n logger.info(\n \"vcf variant without alternatives: %s %s\",\n vcf_var.chrom, vcf_var.pos\n )\n continue\n\n has_value = {}\n\n if isinstance(pipeline, ReannotationPipeline):\n for col in pipeline.attributes_deleted:\n del vcf_var.info[col]\n\n for alt in vcf_var.alts:\n if isinstance(pipeline, ReannotationPipeline):\n annotation = pipeline.annotate(\n VCFAllele(\n vcf_var.chrom, vcf_var.pos, vcf_var.ref, alt\n ), dict(vcf_var.info)\n )\n else:\n annotation = pipeline.annotate(\n VCFAllele(\n vcf_var.chrom, vcf_var.pos, vcf_var.ref, alt\n )\n )\n\n for buff, attribute in zip(buffers, annotation_attributes):\n attr = annotation.get(attribute.name)\n attr = attr if attr is not None else \".\"\n if attr != \".\":\n has_value[attribute.name] = True\n if isinstance(attr, list):\n attr = \";\".join(map(str, attr))\n elif isinstance(attr, dict):\n attr = \";\".join(\n f\"{k}:{v}\"\n for k, v in attr.items()\n )\n attr = str(attr).replace(\";\", \"|\")\\\n .replace(\",\", \"|\")\\\n .replace(\" \", \"_\")\n buff.append(attr)\n\n for attribute, buff in zip(annotation_attributes, buffers):\n if has_value.get(attribute.name, False):\n vcf_var.info[attribute.name] = buff\n out_file.write(vcf_var)",
"def _annotate_variants(args, conn, get_val_fn):\n # For each, use Tabix to detect overlaps with the user-defined\n # annotation file. Update the variant row with T/F if overlaps found.\n annos = pysam.Tabixfile(args.anno_file)\n naming = guess_contig_naming(annos)\n select_cursor = conn.cursor()\n select_cursor.execute(\"SELECT chrom, start, end, variant_id FROM variants\")\n to_update = []\n for row in select_cursor:\n to_update.append((str(row[\"variant_id\"]),\n get_val_fn(annotations_in_region(row, annos, \"tuple\", naming))))\n update_cursor = conn.cursor()\n add_requested_column(args.col_name, update_cursor)\n for variant_id, val in to_update:\n update_qry = \"UPDATE variants SET \" + args.col_name + \" = \" + str(val) + \\\n \" WHERE variant_id = \" + variant_id\n update_cursor.execute(update_qry)",
"def cli(raw_args: Optional[list[str]] = None) -> None:\n if not raw_args:\n raw_args = sys.argv[1:]\n\n parser = configure_argument_parser()\n args = parser.parse_args(raw_args)\n VerbosityConfiguration.set(args)\n CLIAnnotationContext.register(args)\n\n context = get_genomic_context()\n pipeline = CLIAnnotationContext.get_pipeline(context)\n grr = CLIAnnotationContext.get_genomic_resources_repository(context)\n\n if args.output:\n output = args.output\n else:\n output = os.path.basename(args.input).split(\".\")[0] + \"_annotated.vcf\"\n\n if not os.path.exists(args.work_dir):\n os.mkdir(args.work_dir)\n\n\n task_graph = TaskGraph()\n\n task_graph.input_files.append(args.input)\n task_graph.input_files.append(args.pipeline)\n if args.reannotate:\n task_graph.input_files.append(args.reannotate)\n\n if not tabix_index_filename(args.input):\n # annotate(args.input, None, pipeline.get_info(),\n # grr.definition, output, args.reannotate)\n assert grr is not None\n task_graph.create_task(\n \"all_variants_annotate\",\n annotate,\n [args.input, None, pipeline.get_info(),\n grr.definition, output, args.reannotate],\n []\n )\n else:\n with closing(TabixFile(args.input)) as pysam_file:\n regions = produce_regions(pysam_file, args.region_size)\n file_paths = produce_partfile_paths(args.input, regions, args.work_dir)\n region_tasks = []\n for index, (region, file_path) in enumerate(zip(regions, file_paths)):\n assert grr is not None\n region_tasks.append(task_graph.create_task(\n f\"part-{index}\",\n annotate,\n [args.input, region,\n pipeline.get_info(), grr.definition,\n file_path, args.reannotate],\n []\n ))\n\n assert grr is not None\n task_graph.create_task(\n \"combine\",\n combine,\n [args.input, pipeline.get_info(),\n grr.definition, file_paths, output],\n region_tasks\n )\n\n args.task_status_dir = os.path.join(args.work_dir, \".tasks-status\")\n args.log_dir = os.path.join(args.work_dir, \".tasks-log\")\n\n TaskGraphCli.process_graph(task_graph, **vars(args))",
"def _buildkite_annotate(content, style=\"success\", context=None):\n if context is None:\n context = \"ctx-%s\" % (style,)\n agent = local[\"buildkite-agent\"]\n _ = (\n agent[\"annotate\", content, \"--style\", style, \"--context\", context, \"--append\"]\n & FG\n )",
"def annotateFiles(listOfFilesPath=None, annotatedOutputFolder=u'./002manuallyAnnotated/', dumpSP=True):\n referencePathLine = []\n listOfAnnotations = []\n # get the list containing the file paths\n if listOfFilesPath is None:\n listOfFilesPath = randomlySelectNDocsFromPath(b000path.getBtFolderPath(flagFolder=None), n=100)\n makeLocalFolderPaths(listOfFilesPath)\n elif type(listOfFilesPath) is str:\n if u'.json' in listOfFilesPath:\n listOfFilesPath = utilsOs.openJsonFileAsDict(listOfFilesPath)\n else:\n listOfFilesPath = [listOfFilesPath]\n # get rid of the files we have already annotated\n if utilsOs.theFileExists(u'{0}sampleReference.tsv'.format(annotatedOutputFolder)):\n refLines = utilsOs.readAllLinesFromFile(u'{0}sampleReference.tsv'.format(annotatedOutputFolder),\n noNewLineChar=True)\n annotatedFiles = set([line.split(u'\\t')[0] for line in refLines])\n listOfFilesPath = [file for file in listOfFilesPath if file not in annotatedFiles]\n # print the annotator cheat sheet\n print(\"\"\"\"0 - badly aligned\n \\n\\t0.0 - AMPLIFICATION: compensation, description, repetition or lang tendency to hypergraphy\n \\n\\t0.1 - ELISION: absence, omission, reduction or lang tendency to micrography\n \\n\\t0.2 - DISPLACEMENT: modification of the line order also modifying the order of the following lines\n \\n\\t0.3 - MISALIGNED and FOIBLE: alignment and quality errors\n \\n1 - well aligned\n \\n\\t1.0 - ALIGNED and GOOD QUALITY: is aligned and shows no evident sing of translation imperfections \n \\n\\t1.1 - FOIBLE: imperfection in the translation quality\"\"\")\n # open each file in EN and FR and show it in the terminal\n for filePath in listOfFilesPath:\n print(u'############# {0} ##############'.format(filePath.replace(u'/data/rali8/Tmp/rali/bt/burtrad/corpus_renamed/', u'')))\n # get the path for the source and target\n fileSourcePath = u'{0}.fr'.format(filePath) if u'fr-en' in filePath else u'{0}.en'.format(filePath)\n fileTargetPath = u'{0}.en'.format(filePath) if u'fr-en' in filePath else u'{0}.fr'.format(filePath)\n with open(fileSourcePath) as fileSource:\n with open(fileTargetPath) as fileTarget:\n # show the context of the annotated sentence\n beforeSentSource = fileSource.readline()\n duringSentSource = fileSource.readline()\n beforeSentTarget = fileTarget.readline()\n duringSentTarget = fileTarget.readline()\n # annotate the first sentence pair\n listOfAnnotations = annotateFirstSP(beforeSentSource, duringSentSource, beforeSentTarget,\n duringSentTarget, listOfAnnotations, lineLength=137)\n # save the reference\n # if the filepath is the reference\n if u'burtrad' in filePath:\n referencePathLine.append(u'{0}\\t{1}'.format(filePath, 0))\n # otherwise we get it from a reference file\n else:\n with open(u'{0}.tsv'.format(filePath)) as refFile:\n refLns = [ln.replace(u'\\n', u'') for ln in refFile.readlines()]\n referencePathLine.append(refLns[0])\n # dump the first SP\n if dumpSP is True:\n enSent = beforeSentSource if u'.en' in fileSourcePath else beforeSentTarget\n frSent = beforeSentTarget if u'.en' in fileSourcePath else beforeSentSource\n utilsOs.appendLineToFile(enSent, u'{0}sample.en'.format(annotatedOutputFolder), addNewLine=False)\n utilsOs.appendLineToFile(frSent, u'{0}sample.fr'.format(annotatedOutputFolder), addNewLine=False)\n duringIndex = 1\n # for each line\n while duringSentSource or duringSentTarget:\n # get the correct terminal line length\n lineLength = 137-len(str(len(listOfAnnotations)+1))\n # get the sentences\n afterSentSource = fileSource.readline()\n 
afterSentTarget = fileTarget.readline()\n # color in red the during lines\n redDuringSource = u'\\033[1;31m{0}\\033[0m'.format(duringSentSource)\n redDuringTarget = u'\\033[1;31m{0}\\033[0m'.format(duringSentTarget)\n # print the sentences\n print(u'{0} - {1}'.format(len(listOfAnnotations)-1, beforeSentSource))\n print(u'{0} - {1}'.format(len(listOfAnnotations)-1, beforeSentTarget))\n print(u'{0} - {1}'.format(len(listOfAnnotations), redDuringSource))\n print(u'{0} - {1}'.format(len(listOfAnnotations), redDuringTarget))\n print(u'{0} - {1}'.format(len(listOfAnnotations)+1, afterSentSource))\n print(u'{0} - {1}'.format(len(listOfAnnotations)+1, afterSentTarget))\n print()\n # count if the lines that take the space of 2 lines\n longLines = getNbLongLines([beforeSentSource, beforeSentTarget, duringSentSource,\n duringSentTarget, afterSentSource, afterSentTarget], lineLength)\n # get the first part of the annotation (aligned or not)\n annotatorGeneralInput = input(u'Aligned-Misaligned annotation: ')\n # make sure to have the right general annotation\n while True:\n if annotatorGeneralInput in [u'0', u'1', u'0.0', u'0.1', u'0.2', u'0.3', u'1.0', u'1.1', u'c', u'correct']:\n break\n else:\n utilsOs.moveUpAndLeftNLines(1, slowly=False)\n annotatorGeneralInput = input(u'Repeat annotation: ')\n if annotatorGeneralInput in [u'c', u'correct']:\n annotatorGeneralInput, listOfAnnotations = correctionToAnnotation(listOfAnnotations)\n # if we still need to specify what type of alignment or misalignment\n if annotatorGeneralInput in [u'0', u'1']:\n utilsOs.moveUpAndLeftNLines(1, slowly=False)\n # get the second part of the annotation (aligned or not)\n annotatorSpecificInput = input(u'Specific type annotation: ')\n typeAnswers = [u'0', u'1', u'2', u'3'] if annotatorGeneralInput == 0 else [u'0', u'1']\n # make sure to have the right specific annotation\n while True:\n if annotatorSpecificInput in typeAnswers:\n break\n else:\n utilsOs.moveUpAndLeftNLines(1, slowly=False)\n annotatorSpecificInput = input(u'Repeat type annotation: ')\n # save to the list of annotations\n listOfAnnotations.append(float(u'{0}.{1}'.format(annotatorGeneralInput, annotatorSpecificInput)))\n # if the right answer was given in the right format right away\n else:\n # save to the list of annotations\n listOfAnnotations.append(float(annotatorGeneralInput))\n # remove the lines from the terminal before getting to the next pair\n utilsOs.moveUpAndLeftNLines(14+longLines, slowly=False)\n # erase all remainder of the previous sentences and go back up again\n for e in range(14+longLines):\n print(u' '*(lineLength+4))\n utilsOs.moveUpAndLeftNLines(14 + longLines, slowly=False)\n # next line source\n beforeSentSource = duringSentSource\n duringSentSource = afterSentSource\n # next line target\n beforeSentTarget = duringSentTarget\n duringSentTarget = afterSentTarget\n # append the reference to the file\n # if the filepath is the reference\n if u'burtrad' in filePath:\n referencePathLine.append(u'{0}\\t{1}'.format(filePath, duringIndex))\n # otherwise we get it from a reference file\n else:\n with open(u'{0}.tsv'.format(filePath)) as refFile:\n refLns = [ln.replace(u'\\n', u'') for ln in refFile.readlines()]\n referencePathLine.append(refLns[duringIndex])\n # add 1 to index\n duringIndex += 1\n # dump the file line by line, to be sure in case of error\n # dump the reference\n utilsOs.dumpRawLines(referencePathLine, u'{0}sampleReference.tsv'.format(annotatedOutputFolder),\n addNewline=True, rewrite=True)\n # dump the annotation\n 
utilsOs.dumpRawLines(listOfAnnotations, u'{0}sampleAnnotation.tsv'.format(annotatedOutputFolder),\n addNewline=True, rewrite=True)\n # dump the SP\n if dumpSP is True:\n enSent = beforeSentSource if u'.en' in fileSourcePath else beforeSentTarget\n frSent = beforeSentTarget if u'.en' in fileSourcePath else beforeSentSource\n utilsOs.appendLineToFile(enSent, u'{0}sample.en'.format(annotatedOutputFolder), addNewLine=False)\n utilsOs.appendLineToFile(frSent, u'{0}sample.fr'.format(annotatedOutputFolder), addNewLine=False)\n # clear part of terminal\n utilsOs.moveUpAndLeftNLines(2, slowly=False)",
"def annotate(base, parents, skiprevs=None, diffopts=None):\n\n # This algorithm would prefer to be recursive, but Python is a\n # bit recursion-hostile. Instead we do an iterative\n # depth-first search.\n\n # 1st DFS pre-calculates pcache and needed\n visit = [base]\n pcache = {}\n needed = {base: 1}\n while visit:\n f = visit.pop()\n if f in pcache:\n continue\n pl = parents(f)\n pcache[f] = pl\n for p in pl:\n needed[p] = needed.get(p, 0) + 1\n if p not in pcache:\n visit.append(p)\n\n # 2nd DFS does the actual annotate\n visit[:] = [base]\n hist = {}\n while visit:\n f = visit[-1]\n if f in hist:\n visit.pop()\n continue\n\n ready = True\n pl = pcache[f]\n for p in pl:\n if p not in hist:\n ready = False\n visit.append(p)\n if ready:\n visit.pop()\n curr = _decoratelines(f.data(), f)\n skipchild = False\n if skiprevs is not None:\n skipchild = f._changeid in skiprevs\n curr = _annotatepair(\n [hist[p] for p in pl], f, curr, skipchild, diffopts\n )\n for p in pl:\n if needed[p] == 1:\n del hist[p]\n del needed[p]\n else:\n needed[p] -= 1\n\n hist[f] = curr\n del pcache[f]\n\n a = hist[base]\n return [\n annotateline(*r)\n for r in zip(a.fctxs, a.linenos, a.skips, mdiff.splitnewlines(a.text))\n ]",
"def get_filtered_annotations(assoc_file, accepted_evcodes=None,\n remove_leading_gene_id=None,\n use_symbol=None, tax_id=None):\n\n if assoc_file.endswith('.gz'):\n assoc_fh = gzip.open(assoc_file, 'r')\n else:\n assoc_fh = open(assoc_file, 'r')\n\n annotations = []\n\n for line in assoc_fh:\n if line.startswith('!'):\n continue\n\n toks = line.strip().split('\\t')\n\n (xrdb, xrid, details, goid, refstring, ev_code, taxon, date) = (\n toks[0], toks[1], toks[3], toks[4], toks[5], toks[6],\n toks[12].split(':')[1], toks[13])\n\n if tax_id and (tax_id != taxon):\n continue\n\n if remove_leading_gene_id:\n xrid = xrid.split(':')[1]\n\n if xrdb in DB_REMAP:\n xrdb = DB_REMAP[xrdb]\n\n if use_symbol:\n xrdb = 'Symbol'\n if toks[0] == 'UniProtKB':\n xrid = toks[2]\n\n # These next few lines are needed for processing\n # Arabidopsis annotations\n if xrdb == 'TAIR':\n tair_regex = re.compile('AT[0-9MC]G[0-9][0-9][0-9][0-9][0-9]')\n first_alias = toks[10].split('|')[0]\n if tair_regex.match(toks[2]):\n xrid = toks[2]\n elif tair_regex.match(toks[9]):\n xrid = toks[9]\n elif tair_regex.match(first_alias):\n xrid = first_alias\n\n if details == 'NOT':\n continue\n\n if accepted_evcodes is not None and (ev_code not in accepted_evcodes):\n continue\n\n annotation = (xrdb, xrid, goid, refstring, date)\n\n annotations.append(annotation)\n\n return annotations",
"def cli():\n script_name = os.path.splitext(os.path.basename(__file__))[0]\n config, logger, report_filename = get_common_arguments(\n script_name, DESCRIPTION, PARAMETERS)\n\n report = annotate_occurrence_files(\n config[\"dwc_filenames\"], config[\"riis_with_gbif_taxa_filename\"],\n config[\"geoinput_path\"], config[\"output_path\"], logger)\n\n # If the output report was requested, write it\n if report_filename:\n try:\n with open(report_filename, mode='wt') as out_file:\n json.dump(report, out_file, indent=4)\n except OSError:\n raise\n except IOError:\n raise\n except Exception:\n raise\n logger.log(\n f\"Wrote report file to {report_filename}\", refname=script_name)",
"def annotate(self, annotations, overwrite=True, cmd_args=None):\n\n r = Result(\"annotate\")\n base_args = list()\n\n if overwrite:\n base_args.append(\"--overwrite\")\n\n for l, v in six.iteritems(annotations):\n if not v:\n if not l.endswith(\"-\"):\n l += \"-\" # Indicate removal on command line if caller has not applied \"-\" suffix\n base_args.append(l)\n else:\n base_args.append('{}={}'.format(l, v))\n\n r.add_action(oc_action(self.context, \"annotate\", all_namespaces=self.all_namespaces,\n cmd_args=[self._selection_args(needs_all=True), base_args, cmd_args]))\n\n r.fail_if(\"Error running annotate\")\n return self",
"def annotate_file_html(fp, lines, covered):\n \n # initialize stats\n n_covered = n_lines = 0\n\n output = []\n for i, line in enumerate(fp):\n is_covered = False\n is_line = False\n\n i += 1\n\n if i in covered:\n is_covered = True\n\n n_covered += 1\n n_lines += 1\n elif i in lines:\n is_line = True\n\n n_lines += 1\n\n color = ''\n if is_covered:\n color = 'covered'\n elif is_line:\n color = 'uncovered'\n\n line = escape_html(line.rstrip())\n output.append('<span class=\"%s\"><strong>%4d</strong> %s</span>' % (color, i, line))\n\n try:\n percent = n_covered * 100. / n_lines\n except ZeroDivisionError:\n percent = 100\n\n return output, n_covered, n_lines, percent",
"def annotate_commit_loc(commits, project, clear_cache=False):\n print 'Annotating lines of code changed'\n cache = {}\n if not clear_cache:\n try:\n cache = jload(project_to_fname(project, loc=True))\n # Hack to remove artifacts left by jdump,\n # also remove any empty entries\n \"\"\"\n for k, entry in cache.items():\n if entry:\n if 'json_key' in entry:\n del cache[k]['json_key']\n else:\n del cache[k]\n \"\"\"\n print ' Loaded Lines of Code Changed cache'\n\n except Exception:\n print ' Failed to load Lines of Code Changed cache'\n cache = {}\n pass\n\n cache_initial_size = len(cache)\n print ' Initial Lines of Code Changed cache size:', cache_initial_size\n\n repo_name = get_repo_name(project)\n filter_config = get_filter_config(project)\n repo = git.Repo(repo_name)\n total_operations = 0\n for k, commit in commits.items():\n if commit['reachable'] and 'loc_add' not in commit:\n if k not in cache:\n # print commit['cid']\n c = repo.commit(commit['cid'])\n loc_add = 0\n loc_change = 0\n detail = {}\n if len(c.parents) > 0:\n p = c.parents[0]\n\n files = process_commit_files_unfiltered(c)\n subset_files = [f for f in files\n if filter_file(f, filter_config)]\n for path in subset_files:\n # print 'Getting diff object for path:', path\n d = c.diff(p, create_patch=True, paths=path)\n diff_text = d[0].diff\n # print diff_text\n\n adds = sum([1 for txt in diff_text.splitlines()\n if txt.startswith('+')]) - 1\n removes = sum([1 for txt in diff_text.splitlines()\n if txt.startswith('-')]) - 1\n changes = max(adds, removes)\n detail[path] = {'add': adds, 'changes': changes}\n loc_add += adds\n loc_change += changes\n\n cache[k] = {'loc_add': loc_add,\n 'loc_change': loc_change,\n 'loc_detail': detail}\n else:\n cache[k] = {'loc_add': 0,\n 'loc_change': 0,\n 'loc_detail': {}}\n\n commit['loc_add'] = cache[k]['loc_add']\n commit['loc_change'] = cache[k]['loc_change']\n commit['loc_detail'] = cache[k]['loc_detail']\n\n total_operations += 1\n if total_operations % 100 == 0:\n print '.',\n if total_operations % 1000 == 0:\n print total_operations,\n print\n\n if len(cache) > cache_initial_size:\n print\n print ' Saving updated Lines of Code Changed Cache'\n jdump(cache, project_to_fname(project, loc=True))\n \"\"\"\n # Hack to remove artifacts left by jdump\n for k in blame_cache.keys(): # remove key artifact from jload\n if 'json_key' in blame_cache[k]:\n del blame_cache[k]['json_key']\n \"\"\"",
"def annotateVCF(self):\n cwd = os.getcwd()\n if self.__finalVCF:\n self.__ifVerbose(\"Annotating final VCF.\")\n self.__CallCommand(['SnpEff', self.fOut + \"/\" + self.name +'_annotation.txt'],\n ['env', '_JAVA_OPTIONS=-Xmx4g', self.__annotator, '-download', 'm_tuberculosis_H37Rv', self.__finalVCF])\n self.__annotation = self.fOut + \"/\" + self.name +'_annotation.txt'\n self.__ifVerbose(\"parsing final Annotation.\")\n self.__CallCommand(['parse annotation', self.fOut + \"/\" + self.name +'_Final_annotation.txt'],\n [self.__parser, self.__annotation, self.name, self.mutationloci])\n if os.path.isfile(self.fOut + \"/\" + self.name +'_SamTools_Resistance_filtered.vcf'):\n self.__CallCommand(['SnpEff', self.fOut + \"/\" + self.name +'_Resistance_annotation.txt'],\n ['env', '_JAVA_OPTIONS=-Xmx4g', self.__annotator, '-download', 'm_tuberculosis_H37Rv', self.fOut + \"/\" + self.name +'_SamTools_Resistance_filtered.vcf']) \n self.__ifVerbose(\"parsing final Annotation.\")\n self.__CallCommand(['parse annotation', self.fOut + \"/\" + self.name +'_Resistance_Final_annotation.txt'],\n [self.__parser, self.fOut + \"/\" + self.name +'_Resistance_annotation.txt', self.name, self.mutationloci])\n elif os.path.isfile(self.fOut + \"/\" + self.name +'_GATK_Resistance_filtered.vcf'):\n self.__CallCommand(['SnpEff', self.fOut + \"/\" + self.name +'_Resistance_annotation.txt'],\n ['env', '_JAVA_OPTIONS=-Xmx4g', self.__annotator, '-download', 'm_tuberculosis_H37Rv', self.fOut + \"/\" + self.name +'_GATK_Resistance_filtered.vcf']) \n self.__ifVerbose(\"parsing final Annotation.\")\n self.__CallCommand(['parse annotation', self.fOut + \"/\" + self.name +'_Resistance_Final_annotation.txt'],\n [self.__parser, self.fOut + \"/\" + self.name +'_Resistance_annotation.txt', self.name, self.mutationloci])\n else:\n self.__ifVerbose(\"Use SamTools, GATK, or Freebayes to annotate the final VCF.\")\n self.__CallCommand('rm', ['rm', cwd + \"/snpEff_genes.txt\"])\n self.__CallCommand('rm', ['rm', cwd + \"/snpEff_summary.html\"])",
"def annotate(args):\n from .annotation.annotation import annotate as anno\n anno(args)",
"def svn_fs_info_config_files(*args) -> \"SWIGTYPE **\":\n return _fs.svn_fs_info_config_files(*args)",
"def annotateImages(path = None):\n\tif not path:\n\t\tpath = defaultPath\n\tf = open(\"%s/images.txt\" % path)\n\ttry:\n\t\thdr = f.readline()\n\t\titems = hdr.strip(\"\\n\").split(\"\\t\")\n\t\tfirst=True\n\t\tfor tmp in f:\n\t\t\tdata = tmp.strip(\"\\n\").split(\"\\t\")\n\t\t\tm = {}\n\t\t\tfor i in range(0, len(items)):\n\t\t\t\tm[items[i].upper()] = data[i]\n\t\t\timg = []\n\t\t\tfor i in range(0, 4):\n\t\t\t\tif m[\"TYPE\"]==\"SI\":\n\t\t\t\t\tfpt=jio.File(path, m[\"NAME\"])\n\t\t\t\t\ti0 = jio.File(fpt, \"Image[0][[%d]].png\" % (i, ))\n\t\t\t\t\toutFile = jio.File(fpt, \"Image[0][[%d]][SC].png\" % (i, ))\n\t\t\t\telse:\n\t\t\t\t\ti0 = jio.File(path, \"%s[%d].png\" % (m[\"NAME\"], i))\n\t\t\t\t\toutFile = jio.File(path, \"%s[%d][SC].png\" % (m[\"NAME\"], i))\n\t\t\t\tif i0.isFile() and (not outFile.isFile()):\n\t\t\t\t\tbi = iio.ImageIO.read(i0)\n\t\t\t\t\tfov = float(m[\"FOV\"])\n\t\t\t\t\tsc = epq.StageCoordinate()\n\t\t\t\t\tsc.set(X_AXIS, float(m[\"X\"]))\n\t\t\t\t\tsc.set(Y_AXIS, float(m[\"Y\"]))\n\t\t\t\t\tsc.set(Z_AXIS, float(m[\"Z\"]))\n\t\t\t\t\tif first:\n\t\t\t\t\t\tprint \"Filename\\tField-of-View\"\n\t\t\t\t\t\tfirst=False\n\t\t\t\t\tprint \"%s\\t%3.1f um\" % (i0.name, 1000.0*fov )\n\t\t\t\t\tsi = ept.ScaledImage(bi, fov * 0.001, fov * 0.001, 0.0, sc, str(i))\n\t\t\t\t\tsi.applyMicronBar()\n\t\t\t\t\tiio.ImageIO.write(si, \"png\", outFile)\n\tfinally:\n\t\tf.close()",
"def anotation(output):\r\n\r\n vcfs = obtener_nombre_ficheros(output + '/pileup/', 'vcf')\r\n for fichero in vcfs:\r\n os.system(\"awk '{{print $1, $2, $4, $5, $10}}' {0}/pileup/{1} > {0}/annotate/{1}\".format(output, fichero))\r\n os.system(\"sed -i 's/chr//g' {0}/annotate/{1}\".format(output, fichero))\r\n os.system(\"awk '{{print $1{2}$2{2}$2{2}$3{2}$4{2}$5}}' {0}/annotate/{1} > {0}/annotate/{1}_awk.vcf\".format(output, fichero,'\"\\\\t\"'))\r\n os.system(\"grep -v '#' {0}/annotate/{1}_awk.vcf > {0}/annotate/{1}_grep.vcf\".format(output,fichero))\r\n os.system(\"python genotipo.py -i {0}/annotate/{1}_grep.vcf -o {0}/annotate/{1}\".format(output,fichero))\r\n os.system(\"rm {0}/annotate/{1}_awk.vcf\".format(output,fichero))\r\n os.system(\"rm {0}/annotate/{1}_grep.vcf\".format(output,fichero))\r\n os.system(\"perl annovar/table_annovar.pl {0}/annotate/{1} annovar/humandb/ -buildver hg19 -out {0}/annotate/{1} -remove -protocol refGene,cytoBand,gnomad_exome,clinvar_20131105,exac03,avsnp147,dbnsfp30a -operation g,r,f,f,f,f,f -nastring . -csvout -polish -xref annovar/example/gene_fullxref.txt\".format(output,fichero))\r\n os.system(\"awk -f filtro_awk {0}/annotate/{1}.{2}_multianno.csv > {0}/annotate/{1}.{2}_multianno_filtrado.csv\".format(output,fichero,\"hg19\")\r\n os.system(\"python multianno_vcf_annot.py -i {0}/annotate/{1}.{2}_multianno_filtrado.csv -o {0}/annotate/{1}.{2}_multianno_filtrado_genot.csv -v {0}/annotate/{1}\".format(output,fichero,\"hg19\"))\r\n \r\ndef main():\r\n \"\"\"\r\n Funcion que ejecuta el programa.\r\n \"\"\"\r\n\r\n ext = \"fastq\"\r\n argum = argumentos()\r\n crear_directorios(argum.output)\r\n ficheros = obtener_nombre_ficheros(argum.input, ext)\r\n calidad_fichero(ficheros, argum.input, argum.output)\r\n trimming(ficheros, argum.input, argum.output, argum.type)\r\n alineamiento(argum.reference, argum.input, argum.output, argum.type, ext, argum.amplicon)\r\n variant_calling(argum.reference, argum.input, argum.output)\r\n anotation(argm.output)",
"def annotations(self):\n for line in self.fp:\n self.lineno += 1\n if not line or line[0] == '!':\n # This is a comment line\n continue\n try:\n # append the organism name to the line, the file.\n # Some wiggleling is necessary, because the last\n # part of the line is actually a newline and three tab\n line = line[0:-2] + self.organism_name\n yield Annotation(line)\n except TypeError as ex:\n raise SyntaxError(\"cannot parse annotation\", self.lineno)",
"def iter_lines_tagged(file, eol=r'(?:\\r?\\n|\\n?\\r)'):\r\n\r\n\tm = re.compile(\r\n\t\tr'(?:'\r\n\t\tr'(?P<eop>' + eol + r')|'\r\n\t\tr'(?P<eob>-{5,})|'\r\n\t\tr'(?P<text>.+?)'\r\n\t\tr')'\r\n\t\tr'(?:' + eol + r'|$)')\r\n\r\n\tfor x in m.finditer(file.read()):\r\n\r\n\t\tif x.group(\"eop\"):\r\n\t\t\tyield \"eop\", None\r\n\t\telif x.group(\"eob\"):\r\n\t\t\tyield \"eob\", None\r\n\t\telif x.group(\"text\"):\r\n\t\t\tyield \"text\", x.group(\"text\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/filelog/{revision}/{path} Show information about the history of a file in the repository. The ``revcount`` query string argument can be defined to control the maximum number of entries to show. The ``filelog`` template will be rendered.
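A hedged usage sketch for this endpoint, again with a placeholder host, repository, and file, showing ``revcount`` raising the page size and ``patch`` asking for inline diffs:

import requests

resp = requests.get(
    "https://hg.example.org/repo/filelog/tip/setup.py",  # placeholder URL
    params={"revcount": 120, "patch": 1},  # up to 120 entries, with patches
)
resp.raise_for_status()
print(resp.text)  # the rendered ``filelog`` template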
|
def filelog(web):
try:
fctx = webutil.filectx(web.repo, web.req)
f = fctx.path()
fl = fctx.filelog()
except error.LookupError:
f = webutil.cleanpath(web.repo, web.req.qsparams[b'file'])
fl = web.repo.file(f)
numrevs = len(fl)
if not numrevs: # file doesn't exist at all
raise
rev = webutil.changectx(web.repo, web.req).rev()
first = fl.linkrev(0)
if rev < first: # current rev is from before file existed
raise
frev = numrevs - 1
while fl.linkrev(frev) > rev:
frev -= 1
fctx = web.repo.filectx(f, fl.linkrev(frev))
revcount = web.maxshortchanges
if b'revcount' in web.req.qsparams:
try:
revcount = int(web.req.qsparams.get(b'revcount', revcount))
revcount = max(revcount, 1)
web.tmpl.defaults[b'sessionvars'][b'revcount'] = revcount
except ValueError:
pass
lrange = webutil.linerange(web.req)
lessvars = copy.copy(web.tmpl.defaults[b'sessionvars'])
lessvars[b'revcount'] = max(revcount // 2, 1)
morevars = copy.copy(web.tmpl.defaults[b'sessionvars'])
morevars[b'revcount'] = revcount * 2
patch = b'patch' in web.req.qsparams
if patch:
lessvars[b'patch'] = morevars[b'patch'] = web.req.qsparams[b'patch']
descend = b'descend' in web.req.qsparams
if descend:
lessvars[b'descend'] = morevars[b'descend'] = web.req.qsparams[
b'descend'
]
count = fctx.filerev() + 1
start = max(0, count - revcount) # first rev on this page
end = min(count, start + revcount) # last rev on this page
parity = paritygen(web.stripecount, offset=start - end)
repo = web.repo
filelog = fctx.filelog()
revs = [
filerev
for filerev in filelog.revs(start, end - 1)
if filelog.linkrev(filerev) in repo
]
entries = []
diffstyle = web.config(b'web', b'style')
if b'style' in web.req.qsparams:
diffstyle = web.req.qsparams[b'style']
def diff(fctx, linerange=None):
ctx = fctx.changectx()
basectx = ctx.p1()
path = fctx.path()
return webutil.diffs(
web,
ctx,
basectx,
[path],
diffstyle,
linerange=linerange,
lineidprefix=b'%s-' % ctx.hex()[:12],
)
linerange = None
if lrange is not None:
assert lrange is not None # help pytype (!?)
linerange = webutil.formatlinerange(*lrange)
# deactivate numeric nav links when linerange is specified, as this
# would require a dedicated "revnav" class
nav = templateutil.mappinglist([])
if descend:
it = dagop.blockdescendants(fctx, *lrange)
else:
it = dagop.blockancestors(fctx, *lrange)
for i, (c, lr) in enumerate(it, 1):
diffs = None
if patch:
diffs = diff(c, linerange=lr)
# follow renames across filtered (not in range) revisions
path = c.path()
lm = webutil.commonentry(repo, c)
lm.update(
{
b'parity': next(parity),
b'filerev': c.rev(),
b'file': path,
b'diff': diffs,
b'linerange': webutil.formatlinerange(*lr),
b'rename': templateutil.mappinglist([]),
}
)
entries.append(lm)
if i == revcount:
break
lessvars[b'linerange'] = webutil.formatlinerange(*lrange)
morevars[b'linerange'] = lessvars[b'linerange']
else:
for i in revs:
iterfctx = fctx.filectx(i)
diffs = None
if patch:
diffs = diff(iterfctx)
lm = webutil.commonentry(repo, iterfctx)
lm.update(
{
b'parity': next(parity),
b'filerev': i,
b'file': f,
b'diff': diffs,
b'rename': webutil.renamelink(iterfctx),
}
)
entries.append(lm)
entries.reverse()
revnav = webutil.filerevnav(web.repo, fctx.path())
nav = revnav.gen(end - 1, revcount, count)
latestentry = entries[:1]
return web.sendtemplate(
b'filelog',
file=f,
nav=nav,
symrev=webutil.symrevorshortnode(web.req, fctx),
entries=templateutil.mappinglist(entries),
descend=descend,
patch=patch,
latestentry=templateutil.mappinglist(latestentry),
linerange=linerange,
revcount=revcount,
morevars=morevars,
lessvars=lessvars,
**pycompat.strkwargs(webutil.commonentry(web.repo, fctx))
)
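The ``lessvars``/``morevars`` navigation links computed above simply halve or double the current page size, with halving bottoming out at one entry. A tiny sketch of that arithmetic (the helper name is made up):

def paging_variants(revcount):
    # Page sizes offered by the "less" and "more" navigation links.
    lesscount = max(revcount // 2, 1)
    morecount = revcount * 2
    return lesscount, morecount

assert paging_variants(60) == (30, 120)
assert paging_variants(1) == (1, 2)  # halving never drops below one entry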
|
[
"def getRevisionLog(url, revision):\r\n svn_log = subprocess2.check_output(\r\n ['svn', 'log', url, '-r', str(revision)],\r\n universal_newlines=True).splitlines(True)\r\n # Don't include the header lines and the trailing \"---...\" line.\r\n return ''.join(svn_log[3:-1])",
"def get_repo_log(path=None, file_detail=False, commandline=True, subset=None):\n if file_detail:\n if subset is None:\n res = get_file_details_all(path, commandline=commandline)\n details = {}\n for commit in res:\n com = commit[0]\n if com not in details:\n details[com] = []\n details[com].append(commit[1:])\n else:\n files = subset\n details = {}\n for i, name in enumerate(files):\n res = get_file_details(name.name if isinstance(name, RepoFile) else name,\n path, commandline=commandline)\n for commit in res:\n com = commit[0]\n if com not in details:\n details[com] = []\n details[com].append(commit[1:])\n logs = get_repo_log(path=path, file_detail=False,\n commandline=commandline)\n final = []\n for log in logs:\n com = log[4]\n if com not in details:\n continue\n det = details[com]\n for d in det:\n final.append(tuple(log) + d)\n return final\n\n if path is None:\n path = os.path.normpath(\n os.path.abspath(os.path.join(os.path.split(__file__)[0], \"..\", \"..\", \"..\")))\n\n if not commandline: # pragma: no cover\n try:\n raise NotImplementedError()\n except Exception:\n return get_repo_log(path, file_detail, True)\n else:\n cmd = get_cmd_git()\n if sys.platform.startswith(\"win\"): # pragma: no cover\n cmd += ' log --pretty=format:\"<logentry revision=\\\\\"%h\\\\\">' + \\\n '<author>%an</author><date>%ci</date><hash>%H</hash><msg>%s</msg></logentry>\" ' + \\\n path\n else:\n cmd_tmp = '--pretty=format:<logentry revision=\"%h\"><author>%an</author><date>%ci' + \\\n '</date><hash>%H</hash><msg>%s</msg></logentry>'\n cmd = [cmd, 'log', cmd_tmp, path]\n\n enc = sys.stdout.encoding if sys.stdout is not None else \"utf8\"\n out, err = run_cmd(cmd, wait=True, encerror=\"strict\", encoding=enc,\n change_path=os.path.split(\n path)[0] if os.path.isfile(path) else path,\n shell=sys.platform.startswith(\"win32\"), preprocess=False)\n\n if len(err) > 0: # pragma: no cover\n mes = f\"Problem with file '{path}'\"\n raise GitException(mes + \"\\n\" +\n err + \"\\nCMD:\\n\" + cmd + \"\\nOUT:\\n\" + out +\n \"\\n[giterror]\\n\" + err + \"\\nCMD:\\n\" + cmd)\n\n master = get_master_location(path, commandline)\n if master.endswith(\".git\"):\n master = master[:-4]\n\n if enc != \"utf8\" and enc is not None:\n by = out.encode(enc)\n out = by.decode(\"utf8\")\n\n out = out.replace(\"\\n\\n\", \"\\n\")\n out = f\"<xml>\\n{out}\\n</xml>\"\n try:\n root = ET.fromstring(out)\n except ET.ParseError:\n # it might be due to character such as << >>\n lines = out.split(\"\\n\")\n out = []\n suffix = \"</msg></logentry>\"\n for line in lines:\n if line.endswith(suffix):\n pos = line.find(\"<msg>\")\n if pos == -1:\n out.append(line)\n continue\n begin = line[:pos + 5]\n body = line[pos + 5:-len(suffix)]\n msg = escape(body)\n line = begin + msg + suffix\n out.append(line)\n out = \"\\n\".join(out)\n try:\n root = ET.fromstring(out)\n except ET.ParseError as eee: # pragma: no cover\n raise GitException(\n f\"Unable to parse:\\n{out}\") from eee\n\n res = []\n for i in root.iter('logentry'):\n revision = i.attrib['revision'].strip()\n author = i.find(\"author\").text.strip()\n t = i.find(\"msg\").text\n hash = i.find(\"hash\").text\n msg = t.strip() if t is not None else \"-\"\n sdate = i.find(\"date\").text.strip()\n dt = my_date_conversion(sdate.replace(\"T\", \" \").strip(\"Z \"))\n row = [author, revision, dt, msg, hash]\n if master.startswith(\"http\"):\n row.append(master + \"/commit/\" + hash)\n else:\n row.append(f\"{master}//{hash}\")\n res.append(row)\n return res",
"def get_file_revisions(self, repo_id, commit_id, path, limit):\n return seafserv_threaded_rpc.list_file_revisions(repo_id, commit_id, path, limit)",
"def do_tail(client, filename):\n logger.info('Opening log file: %s' % filename)\n fd = open(filename, 'r')\n\n # wind back to near the end of the file...\n tail_lines(fd, 10)\n\n stats = {'ok': 0,\n 'try_later': 0,\n }\n\n logger.info('Starting log loop.')\n last_log_file_update = datetime.now()\n delta = timedelta(seconds=60)\n\n while 1:\n where = fd.tell()\n line = fd.readline()\n\n if not line:\n fd_results = os.fstat(fd.fileno())\n try:\n st_results = os.stat(filename)\n except OSError:\n st_results = fd_results\n\n if st_results.st_size < where:\n logger.info('%s was truncated. Jump back to 0.', filename)\n fd.seek(0)\n elif st_results.st_ino == fd_results.st_ino:\n time.sleep(1)\n fd.seek(where)\n else:\n logger.info(\"%s changed inode numbers from %d to %d\" %\n (filename, fd_results[1], st_results[1]))\n fd = open(filename, 'r')\n else:\n if options.prepend_hostname:\n line = '%s: %s' % (hostname, line)\n log_entry=scribe.LogEntry(category=options.category, message=line)\n result = client.Log([log_entry])\n if result == scribe.ResultCode.OK:\n stats['ok'] += 1\n elif result == scribe.ResultCode.TRY_LATER:\n stats['try_later'] += 1\n\n now = datetime.now()\n if (now - delta) > last_log_file_update:\n last_log_file_update = now\n logger.info('Messages successfully logged: %d' % stats['ok'])\n logger.info('Messages to try later: %d' % stats['try_later'])\n stats['ok'] = 0\n stats['try_later'] = 0\n if options.publish_stats:\n publish_stats(stats)",
"def log_message(repo,rev0,rev1,paths, authors):\n now = datetime.datetime.now()\n now = now.strftime(\"%c\")\n path_string = ', '.join(paths)\n author_string = ', '.join(authors)\n print \"[{reponame}] -- Rev {rev0} -> {rev1}, {now} \\n\\tAuthors: {authors}\\n\\tFiles: {files}\".format(now=now,reponame=repo,rev0=rev0,rev1=rev1,authors=author_string,files=path_string)",
"def svn_fs_node_history(*args) -> \"svn_fs_history_t **\":\n return _fs.svn_fs_node_history(*args)",
"def do_view_log() -> 'html':\n view_log_dict = dict(the_data=read_log()\n , the_title='Current Log Data'\n , the_row_titles=['Form Data'\n , 'Remote Addr'\n , 'User Agent'\n , 'Results'\n ]\n )\n return render_template('viewlog.html', **view_log_dict)",
"def get_filenames_in_revision(self, revision):\n r = self.__normalize_revision(revision)\n logs = self.client.log(self.repopath, r, r, True)\n\n if len(logs) == 0:\n return []\n elif len(logs) == 1:\n return [f['path'] for f in logs[0]['changed_paths']]\n else:\n assert False",
"def log_revision(self, cvs_rev, svn_revnum):\n\n for (symbol_id, cvs_symbol_id,) in cvs_rev.opened_symbols:\n self._log_opening(symbol_id, cvs_symbol_id, svn_revnum)\n\n for (symbol_id, cvs_symbol_id) in cvs_rev.closed_symbols:\n self._log_closing(symbol_id, cvs_symbol_id, svn_revnum)",
"def svn_fs_history_location(*args) -> \"char const **, svn_revnum_t *\":\n return _fs.svn_fs_history_location(*args)",
"def render_history_view(request, template='clients/history.html', **kwargs):\n\n context = kwargs.get('context', dict())\n max_results = int(kwargs.get('page_limit', 25))\n page = int(kwargs.get('page_number', 1))\n\n client = kwargs.get('client', None)\n if not client and 'hostname' in kwargs:\n client = get_object_or_404(Client, name=kwargs['hostname'])\n if client:\n context['client'] = client\n\n entry_max = kwargs.get('maxdate', None)\n context['entry_max'] = entry_max\n\n # Either filter by client or limit by clients\n iquery = kwargs.get('interaction_base', Interaction.objects)\n if client:\n iquery = iquery.filter(client__exact=client).select_related()\n\n if 'orderby' in kwargs and kwargs['orderby']:\n iquery = iquery.order_by(kwargs['orderby'])\n\n if 'state' in kwargs and kwargs['state']:\n iquery = iquery.filter(state__exact=kwargs['state'])\n if 'server' in kwargs and kwargs['server']:\n iquery = iquery.filter(server__exact=kwargs['server'])\n\n if entry_max:\n iquery = iquery.filter(timestamp__lte=entry_max)\n\n if max_results < 0:\n max_results = 1\n entry_list = []\n if max_results > 0:\n try:\n rec_start, rec_end = prepare_paginated_list(request,\n context,\n iquery,\n page,\n max_results)\n except PaginationError:\n page_error = sys.exc_info()[1]\n if isinstance(page_error[0], HttpResponse):\n return page_error[0]\n return HttpResponseServerError(page_error)\n context['entry_list'] = iquery.all()[rec_start:rec_end]\n else:\n context['entry_list'] = iquery.all()\n\n return render_to_response(template, context,\n context_instance=RequestContext(request))",
"def revision_list(self, args):\n\n messages = []\n def canceler(cancel_args):\n if cancel_args[0].lower() in ['revision','rev']:\n return RevisionCommand(parent=self.parent, ctx=self.ctx, args=cancel_args, guild=self.guild, user=self.user, channel=self.channel).run()\n else:\n self.parent.args = cancel_args\n self.parent.command = self.parent.args[0]\n return self.parent.get_messages()\n\n def formatter(item, item_num, page_num, page_size):\n return item.get_short_string(self.user)\n\n messages.extend(Dialog({\n 'svc': revision_svc,\n 'user': self.user,\n 'title': 'Revision List',\n 'command': 'revision ' + (' '.join(args)),\n 'type': 'view',\n 'type_name': 'REVISION',\n 'page_size': 1,\n 'getter': {\n 'method': Revision.get_by_page,\n 'params': {'params': {'archived': False}}\n },\n 'formatter': formatter,\n 'cancel': canceler\n }).open())\n return messages",
"def build_history(args):\n\n if args.history == \"list\":\n list_build_history(\n no_header=args.no_header,\n terse=args.terse,\n pager=args.pager,\n color=args.color,\n row_count=args.row_count,\n )\n\n if args.history == \"query\":\n query_builds(\n build_id=args.id, log_option=args.log, output=args.output, pager=args.pager\n )",
"def changelog(web, shortlog=False):\n\n query = b''\n if b'node' in web.req.qsparams:\n ctx = webutil.changectx(web.repo, web.req)\n symrev = webutil.symrevorshortnode(web.req, ctx)\n elif b'rev' in web.req.qsparams:\n return _search(web)\n else:\n ctx = web.repo[b'tip']\n symrev = b'tip'\n\n def changelist(maxcount):\n revs = []\n if pos != -1:\n revs = web.repo.changelog.revs(pos, 0)\n\n for entry in webutil.changelistentries(web, revs, maxcount, parity):\n yield entry\n\n if shortlog:\n revcount = web.maxshortchanges\n else:\n revcount = web.maxchanges\n\n if b'revcount' in web.req.qsparams:\n try:\n revcount = int(web.req.qsparams.get(b'revcount', revcount))\n revcount = max(revcount, 1)\n web.tmpl.defaults[b'sessionvars'][b'revcount'] = revcount\n except ValueError:\n pass\n\n lessvars = copy.copy(web.tmpl.defaults[b'sessionvars'])\n lessvars[b'revcount'] = max(revcount // 2, 1)\n morevars = copy.copy(web.tmpl.defaults[b'sessionvars'])\n morevars[b'revcount'] = revcount * 2\n\n count = len(web.repo)\n pos = ctx.rev()\n parity = paritygen(web.stripecount)\n\n changenav = webutil.revnav(web.repo).gen(pos, revcount, count)\n\n entries = list(changelist(revcount + 1))\n latestentry = entries[:1]\n if len(entries) > revcount:\n nextentry = entries[-1:]\n entries = entries[:-1]\n else:\n nextentry = []\n\n return web.sendtemplate(\n b'shortlog' if shortlog else b'changelog',\n changenav=changenav,\n node=ctx.hex(),\n rev=pos,\n symrev=symrev,\n changesets=count,\n entries=templateutil.mappinglist(entries),\n latestentry=templateutil.mappinglist(latestentry),\n nextentry=templateutil.mappinglist(nextentry),\n archives=web.archivelist(b'tip'),\n revcount=revcount,\n morevars=morevars,\n lessvars=lessvars,\n query=query,\n )",
"def dl_history():\n with open(list_file, 'a+t') as f: \n # Nothing to add to our list\n if file_name is None:\n # error for no data\n sys.stderr.write(\"\\n[!] There was no history \" \\\n \"occurance to write to history file.\")\n else:\n print(\"\\n[+] Writing a history occurance to \" \\\n \"{0}.\".format(list_file))\n f.write(\"\\n\" + strftime(\"@ %H:%M:%S on %A %d %B %Y\", \\\n gmtime()) + \"\\n\" + prfx + file_name + \"\\n\")\n f.close()\n print(\"\\n[+] Done!\")",
"def log_diffs_to_file(latest_file_path, latest_file_ms, track_index, message_index):\n with open(os.path.join(os.path.dirname(latest_file_path), \"changes.gitbit\"), \"a\") as changes_log_file:\n changes_log_file.write(str(track_index) + \":\" + str(message_index) + \":\" + str(latest_file_ms) + \"\\n\")",
"def svn_file_invoke_rev_handler(*args) -> \"svn_txdelta_window_handler_t *, void **\":\n return _delta.svn_file_invoke_rev_handler(*args)",
"def cli_get_history(options):\n for docid in cli_docids_iterator(options):\n bibdoc = BibDoc(docid)\n history = bibdoc.get_history()\n for row in history:\n print_info(bibdoc.get_recid(), docid, row)",
"def file_change_history(self, branch='master', limit=None, days=None, ignore_globs=None, include_globs=None):\n\n if limit is not None:\n limit = int(limit / len(self.repo_dirs))\n\n df = pd.DataFrame(columns=['repository', 'date', 'author', 'committer', 'message', 'rev', 'filename', 'insertions', 'deletions'])\n\n for repo in self.repos:\n try:\n ch = repo.file_change_history(\n branch,\n limit=limit,\n days=days,\n ignore_globs=ignore_globs,\n include_globs=include_globs\n )\n ch['repository'] = repo.repo_name\n df = df.append(ch)\n except GitCommandError:\n print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch))\n\n df.reset_index()\n\n return df"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/archive/{revision}.{format}[/{path}] Obtain an archive of repository content. The content and type of the archive are defined by a URL path parameter. ``format`` is the file extension of the archive type to be generated, e.g. ``zip`` or ``tar.bz2``. Not all archive types may be allowed by your server configuration. The optional ``path`` URL parameter controls which content to include in the archive. If omitted, every file in the specified revision is present in the archive. If included, only the specified file or the contents of the specified directory will be included in the archive. No template is used for this handler. Raw, binary content is generated.
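A hedged example of fetching such an archive from a client; the URL and output filename are placeholders, and ``requests`` is assumed only for illustration:

import requests

# Download a zip archive of the ``docs`` subdirectory at revision ``tip``
# and stream the raw bytes to disk.
url = "https://hg.example.org/repo/archive/tip.zip/docs"  # placeholder URL
with requests.get(url, stream=True) as resp:
    resp.raise_for_status()
    with open("repo-docs-tip.zip", "wb") as fh:
        for chunk in resp.iter_content(chunk_size=64 * 1024):
            fh.write(chunk)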
|
def archive(web):
type_ = web.req.qsparams.get(b'type')
allowed = web.configlist(b"web", b"allow-archive")
key = web.req.qsparams[b'node']
if type_ not in webutil.archivespecs:
msg = b'Unsupported archive type: %s' % stringutil.pprint(type_)
raise ErrorResponse(HTTP_NOT_FOUND, msg)
if not ((type_ in allowed or web.configbool(b"web", b"allow" + type_))):
msg = b'Archive type not allowed: %s' % type_
raise ErrorResponse(HTTP_FORBIDDEN, msg)
reponame = re.sub(br"\W+", b"-", os.path.basename(web.reponame))
cnode = web.repo.lookup(key)
arch_version = key
if cnode == key or key == b'tip':
arch_version = short(cnode)
name = b"%s-%s" % (reponame, arch_version)
ctx = webutil.changectx(web.repo, web.req)
match = scmutil.match(ctx, [])
file = web.req.qsparams.get(b'file')
if file:
pats = [b'path:' + file]
match = scmutil.match(ctx, pats, default=b'path')
if pats:
files = [f for f in ctx.manifest().keys() if match(f)]
if not files:
raise ErrorResponse(
HTTP_NOT_FOUND, b'file(s) not found: %s' % file
)
mimetype, artype, extension, encoding = webutil.archivespecs[type_]
web.res.headers[b'Content-Type'] = mimetype
web.res.headers[b'Content-Disposition'] = b'attachment; filename=%s%s' % (
name,
extension,
)
if encoding:
web.res.headers[b'Content-Encoding'] = encoding
web.res.setbodywillwrite()
if list(web.res.sendresponse()):
raise error.ProgrammingError(
b'sendresponse() should not emit data if writing later'
)
bodyfh = web.res.getbodyfile()
archival.archive(
web.repo,
bodyfh,
cnode,
artype,
prefix=name,
match=match,
subrepos=web.configbool(b"web", b"archivesubrepos"),
)
return []
|
[
"def archive(\n repo,\n dest,\n node,\n kind,\n decode=True,\n match=None,\n prefix=b'',\n mtime=None,\n subrepos=False,\n):\n\n if kind == b'txz' and not pycompat.ispy3:\n raise error.Abort(_(b'xz compression is only available in Python 3'))\n\n if kind == b'files':\n if prefix:\n raise error.Abort(_(b'cannot give prefix when archiving to files'))\n else:\n prefix = tidyprefix(dest, kind, prefix)\n\n def write(name, mode, islink, getdata):\n data = getdata()\n if decode:\n data = repo.wwritedata(name, data)\n archiver.addfile(prefix + name, mode, islink, data)\n\n if kind not in archivers:\n raise error.Abort(_(b\"unknown archive type '%s'\") % kind)\n\n ctx = repo[node]\n archiver = archivers[kind](dest, mtime or ctx.date()[0])\n\n if not match:\n match = scmutil.matchall(repo)\n\n if repo.ui.configbool(b\"ui\", b\"archivemeta\"):\n name = b'.hg_archival.txt'\n if match(name):\n write(name, 0o644, False, lambda: buildmetadata(ctx))\n\n files = list(ctx.manifest().walk(match))\n total = len(files)\n if total:\n files.sort()\n scmutil.prefetchfiles(\n repo, [(ctx.rev(), scmutil.matchfiles(repo, files))]\n )\n progress = repo.ui.makeprogress(\n _(b'archiving'), unit=_(b'files'), total=total\n )\n progress.update(0)\n for f in files:\n ff = ctx.flags(f)\n write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, ctx[f].data)\n progress.increment(item=f)\n progress.complete()\n\n if subrepos:\n for subpath in sorted(ctx.substate):\n sub = ctx.workingsub(subpath)\n submatch = matchmod.subdirmatcher(subpath, match)\n subprefix = prefix + subpath + b'/'\n total += sub.archive(archiver, subprefix, submatch, decode)\n\n if total == 0:\n raise error.Abort(_(b'no files match the archive pattern'))\n\n archiver.done()\n return total",
"def archive(request, id):\n try:\n token = Dir.objects.get(pk=id).archieve_token()\n except:\n return HttpResponse(status=400)\n \n response = HttpResponse(settings.ARCHIVES_URL + token + '.zip', status=201)\n response['Content-Disposition'] = 'attachment; filename={}'.format(token)\n\n return response",
"def _create_archive_obj(self):\n ArtifactoryPath = self.cls\n folder = ArtifactoryPath(\"http://b/artifactory/reponame/folder\")\n constructed_url = \"http://b/artifactory/api/storage/reponame/folder\"\n responses.add(\n responses.GET,\n constructed_url,\n status=200,\n json=self.dir_stat,\n )\n archive_obj = folder.archive(check_sum=True)\n return archive_obj",
"def get_archive(filter: Optional[pulumi.InputType['GetArchiveFilterArgs']] = None,\n os_type: Optional[str] = None,\n zone: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetArchiveResult:\n __args__ = dict()\n __args__['filter'] = filter\n __args__['osType'] = os_type\n __args__['zone'] = zone\n if opts is None:\n opts = pulumi.InvokeOptions()\n if opts.version is None:\n opts.version = _utilities.get_version()\n __ret__ = pulumi.runtime.invoke('sakuracloud:index/getArchive:getArchive', __args__, opts=opts, typ=GetArchiveResult).value\n\n return AwaitableGetArchiveResult(\n description=__ret__.description,\n filter=__ret__.filter,\n icon_id=__ret__.icon_id,\n id=__ret__.id,\n name=__ret__.name,\n os_type=__ret__.os_type,\n size=__ret__.size,\n tags=__ret__.tags,\n zone=__ret__.zone)",
"def archive():\n os.chdir(BASE_DIR)\n p = run(\n 'git archive --format=tar.gz -9 --output ' +\n BASE_DIR + '/archive/kehia.tar.gz HEAD'\n )\n _fail_loudly(p)\n click.echo('Created source archive in /archive')",
"def github_archive(name, repo, commit, local_override=None,\n sha256=None, **kwargs):\n\n if local_override:\n native.local_repository(\n name = name,\n path = local_override,\n )\n else:\n http_archive(\n name = name,\n url = \"https://github.com/{repo}/archive/{commit}.zip\".format(\n repo=repo, commit=commit),\n strip_prefix = \"{}-{}\".format(repo.rsplit('/', 1)[-1], commit),\n sha256 = sha256 or \"0000000000000000000000000000000000000000000000000000000000000000\",\n **kwargs)",
"def archive():\n try:\n years = []\n months = {}\n entries_by_month = {}\n entries = reversed(get_entries())\n for entry in entries:\n if entry['year'] not in years:\n years.append(entry['year'])\n months[entry['year']] = []\n if entry['month'] not in months[entry['year']]:\n months[entry['year']].append(entry['month'])\n entries_by_month[entry['month']] = []\n entries_by_month[entry['month']].append(entry)\n print repr(entries_by_month)\n except (ValueError, TypeError, KeyError, OSError, IOError):\n return render_template('index.html',\n error=\"There was an error reading the journal.\")\n else:\n return render_template(\"archive.html\", years=years, months=months,\n entries=entries_by_month)",
"def archive_directive(name, arguments, options, content, lineno,\n content_offset, block_text, state, state_machine):\n environment = state.document.settings.env\n static_path = environment.config.html_static_path[0]\n\n directory = arguments[0]\n\n if options.has_key('file'):\n filename = options['file']\n else:\n filename = os.path.basename(directory.rstrip(os.sep)) + '.zip'\n\n archive_file = zipfile.ZipFile(os.path.dirname(os.path.abspath(__file__))\n + '%s%s%s' % (os.sep, static_path, os.sep)\n + filename, \"w\")\n\n if directory.startswith(os.sep):\n dir = directory\n else:\n dir = os.path.normpath(os.path.join(environment.config.code_path,\n directory))\n\n for root, dirs, files in os.walk(dir,topdown=False):\n for name in files:\n file = os.path.join(root, name)\n zipfilename = string.replace(file, dir, '')\n if zipfilename[0] == os.sep:\n zipfilename = zipfilename[1:]\n archive_file.write(file, str(zipfilename), zipfile.ZIP_DEFLATED)\n\n archive_file.close()\n\n archive = util.relative_uri(state_machine.document.current_source,\n os.path.dirname(os.path.abspath(__file__))\n + '%s%s%s' % (os.sep, static_path, os.sep)) \\\n + filename\n\n role = roles.CustomRole(role_name, archive_role,\n {'archive' : archive},\n content)\n roles.register_local_role(role_name, role)\n return",
"def getArchivalObject(repo, asid):\n # supply repo and id\n headers = ASAuthenticate()\n endpoint = \"repositories/\" + str(repo) + \"/archival_objects/\" + str(asid)\n output = getIt(endpoint, headers=headers)\n output = json.dumps(output)\n return output",
"def archive_urls(path: str) -> None:\n if os.path.isfile(path):\n archive_path = io.archive_file(path)\n print(f\"Archived '{path}' to '{archive_path}'.\")",
"def test_archive_download(self):\n archive_obj = self._create_archive_obj()\n constructed_url = \"http://b/artifactory/api/archive/download/reponame/folder\"\n responses.add(\n responses.GET,\n constructed_url,\n status=200,\n json=self.dir_stat,\n )\n archive_obj.writeto(\"test.zip\")\n reference_params = {\"archiveType\": \"zip\", \"includeChecksumFiles\": \"True\"}\n # check that params were really added to the request\n self.assertDictEqual(responses.calls[1].request.params, reference_params)",
"def write_archive(self, treeish, archive, timestamp=None, prefix=''):\n\n # Try to get a tree form whatever we got\n if isinstance(treeish, Tree):\n tree = treeish\n\n if isinstance(treeish, Oid) or is_string(treeish):\n treeish = self[treeish]\n\n # if we don't have a timestamp, try to get it from a commit\n if not timestamp:\n try:\n commit = treeish.peel(Commit)\n timestamp = commit.committer.time\n except Exception:\n pass\n\n # as a last resort, use the current timestamp\n if not timestamp:\n timestamp = int(time())\n\n tree = treeish.peel(Tree)\n\n index = Index()\n index.read_tree(tree)\n\n for entry in index:\n content = self[entry.id].read_raw()\n info = tarfile.TarInfo(prefix + entry.path)\n info.size = len(content)\n info.mtime = timestamp\n info.uname = info.gname = 'root' # just because git does this\n if entry.mode == GIT_FILEMODE_LINK:\n info.type = tarfile.SYMTYPE\n info.linkname = content.decode(\"utf-8\")\n info.mode = 0o777 # symlinks get placeholder\n info.size = 0\n archive.addfile(info)\n else:\n archive.addfile(info, StringIO(content))",
"def archive_repo(languages):\n cmdline = [\"tar\", \"-cf\", \"../grpc.tar\", \"../grpc/\"]\n if \"java\" in languages:\n cmdline.append(\"../grpc-java\")\n if \"go\" in languages:\n cmdline.append(\"../grpc-go\")\n if \"node\" in languages:\n cmdline.append(\"../grpc-node\")\n\n archive_job = jobset.JobSpec(\n cmdline=cmdline, shortname=\"archive_repo\", timeout_seconds=3 * 60\n )\n\n jobset.message(\"START\", \"Archiving local repository.\", do_newline=True)\n num_failures, _ = jobset.run(\n [archive_job], newline_on_success=True, maxjobs=1\n )\n if num_failures == 0:\n jobset.message(\n \"SUCCESS\",\n \"Archive with local repository created successfully.\",\n do_newline=True,\n )\n else:\n jobset.message(\n \"FAILED\", \"Failed to archive local repository.\", do_newline=True\n )\n sys.exit(1)",
"def make_archive (base_name, format,\r\n root_dir=None, base_dir=None,\r\n verbose=0, dry_run=0):\r\n save_cwd = os.getcwd()\r\n if root_dir is not None:\r\n log.debug(\"changing into '%s'\", root_dir)\r\n base_name = os.path.abspath(base_name)\r\n if not dry_run:\r\n os.chdir(root_dir)\r\n\r\n if base_dir is None:\r\n base_dir = os.curdir\r\n\r\n kwargs = { 'dry_run': dry_run }\r\n\r\n try:\r\n format_info = ARCHIVE_FORMATS[format]\r\n except KeyError:\r\n raise ValueError, \"unknown archive format '%s'\" % format\r\n\r\n func = format_info[0]\r\n for (arg,val) in format_info[1]:\r\n kwargs[arg] = val\r\n filename = apply(func, (base_name, base_dir), kwargs)\r\n\r\n if root_dir is not None:\r\n log.debug(\"changing back to '%s'\", save_cwd)\r\n os.chdir(save_cwd)\r\n\r\n return filename",
"def archive(obj):\n return match(obj, archive_matchers)",
"def link_archive(request, year=None):\n site = get_current_site(request)\n if not year:\n year = date.today().year\n links = Link.objects.filter(publish_from__year=year, public=True, sites__id=site.id)\n return load_template(request, site, 'link_archive.html', {'links': links})",
"def download_content_indices_file(architecture, path):\n\n url = \"http://ftp.uk.debian.org/debian/dists/stable/main/\"\n try:\n r = requests.get(url+'/Contents-' + architecture +'.gz', allow_redirects= True) # download the file with the specified url\n open(path + '/Contents-' + architecture + '.gz', 'wb').write(r.content) # write the contents of the downloaded file to a file in the specified directory\n except:\n raise Exception ('Error, could not download file.')\n\n file = gzip.open(path + '/Contents-' + architecture + '.gz', 'rb') # open a .gz file\n data = file.read() # read file contents in bytes\n file.close()\n final_file = open(path + '/outputFile', 'wb')\n final_file.write(data) # write the content to a .File extension file in the specified directory\n final_file.close()\n return",
"def download(edition, version, path=\".\"):\n archive_name = dist_archive_name(edition, version)\n uri = \"%s://%s/%s\" % (DIST_SCHEME, DIST_HOST, archive_name)\n filename = os.path.join(os.path.abspath(path), archive_name)\n _download(uri, filename)\n return filename",
"def repository_path_contents_url(namespace, repository, revision, path):\n return _BASE_URL_V1 % ('repositories/%s/%s/src/%s/%s' % (namespace, repository, revision, path))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/graph[/{revision}] Show information about the graphical topology of the repository. Information rendered by this handler can be used to create visual representations of repository topology. The ``revision`` URL parameter controls the starting changeset. If it's absent, the default is ``tip``. The ``revcount`` query string argument can define the number of changesets to show information for. The ``graphtop`` query string argument can specify the starting changeset for producing ``jsdata`` variable that is used for rendering graph in JavaScript. By default it has the same value as ``revision``. This handler will render the ``graph`` template.
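Before the handler below, an illustrative sketch of how the URL and query-string parameters just described might be used from a client. The server base URL and parameter values are hypothetical assumptions, not taken from this handler.

from urllib.parse import urlencode
from urllib.request import urlopen

# Hypothetical server location -- an assumption for illustration only.
BASE = "http://hg.example.com/repo"

def fetch_graph_page(revision="tip", revcount=60, graphtop=None):
    # /graph[/{revision}] renders the ``graph`` template for ``revcount`` changesets.
    params = {"revcount": revcount}
    if graphtop is not None:
        # Starting changeset for the jsdata variable used by the JavaScript graph.
        params["graphtop"] = graphtop
    url = f"{BASE}/graph/{revision}?{urlencode(params)}"
    with urlopen(url) as resp:
        return resp.read().decode("utf-8", "replace")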
|
def graph(web):
if b'node' in web.req.qsparams:
ctx = webutil.changectx(web.repo, web.req)
symrev = webutil.symrevorshortnode(web.req, ctx)
else:
ctx = web.repo[b'tip']
symrev = b'tip'
rev = ctx.rev()
bg_height = 39
revcount = web.maxshortchanges
if b'revcount' in web.req.qsparams:
try:
revcount = int(web.req.qsparams.get(b'revcount', revcount))
revcount = max(revcount, 1)
web.tmpl.defaults[b'sessionvars'][b'revcount'] = revcount
except ValueError:
pass
lessvars = copy.copy(web.tmpl.defaults[b'sessionvars'])
lessvars[b'revcount'] = max(revcount // 2, 1)
morevars = copy.copy(web.tmpl.defaults[b'sessionvars'])
morevars[b'revcount'] = revcount * 2
graphtop = web.req.qsparams.get(b'graphtop', ctx.hex())
graphvars = copy.copy(web.tmpl.defaults[b'sessionvars'])
graphvars[b'graphtop'] = graphtop
count = len(web.repo)
pos = rev
uprev = min(max(0, count - 1), rev + revcount)
downrev = max(0, rev - revcount)
changenav = webutil.revnav(web.repo).gen(pos, revcount, count)
tree = []
nextentry = []
lastrev = 0
if pos != -1:
allrevs = web.repo.changelog.revs(pos, 0)
revs = []
for i in allrevs:
revs.append(i)
if len(revs) >= revcount + 1:
break
if len(revs) > revcount:
nextentry = [webutil.commonentry(web.repo, web.repo[revs[-1]])]
revs = revs[:-1]
lastrev = revs[-1]
# We have to feed a baseset to dagwalker as it is expecting smartset
# object. This does not have a big impact on hgweb performance itself
# since hgweb graphing code is not itself lazy yet.
dag = graphmod.dagwalker(web.repo, smartset.baseset(revs))
# As we said one line above... not lazy.
tree = list(
item
for item in graphmod.colored(dag, web.repo)
if item[1] == graphmod.CHANGESET
)
def fulltree():
pos = web.repo[graphtop].rev()
tree = []
if pos != -1:
revs = web.repo.changelog.revs(pos, lastrev)
dag = graphmod.dagwalker(web.repo, smartset.baseset(revs))
tree = list(
item
for item in graphmod.colored(dag, web.repo)
if item[1] == graphmod.CHANGESET
)
return tree
def jsdata(context):
for (id, type, ctx, vtx, edges) in fulltree():
yield {
b'node': pycompat.bytestr(ctx),
b'graphnode': webutil.getgraphnode(web.repo, ctx),
b'vertex': vtx,
b'edges': edges,
}
def nodes(context):
parity = paritygen(web.stripecount)
for row, (id, type, ctx, vtx, edges) in enumerate(tree):
entry = webutil.commonentry(web.repo, ctx)
edgedata = [
{
b'col': edge[0],
b'nextcol': edge[1],
b'color': (edge[2] - 1) % 6 + 1,
b'width': edge[3],
b'bcolor': edge[4],
}
for edge in edges
]
entry.update(
{
b'col': vtx[0],
b'color': (vtx[1] - 1) % 6 + 1,
b'parity': next(parity),
b'edges': templateutil.mappinglist(edgedata),
b'row': row,
b'nextrow': row + 1,
}
)
yield entry
rows = len(tree)
return web.sendtemplate(
b'graph',
rev=rev,
symrev=symrev,
revcount=revcount,
uprev=uprev,
lessvars=lessvars,
morevars=morevars,
downrev=downrev,
graphvars=graphvars,
rows=rows,
bg_height=bg_height,
changesets=count,
nextentry=templateutil.mappinglist(nextentry),
jsdata=templateutil.mappinggenerator(jsdata),
nodes=templateutil.mappinggenerator(nodes),
node=ctx.hex(),
archives=web.archivelist(b'tip'),
changenav=changenav,
)
|
[
"def graph():\n return render_template('main/graph.html')",
"def get_graph(request):\r\n data = {\"links\":[]}\r\n name = memcache.get(\"name\")\r\n id = memcache.get(\"id\")\r\n if 'graphid' not in request.session:\r\n nodes = memcache.get(\"nodes\")\r\n if not name or not nodes:\r\n g = random.choice([g for g in Graph.all()])\r\n memcache.set(\"nodes\",g.nodes)\r\n memcache.set(\"name\", g.name)\r\n nodes = g.nodes\r\n name = g.name\r\n else:\r\n nodes = memcache.get(\"nodes\")\r\n if not id or id != int(request.session['graphid']) or not nodes or not name:\r\n g = Graph.get_by_id(int(request.session['graphid']))\r\n memcache.set(\"nodes\",g.nodes)\r\n memcache.set(\"name\", g.name)\r\n memcache.set(\"id\", g.key().id())\r\n nodes = g.nodes\r\n name = g.name\r\n\r\n nodes = simplejson.loads(nodes)\r\n data['graphname'] = name\r\n\r\n for src, dst in nodes['nodes']:\r\n data[\"links\"].append({\"source\":src,\"target\":dst,\"value\":1})\r\n mimetype = 'application/javascript'\r\n return HttpResponse(simplejson.dumps(data), mimetype)",
"def graph_info(self):\n return self.graph.get_operations()",
"def edit_graph(args=None):\n from .edit_graph import main\n\n db = (\n None\n if not args.database\n else expand_database_argument(args.database, exist=True, hyphen_default=True)\n )\n\n return main(\n graph_output=args.output,\n graph_format=args.format,\n db_url=db,\n input=args.input,\n method=args.method,\n min_abundance=args.abundance,\n total_min_abundance=args.total,\n show_db_marker=args.marker,\n max_edit_dist=args.editdist,\n ignore_prefixes=tuple(args.ignore_prefixes),\n debug=args.verbose,\n )",
"def drawGraph(G, **kwargs):\n\twarn(\"networkit.viztasks.drawGraph is deprecated, will be removed in future updates. Use networkit.vizbridges instead to draw graphs (needs additional plugins).\")\n\tif not have_nx:\n\t\traise MissingDependencyError(\"networkx\")\n\tif not G.checkConsistency():\n\t\tprint(\"WARNING: Multi-graph has been converted to simple graph for display\")\n\t\tG.removeMultiEdges()\n\tnxG = nxadapter.nk2nx(G)\n\tif not \"node_size\" in kwargs:\n\t\tkwargs[\"node_size\"] = [30+270*s for s in centrality.DegreeCentrality(G,True).run().scores()]\n\tnetworkx.draw(nxG, **kwargs)",
"def drawGraph(graph, property, tag):\n edge_colors = [graph.edges[edge][property] for edge in graph.edges()]\n vmin = min(edge_colors)\n vmax = max(edge_colors)\n cmap = plt.get_cmap(config['COLORMAP1'])\n plt.figure()\n nx.draw(graph, pos=nx.get_node_attributes(graph, 'pos'), node_size=0,\n edge_color=edge_colors, node_shape='s',\n edge_cmap=cmap, width=1, edge_vmin=vmin, edge_vmax=vmax)\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin = vmin, vmax=vmax))\n sm._A = []\n plt.colorbar(sm, shrink=0.8)\n plt.savefig('./plots/edges_plots/edges_{}.png'.format(tag))\n plt.close()",
"def read_graph_struct(self, graph_name: str, revision: int = -1) -> GraphStruct:\n pass",
"def print_stats(graph):\n print(f\"The number of nodes is: {graph.number_of_edges()}\")\n print(f\"The number of edges is: {graph.number_of_nodes()}\") #These statements print out the number of nodes and edges in the graph",
"def gen_repo_graph(self, repo_list, show=True, write=False):\r\n repo_graph = self.graph.graph_repository(repo_list)\r\n\r\n if show:\r\n nx.draw(repo_graph, with_labels=True)\r\n plt.show()\r\n if write:\r\n networkx.write_gexf(repo_graph, os.getcwd() + '/repos.gexf')\r\n\r\n return repo_graph",
"def print_stats(graph: nx.DiGraph) -> None:\n print('mutations:')\n for op, count in get_mutation_stats(graph).items():\n print(' %s: %d' % (op, count))\n\n sources = [n for n, in_degree in graph.in_degree() if in_degree == 0]\n sinks = [n for n, out_degree in graph.out_degree() if out_degree == 0]\n min_len, max_len = get_path_stats(graph, sources, sinks)\n num_connected_components = nx.number_weakly_connected_components(graph)\n\n print('num. source nodes: %d' % len(sources))\n print('num. sink nodes: %d' % len(sinks))\n print('num. connected components: %d' % num_connected_components)\n print('shortest mutation chain: %d' % min_len)\n print('longest mutation chain: %d' % max_len)",
"def get_graph_for_revision(\n revision_or_range: str,\n database_gateway: IDatabaseGateway,\n project_gateway: IProjectGateway,\n) -> List[Dict]:\n all_objects = database_gateway.get_modified_objects_from_revision(revision_or_range=revision_or_range)\n\n change_types = (Project, Dataset, DatasetTag, Activity, Plan, CompositePlan)\n\n changed_objects = []\n\n for obj in all_objects:\n if isinstance(obj, change_types):\n changed_objects.append(obj)\n\n project = project_gateway.get_project()\n\n return _convert_entities_to_graph(changed_objects, project)",
"def show_graph(self, g, layout):\n\n # copy the positions from the layout into the Vertex objects\n for v in g.vertices():\n v.pos = layout.pos(v)\n \n # draw the edges and store the tags in self.etags, which maps\n # from Edges to their tags\n c = self.canvas\n self.etags = {}\n for v in g:\n self.etags[v] = [c.draw_edge(e) for e in g.out_edges(v)]\n\n # draw the vertices and store their tags in a list\n self.vtags = [c.draw_vertex(v) for v in g]",
"def view_dot_graph(graph, filename=None, view=False):\n # Optionally depends on graphviz package\n import graphviz as gv\n\n src = gv.Source(graph)\n if view:\n # Returns the output file path\n return src.render(filename, view=view)\n else:\n # Attempts to show the graph in IPython notebook\n try:\n __IPYTHON__\n except NameError:\n return src\n else:\n import IPython.display as display\n format = 'svg'\n return display.SVG(data=src.pipe(format))",
"def hypergraph_compute_plot_info_G(\n H,\n G,\n highlight=(),\n node_color=(.5, .5, .5, 1.0),\n edge_color=(0.0, 0.0, 0.0),\n edge_alpha=1 / 3,\n colormap='Spectral_r',\n centrality=False,\n sliced_inds=(),\n):\n import matplotlib as mpl\n import matplotlib.cm\n\n for e in G.edges:\n ix = G.edges[e]['ind']\n width = math.log2(H.size_dict.get(ix, 2))\n color = (\n (*edge_color[:3], edge_alpha)\n if ix not in highlight else\n (1.0, 0.0, 1.0, edge_alpha**0.5)\n )\n label = (ix if not G.edges[e]['hyperedge'] else '')\n\n G.edges[e]['color'] = color\n G.edges[e]['width'] = width\n G.edges[e]['label'] = label\n G.edges[e]['style'] = 'dotted' if ix in sliced_inds else 'solid'\n\n if centrality:\n if centrality == 'resistance':\n Cs = H.resistance_centrality()\n elif centrality == 'simple':\n Cs = H.simple_centrality()\n else:\n Cs = centrality\n\n if isinstance(colormap, mpl.colors.Colormap):\n cmap = colormap\n else:\n cmap = getattr(matplotlib.cm, colormap)\n\n for nd in G.nodes:\n if G.nodes[nd]['hyperedge']:\n color = (0., 0., 0., 0.)\n label = str(nd)\n else:\n if centrality:\n c = Cs[nd]\n G.nodes[nd]['centrality'] = c\n color = cmap(c)\n else:\n color = node_color\n label = f'{nd}' # H.inputs[nd]\n\n G.nodes[nd]['color'] = color\n G.nodes[nd]['label'] = label",
"def get_graph_path(owner, repo):\n return f\"data/projects/{owner}/{repo}/images/graphs\"",
"def SetGraph(self, *args):\n return _snap.TFfGGen_SetGraph(self, *args)",
"def hyperGraph(string, docTag=\"string\", look=float, opaqueContainers=bool, imagePosition=float, showRelationships=bool, useTemplate=\"string\", addBookmark=bool, nodePressCommand=\"string\", previousView=bool, attributeEditor=\"string\", downstream=bool, panel=\"string\", deleteBookmark=\"string\", dropNode=\"string\", dependNode=\"string\", frame=bool, layout=bool, dependGraph=bool, unlockMainConnection=bool, transitionFrames=int, showInvisible=bool, frameBranch=bool, frameGraph=bool, forceMainConnection=\"string\", range=float, enableAutomaticLayout=bool, graphLayoutStyle=\"string\", showConnectionFromSelected=bool, restoreBookmark=\"string\", showConnectionToSelected=bool, showDeformers=bool, fitImageToWidth=bool, defineTemplate=\"string\", parent=\"string\", rebuild=bool, dropTargetNode=\"string\", mergeConnections=bool, iconSize=\"string\", feedbackGadget=\"string\", collapseContainer=bool, getNodePosition=\"string\", stateString=bool, unfoldAll=bool, unfold=bool, control=bool, addDependNode=\"string\", selectionConnection=\"string\", imageEnabled=bool, showUnderworld=bool, animateTransition=bool, mainListConnection=\"string\", imageForContainer=bool, navigateHome=bool, layoutSelected=\"string\", showConstraints=bool, scrollUpDownNoZoom=bool, expandContainer=bool, image=\"string\", popupMenuScript=\"string\", dragAndDropBehaviorCommand=\"string\", lockMainConnection=bool, showExpressions=bool, graphType=\"string\", focusCommand=\"string\", nodeDropCommand=\"string\", updateMainConnection=bool, isHotkeyTarget=bool, resetFreeform=bool, getNodeList=bool, fromAttr=\"string\", edgeDimmedDblClickCommand=\"string\", clear=bool, viewOption=\"string\", bookmarkName=bool, showShapes=bool, edgeDblClickCommand=\"string\", down=bool, unParent=bool, connectionDrawStyle=\"string\", freeform=bool, filterDetail=\"string\", fold=bool, upstream=bool, zoom=float, highlightConnection=\"string\", visibility=bool, exists=bool, rename=bool, useFeedbackList=bool, nextView=bool, fitImageToHeight=bool, removeNode=\"string\", feedbackNode=\"string\", updateSelection=bool, imageScale=float, addDependGraph=\"string\", updateNodeAdded=bool, frameHierarchy=bool, nodeReleaseCommand=\"string\", setNodePosition=\"string\", forceRefresh=bool, filter=\"string\", orientation=\"string\"):\n pass",
"def branch_statistics(g):\n visited = np.zeros(max(g) + 1, dtype=bool)\n type_dict = {'tiptip': 0, 'tipjunction': 1, 'junctiontip': 1,\n 'junctionjunction': 2, 'pathpath': 3}\n result = []\n for node, data in g.nodes(data=True):\n if data['type'] == 'path' and not visited[node]:\n # we expand the path in either direction\n visited[node] = True\n left, right = g.neighbors(node)\n id0, d0, kind0 = _expand_path(g, node, left, visited)\n id1, d1, kind1 = _expand_path(g, node, right, visited)\n result.append([id0, id1, d0 + d1, type_dict[kind0 + kind1]])\n return np.array(result)",
"def output_graph(self):\n for n in self._nodes.values():\n print(str(n.get_name()) + \": \" + n.get_type())\n print(n.get_prior())\n print(n.get_neighbors())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/help[/{topic}] Render help documentation.
|
def help(web):
from .. import commands, help as helpmod # avoid cycle
topicname = web.req.qsparams.get(b'node')
if not topicname:
def topics(context):
for h in helpmod.helptable:
entries, summary, _doc = h[0:3]
yield {b'topic': entries[0], b'summary': summary}
early, other = [], []
primary = lambda s: s.partition(b'|')[0]
for c, e in pycompat.iteritems(commands.table):
doc = _getdoc(e)
if b'DEPRECATED' in doc or c.startswith(b'debug'):
continue
cmd = primary(c)
if getattr(e[0], 'helpbasic', False):
early.append((cmd, doc))
else:
other.append((cmd, doc))
early.sort()
other.sort()
def earlycommands(context):
for c, doc in early:
yield {b'topic': c, b'summary': doc}
def othercommands(context):
for c, doc in other:
yield {b'topic': c, b'summary': doc}
return web.sendtemplate(
b'helptopics',
topics=templateutil.mappinggenerator(topics),
earlycommands=templateutil.mappinggenerator(earlycommands),
othercommands=templateutil.mappinggenerator(othercommands),
title=b'Index',
)
# Render an index of sub-topics.
if topicname in helpmod.subtopics:
topics = []
for entries, summary, _doc in helpmod.subtopics[topicname]:
topics.append(
{
b'topic': b'%s.%s' % (topicname, entries[0]),
b'basename': entries[0],
b'summary': summary,
}
)
return web.sendtemplate(
b'helptopics',
topics=templateutil.mappinglist(topics),
title=topicname,
subindex=True,
)
u = webutil.wsgiui.load()
u.verbose = True
# Render a page from a sub-topic.
if b'.' in topicname:
# TODO implement support for rendering sections, like
# `hg help` works.
topic, subtopic = topicname.split(b'.', 1)
if topic not in helpmod.subtopics:
raise ErrorResponse(HTTP_NOT_FOUND)
else:
topic = topicname
subtopic = None
try:
doc = helpmod.help_(u, commands, topic, subtopic=subtopic)
except error.Abort:
raise ErrorResponse(HTTP_NOT_FOUND)
return web.sendtemplate(b'help', topic=topicname, doc=doc)
|
[
"def api_help():\n docs = [{'name': route.__name__, 'value': route.__doc__}\n for route in [aliases, connections, databases, fetch, now, reports]]\n return render_template('help.html', docs=docs, version=__version__, url_root=request.url_root)",
"def show_help_topic(self):\n \n pass",
"def help():\n#TODO Complete for people using the interface\n return render_template('help.html')",
"def help(self):\n return render('/help.html')",
"def do_help(self, arg):\n\n return stem.interpreter.help.response(self._controller, arg)",
"def topic(request, id, template_name=\"help_files/topic.html\"):\n topic = get_object_or_404(Topic, pk=id)\n query = None\n\n filters = get_query_filters(request.user, 'help_files.view_helpfile')\n help_files = HelpFile.objects.filter(filters).filter(topics__in=[topic.pk]).distinct()\n if not request.user.is_anonymous():\n help_files = help_files.select_related()\n\n EventLog.objects.log(instance=topic)\n\n return render_to_response(template_name, {'topic':topic, 'help_files':help_files},\n context_instance=RequestContext(request))",
"def help(name=None):\n\n if name is None:\n name = \"help\"\n\n task = crawl(name, state.commands)\n if isinstance(task, Task):\n doc = getattr(task, \"__doc__\", None)\n if doc is not None:\n print(\"Help on {0:s}:\".format(name))\n print(doc)\n else:\n print(\"No help available for {0:s}\".format(name))\n else:\n print(\"No such task {0:s}\".format(name))\n print(\"For a list of tasks type: fab -l\")",
"def help(): # real signature unknown; restored from __doc__\n pass",
"def help(self):\n\n self.help_wdw = HelpWindow()\n self.help_wdw.show()",
"def help_help(self):\n return self.run_subcommand([\"help\", \"help\"])",
"def HelpLink(self) -> str:",
"def help():\n webbrowser.open('https://github.com/llinkz/directTDoA/wiki/Help')",
"def help(self):\n help = self.__doc__\n if help is None:\n return 'No additional help.'\n return formatHelp(help)[1]",
"def show_help():\n print __doc__\n sys.exit()",
"def help(self):\n sock = self.__connect_socket()\n message = json.dumps({\"name\":\"_help\"})\n\n return self.__send_and_recv(sock,message)",
"def help(self, dummy):\n doc = self.doc\n if not doc:\n doc = \"No help available.\"\n elif doc.find(\"%s\") > 0:\n doc = doc.replace(\"%s\", self.progname)\n print(doc, end='')\n sys.exit(0)",
"def showhelp():\n\tusage()",
"def show_app_help(self):\n subprocess.Popen([\"python\", resource_find(\"help_view.py\"), self.help_text])",
"def test_help_device(self):\r\n for section in range(len(fygen_help.SECTIONS)):\r\n fygen.help(section, 'fy2300', self.output)\r\n self.assertIn('Other Help Sections', self.output.getvalue())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checking that getting rotation matrices from diffpy.structure works without issue.
|
def test_get_rotation_matrix_from_diffpy(self):
r = Rotation.from_matrix([i.R for i in sg225.symop_list])
assert not np.isnan(r.data).any()
|
[
"def test_rotation_matrix_conversions(self):\n from clifford.g3c import layout\n from clifford.tools.g3 import rotation_matrix_to_rotor, rotor_to_rotation_matrix\n e1 = layout.blades['e1']\n e2 = layout.blades['e2']\n\n rotor = e1*e2\n print(rotor)\n matrix = rotor_to_rotation_matrix(rotor)\n print(matrix)\n rotor_return = rotation_matrix_to_rotor(matrix)\n print(rotor_return)\n testing.assert_almost_equal(rotor.value, rotor_return.value)",
"def getRotationMatrix( self):",
"def test_rotate_matrix(self):\n # Simple 2x2 and 3x3 cases\n mat1 = [(1, 2), (3, 4)]\n mat2 = [(1, 2, 3), (4, 5, 6), (7, 8, 9)]\n mat1_single_rotation = [(3, 1), (4, 2)]\n mat2_single_rotation = [(7, 4, 1), (8, 5, 2), (9, 6, 3)]\n\n # Test a single rotation\n self.assertListEqual(rotate_matrix(mat1), mat1_single_rotation)\n self.assertListEqual(rotate_matrix(mat2), mat2_single_rotation)\n\n # Test 4 rotations returns the original matrix\n mat2_rotated = deepcopy(mat2)\n for i in range(4):\n mat2_rotated = rotate_matrix(mat2_rotated)\n self.assertListEqual(mat2_rotated, mat2)",
"def _extract_rotation_matrix(self, mod):\n r = np.matrix(\n [\n [mod.rot_xu.item(), mod.rot_xv.item(), mod.rot_xw.item()],\n [mod.rot_yu.item(), mod.rot_yv.item(), mod.rot_yw.item()],\n [mod.rot_zu.item(), mod.rot_zv.item(), mod.rot_zw.item()],\n ]\n )\n return r",
"def test_field_rotation_matrix_is_correct_near_zenith() -> None:\n\n alt = 89.9 / 180.0 * np.pi\n az = 45.0 / 180.0 * np.pi\n lat = -30.24463 / 180.0 * np.pi\n dt = 1.0\n t = np.linspace(0.0, dt, 100)\n\n field_rot_matrix = diffraction.prepare_field_rotation_matrix(\n latitude=lat, altitude=alt, azimuth=az\n )\n rot = field_rot_matrix(np.array([t[-1]]))\n\n e_star = diffraction.star_trace(latitude=lat, altitude=alt, azimuth=az, t=t)\n alt_t = np.arctan2(e_star[:, 2], np.hypot(e_star[:, 0], e_star[:, 1]))\n az_t = np.arctan2(e_star[:, 0], e_star[:, 1])\n rate = diffraction.OMEGA_EARTH * np.cos(lat) * np.cos(az_t) / np.cos(alt_t)\n # Expected field rotation angle is the integral over the rate:\n expected_angle = np.trapz(rate, t)\n\n alpha = np.arctan2(rot[0, 0, 1], rot[0, 0, 0])\n np.testing.assert_allclose(alpha, expected_angle, rtol=1.0e-7)",
"def test_generate_rotation_rotor_and_angle(self):\n from clifford.tools.g3 import generate_rotation_rotor, random_unit_vector, angle_between_vectors\n\n euc_vector_m = random_unit_vector()\n euc_vector_n = random_unit_vector()\n theta = angle_between_vectors(euc_vector_m, euc_vector_n)\n print(theta)\n\n rot_rotor = generate_rotation_rotor(theta, euc_vector_m, euc_vector_n)\n v1 = euc_vector_m\n v2 = rot_rotor*euc_vector_m*~rot_rotor\n theta_return = angle_between_vectors(v1, v2)\n print(theta_return)\n\n testing.assert_almost_equal(theta_return, theta)\n testing.assert_almost_equal(euc_vector_n.value, v2.value)",
"def check_rotation(raster_one, raster_two):\n test = (raster_one.rotone == raster_two.rotone) and \\\n (raster_one.rottwo == raster_two.rottwo)\n return test",
"def test_quaternion_conversions(self):\n from clifford.g3c import layout\n from clifford.tools.g3 import rotor_to_quaternion, quaternion_to_rotor\n e1 = layout.blades['e1']\n e2 = layout.blades['e2']\n rotor = e1*e2\n print(rotor)\n quaternion = rotor_to_quaternion(rotor)\n print(quaternion)\n rotor_return = quaternion_to_rotor(quaternion)\n print(rotor_return)\n testing.assert_almost_equal(rotor.value, rotor_return.value)",
"def _make_rotation_matrix_from_reprs(start_representation, end_representation):\n A = start_representation.to_cartesian()\n B = end_representation.to_cartesian()\n rotation_axis = A.cross(B)\n rotation_angle = -np.arccos(\n A.dot(B) / (A.norm() * B.norm())\n ) # negation is required\n\n # This line works around some input/output quirks of Astropy's rotation_matrix()\n matrix = np.array(rotation_matrix(rotation_angle, rotation_axis.xyz.value.tolist()))\n return matrix",
"def findRotation(a1,b1,c1,a2,b2,c2):\n translation = a2-a1 \n d1 = b1-a1\n d2 = b2-a2\n e1 = c1-a1\n e2 = c2-a2\n f1 = numpy.cross(d1,e1)\n f2 = numpy.cross(d2,e2)\n nd1 = numpy.linalg.norm(d1)\n nd2 = numpy.linalg.norm(d2)\n ne1 = numpy.linalg.norm(e1)\n ne2 = numpy.linalg.norm(e2)\n nf1 = numpy.linalg.norm(f1)\n nf2 = numpy.linalg.norm(f2)\n assert abs(nd2-nd1)+abs(ne2-ne1)+abs(nf2-nf1) < 1.0e-10, 'ERROR: the inputted vectors do no represent srigid body rotation %f, %f, %f' % (abs(nd2-nd1),abs(ne2-ne1),abs(nf2-nf1))\n d1 /= nd1\n d2 /= nd2\n e1 /= ne1\n e2 /= ne2\n f1 /= nf1\n f2 /= nf2\n g1 = numpy.vstack([d1,e1,f1]).T \n g2 = numpy.vstack([d2,e2,f2]).T \n # want to solve for M: g2 = M g1\n # M = g2*inv(g1)\n M = numpy.dot(g2,numpy.linalg.inv(g1))\n # we have a matrix of the rotation\n eigval,eigvec = numpy.linalg.eig(M) \n eigvalreal = numpy.real(eigval)\n index = 0\n for i in range(len(eigval)):\n if abs(eigval[i]-1.0) < 1.0e-10:\n index = i\n if abs(eigval[index]-1.0) > 1.0e-10:\n print 'ERROR: i did not find eigenvalue 1 for this matrix, i.e. it is not rotation '\n sys.exit(1)\n axis = numpy.real(eigvec[:,index])\n axis /= numpy.linalg.norm(axis) \n x = numpy.array([1.,0.,0.])\n y = numpy.array([0.,1.,0.])\n x = x - axis*numpy.dot(axis,x)\n y = y - axis*numpy.dot(axis,y)\n if numpy.linalg.norm(y) > numpy.linalg.norm(x):\n x = y \n xrot = numpy.dot(M,x)\n angle = findAngleOfRotation(axis,x,xrot)\n return translation,axis,angle",
"def test_qwc_rotation(self, pauli_ops, qwc_rot_sol):\n\n qwc_rot = qwc_rotation(pauli_ops)\n\n assert all(\n self.are_identical_rotation_gates(qwc_rot[i], qwc_rot_sol[i])\n for i in range(len(qwc_rot))\n )",
"def rotation_matrix_decompose(r):\n return numpy.array( (math.atan2(r[2][1],r[2][2]),\\\n math.atan2(-r[2][0],math.sqrt(r[2][1]*r[2][1]+r[2][2]*r[2][2])),\\\n math.atan2(r[1][0],r[0][0])))",
"def matchPositionOrientation():\n with ar_qui.ar_undoChunkOpen('Match Position and Orientation'):\n ar_gen.ar_matchPositionOrientation()",
"def test_generate_translation_rotor(self):\n from clifford import g3c\n layout = g3c.layout\n locals().update(g3c.blades)\n ep, en, up, down, homo, E0, ninf, no = (g3c.stuff[\"ep\"], g3c.stuff[\"en\"],\n g3c.stuff[\"up\"], g3c.stuff[\"down\"], g3c.stuff[\"homo\"],\n g3c.stuff[\"E0\"], g3c.stuff[\"einf\"], -g3c.stuff[\"eo\"])\n from clifford.tools.g3 import random_euc_mv\n from clifford.tools.g3c import generate_translation_rotor\n\n for i in range(100):\n rand = random_euc_mv()\n starting_point = up(random_euc_mv())\n r_trans = generate_translation_rotor(rand)\n end_point = r_trans*starting_point*~r_trans\n translation_vec = down(end_point) - down(starting_point)\n testing.assert_almost_equal(translation_vec.value, rand.value)",
"def rotation_mat2vec(R):\n TINY = 1e-15\n\n # Compute the trace of the rotation matrix plus one\n aux = np.sqrt(R.trace()+1.0)\n \n if aux > TINY: \n\n # Compute the associated quaternion. Notice: trace(R) + 1 = 4w^2\n quat = np.array([R[2,1]-R[1,2], R[0,2]-R[2,0], R[1,0]-R[0,1], .5*aux])\n quat[0:3] *= .5/aux\n \n # Compute the angle between 0 and PI (ensure that the last\n # quaternion element is in the range (-1,1))\n theta = 2*np.arccos(max(-1., min(quat[3], 1.)))\n\n # Normalize the rotation axis\n norma = max(np.sqrt((quat[0:3]**2).sum()), TINY)\n return (theta/norma)*quat[0:3]\n \n else: \n \n # Singularity case: theta == PI. In this case, the above\n # identification is not possible since w=0. \n x2 = .25*(1 + R[0][0]-R[1][1]-R[2][2])\n if x2 > TINY: \n xy = .5*R[1][0]\n xz = .5*R[2][0]\n n = np.array([x2,xy,xz])\n else: \n y2 = .25*(1 + R[1][1]-R[0][0]-R[2][2])\n if y2 > TINY: \n xy = .5*R[1][0]\n yz = .5*R[2][1]\n n = np.array([xy,y2,yz])\n else: \n z2 = .25*(1 + R[2][2]-R[0][0]-R[1][1])\n if z2 > TINY: \n xz = .5*R[2][0]\n yz = .5*R[2][1]\n n = np.array([xz,yz,z2])\n return np.pi*n/np.sqrt((n**2).sum())",
"def _rotation_trans_equations(cls, matrix, equations):\n return tuple(matrix * Matrix(equations))",
"def _rotation_matrix_from_crota(self):\n return super()._rotation_matrix_from_crota(crota_key='CROTA')",
"def match_orientation(obj = None, source = None,\n rotateOrder = True, rotateAxis = True): \n _str_func = 'match_orientation'\n \n obj = VALID.mNodeString(obj)\n source = VALID.mNodeString(source)\n \n log.debug(\"|{0}| >> obj:{1}\".format(_str_func,obj)) \n log.debug(\"|{0}| >> source:{1}\".format(_str_func,source))\n log.debug(\"|{0}| >> rotateOrder:{1}\".format(_str_func,rotateOrder))\n log.debug(\"|{0}| >> rotateAxis:{1}\".format(_str_func,rotateAxis))\n \n if not rotateOrder and not rotateAxis:\n raise ValueError,\"|{0}| >> Both rotateOrder and rotateAxis are False. Nothing to do...\".format(_str_func) \n \n #First gather children to parent away and shapes so they don't get messed up either\n _l_children = mc.listRelatives (obj, children = True,type='transform') or []\n _l_shapes = mc.listRelatives (obj, shapes = True, fullPath = True) or []\n _dup = False\n \n log.debug(\"|{0}| >> children:{1}\".format(_str_func,_l_children))\n log.debug(\"|{0}| >> shapes:{1}\".format(_str_func,_l_shapes))\n \n if _l_children:#...parent children to world as we'll be messing with stuff\n for i,c in enumerate(_l_children):\n _l_children[i] = parent_set(c,False)\n log.debug(\"|{0}| >> children:{1}\".format(_str_func,_l_children))\n \n if _l_shapes:#...dup our shapes to properly shape parent them back\n _dup = mc.duplicate(obj, parentOnly = False)[0]\n #log.debug(\"|{0}| >> dup:{1}\".format(_str_func,_dup))\n for s in _l_shapes:\n mc.delete(s)\n #shapeParent_in_place(_dup,s,keepSource=False) \n \n #The meat of it...\n _restorePivotRP = False\n _restorePivotSP = False\n \n if rotateAxis:\n log.debug(\"|{0}| >> rotateAxis...\".format(_str_func)) \n \n #There must be a better way to do this. Storing to be able to restore after matrix ops\n _restorePivotRP = mc.xform(obj, q=True, ws=True, rp = True)\n _restorePivotSP = mc.xform(obj, q=True, ws=True, sp = True)\n _restoreRO = mc.xform (obj, q=True, roo=True )\n \n #We do our stuff with a locator to get simple transferrable values after matching parents and what not...\n loc = locators.locMeObject(source)\n #..match ro before starting to do values\n \n parent_set(loc, parent_get(obj))#...match parent\n \n mc.xform(loc, ws = True, t = mc.xform(obj, q=True, ws = True, rp = True))#...snap\n #mc.xform(loc, roo = mc.xform (obj, q=True, roo=True ), p=True)#...match rotateOrder\n mc.xform(loc, roo = 'xyz', p=True)\n mc.xform(obj, roo = 'xyz', p=True)\n \n mc.makeIdentity(obj,a = True, rotate = True)\n \n #...push matrix\n _matrix = mc.xform (loc, q=True, m =True)\n mc.xform(obj, m = _matrix)\n \n objRot = mc.xform (obj, q=True, os = True, ro=True)\n \n mc.xform(obj, ra=[v for v in objRot], os=True)\n mc.xform(obj,os=True, ro = [0,0,0])#...clear\"\"\"\n \n mc.delete(loc)\n \n mc.xform(obj, roo = _restoreRO)\n mc.xform(obj,ws=True, rp = _restorePivotRP) \n mc.xform(obj,ws=True, sp = _restorePivotSP) \n \n if rotateOrder: \n log.debug(\"|{0}| >> rotateOrder...\".format(_str_func)) \n mc.xform(obj, roo = mc.xform (source, q=True, roo=True ), p=True)#...match rotateOrder\n \n if _dup:\n log.debug(\"|{0}| >> shapes back...: {1}\".format(_str_func,_l_shapes)) \n #mc.delete(_l_shapes)\n shapeParent_in_place(obj,_dup)\n mc.delete(_dup)\n \n for c in _l_children:\n log.debug(\"|{0}| >> parent back...: '{1}'\".format(_str_func,c)) \n log.debug(\"|{0}| >> obj:{1}\".format(_str_func,obj)) \n \n parent_set(c,obj) \n \n return True",
"def rotation_matrix(delta):\n return np.array([[np.cos(delta), -np.sin(delta)],[np.sin(delta), np.cos(delta)]])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The purpose of this function is to create a list of residue names (A11) of proteins that are made of two helices separated by a gap. pro_eitherside is how many residues on either side of the gap to search.
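Before the full parser below, a small self-contained illustration of the matching idea: two helix runs separated by a short gap that contains a proline. The sample string and the minimum run length of six are assumptions chosen to mirror the parser's defaults.

import re

# Made-up per-residue secondary-structure string: two helix runs ('H') separated
# by a short gap whose middle character has been marked 'P' for proline.
sec_str = "ccHHHHHHHHcPcHHHHHHHcc"

# At least six helix characters, an optional gap character, a proline,
# another optional gap character, then a second helix run.
pattern = re.compile(r"[hH]{6,}.?(P).?[hH]{6,}")

match = pattern.search(sec_str)
if match:
    print("double helix with a gap proline at index", match.start(1))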
|
def double_helix_parser(input_file, output_file, helicies_length = 6, helix_gap = 3, pro_eitherside = 3):
res_no_l = [] # for residue names
res_name_l = [] # for amino acid names
sec_str_l = [] # for sec structure prediction
    two_helix_l = [] # list of matched double helices, each stored as a list of residue names
    # Extract the residue no, amino acid and secstr and assign them to variables
rx_seq = re.compile(r"^(\w+?)\s+?(\w+?)\s+?(\S)", re.MULTILINE)
text = fileread(input_file)
    # assign the matched groups in the text to res_no_l, res_name_l and sec_str_l
for match in rx_seq.finditer(text):
res_no, res_name, sec_str = match.groups()
res_no_l.append(res_no)
res_name_l.append(res_name)
sec_str_l += sec_str
# creates dictionaries for each with the chain as the key
chains_sec_str_d = keychain_value_str(res_no_l, sec_str_l)
chains_res_no_d = keychain_value_list(res_no_l, res_no_l)
chains_res_name_d = keychain_value_list(res_no_l, res_name_l)
    # when a PRO is found in chains_res_name_d[chain], its secstr character in chains_sec_str_d is replaced with a P
# We will then search for this P later on
counter = 0
for chain in chains_res_name_d:
#print(chains_res_name_d[chain])
counter = 0
for residue in chains_res_name_d[chain]:
#print(chains_res_name_d[chain][counter])
if residue == 'PRO':
chains_sec_str_d[chain] = chains_sec_str_d[chain][:counter] + 'P' + chains_sec_str_d[chain][counter + 1:]
#print(chains_res_no_d[chain][counter])
counter += 1
    # only matches if a proline is found in the gap
    # the regex contains 2 groups: group 1 is the whole double helix and group 2 is the proline in the gap
for x in chains_sec_str_d:
        # helix run of at least six residues, an optional gap character, a proline,
        # another optional gap character, then a second helix run
        regex = r"([hH]{6,}.?(P).?[hH]{6,})"
        p = re.compile(regex)
        # if a match is found, record the residue numbers of that double helix
for match in p.finditer(chains_sec_str_d[x]):
# adjusted to check for Proline around the gap 1 before and 1 after
two_helix_l += [chains_res_no_d[x][ (match.start(1)) : (match.end(1)) ]]
match_groups =(match.groups())
# finds the location of the proline for mutation using mutmod
pro_res = (x + str(match.start(2)))
print(pro_res + " :" + match.group(2))
tempstr = ""
for protein in two_helix_l:
for residue in protein:
tempstr += (residue + "\n")
tempstr +=("\n")
output = open(output_file, 'w')
output.write(tempstr)
output.close()
#print('#####################')
#print(tempstr)
#print('#####################')
|
[
"def get_exemplu_apartamente():\r\n apartamente = []\r\n p = 100\r\n for i in range(0,10):\r\n adauga_apartament(apartamente,i*p,i*p+1,i*p+2,i*p+3,i*p+4)\r\n return apartamente",
"def genenames_from10x(genelist):\n genesymbol=[]\n #ensemblid=[]\n for i in range(len(genelist)):\n curgene=genelist[i]\n starts=[]\n for x in re.finditer('_',curgene):\n starts.append(x.start()+1)\n genesymbol.append(curgene[starts[-1]:])\n \n return genesymbol#,ensemblid",
"def locate_restriction_sites(self):\r\n result = []\r\n for i in range(len(self.sq)):\r\n for j in range(4, 13, 1):\r\n if i+j > len(self.sq):\r\n break\r\n else:\r\n og_sq = self.sq[i: i+j]\r\n list = []\r\n x = -1\r\n while x >= -len(og_sq):\r\n if og_sq[x] == \"A\":\r\n list.append(\"T\")\r\n elif og_sq[x] == \"G\":\r\n list.append(\"C\")\r\n elif og_sq[x] == \"T\":\r\n list.append(\"A\")\r\n elif og_sq[x] == \"C\":\r\n list.append(\"G\")\r\n x -= 1\r\n comple_sq = \"\"\r\n for k in list:\r\n\r\n comple_sq += k\r\n if (og_sq) == (comple_sq):\r\n result.append((i+1, j))\r\n return result",
"def expandeaza(self):\n succ = []\n nod_c = self.nod_graf\n arce = self.problema.arce\n for a in arce:\n if a.capat == nod_c.info:\n succ.append((problema.cauta_nod_nume(a.varf), a.cost))\n\n return succ",
"def get_aa(codons, genetic_code):\n\n # if there are no codons\n if codons==\"-\": return \"-\"\n\n # if it is a protein\n elif len(codons)%3==0: return str(Seq(codons).translate(table = genetic_code))\n\n # if not\n else: \n if len(codons)<3: return \"X\"\n else: return (str(Seq(\"\".join(list(chunks(codons, 3))[0:-1])).translate(table = genetic_code)) + \"X\")",
"def pks_peptide(peptide_chain):\r\n\r\n\tlargo_cadena = len(peptide_chain)\r\n\tpk = []\r\n\tion = []\r\n\tionizado = []\r\n\r\n\r\n\tfor aa in range(largo_cadena):\r\n\t\tif aa == 0:\r\n\t\t\tamino_terminal = peptide_chain[aa]\r\n\t\t\tpk.append(aminoacidos[amino_terminal][2])\r\n\t\t\tion.append(\"basic\")\r\n\t\t\tionizado.append(aminoacidos[amino_terminal][0] + \"_at\")\r\n\t\t\t\r\n\t\t\t# Agregar cadena lateral del primer aa en caso de presentar\r\n\t\t\tif len(aminoacidos[amino_terminal]) == 5:\r\n\t\t\t\tpk.append(aminoacidos[amino_terminal][3])\r\n\t\t\t\tion.append(aminoacidos[amino_terminal][4])\r\n\t\t\t\tionizado.append(aminoacidos[amino_terminal][0] + \"_r\")\r\n\t\t\t\r\n\t\t\t# Para los casos con un solo aminoácido se toman solo los\r\n\t\t\t# valores de ese aminoácido\r\n\t\t\tif largo_cadena == 1:\r\n\t\t\t\tcarboxi_terminal = peptide_chain[aa]\r\n\t\t\t\tpk.append(aminoacidos[carboxi_terminal][1])\r\n\t\t\t\tion.append(\"acid\")\r\n\t\t\t\tionizado.append(aminoacidos[carboxi_terminal][0] + \"_ct\")\r\n\t\t\t\tbreak\r\n\r\n\t\telif aa == largo_cadena - 1:\r\n\t\t\tcarboxi_terminal = peptide_chain[aa]\r\n\r\n\t\t\t# Agregar cadena lateral del último aa en caso de presentar\r\n\t\t\tif len(aminoacidos[carboxi_terminal]) == 5:\r\n\t\t\t\tpk.append(aminoacidos[carboxi_terminal][3])\r\n\t\t\t\tion.append(aminoacidos[carboxi_terminal][4])\r\n\t\t\t\tionizado.append(aminoacidos[carboxi_terminal][0] + \"_r\")\r\n\r\n\t\t\tpk.append(aminoacidos[carboxi_terminal][1])\r\n\t\t\tion.append(\"acid\")\r\n\t\t\tionizado.append(aminoacidos[carboxi_terminal][0] + \"_ct\")\r\n\r\n\t\telif aa > 0 and aa < largo_cadena:\r\n\t\t\tno_terminales = peptide_chain[aa]\r\n\t\t\tif len(aminoacidos[no_terminales]) == 5:\r\n\t\t\t\tpk.append(aminoacidos[no_terminales][3])\r\n\t\t\t\tion.append(aminoacidos[no_terminales][4])\r\n\t\t\t\tionizado.append(aminoacidos[no_terminales][0] + \"_r\")\r\n\r\n\r\n\tprint(\"\\n***************** INFORMACION DE LA SECUENCIA ******************\\n\")\r\n\tprint(f\"Secuencia de aminoácidos --> {peptide_chain}\\n\")\r\n\tc = 1\r\n\tprint(\"Aminoácidos con cargas...\\n\")\r\n\tfor residuos in ionizado:\r\n\t\tprint(f\"{c} --> {residuos}\")\r\n\t\tc += 1\r\n\t#print(ion)\r\n\t#print(pk)\r\n\tprint(\"\\n****************************************************************\")\r\n\r\n\r\n\tpk_ordenado = sorted(pk)\r\n\r\n\tintervalos_pk, cantidad_intervalos = intervalos(pk_ordenado)\r\n\r\n\t# creación de tabla que indica la ionización, es decir,\r\n\t# si es positivo, neutro o negativo en un intervalo\r\n\tpks = tabla_intervalos(cantidad_intervalos, intervalos_pk, ion, pk, ionizado)\r\n\r\n\r\n\t# Detecta el punto del zwitterion\r\n\tpk_intervalo = zwitterion(pks, cantidad_intervalos)\r\n\t\r\n\r\n\t# calcular punto isoelectrico\r\n\tpunto_iso = punto_isoelectrico(pk_intervalo)\r\n\treturn(round(punto_iso, 2))",
"def genenames_from10x_mod(genelist):\n genesymbol=[]\n #ensemblid=[]\n for i in range(len(genelist)):\n curgene=genelist[i]\n starts=[]\n for x in re.finditer('_',curgene):\n starts.append(x.start()+1)\n genesymbol.append(curgene[starts[0]:])\n \n return genesymbol#,ensemblid",
"def split_protein(protein):\n peptides = []\n pep = \"\"\n for amino_acid in protein: # for each letter in the string\n pep += amino_acid\n if (amino_acid == 'K' or amino_acid == 'R'):\n peptides.append(pep)\n if (pep.startswith('P') and len(peptides) > 1):\n pep2 = peptides.pop()\n pep1 = peptides.pop()\n peptides.append(pep1 + pep2)\n pep = \"\"\n if not (protein.endswith('K') or protein.endswith('R')):\n peptides.append(pep)\n if (pep.startswith('P') and len(peptides) > 1):\n pep2 = peptides.pop()\n pep1 = peptides.pop()\n peptides.append(pep1 + pep2)\n\n return peptides",
"def voisins(liste_arcs, noeud): \n liste_voisins = list()\n for arc in liste_arcs:\n if noeud == arc[0]:\n liste_voisins.append( arc[1] )\n if noeud == arc[1]:\n liste_voisins.append( arc[0] )\n return liste_voisins",
"def summarize_polyA(fasta):\n lst = []\n for name in fasta.keys():\n seq = str(fasta[name])\n L = seq[0:10]\n R = seq[-10:]\n end = L + R\n most_common_char = Counter(end).most_common(1)[0][0]\n Ln = Counter(L)[most_common_char]\n Rn = Counter(R)[most_common_char]\n if Ln > Rn:\n m = re.search('^(' + most_common_char + '+)', seq)\n if m:\n lst.append([\"L\", most_common_char, m.group(1), name, seq])\n else:\n lst.append([\"L\", most_common_char, \"-\", name, seq])\n else:\n m = re.search('(' + most_common_char + '+)$', seq)\n if m:\n lst.append([\"R\", most_common_char, m.group(1), name, seq])\n else:\n lst.append([\"R\", most_common_char, \"-\", name, seq])\n return(lst)",
"def ConvertCnsProtonNames(residueName, atomName):\n #I. get a clean three-letter code and strip & uppercase the atomName\n threeLetter = AminoAcid.AminoAcid(residueName)[1]\n if threeLetter[2] == '':\n print 'WARNING: residue name', residueName, 'not understood'\n return atomName\n atomName = string.upper(string.strip(atomName))\n \n #II. methylenes\n #1. GLY HA:\n if threeLetter == 'GLY' and atomName == 'HA1':\n atomName = 'HA2'\n elif threeLetter == 'GLY' and atomName == 'HA2':\n atomName = 'HA1'\n \n #2. ARG, ASN, ASP, CYS, GLN, GLU, HIS, LEU, LYS, MET, PHE, PRO, SER, TRP, TYR HB%:\n elif threeLetter in ('ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'HIS', 'LEU', 'LYS',\\\n 'MET', 'PHE', 'PRO', 'SER', 'TRP', 'TYR') and \\\n atomName == 'HB3':\n atomName = 'HB1'\n elif threeLetter in ('ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'HIS', 'LEU', 'LYS',\\\n 'MET', 'PHE', 'PRO', 'SER', 'TRP', 'TYR') and \\\n atomName == 'HB1':\n atomName = 'HB3'\n\n #3. ARG, GLN, GLU, LYS, MET, PRO HG%:\n elif threeLetter in ('ARG', 'GLN', 'GLU', 'LYS', 'MET', 'PRO') and\\\n atomName == 'HG1':\n atomName = 'HG3'\n elif threeLetter in ('ARG', 'GLN', 'GLU', 'LYS', 'MET', 'PRO') and\\\n atomName == 'HG3':\n atomName = 'HG1'\n #4. ILE HG1%:\n elif threeLetter == 'ILE' and atomName == 'HG13':\n atomName = 'HG11'\n elif threeLetter == 'ILE' and atomName == 'HG11':\n atomName = 'HG13' \n #5. ARG, ASN, LYS, PRO HD:\n elif threeLetter in ('ARG', 'ASN', 'LYS', 'PRO') and atomName == 'HD1':\n atomName = 'HD3'\n elif threeLetter in ('ARG', 'ASN', 'LYS', 'PRO') and atomName == 'HD3':\n atomName = 'HD1'\n #6. LYS HE:\n elif threeLetter == 'LYS' and atomName == 'HE3':\n atomName = 'HE1'\n elif threeLetter == 'LYS' and atomName == 'HE1':\n atomName = 'HE3'\n \n #III. methyls:\n #1. ALA beta:\n elif threeLetter == 'ALA' and atomName == 'HB2':\n atomName = 'HB1'\n elif threeLetter == 'ALA' and atomName == 'HB1':\n atomName = 'HB2'\n #2. VAL gamma1:\n elif threeLetter == 'VAL' and atomName == 'HG11':\n atomName = 'HG12'\n elif threeLetter == 'VAL' and atomName == 'HG12':\n atomName = 'HG11'\n #3. ILE, VAL gamma2:\n elif threeLetter in ('ILE', 'VAL') and atomName == 'HG21':\n atomName = 'HG22'\n elif threeLetter in ('ILE', 'VAL') and atomName == 'HG22':\n atomName = 'HG21'\n #4. ILE, LEU delta1:\n elif threeLetter in ('ILE', 'LEU') and atomName == 'HD11':\n atomName = 'HD12'\n elif threeLetter in ('ILE', 'LEU') and atomName == 'HD12':\n atomName = 'HD11' \n #5. LEU delta2:\n elif threeLetter == 'LEU' and atomName == 'HD21':\n atomName = 'HD22'\n elif threeLetter == 'LEU' and atomName == 'HD22':\n atomName = 'HD21' \n #6. MET epsilon:\n elif threeLetter == 'MET' and atomName == 'HE1':\n atomName = 'HE2'\n elif threeLetter == 'MET' and atomName == 'HE2':\n atomName = 'HE1'\n #7. zeta:\n elif atomName == 'HZ1':\n atomName = 'HZ2'\n elif atomName == 'HZ2':\n atomName = 'HZ1' \n \n #IV. ARG NHs:\n elif threeLetter == 'ARG' and atomName == 'HH11':\n atomName = 'HH12'\n elif threeLetter == 'ARG' and atomName == 'HH12':\n atomName = 'HH11'\n elif threeLetter == 'ARG' and atomName == 'HH21':\n atomName = 'HH22'\n elif threeLetter == 'ARG' and atomName == 'HH22':\n atomName = 'HH21' \n\n return atomName",
"def extract_seq_entropy_data(siteresidue_list, profile):\n # Opening and formatting lists of the probabilities and residues\n with open(profile) as profile:\n ressingle_list = []\n probdata_list = []\n\n # extracting relevant information\n for line in profile:\n line_list = line.split()\n residue_type = line_list[0]\n prob_data = line_list[1:]\n prob_data = list(map(float, prob_data))\n ressingle_list.append(residue_type)\n probdata_list.append(prob_data)\n ressingle_list = ressingle_list[1:]\n probdata_list = probdata_list[1:]\n\n # Changing single letter amino acid to triple letter\n # with its corresponding number\n count = 0\n restriple_list = []\n for res in ressingle_list:\n newres = res.replace(res, amino_single_to_triple(res))\n count += 1\n restriple_list.append(newres + str(count))\n\n # Calculating information entropy\n # suppress warning\n with np.errstate(divide='ignore'):\n prob_array = np.asarray(probdata_list)\n log_array = np.log2(prob_array)\n\n # change all infinite values to 0\n log_array[~np.isfinite(log_array)] = 0\n entropy_array = log_array * prob_array\n entropydata_array = np.sum(a=entropy_array, axis=1) * -1\n entropydata_list = entropydata_array.tolist()\n\n # Matching amino acids from .mol2 and .profile files and creating dictionary\n fullprotein_data = dict(zip(restriple_list, entropydata_list))\n seq_entropy_data = {k: float(\n fullprotein_data[k]) for k in siteresidue_list if k in fullprotein_data}\n return seq_entropy_data",
"def non_standard_resi(self):\n r_ids = self.resi_ids\n r_ch = self.chain_ids\n resids=[]\n n=0\n for res in self.resi_names:\n if is_non_standard_AA(res):\n resids.append((res, AA_code(res), r_ids[n], r_ch[n]))\n n += 1\n return resids",
"def get_qualifizierte_parteien(erststimmen_pro_partei_pro_wahlkreis,zweitstimmen_pro_partei_pro_wahlkreis):\n funf_prozent_berechnung=zweitstimmen_pro_partei_pro_wahlkreis.sum()[1:]/zweitstimmen_pro_partei_pro_wahlkreis.sum()[1:].sum() #zweitstimmen per party divided by total number of zweitstimmen\n parteien_mehr_als_funf_prozent=funf_prozent_berechnung.where(funf_prozent_berechnung.values>=0.05).dropna().index.tolist() #drop parties where % Zweitstimmen below 5% and get list of remaining parties\n wahlkreis_hurde_berechnung=get_wahlkreissitze_pro_partei_pro_bundesland(erststimmen_pro_partei_pro_wahlkreis).sum(axis=1) #calculate number of Wahlkreissitze per party on Bundesebene\n parteien_min_drei_wahlkreise=wahlkreis_hurde_berechnung.where(wahlkreis_hurde_berechnung.values>=3).dropna().index.tolist() #drop parties that don't have at least 3 Wahlkreissitze, and get list of remaining parties\n qualifizierte_parteien = list(set(parteien_mehr_als_funf_prozent+parteien_min_drei_wahlkreise)) #get union of the two lists\n return qualifizierte_parteien",
"def get_residue_info(name):\n # Fetch the protein \n url = 'https://gpcrdb.org/services/protein/'+name\n response = requests.get(url)\n protein_data = response.json()\n # Determine the numbering scheme to use\n num_scheme = protein_data['residue_numbering_scheme']\n # Fetch the residue information\n url = 'https://gpcrdb.org/services/residues/extended/'+name\n response = requests.get(url)\n residue_data = response.json()\n # Extract info in array format\n residue_info = []\n for res in residue_data:\n res_part = res['protein_segment']\n res_seqn = res['sequence_number']\n res_code = res['amino_acid']\n res_dbid = ''\n for num in res['alternative_generic_numbers']:\n if num['scheme'] == num_scheme:\n res_dbid = num['label']\n residue_info.append([res_part, res_seqn, res_code, res_dbid])\n return residue_info",
"def CalculateConjointTriad(proteinsequence):\n \tres={}\n\tproteinnum=_Str2Num(proteinsequence)\n\tfor i in range(8):\n\t\tfor j in range(8):\n\t\t\tfor k in range(8):\n\t\t\t\ttemp=str(i)+str(j)+str(k)\n\t\t\t\tres[temp]=proteinnum.count(temp)\n\treturn res",
"def ligand_only(pqrfile,lig_name):\n #here we want the ligand to have charges\n #while the protein 0.0 \n ifile = open(pqrfile,\"r\").readlines()\n ofile = open(\"ligand_only.pqr\",\"w\")\n\n for line in ifile:\n res_name = line.split()[3]\n if res_name==lig_name:\n lig_atnumb = int(line.split()[1])\n lig_atom = line.split()[2]\n lig_chain = int(line.split()[4])\n lig_xx = float(line.split()[5])\n lig_yy = float(line.split()[6])\n lig_zz = float(line.split()[7])\n lig_charge = float(line.split()[8])\n lig_radius = float(line.split()[-1])\n #print(std_string)\n ofile.write(\"ATOM %5d %-3s %-3s %3d %7.3f %7.3f %7.3f %8.4f %8.4f\\n\" % (lig_atnumb,lig_atom,lig_name,lig_chain,\\\n lig_xx,lig_yy,lig_zz,lig_charge,lig_radius))\n else:\n res_atnumb = int(line.split()[1])\n res_atom = line.split()[2]\n res_name = line.split()[3]\n res_chain = int(line.split()[4])\n res_xx = float(line.split()[5])\n res_yy = float(line.split()[6])\n res_zz = float(line.split()[7])\n res_charge =0.0000\n res_radius = float(line.split()[-1])\n #print(std_string)\n ofile.write(\"ATOM %5d %-3s %-3s %3d %7.3f %7.3f %7.3f %8.4f %8.4f\\n\" % (res_atnumb,res_atom,res_name,res_chain,\\\n res_xx,res_yy,res_zz,res_charge,res_radius))\n\n ofile.close()",
"def extract_residues(model):\r\n\r\n\t#TODO : return a list of protein residues given a PDB model\r\n\tresidues = []\r\n\tfor chain in model:\r\n\t\tfor residue in chain:\r\n\t\t\tif PDB.is_aa(residue, standard=True): residues.append(residue)\r\n\t# print(residues)\r\n\treturn residues",
"def obtener_hijos_proveedor(self):\n res = []\n for arbol in self.lista_subarboles:\n raiz_sub_arbol = arbol.raiz\n hijos_subarbol = self.obtener_hijos_de(raiz_sub_arbol)\n cant = len(hijos_subarbol)\n if cant > 1:\n for i in range(1, cant + 1): # recorremos con uno mas para mantener los numeros multiplos de 5\n if i == 2 or i == 4 or i == 6:\n res.append(hijos_subarbol[i - 1])\n elif i > 6 and i % 5 == 0:\n res.append(hijos_subarbol[i - 1])\n print \"Los hijos de %s son %s\" % (self.raiz, res)\n return res"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the post settings. This function sets custom values for post type, post status and maxposts if custom values are defined in the object initialization. If not, the function sets default values as provided in the Constants section. The Basic Post Configuration is configurable; the postMetaFields can only be set via Constants.
|
def __init__(self, **kwargs):
self.postBasicInformation = {}
if 'post_type' in kwargs:
self.postBasicInformation['post_type'] = kwargs.get('post_type')
else:
self.postBasicInformation['post_type'] = DEFAULT_POST_TYPE
if 'post_status' in kwargs:
self.postBasicInformation['post_status'] = kwargs.get('post_status')
else:
self.postBasicInformation['post_status'] = DEFAULT_POST_STATUS
if 'maxposts' in kwargs:
self.postBasicInformation['maxposts'] = kwargs.get('maxposts')
else:
self.postBasicInformation['maxposts'] = DEFAULT_MAXPOSTS
self.postCustomFields = {
'imageField' : IMAGE_FIELD,
'dateField' : DATE_FIELD,
'timeField' : TIME_FIELD,
'timestampField' : TIMESTAMP_FIELD
}
|
[
"def post_type(self, post_type):\n\n self._post_type = post_type",
"def _structure_blog_settings(_khoros_object, _blog_settings, _payload, _discussion_style):\n if any(_blog_settings.values()) and _discussion_style != 'blog':\n _warn_about_ignored_settings('blog', _discussion_style)\n else:\n # Populate the 'comments_enabled' setting if applicable\n if _blog_settings.get('comments_enabled'):\n _payload['data']['comments_enabled'] = _blog_settings.get('comments_enabled')\n\n # Populate the blog authors list\n if any((_blog_settings['authors'], _blog_settings['author_ids'], _blog_settings['author_logins'])):\n authors = users.structure_user_dict_list(_khoros_object, _blog_settings['authors'],\n _blog_settings['author_ids'], _blog_settings['author_logins'])\n _payload['data']['authors'] = authors\n\n # Populate the blog moderators list\n if any((_blog_settings['moderators'], _blog_settings['moderator_ids'], _blog_settings['moderator_logins'])):\n moderators = users.structure_user_dict_list(_khoros_object, _blog_settings['moderators'],\n _blog_settings['moderator_ids'],\n _blog_settings['moderator_logins'])\n _payload['data']['moderators'] = moderators\n return _payload",
"def set_settings(self, subreddit, title, public_description='',\n description='', language='en', subreddit_type='public',\n content_options='any', over_18=False, default_set=True,\n show_media=False, domain='', domain_css=False,\n domain_sidebar=False, header_hover_text='',\n wikimode='disabled', wiki_edit_age=30,\n wiki_edit_karma=100,\n submit_link_label='', submit_text_label='',\n exclude_banned_modqueue=False, comment_score_hide_mins=0,\n public_traffic=False, collapse_deleted_comments=False,\n spam_comments='low', spam_links='high',\n spam_selfposts='high', submit_text='',\n hide_ads=False, suggested_comment_sort='',\n key_color='',\n **kwargs):\n data = {'sr': subreddit.fullname,\n 'allow_top': default_set,\n 'comment_score_hide_mins': comment_score_hide_mins,\n 'collapse_deleted_comments': collapse_deleted_comments,\n 'description': description,\n 'domain': domain or '',\n 'domain_css': domain_css,\n 'domain_sidebar': domain_sidebar,\n 'exclude_banned_modqueue': exclude_banned_modqueue,\n 'header-title': header_hover_text or '',\n 'hide_ads': hide_ads,\n 'key_color': key_color,\n 'lang': language,\n 'link_type': content_options,\n 'over_18': over_18,\n 'public_description': public_description,\n 'public_traffic': public_traffic,\n 'show_media': show_media,\n 'submit_link_label': submit_link_label or '',\n 'submit_text': submit_text,\n 'submit_text_label': submit_text_label or '',\n 'suggested_comment_sort': suggested_comment_sort or '',\n 'spam_comments': spam_comments,\n 'spam_links': spam_links,\n 'spam_selfposts': spam_selfposts,\n 'title': title,\n 'type': subreddit_type,\n 'wiki_edit_age': six.text_type(wiki_edit_age),\n 'wiki_edit_karma': six.text_type(wiki_edit_karma),\n 'wikimode': wikimode}\n\n if kwargs:\n msg = 'Extra settings fields: {0}'.format(kwargs.keys())\n warn_explicit(msg, UserWarning, '', 0)\n data.update(kwargs)\n evict = self.config['subreddit_settings'].format(\n subreddit=six.text_type(subreddit))\n self.evict(evict)\n return self.request_json(self.config['site_admin'], data=data)",
"def parse_post_config(json_data):\n post_config = {}\n\n original_post_id = json_data.get(ORIGINAL_POST_ID)\n rendered_post_id = json_data.get(RENDERED_POST_ID)\n\n if original_post_id is None and rendered_post_id is None:\n raise NullPostId()\n\n if rendered_post_id is None:\n post_config[POST_ID] = original_post_id\n post_config[POST_TYPE] = POST_TYPE_ORIGINAL\n else:\n post_config[POST_ID] = rendered_post_id\n post_config[POST_TYPE] = POST_TYPE_RENDERED\n post_config[REPLACE] = json_data.get(REPLACE, 0)\n\n post_config[AS_ORIGINAL] = json_data.get(AS_ORIGINAL, 0)\n\n post_config[AUTO] = json_data.get(AUTO, 1)\n\n post_config[FONT] = json_data.get(FONT, 'anonymouspro.ttf')\n post_config[IMG] = json_data.get(IMG, 1)\n post_config[IMG_WIDTH] = json_data.get(IMG_WIDTH, 800)\n post_config[IMG_COUNT] = json_data.get(IMG_COUNT, 1)\n\n post_config[IMG_WITH_TEXT] = json_data.get(IMG_WITH_TEXT, 1)\n\n post_config[IMG_WITH_ORIGINAL_TEXT] = json_data.get(IMG_WITH_ORIGINAL_TEXT, 1)\n post_config[IMG_WITH_POST_IMG] = json_data.get(IMG_WITH_POST_IMG, 1)\n post_config[IMG_WITH_COMMENT] = json_data.get(IMG_WITH_COMMENT, 1)\n post_config[IMG_COMMENT_ID] = json_data.get(IMG_COMMENT_ID, None)\n post_config[IMG_WITH_COMMENT_TEXT] = json_data.get(IMG_WITH_COMMENT_TEXT, 1)\n post_config[IMG_COMMENT_TEXT_WITH_REF] = json_data.get(IMG_COMMENT_TEXT_WITH_REF, 1)\n post_config[IMG_COMMENT_WITH_IMG] = json_data.get(IMG_COMMENT_WITH_IMG, 1)\n\n return post_config",
"def test_unit_post_init(self):\n post = Post({}, \"content\", \"src\", \"build\")\n\n assert post.meta == {}\n assert post.content == \"content\"\n assert post.src_path == \"src\"\n assert post.build_path == \"build\"\n assert post.from_path == \"\"\n assert post.to_path == \"\"",
"def set_PostContent(self, value):\n super(CreateAuthenticatedPostInputSet, self)._set_input('PostContent', value)",
"def get_details(self):\n post_json = self.get_post_json(self.id)\n try:\n # All expected post types have these properties.\n self.title = self.validate_string(post_json['title'])\n self.author = self.validate_string(post_json['by'])\n self.points = self.validate_int(post_json['score'])\n\n # 'job' posts don't have descendents.\n if post_json['type'] == 'job':\n self.comments = 0\n else:\n self.comments = self.validate_int(post_json['descendants'])\n\n # Some story posts don't have a 'url'. Construct their URL from\n # their ID.\n if 'url' not in post_json:\n self.uri = TEXT_POST_URL % self.id\n else:\n self.uri = self.validate_uri(post_json['url'])\n\n except KeyError as e:\n # Missing JSON field. Don't expect to hit this unless API changes.\n # We continue to execute with as much of the post as we processed\n # before the error.\n logging.exception(\"KeyError whilst populating Post object ID %s, \"\n \"rank %d\" % (self.id, self.rank))\n logging.critical(post_json)",
"def settings_handler(sender, instance, **kwargs):\n instance.settings = {\n 'autoplay': instance.autoplay,\n 'autoplaySpeed': instance.autoplay_speed,\n 'speed': instance.speed,\n 'arrows': instance.arrows,\n 'dots': instance.dots,\n 'slidesToShow': instance.slides_to_show\n }\n\n return",
"def postid(self, postid):\n\n self._postid = postid",
"def post_detail(self, post_detail):\n\n self._post_detail = post_detail",
"def get_custom_model(self, data):\n blog_post = Post(\n post_title=data.get('post_title', data),\n article=data.get('article', data),\n slug=data.get('slug', data),\n status=data.get('status', data),\n user=data.get('user', data)\n )\n\n return blog_post",
"def configure(**kwargs):\n global settings\n settings = Settings(**kwargs)",
"def post_notificationsettings_config(self, data={}, **kwargs):\n request_endpoint = '/api/v1/config/notification-settings'\n return self.query(query_endpoint=request_endpoint, query_type='POST', data=data, **kwargs)",
"def _augment_page_post(page_post: Dict[str, Any]) -> Dict[str, Any]:\n if 'attachments' in page_post and 'data' in page_post['attachments'] and len(page_post['attachments']['data']) == 1:\n data = page_post['attachments']['data'][0]\n if 'title' in data:\n page_post['caption'] = data['title']\n page_post['name'] = data['title']\n if 'description' in data:\n page_post['description'] = data['description']\n if 'url_unshimmed' in data:\n page_post['link'] = data['url_unshimmed']\n if 'target' in data and 'id' in data['target']:\n page_post['object_id'] = data['target']['id']\n if 'media' in data and 'source' in data['media']:\n page_post['source'] = data['media']['source']\n if 'type' in data:\n page_post['type'] = data['type']\n\n return page_post",
"def set_preset_render_settings(scene, presets=['BASE']):\n if 'BASE' in presets:\n # If using cycles, don't sample.\n scene.cycles.samples = 1\n scene.cycles.max_bounces = 1\n scene.cycles.min_bounces = 1\n\n # Quality settings\n scene.render.resolution_percentage = 100\n scene.render.tile_x = settings.TILE_SIZE\n scene.render.tile_y = settings.TILE_SIZE\n\n # Turn off all but the first renderlayer\n for i, layer in enumerate(scene.layers):\n layer = (i == 0)\n render_layer = scene.render.layers[\"RenderLayer\"]\n bpy.types.WorldLighting.indirect_bounces = 1\n scene.render.layers[0].use_all_z = True\n\n # We don't need raytracing or shadows\n render_layer.use_edge_enhance = False\n scene.render.use_sss = False\n scene.render.use_envmaps = False\n scene.render.use_raytrace = False\n scene.render.use_shadows = False\n scene.render.use_simplify = True\n render_layer.use_solid = True\n render_layer.use_halo = False\n render_layer.use_all_z = False\n render_layer.use_ztransp = False\n render_layer.use_sky = False\n render_layer.use_edge_enhance = False\n render_layer.use_strand = False\n\n # Antialiasing leads to incorrect values\n scene.render.use_antialiasing = False\n\n if 'NON-COLOR' in presets: # Save as non-color data\n scene.view_settings.view_transform = 'Raw'",
"def test_unit_datapost_init(self):\n meta = {\"path\": \"posts/post\"}\n post = DataPost(meta, \"content\", \"src\", \"build\")\n\n assert post.meta == meta\n assert post.content == \"content\"\n assert post.src_path == \"src\"\n assert post.build_path == \"build\"\n assert post.from_path == \"src/data/post\"\n assert post.to_path == \"build/posts/post/data\"",
"def default(**kwargs):\n for name, default_val in kwargs.items():\n try:\n getattr(SETTINGS, name)\n except AttributeError:\n setattr(SETTINGS, name, default_val)",
"def post_new_wp_objects(self):\n for post_object in self.wp_post_objects:\n post_object.id = self.connection.call(NewPost(post_object))",
"def config(cls, duration=None, top=None):\n if duration is not None:\n cls.default_config[\"duration\"] = duration\n if top is not None:\n cls.default_config[\"top\"] = top"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
J_ij = t, or if rdep is true, J_ij = t/r^3. t: self-explanatory. rdep: whether or not to have J depend on r^3.
|
def __init__(self, nnOnly, t, rdep):
super().__init__(nnOnly)
self.t = t
self.rdep = rdep
if (rdep):
self.desc = "j_ij = t/r**3, with t = %f" % (t)
else:
self.desc = "j_ij = t, with t = %f" % (t)
if (self.nnOnly):
self.desc = self.desc + ", j_ij on nearest neighbours only\n"
|
[
"def jacobian(\n self, t: float, state: np.ndarray, u: np.ndarray) -> np.ndarray:\n pass",
"def __constant_jerk__(x, dt, params, options=None):\n\n if options is None:\n options = {'backward': False}\n\n r, q = params\n A = np.matrix([[1, dt, 0, 0],\n [0, 1, dt, 0],\n [0, 0, 1, dt],\n [0, 0, 0, 1]])\n B = np.matrix([[0], [0], [0], [0]])\n C = np.matrix([[1, 0, 0, 0]])\n R = np.matrix([[r]])\n Q = np.matrix([[1e-16, 0, 0, 0],\n [0, 1e-16, 0, 0],\n [0, 0, 1e-16, 0],\n [0, 0, 0, q]])\n x0 = np.matrix([[x[0]], [0], [0], [0]])\n P0 = np.matrix(10*np.eye(4))\n y = np.matrix(x)\n u = None\n\n if options['backward']:\n A = A.I\n y = y[:, ::-1]\n\n xhat_fp, xhat_fm, P_fp, P_fm = __kalman_forward_filter__(x0, P0, y, u, A, B, C, R, Q)\n xhat_smooth, _ = __kalman_backward_smooth__(xhat_fp, xhat_fm, P_fp, P_fm, A)\n\n x_hat = np.ravel(xhat_smooth[0,:])\n dxdt_hat = np.ravel(xhat_smooth[1,:])\n\n if not options['backward']:\n return x_hat, dxdt_hat\n\n return x_hat[::-1], dxdt_hat[::-1]",
"def get_resid_wave_eq_first_order(u):\n #See autograd docs for jacobian documentation. \n #This code treats u as a vector valued function of the arguments x,t\n #So have to compute two jacobians, one for each. Consider changing depending on efficiency. \n #Is one jacobian w.r.t single vector [x,t] much faster than two jacobians w.r.t. (x,t)?\n\n #Jx is vector valued function of params,x,t\n #Jx(params,x,t) is d([u,ut,ux])/dx(params,x,t)\n Jx=jacobian(u, 1)\n Jt=jacobian(u, 2)\n\n\n elementwise_error=lambda params,x,t: np.array([\\\n Jx(params,x,t)[0]-Jt(params,x,t)[0]-u(params,x,t)[2]+u(params,x,t)[1], \\\n Jx(params,x,t)[1]-Jt(params,x,t)[2], \\\n Jx(params,x,t)[2]-Jt(params,x,t)[1]\n ])\n\n #elementwise_error=lambda params,x,t: np.array([\\\n # Jx(params,x,t)[0], 0., 0.])\n\n resid=lambda params,x,t: np.linalg.norm((elementwise_error(params,x,t)), ord=2)\n return resid",
"def hjert(x,a):\n r2=x*x+a*a\n# return jnp.where(r2<111., rewofz(x,a), jnp.real(wofzs2(x,a)))\n return jnp.where(r2<111., rewofz(x,a), rewofzs2(x,a))",
"def _compute_joint_torque(model, robo, j):\n # local variables\n qdot_j = robo.qdots[j]\n qddot_j = robo.qddots[j]\n j_a_j = robo.geos[j].axisa\n ia_j = robo.dyns[j].ia\n f_cj = robo.dyns[j].frc\n f_vj = robo.dyns[j].frv\n j_f_j = model.wrenchs[j].val\n # actual computation\n wrench_term = j_f_j.transpose() * j_a_j\n actuator_inertia_term = Matrix([ia_j * qddot_j])\n coriolis_friction_term = Matrix([f_cj * sign(qdot_j)])\n viscous_friction_term = Matrix([f_vj * qdot_j])\n gamma_j = wrench_term + actuator_inertia_term + \\\n viscous_friction_term + coriolis_friction_term\n # store computed torque in model\n model.torques[j] = gamma_j[0, 0]\n return model",
"def Jacobian(self,t,y):\n return -self.lambd",
"def trajOpt(self, state_initial, dircol=0, second_pass=False):\n\n # stopwatch for solver time\n tsolve_pre = time.time()\n\n (x_goal, V_goal, gamma_goal, q_goal) = (200.0, state_initial[2], 0.0, 0.0)\n\n # number of knot points - proportional to x-distance seems to work well\n if not dircol:\n N = int(np.floor(0.8 * np.abs(x_goal - state_initial[0])))\n else:\n N = 30\n\n # optimization problem: variables t_f, u[k], x[k]\n mp = MathematicalProgram()\n\n t_f = mp.NewContinuousVariables(1, \"t_f\")\n dt = t_f[0] / N\n\n k = 0\n u = mp.NewContinuousVariables(2, \"u_%d\" % k)\n input_trajectory = u\n\n x = mp.NewContinuousVariables(6, \"x_%d\" % k)\n state_trajectory = x\n\n for k in range(1, N):\n u = mp.NewContinuousVariables(2, \"u_%d\" % k)\n x = mp.NewContinuousVariables(6, \"x_%d\" % k)\n input_trajectory = np.vstack((input_trajectory, u))\n state_trajectory = np.vstack((state_trajectory, x))\n\n x = mp.NewContinuousVariables(6, \"x_%d\" % N)\n state_trajectory = np.vstack((state_trajectory, x))\n\n # for dircol we can use u_N and first-order hold\n if dircol:\n u = mp.NewContinuousVariables(2, \"u_%d\" % N)\n input_trajectory = np.vstack((input_trajectory, u))\n\n print \"Number of decision vars\", mp.num_vars()\n\n # cost function: penalize time and control effort\n thrust = input_trajectory[:, 0]\n elev = input_trajectory[:, 1]\n vel = state_trajectory[:, 2]\n allvars = np.hstack((t_f[0], thrust, elev, vel))\n # TODO: use u of length n+1 for dircol\n def totalcost(X):\n dt = X[0] / N\n u0 = X[1:N + 1]\n u1 = X[N + 1:2 * N + 1]\n v = X[2 * N + 1:3 * N + 1] # cut last item if dirtrans\n return dt * (1.0 * u0.dot(u0) + 1.0 * u1.dot(u1)) + 1.0 * X[0] * (u0.dot(v))\n # return dt * (1.0 * u0.dot(u0) + 1.0 * u1.dot(u1) + 10.0 * X[0] * (u0.dot(v)))\n\n mp.AddCost(totalcost, allvars)\n\n # initial state constraint\n for i in range(len(state_initial)):\n mp.AddLinearConstraint(state_trajectory[0, i] == state_initial[i])\n\n # final state constraint (x position)\n mp.AddLinearConstraint(state_trajectory[-1, 0] == x_goal)\n\n # final state constraint (z position) NOTE: range is acceptable\n mp.AddLinearConstraint(state_trajectory[-1, 1] <= 1.5)\n mp.AddLinearConstraint(state_trajectory[-1, 1] >= 0.5)\n\n # final state constraint (velocity) NOTE: range is acceptable\n mp.AddLinearConstraint(state_trajectory[-1, 2] <= 1.5 * V_goal)\n mp.AddLinearConstraint(state_trajectory[-1, 2] >= V_goal)\n\n # final state constraint (flight path angle) NOTE: small range here\n mp.AddLinearConstraint(state_trajectory[-1, 3] <= gamma_goal + 1.0 * np.pi / 180.0)\n mp.AddLinearConstraint(state_trajectory[-1, 3] >= gamma_goal - 1.0 * np.pi / 180.0)\n\n # final state constraint (pitch rate)\n mp.AddLinearConstraint(state_trajectory[-1, 5] == q_goal)\n\n # input constraints\n for i in range(len(input_trajectory[:, 0])):\n mp.AddLinearConstraint(input_trajectory[i, 0] >= 0.0)\n mp.AddLinearConstraint(input_trajectory[i, 0] <= 1.2 * self.m * self.g)\n mp.AddLinearConstraint(input_trajectory[i, 1] >= -30.0)\n mp.AddLinearConstraint(input_trajectory[i, 1] <= 30.0)\n\n # state constraints\n for i in range(len(state_trajectory[:, 0])):\n # x position\n mp.AddLinearConstraint(state_trajectory[i, 0] >= state_initial[0])\n mp.AddLinearConstraint(state_trajectory[i, 0] <= x_goal)\n # z position\n mp.AddLinearConstraint(state_trajectory[i, 1] >= 0.3)\n mp.AddLinearConstraint(state_trajectory[i, 1] <= 2.0)\n # velocity\n mp.AddLinearConstraint(state_trajectory[i, 2] >= 1.0)\n mp.AddLinearConstraint(state_trajectory[i, 2] <= 3.0 
* state_initial[2])\n # flight path angle\n mp.AddLinearConstraint(state_trajectory[i, 3] >= -30.0 * np.pi / 180.0)\n mp.AddLinearConstraint(state_trajectory[i, 3] <= 30.0 * np.pi / 180.0)\n # pitch angle\n mp.AddLinearConstraint(state_trajectory[i, 4] >= -20.0 * np.pi / 180.0)\n mp.AddLinearConstraint(state_trajectory[i, 4] <= 40.0 * np.pi / 180.0)\n # pitch rate\n mp.AddLinearConstraint(state_trajectory[i, 5] >= -20.0 * np.pi / 180.0)\n mp.AddLinearConstraint(state_trajectory[i, 5] <= 20.0 * np.pi / 180.0)\n\n # dynamic constraints\n if not dircol:\n # direct transcription\n for j in range(1, N + 1):\n dynamic_prop = dt * self.airplaneLongDynamics(state_trajectory[j - 1, :], input_trajectory[j - 1, :])\n for k in range(len(state_initial)):\n mp.AddConstraint(state_trajectory[j, k] == state_trajectory[j - 1, k] + dynamic_prop[k])\n else:\n # direct collocation\n for j in range(1, N + 1):\n x0 = state_trajectory[j - 1, :]\n x1 = state_trajectory[j, :]\n xdot0 = self.airplaneLongDynamics(x0, input_trajectory[j - 1, :])\n xdot1 = self.airplaneLongDynamics(x1, input_trajectory[j, :])\n\n xc = 0.5 * (x1 + x0) + dt * (xdot0 - xdot1) / 8.0\n xdotc = - 1.5 * (x0 - x1) / dt - 0.25 * (xdot0 + xdot1)\n uc = 0.5 * (input_trajectory[j - 1, :] + input_trajectory[j, :])\n f_xc = self.airplaneLongDynamics(xc, uc)\n for k in range(len(state_initial)):\n # TODO: why does \"==\" cause \"kUnknownError\"?\n # mp.AddConstraint(xdotc[k] - f_xc[k] == 0.0)\n mp.AddConstraint(xdotc[k] <= f_xc[k] + 0.001)\n mp.AddConstraint(xdotc[k] >= f_xc[k] - 0.001)\n\n # allow for warm start of dircol program with output of dirtrans program\n if (second_pass) and (self.mp_result == SolutionResult.kSolutionFound):\n # warm start using previous output\n print 'warm start to traj opt'\n t_guess = self.ttraj[-1]\n mp.SetInitialGuess(t_f[0], t_guess)\n\n for i in range(len(state_trajectory[:, 0])):\n for j in range(len(state_initial)):\n mp.SetInitialGuess(state_trajectory[i, j], self.xdtraj[i, j])\n for i in range(N):\n mp.SetInitialGuess(input_trajectory[i, 0], self.udtraj[i, 0])\n mp.SetInitialGuess(input_trajectory[i, 1], self.udtraj[i, 1])\n\n # time constraints\n mp.AddLinearConstraint(t_f[0] <= 1.25 * t_guess)\n mp.AddLinearConstraint(t_f[0] >= 0.8 * t_guess)\n\n else:\n # initial guesses\n t_guess = np.abs(x_goal - state_initial[0]) / (0.5 * (V_goal + state_initial[2]))\n mp.SetInitialGuess(t_f[0], t_guess)\n\n z_final_dummy = state_initial[1]\n theta_final_dummy = state_initial[4]\n state_final_dummy = np.array([x_goal, z_final_dummy, V_goal, gamma_goal, theta_final_dummy, q_goal])\n for i in range(len(state_trajectory[:, 0])):\n state_guess = ((N - i) / N) * state_initial + (i / N) * state_final_dummy\n for j in range(len(state_guess)):\n mp.SetInitialGuess(state_trajectory[i, j], state_guess[j])\n\n for i in range(N):\n mp.SetInitialGuess(input_trajectory[i, 0], self.m * self.g / 3.5)\n mp.SetInitialGuess(input_trajectory[i, 1], 0.01)\n\n # time constraints\n mp.AddLinearConstraint(t_f[0] <= 2.0 * t_guess)\n mp.AddLinearConstraint(t_f[0] >= 0.5 * t_guess)\n\n # set SNOPT iteration limit\n it_limit = int(max(20000, 40*mp.num_vars()))\n mp.SetSolverOption(SolverType.kSnopt, 'Iterations limit', it_limit)\n\n print(\"** solver begin with N = %d **\" % N)\n # solve nonlinear optimization problem (w/SNOPT)\n result = mp.Solve()\n print result\n\n # convert from symbolic to float\n input_trajectory = mp.GetSolution(input_trajectory)\n t_f = mp.GetSolution(t_f)\n state_trajectory_approx = mp.GetSolution(state_trajectory)\n 
time_array = t_f[0] * np.linspace(0.0, 1.0, (N + 1))\n\n tsolve_post = time.time()\n tsolve = tsolve_post - tsolve_pre\n\n solver_id = mp.GetSolverId()\n\n print (\"** %s solver finished in %.1f seconds **\\n\" % (solver_id.name(), tsolve))\n print (\"t_f computed: %.3f seconds\" % t_f[0])\n\n # get total cost of solution\n if result == SolutionResult.kSolutionFound:\n thrust = input_trajectory[:, 0]\n elev = input_trajectory[:, 1]\n vel = state_trajectory_approx[:, 2]\n allvars = np.hstack((t_f[0], thrust, elev, vel))\n print (\"cost computed: %.3f\" % totalcost(allvars))\n\n # save traj (this is a bit sloppy and redundant but scripts for visualization currently rely on this)\n self.udtraj = input_trajectory\n self.xdtraj = state_trajectory_approx\n self.ttraj = time_array\n self.mp_result = result\n\n # save polynomials of input, state trajectories\n if not dircol:\n self.udtraj_poly = PiecewisePolynomial.FirstOrderHold(time_array[0:-1], input_trajectory.T)\n else:\n self.udtraj_poly = PiecewisePolynomial.FirstOrderHold(time_array, input_trajectory.T)\n self.xdtraj_poly = PiecewisePolynomial.Cubic(time_array, state_trajectory_approx.T)\n\n return input_trajectory, state_trajectory_approx, time_array",
"def GradSimplex3DP(a, b, c, id, jd, kd):\n\n fa = JacobiP(a, 0, 0, id).reshape(len(a),1)\n dfa = GradJacobiP(a, 0, 0, id)\n gb = JacobiP(b, 2*id+1,0, jd).reshape(len(b),1)\n dgb = GradJacobiP(b, 2*id+1,0, jd)\n hc = JacobiP(c, 2*(id+jd)+2,0, kd).reshape(len(c),1)\n dhc = GradJacobiP(c, 2*(id+jd)+2,0, kd)\n\n # r-derivative\n # d/dr = da/dr d/da + db/dr d/db + dc/dr d/dx\n dmodedr = dfa*gb*hc\n if(id>0):\n dmodedr = dmodedr*((0.5*(1-b))**(id-1))\n if(id+jd>0):\n dmodedr = dmodedr*((0.5*(1-c))**(id+jd-1))\n\n # s-derivative\n dmodeds = 0.5*(1+a)*dmodedr\n tmp = dgb*((0.5*(1-b))**id)\n if(id>0):\n tmp = tmp+(-0.5*id)*(gb*(0.5*(1-b))**(id-1))\n\n if(id+jd>0):\n tmp = tmp*((0.5*(1-c))**(id+jd-1))\n\n tmp = fa*tmp*hc\n dmodeds = dmodeds + tmp\n\n # t-derivative\n dmodedt = 0.5*(1+a)*dmodedr+0.5*(1+b)*tmp\n tmp = dhc*((0.5*(1-c))**(id+jd))\n if(id+jd>0):\n tmp = tmp-0.5*(id+jd)*(hc*((0.5*(1-c))**(id+jd-1)));\n\n tmp = fa*(gb*tmp)\n tmp = tmp*((0.5*(1-b))**id);\n dmodedt = dmodedt+tmp;\n\n # Normalize\n dmodedr = 2**(2*id+jd+1.5)*dmodedr\n dmodeds = 2**(2*id+jd+1.5)*dmodeds\n dmodedt = 2**(2*id+jd+1.5)*dmodedt\n\n return dmodedr[:,0], dmodeds[:,0], dmodedt[:,0]",
"def J_dense(x): # dense Jacobian\n return np.array([[1.004, -1e3*x[2], -1e3*x[1]],\n [-0.004, 1.0 + 1e3*x[2] + 60.0*x[1], 1e3*x[1]],\n [0.0, -60.0*x[1], 1.0]])",
"def iLQRsimple_py(x0, xg, utraj0, Q, R, Qf, dt, tol):\n Nx = x0.shape[0]\n Nu = utraj0.shape[0]\n N = utraj0.shape[1] + 1\n\n A = np.zeros((Nx, Nx * (N - 1)))\n B = np.zeros((Nx, Nu * (N - 1)))\n\n J = 0\n Jhist = []\n xtraj = np.zeros((Nx, N))\n xtraj[:, 0] = x0\n utraj = utraj0\n\n # Forward simulate using initial controls\n for k in range(0, N - 1):\n J = (\n J\n + 0.5 * (xtraj[:, k] - xg) @ Q @ (xtraj[:, k] - xg)\n + 0.5 * R * utraj[:, k] @ utraj[:, k]\n )\n (\n xtraj[:, k + 1],\n A[:, Nx * k : Nx * (k + 1)],\n B[:, Nx * k : Nx * (k + 1)],\n ) = rkstep_py(xtraj[:, k], utraj[:, k], dt)\n\n J = J + 0.5 * (xtraj[:, N - 1] - xg) @ Qf @ (xtraj[:, N - 1] - xg)\n Jhist.append(J)\n\n S = np.zeros((Nx, Nx))\n s = np.zeros(Nx)\n K = np.zeros((Nu, Nx * (N - 1)))\n l = (tol + 1) * np.ones((Nu, N - 1))\n\n count = 0\n while np.amax(np.abs(l)) > tol:\n\n count += 1\n\n S = Qf\n s = Qf @ (xtraj[:, N - 1] - xg)\n\n # Backward pass\n for k in range(N - 2, -1, -1):\n\n # Calculate cost gradients for this time step\n q = Q @ (xtraj[:, k] - xg)\n r = R * utraj[:, k]\n\n # Make assignments for ease of reading\n Ak = A[:, Nx * k : Nx * (k + 1)]\n Bk = B[:, Nu * k : Nu * (k + 1)]\n\n # Calculate l and K\n LH = R + Bk.T @ S @ Bk\n l[:, k] = np.linalg.solve(LH, (r + Bk.T @ s))\n K[:, Nx * k : Nx * (k + 1)] = np.linalg.solve(LH, Bk.T @ S @ Ak)\n\n # Calculate new S and s\n Kk = K[:, Nx * k : Nx * (k + 1)]\n Snew = Q + R * Kk.T @ Kk + (Ak - Bk @ Kk).T @ S @ (Ak - Bk @ Kk)\n snew = (\n q\n - Kk.T @ r\n + R * Kk.T @ l[:, k]\n + (Ak - Bk @ Kk).T @ (s - S @ Bk @ l[:, k])\n )\n S = Snew\n s = snew\n\n # Forward pass line search with new l and K\n unew = np.zeros((Nu, N - 1))\n xnew = np.zeros((Nx, N))\n xnew[:, 0] = xtraj[:, 0]\n alpha = 1.0\n Jnew = J + 1\n while Jnew > J:\n Jnew = 0\n for k in range(0, N - 1):\n unew[:, k] = (\n utraj[:, k]\n - alpha * l[:, k]\n - K[:, Nx * k : Nx * (k + 1)] @ (xnew[:, k] - xtraj[:, k])\n )\n (\n xnew[:, k + 1],\n A[:, Nx * k : Nx * (k + 1)],\n B[:, Nx * k : Nx * (k + 1)],\n ) = rkstep_py(xnew[:, k], unew[:, k], dt)\n Jnew = (\n Jnew\n + 0.5 * (xnew[:, k] - xg).T @ Q @ (xnew[:, k] - xg)\n + 0.5 * R * unew[:, k].T @ unew[:, k]\n )\n\n Jnew = Jnew + 0.5 * (xnew[:, N - 1] - xg).T @ Qf @ (xnew[:, N - 1] - xg)\n alpha = 0.5 * alpha\n\n xtraj = xnew\n utraj = unew\n J = Jnew\n Jhist.append(J)\n\n print(\"Iteration {}\".format(count))\n print(\"Final l = {}\".format(np.max(np.abs(l))), \"alpha = {}\".format(2 * alpha))\n\n return xtraj, utraj, K, Jhist",
"def _compute_joint_inertia(model, robo, j):\n h_j = 0\n # local variables\n j_a_j = robo.geos[j].axisa\n ia_j = Matrix([robo.dyns[j].ia])\n j_inertia_j_s = model.star_inertias[j].val\n # actual computation\n h_j = (j_a_j.transpose() * j_inertia_j_s * j_a_j) + ia_j\n # store in model\n model.joint_inertias[j] = h_j[0, 0]\n return model",
"def jacobian(Q, d):\n return zeros([n, n])",
"def ExplicitRK3(self): \r\n self.DifferentialFunction(self.Yn,self.dy1,time=self.Clock)\r\n self.dYtmp[:]=self.dy1[:]\r\n self.Ytmp[:]=self.NextStepComputation(self.Yn,self.dYtmp,self.TimeStep*self.inv2)\r\n \r\n self.DifferentialFunction(self.Ytmp,self.dy2,time=self.Clock+self.TimeStep*self.inv2)\r\n self.dYtmp[:]=-self.dy1[:]+2.0*self.dy2[:]\r\n \r\n self.Ytmp[:]=self.NextStepComputation(self.Yn,self.dYtmp,self.TimeStep)\r\n self.DifferentialFunction(self.Ytmp,self.dy3,time=self.Clock+self.TimeStep)\r\n self.dYtmp[:]=(self.dy1[:]+4.0*self.dy2[:]+self.dy3)*self.inv6\r\n\r\n \r\n self.Ynp1[:]=self.NextStepComputation(self.Yn,self.dYtmp,self.TimeStep)",
"def scheme(u, q, f, i, j, n, i2, i3, j2, j3, x ,y, dtdx2, dtdy2, dt2, dt, b):\n\n u[i,j,n+1] = 2*u[i,j,n] - (1 - 0.5*b*dt)*u[i,j,n-1] + \\\n dtdx2*((q(x[i2],y[j]) + q(x[i],y[j]))*(u[i2,j,n] - u[i,j,n]) - (q(x[i],y[j]) + q(x[i3],y[j]))*(u[i,j,n] -u[i3,j,n])) + \\\n dtdy2*((q(x[i],y[j2]) + q(x[i],y[j]))*(u[i,j2,n] - u[i,j,n]) - (q(x[i],y[j]) + q(x[i],y[j3]))*(u[i,j,n] -u[i,j3,n])) + \\\n dt2*f(x[i],y[j],dt*n)\n \n u[i,j,n+1] /= 1 + 0.5*b*dt",
"def trajOptRRT(self, state_initial, state_final, goal=False, verbose=False):\n # TODO: reconcile trajOpt and trajOptRRT (shouldn't take long)\n\n # stopwatch for solver time\n tsolve_pre = time.time()\n\n # number of knot points - proportional to x-distance seems to work well\n N = int(max([np.floor(0.8 * np.abs(state_final[0] - state_initial[0])), 6]))\n\n # optimization problem: variables t_f, u[k], x[k]\n mp = MathematicalProgram()\n\n # variable for time to reach goal\n t_f = mp.NewContinuousVariables(1, \"t_f\")\n dt = t_f[0] / N\n\n k = 0\n u = mp.NewContinuousVariables(2, \"u_%d\" % k)\n input_trajectory = u\n\n x = mp.NewContinuousVariables(6, \"x_%d\" % k)\n state_trajectory = x\n\n for k in range(1, N):\n u = mp.NewContinuousVariables(2, \"u_%d\" % k)\n x = mp.NewContinuousVariables(6, \"x_%d\" % k)\n input_trajectory = np.vstack((input_trajectory, u))\n state_trajectory = np.vstack((state_trajectory, x))\n\n x = mp.NewContinuousVariables(6, \"x_%d\" % N)\n state_trajectory = np.vstack((state_trajectory, x))\n\n if verbose:\n print \"Number of decision vars\", mp.num_vars()\n\n # cost function: penalize electric energy use and overall control effort\n thrust = input_trajectory[:, 0]\n elev = input_trajectory[:, 1]\n vel = state_trajectory[:, 2]\n allvars = np.hstack((t_f[0], thrust, elev, vel))\n def totalcost(X):\n dt = X[0] / N\n u0 = X[1:N + 1]\n u1 = X[N + 1:2 * N + 1]\n v = X[2 * N + 1:3 * N + 1]\n return dt * (1.0 * u0.dot(u0) + 1.0 * u1.dot(u1) + 10.0 * X[0] * (u0.dot(v)))\n\n mp.AddCost(totalcost, allvars)\n\n # initial state constraint\n for i in range(len(state_initial)):\n mp.AddLinearConstraint(state_trajectory[0, i] == state_initial[i])\n\n # final state constraints\n if goal:\n # final state constraint (x position)\n mp.AddLinearConstraint(state_trajectory[-1, 0] == 0.0)\n\n # final state constraint (z position) NOTE: range is acceptable\n mp.AddLinearConstraint(state_trajectory[-1, 1] <= 1.5)\n mp.AddLinearConstraint(state_trajectory[-1, 1] >= 0.5)\n\n # final state constraint (velocity) NOTE: range is acceptable\n mp.AddLinearConstraint(state_trajectory[-1, 2] <= 9.0)\n mp.AddLinearConstraint(state_trajectory[-1, 2] >= 6.0)\n\n # final state constraint (flight path angle) NOTE: small range here\n mp.AddLinearConstraint(state_trajectory[-1, 3] <= 1.0 * np.pi / 180.0)\n mp.AddLinearConstraint(state_trajectory[-1, 3] >= - 1.0 * np.pi / 180.0)\n\n # final state constraint (pitch rate)\n mp.AddLinearConstraint(state_trajectory[-1, 5] == 0.0)\n else:\n for i in range(len(state_initial)):\n mp.AddLinearConstraint(state_trajectory[-1, i] == state_final[i])\n\n # input constraints\n for i in range(len(input_trajectory[:, 0])):\n mp.AddLinearConstraint(input_trajectory[i, 0] >= 0.0)\n mp.AddLinearConstraint(input_trajectory[i, 0] <= 1.2 * self.m * self.g)\n mp.AddLinearConstraint(input_trajectory[i, 1] >= -30.0)\n mp.AddLinearConstraint(input_trajectory[i, 1] <= 30.0)\n\n # state constraints\n for i in range(len(state_trajectory[:, 0])):\n # x position\n mp.AddLinearConstraint(state_trajectory[i, 0] >= state_initial[0])\n mp.AddLinearConstraint(state_trajectory[i, 0] <= state_final[0])\n # z position\n mp.AddLinearConstraint(state_trajectory[i, 1] >= 0.3)\n mp.AddLinearConstraint(state_trajectory[i, 1] <= 2.0)\n # velocity\n mp.AddLinearConstraint(state_trajectory[i, 2] >= 2.0)\n mp.AddLinearConstraint(state_trajectory[i, 2] <= 18.0)\n # flight path angle\n mp.AddLinearConstraint(state_trajectory[i, 3] >= -30.0 * np.pi / 180.0)\n mp.AddLinearConstraint(state_trajectory[i, 
3] <= 30.0 * np.pi / 180.0)\n # pitch angle\n mp.AddLinearConstraint(state_trajectory[i, 4] >= -20.0 * np.pi / 180.0)\n mp.AddLinearConstraint(state_trajectory[i, 4] <= 40.0 * np.pi / 180.0)\n # pitch rate\n mp.AddLinearConstraint(state_trajectory[i, 5] >= -20.0 * np.pi / 180.0)\n mp.AddLinearConstraint(state_trajectory[i, 5] <= 20.0 * np.pi / 180.0)\n\n # dynamic constraints (direct transcription)\n for j in range(1, N + 1):\n dynamic_prop = dt * self.airplaneLongDynamics(state_trajectory[j - 1, :], input_trajectory[j - 1, :])\n for k in range(len(state_initial)):\n mp.AddConstraint(state_trajectory[j, k] == state_trajectory[j - 1, k] + dynamic_prop[k])\n\n # initial guess for time\n t_guess = np.abs(state_final[0] - state_initial[0]) / (0.5 * (state_final[2] + state_initial[2]))\n mp.SetInitialGuess(t_f[0], t_guess)\n\n # initial guesses for state\n if goal:\n state_final_dummy = np.array(state_final)\n state_final_dummy[1] = state_initial[1]\n state_final_dummy[4] = state_initial[4]\n for i in range(len(state_trajectory[:, 0])):\n state_guess = ((N - i) / N) * state_initial + (i / N) * state_final_dummy\n for j in range(len(state_guess)):\n mp.SetInitialGuess(state_trajectory[i, j], state_guess[j])\n else:\n for i in range(len(state_trajectory[:, 0])):\n state_guess = ((N - i) / N) * state_initial + (i / N) * state_final\n for j in range(len(state_guess)):\n mp.SetInitialGuess(state_trajectory[i, j], state_guess[j])\n\n # initial guesses for input\n for i in range(N):\n mp.SetInitialGuess(input_trajectory[i, 0], self.m * self.g / 3.5)\n mp.SetInitialGuess(input_trajectory[i, 1], 0.01)\n\n # time constraints\n mp.AddLinearConstraint(t_f[0] <= 2.0 * t_guess)\n mp.AddLinearConstraint(t_f[0] >= 0.5 * t_guess)\n\n # set SNOPT iteration limit\n it_limit = int(max(20000, 40 * mp.num_vars()))\n mp.SetSolverOption(SolverType.kSnopt, 'Iterations limit', it_limit)\n\n if verbose:\n print(\"** solver begin with N = %d **\" % N)\n # solve nonlinear optimization problem (w/SNOPT)\n result = mp.Solve()\n if verbose:\n print result\n\n # convert from symbolic to float\n utraj = mp.GetSolution(input_trajectory)\n t_f = mp.GetSolution(t_f)\n xtraj = mp.GetSolution(state_trajectory)\n ttraj = t_f[0] * np.linspace(0.0, 1.0, (N + 1))\n\n tsolve_post = time.time()\n tsolve = tsolve_post - tsolve_pre\n\n solver_id = mp.GetSolverId()\n\n if verbose:\n print (\"** %s solver finished in %.1f seconds **\\n\" % (solver_id.name(), tsolve))\n print (\"t_f computed: %.3f seconds\" % t_f[0])\n\n cost = -1\n # get total cost of solution\n if result == SolutionResult.kSolutionFound:\n thrust = utraj[:, 0]\n elev = utraj[:, 1]\n vel = xtraj[:, 2]\n allvars = np.hstack((t_f[0], thrust, elev, vel))\n cost = totalcost(allvars)\n if verbose:\n print (\"cost computed: %.3f\" % cost)\n\n return utraj, xtraj, ttraj, result, cost",
"def res_ode(self, t, r, u):\n return self.gamma * (-1 * r + self.activ_f(self.res @ r + self.sigma * self.W_in @ u(t)))",
"def coeffs_rcut(self, rr, rcut):\n j2less = np.where(rr<rcut)\n rr_wh = rr[j2less]\n ir2c = np.zeros(tuple([6])+rr_wh.shape[:])\n #print(__name__, i2less[0].shape)\n \n lr = np.ma.log(rr_wh)\n j2k = np.zeros(lr.shape, dtype=np.int32)\n j2k[...] = (lr-self.gammin_jt)/self.dg_jt-2\n #print(__name__, 'r2r 1', r2k)\n \n j2k = np.where(j2k<0,0,j2k)\n j2k = np.where(j2k>self.nr-6,self.nr-6,j2k)\n hp = self.gg[0]*0.5\n j2k = np.where(rr_wh<hp, 0, j2k)\n #print('r2k 2 ', r2k)\n \n dy = (lr-self.gammin_jt-(j2k+2)*self.dg_jt)/self.dg_jt\n dy2 = dy**2\n dydy2m1 = dy*(dy2-1.0)\n dy2m4dym3 = (dy2-4.0)*(dy-3.0)\n \n ir2c[0] = np.where(rr_wh<hp, 120.0, -dydy2m1*(dy-2.0)*(dy-3.0))\n ir2c[1] = np.where(rr_wh<hp, 0.0, + 5.0* dy* (dy-1.0)*dy2m4dym3)\n ir2c[2] = np.where(rr_wh<hp, 0.0, -10.0* (dy2-1.0)*dy2m4dym3)\n ir2c[3] = np.where(rr_wh<hp, 0.0, +10.0* dy* (dy+1.0)*dy2m4dym3)\n ir2c[4] = np.where(rr_wh<hp, 0.0, -5.0* dydy2m1*(dy+2.0)*(dy-3.0))\n ir2c[5] = np.where(rr_wh<hp, 0.0, dydy2m1*(dy2-4.0))\n ir2c = ir2c / 120.0\n return j2less,j2k,ir2c",
"def jacobian(self, x):\n return self.jnz",
"def test_jacobian_is_none_single(self):\n\n tangent = np.array([[1.0, 2.0], [3.0, 4.0]])\n jac = None\n\n jvp = qml.gradients.compute_jvp_single(tangent, jac)\n assert jvp is None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tries to use the configuration to get the principal. If this fails with an exception, the client was not configured correctly, so this is a good way to check for that.
|
def try_configuration(self) -> None:
with self.context():
kerberos.getServerPrincipalDetails(self.service, self.hostname)
|
[
"def _get_credentials():\n if not CONFIG:\n raise ConfigError(\"Configuration is not passed\")\n\n try:\n return CONFIG[\"credentials\"]\n except KeyError:\n raise ConfigError(\"Credentials configurations are missing from config\")",
"def authenticate_client(self):\n client_params = self.parse_basic_auth_header()\n if client_params:\n # authenticate the client if client authentication is included\n client_id, client_secret = client_params\n client = self.get_and_validate_client(client_id)\n # Client secrets are stored as hash.\n hashed = client.client_secret\n if (\n bcrypt.hashpw(\n client_secret.encode(\"utf-8\"), hashed.encode(\"utf-8\")\n ).decode(\"utf-8\")\n != hashed\n ):\n raise InvalidClientError(uri=self.uri)\n\n return client\n\n # require client authentication for confidential clients or for any\n # client that was issued client credentials (or with other\n # authentication requirements)\n client_id = self.params.get(\"client_id\")\n client = self.get_and_validate_client(client_id)\n if client.check_client_type(\"confidential\") or client.client_secret:\n raise UnauthorizedClientError(uri=self.uri)\n\n return client",
"def principalForUser(user):",
"def _introspect(self, access_token):\n principal = None\n try:\n principal = self._openid_client.introspect(access_token)\n except KeycloakError as e:\n LOG.debug(\"could not introspect token: {}\".format(str(e)))\n\n return principal",
"def authenticate_and_get_user():\n try:\n check_auth(None, None, None)\n return get_current_user()\n except (AssertionError, BadRequest, PreconditionFailed, Unauthorized):\n return None",
"def principalForAuthID(self, creds):\n # XXX either move this to CalendarServer entirely or document it on\n # IDAVPrincipalCollectionResource\n return self.principalForShortName('users', creds.username)",
"def pick_config(self, issuer):\n return self.client_configs[issuer]",
"def try_get_creds(cloud):\n if not path.isfile(cred_path):\n return None\n\n existing_creds = yaml.safe_load(open(cred_path))\n if 'credentials' not in existing_creds:\n return None\n\n if cloud not in existing_creds['credentials'].keys():\n return None\n\n if len(existing_creds['credentials'][cloud].keys()) == 0:\n return None\n\n if 'default-credential' in existing_creds['credentials'][cloud]:\n return existing_creds['credentials'][cloud]['default-credential']\n\n # XXX we should really prompt to select because this is non-deterministic\n for k, v in existing_creds['credentials'][cloud].items():\n if 'default-region' in k:\n app.current_region = v\n else:\n return k",
"def principal_object_id(self) -> Optional[str]:\n return pulumi.get(self, \"principal_object_id\")",
"def _direct_authenticate(self):\n _logger.debug('%s', where_am_i())\n try:\n self.oci_config = self._read_oci_config(fname=self.config_file, profile=self.config_profile)\n self._identity_client = oci_sdk.identity.IdentityClient(self.oci_config)\n except Exception as e:\n _logger.debug('Direct authentication failed: %s', str(e))\n raise Exception(\"Direct authentication failed\") from e",
"def getUserClient(self, auth):\n if auth.admin:\n userId = ANY_WRITER\n elif auth.userId < 0:\n userId = ANONYMOUS\n else:\n userId = auth.userId\n client = self.getClient(userId)\n if auth.username:\n client.cfg.name = auth.username\n client.cfg.contact = auth.fullName or ''\n return client",
"def service_principal_get(sp_name: str, sp_dir: str = '', tenant: str = '') -> ServicePrincipal:\n jsonstr: str = ''\n # Full filepath to service principal data\n if not is_valid_resource_name(sp_name):\n LOG.error('\"sp_name\" parameter expected as valid resource name')\n sh.fail_process()\n # Gather login info from service principal\n if sp_dir:\n LOG.debug('gathering service principal credentials from file...')\n sp_path = sh.join_path(sh.expand_path(sp_dir), f'{sp_name}.json')\n jsonstr = sh.read_file(sp_path)\n else:\n LOG.debug('gathering service principal from Azure...')\n if tenant:\n command = ['az', 'ad', 'sp', 'show', f'--id=https://{tenant}.onmicrosoft.com/{sp_name}']\n else:\n command = ['az', 'ad', 'sp', 'show', f'--id=http://{sp_name}']\n sh.print_command(command)\n process = sh.run_subprocess(command)\n # sh.log_subprocess(LOG, process, debug=ARGS.debug)\n if process.returncode != 0:\n return ServicePrincipal()\n jsonstr = process.stdout\n # LOG.debug(f'jsonstr: {jsonstr}')\n service_principal: ServicePrincipal = json_to_dataclass(jsonstr, ServicePrincipal)\n # LOG.debug(f'service_principal: {service_principal}')\n return service_principal",
"def get_principal(event):\n user_identity = get_user_identity(event)\n return user_identity.get('principalId', '').split(':')[-1]",
"def configuration_account(self):\n if \"configurationAccount\" in self._prop_dict:\n return self._prop_dict[\"configurationAccount\"]\n else:\n return None",
"def _resolve_client_endpoint(self):\n return self.client_endpoint or self._client_endpoint_template.format(domain=self.domain)",
"def get_config_for_principal(self, auth_data):\n # If there is no auth_data yet, then we can't write our config.\n if not auth_data:\n return {}\n # If the state from the assess_status is not None then we're blocked,\n # so don't send any config to the principal.\n state, message = self.custom_assess_status_check()\n if state:\n return {}\n options = self.options # tiny optimisation for less typing.\n\n # If there is no backend name, then we can't send the data yet as the\n # manila-charm won't know what to do with it.\n if not options.share_backend_name:\n return {}\n\n # We have the auth data & the config is reasonably sensible.\n # We can try and render the config file segment.\n # TODO this is horrible, and we should have something in\n # charms.openstack to do this, but we need a c.r relation to be able to\n # add it to the adapters_instance\n manila_plugin = relations.endpoint_from_flag('manila-plugin.available')\n self.adapters_instance.add_relation(manila_plugin)\n rendered_configs = charmhelpers.core.templating.render(\n source=os.path.basename(MANILA_CONF),\n template_loader=os_templating.get_loader(\n 'templates/', self.release),\n target=None,\n context=self.adapters_instance)\n\n return {\n MANILA_CONF: rendered_configs\n }",
"def get_current_user(self):\n admin_cookie = self.get_secure_cookie(COOKIE_NAME)\n try:\n if admin_cookie:\n try:\n (user, expires) = json.loads(admin_cookie)\n except ValueError:\n # Old cookie format; delete it.\n self.clear_cookie(COOKIE_NAME)\n return None\n if expires > time.time():\n self._auth_user = user\n return self._auth_user\n except Exception:\n logging.exception('cannot authenticate admin access')\n return None",
"def _setup_client(self, create=False, container=None):\n\n if container is None:\n container = self.args.container\n\n try:\n values = self.conf.get_container(container)\n except ValueError as ex:\n self.log.error(ex)\n return (None, None)\n\n auth = dict(authurl = self.args.authurl,\n user = values['username'],\n key = values['password'],\n )\n\n if self.args.keystone:\n try:\n from keystoneclient.v2_0 import client as _check_for_ksclient\n except ImportError:\n sys.exit(\"auth 2.0 (keystone) requires python-keystoneclient\")\n else:\n self.log.debug(\"using auth 2.0 (keystone)\")\n\n if self.args.keystone_separator not in values['username']:\n self.log.error(\"%s: separator not found in %r\" % (container, values['username']))\n return (None, None)\n\n keystone_auth = values['username'].split(self.args.keystone_separator, 1)\n auth['tenant_name'], auth['user'] = keystone_auth\n auth['auth_version'] = '2.0'\n auth['os_options'] = dict(service_type = self.args.keystone_service,\n endpoint_type = self.args.keystone_endpoint,\n region_name = self.args.keystone_region,\n )\n self.log.debug(\"os_options: %r\" % auth['os_options'])\n\n self.auth = auth\n cli = client.Connection(**auth)\n\n try:\n headers, _ = cli.get_container(container)\n except (socket.error, client.ClientException) as ex:\n if getattr(ex, 'http_status', None) == 404:\n if create:\n self.log.warning(\"%s doesn't exist, will be created\" % container)\n return (cli, dict())\n else:\n self.log.error(\"%s doesn't exist\" % container)\n else:\n self.log.error(ex)\n return (None, None)\n\n self.log.debug(headers)\n\n meta = getMeta(headers)\n self.log.debug(\"Meta: %s\" % meta)\n\n if not meta:\n self.log.error(\"%s hasn't been setup to be used with swiftnbd\" % container)\n return (None, None)\n\n return (cli, meta)",
"def _get_provider(self, canonical_name):\n provider = self._get_provider_by_canonical_name(canonical_name)\n\n # The AssumeRole provider should really be part of the SharedConfig\n # provider rather than being its own thing, but it is not. It is\n # effectively part of both the SharedConfig provider and the\n # SharedCredentials provider now due to the way it behaves.\n # Therefore if we want either of those providers we should return\n # the AssumeRole provider with it.\n if canonical_name.lower() in ['sharedconfig', 'sharedcredentials']:\n assume_role_provider = self._get_provider_by_method('assume-role')\n if assume_role_provider is not None:\n # The SharedConfig or SharedCredentials provider may not be\n # present if it was removed for some reason, but the\n # AssumeRole provider could still be present. In that case,\n # return the assume role provider by itself.\n if provider is None:\n return assume_role_provider\n\n # If both are present, return them both as a\n # CredentialResolver so that calling code can treat them as\n # a single entity.\n return AioCredentialResolver([assume_role_provider, provider])\n\n if provider is None:\n raise UnknownCredentialError(name=canonical_name)\n\n return provider"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Runs the block inside the context manager with the keytab set to the provider's keytab. All functions that interact with kerberos must be run inside this context. For convenience, this context returns the kerberos module when invoked.
|
def context(self) -> 'Iterator[None]':
    previous = os.environ.pop('KRB5_KTNAME', None)
    os.environ['KRB5_KTNAME'] = self.keytab
    try:
        yield
    finally:
        # Restore the previous keytab, or unset the variable if none was set before.
        if previous is not None:
            os.environ['KRB5_KTNAME'] = previous
        else:
            os.environ.pop('KRB5_KTNAME', None)
|
[
"def kerberos_http_auth(self):\n\n try:\n r = None\n if self.version == 7:\n r = requests.get(\n \"{}://{}:{}/api/v40/cm/kerberosPrincipals\".format(\n self.http,\n self.cloudera_manager_host_ip,\n self.cloudera_manager_port,\n ),\n auth=HTTPBasicAuth(\n self.cloudera_manager_username, self.cloudera_manager_password\n ),\n verify=False,\n )\n elif self.version == 6:\n r = requests.get(\n \"{}://{}:{}/api/v30/cm/kerberosPrincipals\".format(\n self.http,\n self.cloudera_manager_host_ip,\n self.cloudera_manager_port,\n ),\n auth=HTTPBasicAuth(\n self.cloudera_manager_username, self.cloudera_manager_password\n ),\n verify=False,\n )\n elif self.version == 5:\n r = requests.get(\n \"{}://{}:{}/api/v18/cm/kerberosPrincipals\".format(\n self.http,\n self.cloudera_manager_host_ip,\n self.cloudera_manager_port,\n ),\n auth=HTTPBasicAuth(\n self.cloudera_manager_username, self.cloudera_manager_password\n ),\n verify=False,\n )\n else:\n self.logger.error(\"kerberos_http_auth as cloudera does not exist\")\n r = None\n if r.status_code == 200:\n keytab1 = r.json()\n if len(keytab1[\"items\"]) > 0:\n keytab = \"keytab exist\"\n else:\n keytab = \"keytab not exist\"\n keytab1 = keytab1[\"items\"]\n new_list = []\n for i in range(0, len(keytab1)):\n dt = keytab1[i].split(\"/\", 1)\n neww_list = new_list.append(dt[0])\n new_list = [x.lower() for x in new_list]\n\n if \"hue\" in new_list:\n hue_flag = \"Kerberos on hue is enabled\"\n else:\n hue_flag = \"Kerberos on hue is not enabled\"\n\n if \"yarn\" in new_list:\n yarn_flag = \"Kerberos on yarn is enabled\"\n else:\n yarn_flag = \"Kerberos on yarn is not enabled\"\n\n if \"mapred\" in new_list:\n mapred_flag = \"Kerberos on mapreduce is enabled\"\n else:\n mapred_flag = \"Kerberos on mapreduce is not enabled\"\n\n if \"hdfs\" in new_list:\n hdfs_flag = \"Kerberos on HDFS is enabled\"\n else:\n hdfs_flag = \"Kerberos on HDFS is not enabled\"\n\n self.logger.info(\"kerberos_http_auth successful\")\n return hue_flag, mapred_flag, hdfs_flag, yarn_flag, keytab\n else:\n self.logger.error(\n \"kerberos_http_auth failed due to invalid API call. HTTP Response: \"\n + str(r.status_code)\n )\n return None\n except Exception as e:\n self.logger.error(\"kerberos_http_auth failed\", exc_info=True)\n return None",
"def _prepare_krb(tm_env, container_dir, root_dir, app):\n etc_dir = os.path.join(container_dir, 'overlay', 'etc')\n fs.mkdir_safe(etc_dir)\n kt_dest = os.path.join(etc_dir, 'krb5.keytab')\n kt_sources = glob.glob(os.path.join(tm_env.spool_dir, 'keytabs', 'host#*'))\n keytabs.make_keytab(kt_dest, kt_sources)\n\n for kt_spec in app.keytabs:\n if ':' in kt_spec:\n owner, princ = kt_spec.split(':', 1)\n else:\n owner = kt_spec\n princ = kt_spec\n\n kt_dest = os.path.join(root_dir, 'var', 'spool', 'keytabs', owner)\n kt_sources = glob.glob(os.path.join(tm_env.spool_dir, 'keytabs',\n '%s#*' % princ))\n keytabs.make_keytab(kt_dest, kt_sources, owner)",
"def test_keystone_kerberos_authentication(self):\n logging.info('Retrieving a kerberos token with kinit for admin user')\n\n ubuntu_test_host = zaza.model.get_units('ubuntu-test-host')[0]\n result = zaza.model.run_on_unit(ubuntu_test_host.name,\n \"echo password123 | kinit admin\")\n assert result['Code'] == '0', result['Stderr']\n\n logging.info('Changing token mod for user access')\n result = zaza.model.run_on_unit(\n ubuntu_test_host.name,\n \"sudo install -m 777 /tmp/krb5cc_0 /tmp/krb5cc_1000\"\n )\n assert result['Code'] == '0', result['Stderr']\n\n logging.info('Fetching user/project info in OpenStack')\n domain_name = 'k8s'\n project_name = 'k8s'\n keystone_session = openstack_utils.get_overcloud_keystone_session()\n keystone_client = openstack_utils.get_keystone_session_client(\n keystone_session)\n domain_id = keystone_client.domains.find(name=domain_name).id\n project_id = keystone_client.projects.find(name=project_name).id\n keystone_hostname = get_unit_full_hostname('keystone')\n\n logging.info('Retrieving an OpenStack token to validate auth')\n cmd = 'openstack token issue -f value -c id ' \\\n '--os-auth-url http://{}:5000/krb/v3 ' \\\n '--os-project-id {} ' \\\n '--os-project-name {} ' \\\n '--os-project-domain-id {} ' \\\n '--os-region-name RegionOne ' \\\n '--os-interface public ' \\\n '--os-identity-api-version 3 ' \\\n '--os-auth-type v3kerberos'.format(keystone_hostname,\n project_id,\n project_name,\n domain_id)\n\n result = zaza.model.run_on_unit(ubuntu_test_host.name, cmd)\n assert result['Code'] == '0', result['Stderr']",
"def __get_token_gss(self):\n if not EXTRA_MODULES['requests_kerberos']:\n raise MissingModuleException('The requests-kerberos module is not installed.')\n\n url = build_url(self.auth_host, path='auth/gss')\n\n result = self._send_request(url, get_token=True, auth=HTTPKerberosAuth())\n\n if not result:\n self.logger.error('Cannot retrieve authentication token!')\n return False\n\n if result.status_code != codes.ok: # pylint: disable-msg=E1101\n exc_cls, exc_msg = self._get_exception(headers=result.headers,\n status_code=result.status_code,\n data=result.content)\n raise exc_cls(exc_msg)\n\n self.auth_token = result.headers['x-rucio-auth-token']\n return True",
"def __enter__(self):\n self.group = AppConfGroupContext()\n return self.group",
"def authenticateKerberos(user, pwd):\n try:\n from sys import platform\n cmd = [\"kinit\", user]\n if platform == 'darwin':\n cmd = [\"kinit\", \"--password-file=STDIN\", user]\n\n procKinit = Popen(cmd, stdin=PIPE, stdout=PIPE)\n procKinit.stdin.write((\"%s\\n\" % pwd).encode('utf-8'))\n rcKinit = procKinit.wait()\n log.debug(\"kinit rc: %d\" % rcKinit)\n return (rcKinit == 0)\n except OSError as exp:\n log.debug(\"could not find kinit...\")\n log.debug(exp)\n return False",
"def testkeychain():\n\n # For the same reasons as in tests/test_plugin.py, all imports that trigger\n # HTTPie importing must be postponed till one of our fixtures is evaluated\n # and patched a path to HTTPie configuration.\n from httpie_credential_store import _keychain\n\n return _keychain.SystemKeychain()",
"def keyring_backend():\n\n prev_backend = keyring.get_keyring()\n keyring.set_keyring(_InmemoryKeyring())\n yield keyring.get_keyring()\n keyring.set_keyring(prev_backend)",
"def init_app(app):\n hostname = app.config.get(\"SERVER_NAME\")\n if not hostname:\n hostname = getfqdn()\n log.info(\"Kerberos: hostname %s\", hostname)\n\n service = \"airflow\"\n\n _KERBEROS_SERVICE.service_name = f\"{service}@{hostname}\"\n\n if \"KRB5_KTNAME\" not in os.environ:\n os.environ[\"KRB5_KTNAME\"] = conf.get(\"kerberos\", \"keytab\")\n\n try:\n log.info(\"Kerberos init: %s %s\", service, hostname)\n principal = kerberos.getServerPrincipalDetails(service, hostname)\n except kerberos.KrbError as err:\n log.warning(\"Kerberos: %s\", err)\n else:\n log.info(\"Kerberos API: server is %s\", principal)",
"async def inner(kernel_name=NATIVE_KERNEL_NAME, **kwargs):\n km, kc = await start_new_async_kernel(kernel_name=kernel_name, **kwargs)\n kms.append(km)\n kcs.append(kc)\n return km, kc",
"def auth_continue_krb(self, tgt_auth_challenge):\n _LOGGER.debug(\"tgt_auth challenge: %s\", tgt_auth_challenge)\n\n resp = self.ctx.step(tgt_auth_challenge)\n _LOGGER.debug(\"# context step response: %s\", resp)\n _LOGGER.debug(\"# context completed?: %s\", self.ctx.complete)\n\n return resp, self.ctx.complete",
"def switch_to_provider_for_test(request):\n switched_to_provider = False\n current_cluster = config.cluster_ctx\n if (\n request.node.get_closest_marker(\"runs_on_provider\")\n and config.multicluster\n and current_cluster.ENV_DATA.get(\"platform\", \"\").lower()\n in constants.MANAGED_SERVICE_PLATFORMS\n ):\n for cluster in config.clusters:\n if cluster.ENV_DATA.get(\"cluster_type\") == \"provider\":\n provider_cluster = cluster\n log.debug(\"Switching to the provider cluster context\")\n # TODO: Use 'switch_to_provider' function introduced in PR 5541\n config.switch_ctx(provider_cluster.MULTICLUSTER[\"multicluster_index\"])\n switched_to_provider = True\n break\n\n def finalizer():\n \"\"\"\n Switch context to the initial cluster\n\n \"\"\"\n if switched_to_provider:\n log.debug(\"Switching back to the previous cluster context\")\n config.switch_ctx(current_cluster.MULTICLUSTER[\"multicluster_index\"])\n\n request.addfinalizer(finalizer)",
"def start_kernel(self):\n km = KernelManager(kernel_name=TEST_KERNEL_NAME)\n km.start_kernel()\n kc = km.client()\n kc.start_channels()\n try:\n kc.wait_for_ready(timeout=60)\n except RuntimeError:\n kc.stop_channels()\n km.shutdown_kernel()\n raise\n\n yield km, kc\n kc.stop_channels()\n km.shutdown_kernel()\n assert km.context.closed",
"def __enter__(self) -> Context:\n return self",
"def __enter__(self):\n logging.debug(\"%s: __enter__\", self.cmd)\n self.start()\n return self",
"def passphrase_handler(spawn, context, session):\n credential = get_current_credential(context=context, session=session)\n try:\n spawn.sendline(to_plaintext(\n context['credentials'][credential]['passphrase']))\n except KeyError:\n raise UniconAuthenticationError(\"No passphrase found \"\n \"for credential {}.\".format(credential))",
"def run_kinit(username, password):\n kinit = Popen(['kinit', username],\n stdin=PIPE, stdout=PIPE, stderr=PIPE)\n kinit.stdin.write('%s\\n' % password)\n\n # The following code handles a corner case where the Kerberos password\n # has expired and a prompt is displayed to enter new password. Ideally,\n # we would want to read from stdout but these are blocked reads. This is\n # a hack to kill the process if it's taking too long!\n\n class Alarm(Exception):\n pass\n\n def signal_handler(signum, frame):\n raise Alarm\n # Set the signal handler and a 1-second alarm\n signal.signal(signal.SIGALRM, signal_handler)\n signal.alarm(1)\n try:\n kinit.wait() # Wait for the child to exit\n signal.alarm(0) # Reset the alarm\n return kinit.returncode # Exit status of child on graceful exit\n except Alarm:\n # Taking too long, kill and return error\n kinit.kill()\n return -1",
"def pyre_interactiveSessionContext(self, context=None):\n # prime the execution context\n context = context or {}\n # grant access to pyre\n context[\"pyre\"] = pyre\n # by default, nothing to do: the shell has already bound me in this context\n return context",
"def run(self):\n\t\ttry:\n\t\t\ttry:\n\t\t\t\tauth = PAM.pam()\n\t\t\t\tauth.start(PamAuthenticator.PAM_SERVICE)\n\t\t\t\tauth.set_item(PAM.PAM_RHOST, self.client_address)\n\t\t\t\tauth.set_item(PAM.PAM_CONV, self.pam_conv)\n\t\t\t\tauth.setUserData(self)\n\t\t\t\tauth.authenticate()\n\t\t\t\tauth.acct_mgmt()\n\t\t\t\tdel auth\n\t\t\t\t# signal success\n\t\t\t\tlogger.info('Authentication succeeded')\n\t\t\t\tself.state = PamAuthenticator.AUTH_OK\n\t\t\t\tself.res = None\n\t\t\texcept Exception, e:\n\t\t\t\tlogger.error('Authentication failed: %s' % e)\n\t\t\t\tself.state = PamAuthenticator.AUTH_FAIL\n\t\t\t\tself.res = protocol.Response_ERROR()\n\t\t\t\tself.res.translatable_text = _('Authentication failed')\n\t\tfinally:\n\t\t\tself.challenge_pending.set()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates code to instantiate a stateful 'Delay' object and provides a reference to that object's output. The name of the stateful object is based upon the passed-in parameters, so if there are multiple places where identical delay functions are referenced, the translated Python file will only maintain one stateful object and reference it multiple times.
|
def add_delay(identifier, delay_input, delay_time, initial_value, order,
subs):
import_modules['functions'].add("Delay")
new_structure = []
py_name = '_delay_%s' % identifier
if len(subs) == 0:
stateful_py_expr = "Delay(lambda: %s, lambda: %s,"\
"lambda: %s, lambda: %s, time_step, '%s')" % (
delay_input, delay_time,
initial_value, order, py_name)
else:
stateful_py_expr = "Delay(_delay_input_%s, lambda: %s, _delay_init_%s,"\
"lambda: %s, time_step, '%s')" % (
identifier, delay_time, identifier,
order, py_name)
# following elements not specified in the model file, but must exist
# create the delay initialization element
new_structure.append({
'py_name': '_delay_init_%s' % identifier,
'real_name': 'Implicit',
'kind': 'setup', # not specified in the model file, but must exist
'py_expr': initial_value,
'subs': subs,
'doc': 'Provides initial conditions for %s function' % identifier,
'unit': 'See docs for %s' % identifier,
'lims': 'None',
'eqn': 'None',
'arguments': ''
})
new_structure.append({
'py_name': '_delay_input_%s' % identifier,
'real_name': 'Implicit',
'kind': 'component',
'doc': 'Provides input for %s function' % identifier,
'subs': subs,
'unit': 'See docs for %s' % identifier,
'lims': 'None',
'eqn': 'None',
'py_expr': delay_input,
'arguments': ''
})
# describe the stateful object
new_structure.append({
'py_name': py_name,
'real_name': 'Delay of %s' % delay_input,
'doc': 'Delay time: %s \n Delay initial value %s \n Delay order %s' % (
delay_time, initial_value, order),
'py_expr': stateful_py_expr,
'unit': 'None',
'lims': 'None',
'eqn': 'None',
'subs': '',
'kind': 'stateful',
'arguments': ''
})
return "%s()" % py_name, new_structure
|
[
"def add_delay_f(identifier, delay_input, delay_time, initial_value):\n import_modules['functions'].add(\"DelayFixed\")\n\n py_name = '_delayfixed_%s' % identifier\n\n stateful_py_expr = \"DelayFixed(lambda: %s, lambda: %s,\"\\\n \"lambda: %s, time_step, '%s')\" % (\n delay_input, delay_time,\n initial_value, py_name)\n\n # describe the stateful object\n stateful = {\n 'py_name': py_name,\n 'real_name': 'Delay fixed of %s' % delay_input,\n 'doc': 'DelayFixed time: %s \\n Delay initial value %s' % (\n delay_time, initial_value),\n 'py_expr': stateful_py_expr,\n 'unit': 'None',\n 'lims': 'None',\n 'eqn': 'None',\n 'subs': '',\n 'kind': 'stateful',\n 'arguments': ''\n }\n\n return \"%s()\" % py_name, [stateful]",
"def add_n_delay(identifier, delay_input, delay_time, initial_value, order,\n subs):\n import_modules['functions'].add(\"DelayN\")\n\n new_structure = []\n py_name = '_delayn_%s' % identifier\n\n if len(subs) == 0:\n stateful_py_expr = \"DelayN(lambda: %s, lambda: %s,\"\\\n \"lambda: %s, lambda: %s, time_step, '%s')\" % (\n delay_input, delay_time,\n initial_value, order, py_name)\n\n else:\n stateful_py_expr = \"DelayN(_delayn_input_%s, lambda: %s,\"\\\n \" _delayn_init_%s, lambda: %s, time_step, '%s')\" % (\n identifier, delay_time, identifier,\n order, py_name)\n\n # following elements not specified in the model file, but must exist\n # create the delay initialization element\n new_structure.append({\n 'py_name': '_delayn_init_%s' % identifier,\n 'real_name': 'Implicit',\n 'kind': 'setup', # not specified in the model file, but must exist\n 'py_expr': initial_value,\n 'subs': subs,\n 'doc': 'Provides initial conditions for %s function' % identifier,\n 'unit': 'See docs for %s' % identifier,\n 'lims': 'None',\n 'eqn': 'None',\n 'arguments': ''\n })\n\n new_structure.append({\n 'py_name': '_delayn_input_%s' % identifier,\n 'real_name': 'Implicit',\n 'kind': 'component',\n 'doc': 'Provides input for %s function' % identifier,\n 'subs': subs,\n 'unit': 'See docs for %s' % identifier,\n 'lims': 'None',\n 'eqn': 'None',\n 'py_expr': delay_input,\n 'arguments': ''\n })\n\n # describe the stateful object\n new_structure.append({\n 'py_name': py_name,\n 'real_name': 'DelayN of %s' % delay_input,\n 'doc': 'DelayN time: %s \\n DelayN initial value %s \\n DelayN order %s' % (\n delay_time, initial_value, order),\n 'py_expr': stateful_py_expr,\n 'unit': 'None',\n 'lims': 'None',\n 'eqn': 'None',\n 'subs': '',\n 'kind': 'stateful',\n 'arguments': ''\n })\n\n return \"%s()\" % py_name, new_structure",
"def init_time_effect(obj, name, parameters=('a', 'c')):\n time_effect_fun = time_effect_funcs[name]\n defaults = time_effect_fun.func_defaults\n\n a, c = parameters\n\n if getattr(obj, a, None) is None:\n setattr(obj, a, defaults[0])\n if getattr(obj, c, None) is None:\n setattr(obj, c, defaults[1])\n\n def time_effect(t):\n a_val, c_val = getattr(obj, a), getattr(obj, c)\n return time_effect_fun(t, a_val, c_val)\n\n return time_effect",
"def ActionDelayWrapper(delay_range_start, delay_range_end):\n\n class ActionDelayWrapper(gym.Wrapper):\n def _step(self, action):\n self._action_buffer.append(action)\n action = self._action_buffer.popleft()\n return self.env.step(action)\n\n def _reset(self):\n self._action_delay = np.random.randint(delay_range_start, delay_range_end)\n self._action_buffer = collections.deque([0 for _ in range(self._action_delay)])\n return self.env.reset()\n\n return ActionDelayWrapper",
"def DelayedInstantiation_kls(kls, *a, **kwd):\n return DelayedInstantiation(kls, kls, *a, **kwd)",
"def update_delay(self, delay):",
"def transition_delay(self, value: TransitionDelay) -> 'Tailwind':\n self.element.classes('delay-' + value)\n return self",
"def delay(source: Observable) -> Observable:\n return observable_delay_timespan(source, duetime, scheduler)",
"def __init__(self, timefunc, delayfunc):\r\n self.queue = []\r\n self.timefunc = timefunc\r\n self.delayfunc = delayfunc",
"def sample_delay(self, *args, **kwargs):\n return _bs_swig.bs_ax25_decoder_sptr_sample_delay(self, *args, **kwargs)",
"def create_translator(self, *args):\r\n translator_class = self.translator_class\r\n return translator_class(*args)",
"def new_delay(*args, **kwargs):\n if self.TIMESTAMP_KWARG not in kwargs:\n kwargs[self.TIMESTAMP_KWARG] = datetime.datetime.utcnow()\n self._original_delay(*args, **kwargs)",
"def DelayedInstantiation(resultant_kls, func, *a, **kwd):\n o = _class_cache.get(resultant_kls, None)\n if o is None:\n o = make_kls(resultant_kls)\n _class_cache[resultant_kls] = o\n return o(resultant_kls, func, *a, **kwd)",
"def __init__(self, dt=dt, failure_modes=[[]]): \n self.dt = dt\n self.t = 0\n self.sys_norm = StateSpace(A,B,C,D)\n\n self.yout = None\n self.last_input = [0,0]\n self.track_out = []\n\n self.failure_modes = failure_modes\n\n self.modes = [self.sys_norm]\n\n # Create failure modes\n if self.failure_modes[0]: # ie the list is not empty\n for mode in self.failure_modes:\n self.modes.append(StateSpace(*mode))\n\n self.current_mode = random.choice(self.modes)\n\n if self.current_mode == self.sys_norm:\n self.state = self.state_gen(impulse=True)\n else:\n self.state = self.state_gen()\n\n self.possibilities = len(self.modes) * 4",
"def sample_delay(self, *args, **kwargs):\n return _bs_swig.ec_descrambler_sync_sptr_sample_delay(self, *args, **kwargs)",
"def create_forwarder(name, classname, parameter):\n classobj = getattr(sys.modules[__name__], classname)\n return classobj(name, parameter)",
"def delayable(f):\n def delay(*args, **kwargs):\n queue_key = current_app.config.get('REDIS_QUEUE_KEY', 'default')\n task_id = '%s:result:%s' % (queue_key, str(uuid4()))\n s = dumps((f, task_id, args, kwargs))\n redis.set(task_id, '')\n redis.rpush(queue_key, s)\n return Task(task_id)\n def get_task(task_id):\n result = Task(task_id)\n return result if result.exists else None\n f.delay = delay\n f.get_task = get_task\n return f",
"def delay(labels, target_delay):\n delay_labels = torch.zeros(target_delay)\n labels = torch.cat((delay_labels, labels))\n\n return labels[: labels.shape[0] - target_delay]",
"def timed_automaton(cls):\n class TimedAutomaton(cls):\n def __init__(self, *args, **kwargs):\n self._ta = ta_base()\n self._template_cached = False\n self._pyuppaal = pyuppaal.Template(cls.__name__)\n super().__init__(*args, **kwargs)\n\n def generate_declarations(self):\n \"\"\"\n Overload this function with a more detailed variant in your TA\n :return:\n \"\"\"\n return f\"clock {', '.join(self.clocks)};\"\n\n def generate_locations(self):\n \"\"\"\n Overload this function with a more detailed variant in your TA\n :return:\n \"\"\"\n locations = [pyuppaal.Location(invariant=self.invariants.get(loc), name=loc) for loc in self.locations]\n return locations\n\n def generate_transitions(self):\n \"\"\"\n Overload this function with a more detailed variant in your TA\n :return:\n \"\"\"\n transitions = [pyuppaal.Transition(source, target, guard=guard) for\n (source, guard, action, select, target) in self.edges]\n return transitions\n\n def assign_initial_location(self, template):\n \"\"\"\n Overload this function with a more detailed variant in your TA\n :return:\n \"\"\"\n try:\n template.initlocation = template.get_location_by_name(self.l0)\n except AssertionError as a:\n logging.debug(f'No initial location matching {self.l0} found in current template')\n\n def create_template(self):\n \"\"\"\n overwrite this function in with a more detailed function\n :return:\n \"\"\"\n locations = self.generate_locations()\n transitions = self.generate_transitions()\n declarations = self.generate_declarations()\n template = pyuppaal.Template(self._pyuppaal.name, declaration=declarations, locations=locations,\n transitions=transitions)\n self.assign_initial_location(template)\n # try:\n # template.layout(auto_nails=True)\n # except AssertionError:\n # pass\n\n return template\n\n @property\n def locations(self):\n return self._ta.locations\n\n @locations.setter\n @outdate_cache\n def locations(self, locations):\n if len(locations) is 0:\n self._ta.locations = set()\n else:\n self._ta.locations.update(locations)\n\n @property\n def l0(self):\n return self._ta.l0\n\n @l0.setter\n @outdate_cache\n def l0(self, initial_location):\n self._ta.l0 = initial_location\n\n @property\n def actions(self):\n return self._ta.actions\n\n @actions.setter\n @outdate_cache\n def actions(self, actions):\n if len(actions) is 0:\n self._ta.actions = set()\n else:\n self._ta.actions.update(actions)\n\n @property\n def clocks(self):\n return self._ta.clocks\n\n @clocks.setter\n @outdate_cache\n def clocks(self, clocks):\n if len(clocks) is 0:\n self._ta.clocks = set()\n else:\n self._ta.clocks.update(clocks)\n\n @property\n def edges(self):\n return self._ta.edges\n\n @edges.setter\n @outdate_cache\n def edges(self, edges):\n if len(edges) is 0:\n self._ta.edges = set()\n else:\n self._ta.edges.update(edges)\n\n @property\n def invariants(self):\n return self._ta.invariants\n\n @invariants.setter\n @outdate_cache\n def invariants(self, invariants):\n if len(invariants) is 0:\n self._ta.invariants = dict()\n else:\n self._ta.invariants.update(invariants)\n\n @property\n def ta(self):\n return self._ta\n\n @property\n def name(self):\n return self._pyuppaal.name\n\n @name.setter\n def name(self, name):\n self._pyuppaal.name = name\n\n @property\n @update_cache\n def template(self):\n return self._pyuppaal\n\n return TimedAutomaton"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates code to instantiate a stateful 'DelayFixed' object and provides a reference to that object's output. The name of the stateful object is based upon the passed-in parameters, so if there are multiple places where identical delay functions are referenced, the translated Python file will only maintain one stateful object and reference it multiple times.
|
def add_delay_f(identifier, delay_input, delay_time, initial_value):
import_modules['functions'].add("DelayFixed")
py_name = '_delayfixed_%s' % identifier
stateful_py_expr = "DelayFixed(lambda: %s, lambda: %s,"\
"lambda: %s, time_step, '%s')" % (
delay_input, delay_time,
initial_value, py_name)
# describe the stateful object
stateful = {
'py_name': py_name,
'real_name': 'Delay fixed of %s' % delay_input,
'doc': 'DelayFixed time: %s \n Delay initial value %s' % (
delay_time, initial_value),
'py_expr': stateful_py_expr,
'unit': 'None',
'lims': 'None',
'eqn': 'None',
'subs': '',
'kind': 'stateful',
'arguments': ''
}
return "%s()" % py_name, [stateful]
|
[
"def add_delay(identifier, delay_input, delay_time, initial_value, order,\n subs):\n import_modules['functions'].add(\"Delay\")\n\n new_structure = []\n py_name = '_delay_%s' % identifier\n\n if len(subs) == 0:\n stateful_py_expr = \"Delay(lambda: %s, lambda: %s,\"\\\n \"lambda: %s, lambda: %s, time_step, '%s')\" % (\n delay_input, delay_time,\n initial_value, order, py_name)\n\n else:\n stateful_py_expr = \"Delay(_delay_input_%s, lambda: %s, _delay_init_%s,\"\\\n \"lambda: %s, time_step, '%s')\" % (\n identifier, delay_time, identifier,\n order, py_name)\n\n # following elements not specified in the model file, but must exist\n # create the delay initialization element\n new_structure.append({\n 'py_name': '_delay_init_%s' % identifier,\n 'real_name': 'Implicit',\n 'kind': 'setup', # not specified in the model file, but must exist\n 'py_expr': initial_value,\n 'subs': subs,\n 'doc': 'Provides initial conditions for %s function' % identifier,\n 'unit': 'See docs for %s' % identifier,\n 'lims': 'None',\n 'eqn': 'None',\n 'arguments': ''\n })\n\n new_structure.append({\n 'py_name': '_delay_input_%s' % identifier,\n 'real_name': 'Implicit',\n 'kind': 'component',\n 'doc': 'Provides input for %s function' % identifier,\n 'subs': subs,\n 'unit': 'See docs for %s' % identifier,\n 'lims': 'None',\n 'eqn': 'None',\n 'py_expr': delay_input,\n 'arguments': ''\n })\n\n # describe the stateful object\n new_structure.append({\n 'py_name': py_name,\n 'real_name': 'Delay of %s' % delay_input,\n 'doc': 'Delay time: %s \\n Delay initial value %s \\n Delay order %s' % (\n delay_time, initial_value, order),\n 'py_expr': stateful_py_expr,\n 'unit': 'None',\n 'lims': 'None',\n 'eqn': 'None',\n 'subs': '',\n 'kind': 'stateful',\n 'arguments': ''\n })\n\n return \"%s()\" % py_name, new_structure",
"def add_n_delay(identifier, delay_input, delay_time, initial_value, order,\n subs):\n import_modules['functions'].add(\"DelayN\")\n\n new_structure = []\n py_name = '_delayn_%s' % identifier\n\n if len(subs) == 0:\n stateful_py_expr = \"DelayN(lambda: %s, lambda: %s,\"\\\n \"lambda: %s, lambda: %s, time_step, '%s')\" % (\n delay_input, delay_time,\n initial_value, order, py_name)\n\n else:\n stateful_py_expr = \"DelayN(_delayn_input_%s, lambda: %s,\"\\\n \" _delayn_init_%s, lambda: %s, time_step, '%s')\" % (\n identifier, delay_time, identifier,\n order, py_name)\n\n # following elements not specified in the model file, but must exist\n # create the delay initialization element\n new_structure.append({\n 'py_name': '_delayn_init_%s' % identifier,\n 'real_name': 'Implicit',\n 'kind': 'setup', # not specified in the model file, but must exist\n 'py_expr': initial_value,\n 'subs': subs,\n 'doc': 'Provides initial conditions for %s function' % identifier,\n 'unit': 'See docs for %s' % identifier,\n 'lims': 'None',\n 'eqn': 'None',\n 'arguments': ''\n })\n\n new_structure.append({\n 'py_name': '_delayn_input_%s' % identifier,\n 'real_name': 'Implicit',\n 'kind': 'component',\n 'doc': 'Provides input for %s function' % identifier,\n 'subs': subs,\n 'unit': 'See docs for %s' % identifier,\n 'lims': 'None',\n 'eqn': 'None',\n 'py_expr': delay_input,\n 'arguments': ''\n })\n\n # describe the stateful object\n new_structure.append({\n 'py_name': py_name,\n 'real_name': 'DelayN of %s' % delay_input,\n 'doc': 'DelayN time: %s \\n DelayN initial value %s \\n DelayN order %s' % (\n delay_time, initial_value, order),\n 'py_expr': stateful_py_expr,\n 'unit': 'None',\n 'lims': 'None',\n 'eqn': 'None',\n 'subs': '',\n 'kind': 'stateful',\n 'arguments': ''\n })\n\n return \"%s()\" % py_name, new_structure",
"def init_time_effect(obj, name, parameters=('a', 'c')):\n time_effect_fun = time_effect_funcs[name]\n defaults = time_effect_fun.func_defaults\n\n a, c = parameters\n\n if getattr(obj, a, None) is None:\n setattr(obj, a, defaults[0])\n if getattr(obj, c, None) is None:\n setattr(obj, c, defaults[1])\n\n def time_effect(t):\n a_val, c_val = getattr(obj, a), getattr(obj, c)\n return time_effect_fun(t, a_val, c_val)\n\n return time_effect",
"def update_delay(self, delay):",
"def ActionDelayWrapper(delay_range_start, delay_range_end):\n\n class ActionDelayWrapper(gym.Wrapper):\n def _step(self, action):\n self._action_buffer.append(action)\n action = self._action_buffer.popleft()\n return self.env.step(action)\n\n def _reset(self):\n self._action_delay = np.random.randint(delay_range_start, delay_range_end)\n self._action_buffer = collections.deque([0 for _ in range(self._action_delay)])\n return self.env.reset()\n\n return ActionDelayWrapper",
"def delayable(f):\n def delay(*args, **kwargs):\n queue_key = current_app.config.get('REDIS_QUEUE_KEY', 'default')\n task_id = '%s:result:%s' % (queue_key, str(uuid4()))\n s = dumps((f, task_id, args, kwargs))\n redis.set(task_id, '')\n redis.rpush(queue_key, s)\n return Task(task_id)\n def get_task(task_id):\n result = Task(task_id)\n return result if result.exists else None\n f.delay = delay\n f.get_task = get_task\n return f",
"def DelayedInstantiation_kls(kls, *a, **kwd):\n return DelayedInstantiation(kls, kls, *a, **kwd)",
"def create_forwarder(name, classname, parameter):\n classobj = getattr(sys.modules[__name__], classname)\n return classobj(name, parameter)",
"def __init__(self, timefunc, delayfunc):\r\n self.queue = []\r\n self.timefunc = timefunc\r\n self.delayfunc = delayfunc",
"def transition_delay(self, value: TransitionDelay) -> 'Tailwind':\n self.element.classes('delay-' + value)\n return self",
"def delay(source: Observable) -> Observable:\n return observable_delay_timespan(source, duetime, scheduler)",
"def task_factory(f):\n return _RTAMonadStack.do(f)",
"def __init__(self, fn_to_add_timeout=None):\n\n self.fn_to_add_timeout = fn_to_add_timeout\n self.delayed_commands = [] # list of tuples in the format (time, function, arguments)",
"def new_delay(*args, **kwargs):\n if self.TIMESTAMP_KWARG not in kwargs:\n kwargs[self.TIMESTAMP_KWARG] = datetime.datetime.utcnow()\n self._original_delay(*args, **kwargs)",
"def make_delay_message(dateStored, timestamp, date, user, stream, topic, message):\n return {\n \"dateStored\": dateStored,\n \"timestamp\": timestamp,\n \"date\": date,\n \"user\": user,\n \"stream\": stream,\n \"topic\": topic,\n \"message\": message\n }",
"def timed_automaton(cls):\n class TimedAutomaton(cls):\n def __init__(self, *args, **kwargs):\n self._ta = ta_base()\n self._template_cached = False\n self._pyuppaal = pyuppaal.Template(cls.__name__)\n super().__init__(*args, **kwargs)\n\n def generate_declarations(self):\n \"\"\"\n Overload this function with a more detailed variant in your TA\n :return:\n \"\"\"\n return f\"clock {', '.join(self.clocks)};\"\n\n def generate_locations(self):\n \"\"\"\n Overload this function with a more detailed variant in your TA\n :return:\n \"\"\"\n locations = [pyuppaal.Location(invariant=self.invariants.get(loc), name=loc) for loc in self.locations]\n return locations\n\n def generate_transitions(self):\n \"\"\"\n Overload this function with a more detailed variant in your TA\n :return:\n \"\"\"\n transitions = [pyuppaal.Transition(source, target, guard=guard) for\n (source, guard, action, select, target) in self.edges]\n return transitions\n\n def assign_initial_location(self, template):\n \"\"\"\n Overload this function with a more detailed variant in your TA\n :return:\n \"\"\"\n try:\n template.initlocation = template.get_location_by_name(self.l0)\n except AssertionError as a:\n logging.debug(f'No initial location matching {self.l0} found in current template')\n\n def create_template(self):\n \"\"\"\n overwrite this function in with a more detailed function\n :return:\n \"\"\"\n locations = self.generate_locations()\n transitions = self.generate_transitions()\n declarations = self.generate_declarations()\n template = pyuppaal.Template(self._pyuppaal.name, declaration=declarations, locations=locations,\n transitions=transitions)\n self.assign_initial_location(template)\n # try:\n # template.layout(auto_nails=True)\n # except AssertionError:\n # pass\n\n return template\n\n @property\n def locations(self):\n return self._ta.locations\n\n @locations.setter\n @outdate_cache\n def locations(self, locations):\n if len(locations) is 0:\n self._ta.locations = set()\n else:\n self._ta.locations.update(locations)\n\n @property\n def l0(self):\n return self._ta.l0\n\n @l0.setter\n @outdate_cache\n def l0(self, initial_location):\n self._ta.l0 = initial_location\n\n @property\n def actions(self):\n return self._ta.actions\n\n @actions.setter\n @outdate_cache\n def actions(self, actions):\n if len(actions) is 0:\n self._ta.actions = set()\n else:\n self._ta.actions.update(actions)\n\n @property\n def clocks(self):\n return self._ta.clocks\n\n @clocks.setter\n @outdate_cache\n def clocks(self, clocks):\n if len(clocks) is 0:\n self._ta.clocks = set()\n else:\n self._ta.clocks.update(clocks)\n\n @property\n def edges(self):\n return self._ta.edges\n\n @edges.setter\n @outdate_cache\n def edges(self, edges):\n if len(edges) is 0:\n self._ta.edges = set()\n else:\n self._ta.edges.update(edges)\n\n @property\n def invariants(self):\n return self._ta.invariants\n\n @invariants.setter\n @outdate_cache\n def invariants(self, invariants):\n if len(invariants) is 0:\n self._ta.invariants = dict()\n else:\n self._ta.invariants.update(invariants)\n\n @property\n def ta(self):\n return self._ta\n\n @property\n def name(self):\n return self._pyuppaal.name\n\n @name.setter\n def name(self, name):\n self._pyuppaal.name = name\n\n @property\n @update_cache\n def template(self):\n return self._pyuppaal\n\n return TimedAutomaton",
"def create_flow(sn, tn, dn):\n return f\"{FLOW_VAR}{sn}{tn}{dn}\"",
"def sample_delay(self, *args, **kwargs):\n return _bs_swig.bs_ax25_decoder_sptr_sample_delay(self, *args, **kwargs)",
"def delay(labels, target_delay):\n delay_labels = torch.zeros(target_delay)\n labels = torch.cat((delay_labels, labels))\n\n return labels[: labels.shape[0] - target_delay]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates code to instantiate a stateful 'DelayN' object and provides a reference to that object's output. The name of the stateful object is based upon the passed-in parameters, so if there are multiple places where identical delay functions are referenced, the translated Python file will only maintain one stateful object and reference it multiple times.
|
def add_n_delay(identifier, delay_input, delay_time, initial_value, order,
subs):
import_modules['functions'].add("DelayN")
new_structure = []
py_name = '_delayn_%s' % identifier
if len(subs) == 0:
stateful_py_expr = "DelayN(lambda: %s, lambda: %s,"\
"lambda: %s, lambda: %s, time_step, '%s')" % (
delay_input, delay_time,
initial_value, order, py_name)
else:
stateful_py_expr = "DelayN(_delayn_input_%s, lambda: %s,"\
" _delayn_init_%s, lambda: %s, time_step, '%s')" % (
identifier, delay_time, identifier,
order, py_name)
# following elements not specified in the model file, but must exist
# create the delay initialization element
new_structure.append({
'py_name': '_delayn_init_%s' % identifier,
'real_name': 'Implicit',
'kind': 'setup', # not specified in the model file, but must exist
'py_expr': initial_value,
'subs': subs,
'doc': 'Provides initial conditions for %s function' % identifier,
'unit': 'See docs for %s' % identifier,
'lims': 'None',
'eqn': 'None',
'arguments': ''
})
new_structure.append({
'py_name': '_delayn_input_%s' % identifier,
'real_name': 'Implicit',
'kind': 'component',
'doc': 'Provides input for %s function' % identifier,
'subs': subs,
'unit': 'See docs for %s' % identifier,
'lims': 'None',
'eqn': 'None',
'py_expr': delay_input,
'arguments': ''
})
# describe the stateful object
new_structure.append({
'py_name': py_name,
'real_name': 'DelayN of %s' % delay_input,
'doc': 'DelayN time: %s \n DelayN initial value %s \n DelayN order %s' % (
delay_time, initial_value, order),
'py_expr': stateful_py_expr,
'unit': 'None',
'lims': 'None',
'eqn': 'None',
'subs': '',
'kind': 'stateful',
'arguments': ''
})
return "%s()" % py_name, new_structure
|
[
"def add_delay(identifier, delay_input, delay_time, initial_value, order,\n subs):\n import_modules['functions'].add(\"Delay\")\n\n new_structure = []\n py_name = '_delay_%s' % identifier\n\n if len(subs) == 0:\n stateful_py_expr = \"Delay(lambda: %s, lambda: %s,\"\\\n \"lambda: %s, lambda: %s, time_step, '%s')\" % (\n delay_input, delay_time,\n initial_value, order, py_name)\n\n else:\n stateful_py_expr = \"Delay(_delay_input_%s, lambda: %s, _delay_init_%s,\"\\\n \"lambda: %s, time_step, '%s')\" % (\n identifier, delay_time, identifier,\n order, py_name)\n\n # following elements not specified in the model file, but must exist\n # create the delay initialization element\n new_structure.append({\n 'py_name': '_delay_init_%s' % identifier,\n 'real_name': 'Implicit',\n 'kind': 'setup', # not specified in the model file, but must exist\n 'py_expr': initial_value,\n 'subs': subs,\n 'doc': 'Provides initial conditions for %s function' % identifier,\n 'unit': 'See docs for %s' % identifier,\n 'lims': 'None',\n 'eqn': 'None',\n 'arguments': ''\n })\n\n new_structure.append({\n 'py_name': '_delay_input_%s' % identifier,\n 'real_name': 'Implicit',\n 'kind': 'component',\n 'doc': 'Provides input for %s function' % identifier,\n 'subs': subs,\n 'unit': 'See docs for %s' % identifier,\n 'lims': 'None',\n 'eqn': 'None',\n 'py_expr': delay_input,\n 'arguments': ''\n })\n\n # describe the stateful object\n new_structure.append({\n 'py_name': py_name,\n 'real_name': 'Delay of %s' % delay_input,\n 'doc': 'Delay time: %s \\n Delay initial value %s \\n Delay order %s' % (\n delay_time, initial_value, order),\n 'py_expr': stateful_py_expr,\n 'unit': 'None',\n 'lims': 'None',\n 'eqn': 'None',\n 'subs': '',\n 'kind': 'stateful',\n 'arguments': ''\n })\n\n return \"%s()\" % py_name, new_structure",
"def add_delay_f(identifier, delay_input, delay_time, initial_value):\n import_modules['functions'].add(\"DelayFixed\")\n\n py_name = '_delayfixed_%s' % identifier\n\n stateful_py_expr = \"DelayFixed(lambda: %s, lambda: %s,\"\\\n \"lambda: %s, time_step, '%s')\" % (\n delay_input, delay_time,\n initial_value, py_name)\n\n # describe the stateful object\n stateful = {\n 'py_name': py_name,\n 'real_name': 'Delay fixed of %s' % delay_input,\n 'doc': 'DelayFixed time: %s \\n Delay initial value %s' % (\n delay_time, initial_value),\n 'py_expr': stateful_py_expr,\n 'unit': 'None',\n 'lims': 'None',\n 'eqn': 'None',\n 'subs': '',\n 'kind': 'stateful',\n 'arguments': ''\n }\n\n return \"%s()\" % py_name, [stateful]",
"def init_time_effect(obj, name, parameters=('a', 'c')):\n time_effect_fun = time_effect_funcs[name]\n defaults = time_effect_fun.func_defaults\n\n a, c = parameters\n\n if getattr(obj, a, None) is None:\n setattr(obj, a, defaults[0])\n if getattr(obj, c, None) is None:\n setattr(obj, c, defaults[1])\n\n def time_effect(t):\n a_val, c_val = getattr(obj, a), getattr(obj, c)\n return time_effect_fun(t, a_val, c_val)\n\n return time_effect",
"def DelayedInstantiation_kls(kls, *a, **kwd):\n return DelayedInstantiation(kls, kls, *a, **kwd)",
"def ActionDelayWrapper(delay_range_start, delay_range_end):\n\n class ActionDelayWrapper(gym.Wrapper):\n def _step(self, action):\n self._action_buffer.append(action)\n action = self._action_buffer.popleft()\n return self.env.step(action)\n\n def _reset(self):\n self._action_delay = np.random.randint(delay_range_start, delay_range_end)\n self._action_buffer = collections.deque([0 for _ in range(self._action_delay)])\n return self.env.reset()\n\n return ActionDelayWrapper",
"def update_delay(self, delay):",
"def sample_delay(self, *args, **kwargs):\n return _bs_swig.bs_ax25_decoder_sptr_sample_delay(self, *args, **kwargs)",
"def delay(source: Observable) -> Observable:\n return observable_delay_timespan(source, duetime, scheduler)",
"def transition_delay(self, value: TransitionDelay) -> 'Tailwind':\n self.element.classes('delay-' + value)\n return self",
"def __init__(self, dt=dt, failure_modes=[[]]): \n self.dt = dt\n self.t = 0\n self.sys_norm = StateSpace(A,B,C,D)\n\n self.yout = None\n self.last_input = [0,0]\n self.track_out = []\n\n self.failure_modes = failure_modes\n\n self.modes = [self.sys_norm]\n\n # Create failure modes\n if self.failure_modes[0]: # ie the list is not empty\n for mode in self.failure_modes:\n self.modes.append(StateSpace(*mode))\n\n self.current_mode = random.choice(self.modes)\n\n if self.current_mode == self.sys_norm:\n self.state = self.state_gen(impulse=True)\n else:\n self.state = self.state_gen()\n\n self.possibilities = len(self.modes) * 4",
"def create_flow(sn, tn, dn):\n return f\"{FLOW_VAR}{sn}{tn}{dn}\"",
"def delay(labels, target_delay):\n delay_labels = torch.zeros(target_delay)\n labels = torch.cat((delay_labels, labels))\n\n return labels[: labels.shape[0] - target_delay]",
"def DelayedInstantiation(resultant_kls, func, *a, **kwd):\n o = _class_cache.get(resultant_kls, None)\n if o is None:\n o = make_kls(resultant_kls)\n _class_cache[resultant_kls] = o\n return o(resultant_kls, func, *a, **kwd)",
"def __init__(self, fn_to_add_timeout=None):\n\n self.fn_to_add_timeout = fn_to_add_timeout\n self.delayed_commands = [] # list of tuples in the format (time, function, arguments)",
"def __init__(self, timefunc, delayfunc):\r\n self.queue = []\r\n self.timefunc = timefunc\r\n self.delayfunc = delayfunc",
"def delayable(f):\n def delay(*args, **kwargs):\n queue_key = current_app.config.get('REDIS_QUEUE_KEY', 'default')\n task_id = '%s:result:%s' % (queue_key, str(uuid4()))\n s = dumps((f, task_id, args, kwargs))\n redis.set(task_id, '')\n redis.rpush(queue_key, s)\n return Task(task_id)\n def get_task(task_id):\n result = Task(task_id)\n return result if result.exists else None\n f.delay = delay\n f.get_task = get_task\n return f",
"def new_random_delay():\n return random.randrange(100, 200, 3)",
"def layer_maker(n_layers, n_nodes, activation, drop=None, d_rate=.5):\r\n \r\n # Creating the specified number of hidden layers with the specified number of nodes\r\n for x in range(1,n_layers+1):\r\n model.add(LSTM(n_nodes, activation=activation, return_sequences=True))\r\n\r\n # Adds a Dropout layer after every Nth hidden layer (the 'drop' variable)\r\n try:\r\n if x % drop == 0:\r\n model.add(Dropout(d_rate))\r\n except:\r\n pass",
"def create_translator(self, *args):\r\n translator_class = self.translator_class\r\n return translator_class(*args)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates code to instantiate a stateful 'SampleIfTrue' object and provides a reference to that object's output.
|
def add_sample_if_true(identifier, condition, actual_value, initial_value):
import_modules['functions'].add("SampleIfTrue")
py_name = '_sample_if_true_%s' % identifier
# describe the stateful object
stateful = {
'py_name': py_name,
'real_name': 'Sample if true of %s' % identifier,
'doc': 'Initial value: %s \n Input: %s \n Condition: %s' % (
initial_value, actual_value, condition),
'py_expr': "SampleIfTrue(lambda: %s, lambda: %s, lambda: %s, '%s')" % (
condition, actual_value, initial_value, py_name),
'unit': 'None',
'lims': 'None',
'eqn': 'None',
'subs': '',
'kind': 'stateful',
'arguments': ''
}
return "%s()" % stateful['py_name'], [stateful]
|
[
"def test_samples_bool_true(base_store: Store, helpers):\n\n # GIVEN sample that is received, prepared, sequenced and delivered\n new_case = add_case(helpers, base_store)\n sample = helpers.add_sample(\n base_store,\n received_at=datetime.now(),\n prepared_at=datetime.now(),\n sequenced_at=datetime.now(),\n delivered_at=datetime.now(),\n )\n assert sample.received_at\n assert sample.prepared_at\n assert sample.sequenced_at\n assert sample.delivered_at\n sample.invoice = base_store.add_invoice(helpers.ensure_customer(base_store))\n sample.invoice.invoiced_at = datetime.now()\n base_store.relate_sample(new_case, sample, \"unknown\")\n\n # WHEN getting active cases\n cases = base_store.cases()\n\n # THEN cases should contain true for all sample booleans\n assert cases\n for case in cases:\n assert case.get(\"samples_received_bool\") is True\n assert case.get(\"samples_prepared_bool\") is True\n assert case.get(\"samples_sequenced_bool\") is True\n assert case.get(\"samples_delivered_bool\") is True\n assert case.get(\"samples_invoiced_bool\") is True",
"def __init__(self, classifier, samples, sample_classif_func):\n self.classifier = classifier\n self.samples = list(samples)\n self.sample_classif_func = sample_classif_func",
"def _sample_conditional(\n Xnew: tf.Tensor,\n inducing_variable: InducingVariables,\n kernel: Kernel,\n f: tf.Tensor,\n *,\n full_cov: bool = False,\n full_output_cov: bool = False,\n q_sqrt: Optional[tf.Tensor] = None,\n white: bool = False,\n num_samples: Optional[int] = None,\n) -> SamplesMeanAndVariance:\n\n if full_cov and full_output_cov:\n msg = \"The combination of both `full_cov` and `full_output_cov` is not permitted.\"\n raise NotImplementedError(msg)\n\n mean, cov = conditional(\n Xnew,\n inducing_variable,\n kernel,\n f,\n q_sqrt=q_sqrt,\n white=white,\n full_cov=full_cov,\n full_output_cov=full_output_cov,\n )\n if full_cov:\n # mean: [..., N, P]\n # cov: [..., P, N, N]\n mean_for_sample = tf.linalg.adjoint(mean) # [..., P, N]\n samples = sample_mvn(\n mean_for_sample, cov, full_cov=True, num_samples=num_samples\n ) # [..., (S), P, N]\n samples = tf.linalg.adjoint(samples) # [..., (S), N, P]\n else:\n # mean: [..., N, P]\n # cov: [..., N, P] or [..., N, P, P]\n samples = sample_mvn(\n mean, cov, full_cov=full_output_cov, num_samples=num_samples\n ) # [..., (S), N, P]\n\n return samples, mean, cov",
"def run_sample():\n from autumn.projects.covid_19.mixing_optimisation.sample_code import run_sample_code\n\n run_sample_code()",
"async def test_template_triggers(hass: HomeAssistant) -> None:\n hass.states.async_set(\"input_boolean.test\", STATE_OFF)\n config = {\n \"binary_sensor\": {\n \"name\": \"Test_Binary\",\n \"platform\": \"bayesian\",\n \"observations\": [\n {\n \"platform\": \"template\",\n \"value_template\": \"{{ states.input_boolean.test.state }}\",\n \"prob_given_true\": 1.0,\n \"prob_given_false\": 0.0,\n },\n ],\n \"prior\": 0.2,\n \"probability_threshold\": 0.32,\n }\n }\n\n await async_setup_component(hass, \"binary_sensor\", config)\n await hass.async_block_till_done()\n\n assert hass.states.get(\"binary_sensor.test_binary\").state == STATE_OFF\n\n events = []\n async_track_state_change_event(\n hass, \"binary_sensor.test_binary\", callback(lambda event: events.append(event))\n )\n\n context = Context()\n hass.states.async_set(\"input_boolean.test\", STATE_ON, context=context)\n await hass.async_block_till_done()\n await hass.async_block_till_done()\n\n assert events[0].context == context",
"def _sample(self, model_output: torch.Tensor) -> torch.Tensor:\n pass",
"def testStatelessIf(self):\n with ops.Graph().as_default():\n with session_lib.Session() as sess:\n input_data = {\"b\": constant_op.constant(True)}\n\n x = constant_op.constant([1., 2.], shape=[1, 2], name=\"x\")\n\n def true_fn():\n return x\n\n def false_fn():\n return x + 2\n\n @def_function.function(input_signature=[\n tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool)\n ])\n def model(b):\n return cond_v2.cond_v2(b, true_fn, false_fn)\n\n root, output_func = self._freezeModel(model)\n self._testConvertedFunction(sess, root, root.f, output_func, input_data)",
"def sampleGiven(self, value):\n assert self.sampler is not None\n return self.sampler.valueFor(self)",
"def create_sample(*args: Any, **kwargs: Any) -> SampleType:\n return cast(SampleType, Sample(*args, **kwargs))",
"def gen_sample(\n self,\n data_sampler: DataSampler,\n element: Any,\n output_index: int,\n transform_id: str = MAIN_TRANSFORM_ID):\n element_sampler = self.data_sampler.sampler_for_output(\n transform_id, output_index).element_sampler\n element_sampler.el = element\n element_sampler.has_element = True",
"def createFor(value: 'SbBool') -> \"ScXMLDataObj *\":\n return _coin.ScXMLBoolDataObj_createFor(value)",
"def trigger_sampler():\n global bottle_capacity\n global aliquot_vol_mL\n global aliquots_in_bottle\n global vol_in_bottle\n global time_last_sample\n ## Set trigger to True\n trigger = True\n\n # DO NOT SAMPLE conditions\n # if aliquots_in_bottle >= bottle_capacity:\n # trigger = False # out of capacity - won't overfill bottle\n # elif is_being_tested():\n # trigger = False # script is being tested\n # elif setup_read(\"Recording\").upper() == \"OFF\":\n # trigger = False # if recording is off, do not sample\n\n # If conditions are met, then trigger the sampler\n if trigger == True:\n print ('Sampler Triggered')\n # increment the number of bottles used\n aliquots_in_bottle += 1\n vol_in_bottle = vol_in_bottle + aliquot_vol_mL\n # update the time of the last trigger\n time_last_sample = utime.time()\n # trigger sampler by pulsing output for 0.5 seconds\n power_control('SW1', True)\n utime.sleep(0.5)\n power_control('SW1', False)\n # write a log entry\n t = utime.localtime(time_scheduled())\n day, minute = str(t[2]), str(t[4])\n if len(day) == 1:\n day = '0' + day\n if len(minute) == 1:\n minute = '0' + minute\n sample_time = str(t[1]) + '/' + day + '/' + str(t[0]) + ' ' + str(t[3]) + ':' + minute\n reading = Reading(label=\"Triggered Sampler\", time=time_scheduled(),\n etype='E', value=aliquots_in_bottle,\n right_digits=0, quality='G') # 'E' = event, 'M' = measurement, 'D' = debug\n reading.write_log()\n ## Write display log entries\n global sample_log\n global bottle_num\n global sample_pacing\n pacing_units = setup_read(\"M1 Units\")\n sample_log[sample_time] = {'Pacing': '%.0f' % sample_pacing+pacing_units, 'Bottle#': str(int(bottle_num)),\n 'Aliquot#': str(int(aliquots_in_bottle)), 'SampleTime': sample_time}\n return True\n # If conditions are NOT met, then DONOT trigger the sampler\n else:\n return False # Sampler was NOT triggered.",
"def ScXMLBoolDataObj_createFor(value: 'SbBool') -> \"ScXMLDataObj *\":\n return _coin.ScXMLBoolDataObj_createFor(value)",
"def sample_goal(self):\n #TODO: We don't need this\n raise NotImplementedError",
"def sample_outcome(self, state: State, action: Action):\n pass",
"def sample_from_target_function(samples):\n return target_function(samples)",
"def testIf(self):\n input_data = {\n \"x\": constant_op.constant([1., 2.], shape=[1, 2]),\n \"b\": constant_op.constant(True)\n }\n\n weights = variables.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=dtypes.float32)\n\n def true_fn(x):\n return math_ops.matmul(x, weights)\n\n def false_fn(x):\n return math_ops.add(x, weights)\n\n @def_function.function(input_signature=[\n tensor_spec.TensorSpec(shape=[1, 2], dtype=dtypes.float32),\n tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool)\n ])\n def model(x, b):\n return cond.cond(\n b, true_fn=lambda: true_fn(x), false_fn=lambda: false_fn(x))\n\n root, output_func = self._freezeModel(model)\n self._testConvertedFunction(root, root.f, output_func, input_data)",
"def evaluateSample(self, myInput, samplerType, kwargs):\n Input = self.createNewInput(myInput, samplerType, **kwargs)\n inRun = self._manipulateInput(Input[0])\n returnValue = inRun,self._externalRun(inRun)\n return returnValue",
"def draw_sample(self):\n return self.sample_fn(self.output_components)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Constructs stock and flow chains that implement the calculation of a smoothing function.
|
def add_n_smooth(identifier, smooth_input, smooth_time, initial_value, order,
subs):
import_modules['functions'].add("Smooth")
new_structure = []
py_name = '_smooth_%s' % identifier
if len(subs) == 0:
stateful_py_expr = "Smooth(lambda: %s, lambda: %s,"\
"lambda: %s, lambda: %s, '%s')" % (
smooth_input, smooth_time, initial_value,
order, py_name)
else:
# only need to re-dimension init and input as xarray will take care of other
stateful_py_expr = "Smooth(_smooth_input_%s, lambda: %s,"\
" _smooth_init_%s, lambda: %s, '%s')" % (
identifier, smooth_time, identifier,
order, py_name)
# following elements not specified in the model file, but must exist
# create the delay initialization element
new_structure.append({
'py_name': '_smooth_init_%s' % identifier,
'real_name': 'Implicit',
'kind': 'setup', # not specified in the model file, but must exist
'py_expr': initial_value,
'subs': subs,
'doc': 'Provides initial conditions for %s function' % identifier,
'unit': 'See docs for %s' % identifier,
'lims': 'None',
'eqn': 'None',
'arguments': ''
})
new_structure.append({
'py_name': '_smooth_input_%s' % identifier,
'real_name': 'Implicit',
'kind': 'component',
'doc': 'Provides input for %s function' % identifier,
'subs': subs,
'unit': 'See docs for %s' % identifier,
'lims': 'None',
'eqn': 'None',
'py_expr': smooth_input,
'arguments': ''
})
new_structure.append({
'py_name': py_name,
'real_name': 'Smooth of %s' % smooth_input,
'doc': 'Smooth time: %s \n Smooth initial value %s \n Smooth order %s' % (
smooth_time, initial_value, order),
'py_expr': stateful_py_expr,
'unit': 'None',
'lims': 'None',
'eqn': 'None',
'subs': '',
'kind': 'stateful',
'arguments': ''
})
return "%s()" % py_name, new_structure
|
[
"def smoothCurve(smoothness=float, replaceOriginal=bool, object=bool, nodeState=int, constructionHistory=bool, caching=bool, name=\"string\"):\n pass",
"def _smooth_price_data(self, sigma):\n self.High = features.gaussian_filter(self.High_raw, sigma)\n self.Low = features.gaussian_filter(self.Low_raw, sigma)\n self.Close = features.gaussian_filter(self.Close_raw, sigma)\n self.Open = features.gaussian_filter(self.Open_raw, sigma)\n self.Volume = features.gaussian_filter(self.Volume_raw, sigma)",
"def __init__(self, smoothing_window, points_number):\n self.smoothing_window = smoothing_window\n self.points_number = points_number",
"def __curve_splicing(self):",
"def smooth(processed):\n smoothed = savgol_filter(processed, 45, 6)\n # For future this could be a window that you type the order and the\n # number of points into, and then it will plot it to show you the\n #smooth before moving on\n return smoothed",
"def makeLambda(name, inputDecayDescriptors, inputDaughters, FISHER_D, LAMBDA_V_CHI2, LAMBDA_ADAMASS, LAMBDA_IPCHI2_MAX):\n# \"pi+\" : \"(P>2*GeV) & (MIPCHI2DV(PRIMARY)>9)\",\n# \"p+\" : \"(P>2*GeV) & (MIPCHI2DV(PRIMARY)>9)\"\n# X.CombinationCut = \"(ADAMASS('Lambda0')<50*MeV) & (ADOCACHI2CUT(30, ''))\"\n# X.MotherCut = \"(ADMASS('Lambda0')<35*MeV) & (VFASPF(VCHI2)<30) & (BPVVDCHI2 > 4.)\"\n \n _combCutMass = \"(ADAMASS('Lambda0')<%(LAMBDA_ADAMASS)s)\" % locals()\n \n# not needed because pion and proton are originating in Lambda (long-lived) decay vertex _combCutsPV = \"(AALLSAMEBPV)\"\n\n \n _combCuts = '(' + _combCutMass + ')'\n \n _motherCutFisher = '( (CHILD(MIPDV(PRIMARY),1)*CHILD(MIPDV(PRIMARY),2)/MIPDV(PRIMARY))>%(FISHER_D)s)' % locals()\n _motherCutVtx = '(VFASPF(VCHI2PDOF) < %(LAMBDA_V_CHI2)s)' % locals()\n _motherCutIPCHI2_MAX = '( BPVIPCHI2() < %(LAMBDA_IPCHI2_MAX)s)' % locals()\n \n _motherCuts = '(' + _motherCutFisher + '&' + _motherCutVtx + '&' + _motherCutIPCHI2_MAX + ')'\n \n# print 'inputDecayDescriptors', inputDecayDescriptors\n# print '_combCuts:MakeLambda', _combCuts \n# print '_motherCuts:MakeLambda', _motherCuts\n\n _Lambda = CombineParticles(DecayDescriptors = inputDecayDescriptors,\n CombinationCut = _combCuts,\n MotherCut = _motherCuts)\n return Selection(name,\n Algorithm = _Lambda,\n RequiredSelections = inputDaughters)",
"def create_single_local_smoother(self,sz,spacing):\n from . import module_parameters as pars\n s_m_params = pars.ParameterDict()\n s_m_params['smoother']['type'] = 'gaussian'\n s_m_params['smoother']['gaussian_std'] = self.params['forward_model']['smoother']['deep_smoother'][\n 'deep_network_local_weight_smoothing']\n self.embedded_smoother = SF.SmootherFactory(sz[2:], spacing).create_smoother(s_m_params)",
"def smooth(samples, label, label_smooth=None, alpha=.95, default_value=None, is_valid=None):\n if not label_smooth:\n label_smooth = label + '_smooth'\n\n last_sample = None\n for sample in samples:\n yield smooth1(sample, last_sample, label, label_smooth, alpha, default_value, is_valid)\n last_sample = sample",
"def test_stock_with_callable_flow_and_init(self):\n with mn.model() as m:\n S = mn.stock('S', \n \"\"\"Start at 22 and increase by 1\"\"\",\n lambda: 1, (), lambda: 22, ())\n self.assertEqual(S[''], 22)\n m.step()\n self.assertEqual(S[''], 23)\n m.step()\n self.assertEqual(S[''], 24)\n m.reset()\n self.assertEqual(S[''], 22)\n m.step(3)\n self.assertEqual(S[''], 25)\n self.assertEqual(S.__doc__, 'Start at 22 and increase by 1')",
"def __init__(self, ma_slow: int, ma_fast: int, target_profit_arg: float, traded_amount: float, is_best_px_calc: bool):\n self.__strategy_id = MomentumStrategy.generate_next_id()\n self.__is_best_price_calculation = is_best_px_calc\n self.__traded_amount = traded_amount\n # Arguments sanity check\n if ma_fast >= ma_slow:\n raise Exception(\"The Moving average fast ({0}) has to be lower than the Moving average slow ({1})\"\n .format(ma_fast, ma_slow))\n elif ma_fast <= 0 or ma_slow <= 0:\n raise Exception(\"The Moving average fast and slow ({0} and {1}) have to be more that 0\"\n .format(ma_fast, ma_slow))\n # Save input arguments\n self.__ma_slow_var = ma_slow\n self.__ma_fast_var = ma_fast\n self.__target_profit = target_profit_arg\n\n # Init the FiFo arrays (FifoDoublesList class)\n self.__ma_slow_fifo_list = FifoDoublesList(self.__ma_slow_var)\n self.__ma_fast_fifo_list = FifoDoublesList(self.__ma_fast_var)\n\n # Init locals\n self.__current_trading_way = False\n self.__open_position = None\n self.__positions_history = []\n self.__is_filled_start_data = False\n self.__filled_data_points = 0",
"def add_curves_Wyoming(ax,datetime,station,linewidth=1.0,LH_Tdepend=False):\n from siphon.simplewebservice.wyoming import WyomingUpperAir\n\n date = datetime\n station = station\n df = WyomingUpperAir.request_data(date, station)\n pressure = df['pressure'].values\n Temp = df['temperature'].values\n Temp_dew = df['dewpoint'].values\n altitude = df['height'].values\n q = mpcalc.mixing_ratio(mpcalc.saturation_vapor_pressure(Temp_dew*units('degC')),pressure*units('mbar'))\n q = mpcalc.specific_humidity_from_mixing_ratio(q)\n qs = mpcalc.mixing_ratio(mpcalc.saturation_vapor_pressure(Temp*units('degC')),pressure*units('mbar'))\n \n # specific energies\n if LH_Tdepend == False:\n mse = mpcalc.moist_static_energy(altitude*units('meter'),Temp*units('degC'),q)\n mse_s = mpcalc.moist_static_energy(altitude*units('meter'),Temp*units('degC'),qs)\n dse = mpcalc.dry_static_energy(altitude*units('meter'),Temp*units('degC'))\n else:\n # A short course in cloud physics, Roger and Yau (1989)\n Lvt = (2500.8 - 2.36*T.magnitude + 0.0016*T.magnitude**2 - \n 0.00006*T.magnitude**3)*units('joule/gram') # latent heat of evaporation\n #Lf = 2834.1 - 0.29*T - 0.004*T**2 # latent heat of fusion\n \n mse = Cp_d*T + g*altitude + Lvt*q\n mse_s = Cp_d*T + g*altitude + Lvt*qs\n dse = mpcalc.dry_static_energy(altitude,T)\n \n # adding curves on the main axes\n ax.plot(dse.magnitude, pressure, 'k', linewidth=linewidth)\n ax.plot(mse.magnitude, pressure, 'b', linewidth=linewidth)\n ax.plot(mse_s.magnitude, pressure, 'r', linewidth=linewidth)",
"def make_pipeline():\n\n symbol_filter = StaticSids([sid(40515), sid(41969), sid(38054)])\n # exchange = Fundamentals.exchange_id.latest\n # nyse_filter = exchange.eq('NYS')\n\n # dollar_volume = AverageDollarVolume(window_length=10)\n # high_dollar_volume = dollar_volume.top(TOP_NUM_STOCKS_BY_DOLLAR_VOLUME)\n vol_sma = SimpleMovingAverage(\n inputs=[USEquityPricing.volume], window_length=90)\n\n price_sma = SimpleMovingAverage(\n inputs=[USEquityPricing.close], window_length=30)\n\n vol_filter = ~(symbol_filter) & (vol_sma > VOLUME_MIN_AVG) & (price_sma > CLOSE_PRICE_MIN_AVG)\n\n weekly_high = WeeklyHigh(\n inputs=[USEquityPricing.high],\n mask=vol_filter\n )\n weekly_low = WeeklyLow(\n inputs=[USEquityPricing.low],\n mask=vol_filter\n )\n weekly_gamma_filter = WeeklyGammaFilter(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=vol_filter\n )\n\n monthly_current_open = MonthlyCurrentOpen(\n inputs=[USEquityPricing.open],\n mask=vol_filter\n )\n weekly_current_open = WeeklyCurrentOpen(\n inputs=[USEquityPricing.open],\n mask=vol_filter\n )\n\n pipe = Pipeline(\n screen=(weekly_gamma_filter),\n # screen = symbol_filter,\n columns={\n # 'daily_classifier': daily_classifier,\n # 'daily_high': USEquityPricing.high.latest,\n # 'daily_low': USEquityPricing.low.latest,\n 'weekly_gamma_filter': weekly_gamma_filter,\n 'weekly_high': weekly_high,\n 'weekly_low': weekly_low,\n 'weekly_ohlc': weekly_ohlc,\n 'monthly_current_open': monthly_current_open,\n 'weekly_current_open': weekly_current_open\n }\n )\n return pipe",
"def smooth(fx,w,mode='box',edge_degree=1):\n\n import numpy as np\n import tayph.util as ut\n from tayph.vartests import typetest,postest\n import tayph.functions as fun\n from matplotlib import pyplot as plt\n typetest(w,[int,float],'w in ops.smooth()')\n typetest(fx,np.ndarray,'fx in ops.smooth()')\n typetest(mode,str,'mode in ops.smooth()')\n typetest(edge_degree,int,'edge_degree in ops.smooth()')\n postest(w,'w in ops.smooth()')\n\n if mode not in ['box','gaussian']:\n raise Exception(f'RuntimeError in ops.smooth(): Mode should be set to \"top\" or \"bottom\" ({mode}).')\n truncsize=4.0#The gaussian is truncated at 4 sigma.\n shape=np.shape(fx)\n\n sig_w = w / 2*np.sqrt(2.0*np.log(2)) #Transform FWHM to Gaussian sigma. In km/s.\n trunc_dist=np.round(sig_w*truncsize).astype(int)\n\n #First define the kernel.\n kw=int(np.round(truncsize*sig_w*2.0))\n if kw % 2.0 != 1.0:#This is to make sure that the kernel has an odd number of\n #elements, and that it is symmetric around zero.\n kw+=1\n\n kx=fun.findgen(kw)\n kx-=np.mean(kx)#This must be centered around zero. Doing a hardcoded check:\n if (-1.0)*kx[-1] != kx[0]:\n print(kx)\n raise Exception(\"ERROR in box_smooth: Kernel could not be made symmetric somehow. Attempted kernel grid is printed above. Kernel width is %s pixels.\" % kw)\n\n\n if mode == 'gaussian':\n k=fun.gaussian(kx,1.0,0.0,sig_w)\n\n if mode == 'box':\n k=fun.box(kx,1.0,0.0,w)\n kx=kx[k > 0.0]\n k=k[k > 0.0]\n if (-1.0)*kx[-1] != kx[0]:\n print(kx)\n raise Exception(\"ERROR in box_smooth: Kernel could not be made symmetric AFTER CROPPING OUT THE BOX, somehow. Attempted kernel grid is printed above. Kernel width is %s pixels.\" % kw)\n\n k/=np.sum(k)\n\n return(convolve(fx,k,edge_degree))",
"def __init__(self):\n\n self.title = \"Weight-based growth curve for females aged 3 to 20 years\"\n\n self.ages = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]\n\n self.percentis_3 = [8.78, 10.02, 11.29, 12.69, 14.30, 16.10, 18.10, 20.22,\n 22.35, 24.37, 26.19, 27.75, 29.06, 30.19, 31.23, 32.21,\n 33.18, 34.15]\n\n self.percentis_10 = [10.17, 11.74, 13.39, 15.24, 17.37, 19.79, 22.49, 25.36,\n 28.27, 31.04, 33.51, 35.59, 37.31, 38.77, 40.05, 41.23,\n 42.37, 43.48]\n\n self.percentis_25 = [11.55, 13.46, 15.48, 17.78, 20.43, 23.47, 26.87, 30.51,\n 34.20, 37.70, 40.83, 43.43, 45.56, 47.34, 48.86, 50.25,\n 51.55, 52.82]\n\n self.percentis_50 = [12.94, 15.18, 17.58, 20.33, 23.50, 27.16, 31.25, 35.66,\n 40.13, 44.37, 48.14, 51.27, 53.82, 55.91, 57.68, 59.26,\n 60.74, 62.16]\n\n self.percentis_75 = [14.33, 16.90, 19.68, 22.87, 26.57, 30.84, 35.64, 40.80,\n 46.05, 51.04, 55.46, 59.11, 62.07, 64.48, 66.50, 68.28,\n 69.92, 71.50]\n\n self.percentis_90 = [15.71, 18.62, 21.78, 25.42, 29.64, 34.52, 40.02, 45.95,\n 51.98, 57.70, 62.77, 66.95, 70.33, 73.06, 75.32, 77.29,\n 79.11, 80.84]\n\n self.percentis_97 = [17.10, 20.34, 23.88, 27.96, 32.71, 38.21, 44.41, 51.10,\n 57.90, 64.37, 70.09, 74.80, 78.58, 81.63, 84.14, 86.31,\n 88.29, 90.18]",
"def savitzky_golay(y, window_size, order, deriv=0, rate=1):\n \n# The following is, for this application, constant so it is not necessary\n# to recalculate all this each time. Do once and then skip on subsequent\n# entries (this application only):\n global first_SG_call, half_window, window_size_SG, m_SG, order_SG, deriv_SG # preserve these for re-use\n if window_size != window_size_SG or order_SG != order or deriv_SG != deriv:\n# from math import factorial\n window_size_SG = window_size\n order_SG = order\n deriv_SG = deriv \n try:\n window_size = np.abs(np.int(window_size))\n order = np.abs(np.int(order))\n except ValueError, msg:\n raise ValueError(\"window_size and order have to be of type int\")\n if window_size % 2 != 1 or window_size < 1:\n raise TypeError(\"window_size size must be a positive odd number\")\n if window_size < order + 2:\n raise TypeError(\"window_size is too small for the polynomials order\")\n order_range = range(order+1)\n half_window = (window_size -1) // 2\n # precompute coefficients\n b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])\n m_SG = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)\n# End of first-call initialization\n \n # pad the signal at the extremes with\n # values taken from the signal itself\n# firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] ) # this is standard\n# lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1]) # this is standard\n# # change this to reflect that this is a folded histogram\n firstvals = y[1:half_window+1][::-1]\n lastvals = y[-half_window-1:-1][::-1]\n y = np.concatenate((firstvals, y, lastvals))\n return np.convolve( m_SG[::-1], y, mode='valid')",
"def __init__(self, model, eta_c=0.9, eta_d=0.9, Tleak=96, ts=15):\n Act = np.array([[-1.0/(Tleak*3600), 0], [0, 0]])\n Bct = np.array([[eta_c, -1.0/eta_d], [0, 1]])\n Cct = np.array([[0, 0]])\n Dct = np.array([[0, 0]])\n (A, B, C, D, dt) = cont2discrete(\n (Act, Bct, Cct, Dct), ts*60, method='zoh')\n super(GenericBufferedProduction, self).__init__(model, A, B)",
"def smooth(scalars, weight):\n if weight <= 0.0: # no smoothing\n return scalars\n last = scalars[0] # First value in the plot (first timestep)\n smoothed = list()\n for point in scalars:\n smoothed_val = last * weight + (1 - weight) * point # Calculate smoothed value\n smoothed.append(smoothed_val) # Save it\n last = smoothed_val # Anchor the last smoothed value\n\n return smoothed",
"def perform_spline_interpolation_helper(c,weight,index):\n\n return PerformSplineInterpolationHelper(index)(c,weight)",
"def produce_loss_graph(self):\n\n def interpolate(epochs, data):\n \"\"\"\n Creates a smoothed array to make the graph continous rather than discrete.\n \"\"\"\n\n epochs_new = np.linspace(1, epochs.iloc[-1], epochs.shape[0]*100)\n new_line = make_interp_spline(epochs, data)\n data_new = new_line(epochs_new)\n\n return pd.Series(epochs_new), pd.Series(data_new)\n\n print(\"[INFO] producing loss graph...\", end = \"\")\n\n # ---------------------\n # Wasserstein Losses\n # ---------------------\n\n # Convert list of lists into a DataFrames\n gen_losses = pd.Series([loss[0] for loss in self.val_loss])\n disc_losses = pd.Series([loss[1] for loss in self.val_loss])\n\n # Combine losses in a dataframe\n combined_losses = pd.DataFrame(\n {\"gen_loss\": gen_losses,\n \"disc_loss\": disc_losses}\n )\n\n # Save losses as csv\n combined_losses.to_csv(get_path(f\"{self.ckpt_path}/{self.file_name}/data/losses.csv\"), index=False)\n\n # Create training loss graph\n fig, ax = plt.subplots(1, 1, figsize=(axis_size*3, axis_size), squeeze=True)\n ax.plot(gen_losses, linewidth = line_width)\n ax.plot(disc_losses, linewidth = line_width)\n ax.set_ylabel('Wasserstein-1 Distance')\n ax.set_xlabel('Epoch')\n box = ax.get_position()\n ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])\n\n fig.legend(['Generator Loss', 'Discriminator Loss'], loc='lower center', frameon = False, ncol=2)\n\n fig.savefig(fname=get_path(f\"{self.ckpt_path}/{self.file_name}/images/losses_plot.png\"))\n plt.clf()\n\n\n # ---------------------\n # Hasudorff Distances\n # ---------------------\n\n # Convert list of lists into a numpy arrays\n epochs = pd.Series([loss[0] for loss in self.hausdorff_dist], dtype=np.float32)\n PCA_losses = pd.Series([loss[1] for loss in self.hausdorff_dist], dtype=np.float32)\n TSNE_losses = pd.Series([loss[2] for loss in self.hausdorff_dist], dtype=np.float32)\n UMAP_losses = pd.Series([loss[3] for loss in self.hausdorff_dist], dtype=np.float32)\n\n # Interpolate the data to ensure a smooth graph\n _, PCA_losses = interpolate(epochs, PCA_losses)\n _, TSNE_losses = interpolate(epochs, TSNE_losses)\n epochs, UMAP_losses = interpolate(epochs, UMAP_losses)\n\n # Combine losses in a dataframe\n distance_losses = pd.DataFrame(\n {\"epoch\": epochs,\n \"pca_loss\": PCA_losses,\n \"tsne_loss\": TSNE_losses,\n \"umap_loss\": UMAP_losses}\n )\n\n # Save distances as csv\n distance_losses.to_csv(get_path(f\"{self.ckpt_path}/{self.file_name}/data/distance_losses.csv\"), index=False)\n\n # Create distances graph\n fig, ax = plt.subplots(1, 1, figsize=(axis_size*3, axis_size), squeeze=True)\n ax.plot(epochs, PCA_losses, linewidth = line_width)\n ax.plot(epochs, TSNE_losses, linewidth = line_width)\n ax.plot(epochs, UMAP_losses, linewidth = line_width)\n ax.set_ylabel('Hausdorff Distance')\n ax.set_xlabel('Epoch')\n ax.set_ylim(ymin = 0.0)\n box = ax.get_position()\n ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])\n\n fig.legend(['PCA', 't-SNE', 'UMAP'], loc='lower center', frameon = False, ncol=3)\n\n fig.savefig(fname=get_path(f\"{self.ckpt_path}/{self.file_name}/images/distances_plot.png\"))\n plt.clf()\n\n print(\"done!\")\n\n return None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Incomplete functions don't really need to be 'builders', as they add no real new structure, but it's helpful to have a function in which we can raise a warning about the incomplete equation at translate time.
|
def add_incomplete(var_name, dependencies):
import_modules['functions'].add("incomplete")
warnings.warn('%s has no equation specified' % var_name,
SyntaxWarning, stacklevel=2)
# first arg is `self` reference
return "incomplete(%s)" % ', '.join(dependencies), []
|
[
"def Expression(self) -> _n_4_t_1:",
"def dummy_no_ephem():",
"def solve(equation):\r\n\r\n if not validator.is_valid(equation):\r\n raise Invalid(\"not valid\")\r\n #make haircut for the minuses\r\n equation = solver_helper.minuses_haircut(equation)\r\n #strip the expression from it's brackets if necessary\r\n # if an expression needs to be striped twice then it's\r\n # invalid equation\r\n if solver_helper.needs_to_be_bracket_striped(equation):\r\n equation = solver_helper.strip_outer_brackets(equation)\r\n if solver_helper.needs_to_be_bracket_striped(equation):\r\n raise Exception(\"unnecessary brackets on an expression: (\" +\r\n str(equation) + \")\")\r\n #make a list\r\n lst = solver_helper.make_a_list(equation)\r\n #(on the list)\r\n\r\n # while there are expressions, solve them\r\n # (expression is an equation in between brackets)\r\n\r\n i = finder.find_expression(lst)\r\n while i != -1:\r\n res = solve(lst[i])\r\n lst[i] = res\r\n i = finder.find_expression(lst)\r\n\r\n if solver_helper.list_is_valid(lst):\r\n pass\r\n #while len(lst) > 1 or lst[0] is not an expression\r\n #find the strongest operator and operate\r\n lst = clear_from_operators(lst)\r\n if solver_helper.list_is_valid(lst):\r\n pass\r\n if len(lst) > 1:\r\n raise Exception(\"an operator is missing between two expressions\")\r\n return lst[0]",
"def test_all_adjoint_nonexp(self, interface, return_type, shots, wire_specs):\n msg = \"Adjoint differentiation method does not support measurement .*\"\n\n warn_msg = (\n \"Requested adjoint differentiation to be computed with finite shots. \"\n \"Adjoint differentiation always calculated exactly.\"\n )\n\n with pytest.raises(QuantumFunctionError, match=msg):\n with pytest.warns(UserWarning, match=warn_msg):\n circuit = get_qnode(interface, \"adjoint\", return_type, shots, wire_specs)\n x = get_variable(interface, wire_specs)\n compute_gradient(x, interface, circuit, return_type)",
"def ungraded(f):\n return substitute(f, t=1)",
"def test_functionals(self):\n for i, f in enumerate(self.get_basis_functions()):\n for j, d in enumerate(self.dofs):\n if i == j:\n assert d.eval(f).expand().simplify() == 1\n else:\n assert d.eval(f).expand().simplify() == 0\n assert d.entity_dim() is not None",
"def stickel_method(U: Set[Equation], ac_symbol: Function) -> SubstituteTerm:\n # Gather all variables for fresh var calculation\n ALL_VARS = vars_from_equations(U)\n original_from_generalized : Dict[Variable, Term] = dict()\n\n def generalize_term(t: Term) -> Variable:\n \"\"\"\n Returns a generalized variable for every\n term that's not a variable.\n \"\"\"\n vt = t\n if isinstance(t, Variable):\n original_from_generalized[t] = t\n else:\n vt = None\n for gen_var, og_term in original_from_generalized.items():\n if t == og_term:\n vt = gen_var\n break\n if vt is None:\n vt = fresh_variable(ALL_VARS)\n ALL_VARS.add(vt)\n original_from_generalized[vt] = t\n return vt\n\n var_count = Counter()\n # Go through each equation\n for e in U:\n LS, RS = flatten_equation(e, ac_symbol)\n # print(\"LS\", LS)\n # print(\"RS\", RS)\n\n # Generalize left and right sides\n LS_VARS = [generalize_term(t) for t in LS]\n RS_VARS = [generalize_term(t) for t in RS]\n\n # Calculate multiplicity\n VARS_IN_EQ = set(LS_VARS).union(set(RS_VARS))\n for x in VARS_IN_EQ:\n num = LS_VARS.count(x) - RS_VARS.count(x)\n var_count[x] += num\n\n # Create the equation with variable coeficients\n # being the counts above\n sympy_expression = 0\n var_map: Dict[sympy.core.Symbol, Variable] = dict()\n for x, count in var_count.items():\n # Construct Z3 variable\n sympy_var = symbols(x.symbol + \"_0\", integer=True, positive=True)\n var_map[sympy_var] = x\n\n # Construct part of expression\n sympy_expression += count * sympy_var\n\n\n # Determine the ordering of the diophantine solver output\n sympy_ordering = list(sympy_expression.expand(force=True).free_symbols)\n sympy_ordering.sort(key=default_sort_key)\n\n # Solve diophantine equation\n # print(original_from_generalized)\n # print(sympy_expression)\n basis_vector = diop_linear(sympy_expression)\n basis_tables = generate_basis_table(basis_vector)\n\n sigma = False\n while not sigma:\n # Generate the basis table\n basis_table = next(basis_tables)\n # print(basis_table)\n\n # Create variables representing each row\n row_vars = n_fresh_variables(ALL_VARS, len(basis_table))\n ALL_VARS = ALL_VARS.union(set(row_vars))\n\n # Craft intermediate substitution from basis table\n sub_basis: Dict[Variable, Term] = dict()\n for column, sympy_var in enumerate(sympy_ordering):\n term = None\n for i, row in enumerate(basis_table):\n if row[column] == 0:\n continue\n row_var = row_vars[i]\n for _ in range(row[column]):\n if term is None:\n term = row_var\n else: # z_2 + z_4\n term = ac_symbol(term, row_var)\n sub_basis[var_map[sympy_var]] = term\n\n # [TODO] [IN PROGRESS] Unify variables in the generalized terms with\n # their counterparts in the original terms.\n # print(sub_basis)\n new_eqs = set()\n for gen_var, basis_var in sub_basis.items():\n rhs = original_from_generalized[gen_var]\n new_eqs.add(Equation(\n basis_var,\n rhs\n ))\n sigma = syntactic_unification(new_eqs)\n\n\n # Currently returning one posisble unifier but we can keep generating\n # using the basis vector\n return {sigma}",
"def _getEquationBlockAsFunction(self, differential_form='residual', side='rhs', compilation_mechanism=\"mpmath\"):\n\n #Check if the problem is differential or purely algebraic\n\n if len(self._equation_groups[\"differential\"]) > 0:\n\n if differential_form == 'elementary':\n\n fun_ = sp.lambdify(self._var_list,\n np_array(self._getEquationList(differential_form,side)),\n [{'Min':min, 'Max':max, 'Sin':np.sin, 'Cos':np.cos}, compilation_mechanism]\n )\n\n return jit(fun_)\n\n\n if differential_form == 'residual':\n\n yd_map, y_map = self._getMapForRewriteSystemAsResidual()\n\n # Add y_map dict to yd_map\n\n yd_map.update(y_map)\n\n original_eqs = self._getEquationList(differential_form, side)\n\n rewritten_eqs = [eq_i.subs(yd_map) for eq_i in original_eqs]\n\n _fun_ = sp.lambdify([\"t\",\"y\",\"yd\"],\n np_array(rewritten_eqs),\n [{'Min':min, 'Max':max, 'Sin':math.sin, 'Cos':math.cos}, compilation_mechanism]\n )\n\n #Provide result as numpy.array\n\n fun_ = lambda t,y,yd: np_array(_fun_(t,y,yd))\n\n return fun_\n\n else:\n\n fun_ = sp.lambdify(self._var_list,\n np_array(self._equations_list),\n [{'Min': min, 'Max': max, 'Sin': np.sin, 'Cos': np.cos}, compilation_mechanism]\n )\n\n fun_unpacked_ = lambda x: fun_(*x)\n\n return fun_unpacked_",
"def flux_function(x):\n \n # User can change the definition of the flux function to fit their problem at hand \n f_u = # ADD definition of flux function \n \n return f_u",
"def test_missing_symbols(self):\n # pylint: disable=protected-access\n with self.assertRaises(NameError):\n eqn = Equation(self.model, '14*x = 23*ww')\n eqn.parse(self.model._local_context)",
"def function2():\r\n x = sp.Symbol('x')\r\n y = sp.Symbol('y')\r\n # f = 0.2 * x - 20 + 3 * y ** 2\r\n f = 6*x + y**2 - 100\r\n\r\n return f",
"def test_a22(self):\n self.assertEqual(transmaths.NULLITY ** -1, transmaths.NULLITY)",
"def test_undifferentiable_operation(self, operable_mock_device_2_wires):\n\n def circuit(x):\n qml.BasisState(np.array([x, 0]), wires=[0, 1])\n qml.RX(x, wires=[0])\n return qml.expval(qml.PauliZ(0))\n\n node = qml.QNode(circuit, operable_mock_device_2_wires)\n\n with pytest.raises(ValueError, match=\"Cannot differentiate wrt parameter\"):\n node.jacobian(0.5)",
"def quad_add_empty_return(self, line_num):\n\n current_scope = IdleCompiler.__current_scope\n while not isinstance(current_scope, Func):\n current_scope = current_scope.parent\n if IdleCompiler.__should_gen_quads and current_scope.return_type != None:\n IdleCompiler.__compiler_errors.append(\"line %i: Type mismatch. Expecting expression with return type %s.\" % (line_num, current_scope.return_type))\n\n IdleCompiler.__interp.add_empty_return()",
"def _reconstruct(self):\n\n # We assemble the unknown which is an expression\n # of the first eliminated variable.\n self._sub_unknown()\n # Recover the eliminated unknown\n self._elim_unknown()",
"def solutions_ok_quadratic(eq):\n s = diop_solve(eq)\n x, y = symbols(\"x, y\", Integer=True)\n ok = True\n\n while len(s) and ok:\n u, v = s.pop()\n\n if simplify(simplify(Subs(eq, (x, y), (u, v)).doit())) != 0:\n ok = False\n return ok",
"def test_quadratic_cost_function_expansion_variables(\n self,l,\n lx,lu,\n lux,lxu,\n luu,lxx):\n\n Time = self.return_time_array()\n\n # l should be a list of length len(Time)-1, with each element with shape (1,1), where n is the number of states.\n assert len(l)==len(Time)-1, \"l has incorrect length. Should be of length \" + str(len(Time)-1) + \", not \" + str(len(l)) + \".\"\n assert all([el.shape==(1,1) for el in l]), \"Elements of l have incorrect shape. Should be of length (1,1). Check l function.\"\n\n # lx should be a list of length len(Time)-1, with each element with shape (n,1), where n is the number of states.\n assert len(lx)==len(Time)-1, \"lx has incorrect length. Should be of length \" + str(len(Time)-1) + \", not \" + str(len(lx)) + \".\"\n assert all([el.shape==(2,1) for el in lx]), \"Elements of lx have incorrect shape. Should be of length (2,1). Check lx function.\"\n\n # lu should be a list of length len(Time)-1, with each element with shape (m,1), where n is the number of states.\n assert len(lu)==len(Time)-1, \"lu has incorrect length. Should be of length \" + str(len(Time)-1) + \", not \" + str(len(lu)) + \".\"\n assert all([el.shape==(1,1) for el in lu]), \"Elements of lu have incorrect shape. Should be of length (1,1). Check lu function.\"\n\n # lux should be a list of length len(Time)-1, with each element with shape (m,n), where m is the number of inputs and n is the number of states.\n assert len(lux)==len(Time)-1, \"lux has incorrect length. Should be of length \" + str(len(Time)-1) + \", not \" + str(len(lux)) + \".\"\n assert all([el.shape==(1,2) for el in lux]), \"Elements of lux have incorrect shape. Should be of length (1,1). Check lux function.\"\n\n # lxu should be a list of length len(Time)-1, with each element with shape (n,m), where n is the number of states and m is the number of inputs.\n assert len(lxu)==len(Time)-1, \"lxu has incorrect length. Should be of length \" + str(len(Time)-1) + \", not \" + str(len(lxu)) + \".\"\n assert all([el.shape==(2,1) for el in lxu]), \"Elements of lxu have incorrect shape. Should be of length (2,1). Check lxu function.\"\n\n # luu should be a list of length len(Time)-1, with each element with shape (m,m), where m is the number of inputs.\n assert len(luu)==len(Time)-1, \"luu has incorrect length. Should be of length \" + str(len(Time)-1) + \", not \" + str(len(luu)) + \".\"\n assert all([el.shape==(1,1) for el in luu]), \"Elements of luu have incorrect shape. Should be of length (1,1). Check luu function.\"\n\n # lxx should be a list of length len(Time)-1, with each element with shape (n,n), where n is the number of states.\n assert len(lxx)==len(Time)-1, \"lxx has incorrect length. Should be of length \" + str(len(Time)-1) + \", not \" + str(len(lxx)) + \".\"\n assert all([el.shape==(2,2) for el in lxx]), \"Elements of lxx have incorrect shape. Should be of length (2,2). Check lxx function.\"",
"def test_inline_into_function():\n before_program = \"\"\"\n #[version = \"0.0.5\"]\n def @main() {\n let %x = 1 + 1;\n let %f = fn (%y: int) -> int {\n let %z = %y + %y;\n %x + %z\n };\n (%f(2), %f(3))\n }\n \"\"\"\n\n after_program = \"\"\"\n #[version = \"0.0.5\"]\n def @main() {\n let %x = 1 + 1;\n let %f = fn (%y: int) -> int {\n %x + (%y + %y)\n };\n (%f(2), %f(3))\n }\n \"\"\"\n\n optimize_and_check(\n before_program, after_program, transform.DeadCodeElimination(inline_once=True)\n )",
"def test_formulas_already_in_nnf(self):\n\t\tformula = bf.Or([bf.And([bf.Var(\"a\"), bf.Var(\"b\")]), bf.Var(\"c\")])\n\t\tself.assertEqual(formula, au.nnf(formula), \"Invalid formula, expected the same as entered.\")\n\n\t\tformula = bf.And([bf.Or([bf.Var(\"a\"), bf.Var(\"b\")]), bf.Var(\"c\")])\n\t\tself.assertEqual(formula, au.nnf(formula), \"Invalid formula, expected the same as entered.\")\n\n\t\tformula = bf.And([bf.Var(\"a\"), bf.Or([bf.Var(\"b\"), bf.And([bf.Var(\"c\"), bf.Var(\"d\")])])])\n\t\tself.assertEqual(formula, au.nnf(formula), \"Invalid formula, expected the same as entered.\")\n\n\t\tformula = bf.Or([bf.Var(\"a\"), bf.Not(bf.Var(\"b\"))])\n\t\tself.assertEqual(formula, au.nnf(formula), \"Invalid formula, expected the same as entered.\")\n\n\t\tformula = bf.And([bf.Var(\"a\"), bf.Not(bf.Var(\"b\"))])\n\t\tself.assertEqual(formula, au.nnf(formula), \"Invalid formula, expected the same as entered.\")\n\n\t\tformula = bf.And([bf.Var(\"a\"), bf.Not(bf.Var(\"b\"))])\n\t\tself.assertEqual(formula, au.nnf(formula), \"Invalid formula, expected the same as entered.\")\n\n\t\tformula = bf.Or([bf.And([bf.Var(\"a\"), bf.Or([bf.Not(bf.Var(\"b\")), bf.Var(\"c\")]), bf.Not(bf.Var(\"b\"))]), bf.Var(\"d\")])\n\t\tself.assertEqual(formula, au.nnf(formula), \"Invalid formula, expected the same as entered.\")\n\n\t\tformula = bf.And([bf.Var(\"a\"), bf.Or([bf.Var(\"b\"), bf.Not(bf.Var(\"c\"))])])\n\t\tself.assertEqual(formula, au.nnf(formula), \"Invalid formula, expected the same as entered.\")\n\n\t\tformula = bf.Or([bf.And([bf.Var(\"a\"), bf.Var(\"b\")]), bf.And([bf.Var(\"a\"), bf.Not(bf.Var(\"c\"))])])\n\t\tself.assertEqual(formula, au.nnf(formula), \"Invalid formula, expected the same as entered.\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ZBar's implementation of bch15_5_encode
|
def zbar_bch15_5_encode(x):
return (
(-(x & 1) & 0x0537) ^
(-(x >> 1 & 1) & 0x0A6E) ^
(-(x >> 2 & 1) & 0x11EB) ^
(-(x >> 3 & 1) & 0x23D6) ^
(-(x >> 4 & 1) & 0x429B)
)
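The five constants are the BCH(15,5) code words for the single-bit inputs 1, 2, 4, 8 and 16, so XOR-ing the selected rows encodes any 5-bit value. As a sanity check, the table-driven form can be compared against a straightforward polynomial long division with the QR format-info generator polynomial g(x) = x^10 + x^8 + x^5 + x^4 + x^2 + x + 1 (0x537); the reference encoder below is a sketch written for this comparison, not part of ZBar.

def bch15_5_encode_ref(data):
    # Append the 10-bit remainder of data * x^10 modulo g(x) = 0x537 over GF(2).
    rem = data << 10
    for shift in range(4, -1, -1):  # clear bits 14 down to 10
        if rem & (1 << (10 + shift)):
            rem ^= 0x537 << shift
    return (data << 10) | rem

# Both encoders should agree on every 5-bit input.
assert all(zbar_bch15_5_encode(x) == bch15_5_encode_ref(x) for x in range(32))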
|
[
"def encode(postings_list):\n ### Begin your code\n if postings_list == []:\n return array.array('B', []).tobytes()\n result = []\n pre = postings_list[0]\n for item in CompressedPostings().getBin(pre):\n result.append(int(item, 2))\n for i in range(1, len(postings_list)):\n p = postings_list[i] - pre\n for item in CompressedPostings().getBin(p):\n result.append(int(item, 2))\n pre = pre + p\n return array.array('B', result).tobytes()\n ### End your code",
"def b85encode(octet_string):\n return base_N_encode(octet_string, _b85chars )",
"def encode_trading_pair(pair):",
"def compress_encode(value):\n return base64.b64encode(zlib.compress(value.encode(\"ascii\"))).decode(\"ascii\")",
"def encode(volume_np: np.ndarray, volume_meta: dict) -> bytes:\n\n directions = np.array(volume_meta[\"directions\"]).reshape(3, 3)\n directions *= volume_meta[\"spacing\"]\n\n volume_bytes = nrrd_encoder.encode(\n volume_np,\n header={\n \"encoding\": \"gzip\",\n # \"space\": \"left-posterior-superior\",\n \"space\": \"right-anterior-superior\",\n \"space directions\": directions.T.tolist(),\n \"space origin\": volume_meta[\"origin\"],\n },\n compression_level=1,\n )\n\n # with open(\"/work/output/test.nrrd\", \"wb\") as file:\n # file.write(volume_bytes)\n\n return volume_bytes",
"def encodeString():\n pass",
"def b1kencode(s, style='s', width=None):\n if style == 's':\n alphabet = ALPHABET_S\n elif style == 't':\n alphabet = ALPHABET_T\n else:\n raise ValueError('Unknown style')\n # lcm(10, 8) = 5x8\n chk, rem = divmod(len(s), 5)\n # += actually faster\n encoded = ''\n for i in range(chk):\n chunk = s[i * 5:(i + 1) * 5]\n encoded += (\n alphabet[(chunk[0] << 2) | (chunk[1] >> 6)] +\n alphabet[0x3FF & (chunk[1] << 4) | (chunk[2] >> 4)] +\n alphabet[0x3FF & (chunk[2] << 6) | (chunk[3] >> 2)] +\n alphabet[0x3FF & (chunk[3] << 8) | chunk[4]]\n )\n if not rem:\n if width:\n encoded = SimpleTextWrap.wrap(encoded, width)\n return encoded\n chunk = s[chk * 5:]\n if rem == 1:\n encoded += alphabet[chunk[0] << 2]\n elif rem == 2:\n encoded += (\n alphabet[(chunk[0] << 2) | (chunk[1] >> 6)] +\n alphabet[0x3FF & (chunk[1] << 4)]\n )\n elif rem == 3:\n encoded += (\n alphabet[(chunk[0] << 2) | (chunk[1] >> 6)] +\n alphabet[0x3FF & (chunk[1] << 4) | (chunk[2] >> 4)] +\n alphabet[0x3FF & (chunk[2] << 6)]\n )\n elif rem == 4:\n encoded += (\n alphabet[(chunk[0] << 2) | (chunk[1] >> 6)] +\n alphabet[0x3FF & (chunk[1] << 4) | (chunk[2] >> 4)] +\n alphabet[0x3FF & (chunk[2] << 6) | (chunk[3] >> 2)] +\n alphabet[0x3FF & (chunk[3] << 8)]\n )\n encoded += '。'\n if width:\n encoded = SimpleTextWrap.wrap(encoded, width)\n return encoded",
"def encode(self, string):\n\t\tbytes = [0x17, len(string)];\n\t\tbytes[2:len(string)] = map(ord, a);",
"def encoded_huffman_tree(tree):\n\n\tbinary_string = '' #huffman tree in binary form stored as string\n\tno_keys = 0 #count number of item in huffman tree, needed for decompression\n\tfor item in tree:\n\t\tkey = [bin(ord(x))[2:].zfill(16) for x in item][0] #convert each key into 16 bit ascii\n\t\tno_bits = \"{:08b}\".format(len(tree[item])) #convert the number of bits used for each huffman code to binary\n\t\tcode = tree[item] #get huffman code\n\t\tno_keys +=1\n\t\tbinary_string += key+no_bits+code #item in tree is stored as | key | length of code | code | \n\n\tno_keys = \"{:08b}\".format(no_keys) #number of items in huffman tree in binary form\n\n\tbinary_string = no_keys+binary_string \n\n\treturn binary_string",
"def test_encode_bytearray(self):\n self.assertEqual(\n encode(bytearray(b'Hello, IOTA!'), 'trytes'),\n b'RBTC9D9DCDQAEASBYBCCKBFA',\n )",
"def encode(self, state: State) -> bytes:\n ...",
"def encode_bytes(matrix):\n return IdxEncoder().write(matrix)",
"def test_encode():\n\n assert ceaser.encode(\"bbb\", 3) == \"eee\"\n\n assert ceaser.encode(\"ccccc\", 2) == \"eeeee\"\n\n assert ceaser.encode(\"blake\", 4) == \"fpeoi\"\n \n assert ceaser.encode(\"\", 4) == \"\"",
"def encode(frames):\n data = []\n prev_comment = \"\"\n # used to output ct flag (\"Guideline\" checkbox for colors) on the first frame only\n ct_flag = 1\n prev_frame = [0] * FIELD_BLOCKS\n\n for field, comment in frames:\n new_frame = [0] * FIELD_BLOCKS\n # add field from bottom->top into blank frame\n for y, row in enumerate(field):\n for x in range(10):\n new_frame[((22 - y) * 10) + x] = row[x]\n\n # fumen encoding starts here\n frame = [0] * FIELD_BLOCKS\n for i in range(FIELD_BLOCKS):\n frame[i] += new_frame[i] + 8 - prev_frame[i]\n\n # simple run-length encoding for field-data\n repeat_count = 0\n for j in range(FIELD_BLOCKS - 1):\n repeat_count += 1\n if frame[j] != frame[j + 1]:\n val = (frame[j] * FIELD_BLOCKS) + (repeat_count - 1)\n data.append(val % 64)\n val = val // 64\n data.append(val % 64)\n repeat_count = 0\n # output final block\n val = (frame[FIELD_BLOCKS - 1] * FIELD_BLOCKS) + (repeat_count)\n data.append(val % 64)\n val = val // 64\n data.append(val % 64)\n # ignore check for blank frame/field repeat here\n\n # piece/data output\n # only thing I implement here is comment flag + \"ct\" flag (Guideline colors)\n val = 1 if comment != prev_comment else 0\n val = 128 * FIELD_BLOCKS * ((val * 2) + ct_flag)\n ct_flag = 0 # should only be set on the first frame\n data.append(val % 64)\n val = val // 64\n data.append(val % 64)\n val = val // 64\n data.append(val % 64)\n\n if comment != prev_comment:\n # quote similulates escape() in javascript, but output is not one-to-one\n # (since escape is deprecated)\n comment_str = quote(comment[:4096])\n comment_len = len(comment_str)\n\n comment_data = [ASC_TABLE.index(c) for c in comment_str]\n # pad data if necessary\n if (comment_len % 4) > 0:\n comment_data.extend([0] * (4 - (comment_len % 4)))\n\n # output length of comment\n val = comment_len\n data.append(val % 64)\n val = val // 64\n data.append(val % 64)\n\n # every 4 chars becomes 5 bytes (4 * 96 chars in ASCII table = 5 * 64)\n for i in range(0, comment_len, 4):\n val = comment_data[i]\n val += comment_data[i + 1] * 96\n val += comment_data[i + 2] * 9216\n val += comment_data[i + 3] * 884736\n data.append(val % 64)\n val = val // 64\n data.append(val % 64)\n val = val // 64\n data.append(val % 64)\n val = val // 64\n data.append(val % 64)\n val = val // 64\n data.append(val % 64)\n prev_frame = new_frame\n prev_comment = comment\n\n encode_str = \"v115@\"\n for i, output_byte in enumerate(data):\n encode_str += ENC_TABLE[output_byte]\n if i % 47 == 41:\n encode_str += \"?\"\n return encode_str",
"def sia_binary_encode(self, encoder):\n pass",
"def b58encode(value):\n encoded = ''\n while value >= __b58base:\n div, mod = divmod(value, __b58base)\n encoded = __b58chars[mod] + encoded # add to left\n value = div\n encoded = __b58chars[value] + encoded # most significant remainder\n return encoded",
"def encode(self, data):\n\t\traise NotImplementedError()",
"def encode_and_enbase(n):\n return enbase(encode(n))",
"def bech32_encode( hrp, data ):\n combined = data + bech32_create_checksum( hrp, data )\n return hrp + \"1\" + \"\".join( [ CHARSET[ d ] for d in combined ] )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Decode a QR code grid
|
def decode_qr_grid(qrgrid):
qrsize = len(qrgrid)
assert all(len(col) == qrsize for col in qrgrid), "not a square grid"
# Extract format info, which is present in lines
format_int1 = 0
format_int2 = 0
for y in range(6):
format_int1 |= qrgrid[8][y] << y
format_int1 |= qrgrid[8][7] << 6
format_int1 |= qrgrid[8][8] << 7
format_int1 |= qrgrid[7][8] << 8
for x in range(6):
format_int1 |= qrgrid[5 - x][8] << (x + 9)
for x in range(8):
format_int2 |= qrgrid[qrsize - 1 - x][8] << x
assert qrgrid[8][qrsize - 8] == 1 # "unused" bit
for y in range(7):
format_int2 |= qrgrid[8][qrsize - 7 + y] << (8 + y)
# cf. http://upload.wikimedia.org/wikipedia/commons/4/49/QRCode-4-Levels%2CMasks.png for the magic masking
fmtint1 = format_int1 ^ int('101010000010010', 2)
fmtint2 = format_int2 ^ int('101010000010010', 2)
if qrformat_bch_check(fmtint1):
fmtint = fmtint1
if qrformat_bch_check(fmtint2):
if fmtint1 != fmtint2:
print("Error: format-1 ({:#x}) and format-2 ({:#x}) were sane but different".format(fmtint1, fmtint2))
raise ValueError("Disagreeing format integers")
else:
print("Warning: format-1 ({:#x}) was corrupted, using format-2 ({:#x})".format(fmtint1, fmtint2))
else:
if qrformat_bch_check(fmtint2):
print("Warning: format-2 ({:#x}) was corrupted, using format-1 ({:#x})".format(fmtint2, fmtint1))
fmtint = fmtint2
else:
print("Error: format-1 ({:#x}) and format-2 ({:#x}) were corrupted".format(fmtint1, fmtint2))
raise ValueError("Unable to decode format")
# Sanity checks
assert qrformat_bch_check(fmtint)
assert qrformat_bch(fmtint >> 10) == fmtint
assert zbar_bch15_5_encode(fmtint >> 10) == fmtint
edc_level = fmtint >> 13
mask = (fmtint >> 10) & 7
print("QR code size={}, format={:#x}: EDC level {} Mask {}".format(qrsize, fmtint, edc_level, mask))
# Apply the mask
for x in range(qrsize):
for y in range(qrsize):
if (x <= 8 and y <= 8) or (x <= 8 and y >= qrsize - 8) or (x >= qrsize - 8 and y <= 8):
continue
if mask == 4:
if (y // 2 + x // 3) % 2 == 0:
qrgrid[x][y] ^= 1
elif mask == 6:
if ((x * y) % 3 + x * y) % 2 == 0:
qrgrid[x][y] ^= 1
else:
raise NotImplementedError("Unknown QR code mask {}".format(mask))
if qrsize == 21:
# Decode the encoding
encoding = qrgrid[20][20] << 3
encoding |= qrgrid[19][20] << 2
encoding |= qrgrid[20][19] << 1
encoding |= qrgrid[19][19]
if encoding == 4:
print("... encoding {}: Bytes".format(encoding))
else:
print("... encoding {}: ?".format(encoding))
blocks = bytearray(19)
        # Positions to turn up2down: (x, y) offsets of the 8 bits of a byte that straddles a U-turn at the top or bottom of the 2-module-wide read columns
turn_pos = [(3, 1), (2, 1), (3, 0), (2, 0), (1, 0), (0, 0), (1, 1), (0, 1)]
for i in range(4):
for j in range(2):
tposx, tposy = turn_pos[i * 2 + j]
blocks[0] |= qrgrid[20 - j][18 - i] << (7 - (i * 2 + j))
blocks[1] |= qrgrid[20 - j][14 - i] << (7 - (i * 2 + j))
blocks[2] |= qrgrid[17 + tposx][9 + tposy] << (7 - (i * 2 + j))
blocks[3] |= qrgrid[18 - j][11 + i] << (7 - (i * 2 + j))
blocks[4] |= qrgrid[18 - j][15 + i] << (7 - (i * 2 + j))
blocks[5] |= qrgrid[15 + tposx][20 - tposy] << (7 - (i * 2 + j))
blocks[6] |= qrgrid[16 - j][18 - i] << (7 - (i * 2 + j))
blocks[7] |= qrgrid[16 - j][14 - i] << (7 - (i * 2 + j))
blocks[8] |= qrgrid[13 + tposx][9 + tposy] << (7 - (i * 2 + j))
blocks[9] |= qrgrid[14 - j][11 + i] << (7 - (i * 2 + j))
blocks[10] |= qrgrid[14 - j][15 + i] << (7 - (i * 2 + j))
blocks[11] |= qrgrid[11 + tposx][20 - tposy] << (7 - (i * 2 + j))
blocks[12] |= qrgrid[12 - j][18 - i] << (7 - (i * 2 + j))
blocks[13] |= qrgrid[12 - j][14 - i] << (7 - (i * 2 + j))
blocks[14] |= qrgrid[12 - j][10 - i] << (7 - (i * 2 + j))
blocks[15] |= qrgrid[12 - j][5 - i] << (7 - (i * 2 + j))
blocks[16] |= qrgrid[9 + tposx][0 + tposy] << (7 - (i * 2 + j))
blocks[17] |= qrgrid[10 - j][2 + i] << (7 - (i * 2 + j))
blocks[18] |= qrgrid[10 - j][7 + i] << (7 - (i * 2 + j))
print("... hex: {}".format(' '.join('{:02x}'.format(b) for b in blocks)))
if encoding == 4:
# Byte encoding
length = blocks[0]
if length >= len(blocks):
print("Error: length {} too large".format(length))
else:
print("... bytes[{}]: {}".format(blocks[0], repr(bytes(blocks[1:length + 1]))))
if length + 1 < len(blocks):
print("... padding: {}".format(repr(bytes(blocks[length + 1:]))))
|
[
"def decode_hello():\n # Load the image\n im = Image.open(os.path.join(os.path.dirname(__file__), 'barcode-image21helloqrworld.png'))\n im = im.crop((24, 24, 108, 108))\n imdata = im.getdata()\n\n qrsize = 21\n qrgrid = [[None] * qrsize for _ in range(qrsize)]\n for x in range(qrsize):\n for y in range(qrsize):\n qrgrid[x][y] = 0 if imdata[(4 * y + 2) * 84 + (4 * x + 2)][0] & 0x80 else 1\n del imdata\n del im\n\n decode_qr_grid(qrgrid)\n\n # Show the grid\n # im = draw_grid(qrgrid)\n # im.show()",
"def decode_qr(arg_image):\n qr_result = decode(arg_image)\n\n if (len( qr_result ) > 0):\n decoded_data = qr_result[0].data\n else:\n decoded_data = \"NA\"\n\n #Return the Decode data from QR \n return decoded_data",
"def replacement_decode(self, codes):\n n, cs = codes.shape\n codes = _check_dtype_uint8(codes)\n assert cs == self.code_size\n x = np.empty((n, self.d), dtype='float32')\n self.decode_c(swig_ptr(codes), swig_ptr(x), n)\n return x",
"def decode(\n self,\n patternImages,\n disparityMap=...,\n blackImages=...,\n whiteImages=...,\n flags=...,\n ) -> Tuple[retval, disparityMap]:\n ...",
"def test_decode_subsampled(self):\n # Component 1 is (1, 1)\n # Component 2 is (2, 1)\n # Component 3 is (2, 1)\n jpg = DIR_15444 / \"2KLS\" / \"oj36.j2k\"\n with open(jpg, 'rb') as f:\n arr = decode(f.read())\n\n assert arr.flags.writeable\n assert 'uint8' == arr.dtype\n assert (256, 256, 3) == arr.shape\n assert [235, 244, 245] == arr[0, 0, :].tolist()",
"def decode(image_file):\n \n # Set message\n message = \"\"\n \n # Create a zbar image reader\n scanner = zbar.ImageScanner()\n \n # Set the reader configuration to default\n scanner.parse_config('enable')\n \n # Read the image file and convert it into greyscale data readable by zbar\n try:\n pil = Image.open(image_file).convert('L')\n except:\n # Image file is not an image\n print \"Sorry, the file provided was not an image.\"\n raise TypeError\n \n width, height = pil.size # Extract image size\n raw = pil.tostring() # Convert image to a string of data\n \n # Put the image data in a container with the size and data together\n image = zbar.Image(width, height, 'Y800', raw)\n \n # Use zbar to scan the data for a QR code\n scanner.scan(image)\n \n # Scan through results\n for symbol in image: \n \n # Check image is actually a QR code\n if str(symbol.type) == \"QRCODE\":\n # Set message to success and include the encoded message\n message = \"Success ({0})\".format(symbol.data)\n # Exit\n return message\n \n # If results do not contain a zbar symbol (unsuccessful read) \n else:\n # Set message to fail\n message = \"Fail\" \n # Exit\n return message",
"def detectAndDecode(\n self, img, points=..., straight_qrcode=...\n ) -> Tuple[retval, points, straight_qrcode]:\n ...",
"def _decode_matrix(self, data):\n\n dtype, shape, data = data[1:]\n if isinstance(data, str):\n out = np.fromstring(base64.b64decode(data.encode()), dtype)\n else:\n out = np.frombuffer(data, dtype)\n shape = [int(n) for n in shape]; # numpy requires integer indices\n return out.reshape(*shape)",
"def read_codes(image):\n decodedObjects = pyzbar.decode(image)\n codes = []\n for obj in decodedObjects:\n try:\n codes.append(\n {\n \"data\": obj.data.decode(),\n \"top\": obj.rect.top,\n \"left\": obj.rect.left,\n \"bottom\": obj.rect.top + obj.rect.height,\n \"right\": obj.rect.left + obj.rect.width,\n \"type\": obj.type,\n }\n )\n except Exception:\n continue\n return codes",
"def GetQRCodeDetectionRegion(img_height, img_width):\n y_pos, x_pos = MAX_GRID_SIZE\n width = (img_width // 2) - x_pos\n height = img_height - y_pos * 2\n\n return x_pos, y_pos, width, height",
"def decode_row(inp: str):\n inp = inp.replace('F', '0')\n inp = inp.replace('B', '1')\n inp = inp.replace('L', '0')\n inp = inp.replace('R', '1')\n\n return int(inp[0:7], 2)*8+int(inp[7:], 2)",
"def read_qr_camera(self, runtime: int) -> str:\n\n cap = cv.VideoCapture(0)\n\n data = ''\n crude_run_time = 0\n while len(data.split(\":\")) != 4 & crude_run_time < runtime * 2:\n\n _, frame = cap.read()\n\n data, pts, straight_qrcode = self._detector.detectAndDecode(frame)\n\n if len(data.split(\":\")) == 4:\n return data\n\n\n cv.imshow(\"Frame\", frame)\n\n key = cv.waitKey(1)\n\n if key == 27:\n break\n sleep(0.5)\n crude_run_time += 1\n return ''",
"def _read_barcode(self, image):\n if self.LOG.getEffectiveLevel() is logging.DEBUG:\n start = timeit.default_timer()\n \n runner = \"com.google.zxing.client.j2se.CommandLineRunner\"\n libs = [\"javase-3.0.0.jar\", \"core-3.0.0.jar\"]\n libs_fullpath = [os.path.join(os.path.dirname(__file__), \"lib\", lib) for lib in libs]\n classpath = \":\".join(libs_fullpath)\n\n p = subprocess.Popen([\"java\", \"-cp\", classpath, runner, image, \"--possibleFormats=AZTEC\"],\n stdout=subprocess.PIPE, \n stderr=subprocess.PIPE)\n out, err = p.communicate()\n \n # There was a problem.. \n if p.returncode != 0:\n # Attempt to get a meaninful error\n err_lines = err.split('\\n')\n out_lines = out.split('\\n')\n if len(err_lines) > 5: # Assume a Java stack trace\n for line in err_lines:\n if line.startswith(\"Caused by\"):\n error_msg = line\n break\n elif out: error_msg = out_lines[0] \n else: error_msg = err_lines[0]\n raise BarcodeDecodeError(error_msg)\n\n decoded = out.split('\\n')[2] # Get 3rd line (raw string)\n \n if self.LOG.getEffectiveLevel() is logging.DEBUG:\n stop = timeit.default_timer()\n self.LOG.debug(\"_read_barcode: %ss\" % (stop - start))\n \n return decoded",
"def decode_image(data: bytes) -> np.ndarray:\n height = int.from_bytes(data[0:2], 'little')\n width = int.from_bytes(data[2:4], 'little')\n image_format = ImageFormat(int.from_bytes(data[4:5], 'little', signed=False))\n if image_format == ImageFormat.RAW_BGR:\n return np.frombuffer(data[5:], np.uint8).reshape((height, width, 3))\n elif image_format == ImageFormat.JPG_RGB:\n return cv2.imdecode(np.frombuffer(data[5:], np.uint8), cv2.IMREAD_COLOR)",
"def decode(self, m):\n\n F = self.field\n n = self._n\n k = self._k\n # e = number of errors\n # e is largest integer less than (n - k + 1)/2\n if (n-k+1)%2 == 0:\n e = (n-k+1)/2 - 1\n else:\n e = (n-k+1)//2\n\n ##print(\"making A\")\n A = Matrix(F, n, 2*e + k + 1)\n for i in range(n):\n row = []\n for j in range(e + k):\n row.append(F(i)**j)\n for j in range(e + k, 2*e + k + 1):\n row.append(-(F(i)**(j-e-k))*m[0][i])\n A[i] = row\n #print(\"A is \")\n #print(A)\n kernel = A.basis_of_kernel()\n #print(\"basis of kernel is\")\n #print(kernel)\n xVec = kernel[0]\n #print(\"element of kernel is\")\n #print(xVec)\n #print(\"making polys\")\n qVec = []\n for i in range(e + k):\n qVec.append(xVec[i])\n eVec = []\n for i in range(e+1):\n eVec.append(xVec[e+k+i])\n Q = Polynomial(qVec)\n #print(\"Q is \")\n #print(Q)\n E = Polynomial(eVec)\n #print(\"E is\")\n #print(E)\n #print(\"polynomial division\")\n P = Q/E\n #print(\"P is \")\n #print(P)\n res = Matrix(F, 1, k)\n resPoly = Polynomial(res[0])\n P = P.leading_zeros(resPoly)[0]\n res[0] = P._coeffs\n #print(\"res is \")\n #print(res)\n return res",
"def QR_unpack(QR, tau):\n (m, n) = QR.shape\n code = get_typecode(QR)\n q = zeros([m,m], code)\n r = zeros([m,n], code)\n _gslwrap.gsl_linalg_QR_unpack(QR, tau, q, r)\n return (q,r)",
"def read_qr(self, qr_path: os.path) -> str:\n\n img = cv.imread(qr_path)\n data, pts, straight_qrcode = self._detector.detectAndDecode(img)\n if pts is not None:\n return data\n else:\n raise QRReadError(\"Failed to fetch data from image\")",
"def deinterlace(self, raw):\r\n\r\n # print >> sys.stderr, (\"Reading interlaced, w=%s, r=%s, planes=%s,\" +\r\n # \" bpp=%s\") % (self.width, self.height, self.planes, self.bps)\r\n \r\n flt_list = [self.ifilter0, self.ifilter1, self.ifilter2, self.ifilter3, self.ifilter4]\r\n # Values per row (of the target image)\r\n vpr = self.width * self.bpp\r\n\r\n # Make a result array, and make it big enough. Interleaving\r\n # writes to the output array randomly (well, not quite), so the\r\n # entire output array must be in memory.\r\n # fmt = 'BH'[self.bitdepth > 8]\r\n # a = array(fmt, [0]*vpr*self.height)\r\n source_offset = 0\r\n\r\n # after = [[0]*vpr] * self.height\r\n after = []\r\n for i in range(self.height):\r\n after.append([0]*vpr)\r\n \r\n for xstart, ystart, xstep, ystep in self._adam7:\r\n \r\n if xstart >= self.width:\r\n continue\r\n \r\n # Pixels per row (reduced pass image)\r\n ppr = int(math.ceil((self.width-xstart)/float(xstep)))\r\n row_size = int(math.ceil(self.bpp * ppr))\r\n recon = [0] * row_size\r\n for y in range(ystart, self.height, ystep):\r\n filter_type = raw[source_offset]\r\n source_offset += 1\r\n scanline = [ord(b) for b in raw[source_offset:source_offset+row_size]]\r\n source_offset += row_size\r\n recon = flt_list[ord(filter_type)](scanline,recon,self.bpp)\r\n if xstep == 1 and xstart==0:\r\n after[y][0:vpr] = recon[0:vpr]\r\n else:\r\n reconIndex = 0\r\n for xpos in range(xstart,self.width,xstep):\r\n after[y][xpos*self.bpp:xpos*self.bpp+self.bpp] = recon[reconIndex*self.bpp:reconIndex*self.bpp+self.bpp]\r\n reconIndex += 1\r\n \r\n return after",
"def test_generateQR(self):\n self.assertTrue(qrcodes.generateQR(self.bridgelines))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Draw a QR code grid
|
def draw_grid(qrgrid):
qrsize = len(qrgrid)
assert all(len(col) == qrsize for col in qrgrid), "not a square grid"
im = Image.new("RGB", (qrsize * 8, qrsize * 8), "blue")
draw = ImageDraw.Draw(im)
for (x, column) in enumerate(qrgrid):
for (y, val) in enumerate(column):
if (x <= 8 and y <= 8) or (x <= 8 and y >= qrsize - 8) or (x >= qrsize - 8 and y <= 8) or (x == 6 or y == 6): # noqa
# Grayify the timing patterns and the format lines
draw.rectangle((x * 8, y * 8, (x+1) * 8, (y+1) * 8), "darkgray" if val else "lightgray")
elif val == 1:
draw.rectangle((x * 8, y * 8, (x+1) * 8, (y+1) * 8), "black")
elif val == 0:
draw.rectangle((x * 8, y * 8, (x+1) * 8, (y+1) * 8), "white")
return im
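A quick way to exercise draw_grid on its own is to render a throwaway pattern; the checkerboard and the output filename below are arbitrary choices for this sketch (PIL's Image/ImageDraw are assumed to be imported at module level, as the function itself requires).

demo_grid = [[(x + y) % 2 for y in range(21)] for x in range(21)]
draw_grid(demo_grid).save("qr_grid_demo.png")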
|
[
"def __draw_grid(self):\n MARGIN = self.MARGIN\n for i in range(4):\n x0 = (4-i) * MARGIN + MARGIN\n y0 = i * MARGIN\n x1 = 160-(4-i)*MARGIN + MARGIN\n y1 = i * MARGIN\n self.canvas.create_line(x0, y0, x1, y1)\n\n for j in range(3-i, 5+i+1):\n x0 = j * MARGIN + MARGIN\n y0 = (i+1) * MARGIN\n x1 = j * MARGIN + MARGIN\n y1 = 80\n self.canvas.create_line(x0, y0, x1, y1)\n\n for i in range(4, 4+9):\n x0 = 0 + MARGIN\n y0 = i * MARGIN\n x1 = 160 + MARGIN\n y1 = i * MARGIN\n self.canvas.create_line(x0, y0, x1, y1)\n\n for i in range(9):\n x0 = i * MARGIN + MARGIN\n y0 = 80\n x1 = i * MARGIN + MARGIN\n y1 = 80 + MARGIN*8\n self.canvas.create_line(x0, y0, x1, y1)\n\n for i in range(3):\n x0 = (i+1) * MARGIN + MARGIN\n y0 = (i+13)* MARGIN\n x1 = 160-(i+1)*MARGIN + MARGIN\n y1 = (i+13) * MARGIN\n self.canvas.create_line(x0, y0, x1, y1)\n\n for j in range(7-i, i, -1):\n x0 = j * MARGIN + MARGIN\n y0 = 80 + MARGIN*8\n x1 = j * MARGIN + MARGIN\n y1 = (i+13) * MARGIN\n self.canvas.create_line(x0, y0, x1, y1)",
"def draw_grid():\n for y in range(num_rows):\n for x in range(num_cols):\n led_matrix.point(x, y, curr_gen[y][x])",
"def __draw_grid(self):\n for i in range(10):\n color = 'blue' if i % 3 == 0 else \"gray\"\n\n x0 = MARGIN + i * SIDE\n y0 = MARGIN\n x1 = MARGIN + i * SIDE\n y1 = HEIGHT - MARGIN\n self.canvas.create_line(x0, y0, x1, y1, fill=color)\n\n x0 = MARGIN\n y0 = MARGIN + i * SIDE\n x1 = WIDTH - MARGIN\n y1 = MARGIN + i * SIDE\n self.canvas.create_line(x0, y0, x1, y1, fill=color)",
"def decode_hello():\n # Load the image\n im = Image.open(os.path.join(os.path.dirname(__file__), 'barcode-image21helloqrworld.png'))\n im = im.crop((24, 24, 108, 108))\n imdata = im.getdata()\n\n qrsize = 21\n qrgrid = [[None] * qrsize for _ in range(qrsize)]\n for x in range(qrsize):\n for y in range(qrsize):\n qrgrid[x][y] = 0 if imdata[(4 * y + 2) * 84 + (4 * x + 2)][0] & 0x80 else 1\n del imdata\n del im\n\n decode_qr_grid(qrgrid)\n\n # Show the grid\n # im = draw_grid(qrgrid)\n # im.show()",
"def draw(self, canvas): \n self.draw_grid(canvas)",
"def draw_grid(self):\r\n\r\n for x in range(0, FULLSIZE[0], CELLSIZE):\r\n pygame.draw.line(self.screen, GRAY, (x, 0), (x, FULLSIZE[0]))\r\n for y in range(0, FULLSIZE[1], CELLSIZE):\r\n pygame.draw.line(self.screen, GRAY, (0, y), (FULLSIZE[0], y))\r\n\r\n for x in range(0, FULLSIZE[0], CUBESIZE):\r\n pygame.draw.line(self.screen, BLACK, (x, 0), (x, FULLSIZE[0]), 2)\r\n for y in range(0, FULLSIZE[1], CUBESIZE):\r\n pygame.draw.line(self.screen, BLACK, (0, y), (FULLSIZE[0], y), 2)",
"def grid(self):\n # Blank the grid\n for y in range(1, 21):\n self.addstr(y, 1, \" \" * 10)\n # Draw the new grid\n for x, column in enumerate(self.game.grid):\n for y, color in enumerate(column):\n y -= self.game.grid.top_buffer\n if y >= 0:\n self.pixel(x, y, color)\n\n # Finally refresh the screen\n self.refresh()",
"def decode_qr_grid(qrgrid):\n qrsize = len(qrgrid)\n assert all(len(col) == qrsize for col in qrgrid), \"not a square grid\"\n\n # Extract format info, which is present in lines\n format_int1 = 0\n format_int2 = 0\n for y in range(6):\n format_int1 |= qrgrid[8][y] << y\n format_int1 |= qrgrid[8][7] << 6\n format_int1 |= qrgrid[8][8] << 7\n format_int1 |= qrgrid[7][8] << 8\n for x in range(6):\n format_int1 |= qrgrid[5 - x][8] << (x + 9)\n\n for x in range(8):\n format_int2 |= qrgrid[qrsize - 1 - x][8] << x\n assert qrgrid[8][qrsize - 8] == 1 # \"unused\" bit\n for y in range(7):\n format_int2 |= qrgrid[8][qrsize - 7 + y] << (8 + y)\n\n # cf. http://upload.wikimedia.org/wikipedia/commons/4/49/QRCode-4-Levels%2CMasks.png for the magic masking\n fmtint1 = format_int1 ^ int('101010000010010', 2)\n fmtint2 = format_int2 ^ int('101010000010010', 2)\n\n if qrformat_bch_check(fmtint1):\n fmtint = fmtint1\n if qrformat_bch_check(fmtint2):\n if fmtint1 != fmtint2:\n print(\"Error: format-1 ({:#x}) and format-2 ({:#x}) were sane but different\".format(fmtint1, fmtint2))\n raise ValueError(\"Disagreeing format integers\")\n else:\n print(\"Warning: format-1 ({:#x}) was corrupted, using format-2 ({:#x})\".format(fmtint1, fmtint2))\n else:\n if qrformat_bch_check(fmtint2):\n print(\"Warning: format-2 ({:#x}) was corrupted, using format-1 ({:#x})\".format(fmtint2, fmtint1))\n fmtint = fmtint2\n else:\n print(\"Error: format-1 ({:#x}) and format-2 ({:#x}) were corrupted\".format(fmtint1, fmtint2))\n raise ValueError(\"Unable to decode format\")\n\n # Sanity checks\n assert qrformat_bch_check(fmtint)\n assert qrformat_bch(fmtint >> 10) == fmtint\n assert zbar_bch15_5_encode(fmtint >> 10) == fmtint\n\n edc_level = fmtint >> 13\n mask = (fmtint >> 10) & 7\n print(\"QR code size={}, format={:#x}: EDC level {} Mask {}\".format(qrsize, fmtint, edc_level, mask))\n\n # Apply the mask\n for x in range(qrsize):\n for y in range(qrsize):\n if (x <= 8 and y <= 8) or (x <= 8 and y >= qrsize - 8) or (x >= qrsize - 8 and y <= 8):\n continue\n if mask == 4:\n if (y // 2 + x // 3) % 2 == 0:\n qrgrid[x][y] ^= 1\n elif mask == 6:\n if ((x * y) % 3 + x * y) % 2 == 0:\n qrgrid[x][y] ^= 1\n else:\n raise NotImplementedError(\"Unknown QR code mask {}\".format(mask))\n\n if qrsize == 21:\n # Decode the encoding\n encoding = qrgrid[20][20] << 3\n encoding |= qrgrid[19][20] << 2\n encoding |= qrgrid[20][19] << 1\n encoding |= qrgrid[19][19]\n\n if encoding == 4:\n print(\"... encoding {}: Bytes\".format(encoding))\n else:\n print(\"... 
encoding {}: ?\".format(encoding))\n\n blocks = bytearray(19)\n # Positions to turn up2down\n turn_pos = [(3, 1), (2, 1), (3, 0), (2, 0), (1, 0), (0, 0), (1, 1), (0, 1)]\n for i in range(4):\n for j in range(2):\n tposx, tposy = turn_pos[i * 2 + j]\n blocks[0] |= qrgrid[20 - j][18 - i] << (7 - (i * 2 + j))\n blocks[1] |= qrgrid[20 - j][14 - i] << (7 - (i * 2 + j))\n blocks[2] |= qrgrid[17 + tposx][9 + tposy] << (7 - (i * 2 + j))\n blocks[3] |= qrgrid[18 - j][11 + i] << (7 - (i * 2 + j))\n blocks[4] |= qrgrid[18 - j][15 + i] << (7 - (i * 2 + j))\n blocks[5] |= qrgrid[15 + tposx][20 - tposy] << (7 - (i * 2 + j))\n blocks[6] |= qrgrid[16 - j][18 - i] << (7 - (i * 2 + j))\n blocks[7] |= qrgrid[16 - j][14 - i] << (7 - (i * 2 + j))\n blocks[8] |= qrgrid[13 + tposx][9 + tposy] << (7 - (i * 2 + j))\n blocks[9] |= qrgrid[14 - j][11 + i] << (7 - (i * 2 + j))\n blocks[10] |= qrgrid[14 - j][15 + i] << (7 - (i * 2 + j))\n blocks[11] |= qrgrid[11 + tposx][20 - tposy] << (7 - (i * 2 + j))\n blocks[12] |= qrgrid[12 - j][18 - i] << (7 - (i * 2 + j))\n blocks[13] |= qrgrid[12 - j][14 - i] << (7 - (i * 2 + j))\n blocks[14] |= qrgrid[12 - j][10 - i] << (7 - (i * 2 + j))\n blocks[15] |= qrgrid[12 - j][5 - i] << (7 - (i * 2 + j))\n blocks[16] |= qrgrid[9 + tposx][0 + tposy] << (7 - (i * 2 + j))\n blocks[17] |= qrgrid[10 - j][2 + i] << (7 - (i * 2 + j))\n blocks[18] |= qrgrid[10 - j][7 + i] << (7 - (i * 2 + j))\n\n print(\"... hex: {}\".format(' '.join('{:02x}'.format(b) for b in blocks)))\n if encoding == 4:\n # Byte encoding\n length = blocks[0]\n if length >= len(blocks):\n print(\"Error: length {} too large\".format(length))\n else:\n print(\"... bytes[{}]: {}\".format(blocks[0], repr(bytes(blocks[1:length + 1]))))\n if length + 1 < len(blocks):\n print(\"... padding: {}\".format(repr(bytes(blocks[length + 1:]))))",
"def test_generateQR(self):\n self.assertTrue(qrcodes.generateQR(self.bridgelines))",
"def draw_board(self):\r\n for i in range(9):\r\n for j in range(9):\r\n # Draw black lines to demarkate the 'boxes'\r\n if j%3 == 0 and j != 0:\r\n pygame.draw.line(self.window, BLACK, ((j//3)*180, 0), ((j//3)*180, 540), 4)\r\n if i%3 == 0 and i != 0:\r\n pygame.draw.line(self.window, BLACK, (0, (i//3)*180), (540, (i//3)*180), 4)\r\n \r\n # Draw the cells \r\n self.cells[i][j].draw(BLACK, 1)\r\n\r\n # Don't draw the placeholder 0s on the grid\r\n if self.cells[i][j].value != 0:\r\n self.cells[i][j].display(self.cells[i][j].value, (21+(j*60), (16+(i*60))), (0, 0, 0))\r\n \r\n # Bottom most line\r\n pygame.draw.line(self.window, (0, 0, 0), (0, ((i+1) // 3) * 180), (540, ((i+1) // 3) * 180), 4)",
"def render(qrcode):\n if qrcode.data_cache is None:\n qrcode.make()\n offset = qrcode.border # Spec says border should be at least four boxes wide\n pixelsize = (qrcode.modules_count + offset * 2) * qrcode.box_size\n oh = ShapeBuilder()\n # pysvg backward compatibility fix\n try:\n img = svg()\n except NameError:\n img = Svg()\n img.addElement(oh.createRect(0,0,pixelsize, pixelsize, strokewidth=0, fill=\"white\"))\n for r in range(qrcode.modules_count):\n for c in range(qrcode.modules_count):\n if qrcode.modules[r][c]:\n x = (c + offset) * qrcode.box_size\n y = (r + offset) * qrcode.box_size\n #SVG Rectangle is x, y, size_x, size_y\n img.addElement(oh.createRect(x,y, qrcode.box_size, qrcode.box_size,\n strokewidth=0, fill=\"black\"))\n return img",
"def draw_grid(self, screen):\n if self.cfg.draw_grid:\n for row in range(self.row_count - 1):\n pygame.draw.line(screen, self.grid_colour, (0, (row + 1) * self.cell_height),\n (self.screen_width, (row + 1) * self.cell_height), self.grid_thickness)\n \n for col in range(self.col_count - 1):\n pygame.draw.line(screen, self.grid_colour, ((col + 1) * self.cell_width, 0),\n ((col + 1) * self.cell_width, self.screen_height), self.grid_thickness)\n \n pygame.draw.rect(screen, self.grid_colour, pygame.Rect(0, 0, self.screen_width + 1, self.screen_height + 1), self.grid_thickness)",
"def draw_grid(img):\n\tlog.info('Draw grid...')\n\t# Bottom zone\n\tret_img = cv2.rectangle(img,(b_zn.x-2,b_zn.y-2),(b_zn.x-2+b_zn.w+4,b_zn.y+b_zn.h+4), (255,255,0), 1)\n\theight, width = img.shape[:2]\n\tcenter = int(round(width/2))\n\tret_img = cv2.line(img, (center, 0), (center,height), (255,255,0), 1)\n\t# Left zone\n\tret_img = cv2.rectangle(ret_img,(l_zn.x,l_zn.y-2),(l_zn.x+l_zn.w+2,l_zn.y+l_zn.h+4), (255,255,0), 1)\n\t# right zone\n\tret_img = cv2.rectangle(ret_img,(r_zn.x-2,r_zn.y-2),(r_zn.x+r_zn.w+2,r_zn.y+r_zn.h+4), (255,255,0), 1)\n\treturn ret_img",
"def __add_reference_grid(self):\n if self.compact:\n return\n center = self.size // 2\n ring_radius = 5 if self.compact else 7\n for x in range(-center, center + 1):\n for y in range(-center, center + 1):\n # skip finder pattern\n if -ring_radius <= x <= ring_radius and -ring_radius <= y <= ring_radius:\n continue\n # set pixel\n if x % 16 == 0 or y % 16 == 0:\n val = '#' if (x + y + 1) % 2 != 0 else ' '\n self.matrix[center + y][center + x] = val",
"def _plotGrid(self):\n \n vx = self._GridX\n while vx < self._MAX_COLS:\n self.libtft.VLine(vx, 1, self._MAX_ROWS - 1, self.libtft.GRAY)\n vx += self._GridX\n \n vy = self._GridY\n while vy < self._MAX_ROWS:\n self.libtft.HLine(1, self._MAX_COLS - 1, vy, self.libtft.GRAY)\n vy += self._GridY",
"def draw_cells(self, surface):\n for cell in self.cells:\n pygame.draw.rect(surface, cell.color, cell, 2)",
"def draw_board(self, canvas):\n #for column_num in range(0, WIDTH):\n # canvas.draw_line((column_num * BOX_SIZE, 0), (column_num * BOX_SIZE, HEIGHT * BOX_SIZE), 0.2, '#000000')\n #for row_num in range(0, HEIGHT):\n # canvas.draw_line((0, row_num * BOX_SIZE), (WIDTH * BOX_SIZE, row_num * BOX_SIZE), 0.2, '#000000')\n \n for row_num, row in enumerate(self._layout):\n for column_num, block_name in enumerate(row):\n if block_name != 0:\n square_colour = BLOCK_COLOURS[block_name]\n \n canvas.draw_polygon([(0 + column_num * BOX_SIZE, 0 + row_num * BOX_SIZE),\n (BOX_SIZE + column_num * BOX_SIZE, 0 + row_num * BOX_SIZE), \n (BOX_SIZE + column_num * BOX_SIZE, BOX_SIZE + row_num * BOX_SIZE), \n (0 + column_num * BOX_SIZE, BOX_SIZE + row_num * BOX_SIZE)], \n 0.0001, '#FFFFFF', square_colour)",
"def draw_grid(play_area):\n for x in range(0, PLAY_AREA_WIDTH, GRID_SIZE):\n pygame.draw.line(play_area, GRID_COLOR,\n (x, 0), (x, PLAY_AREA_HEIGHT), 1)\n for y in range(0, PLAY_AREA_HEIGHT, GRID_SIZE):\n pygame.draw.line(play_area, GRID_COLOR,\n (0, y), (PLAY_AREA_WIDTH, y), 1)",
"def draw_grid(self, x, y):\n shrink = (1 - self.shrink) + 1\n center_x, center_y = self.grid * (x + 1), self.grid * (y + 1)\n self.canvas.create_rectangle(center_y - self.half_grid,\n center_x - self.half_grid,\n center_y + self.half_grid,\n center_x + self.half_grid,\n fill=self.board_color,\n outline=self.board_color)\n a, b = [0, shrink] if y == 0 else [-shrink, 0] if y == self.size - 1 else [-shrink, shrink]\n c, d = [0, shrink] if x == 0 else [-shrink, 0] if x == self.size - 1 else [-shrink, shrink]\n self.canvas.create_line(center_y + a * self.half_grid, center_x, center_y + b * self.half_grid, center_x)\n self.canvas.create_line(center_y, center_x + c * self.half_grid, center_y, center_x + d * self.half_grid)\n [self.canvas.create_text(self.grid * (i + 1), self.grid * 0.8, text=f'{i}') for i in range(self.size)]\n [self.canvas.create_text(self.grid * 0.8, self.grid * (i + 1), text=f'{i}') for i in range(self.size)]\n\n # draw special points\n if ((x == 3 or x == 7) and (y == 3 or y == 7)):\n self.canvas.create_oval(center_y - self.special_point,\n center_x - self.special_point,\n center_y + self.special_point,\n center_x + self.special_point,\n fill=\"#555555\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Decode a basic QR code
|
def decode_hello():
# Load the image
im = Image.open(os.path.join(os.path.dirname(__file__), 'barcode-image21helloqrworld.png'))
im = im.crop((24, 24, 108, 108))
imdata = im.getdata()
qrsize = 21
qrgrid = [[None] * qrsize for _ in range(qrsize)]
for x in range(qrsize):
for y in range(qrsize):
qrgrid[x][y] = 0 if imdata[(4 * y + 2) * 84 + (4 * x + 2)][0] & 0x80 else 1
del imdata
del im
decode_qr_grid(qrgrid)
# Show the grid
# im = draw_grid(qrgrid)
# im.show()
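The index arithmetic samples the centre pixel of each 4x4-pixel module: the crop is 84 pixels wide, so (4 * y + 2) * 84 selects the row and (4 * x + 2) the column. A slightly more general sampler, hypothetical and parameterised by module size, could look like this:

def image_to_grid(im, qrsize=21, module_px=4):
    # Sample the centre pixel of every module; dark modules become 1.
    px = im.convert("L").load()
    return [[0 if px[module_px * x + module_px // 2,
                     module_px * y + module_px // 2] & 0x80 else 1
             for y in range(qrsize)]
            for x in range(qrsize)]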
|
[
"def decode_qr(arg_image):\n qr_result = decode(arg_image)\n\n if (len( qr_result ) > 0):\n decoded_data = qr_result[0].data\n else:\n decoded_data = \"NA\"\n\n #Return the Decode data from QR \n return decoded_data",
"def decode(image_file):\n \n # Set message\n message = \"\"\n \n # Create a zbar image reader\n scanner = zbar.ImageScanner()\n \n # Set the reader configuration to default\n scanner.parse_config('enable')\n \n # Read the image file and convert it into greyscale data readable by zbar\n try:\n pil = Image.open(image_file).convert('L')\n except:\n # Image file is not an image\n print \"Sorry, the file provided was not an image.\"\n raise TypeError\n \n width, height = pil.size # Extract image size\n raw = pil.tostring() # Convert image to a string of data\n \n # Put the image data in a container with the size and data together\n image = zbar.Image(width, height, 'Y800', raw)\n \n # Use zbar to scan the data for a QR code\n scanner.scan(image)\n \n # Scan through results\n for symbol in image: \n \n # Check image is actually a QR code\n if str(symbol.type) == \"QRCODE\":\n # Set message to success and include the encoded message\n message = \"Success ({0})\".format(symbol.data)\n # Exit\n return message\n \n # If results do not contain a zbar symbol (unsuccessful read) \n else:\n # Set message to fail\n message = \"Fail\" \n # Exit\n return message",
"def _read_barcode(self, image):\n if self.LOG.getEffectiveLevel() is logging.DEBUG:\n start = timeit.default_timer()\n \n runner = \"com.google.zxing.client.j2se.CommandLineRunner\"\n libs = [\"javase-3.0.0.jar\", \"core-3.0.0.jar\"]\n libs_fullpath = [os.path.join(os.path.dirname(__file__), \"lib\", lib) for lib in libs]\n classpath = \":\".join(libs_fullpath)\n\n p = subprocess.Popen([\"java\", \"-cp\", classpath, runner, image, \"--possibleFormats=AZTEC\"],\n stdout=subprocess.PIPE, \n stderr=subprocess.PIPE)\n out, err = p.communicate()\n \n # There was a problem.. \n if p.returncode != 0:\n # Attempt to get a meaninful error\n err_lines = err.split('\\n')\n out_lines = out.split('\\n')\n if len(err_lines) > 5: # Assume a Java stack trace\n for line in err_lines:\n if line.startswith(\"Caused by\"):\n error_msg = line\n break\n elif out: error_msg = out_lines[0] \n else: error_msg = err_lines[0]\n raise BarcodeDecodeError(error_msg)\n\n decoded = out.split('\\n')[2] # Get 3rd line (raw string)\n \n if self.LOG.getEffectiveLevel() is logging.DEBUG:\n stop = timeit.default_timer()\n self.LOG.debug(\"_read_barcode: %ss\" % (stop - start))\n \n return decoded",
"def get_qr_data(arg_image):\n qr_result = decode(arg_image)\n\n if qr_result:\n return qr_result[0].data\n\n return 'NA'",
"def read_qr_camera(self, runtime: int) -> str:\n\n cap = cv.VideoCapture(0)\n\n data = ''\n crude_run_time = 0\n while len(data.split(\":\")) != 4 & crude_run_time < runtime * 2:\n\n _, frame = cap.read()\n\n data, pts, straight_qrcode = self._detector.detectAndDecode(frame)\n\n if len(data.split(\":\")) == 4:\n return data\n\n\n cv.imshow(\"Frame\", frame)\n\n key = cv.waitKey(1)\n\n if key == 27:\n break\n sleep(0.5)\n crude_run_time += 1\n return ''",
"def get_qrcode(jwt):\n qr = qrcode.make('authy://account?token=' + jwt,\n image_factory=qrcode.image.svg.SvgImage)\n stream = BytesIO()\n qr.save(stream)\n return stream.getvalue()",
"def str2qr_terminal(text):\n Log.debug(text)\n qr = qrcode.QRCode()\n qr.border = 1\n qr.add_data(text)\n mat = qr.get_matrix()\n print_qr(mat)",
"def extract_barcode(record, eb):\n seq = record.sequence[eb.start:eb.end]\n qual = record.quality[eb.start:eb.end]\n return (eb.sequence_tag, seq, 'Z'), (eb.quality_tag, qual, 'Z')",
"def scan_qrcode():\n # initialize the camera and grab a reference to the raw camera capture\n print(\"[INFO] Start scanning QR code!\")\n is_payed = False\n camera = PiCamera()\n camera.resolution = (320, 240)\n camera.framerate = 30\n rawCapture = PiRGBArray(camera, size=(320, 240))\n # allow the camera to warmup\n sleep(3)\n # capture frames from the camera\n for frame in camera.capture_continuous(rawCapture, format=\"bgr\", use_video_port=True):\n # grab the raw NumPy array representing the image, then initialize the timestamp\n # and occupied/unoccupied text\n image = frame.array\n qrdata = decode(image)\n if qrdata:\n print(\"[INFO] Payment received!\")\n is_payed = True\n break\n\n cv2.imshow(\"Frame\", image)\n key = cv2.waitKey(1) & 0xFF\n # clear the stream in preparation for the next frame\n rawCapture.truncate(0)\n\n if key == ord(\"q\"):\n break\n\n camera.close()\n cv2.destroyAllWindows()\n return is_payed",
"def decode_qr_grid(qrgrid):\n qrsize = len(qrgrid)\n assert all(len(col) == qrsize for col in qrgrid), \"not a square grid\"\n\n # Extract format info, which is present in lines\n format_int1 = 0\n format_int2 = 0\n for y in range(6):\n format_int1 |= qrgrid[8][y] << y\n format_int1 |= qrgrid[8][7] << 6\n format_int1 |= qrgrid[8][8] << 7\n format_int1 |= qrgrid[7][8] << 8\n for x in range(6):\n format_int1 |= qrgrid[5 - x][8] << (x + 9)\n\n for x in range(8):\n format_int2 |= qrgrid[qrsize - 1 - x][8] << x\n assert qrgrid[8][qrsize - 8] == 1 # \"unused\" bit\n for y in range(7):\n format_int2 |= qrgrid[8][qrsize - 7 + y] << (8 + y)\n\n # cf. http://upload.wikimedia.org/wikipedia/commons/4/49/QRCode-4-Levels%2CMasks.png for the magic masking\n fmtint1 = format_int1 ^ int('101010000010010', 2)\n fmtint2 = format_int2 ^ int('101010000010010', 2)\n\n if qrformat_bch_check(fmtint1):\n fmtint = fmtint1\n if qrformat_bch_check(fmtint2):\n if fmtint1 != fmtint2:\n print(\"Error: format-1 ({:#x}) and format-2 ({:#x}) were sane but different\".format(fmtint1, fmtint2))\n raise ValueError(\"Disagreeing format integers\")\n else:\n print(\"Warning: format-1 ({:#x}) was corrupted, using format-2 ({:#x})\".format(fmtint1, fmtint2))\n else:\n if qrformat_bch_check(fmtint2):\n print(\"Warning: format-2 ({:#x}) was corrupted, using format-1 ({:#x})\".format(fmtint2, fmtint1))\n fmtint = fmtint2\n else:\n print(\"Error: format-1 ({:#x}) and format-2 ({:#x}) were corrupted\".format(fmtint1, fmtint2))\n raise ValueError(\"Unable to decode format\")\n\n # Sanity checks\n assert qrformat_bch_check(fmtint)\n assert qrformat_bch(fmtint >> 10) == fmtint\n assert zbar_bch15_5_encode(fmtint >> 10) == fmtint\n\n edc_level = fmtint >> 13\n mask = (fmtint >> 10) & 7\n print(\"QR code size={}, format={:#x}: EDC level {} Mask {}\".format(qrsize, fmtint, edc_level, mask))\n\n # Apply the mask\n for x in range(qrsize):\n for y in range(qrsize):\n if (x <= 8 and y <= 8) or (x <= 8 and y >= qrsize - 8) or (x >= qrsize - 8 and y <= 8):\n continue\n if mask == 4:\n if (y // 2 + x // 3) % 2 == 0:\n qrgrid[x][y] ^= 1\n elif mask == 6:\n if ((x * y) % 3 + x * y) % 2 == 0:\n qrgrid[x][y] ^= 1\n else:\n raise NotImplementedError(\"Unknown QR code mask {}\".format(mask))\n\n if qrsize == 21:\n # Decode the encoding\n encoding = qrgrid[20][20] << 3\n encoding |= qrgrid[19][20] << 2\n encoding |= qrgrid[20][19] << 1\n encoding |= qrgrid[19][19]\n\n if encoding == 4:\n print(\"... encoding {}: Bytes\".format(encoding))\n else:\n print(\"... 
encoding {}: ?\".format(encoding))\n\n blocks = bytearray(19)\n # Positions to turn up2down\n turn_pos = [(3, 1), (2, 1), (3, 0), (2, 0), (1, 0), (0, 0), (1, 1), (0, 1)]\n for i in range(4):\n for j in range(2):\n tposx, tposy = turn_pos[i * 2 + j]\n blocks[0] |= qrgrid[20 - j][18 - i] << (7 - (i * 2 + j))\n blocks[1] |= qrgrid[20 - j][14 - i] << (7 - (i * 2 + j))\n blocks[2] |= qrgrid[17 + tposx][9 + tposy] << (7 - (i * 2 + j))\n blocks[3] |= qrgrid[18 - j][11 + i] << (7 - (i * 2 + j))\n blocks[4] |= qrgrid[18 - j][15 + i] << (7 - (i * 2 + j))\n blocks[5] |= qrgrid[15 + tposx][20 - tposy] << (7 - (i * 2 + j))\n blocks[6] |= qrgrid[16 - j][18 - i] << (7 - (i * 2 + j))\n blocks[7] |= qrgrid[16 - j][14 - i] << (7 - (i * 2 + j))\n blocks[8] |= qrgrid[13 + tposx][9 + tposy] << (7 - (i * 2 + j))\n blocks[9] |= qrgrid[14 - j][11 + i] << (7 - (i * 2 + j))\n blocks[10] |= qrgrid[14 - j][15 + i] << (7 - (i * 2 + j))\n blocks[11] |= qrgrid[11 + tposx][20 - tposy] << (7 - (i * 2 + j))\n blocks[12] |= qrgrid[12 - j][18 - i] << (7 - (i * 2 + j))\n blocks[13] |= qrgrid[12 - j][14 - i] << (7 - (i * 2 + j))\n blocks[14] |= qrgrid[12 - j][10 - i] << (7 - (i * 2 + j))\n blocks[15] |= qrgrid[12 - j][5 - i] << (7 - (i * 2 + j))\n blocks[16] |= qrgrid[9 + tposx][0 + tposy] << (7 - (i * 2 + j))\n blocks[17] |= qrgrid[10 - j][2 + i] << (7 - (i * 2 + j))\n blocks[18] |= qrgrid[10 - j][7 + i] << (7 - (i * 2 + j))\n\n print(\"... hex: {}\".format(' '.join('{:02x}'.format(b) for b in blocks)))\n if encoding == 4:\n # Byte encoding\n length = blocks[0]\n if length >= len(blocks):\n print(\"Error: length {} too large\".format(length))\n else:\n print(\"... bytes[{}]: {}\".format(blocks[0], repr(bytes(blocks[1:length + 1]))))\n if length + 1 < len(blocks):\n print(\"... padding: {}\".format(repr(bytes(blocks[length + 1:]))))",
"def detectAndDecode(\n self, img, points=..., straight_qrcode=...\n ) -> Tuple[retval, points, straight_qrcode]:\n ...",
"def read_qr(self, qr_path: os.path) -> str:\n\n img = cv.imread(qr_path)\n data, pts, straight_qrcode = self._detector.detectAndDecode(img)\n if pts is not None:\n return data\n else:\n raise QRReadError(\"Failed to fetch data from image\")",
"def get_qr_code(self):\n # 二维码的版本号,二维码总共有1到40个版本,最小的版本号是1,对应的尺寸是21×21\n QR_VERSION = 1\n # 生成图片的像素\n QR_BOX_SIZE = 10\n # 二维码的边框宽度,4是最小值\n QR_BORDER = 4\n qr = qrcode.QRCode(\n version=QR_VERSION,\n error_correction=qrcode.constants.ERROR_CORRECT_L,\n box_size=QR_BOX_SIZE,\n border=QR_BORDER\n )\n qr.add_data(self.get_otp_uri())\n qr.make(fit=True)\n img = qr.make_image()\n\n output = BytesIO()\n img.save(output)\n qr_data = output.getvalue()\n output.close()\n\n return base64.b64encode(qr_data).decode('ascii')",
"def qr(): \r\n\r\n input_string = request.args.get('input_string')\r\n input_string = input_string[:512]\r\n\r\n img_io = BytesIO()\r\n img = qrcode.make(input_string)\r\n img.save(img_io, 'JPEG', quality=70)\r\n img_io.seek(0)\r\n return send_file(img_io, mimetype='image/jpeg')",
"def create_qr():\n qr = qrcode.QRCode(\n version=1,\n error_correction=qrcode.constants.ERROR_CORRECT_L,\n box_size=5\n )\n data = str(int(time.time() * 1000))\n crc = hex(zlib.crc32(data.encode('utf-8')))\n\n qrdata = \"{}|{}\".format(data, crc)\n qr.add_data(qrdata)\n qr.make(fit=True)\n\n img = qr.make_image()\n return img",
"def analysis_video(vname, scan_qr_rate=10, quite_mode=False):\n\n cap = cv2.VideoCapture(vname)\n if not cap.isOpened():\n raise ValueError(f'cannot open video \"{vname}\"')\n\n state = {'paused': False, 'scan_qr': scan_qr_rate}\n\n qr_strings = {}\n nr_chunks = None\n while cap.isOpened():\n if not state['paused']:\n ret, frame = cap.read()\n if ret:\n if state['scan_qr'] <= 0:\n frame, qr_str = scan_qr(frame, quite_mode)\n if qr_str is not None:\n state['scan_qr'] = scan_qr_rate\n if qr_str not in qr_strings:\n chunk_id, data, last_chunk = code2data(qr_str, prepend_idx=True)\n qr_strings[qr_str] = chunk_id, data\n if last_chunk:\n nr_chunks = chunk_id + 1\n else:\n state['scan_qr'] -= 1\n\n if not quite_mode:\n cv2.imshow('frame', frame)\n\n key = cv2.waitKey(25)\n\n if key & 0xFF == ord('q'):\n break\n elif key & 0xFF == ord('p'):\n if state['paused']:\n state['paused'] = False\n print('Unpaused')\n else:\n state['paused'] = True\n print('Paused')\n else:\n break\n if nr_chunks is None:\n raise Exception('Cannot get nr_chunks')\n if len(qr_strings) != nr_chunks:\n raise Exception(f'got {len(qr_strings)} qrcodes but should have {nr_chunks}')\n qs_list = [None for _ in range(nr_chunks)]\n for chunk_id, data in qr_strings.values():\n if chunk_id < 0 or chunk_id >= nr_chunks:\n raise Exception(f'got chunk_id={chunk_id} but should between 0 and {nr_chunks-1}')\n if qs_list[chunk_id] is not None:\n raise Exception(f'got multiple chunk_id={chunk_id} that have different data')\n qs_list[chunk_id] = data\n return code2data(''.join(qs_list))",
"def test_g_et_cobtxidqrcode(self):\n pass",
"def qr_data_input(string_data: str) -> qrcode.QRCode:\r\n qr = qrcode.QRCode()\r\n qr.add_data(string_data)\r\n return qr",
"def decode(self, data):\n\t\traise NotImplementedError()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initialize a new StatsD client.
|
def __init__(self, prefix="", host="127.0.0.1", port="8125"):
self.stat = statsd.StatsClient(host=host, port=port, prefix=prefix)
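A minimal usage sketch of the wrapper above, assuming the pystatsd `statsd` package (whose StatsClient exposes incr, timing and gauge). The class name and the metric names are placeholders, since the snippet only shows the constructor.

# Minimal usage sketch; `StatsWrapper` stands in for the unnamed class above.
import statsd

class StatsWrapper:
    def __init__(self, prefix="", host="127.0.0.1", port="8125"):
        self.stat = statsd.StatsClient(host=host, port=port, prefix=prefix)

metrics = StatsWrapper(prefix="myapp")
metrics.stat.incr("requests")            # counter -> myapp.requests
metrics.stat.timing("db.query_ms", 42)   # timer value in milliseconds
metrics.stat.gauge("queue.depth", 7)     # gauge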
|
[
"def _init_client(self):\n pass",
"def __init__(self):\r\n super().__init__()\r\n self.params = SysParams().params\r\n self.database = self.params['influx_database']\r\n self.logger = Logger().logger\r\n\r\n # The client should be an instance of InfluxDBClient.\r\n self.dbclient = influxdb.InfluxDBClient(host=self.params['influx_host'],\r\n port=self.params['influx_port'],\r\n username=self.params['influx_user'],\r\n password=self.params['influx_password'],\r\n timeout=self.params['influx_timeout'],\r\n database=self.database)",
"def init_client(self):\n self._transport = RequestsHTTPTransport(url=self._url,\n use_json=True,\n headers={\n \"Content-type\":\n \"application/json\",\n \"Authorization\":\n \"bearer \" +\n str(self._token).strip()\n },\n verify=False)\n self._client = Client(retries=3,\n transport=self._transport,\n fetch_schema_from_transport=False)",
"def __init__(self, input_stream, output_stream):\n self.current_id = 1\n self.json_rpc_client = json_rpc_client.JsonRpcClient(\n input_stream, output_stream)\n self.json_rpc_client.start()\n\n logger.info(u'Sql Tools Client Initialized')",
"def __init__( self, url = None ):\n Client.__init__( self )\n self.setServer( 'DataManagement/DataLogging' )\n if url:\n self.setServer( url )\n self.setTimeout( 120 )",
"def _create_suds_client(self):\n\n self.client = Client(const.WSDLLOCAL)\n self.client.set_options(service = ApiClient._sdict[self.service][0],\n headers = {'user-agent': const.USERAGENT})\n\n # put username (and password if necessary) into the headers.\n # note that another way to do this is to call betdaq.set_user,\n # so the username and password in const.py do not need to be\n # specified.\n self.set_headers(const.BDAQUSER, const.BDAQPASS)",
"def set_stats_client(stats_client: StatsClient) -> None:\n global _scoped_stats_client\n _scoped_stats_client.set_stats_client(stats_client)",
"def __init__(self):\n project_id = os.environ.get(\"GOOGLE_PROJECT_ID\", \"\")\n client = spanner.Client(project=project_id)\n instance_id = os.environ.get(\"GLUU_GOOGLE_SPANNER_INSTANCE_ID\", \"\")\n self.instance = client.instance(instance_id)\n\n database_id = os.environ.get(\"GLUU_GOOGLE_SPANNER_DATABASE_ID\", \"\")\n self.database = self.instance.database(database_id)",
"def __init__(self, client, pool, config):\n self.is_primitive = False\n self.client = client\n self.pool = pool\n self.config = {}\n self.config.update(self.DEFAULT_CONFIG)\n self.config.update(config)",
"def __init__(self,\n dogstatsd_host: str,\n dogstatsd_port: str,\n extra_tags: Optional[List[str]],\n verbose: bool = False) -> None:\n self.stats = DogStatsd(host=dogstatsd_host, port=dogstatsd_port, constant_tags=extra_tags)\n self.verbose = verbose\n\n self.last_time_and_size_info = None # type: Optional[Tuple[datetime, GCSizeInfo]]\n self.last_minor_time = None # type: Optional[datetime]\n self.last_major_time = None # type: Optional[datetime]",
"def get_stats_client(config, prefix=None):\n if prefix is not None:\n prefix = '{0}.{1}'.format(config.STATSD_PREFIX, prefix)\n else:\n prefix = config.STATSD_PREFIX\n\n if not config.STATSD_ENABLED:\n return NoopStatsClient()\n\n return statsd.StatsClient(config.STATSD_HOST,\n config.STATSD_PORT,\n prefix=prefix)",
"def _init_client():\n return _Client(_ARM_WS_URL)",
"def __init__(self, service):\n\n # allowed services\n aservices = ApiClient._sdict.keys()\n if service not in aservices:\n raise IOError('service must be one of: {0}'.\\\n format(' '.join(aservices)))\n self.service = service\n self.snum = ApiClient._sdict[self.service][1]\n # after this call, self.client will be a SUDS client object\n self._create_suds_client()",
"def __init__(\n self,\n *,\n credentials: Optional[ga_credentials.Credentials] = None,\n transport: Union[str, BetaAnalyticsDataTransport] = \"grpc_asyncio\",\n client_options: Optional[ClientOptions] = None,\n client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,\n ) -> None:\n self._client = BetaAnalyticsDataClient(\n credentials=credentials,\n transport=transport,\n client_options=client_options,\n client_info=client_info,\n )",
"def initialize_client(cls, db = \"test\"):\r\n\r\n config = configparser.ConfigParser()\r\n config.read(\"config.ini\")\r\n\r\n username = config[\"mongodb\"][\"user\"]\r\n password = config[\"mongodb\"][\"password\"]\r\n dbname = config[\"mongodb\"][\"dbname\"]\r\n\r\n print(\"Attempting to initialize client ...\")\r\n DatabaseClient.client = MongoClient(f\"mongodb+srv://{username}:{password}@cluster0.kcedc.mongodb.net/{dbname}?retryWrites=true&w=majority\")[db]\r\n print(\"Successfully initialized client\")",
"def __init__(self):\n apikey = os.environ.get('DOCOMO_API_KEY', None)\n if apikey is None:\n raise Exception('DOCOMO_API_KEY is None')\n\n self.client = DocomoClient(apikey=apikey)\n self.options = None",
"def __init__(self):\n super(OvhApiPower, self).__init__()\n self._client = ovh_client.BaseClient()",
"def initialize_analyticsreporting():\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n KEY_FILE_LOCATION, SCOPES)\n\n#Build the service object.\n analytics = build('analyticsreporting', 'v4', credentials=credentials)\n\n return analytics",
"def __init__(self):\n self.clients = {}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks that the embeddings can be loaded, have the right dimension, and that one line matches.
|
def test_load(self):
embedder = LookupEmbedder(init_fastext='examples/data/wiki.ja.vec.small', emb_dim=300, vocab=self.input_reader.vocab)
# self.assertEqual(embedder.embeddings.shape()[::-1], (self.input_reader.vocab_size(), 300))
with open('examples/data/wiki.ja.vec.small', encoding='utf-8') as vecfile:
test_line = next(islice(vecfile, 9, None)).split() # Select the vector for '日'
test_word = test_line[0]
test_id = self.input_reader.vocab.w2i[test_word]
test_emb = test_line[1:]
self.assertTrue(np.allclose(embedder.embeddings.batch([test_id]).npvalue().tolist(),
np.array(test_emb, dtype=float).tolist(), rtol=1e-5))
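The test pulls a single raw line out of the .vec file and compares it against the loaded embedding. As a hedged sketch of the plain-text format it assumes (one word followed by whitespace-separated floats), the same line could be parsed directly as below; the path and the 300-dimension check simply mirror the test and are assumptions about the file, not verified here.

# Sketch: parse one line of a fastText-style .vec text file into a numpy vector.
import numpy as np

def parse_vec_line(line):
    parts = line.rstrip().split()
    return parts[0], np.asarray(parts[1:], dtype=float)

with open('examples/data/wiki.ja.vec.small', encoding='utf-8') as vecfile:
    for i, line in enumerate(vecfile):
        if i == 9:                          # same 10th line the test selects
            word, vec = parse_vec_line(line)
            assert vec.shape == (300,)      # expected embedding dimension per the test
            break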
|
[
"def _check_constraints(self):\n assert self.instance.entity_representations[0].enriched_embeddings is None",
"def _check_constraints(self):\n assert all_in_bounds(self.instance.entity_embeddings(indices=None).norm(p=2, dim=-1), high=1.0, a_tol=EPSILON)",
"def _check_constraints(self):\n for emb in (self.instance.entity_embeddings, self.instance.relation_embeddings):\n assert all_in_bounds(emb(indices=None).norm(p=2, dim=-1), high=1.0, a_tol=EPSILON)",
"def _check_constraints(self):\n for emb in (self.instance.entity_embeddings, self.instance.relation_embeddings):\n assert all_in_bounds(emb(indices=None).norm(p=2, dim=-1), high=1.0, a_tol=1.0e-06)",
"def _check_constraints(self):\n entity_norms = self.instance.entity_embeddings(indices=None).norm(p=2, dim=-1)\n assert torch.allclose(entity_norms, torch.ones_like(entity_norms))",
"def embeddings_available() -> bool:\n all_files = []\n for root, dirs, files in os.walk(ROOT_DIR):\n all_files.extend(files)\n if 'combined-320.txt' not in all_files:\n return False\n else:\n return True",
"def check_embeddings_shape(embeddings, docs):\n if embeddings is not None:\n if not any(\n [isinstance(embeddings, np.ndarray), isinstance(embeddings, csr_matrix)]\n ):\n raise ValueError(\n \"Make sure to input embeddings as a numpy array or scipy.sparse.csr.csr_matrix. \"\n )\n else:\n if embeddings.shape[0] != len(docs):\n raise ValueError(\n \"Make sure that the embeddings are a numpy array with shape: \"\n \"(len(docs), vector_dim) where vector_dim is the dimensionality \"\n \"of the vector embeddings. \"\n )",
"def _check_constraints(self):\n relation_abs = (\n self.instance.relation_embeddings(indices=None).view(self.factory.num_relations, -1, 2).norm(p=2, dim=-1)\n )\n assert torch.allclose(relation_abs, torch.ones_like(relation_abs))",
"def _check_vocab(self, vocab_file):\n assert os.path.exists(\n vocab_file), 'The vocab file %s does not exist' % vocab_file\n\n lines = [x.strip() for x in open(vocab_file)]\n\n if len(lines) != len(set(lines)):\n print('DATASET: vocab %s contains dups! fixing.', vocab_file)\n unk = self.config.unk\n os.system('rm %s' % vocab_file)\n s = unk + '\\n' + '\\n'.join([x for x in set(lines) if x != unk])\n with open(vocab_file, 'w') as f:\n f.write(s)\n # Re-read the vocab so that we can get the new length.\n lines = [x.strip() for x in open(vocab_file)]\n\n assert lines[0] == self.config.unk, 'The first words in %s is not %s' % (\n vocab_file)\n\n return len(lines)",
"def load_cl_embeddings(self,path_to_embeddings,dimension,skip_header):\n self.vocab_source = fit_vocab(self.train)\n self.vocab_target = fit_vocab(self.test)\n \n # full vocabulary\n self.vocab_ = fit_vocab(np.concatenate((self.train,self.test)))\n \n self.source_embeddings = load_embeddings(path_to_embeddings+\"concept_net_1706.300.\"+self.source_lang, dimension,skip_header=skip_header,vocab=self.vocab_)\n self.target_embeddings = load_embeddings(path_to_embeddings+\"concept_net_1706.300.\"+self.target_lang, dimension,skip_header=skip_header,vocab=self.vocab_)\n \n self.source_embeddings = sort_embeddings(self.source_embeddings,self.vocab_)\n self.target_embeddings = sort_embeddings(self.target_embeddings,self.vocab_)",
"def _load_embedding(self):\n logger.info(\"Loading embedding...\")\n embeddings_index = {}\n f = open(self.config.embedding_path)\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n embedding_matrix = np.zeros(\n (self.num_words + 1, self.config.embedding_dim))\n for word, i in self.word_index.items():\n if i >= self.config.max_nb_words:\n continue\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n # words not found in embedding index will be all-zeros.\n embedding_matrix[i] = embedding_vector\n return embedding_matrix",
"def is_word_embed_match(self, mention_x: MentionDataLight, mention_y: MentionDataLight):\n match_result = False\n x_embed = self.embedding.get_head_feature_vector(mention_x)\n y_embed = self.embedding.get_head_feature_vector(mention_y)\n # make sure words are not 'unk/None/0'\n if x_embed is not None and y_embed is not None:\n dist = cos(x_embed, y_embed)\n if not math.isnan(dist):\n sim = 1 - dist\n if sim >= self.accepted_dist:\n match_result = True\n\n return match_result",
"def _check_sizes(self, space):\n my_dimension = self.get_total_dimension()\n other_dimension = space.get_total_dimension()\n if my_dimension != other_dimension:\n raise ValueError(str(self) + \" with total dimension \" +\n str(my_dimension) +\n \" can't format a batch into \" +\n str(space) + \"because its total dimension is \" +\n str(other_dimension))",
"def _dimensions_check(self, element):\n if not (\"dimensions\" in self.attributes and \"dimensions\" in element.attributes):\n return True\n elif \"dimensions\" in self.attributes and \"dimensions\" in element.attributes:\n #The dimension text has to match perfectly. If variables names are specified\n #for bounds, we have no way of knowing whether the sizes are the same before\n #runtime. However, we can do some cleanup befor comparing.\n match = True\n selfdim = self.attributes[\"dimensions\"].lower().split(\",\")\n eldim = element.attributes[\"dimensions\"].lower().split(\",\")\n\n i = 0\n #We only need to compare dimensions until one fails\n while match and i < len(selfdim):\n if selfdim[i].strip() != eldim[i].strip():\n match = False\n i += 1\n \n return match\n else:\n return False",
"def _equal_embeddings(a: Representation, b: Representation) -> bool:\n return (a(indices=None) == b(indices=None)).all()",
"def check_dimension(self, filename, dimension):\n self.dimension = dimension\n self.filename = filename\n if self.dimension in self.filename:\n return True\n else:\n return False",
"def _check_vocabulary_size(self, name, vocab, expected_size):\n if expected_size is None:\n return\n vocab_size = self._vocabulary_size(vocab)\n if vocab_size is None:\n return\n if vocab_size != expected_size:\n raise ValueError(\n \"%s vocabulary has size %d but the model expected a vocabulary \"\n \"of size %d\" % (name.capitalize(), vocab_size, expected_size)\n )",
"def load_trimmed_word_embeddings(trimmed_file_name):\n trimmed_file_name += \".npz\"\n logger.info(\"start to load the trimmed file: {}\".format(trimmed_file_name))\n with np.load(trimmed_file_name) as data:\n we = data[\"embeddings\"]\n [w,d] = np.shape(we)\n logger.info(\"data is loaded. vocab size: {} dim: {}\".format(w,d))\n return we",
"def check_input_data_into_forward_once(self, x):\n assert len(x.shape) == 3, \"x should have the shape (batch_size, sequence_length, dimension)\"\n assert x.shape[2] == self.input_dim, \"x must have the same dimension as the input_dim you provided\"\n for embedding_dim in self.columns_of_data_to_be_embedded:\n data = x[:, :, embedding_dim]\n data = data.contiguous().view(-1, 1)\n data_long = data.long()\n assert all(data_long >= 0), \"All data to be embedded must be integers 0 and above -- {}\".format(data_long)\n assert torch.sum(abs(data.float() - data_long.float())) < 0.0001, \"\"\"Data columns to be embedded should be integer \n values 0 and above to represent the different \n classes\"\"\"\n if self.input_dim > len(self.columns_of_data_to_be_embedded):\n assert isinstance(x, torch.FloatTensor) or isinstance(x, torch.cuda.FloatTensor), \"Input data must be a float tensor\"\n self.checked_forward_input_data_once = True #So that it doesn't check again"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compile GPSG-style rules into CFG rules.
|
def expand_gpsg_rules(rules):
def free_variables_in(element):
parts = element.split("_")
for part in parts:
if part.startswith("{") and part.endswith("}"):
yield part.strip("{}")
def possible_feature_values_in(element):
parts = element.split("_")
for part in parts:
if ":" in part:
k, v = part.split(":")
yield k, part
def possible_feature_values(rules):
elements = rfutils.flat((rule.lhs,) + rule.rhs for rule in rules)
pairs = rfutils.flatmap(possible_feature_values_in, elements)
return dict_of_sets(pairs)
rules = list(rules) # we'll have to go through twice
possibilities = possible_feature_values(rule for rule, _ in rules)
for rule, prob in rules:
free_variables = set(free_variables_in(rule.lhs))
for element in rule.rhs:
free_variables.update(free_variables_in(element))
assignments = dict_product(
dict_subset(possibilities, free_variables)
)
for assignment in assignments:
new_lhs = rule.lhs.format_map(assignment)
new_rhs = tuple(
element.format_map(assignment) for element in rule.rhs
)
yield Rule(new_lhs, new_rhs), prob
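The expansion itself can be illustrated without the rfutils / dict_product / dict_subset helpers, which are not shown here. The sketch below is a standalone approximation of the same idea, with a made-up rule and made-up feature values (num:sg / num:pl); it is not the original helper implementation.

# Standalone illustration of expanding feature variables into concrete CFG rules.
from itertools import product

def dict_product(d):
    # Yield every assignment that picks one value per key, as a dict.
    keys = list(d)
    for combo in product(*(d[k] for k in keys)):
        yield dict(zip(keys, combo))

possibilities = {"num": {"num:sg", "num:pl"}}
lhs, rhs = "S", ("NP_{num}", "VP_{num}")
for assignment in dict_product(possibilities):
    print(lhs, "->", tuple(el.format_map(assignment) for el in rhs))
# e.g.  S -> ('NP_num:sg', 'VP_num:sg')
#       S -> ('NP_num:pl', 'VP_num:pl')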
|
[
"def build_goci_rules():\n rules_dict = {\n 'level 1a': processing_rules.build_rule('level 1a', ['level 0'],\n run_bottom_error, False),\n 'l1brsgen': processing_rules.build_rule('l1brsgen', ['l1'],\n run_l1brsgen, False),\n 'l2brsgen': processing_rules.build_rule('l2brsgen', ['l2gen'],\n run_l2brsgen, False),\n 'l1mapgen': processing_rules.build_rule('l1mapgen', ['l1'],\n run_l1mapgen, False),\n 'l2mapgen': processing_rules.build_rule('l2mapgen', ['l2gen'],\n run_l2mapgen, False),\n #'level 1b': processing_rules.build_rule('level 1b', ['level 1a','geo'],\n # run_l1b, False),\n 'level 1b': processing_rules.build_rule('level 1b', ['level 1a'],\n run_l1b, False),\n # 'l2gen': processing_rules.build_rule('l2gen', ['level 1b'], run_l2gen,\n # False),\n 'l2gen': processing_rules.build_rule('l2gen', ['level 1b'], run_l2gen,\n False),\n 'l2extract': processing_rules.build_rule('l2extract', ['l2gen'],\n run_l2extract, False),\n 'l2bin': processing_rules.build_rule('l2bin', ['l2gen'], run_l2bin,\n True),\n 'l3bin': processing_rules.build_rule('l3bin', ['l2bin'], run_l3bin,\n True),\n 'l3mapgen': processing_rules.build_rule('l3mapgen', ['l3bin'],\n run_l3mapgen, False),\n 'smigen': processing_rules.build_rule('smigen', ['l3bin'], run_smigen,\n False)\n }\n rules_order = ['level 1a', 'l1brsgen', 'l1mapgen', 'level 1b', 'l2gen',\n 'l2extract', 'l2brsgen', 'l2mapgen', 'l2bin', 'l3bin',\n 'l3mapgen', 'smigen']\n rules = processing_rules.RuleSet('GOCI rules', rules_dict, rules_order)\n return rules",
"def build_rules(self):\n if self.compiler.unfold_plan is not None:\n plan = self.compiler.unfold_plan\n env = unfolding.plan_to_program(\n plan, self.context, self.datasource,\n self.relations, self.rules)\n else:\n env = {}\n for rule in self.rules:\n env_rule = env.get(rule.id, None)\n if env_rule is not None:\n for rec in env_rule:\n self.build_rule(rule, rec)\n else:\n self.build_rule(rule, {})\n z3c.register(self.context)\n logging.getLogger().debug(\"Compiled rules:\\n%s\", self.context)\n if self.compiler.project is not None:\n self.compiler.project.reconciliate(self.context)\n if cfg.CONF.smt2 is not None:\n with open(cfg.CONF.smt2, 'w') as fd:\n self.dump_primitive_tables(fd)\n primitives.dump_translations(fd)\n fd.write(str(self.context))",
"def build_general_rules():\n rules_dict = {\n 'level 1a': processing_rules.build_rule('level 1a', ['level 0'],\n run_bottom_error, False),\n 'l1brsgen': processing_rules.build_rule('l1brsgen', ['l1'],\n run_l1brsgen, False),\n 'l2brsgen': processing_rules.build_rule('l2brsgen', ['l2gen'],\n run_l2brsgen, False),\n 'l1mapgen': processing_rules.build_rule('l1mapgen', ['l1'],\n run_l1mapgen, False),\n 'l2mapgen': processing_rules.build_rule('l2mapgen', ['l2gen'],\n run_l2mapgen, False),\n #'level 1b': processing_rules.build_rule('level 1b', ['level 1a','geo'],\n # run_l1b, False),\n 'level 1b': processing_rules.build_rule('level 1b', ['level 1a'],\n run_l1b, False),\n # 'l2gen': processing_rules.build_rule('l2gen', ['level 1b'], run_l2gen,\n # False),\n 'l2gen': processing_rules.build_rule('l2gen', ['level 1a'], run_l2gen,\n False),\n 'l2extract': processing_rules.build_rule('l2extract', ['l2gen'],\n run_l2extract, False),\n 'l2bin': processing_rules.build_rule('l2bin', ['l2gen'], run_l2bin,\n True),\n 'l3bin': processing_rules.build_rule('l3bin', ['l2bin'], run_l3bin,\n True),\n 'l3mapgen': processing_rules.build_rule('l3mapgen', ['l3bin'],\n run_l3mapgen, False),\n 'smigen': processing_rules.build_rule('smigen', ['l3bin'], run_smigen,\n False)\n }\n rules_order = ['level 1a', 'l1brsgen', 'l1mapgen', 'level 1b', 'l2gen',\n 'l2extract', 'l2brsgen', 'l2mapgen', 'l2bin', 'l3bin',\n 'l3mapgen', 'smigen']\n rules = processing_rules.RuleSet('General rules', rules_dict, rules_order)\n return rules",
"def build_rules():\n rules = dict(general=build_general_rules(),\n goci=build_goci_rules(),\n meris=build_meris_rules(),\n modis=build_modis_rules(),\n seawifs=build_seawifs_rules(),\n viirs=build_viirs_rules())\n return rules",
"def _compile_rules(rules: Iterable[Tuple[str, str]]):\n pattern = \"|\".join(f\"(?P<{name}>{pattern})\" for name, pattern in rules)\n return re.compile(pattern, re.DOTALL)",
"def compile_rules(s : str):\n ss = s.split('\\n')\n rules = {}\n for srules in ss:\n arrow_index = srules.find('->')\n left_nonterm = srules[:arrow_index].strip()\n right_derivs = list(map(lambda x: x.strip(), srules[arrow_index+2:].strip().split('|')))\n if left_nonterm != '' and len(right_derivs) != 0 and right_derivs[0] != '':\n right_derivs = list(map(lambda x: tuple(x.split(' ')), right_derivs))\n right_derivs = list(map(lambda x: x[0] if len(x) == 1 else x, right_derivs))\n rules[left_nonterm] = right_derivs\n return rules",
"def write_rules(self, line_list):\n\n self._setwatcomdirs(line_list)\n self._setcppflags(line_list)\n self._setasmflags(line_list)\n self._setlinkerflags(line_list)\n\n # Global compiler flags\n line_list.extend([\n \"\",\n \"# Now, set the compiler flags\",\n \"\",\n \"CL=WCC386 -6r -fp6 -w4 -ei -j -mf -zq -zp=8 \"\n \"-wcd=7 -i=\\\"$(INCLUDE_DIRS)\\\"\",\n \"CP=WPP386 -6r -fp6 -w4 -ei -j -mf -zq -zp=8 \"\n \"-wcd=7 -i=\\\"$(INCLUDE_DIRS)\\\"\",\n \"ASM=WASM -5r -fp6 -w4 -zq -d__WATCOM__=1\",\n \"LINK=*WLINK option caseexact option quiet PATH $(%WATCOM)/binnt;\"\n \"$(%WATCOM)/binw;.\",\n \"\",\n \"# Set the default build rules\",\n \"# Requires ASM, CP to be set\",\n \"\",\n \"# Macro expansion is on page 93 of the C/C++ Tools User's Guide\",\n \"# $^* = C:\\\\dir\\\\target (No extension)\",\n \"# $[* = C:\\\\dir\\\\dep (No extension)\",\n \"# $^@ = C:\\\\dir\\\\target.ext\",\n \"# $^: = C:\\\\dir\\\\\",\n \"\",\n \".i86.obj : .AUTODEPEND\",\n \"\\t@echo $[&.i86 / $(%CONFIG) / $(%TARGET)\",\n \"\\t@$(ASM) -0 -w4 -zq -d__WATCOM__=1 $(AFlags$(%CONFIG)\"\n \"$(%TARGET)) $[*.i86 -fo=$^@ -fr=$^*.err\",\n \"\",\n \".x86.obj : .AUTODEPEND\",\n \"\\t@echo $[&.x86 / $(%CONFIG) / $(%TARGET)\",\n \"\\t@$(ASM) $(AFlags$(%CONFIG)$(%TARGET)) \"\n \"$[*.x86 -fo=$^@ -fr=$^*.err\",\n \"\",\n \".c.obj : .AUTODEPEND\",\n \"\\t@echo $[&.c / $(%CONFIG) / $(%TARGET)\",\n \"\\t@$(CL) $(CFlags$(%CONFIG)$(%TARGET)) $[*.c \"\n \"-fo=$^@ -fr=$^*.err\",\n \"\",\n \".cpp.obj : .AUTODEPEND\",\n \"\\t@echo $[&.cpp / $(%CONFIG) / $(%TARGET)\",\n \"\\t@$(CP) $(CFlags$(%CONFIG)$(%TARGET)) $[*.cpp \"\n \"-fo=$^@ -fr=$^*.err\"\n ])\n return 0",
"def compile(self, ast):\n self.create_main_function(ast)\n\n for func in self.functions:\n for optimizer in OPTIMIZERS:\n optimizer(func)\n\n for func in self.functions:\n self.output.function_header(func.params)\n for line in func.code:\n self.output.code(*line)\n self.output.function_footer()",
"def compile(self):\n\n code = []\n \n self.logger.debug(\"compiling graph...\")\n for block in self._startBlocks:\n code.extend(block.evaluate())\n\n return \"\\n\".join(code)",
"def batch_compile( dst ):\n # NOTE: This function assumes there is one directory per student, and that all CPP files are at top level\n \n Nchars = 20 # Print this number of characters from the full path as a section heading\n\n print( \"##### COMPILE #####\\n\\n\" )\n \n # 1. Retrieve directories only\n dirList = os.listdir( dst )\n studentList = []\n for dName in dirList:\n studentPath = os.path.join( dst , dName )\n if os.path.isdir( studentPath ):\n studentList.append( studentPath )\n # 2. Sort the directories and report\n studentList.sort() \n print( \"There are\" , len( studentList ) , \"students to grade ...\\n\\n\" )\n \n # 2.1. Fetch rules, if they exist\n if _PRG:\n rules = parse_lines( _PRG , str )\n else:\n rules = []\n \n # 3. For each of the student directories\n for studentDir in studentList:\n print( \"==== Working on\" , studentDir , \"... ====\" )\n \n # 4. COMPILE\n if _PRG:\n \n print( \"\\t== Compilation results for\" , studentDir[-Nchars:] , \"==\" )\n \n results = [ rule.replace( _WILDCARD , '' ) + \" -->\\t\" for rule in rules ]\n # 4.A. For each rule\n for iRl , rule in enumerate( rules ):\n # B. Transform rule for this student\n compRule = insert_prefix_dir_at_char( rule , studentDir , char = _WILDCARD )\n # C. Compile\n if _VRB: print( \"Running:\" , compRule.split() )\n try:\n sproc = subprocess.run( compRule.split() , shell=False, check=False, \n stdout=(None if _VRB else subprocess.DEVNULL) , # ------------ If verbosity is False, then\n stderr=(subprocess.STDOUT if _VRB else subprocess.DEVNULL) ) # dump all subprocess output to DEVNULL\n except Exception as ex:\n if _VRB: print( \"\\t\\t\" , rule , \" failed with error: \" , str( ex )[:25] )\n if sproc.returncode == 0:\n results[ iRl ] += \"Passed!\"\n else:\n results[ iRl ] += \"! FAILED !\"\n \n print( \"\\t__ End\" , studentDir[-Nchars:] , \" Compilation __\\n\" )\n\n print( \"\\tThere are\" , len( rules ) , \"tests to run ...\" )\n\n for result in results:\n # 5. Report\n print( \"\\t\\t\" + result )\n \n print( \"\\tThe following files were submitted:\" , os.listdir( studentDir ) )\n \n else:\n # 4.B. Get a list of CPP files\n fList = os.listdir( studentDir )\n cppLst = []\n for fName in fList:\n if ( fName[-4:].upper() == \".cpp\".upper() ) and (fName[0] != '.') :\n cppPth = os.path.join( studentDir , fName )\n if os.path.isfile( cppPth ):\n cppLst.append( cppPth )\n cppLst.sort()\n \n nToComp = len( cppLst )\n \n # 4. For each of the CPP files\n print( \"\\t== Compilation results for\" , studentDir[-Nchars:] , \"==\" )\n results = [ False for elem in cppLst ]\n for iC , fCPP in enumerate( cppLst ):\n \n results[ iC ] = \"\"\n # 4.1. Attempt `main` repair\n if _RPM:\n if not has_main( fCPP ):\n if repair_CPP( fCPP , _RP1 , _RP2 ):\n results[ iC ] += \"Repaired ... \" \n \n # 5. Compile\n sproc = subprocess.run( [ 'g++' , '-std=c++11' , str( fCPP ) ] , shell=False, check=False, \n stdout=(None if _VRB else subprocess.DEVNULL) , # ------------ If verbosity is False, then\n stderr=(subprocess.STDOUT if _VRB else subprocess.DEVNULL) ) # dump all subprocess output to DEVNULL\n if sproc.returncode == 0:\n results[ iC ] += \"Compiled!\"\n else:\n results[ iC ] += \"! 
FAILED !\"\n print( \"\\t__ End\" , studentDir[-Nchars:] , \" Compilation __\\n\" )\n\n print( \"\\tThere are\" , len( cppLst ) , \"files to compile ...\" )\n if ( _NXP > 0 ) and ( nToComp != _NXP ) :\n if nToComp < _NXP:\n print( \"\\tWARN:\" , _NXP-nToComp , \"FILES MISSING!\" )\n else:\n print( \"\\tNote:\" , nToComp-_NXP , \"excess files!?!\" )\n\n for iC , fCPP in enumerate( cppLst ):\n # 5. Report\n print( \"\\t\\t\" , os.path.split( fCPP )[1] , ':' , end = ' ' )\n print( results[ iC ] )\n \n print( \"____ End Record ____\\n\\n\" )",
"def build_modis_rules():\n rules_dict = {\n 'level 0': processing_rules.build_rule('level 0', ['nothing lower'],\n run_bottom_error, False),\n 'level 1a': processing_rules.build_rule('level 1a', ['level 0'],\n run_modis_l1a, False),\n 'l1brsgen': processing_rules.build_rule('l1brsgen', ['l1'],\n run_l1brsgen, False),\n 'l1mapgen': processing_rules.build_rule('l1mapgen', ['l1'],\n run_l1mapgen, False),\n 'geo': processing_rules.build_rule('geo', ['level 1a'], run_modis_geo,\n False),\n 'l1aextract_modis': processing_rules.build_rule('l1aextract_modis',\n ['level 1a', 'geo'],\n run_l1aextract_modis,\n False),\n 'level 1b': processing_rules.build_rule('level 1b',\n ['level 1a', 'geo'],\n run_modis_l1b, False),\n 'l2gen': processing_rules.build_rule('l2gen', ['level 1b', 'geo'],\n run_l2gen, False),\n 'l2extract': processing_rules.build_rule('l2extract', ['l2gen'],\n run_l2extract, False),\n 'l2brsgen': processing_rules.build_rule('l2brsgen', ['l2gen'],\n run_l2brsgen, False),\n 'l2mapgen': processing_rules.build_rule('l2mapgen', ['l2gen'],\n run_l2mapgen, False),\n 'l2bin': processing_rules.build_rule('l2bin', ['l2gen'], run_l2bin,\n True),\n 'l3bin': processing_rules.build_rule('l3bin', ['l2bin'], run_l3bin,\n True),\n 'l3mapgen': processing_rules.build_rule('l3mapgen', ['l3bin'],\n run_l3mapgen, False, False),\n 'smigen': processing_rules.build_rule('smigen', ['l3bin'], run_smigen,\n False)\n }\n rules_order = ['level 0', 'level 1a', 'l1brsgen', 'l1mapgen', 'geo',\n 'l1aextract_modis', 'level 1b', 'l2gen', 'l2extract',\n 'l2bin', 'l2brsgen', 'l2mapgen', 'l3bin', 'l3mapgen',\n 'smigen']\n rules = processing_rules.RuleSet(\"MODIS Rules\", rules_dict, rules_order)\n return rules",
"def compile_reasoning_engine(self):\n psl_f, _, _, _, _, _, _, _ = self._folders()\n self.psl_obj.compile(psl_f)",
"def build_meris_rules():\n rules_dict = {\n 'level 1a': processing_rules.build_rule('level 1a', ['level 0'],\n run_bottom_error, False),\n 'l1brsgen': processing_rules.build_rule('l1brsgen', ['l1'],\n run_l1brsgen, False),\n 'l2brsgen': processing_rules.build_rule('l2brsgen', ['l2gen'],\n run_l2brsgen, False),\n 'l1mapgen': processing_rules.build_rule('l1mapgen', ['l1'],\n run_l1mapgen, False),\n 'l2mapgen': processing_rules.build_rule('l2mapgen', ['l2gen'],\n run_l2mapgen, False),\n #'level 1b': processing_rules.build_rule('level 1b', ['level 1a','geo'],\n # run_l1b, False),\n 'level 1b': processing_rules.build_rule('level 1b', ['level 1a'],\n run_l1b, False),\n # 'l2gen': processing_rules.build_rule('l2gen', ['level 1b'], run_l2gen,\n # False),\n 'l2gen': processing_rules.build_rule('l2gen', ['level 1b'], run_l2gen,\n False),\n 'l2extract': processing_rules.build_rule('l2extract', ['l2gen'],\n run_l2extract, False),\n 'l2bin': processing_rules.build_rule('l2bin', ['l2gen'], run_l2bin,\n True),\n 'l3bin': processing_rules.build_rule('l3bin', ['l2bin'], run_l3bin,\n True),\n 'l3mapgen': processing_rules.build_rule('l3mapgen', ['l3bin'],\n run_l3mapgen, False),\n 'smigen': processing_rules.build_rule('smigen', ['l3bin'], run_smigen,\n False)\n }\n rules_order = ['level 1a', 'l1brsgen', 'l1mapgen', 'level 1b', 'l2gen',\n 'l2extract', 'l2brsgen', 'l2mapgen', 'l2bin', 'l3bin',\n 'l3mapgen', 'smigen']\n rules = processing_rules.RuleSet('MERIS rules', rules_dict, rules_order)\n return rules",
"def load_rules():\n rule_files = Yara._prepare_rules(RULES_DIR)\n url_rule_files = Yara._prepare_rules(INTEL_DIR)\n try:\n Yara._rules = yara.compile(filepaths=rule_files)\n Yara._url_rules = yara.compile(filepaths=url_rule_files)\n except Exception as error:\n logger.error(f\"Invalid Rule file/syntax error: \\n{error} [{Thread.name()}]\")",
"def compileStatements(self):\n\n self.start('statements')\n\n while self.token() in self.STATEMENTS:\n self.compileStatement()\n\n self.end('statements')",
"def parse_gcode(self, filename):\n\n try:\n infile = open(filename) # mode 'r'\n except:\n print(\"Could not open file\")\n raise\n\n lines = infile.readlines()\n execlist = []\n\n # Gcode parsing rules:\n # Ex: N3 G1 X10.3 Y23.4 *34 ; comment\n #\n # Strip comments\n # Strip any line number (N) fields\n # Strip checksums\n # Parse actual command\n for j, i in enumerate(lines):\n orig_line = i\n i = i.upper()\n i = i[0:i.find(\";\")] # strip comments\n i = re.sub(r\"N[0-9]*\\s\", \"\", i, 1) # strip line numbers\n try: # strip checksum [*33]\n ind = i.index(\"*\")\n i = i[0:ind]\n except ValueError:\n pass\n i = i.strip()\n\n\n # Parse command into python code\n line = i.split() # G-code line split list\n if not line: # No commands in this line\n continue\n\n execstr = \"self.\"\n for cmd in self.cmd_list:\n cmd = cmd.split() # cmd is command string split list\n\n if line[0] == cmd[0]: # line command is in cmd_list\n execstr += cmd[0] + \"(\" # construct direct python call\n cmd.pop(0)\n line.pop(0)\n\n # Parse command arguements\n for param in line: # param: \"X123.45\"\n if param[0] in cmd: # If first char in cmd param list\n try: # Check if it's a number\n float(param[1:])\n except ValueError:\n raise SyntaxError(\"G-code file parsing error: \"\n \"Command argument not a\"\n \" number at line\" + str(j) +\n \": \\n\" + orig_line)\n if execstr[-1] != \"(\":\n execstr += \", \"\n execstr += param[0] + \"=\" + param[1:]\n cmd.remove(param[0])\n else: # Command param not accepted in cmd args\n raise SyntaxError(\"G-code file parsing error: \"\n \"Command parameter not accepted\"\n \" at line \" + str(j) + \": \\n\"\n + orig_line)\n\n execstr += \")\"\n execlist.append(execstr)\n break # done making command for this line\n else: # call not found in cmd_list\n raise SyntaxError(\"G-code file parsing error: Command not found\"\n \" at line \" + str(j) + \": \\n\" + orig_line)\n # Finished parsing file\n infile.close()\n\n # TODO should this be changed to execute line by line? Parses whole file\n for execstr in execlist:\n try:\n # debug\n print(execstr)\n exec(execstr)\n # TODO Catch exceptions and fail correctly\n except RuntimeError:\n self.M1()\n raise",
"def parse_lwfg(rulesfile, lexfile):\n\n ##############################\n # Loading rules first\n ##############################\n rules_lines = [l.strip() for l in open(rulesfile).readlines()]\n lex_lines = [l.strip() for l in open(lexfile).readlines()]\n\n start = None\n productions = []\n\n for linenum, line in enumerate(rules_lines):\n # Comments start with #\n if not line or line.startswith('#'): continue\n\n # Directives start with %\n # The only implemented directive defines the start symbol.\n # If the directive is missing, the lhs of the first rule is taken to be the start symbol.\n if line[0] == '%':\n directive, args = line[1:].split(None,1)\n if directive == 'start':\n m = _NONTERM_RE.match(args, 0)\n start = LexNonterminal(m.group(1))\n if m.end() != len(args):\n raise ValueError('Bad argument to start directive: {!s}'.format(line))\n else:\n raise ValueError('Bad directive: {!s}'.format(line))\n\n # Each rule is of the form X -> W+.\n # Thus, the first step in identifying is to detect the arrow ->.\n # Each rule is followed in the next line by a set of compositional constraints.\n # The constraints are represented in a way that makes it easy to generate a FeatStruct.\n # All one has to do is call nltk.featstruct.FeatStruct.\n if _ARROW_RE.findall(line):\n lhs, rhses = re.split(_ARROW_RE, line)\n if rhses == '':\n raise ValueError('Incomplete production rule: {!s}'.format(line))\n rhses = rhses.split()\n \n if len(rhses) > 1:\n for r in rhses:\n if r[0] in \"\\'\\\"\":\n raise ValueError('Incorrectly formatted rule: {!s}'.format(line)) \n\n lhs = LexNonterminal(lhs)\n for i, rhs in enumerate(rhses):\n if rhs[0] not in \"\\'\\\"\":\n rhses[i] = LexNonterminal(rhs)\n\n # now identifying the ontological constraints, and the syntagma\n try:\n phi_c = rules_lines[linenum+1]\n phi_c = FS(phi_c)\n\n lhs.setHead(phi_c['h'])\n for i, rhs in enumerate(rhses):\n if is_nonterminal(rhs):\n rhses[i].setHead(phi_c['h'+str(i+1)])\n except IndexError:\n raise ValueError('Rule {!s} is missing compositional constraints'.format(line))\n except KeyError:\n raise ValueError('Compositional constraints improperly formatted: {!s}'.format(rules_lines[linenum+1]))\n\n productions.append(LexProduction(lhs, rhses, phi_c))\n\n if not productions:\n raise ValueError('No productions found!')\n \n if not start:\n start = productions[0].lhs()\n\n ###############################\n # Now loading the lexicon\n ###############################\n\n # Like other rules, each rule in the lexicon contains an arrow ->.\n # The following line contains the compositional constraints, once again formatted\n # in a way that can directly be passed to the FeatStruct initializer.\n # The line after the compositional constraints contains the semantic body corresponding\n # to the lexical item.\n for linenum, line in enumerate(lex_lines):\n if _ARROW_RE.findall(line):\n lhs, rhses = re.split(_ARROW_RE, line)\n if rhses == '':\n raise ValueError('Incomplete production rule: {!s}'.format(line))\n rhses = rhses.split()\n \n if len(rhses) > 1:\n raise ValueError('Not a lexical rule: {!s}'.format(line)) \n\n lhs = LexNonterminal(lhs)\n if not _TERMINAL_RE.match(rhses[0]):\n raise ValueError('Not a lexical rule: {!s}'.format(line))\n else:\n rhses[0] = rhses[0][1:-1]\n\n lhs.setString(rhses[0])\n\n # now identifying the ontological constraints, and the syntagma\n try:\n phi_c = lex_lines[linenum+1]\n phi_c = FS(phi_c)\n\n lhs.setHead(phi_c['h'])\n except IndexError:\n raise ValueError('Rule {!s} is missing compositional 
constraints'.format(line))\n except KeyError:\n raise ValueError('Compositional constraints improperly formatted: {!s}'.format(lex_lines[linenum+1]))\n\n # now the syntagma body\n try:\n body = lex_lines[linenum+2]\n \n lhs.setBody(OntoSeR(body))\n except IndexError:\n raise ValueError('Rule {!s} is missing its semantic body'.format(line))\n\n productions.append(LexProduction(lhs, rhses, phi_c))\n\n return LWFG(start, productions)",
"def buildCellRules(self, seed=None):\n\n random.seed(seed)\n self._rsCellRules()\n self._fsiCellRules()\n self._strD1CellRules()\n self._strD2CellRules()\n self._thCellRules()\n self._gpiCellRules()\n self._gpeCellRules()\n self._stnCellRules()",
"def _gpeCellRules(self, gahp=10e-3):\n\n cellRule = {'conds': {'cellModel': 'GPe', 'cellType': 'GPe'}, 'secs': {}}\n cellRule['secs']['soma'] = {'geom': {}, 'mechs': {}}\n cellRule['secs']['soma']['geom'] = {'diam': 5.642, 'L': 5.642, 'Ra': 1,\n 'nseg': 1}\n\n cellRule['secs']['soma']['mechs']['GP'] = {'gahp': gahp}\n # cellRule['secs']['GPe']['mechs']['GP'] = {}\n cellRule['secs']['soma']['vinit'] = random.gauss(-62, 5)\n cellRule['secs']['soma']['threshold'] = -10\n\n self.netParams.cellParams['GPe'] = cellRule"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the attributes `data_type` and `load_class` based on the given `smrf_config` parameter. Currently supports two types of data: station CSV and gridded inputs.
|
def __determine_data_type(self, smrf_config):
loader_args = dict(start_date=self.start_date, end_date=self.end_date)
if InputCSV.DATA_TYPE in smrf_config:
self.data_type = InputCSV.DATA_TYPE
self.load_class = InputCSV(
**loader_args,
stations=smrf_config[InputCSV.DATA_TYPE]['stations'],
config=smrf_config[InputCSV.DATA_TYPE],
)
elif GriddedInput.TYPE in smrf_config:
self.data_type = smrf_config[GriddedInput.TYPE]['data_type']
data_inputs = dict(
bbox=self.bbox,
config=smrf_config,
topo=self.topo,
)
if self.data_type == InputGribHRRR.DATA_TYPE:
self.load_class = InputGribHRRR(**loader_args, **data_inputs)
elif self.data_type == InputNetcdf.DATA_TYPE:
self.load_class = InputNetcdf(**loader_args, **data_inputs)
elif self.data_type == InputWRF.DATA_TYPE:
self.load_class = InputWRF(**loader_args, **data_inputs)
else:
raise AttributeError(
'Unknown gridded data input type in ini-file'
)
else:
raise AttributeError(
'Missing required data type attribute in ini-file'
)
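For orientation, here is a hedged sketch of the two config shapes this method dispatches on. The literal section and key names are assumptions about what the class constants (InputCSV.DATA_TYPE, GriddedInput.TYPE, InputGribHRRR.DATA_TYPE, and so on) resolve to; only the structure that the code above reads is taken from it.

# Hypothetical smrf_config shapes; literal names are assumptions, structure is not.

# Station-based input: selects the InputCSV branch.
csv_config = {
    "csv": {
        "stations": ["STATION_1", "STATION_2"],
        # remaining csv options are passed through unchanged as `config`
    }
}

# Gridded input: selects one of the gridded loaders via 'data_type'.
gridded_config = {
    "gridded": {
        "data_type": "hrrr_grib",   # or "netcdf" / "wrf"
    }
}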
|
[
"def data_loader_cls(self, new_loader_cls):\n\n assert inspect.isclass(new_loader_cls) and issubclass(new_loader_cls,\n SlimDataLoaderBase)\n self._data_loader_cls = new_loader_cls",
"def set_from_dict(config):\n if \"CACHE\" in config:\n class_ = config['CACHE'].pop(\"class\", None)\n set_defaults(class_=class_, **config['CACHE'])\n\n if \"SERIALIZER\" in config:\n class_ = config['SERIALIZER'].pop(\"class\", None)\n set_default_serializer(class_=class_, **config['SERIALIZER'])\n\n if \"PLUGINS\" in config:\n set_default_plugins(config=config['PLUGINS'])",
"def get_data_loaders(config):\n dataset = core.with_file_like(config.dataset, \"rb\", pickle.load)\n with torch.no_grad():\n dataset = dataset[0], torch.from_numpy(dataset[1]).squeeze()\n assert config.sampling in {\"uniform\", \"quadratic\"}\n weights = None if config.sampling == \"uniform\" else dataset[1] ** 2\n train, rest = core.random_split(\n dataset,\n config.train_fraction + config.val_fraction,\n weights=weights,\n replacement=config.replacement,\n )\n train, val = core.random_split(\n train,\n config.train_fraction / (config.train_fraction + config.val_fraction),\n weights=None,\n replacement=False,\n )\n\n train_loader = make_spin_dataloader(*train, batch_size=config.train_batch_size)\n val_loader = make_spin_dataloader(*val, batch_size=config.val_batch_size)\n rest_loader = make_spin_dataloader(*rest, batch_size=config.val_batch_size)\n all_loader = make_spin_dataloader(*dataset, batch_size=config.val_batch_size)\n return collections.namedtuple(\"Loaders\", [\"training\", \"validation\", \"rest\", \"all\"])(\n train_loader, val_loader, rest_loader, all_loader\n )",
"def get_dataloader(config):\n if config.dataloader.lower() == 'mnist':\n return load_mnist(**config.__dict__)\n elif config.dataloader.lower() == 'compas':\n return load_compas(**config.__dict__)",
"def set_data_loader(self, data_loader):\n assert data_loader is not None\n self._data_loader = data_loader\n return self",
"def _create_simclr_dataloader(config, dataset_class, simclr_tf, partitions, shuffle=False):\r\n tf = _SimCLRDataTransform(simclr_tf)\r\n img_list = []\r\n for partition in partitions:\r\n imgs_curr = dataset_class(root=config.dataset_root, transform=tf, train=partition, download=True)\r\n img_list.append(imgs_curr)\r\n dataloader = ConcatDataset(img_list)\r\n dataloader = torch.utils.data.DataLoader(dataloader, batch_size=config.simclr_bs, shuffle=shuffle, drop_last=True)\r\n return dataloader",
"def __init__(self, config, memento, data_callback, state_callback, event_callback, exception_callback):\n # initialize the possible types of harvester/parser pairs for this driver\n data_keys = DataSourceKey.list()\n # link the data keys to the harvester type, multiple or single file harvester\n harvester_type = {DataSourceKey.FLORD_L_WFP_SIO_MULE: HarvesterType.SINGLE_FILE,\n DataSourceKey.FLORD_L_WFP: HarvesterType.SINGLE_DIRECTORY}\n super(FlordLWfpSioMuleDataSetDriver, self).__init__(config, memento, data_callback, state_callback,\n event_callback, exception_callback, data_keys,\n harvester_type=harvester_type)",
"def from_config(cls, config: Dict[str, Any]) -> \"ClassyMeter\":\n raise NotImplementedError",
"def register_dataclass(self, type_: type) -> None:\n\n type_name = f'_dataclass_{type_.__name__}'\n encode_function = partial(self._encode_dataclass, class_name=type_name)\n decode_function = partial(self._decode_dataclass, class_type=type_)\n self.register(type_, encode_function, type_name, decode_function)",
"def set_dataloader(args):\n if args.dataset == 'mnist':\n transform = transforms.Compose([\n transforms.Resize((32, 32)),\n transforms.ToTensor(),\n ])\n train_dataset = torchvision.datasets.MNIST(root=args.loading_path, train=True, download=True, \n transform=transform)\n test_dataset = torchvision.datasets.MNIST(root=args.loading_path, train=False, download=True, \n transform=transform)\n elif args.dataset == 'spatial':\n transform = transforms.Compose([\n transforms.Resize((32, 32)),\n transforms.ToTensor(),\n ])\n train_dataset = SpatialDataset(args.data_root, args.data_file_name)\n test_dataset = SpatialDataset(args.data_root, args.data_file_name)\n\n else:\n raise NotImplemented(\"dataset {} is not implemented.\".format(args.dataset))\n # train loader\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=False,\n num_workers=args.num_workers, pin_memory=True)\n # test loader\n test_dataloader = torch.utils.data.DataLoader(\n test_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=args.num_workers, pin_memory=True)\n\n return train_dataloader, test_dataloader",
"def set_config(self, config):\n\n self._model_config = json_format.ParseDict(\n config, model_config_pb2.ModelConfig())",
"def build_data_loader(data):\n if isinstance(data, dict):\n data_loader = SampleDictLoader(data)\n elif isinstance(data, SampleDictLoader):\n data_loader = data\n elif isinstance(data, CsvLoader):\n if data._force_eager == False:\n data_loader = data\n else:\n data_loader = SampleDictLoader(data.to_dict())\n else:\n raise TypeError('The `data` type must be dict or DataLoader.')\n return data_loader",
"def data_factory(config, data_type):\n data_source = config[data_type]['data_source']\n input_file = config[data_type]['file']\n outdir = config['outdir']\n output_file = f'{outdir}/{os.path.basename(input_file)}'\n\n if data_source == 's3':\n return S3Data(input_file, output_file)\n elif data_source == 'local':\n return LocalData(input_file, output_file)\n else:\n raise ValueError(\n f'Unknown data_source: \"{data_source}\".'\n ' data_source must be either \"s3\" or \"local\".')",
"def load(self, flo, serializer_name):\r\n serializer = self._settings_global_serializers[serializer_name]\r\n raw_settings = serializer.load(flo)\r\n self.__do_load(raw_settings)",
"def update_data_type(self):\n data_type_var = self.data_type_var.get()\n\n if data_type_var == 0:\n #Auto - determine data type\n bin_count = len(glob.glob1(self.dir_path,\"*.bin\"))\n bmp_count = len(glob.glob1(self.dir_path,\"*.bmp\"))\n\n dir_contents = os.listdir(self.dir_path)\n\n if bin_count >= bmp_count or (\"FTPdetectinfo_\" in dir_contents):\n self.data_type.set(1) #Set to CAMS if there are more bin files\n self.end_frame.set(255)\n else:\n self.data_type.set(2) #Set to Skypatrol if there are more BMP files\n self.end_frame.set(1500)\n\n elif data_type_var == 1:\n #CAMS\n self.data_type.set(1)\n self.end_frame.set(255)\n\n elif data_type_var == 2:\n #Skypatrol\n self.data_type.set(2)\n self.end_frame.set(1500)\n\n self.update_listbox(self.get_bin_list()) #Update listbox\n\n self.mode.set(1)\n self.filter.set(1)\n self.change_mode()\n self.move_top(0) #Move listbox cursor to the top\n\n self.update_image(0)",
"def get_datasets(config):\n mod = config[\"data_loader\"][\"modifier\"]\n head = config[\"data_loader\"][\"head\"]\n if config[\"model\"][\"type\"] == \"tw_joint\":\n label_1 = config[\"data_loader\"][\"label_1\"]\n label_2 = config[\"data_loader\"][\"label_2\"]\n dataset_train = JointRankingDataset(data_path=config[\"train_data_path\"],\n general_embedding_path=config[\"feature_extractor\"][\"general_embeddings\"],\n label_embedding_path=config[\"feature_extractor\"][\"label_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label_1=label_1, label_2=label_2, mod=mod, head=head)\n dataset_valid = JointRankingDataset(data_path=config[\"validation_data_path\"],\n general_embedding_path=config[\"feature_extractor\"][\"general_embeddings\"],\n label_embedding_path=config[\"feature_extractor\"][\"label_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label_1=label_1, label_2=label_2, mod=mod, head=head)\n dataset_test = JointRankingDataset(data_path=config[\"test_data_path\"],\n general_embedding_path=config[\"feature_extractor\"][\"general_embeddings\"],\n label_embedding_path=config[\"feature_extractor\"][\"label_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label_1=label_1, label_2=label_2, mod=mod, head=head)\n elif \"classifier\" in config[\"model\"][\"type\"]:\n if config[\"feature_extractor\"][\"contextualized_embeddings\"] is True:\n bert_parameter = config[\"feature_extractor\"][\"contextualized\"][\"bert\"]\n bert_model = bert_parameter[\"model\"]\n max_len = bert_parameter[\"max_sent_len\"]\n lower_case = bert_parameter[\"lower_case\"]\n batch_size = bert_parameter[\"batch_size\"]\n label = config[\"data_loader\"][\"label\"]\n load_bert = config[\"data_loader\"][\"load_bert_embeddings\"]\n all_labels = extract_all_labels(training_data=config[\"train_data_path\"],\n validation_data=config[\"validation_data_path\"],\n test_data=config[\"test_data_path\"], separator=config[\"data_loader\"][\"sep\"],\n label=label\n )\n label_encoder = create_label_encoder(all_labels)\n print(\"labelsize %d\" % len(set(all_labels)))\n if \"semclass\" in config[\"model\"][\"type\"]:\n semclass = config[\"data_loader\"][\"semclass\"]\n dataset_train = ContextualizedSemPhraseDataset(data_path=config[\"train_data_path\"],\n bert_model=bert_model, lower_case=lower_case,\n max_len=max_len, separator=config[\"data_loader\"][\"sep\"],\n batch_size=batch_size, label_encoder=label_encoder,\n label=label, mod=mod, head=head, low_layer=0,\n top_layer=4,\n load_bert_embeddings=load_bert, semclass=semclass)\n dataset_valid = ContextualizedSemPhraseDataset(data_path=config[\"validation_data_path\"],\n bert_model=bert_model, lower_case=lower_case,\n max_len=max_len, separator=config[\"data_loader\"][\"sep\"],\n batch_size=batch_size, label_encoder=label_encoder,\n label=label, mod=mod, head=head, low_layer=0,\n top_layer=4,\n load_bert_embeddings=load_bert, semclass=semclass)\n dataset_test = ContextualizedSemPhraseDataset(data_path=config[\"test_data_path\"],\n bert_model=bert_model, lower_case=lower_case,\n max_len=max_len, separator=config[\"data_loader\"][\"sep\"],\n batch_size=batch_size, label_encoder=label_encoder,\n label=label, mod=mod, head=head, low_layer=0, top_layer=4,\n load_bert_embeddings=load_bert, semclass=semclass)\n else:\n dataset_train = ContextualizedPhraseDataset(data_path=config[\"train_data_path\"],\n bert_model=bert_model, lower_case=lower_case,\n max_len=max_len, separator=config[\"data_loader\"][\"sep\"],\n batch_size=batch_size, 
label_encoder=label_encoder,\n label=label, mod=mod, head=head, low_layer=0, top_layer=4,\n load_bert_embeddings=load_bert)\n dataset_valid = ContextualizedPhraseDataset(data_path=config[\"validation_data_path\"],\n bert_model=bert_model, lower_case=lower_case,\n max_len=max_len, separator=config[\"data_loader\"][\"sep\"],\n batch_size=batch_size, label_encoder=label_encoder,\n label=label, mod=mod, head=head, low_layer=0, top_layer=4,\n load_bert_embeddings=load_bert)\n dataset_test = ContextualizedPhraseDataset(data_path=config[\"test_data_path\"],\n bert_model=bert_model, lower_case=lower_case,\n max_len=max_len, separator=config[\"data_loader\"][\"sep\"],\n batch_size=batch_size, label_encoder=label_encoder,\n label=label, mod=mod, head=head, low_layer=0, top_layer=4,\n load_bert_embeddings=load_bert)\n\n else:\n\n label = config[\"data_loader\"][\"label\"]\n all_labels = extract_all_labels(training_data=config[\"train_data_path\"],\n validation_data=config[\"validation_data_path\"],\n test_data=config[\"test_data_path\"], separator=config[\"data_loader\"][\"sep\"],\n label=config[\"data_loader\"][\"label\"]\n )\n print(\"all labels\")\n print(all_labels)\n label_encoder = create_label_encoder(all_labels)\n print(\"labelsize %d\" % len(set(all_labels)))\n if \"semclass\" in config[\"model\"][\"type\"]:\n semclass = config[\"data_loader\"][\"semclass\"]\n dataset_train = JointClassificationDataset(data_path=config[\"train_data_path\"],\n embedding_path=config[\"feature_extractor\"][\n \"general_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label=label, mod=mod, head=head, label_encoder=label_encoder,\n feature=semclass)\n dataset_valid = JointClassificationDataset(data_path=config[\"validation_data_path\"],\n embedding_path=config[\"feature_extractor\"][\n \"general_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label=label, mod=mod, head=head, label_encoder=label_encoder,\n feature=semclass)\n dataset_test = JointClassificationDataset(data_path=config[\"test_data_path\"],\n embedding_path=config[\"feature_extractor\"][\n \"general_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label=label, mod=mod, head=head, label_encoder=label_encoder,\n feature=semclass)\n else:\n\n dataset_train = ClassificationDataset(data_path=config[\"train_data_path\"],\n embedding_path=config[\"feature_extractor\"][\"general_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label=label, mod=mod, head=head, label_encoder=label_encoder)\n dataset_valid = ClassificationDataset(data_path=config[\"validation_data_path\"],\n embedding_path=config[\"feature_extractor\"][\"general_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label=label, mod=mod, head=head, label_encoder=label_encoder)\n dataset_test = ClassificationDataset(data_path=config[\"test_data_path\"],\n embedding_path=config[\"feature_extractor\"][\"general_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label=label, mod=mod, head=head, label_encoder=label_encoder)\n\n else:\n label = config[\"data_loader\"][\"label\"]\n if config[\"feature_extractor\"][\"contextualized_embeddings\"] is True:\n bert_parameter = config[\"feature_extractor\"][\"contextualized\"][\"bert\"]\n bert_model = bert_parameter[\"model\"]\n max_len = bert_parameter[\"max_sent_len\"]\n lower_case = bert_parameter[\"lower_case\"]\n batch_size = bert_parameter[\"batch_size\"]\n load_bert = config[\"data_loader\"][\"load_bert_embeddings\"]\n load_labels = config[\"data_loader\"][\"load_labels\"]\n 
label_definition_path = config[\"feature_extractor\"][\"definition\"]\n dataset_train = ContextualizedRankingDataset(data_path=config[\"train_data_path\"],\n bert_model=bert_model, lower_case=lower_case,\n max_len=max_len, separator=config[\"data_loader\"][\"sep\"],\n batch_size=batch_size,\n label_definition_path=label_definition_path,\n label=label, mod=mod, head=head, low_layer=0, top_layer=4,\n load_bert_embeddings=load_bert,\n load_label_embeddings=load_labels)\n dataset_valid = ContextualizedRankingDataset(data_path=config[\"validation_data_path\"],\n bert_model=bert_model, lower_case=lower_case,\n max_len=max_len, separator=config[\"data_loader\"][\"sep\"],\n batch_size=batch_size,\n label=label, mod=mod, head=head, low_layer=0, top_layer=4,\n load_bert_embeddings=load_bert,\n label_definition_path=label_definition_path,\n load_label_embeddings=load_labels)\n dataset_test = ContextualizedRankingDataset(data_path=config[\"test_data_path\"],\n bert_model=bert_model, lower_case=lower_case,\n max_len=max_len, separator=config[\"data_loader\"][\"sep\"],\n batch_size=batch_size,\n label=label, mod=mod, head=head, low_layer=0, top_layer=4,\n load_bert_embeddings=load_bert,\n label_definition_path=label_definition_path,\n load_label_embeddings=load_labels)\n else:\n dataset_train = RankingDataset(data_path=config[\"train_data_path\"],\n general_embedding_path=config[\"feature_extractor\"][\"general_embeddings\"],\n label_embedding_path=config[\"feature_extractor\"][\"label_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label=label, mod=mod, head=head)\n dataset_valid = RankingDataset(data_path=config[\"validation_data_path\"],\n general_embedding_path=config[\"feature_extractor\"][\"general_embeddings\"],\n label_embedding_path=config[\"feature_extractor\"][\"label_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label=label, mod=mod, head=head)\n dataset_test = RankingDataset(data_path=config[\"test_data_path\"],\n general_embedding_path=config[\"feature_extractor\"][\"general_embeddings\"],\n label_embedding_path=config[\"feature_extractor\"][\"label_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label=label, mod=mod, head=head)\n\n return dataset_train, dataset_valid, dataset_test",
"def set_seg_class_data(self, _segea):\t\t\n\t\treturn self.set_seg_class(_segea, \"DATA\")",
"def loaderInitialize(config = None):\n\n # reset the loader list\n _applicationLoaders = { }\n\n # create default loader\n createLoader(\"default\", config)",
"def from_bl_config(bl_config):\n ws_client = None\n beamline_setup = None\n \n try:\n ws_client = Client(_WS_COLLECTION_URL,\n cache = None)\n\n beamline_setup = ws_client.factory.create('ns0:beamLineSetup3VO')\n except:\n raise\n try: \n synchrotron_name = \\\n bl_config.synchrotron_name\n beamline_setup.synchrotronName = synchrotron_name\n except (IndexError, AttributeError), e:\n beamline_setup.synchrotronName = \"ESRF\" \n\n if bl_config.undulators:\n i = 1\n for und in bl_config.undulators:\n beamline_setup.__setattr__('undulatorType%d' % i, und.type)\n i += 1\n\n try:\n beamline_setup.monochromatorType = \\\n bl_config.monochromator_type\n \n beamline_setup.focusingOptic = \\\n bl_config.focusing_optic\n \n beamline_setup.beamDivergenceVertical = \\\n bl_config.beam_divergence_vertical\n \n beamline_setup.beamDivergenceHorizontal = \\\n bl_config.beam_divergence_horizontal\n \n beamline_setup.polarisation = \\\n bl_config.polarisation\n\n beamline_setup.minExposureTimePerImage = \\\n bl_config.minimum_exposure_time\n \n beamline_setup.goniostatMaxOscillationSpeed = \\\n bl_config.maximum_phi_speed\n \n beamline_setup.goniostatMinOscillationWidth = \\\n bl_config.minimum_phi_oscillation\n\n except:\n pass\n\n beamline_setup.setupDate = datetime.now()\n\n return beamline_setup"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set the pixel location in the topo for each station
|
def metadata_pixel_location(self):
self.metadata['xi'] = self.metadata.apply(
lambda row: self.find_pixel_location(
row,
self.topo.x,
'utm_x'), axis=1)
self.metadata['yi'] = self.metadata.apply(
lambda row: self.find_pixel_location(
row,
self.topo.y,
'utm_y'), axis=1)
|
[
"def _set_pixel_geometries(self):\n\n feats = self.features\n if feats is None:\n return\n\n proj_method = self._extract_geolocation_details()\n for feat in feats.features:\n if feat.geometry is None or feat.uid in self._pixel_geometries:\n continue\n self._pixel_geometries[feat.uid] = feat.geometry.apply_projection(proj_method)",
"def assign_coord_ip_addresses(topo: Topology) -> None:\n bridge = topo.coordinator.bridge\n host_gen = bridge.valid_ip_iter()\n topo.coordinator.reserve_ip_addresses(host_gen)\n for service in topo.additional_services:\n service.reserve_ip_addresses(host_gen)\n for isd_as in topo.ases.keys():\n bridge.assign_ip_address(isd_as, pref_ip=next(host_gen))",
"def set_source_info(self):\n self.sd = self.roi.psm.point_sources[self.which].skydir\n self.name = self.roi.psm.point_sources[self.which].name",
"def transport(self, xoffset, yoffset):\n print(\"Changing coordinates\")\n transportLayer(self, xoffset, yoffset)\n print(\"Done\")",
"def set_gps(self, x, y):\n\t\tpass",
"def __init__(self):\n self.geometry = list()\n self.point_lights = list()",
"def set_neighbor(self, router, direction): \r\n self.connected[router.name()] = router",
"def setPixels(*args, **kwargs):\n \n pass",
"def set_map(self, map_object):\n for sensor in self.config.sensors:\n sensor.set_map(map_object)",
"def update_rasters(self):\n # Update preview_overview_scatter_plot\n self.plots.preview_overview_scatter_plot.setData(self.plots_data.all_spots)\n if self.Includes2DActiveWindowScatter:\n self.plots.scatter_plot.setData(self.plots_data.all_spots)",
"def markMap(self):\n for ind in self.path:\n self.exploredMap[tuple(ind)] = 6",
"def multi_station_graph(df,datstat,obsvar,modvar,regions,lims,figsize=(14,40),units='($\\mu$M)'):\n fig, ax = plt.subplots(len(regions),2,figsize = figsize)\n for d,r in zip(range(len(regions)),regions):\n ps=byStation(ax[d][0],df,datstat,r,obsvar,modvar,lims)\n ax[d][0].set_title(f'{obsvar} {units} in {r} by Station');\n\n with nc.Dataset('/data/vdo/MEOPAR/NEMO-forcing/grid/bathymetry_201702.nc') as grid:\n viz_tools.plot_coastline(ax[d][1], grid, coords = 'map',isobath=.1)\n\n for ind, istation in enumerate(df[df['Basin'] == r].Station.unique()):\n ax[d][1].plot(datstat[istation]['Lon'], datstat[istation]['Lat'],'.',\n color = colors[ind], label=istation)\n ax[d][1].set_ylim(47, 49)\n ax[d][1].legend(bbox_to_anchor=[1,.6,0,0])\n ax[d][1].set_xlim(-124, -122);\n ax[d][1].set_title(f'Observation Locations for {r}'); \n return ax",
"def set_initial_location(self):\n self.changed = True\n self.new_location = (np.random.rand(self.num_dimensions) * (self.mx-self.mn)) + self.mn\n # random initial velocities of swarm\n self.velocities[0, :] = (np.random.rand(self.num_dimensions) * (self.mx-self.mn)) + self.mn",
"def draw_station_edges(self, tile, tiles):\n\n station_id = tile.occupant.station_id\n for direction in range(8):\n other_tile = self.parent.tile_grid.get_tile_dir(tile, direction)\n if not other_tile:\n continue\n is_same_station = other_tile.kind == 5 and other_tile.occupant.station_id == station_id\n if is_same_station:\n continue\n self.draw_station_edge(tile, direction)",
"def set_image_gps(ibs, gid_list, gps_list=None, lat_list=None, lon_list=None):\n if gps_list is not None:\n assert lat_list is None\n assert lon_list is None\n lat_list = [tup[0] for tup in gps_list]\n lon_list = [tup[1] for tup in gps_list]\n colnames = ('image_gps_lat', 'image_gps_lon',)\n val_list = zip(lat_list, lon_list)\n id_iter = ((gid,) for gid in gid_list)\n ibs.db.set(IMAGE_TABLE, colnames, val_list, id_iter)",
"def populate_positions(self, xs, ys):\n # All spexel positions should contain the same coordinates\n xy = Coordinate2D([xs, ys], unit='arcsec')\n self.position = self.info.detector_array.find_pixel_positions(xy)",
"def setlocation(self, density):\n import numpy as np\n\n self.cumu_prob, self.cumu_prob_index = self.assignprob(density)\n self.nodelocindex = []\n self.nodex, self.nodey = np.empty(self.nodenum, dtype = float), np.empty(self.nodenum, dtype = float)\n\n for i in range(self.nodenum):\n flag = 0\n while(flag == 0):\n temp = np.random.rand()\n for j in range(len(self.cumu_prob)):\n if(temp <= self.cumu_prob[j]):\n if(self.cumu_prob_index[j] in self.nodelocindex):\n continue\n else:\n self.nodelocindex.append(self.cumu_prob_index[j])\n self.nodey[i] = self.Geoy[self.cumu_prob_index[j][0]]\n self.nodex[i] = self.Geox[self.cumu_prob_index[j][1]]\n flag = 1\n break",
"def set_locations(self, locations_graph):\n for package in self._packages:\n if package.location not in map(lambda x: x.data, self._locations.get_vertex_list()):\n for vertex in locations_graph.get_vertex_list():\n if vertex.data == package.location:\n self._locations.add_vertex(vertex.data.name, vertex.data)\n break\n\n for location in map(lambda x: x.data, self._locations.get_vertex_list()):\n # index in truck graph\n index = self._locations.get_vertex_list().index(self._locations.get_vertex(location.name))\n\n # index in graph of all locations\n all_locations_index = locations_graph.get_vertex_list().index(locations_graph.get_vertex(location.name))\n\n for num, loc in enumerate(map(lambda x: x.data, self._locations.get_vertex_list())):\n cur_index = locations_graph.get_vertex(loc.name).index\n self._locations.adjacency_matrix[index][num] = locations_graph.adjacency_matrix[all_locations_index][cur_index]",
"def station_map(fill_alpha=0.9, show_neighor=False):\n dname = os.path.dirname(os.path.abspath(__file__))\n project_folder = '/'.join(dname.split('/')[:-1]) + '/NYC_bikeshare'\n df_station = pd.read_csv(project_folder + '/data/NYC_bike_stations_v1.csv')\n # preparing the source\n source = bkp.ColumnDataSource(\n data=dict(lat=df_station['latitude'],\n long=df_station['longitude'],\n name=df_station['name'],\n neighbor=df_station['neighborhood']))\n\n # ======== preparing the plot =======\n map_options = GMapOptions(lat=40.75, lng=-73.95,\n map_type=\"roadmap\", zoom=12)\n plot = GMapPlot(\n x_range=Range1d(),\n y_range=Range1d(),\n map_options=map_options,\n api_key=google_map_api_key,\n tools=[PanTool(), WheelZoomTool()],\n )\n # plot.title.text = 'Citibike stations'\n circle = Circle(x='long', y='lat', fill_color='red',\n fill_alpha=fill_alpha, line_alpha=0)\n plot.add_glyph(source, circle)\n# plot.add_tools(PanTool(), WheelZoomTool())\n hover = bkm.HoverTool(\n tooltips=[('Station name', '@name'), ('Neighborhood', '@neighbor')])\n plot.add_tools(hover)\n plot.toolbar.active_drag = 'auto'\n plot.toolbar.active_scroll = WheelZoomTool()\n bokeh_html = file_html(plot, CDN, \"tmp\")\n print('return the station plot...')\n\n return bokeh_html"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Insert the new titratable groups into self.pKagroups
|
def insert_new_titratable_group(self, ligand_titratable_groups):
group_type = ligand_titratable_groups['type']
if group_type in self.pKagroups:
#
# Now modify the group so that it will correspond to the group
# we have in the ligand
#
ligand_name = 'LIG' # Note: we have to implement automatic determination of ligand name
import copy
new_group = copy.deepcopy(self.pKagroups[group_type])
new_group.DefTitrations[0].modelpKa = ligand_titratable_groups['modelpka']
new_group.name = 'LIG'
new_group.resname = 'LIG'
# print new_group.Residue
self.pKagroups['LIG'] = copy.deepcopy(new_group)
atom_map = ligand_titratable_groups['matching_atoms']
#
# Insert definition into HYDROGEN arrays
#
for hdef in self.hydrogenRoutines.hydrodefs:
if hdef.name == group_type:
newdef = copy.deepcopy(hdef)
# print newdef
newdef.name = ligand_name
#
                # Change the names in each of the conformations
#
# The name of the H is not changed!
#
for conformation in newdef.conformations:
#
# Change the name of the atom that the H is bound to
#
if conformation.boundatom in atom_map:
conformation.boundatom = atom_map[conformation.boundatom]
#
# Change the name of the hydrogen
#
oldhname = conformation.hname
conformation.hname = 'H' + conformation.boundatom
#
# And then for the individual atom names
#
for atom in conformation.atoms:
if atom.name in atom_map:
atom.name = atom_map[atom.name]
elif atom.name == oldhname:
atom.name = conformation.hname
self.hydrogenRoutines.hydrodefs.append(copy.deepcopy(newdef))
# stop
return
|
[
"def insert_group(self, group):\n index = self.find_named_folder(\"Groups\")\n\n parent_node = self.get_node(index)\n\n # Check to see if an AOV Group of the same name already exists. If it\n # does then we want to just update the internal item for the node.\n for row, child in enumerate(parent_node.children):\n # Check the child's group against the one to be added.\n if child.group == group:\n # Update the internal item.\n child.group = group\n\n existing_index = self.index(row, 0, index)\n\n # Signal the internal data changed.\n self.dataChanged.emit(existing_index, existing_index)\n\n # We're done here.\n break\n\n else:\n position = len(parent_node.children)\n\n self.beginInsertRows(index, position, position)\n\n if isinstance(group, IntrinsicAOVGroup):\n IntrinsicAOVGroupNode(group, parent_node)\n else:\n AOVGroupNode(group, parent_node)\n\n self.endInsertRows()\n\n return True",
"def a_group(self):\n self.group_cache = {}\n self._insert = self._insert_group\n yield\n self._insert = self._insert_one\n self.data.append(self.group_cache)\n self.group_cache = None",
"def create(self, group):\n self.request.mongo_connection.shinken.hostgroups.insert(\n group.as_dict()\n )",
"def _add_current_groups(self):\n self.keys_final += self._current_keys_final\n nr_gr_f = max(self.group_for_inputs_final.values()) + 1\n for inp, grs in self._current_group_for_inputs_final.items():\n if isinstance(grs, int):\n grs = grs + nr_gr_f\n else: # a list\n grs = [gr + nr_gr_f for gr in grs]\n self.group_for_inputs_final[inp] = grs\n for i, stack in enumerate(self._current_groups_stack_final):\n if i == 0:\n stack = [gr + nr_gr_f for gr in stack]\n self.groups_stack_final[-1] += stack\n else:\n stack = [gr + nr_gr_f for gr in stack]\n self.groups_stack_final.append(stack)",
"def update_groups(self):\n\n self.list_grp.clear()\n self.list_wrd_in_grp.clear() # resets (left) groups list\n for group_name in self.db.get_groups(): # populates groups list from DB.\n self.list_grp.addItem(group_name[0])",
"def _create_groups(self, groups):\n\n acls = self.mumblectl.getACL(self.settings['mumble_server_id'], 0)\n glist = []\n for mgroup in acls[1]:\n glist.append(mgroup.name)\n\n newgroups = False\n for agroup in groups:\n if not str(agroup.name.replace(' ', '').lower()) in glist:\n group = self.mur.Group()\n group.name = str(agroup.name.replace(' ', '').lower())\n group.members = []\n group.add = []\n group.remove = []\n group.inheritable = True\n group.inherit = True\n group.inherited = False\n acls[1].append(group)\n newgroups = True \n\n if newgroups:\n self.mumblectl.setACL(self.settings['mumble_server_id'], 0, acls[0], acls[1], acls[2])\n\n return acls",
"def appendGroup(self, *args):\n\n\t\tcurGroup = cmds.textScrollList('hairGroups', q=1, si=1)[0]\n\n\t\tfor x in cmds.ls(sl=1):\n\t\t\tcmds.parent(x, curGroup)\n\n\t\treturn",
"def newGroup(self):\n self.appendJobGroup()\n self.currentGroup = self.groupInstance(subscription=self.subscription)\n map(lambda x: x.startGroup(self.currentGroup), self.generators)",
"def agg_insert_by_group(data: dd = None, groupby_columns: List[str] = None, agg_dict: dict = None,\n insert_dict: dict = None) -> dd:\n agg_data = data.groupby(groupby_columns).agg(agg_dict).reset_index()\n agg_data.columns = agg_data.columns.droplevel(1)\n for column, value in insert_dict.items():\n agg_data[column] = 'COMBINED'\n data = data.append(agg_data)\n return data",
"def test_put_groups_id(self):\n pass",
"def create_group():\n qgis_groups = get_group()\n for i, g_item in enumerate(reversed(PROJECT_GROUP[:2])):\n if g_item not in groups_to_array(qgis_groups):\n qgis_groups.insertGroup(i,g_item)\n rsx_group = qgis_groups.findGroup(PROJECT_GROUP[0])\n if rsx_group is not None:\n for item in PROJECT_GROUP[2:]:\n if qgis_groups.findGroup(item) is None:\n rsx_group.addGroup(item)",
"def add_groups(self, groups):\n\n for g in groups:\n self.add_group(g)\n return self",
"def create_groups(self,company_name,company_id):\n #pdb.set_trace()\n tech_group_name = slugify(company_name)+\"__tech__\"+str(self.company_id) # we need to check it does not exist before this step\n admin_group_name = slugify(company_name)+\"__admin__\"+str(self.company_id)\n superadmin_group_name = slugify(company_name)+\"__superadmin__\"+str(self.company_id)\n new_group1, created1 = Group.objects.get_or_create(name=tech_group_name)\n new_group2, created2 = Group.objects.get_or_create(name=admin_group_name)\n new_group3, created3 = Group.objects.get_or_create(name=superadmin_group_name)\n # now when a new user is created, we\n #ct = ContentType.objects.get_for_model(User)",
"def create_person_group(self):\n url = self.base_url + \"persongroups/\" + self.pg_name\n response = requests.put(url, headers=self.headers, json={\"name\" : self.pg_name})\n if response.status_code == 200 :\n print(\"added \" + self.pg_name + \" person group\")\n else:\n print(response.json())",
"def _addRefereesGroup(self,group,refpool=None):\n # pokud byl predan refpool\n if refpool:\n self.refPool = refpool\n # priravim si refPool\n else:\n # TODO dopredu vypocist velikost ref pool\n self._initRefPool(group.referee_group,100)\n # df zapasy skupiny\n group_matches_df = self.tdo.DfTester._getGroupMatchesDf(group)\n # projdu zapasy skupiny\n for pitch in group_matches_df.columns:\n for match_ind in group_matches_df.index:\n match = group_matches_df.iloc[match_ind,pitch]\n if match:\n if not match.referee:\n # zkontrolujeme, zda tym muze piskat\n for refPool_index in range(len(self.refPool)):\n # TODO zbytecne prochazime cely pool, stacila by jedna obratka\n if self.tdo.DfTester._canPlaceTph(self.refPool[refPool_index],match_ind):\n match.referee = self.refPool.pop(refPool_index)\n match.save()\n break",
"def createNewGroupRow(currGroup, nextGroup, pointsToGather, endPoint):\n newRow = {}\n newRow[\"Region No\"] = currGroup[\"Region No\"]\n currPoint = 1\n #Sort out the Group Points\n for currPointIndex in range(len(pointsToGather[0])):\n currGroupPoint = pointsToGather[0][currPointIndex]\n newRow[f\"Point {currPoint + currPointIndex} Orig Rt\"] = currGroup[f\"Point {currGroupPoint} Orig Rt\"]\n newRow[f\"Point {currPoint + currPointIndex} Agent Difference\"] = currGroup[f\"Point {currGroupPoint} Agent Difference\"]\n newRow[f\"Point {currPoint + currPointIndex} Action to Next Point\"] = currGroup[f\"Point {currGroupPoint} Action to Next Point\"]\n \n currPoint += len(pointsToGather[0])\n\n for currPointIndex in range(len(pointsToGather[1])):\n currGroupPoint = pointsToGather[1][currPointIndex]\n newRow[f\"Point {currPoint + currPointIndex} Orig Rt\"] = nextGroup[f\"Point {currGroupPoint} Orig Rt\"]\n newRow[f\"Point {currPoint + currPointIndex} Agent Difference\"] = nextGroup[f\"Point {currGroupPoint} Agent Difference\"]\n newRow[f\"Point {currPoint + currPointIndex} Action to Next Point\"] = nextGroup[f\"Point {currGroupPoint} Action to Next Point\"]\n \n #Sort out the Goal Point and Next Point\n goalPoint = pointsToGather[1][currPointIndex]+1\n\n newRow[f\"Goal Point Orig Rt\"] = nextGroup[f\"Point {goalPoint} Orig Rt\"]\n newRow[f\"Goal Point Agent Difference\"] = nextGroup[f\"Point {goalPoint} Agent Difference\"]\n\n if endPoint:\n newRow[f\"Next Point Rt Orig\"] = nextGroup[\"Goal Point Orig Rt\"]\n newRow[f\"Next Point Agent Difference\"] = nextGroup[\"Goal Point Agent Difference\"]\n newRow[f\"Agent Action to Next Point\"] = nextGroup[f\"Point {goalPoint} Action to Next Point\"]\n else:\n goalPoint += 1\n newRow[f\"Next Point Rt Orig\"] = nextGroup[f\"Point {goalPoint} Orig Rt\"]\n newRow[f\"Next Point Agent Difference\"] = nextGroup[f\"Point {goalPoint} Agent Difference\"]\n newRow[f\"Agent Action to Next Point\"] = nextGroup[f\"Point {goalPoint} Action to Next Point\"]\n\n return newRow",
"def test_put_groups_id_drm(self):\n pass",
"def add_vita_group(self, group):\n\n g = self.vita_groups.get(group.get_hash())\n if g is not None:\n g.m += group.m\n else:\n self.vita_groups[group.get_hash()] = group\n\n return self",
"def update_groups(self, props, **kws):\n new_groups = props.get('group', None)\n if new_groups is not None:\n if isinstance(new_groups, str):\n new_groups = new_groups,\n [self._group.add(g) for g in new_groups]\n if self._family is not None:\n self._group.add(self._family)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set the radii for specific atoms in a residue
|
def setRadii(self, residue, atomlist):
for atom in residue.get("atoms"):
atomname = atom.get("name")
if atomname not in atomlist: continue
charge, radius = self.forcefield.getParams(residue, atomname)
if radius != None:
atom.set("radius", radius)
else:
text = "Could not find radius for atom %s" % atomname
text += " in residue %s %i" % (residue.name, residue.resSeq)
text += " while attempting to set radius!"
raise ValueError(text)
|
[
"def setAllRadii(self):\n for chain in self.protein.getChains():\n for residue in chain.get(\"residues\"):\n for atom in residue.get(\"atoms\"):\n atomname = atom.get(\"name\")\n if atomname.find('FLIP') != -1:\n continue\n if atomname == \"HD1\": ###PC\n charge = 0.44\n radiues = 1.05\n else:\n charge, radius = self.forcefield.getParams(residue, atomname)\n ###PC\n if radius != None:\n atom.set(\"radius\", radius)\n else:\n if residue.type != 2:\n text = \"Could not find radius for atom %s \" % atomname\n text += \"in residue %s %i\" % (residue.name, residue.resSeq)\n text += \" while attempting to set all radii!\"\n raise ValueError(text)",
"def set_bond_radii(atoms, bond_type='bond'):\n if atoms.info is None:\n atoms.info = {}\n if 'bond_radii' in atoms.info:\n r_a = atoms.info['bond_radii']\n else:\n r_a = np.ones(len(atoms))\n \n for atom in atoms:\n if bond_type == 'covalent':\n r_a[atom.index] = (pyTEMlib.crystal_tools.electronFF[atom.symbol]['bond_length'][0])\n else:\n r_a[atom.index] = (pyTEMlib.crystal_tools.electronFF[atom.symbol]['bond_length'][1])\n atoms.info['bond_radii'] = r_a\n return r_a",
"def set_radicals_by_map(mol, radical_map):\n for i, atom in enumerate(mol.atoms):\n if atom.element.number != radical_map.atoms[i].element.number:\n raise ConverterError('Atom order in mol and radical_map in set_radicals_by_map() do not match. '\n '{0} is not {1}.'.format(atom.element.symbol, radical_map.atoms[i].symbol))\n atom.radical_electrons = radical_map.atoms[i].radical_electrons",
"def setRadius(self, r):\n if not isinstance(r, (int,float)):\n raise TypeError('Radius must be a number')\n if r <= 0:\n raise ValueError(\"The circle's radius must be positive.\")\n\n factor = float(r)/self.getRadius()\n self._transform = self._transform * _Transformation((factor,0.,0.,factor,0.,0.))\n \n self._objectChanged(True,False,False)",
"def get_radii(self, particles):\n num_atoms = particles.get_num_atoms()\n radii = np.zeros((num_atoms+1,), dtype=np.float64)\n\n symbols = particles.get_atomic_symbol()\n atomic_variant = particles.get_atomic_variant()\n residue = particles.get_residue()\n\n table = self.get_ff_cm_dict()\n\n for i in range(num_atoms+1):\n ret_type = self.get_form_factor_atom_type(symbols[i], atomic_variant[i], residue[i])\n\n idx = table[ret_type]\n radii[i] = self.ff_radii[idx]\n\n return radii",
"def setRadius(self, r: 'float const') -> \"void\":\n return _coin.SbCylinder_setRadius(self, r)",
"def zeroAllRadiiCharges(self):\n for chain in self.protein.getChains():\n for residue in chain.get(\"residues\"):\n for atom in residue.get(\"atoms\"):\n atom.set(\"ffcharge\", 0.0)\n atom.set(\"radius\", 0.0)",
"def changeRadius(self, change) -> None:\n self.radius += change\n #redraws the circle based on new radius\n self.rect = pygame.Rect(self.rect.x, self.rect.y, 2 * self.radius, 2 * self.radius)\n self.changeCoordinates(0, -1, False)",
"def rim(self, rim):\n\n self._rim = rim",
"def setRadius(self, radiusinpixels: 'float const') -> \"void\":\n return _coin.SoRayPickAction_setRadius(self, radiusinpixels)",
"def test_saturate_aromatic_radical(self):\n indenyl = Molecule().from_adjacency_list(\"\"\"\nmultiplicity 2\n1 C u0 p0 c0 {2,B} {3,S} {4,B}\n2 C u0 p0 c0 {1,B} {5,B} {6,S}\n3 C u0 p0 c0 {1,S} {7,D} {11,S}\n4 C u0 p0 c0 {1,B} {8,B} {12,S}\n5 C u0 p0 c0 {2,B} {9,B} {15,S}\n6 C u1 p0 c0 {2,S} {7,S} {16,S}\n7 C u0 p0 c0 {3,D} {6,S} {10,S}\n8 C u0 p0 c0 {4,B} {9,B} {13,S}\n9 C u0 p0 c0 {5,B} {8,B} {14,S}\n10 H u0 p0 c0 {7,S}\n11 H u0 p0 c0 {3,S}\n12 H u0 p0 c0 {4,S}\n13 H u0 p0 c0 {8,S}\n14 H u0 p0 c0 {9,S}\n15 H u0 p0 c0 {5,S}\n16 H u0 p0 c0 {6,S}\n\"\"\")\n indene = Molecule().from_adjacency_list(\"\"\"\n1 C u0 p0 c0 {2,B} {3,S} {4,B}\n2 C u0 p0 c0 {1,B} {5,B} {6,S}\n3 C u0 p0 c0 {1,S} {7,D} {11,S}\n4 C u0 p0 c0 {1,B} {8,B} {12,S}\n5 C u0 p0 c0 {2,B} {9,B} {15,S}\n6 C u0 p0 c0 {2,S} {7,S} {16,S} {17,S}\n7 C u0 p0 c0 {3,D} {6,S} {10,S}\n8 C u0 p0 c0 {4,B} {9,B} {13,S}\n9 C u0 p0 c0 {5,B} {8,B} {14,S}\n10 H u0 p0 c0 {7,S}\n11 H u0 p0 c0 {3,S}\n12 H u0 p0 c0 {4,S}\n13 H u0 p0 c0 {8,S}\n14 H u0 p0 c0 {9,S}\n15 H u0 p0 c0 {5,S}\n16 H u0 p0 c0 {6,S}\n17 H u0 p0 c0 {6,S}\n\"\"\")\n saturated_molecule = indenyl.copy(deep=True)\n saturated_molecule.saturate_radicals()\n self.assertTrue(saturated_molecule.is_isomorphic(indene))",
"def setRadius(self, radius: 'float const') -> \"void\":\n return _coin.SbSphere_setRadius(self, radius)",
"def radii(self):\n return self._data.get(b'keyOriginRRectRadii')",
"def set_radius_at_zone():\n function = LegacyFunctionSpecification()\n function.can_handle_array = True\n function.addParameter(\n 'index_of_the_star', dtype='int32', direction=function.IN,\n description=\"The index of the star to set the value of\")\n function.addParameter(\n 'zone', dtype='int32', direction=function.IN,\n description=\"The zone/mesh-cell of the star to set the value of\")\n function.addParameter(\n 'R_i', dtype='float64', direction=function.IN,\n description=(\n \"The radius at the specified zone/mesh-cell of the star.\"\n )\n )\n function.result_type = 'int32'\n function.result_doc = \"\"\"\n 0 - OK\n The value was set.\n -1 - ERROR\n A star with the given index was not found.\n -2 - ERROR\n A zone with the given index was not found.\n \"\"\"\n return function",
"def setRadius(self, radius):\n # Change the radius\n self.radius = radius\n if self.radius * 2 <= self.height and self.radius * 2 <= self.width:\n self.radiusInd = True\n else:\n self.radiusInd = False\n # Redraw the Button\n self.undraw()\n self.draw(self.win)",
"def radii(data):\n rs = np.empty(len(data))\n for i, (points_sequence, labels_sequence) in enumerate(data):\n rs[i] = radius(points_sequence, labels_sequence, data)\n return rs",
"def set_radius(self, radius):\n self.widget.setRadius(radius)",
"def getRadii(self):\n\n return self._radii.copy()",
"def test_copy_residue_within_molecule(self):\n\n # Get the data pipe.\n dp = pipes.get_pipe('orig')\n\n # Create the first residue and add some data to its spin container.\n self.residue_fns.create(1, 'Ala')\n dp.mol[0].res[0].spin[0].num = 111\n dp.mol[0].res[0].spin[0].x = 1\n\n # Copy the residue a few times.\n self.residue_fns.copy(res_from=':1', res_to=':2')\n self.residue_fns.copy(res_from=':1', pipe_to='orig', res_to=':3')\n\n # Change the first residue's data.\n dp.mol[0].res[0].spin[0].num = 222\n dp.mol[0].res[0].spin[0].x = 2\n\n # Copy the residue once more.\n self.residue_fns.copy(res_from=':1', res_to=':4,Met')\n\n # Test the original residue.\n self.assertEqual(dp.mol[0].res[0].num, 1)\n self.assertEqual(dp.mol[0].res[0].name, 'Ala')\n self.assertEqual(dp.mol[0].res[0].spin[0].num, 222)\n self.assertEqual(dp.mol[0].res[0].spin[0].x, 2)\n\n # Test the new residue 2.\n self.assertEqual(dp.mol[0].res[1].num, 2)\n self.assertEqual(dp.mol[0].res[1].name, 'Ala')\n self.assertEqual(dp.mol[0].res[1].spin[0].num, 111)\n self.assertEqual(dp.mol[0].res[1].spin[0].x, 1)\n\n # Test the new residue 3.\n self.assertEqual(dp.mol[0].res[2].num, 3)\n self.assertEqual(dp.mol[0].res[2].name, 'Ala')\n self.assertEqual(dp.mol[0].res[2].spin[0].num, 111)\n self.assertEqual(dp.mol[0].res[2].spin[0].x, 1)\n\n # Test the new residue 4.\n self.assertEqual(dp.mol[0].res[3].num, 4)\n self.assertEqual(dp.mol[0].res[3].name, 'Met')\n self.assertEqual(dp.mol[0].res[3].spin[0].num, 222)\n self.assertEqual(dp.mol[0].res[3].spin[0].x, 2)",
"def _gain_radical(self, radical):\n radical_electrons = []\n if any([len(atomtype.increment_radical) == 0 for atomtype in self.atomtype]):\n raise ActionError('Unable to update GroupAtom due to GAIN_RADICAL action: '\n 'Unknown atom type produced from set \"{0}\".'.format(self.atomtype))\n if not self.radical_electrons:\n radical_electrons = [1, 2, 3, 4]\n else:\n for electron in self.radical_electrons:\n radical_electrons.append(electron + radical)\n # Set the new radical electron counts\n self.radical_electrons = radical_electrons"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set the charges for specific atoms in a residue
|
def setCharges(self, residue, atomlist):
for atom in residue.get("atoms"):
atomname = atom.get("name")
if atomname not in atomlist:
continue
charge, radius = self.forcefield.getParams(residue, atomname)
if charge != None:
atom.set("ffcharge", charge)
else:
text = "Could not find charge for atom %s" % atomname
text += " in residue %s %i" % (residue.name, residue.resSeq)
text += " while attempting to set charge!"
raise ValueError(text)
return
|
[
"def update_charge(self):\n for atom in self.atoms:\n if (len(atom.charge) == 1) and (len(atom.lone_pairs) == 1) and (len(atom.radical_electrons) == 1):\n # if the charge of the group is not labeled, then no charge update will be\n # performed. If there multiple charges are assigned, no update either.\n # Besides, this groupatom should have enough information to be updated\n atom_type = atom.atomtype[0]\n for element in allElements:\n if atom_type is ATOMTYPES[element] or atom_type in ATOMTYPES[element].specific:\n bond_order = 0\n valence_electron = elements.PeriodicSystem.valence_electrons[element]\n for _, bond in atom.bonds.items():\n bond_order += bond.order[0]\n lone_pairs = atom.lone_pairs[0]\n radical_electrons = atom.radical_electrons[0]\n atom.charge[0] = valence_electron - bond_order - 2 * lone_pairs - radical_electrons\n else:\n # if the group is not specified to specific element, charge will not be updated\n pass",
"def zeroAllRadiiCharges(self):\n for chain in self.protein.getChains():\n for residue in chain.get(\"residues\"):\n for atom in residue.get(\"atoms\"):\n atom.set(\"ffcharge\", 0.0)\n atom.set(\"radius\", 0.0)",
"def assign_charge(self, charge):\n\n self.charge = charge\n intcharge = np.floor(charge)\n deccharge = charge % 1\n self.orbitals = [Orbital(s) for s in fc.orbital_configuration(self.element_symbol, intcharge)]\n if deccharge > 0:\n self.orbitals[-1].remove_electron(deccharge)",
"def set_charge(self, charge: int):\n self[\"FORCE_EVAL\"][\"DFT\"][\"CHARGE\"] = Keyword(\"CHARGE\", int(charge))",
"def setAllRadii(self):\n for chain in self.protein.getChains():\n for residue in chain.get(\"residues\"):\n for atom in residue.get(\"atoms\"):\n atomname = atom.get(\"name\")\n if atomname.find('FLIP') != -1:\n continue\n if atomname == \"HD1\": ###PC\n charge = 0.44\n radiues = 1.05\n else:\n charge, radius = self.forcefield.getParams(residue, atomname)\n ###PC\n if radius != None:\n atom.set(\"radius\", radius)\n else:\n if residue.type != 2:\n text = \"Could not find radius for atom %s \" % atomname\n text += \"in residue %s %i\" % (residue.name, residue.resSeq)\n text += \" while attempting to set all radii!\"\n raise ValueError(text)",
"def charges(self, molecule):\n\n # TODO add option to use chargemol on onetep cube files.\n copy(f'../density/{molecule.name}.wfx', f'{molecule.name}.wfx')\n c_mol = Chargemol(molecule, self.all_configs)\n c_mol.generate_input()\n\n append_to_log(f'Chargemol analysis with DDEC{self.qm[\"ddec_version\"]} complete')\n\n return molecule",
"def give_resp_charges(old_atoms_list, new_charges):\n new_atoms_list = copy.copy(old_atoms_list) \n for index, atom in enumerate(new_atoms_list):\n atom.mm.charge = new_charges[index]\n \n old_charges_sum = new_charges_sum = 0 \n for no, charge in enumerate(new_charges):\n old_charges_sum += old_atoms_list[no].mm.charge\n new_charges_sum += new_atoms_list[no].mm.charge\n\n diff = new_charges_sum - old_charges_sum\n\n no_link_atoms = 0.0\n for atom in old_atoms_list:\n if atom.oniom.link_atom:\n no_link_atoms += 1.0\n \n for atom in new_atoms_list:\n if atom.oniom.link_atom:\n atom.mm.charge = atom.mm.charge - diff/no_link_atoms\n\n return new_atoms_list",
"def set_bond_radii(atoms, bond_type='bond'):\n if atoms.info is None:\n atoms.info = {}\n if 'bond_radii' in atoms.info:\n r_a = atoms.info['bond_radii']\n else:\n r_a = np.ones(len(atoms))\n \n for atom in atoms:\n if bond_type == 'covalent':\n r_a[atom.index] = (pyTEMlib.crystal_tools.electronFF[atom.symbol]['bond_length'][0])\n else:\n r_a[atom.index] = (pyTEMlib.crystal_tools.electronFF[atom.symbol]['bond_length'][1])\n atoms.info['bond_radii'] = r_a\n return r_a",
"def set_reference_distance(self):\n for residue in self.residues:\n if not isinstance(residue, aa.Amino):\n continue\n # Initialize some variables\n map_ = {}\n caatom = residue.get_atom(\"CA\")\n if caatom is None:\n # TODO: What does the %s mean? Is it the residue name?\n text = \"Cannot set references to %s without CA atom!\"\n raise ValueError(text)\n # Set up the linked map\n for atom in residue.atoms:\n map_[atom] = atom.bonds\n # Run the algorithm\n for atom in residue.atoms:\n if atom.is_backbone:\n atom.refdistance = -1\n elif residue.is_c_term and atom.name == \"HO\":\n atom.refdistance = 3\n elif residue.is_n_term and (\n atom.name == \"H3\" or atom.name == \"H2\"\n ):\n atom.refdistance = 2\n else:\n path = util.shortest_path(map_, atom, caatom)\n if path is not None:\n atom.refdistance = len(path) - 1\n else:\n raise ValueError(\n \"Found gap in biomolecule structure for atom \"\n f\"{atom}\"\n )",
"def _add_bond_force_terms(self):\n core_energy_expression = '(K/2)*(r-length)^2;'\n # linearly interpolate spring constant\n core_energy_expression += 'K = (1-lambda_bonds)*K1 + lambda_bonds*K2;'\n # linearly interpolate bond length\n core_energy_expression += 'length = (1-lambda_bonds)*length1 + lambda_bonds*length2;'\n\n # Create the force and add the relevant parameters\n custom_core_force = openmm.CustomBondForce(core_energy_expression)\n custom_core_force.addPerBondParameter('length1') # old bond length\n custom_core_force.addPerBondParameter('K1') # old spring constant\n custom_core_force.addPerBondParameter('length2') # new bond length\n custom_core_force.addPerBondParameter('K2') # new spring constant\n\n custom_core_force.addGlobalParameter('lambda_bonds', 0.0)\n\n self._hybrid_system.addForce(custom_core_force)\n self._hybrid_system_forces['core_bond_force'] = custom_core_force\n\n # Add a bond force for environment and unique atoms (bonds are never\n # scaled for these):\n standard_bond_force = openmm.HarmonicBondForce()\n self._hybrid_system.addForce(standard_bond_force)\n self._hybrid_system_forces['standard_bond_force'] = standard_bond_force",
"def set_donors_acceptors(self):\n for residue in self.residues:\n residue.set_donors_acceptors()",
"def setCharge(self, charge):\n\t\tself._charge = charge",
"def set_dative_bonds(\n mol: Chem.Mol, from_atoms: Tuple[str, ...] = ('N', 'P')) -> Chem.Mol:\n p_table = Chem.GetPeriodicTable()\n edit_mol = Chem.RWMol(mol)\n edit_mol.UpdatePropertyCache(strict=False)\n metals = [atom for atom in edit_mol.GetAtoms() if is_transition_metal(atom)]\n for metal in metals:\n for nbr in metal.GetNeighbors():\n nbr_atom = nbr.GetSymbol()\n # Handles carbon-bound (e.g., NHC-type or CO) ligands\n # Converts carbon-metal bond to dative if carbon's total valence +\n # formal charge does not equal 4\n if nbr_atom in from_atoms and nbr_atom == 'C':\n if nbr.GetFormalCharge() > 0:\n warnings.warn(\n f'A positively charged C atom bound to '\n f'{metal.GetSymbol()} was found in the compound '\n f'with SMILES {Chem.MolToSmiles(mol)}. If this is '\n f'a datively bound metal-carbene complex, '\n f'the positive charge should be removed from '\n f'the SMILES string before setting dative bonds')\n if (nbr.GetTotalValence() + nbr.GetFormalCharge() !=\n p_table.GetDefaultValence(nbr_atom) and\n edit_mol.GetBondBetweenAtoms(\n nbr.GetIdx(), metal.GetIdx()).GetBondType()\n == Chem.BondType.SINGLE):\n edit_mol.RemoveBond(nbr.GetIdx(), metal.GetIdx())\n edit_mol.AddBond(nbr.GetIdx(), metal.GetIdx(),\n Chem.BondType.DATIVE)\n\n # Handles atoms other than carbon (P, N, O, S, etc.)\n # Converts atom-metal bond to dative if bonds to atom\n # excedes its default valence\n elif nbr_atom in from_atoms and nbr_atom != 'C':\n if (nbr.GetExplicitValence() >\n p_table.GetDefaultValence(nbr_atom) and\n edit_mol.GetBondBetweenAtoms(\n nbr.GetIdx(), metal.GetIdx()).GetBondType()\n == Chem.BondType.SINGLE):\n edit_mol.RemoveBond(nbr.GetIdx(), metal.GetIdx())\n edit_mol.AddBond(nbr.GetIdx(), metal.GetIdx(),\n Chem.BondType.DATIVE)\n\n return edit_mol.GetMol()",
"def _symmetrize_charges(\n cls, molecule: \"Molecule\", charges: numpy.ndarray\n ) -> numpy.ndarray:\n\n symmetry_groups = get_atom_symmetries(molecule)\n\n charges_by_group = {group: [] for group in symmetry_groups}\n\n for group, charge in zip(symmetry_groups, charges):\n charges_by_group[group].append(charge)\n\n average_charges = {\n group: float(numpy.mean(charges_by_group[group]))\n for group in charges_by_group\n }\n\n return numpy.array([[average_charges[group]] for group in symmetry_groups])",
"def compute_hydration_energies(molecules, parameters):\n\n energies = dict() # energies[index] is the computed solvation energy of molecules[index]\n\n platform = openmm.Platform.getPlatformByName(\"Reference\")\n\n index = 0 # DEBUG\n for molecule in molecules:\n print \"molecule %d / %d\" % (index, len(molecules)) # DEBUG\n index += 1 # DEBUG\n \n # Create OpenMM System.\n system = openmm.System()\n for atom in molecule.GetAtoms():\n mass = OEGetDefaultMass(atom.GetAtomicNum())\n system.addParticle(mass * units.amu)\n\n # Add nonbonded term.\n # nonbonded_force = openmm.NonbondedSoftcoreForce()\n # nonbonded_force.setNonbondedMethod(openmm.NonbondedForce.NoCutoff)\n # for atom in molecule.GetAtoms():\n # charge = 0.0 * units.elementary_charge\n # sigma = 1.0 * units.angstrom\n # epsilon = 0.0 * units.kilocalories_per_mole\n # nonbonded_force.addParticle(charge, sigma, epsilon)\n # system.addForce(nonbonded_force)\n\n # Add GBVI term\n # gbvi_force = openmm.GBVISoftcoreForce()\n gbvi_force = openmm.GBVIForce() \n gbvi_force.setNonbondedMethod(openmm.GBVIForce.NoCutoff) # set no cutoff\n gbvi_force.setSoluteDielectric(1)\n gbvi_force.setSolventDielectric(78)\n\n # Use scaling method.\n # gbvi_force.setBornRadiusScalingMethod(openmm.GBVISoftcoreForce.QuinticSpline)\n # gbvi_force.setQuinticLowerLimitFactor(0.75)\n # gbvi_force.setQuinticUpperBornRadiusLimit(50.0*units.nanometers)\n\n # Build indexable list of atoms.\n atoms = [atom for atom in molecule.GetAtoms()] \n \n # Assign GB/VI parameters.\n for atom in molecule.GetAtoms(): \n atomtype = atom.GetStringData(\"gbvi_type\") # GBVI atomtype\n charge = atom.GetPartialCharge() * units.elementary_charge\n radius = parameters['%s_%s' % (atomtype, 'radius')] * units.angstroms\n gamma = parameters['%s_%s' % (atomtype, 'gamma')] * units.kilocalories_per_mole \n # gamma *= -1.0 # DEBUG\n lambda_ = 1.0 # fully interacting\n # gbvi_force.addParticle(charge, radius, gamma, lambda_) # for GBVISoftcoreForce\n gbvi_force.addParticle(charge, radius, gamma) # for GBVIForce\n\n # Add bonds.\n for bond in molecule.GetBonds():\n # Get atom indices.\n iatom = bond.GetBgnIdx()\n jatom = bond.GetEndIdx()\n # Get bond length.\n (xi, yi, zi) = molecule.GetCoords(atoms[iatom])\n (xj, yj, zj) = molecule.GetCoords(atoms[jatom])\n distance = math.sqrt((xi-xj)**2 + (yi-yj)**2 + (zi-zj)**2) * units.angstroms\n # Identify bonded atoms to GBVI.\n gbvi_force.addBond(iatom, jatom, distance)\n\n # Add the force to the system.\n system.addForce(gbvi_force)\n \n # Build coordinate array.\n natoms = len(atoms)\n coordinates = units.Quantity(numpy.zeros([natoms, 3]), units.angstroms)\n for (index,atom) in enumerate(atoms):\n (x,y,z) = molecule.GetCoords(atom)\n coordinates[index,:] = units.Quantity(numpy.array([x,y,z]),units.angstroms) \n \n # Create OpenMM Context.\n timestep = 1.0 * units.femtosecond # arbitrary\n integrator = openmm.VerletIntegrator(timestep)\n context = openmm.Context(system, integrator, platform)\n\n # Set the coordinates.\n context.setPositions(coordinates)\n \n # Get the energy\n state = context.getState(getEnergy=True)\n energies[molecule] = state.getPotentialEnergy()\n\n return energies",
"def force_partial_charge_computation(mol):\n rdkit_util.compute_charges(mol)",
"def gasteiger_charges(mol):\n AllChem.ComputeGasteigerCharges(mol)\n return [\n mol.GetAtomWithIdx(i).GetDoubleProp(\"_GasteigerCharge\") for i in range(mol.GetNumAtoms())\n ]",
"def gasteiger_charges(mol):\n\n rdPartialCharges.ComputeGasteigerCharges(mol)\n return mol.atoms.props.pop('_GasteigerCharge')",
"def set_unit_mass(self):\n self.masstype = 'unit'\n self.amass = []\n for i in range(self.natoms):\n self.amass.append(1.0)\n return",
"def initGravCompensation(self):\n for link in self.joints:\n self.jointGravCompensation[link] = 0.0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set all radii for the entire protein
|
def setAllRadii(self):
for chain in self.protein.getChains():
for residue in chain.get("residues"):
for atom in residue.get("atoms"):
atomname = atom.get("name")
if atomname.find('FLIP') != -1:
continue
if atomname == "HD1": ###PC
charge = 0.44
                    radius = 1.05
else:
charge, radius = self.forcefield.getParams(residue, atomname)
###PC
if radius != None:
atom.set("radius", radius)
else:
if residue.type != 2:
text = "Could not find radius for atom %s " % atomname
text += "in residue %s %i" % (residue.name, residue.resSeq)
text += " while attempting to set all radii!"
raise ValueError(text)
|
[
"def zeroAllRadiiCharges(self):\n for chain in self.protein.getChains():\n for residue in chain.get(\"residues\"):\n for atom in residue.get(\"atoms\"):\n atom.set(\"ffcharge\", 0.0)\n atom.set(\"radius\", 0.0)",
"def set_bond_radii(atoms, bond_type='bond'):\n if atoms.info is None:\n atoms.info = {}\n if 'bond_radii' in atoms.info:\n r_a = atoms.info['bond_radii']\n else:\n r_a = np.ones(len(atoms))\n \n for atom in atoms:\n if bond_type == 'covalent':\n r_a[atom.index] = (pyTEMlib.crystal_tools.electronFF[atom.symbol]['bond_length'][0])\n else:\n r_a[atom.index] = (pyTEMlib.crystal_tools.electronFF[atom.symbol]['bond_length'][1])\n atoms.info['bond_radii'] = r_a\n return r_a",
"def getRadii(self):\n\n return self._radii.copy()",
"def set_states(self):\n for residue in self.residues:\n if isinstance(residue, (aa.Amino, na.Nucleic)):\n residue.set_state()",
"def setRadius(self, r):\n if not isinstance(r, (int,float)):\n raise TypeError('Radius must be a number')\n if r <= 0:\n raise ValueError(\"The circle's radius must be positive.\")\n\n factor = float(r)/self.getRadius()\n self._transform = self._transform * _Transformation((factor,0.,0.,factor,0.,0.))\n \n self._objectChanged(True,False,False)",
"def set_radial(self, vsv=3.57, vsh=3.74, vpv=6.14, vph=6.52, eta=0.87, rho=2790, resetCijkl=True):\n self.rho = rho\n A = rho*(vph**2)/1000.\n C = rho*(vpv**2)/1000.\n N = rho*(vsh**2)/1000.\n L = rho*(vsv**2)/1000.\n F = eta*(A-2*L)\n self.set_love(A=A, C=C, L=L, N=N, F=F, resetCijkl=resetCijkl)\n return",
"def set_radial_pars(self):\n\n self.curve_of_growth_pars = imexam_defpars.curve_of_growth_pars",
"def rim(self, rim):\n\n self._rim = rim",
"def irradiation(self, value: Quantity):\r\n self._irradiation = value",
"def setRadius(self, radius):\n # Change the radius\n self.radius = radius\n if self.radius * 2 <= self.height and self.radius * 2 <= self.width:\n self.radiusInd = True\n else:\n self.radiusInd = False\n # Redraw the Button\n self.undraw()\n self.draw(self.win)",
"def set_rois(self, ROIlist):\n self.stats['ROIs'] = ROIlist\n self.create_rois()",
"def set_rois(self, ROIlist):\n self.rh.create_rois(len(ROIlist))\n self.rh.resize_rois(ROIlist)\n self.display_rois()",
"def set_radicals_by_map(mol, radical_map):\n for i, atom in enumerate(mol.atoms):\n if atom.element.number != radical_map.atoms[i].element.number:\n raise ConverterError('Atom order in mol and radical_map in set_radicals_by_map() do not match. '\n '{0} is not {1}.'.format(atom.element.symbol, radical_map.atoms[i].symbol))\n atom.radical_electrons = radical_map.atoms[i].radical_electrons",
"def radii(data):\n rs = np.empty(len(data))\n for i, (points_sequence, labels_sequence) in enumerate(data):\n rs[i] = radius(points_sequence, labels_sequence, data)\n return rs",
"def set_radius(self, radius):\n self.widget.setRadius(radius)",
"def calc_rias_forallpeps(self):\r\n for aaseq in self.selpex_results:\r\n self.selpex_results[aaseq].calc_rias()",
"def incrementRadical(self):\n # Set the new radical electron counts and spin multiplicities\n self.radicalElectrons += 1\n self.spinMultiplicity += 1",
"def setRadius(self, r: 'float const') -> \"void\":\n return _coin.SbCylinder_setRadius(self, r)",
"def setRadialFactor(self, rad: 'float const'=0.0) -> \"void\":\n return _coin.SbSphereSectionProjector_setRadialFactor(self, rad)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set all charges and radii for the protein to zero
|
def zeroAllRadiiCharges(self):
for chain in self.protein.getChains():
for residue in chain.get("residues"):
for atom in residue.get("atoms"):
atom.set("ffcharge", 0.0)
atom.set("radius", 0.0)
|
[
"def zero(self):\n self.set(0.0)",
"def _zeronan(self):\n self.rate[np.isnan(self.rate)] = 0\n self.error[np.isnan(self.error)] = 0",
"def clear_zero(self):\n self._zero = None\n self._vel_sp = 0\n self._pos_sp = None\n self._on_sp = False\n self._log.info(\"Zero angle cleared\")",
"def setAllRadii(self):\n for chain in self.protein.getChains():\n for residue in chain.get(\"residues\"):\n for atom in residue.get(\"atoms\"):\n atomname = atom.get(\"name\")\n if atomname.find('FLIP') != -1:\n continue\n if atomname == \"HD1\": ###PC\n charge = 0.44\n radiues = 1.05\n else:\n charge, radius = self.forcefield.getParams(residue, atomname)\n ###PC\n if radius != None:\n atom.set(\"radius\", radius)\n else:\n if residue.type != 2:\n text = \"Could not find radius for atom %s \" % atomname\n text += \"in residue %s %i\" % (residue.name, residue.resSeq)\n text += \" while attempting to set all radii!\"\n raise ValueError(text)",
"def reset(self):\n self._proportional = 0\n self._integral = 0\n self._derivative = 0\n\n self._last_output = None\n self._last_input = None",
"def reset_grads(self):\n for dparam in self.dparams:\n dparam.set_value(0.0 * dparam.get_value())",
"def reset(self):\n self.x_mean_pr, self.x_cov_pr = None, None\n self.x_mean_sm, self.x_cov_sm = None, None\n self.xx_cov, self.xy_cov = None, None\n self.pr_mean, self.pr_cov, self.pr_xx_cov = None, None, None\n self.fi_mean, self.fi_cov = None, None\n self.sm_mean, self.sm_cov = None, None\n self.D, self.N = None, None\n self.flags = {'filtered': False, 'smoothed': False}",
"def reset(self):\r\n # reset PID values\r\n self.proportional, self.integral, self.derivative = 0, 0, 0\r\n\r\n # reset previous time and error variables\r\n self.previous_time, self.previous_error = 0, 0",
"def zero(self):\n for i in range(len(self.b)):\n self.b[i] = 0",
"def set_zero(self):\n for y in range(self.length):\n for x in range(self.length):\n self.grid[x, y] = 0",
"def _analysis_reset(self):\n\n self._normal_forces = 0\n self._shear_forces = 0\n self._bending_moments = 0\n self._deflection_equation = 0\n\n self._reactions = {}\n self._plotting_vectors = {}",
"def set_zero(self):\n self._zero = self._motor.pos[self._index] if not self._reverse else self.reverse_pos(self._motor.pos[self._index])\n self._vel_sp = 0\n self._pos_sp = None\n self._on_sp = True\n self._log.info(\"Zero angle: {:.2f}\".format(self._zero))",
"def reset(self):\n self.potentials = None\n self.in_spike_counts = None\n self.out_spike_counts = None\n self.frequency_duration = 0",
"def reset(self):\r\n Neuron.reset(self)\r\n self.voltage.set_value(numpy.zeros(self.size).astype('float32'))\r\n self.refractory_time.set_value(numpy.zeros(self.size).astype('float32'))",
"def clear(self):\n # Clear all terms!\n self.set_point = 0.0\n self.Pterm = 0.0\n self.Iterm = 0.0\n self.Dterm = 0.0\n self.last_error = 0.0\n self.control_variable = 0.0",
"def reset(self):\n\n super().reset()\n self.dynamics = self.set_dynamics()",
"def reset_to_constraints_zero(self) -> None:\n dict_to_reset = {\n \"cons_mu\": 0,\n \"cons_eta\": 0,\n \"cons_chi\": 0,\n \"cons_phi\": 0,\n \"cons_nu\": 0,\n \"cons_del\": 0,\n \"cons_alpha\": 0,\n \"cons_beta\": 0,\n \"cons_psi\": 0,\n \"cons_omega\": 0,\n \"cons_qaz\": 0,\n \"cons_naz\": 0,\n }\n for key in dict_to_reset:\n self.experiment_file_dict[key] = dict_to_reset[key]",
"def _init_atom_zero(self):\n self.atoms[0].x = self.atoms[0].radius\n self.atoms[0].y = self.borders[\"down\"] - self.atoms[0].radius\n self.atoms[0].color = color[\"INFECTED\"]\n self.atoms[0].angle = 5 * math.pi / 4\n self.atoms[0].type = \"ZERO\"",
"def reset(self):\n self.prev_risk = 0.0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Find all titratable groups in the protein based on the definition; returns the list of titratable groups found
|
def findTitratableGroups(self):
pKalist = []
print("Finding Titratable residues:")
for chain in self.protein.getChains():
for residue in chain.get("residues"):
resname = residue.get("name")
for group in self.pKagroups:
if resname == group:
amb = None
for hydrodef in self.hydrogenRoutines.hydrodefs:
hydname = hydrodef.name
if hydname == group:
amb = hydrogenAmbiguity(residue, hydrodef, self.routines)
if amb == None:
text = "Could not find hydrogen ambiguity "
text += "for titratable group %s!" % group
raise ValueError(text)
thispKa = pKa(residue, self.pKagroups[group], amb)
pKalist.append(thispKa)
print("\t%s %s" % (resname, residue.resSeq))
#
# Print the residues that we have selected
#
print()
print()
print('Titratable residues')
for pKa_v in pKalist:
print(pKa_v.residue.name, pKa_v.residue.resSeq)
print()
print()
#
# Find a neutral state for each group
#
self.neutral_ref_state = {}
for this_pka in pKalist:
residue = this_pka.residue
pKaGroup = this_pka.pKaGroup
ambiguity = this_pka.amb
for titration in pKaGroup.DefTitrations:
neutral_state = self.getAtomsForPotential(this_pka, titration, get_neutral_state=1)
self.neutral_ref_state[this_pka] = neutral_state
return pKalist
|
[
"def getGroupedHebergementTypes():",
"def get_taxonomy_groups(self):\n tgroups = TaxonomyGroup.objects.filter(taxonomyitem__taxonomymap__object_id=self.pk).distinct()\n return list(tgroups)",
"def test_get_groups_list(self):\n pass",
"def _findgroups(self):\n\t\t# find all attribute groups defined for the attributes\n\t\tattrgroups = []\n\t\tattrgroupset = {}\n\t\tfor node in self.elements.values():\n\t\t\tfor attr in node.attrs.values():\n\t\t\t\tif attr.shared is not None and attr.shared not in attrgroupset:\n\t\t\t\t\tattrgroups.append(attr.shared)\n\t\t\t\t\tattrgroupset[attr.shared] = True\n\t\treturn attrgroups",
"def query_all_groups():\n grp = MetalGroup.query.order_by(MetalGroup.level).all()\n return grp",
"def get_all_template_groups(self) -> dict:\n return self._get(\"/template/templateGroups\")",
"def _get_groups(scenario,elements):\n\t_groups = collections.OrderedDict()\n\tfor T in elements:\n\t\tif T.group is not None:\n\t\t\tgroup_name = T.group\n\t\t\tif group_name in _groups:\n\t\t\t\t_groups[group_name].append(T)\n\t\t\telse:\n\t\t\t\t_groups[group_name] = [T]\n\tgroups = collections.OrderedDict([ (_groups[group_name][0],_groups[group_name])\n\t\t\t\t\t\t\t\t\t\t for group_name in _groups])\n\tel_in_groups = [ T_ for T in groups for T_ in groups[T] ]\n\tgroups.update([ (T,[T]) for T in elements if T not in el_in_groups ])\n\treturn groups",
"def get_groups(self):\n return sorted([k for k, v in self.TOKENIZED.groupindex.items()])",
"def groups( self , pattern = None ):\n return EclSum.cNamespace().create_group_list( self , pattern )",
"def getGroups(self):\n tag_to_group_mapping = self._createTagToGroupMapping()\n groups = []\n for function_address in self.last_scan_result.keys():\n for call_ctx in self.last_scan_result[function_address].call_contexts:\n if tag_to_group_mapping[call_ctx.tag] not in groups:\n groups.append(tag_to_group_mapping[call_ctx.tag])\n return groups",
"def get_contest_groups(self):\n groups = db.get_items_by_data_type(DataType.DATA_TYPE_CONTEST_GROUPING)\n return_val = []\n for g in groups:\n\n group_contests = [c for c in self.contests if\n c.parents(DataType.DATA_TYPE_CONTEST_GROUPING, lambda x: x.value == g.value)]\n\n return_val.append({'group': g.value,\n 'expanded': False,\n 'contests': group_contests})\n\n return return_val",
"def Groups(self) -> GroupCollection:",
"def test_grouping():\n TestScanner._run(**{\n 'name': 'Grouping',\n 'expressions': {\n 'group': [RegularGrammar.left_group(), 'a', RegularGrammar.alternative(), 'b',\n RegularGrammar.right_group(), RegularGrammar.kleene_star()]\n },\n 'DFA': {\n 'Q': set(['AB*']),\n 'V': set('ab'),\n 'T': [\n [' ', 'AB*'],\n ['a', 'AB*'],\n ['b', 'AB*']\n ],\n 'S': 'AB*',\n 'F': set(['AB*']),\n 'G': {\n 'group': set(['AB*'])\n }\n }\n })",
"def _iter_groups(self, data):\n groups = data.groupby(self.segmentation_col)\n\n for name in self.models:\n yield name, groups.get_group(name)",
"def get_protein_groups(protein_chain_map, chain_group_map):\n # UnionFind uses numbers 0..n-1 to identify groups, so map\n # cluster ids to that.\n group_id = {}\n inverse_group_id = {}\n for group in chain_group_map.values():\n group_id[group] = group_id.get(group, len(group_id))\n inverse_group_id[group_id[group]] = group\n\n uf = UnionFind(len(group_id))\n for protein, chains in protein_chain_map.items():\n first_group = chain_group_map[chains[0]]\n for chain in chains:\n chain_group = chain_group_map[chain]\n uf.union(group_id[first_group], group_id[chain_group])\n\n # Now that UF group ids are stable we can assign protein groups.\n protein_group_map = {}\n for protein, chains in protein_chain_map.items():\n uf_group = uf.find(group_id[chain_group_map[chains[0]]])\n protein_group_map[protein] = inverse_group_id[uf_group]\n\n return protein_group_map",
"def get_techniques_used_by_all_groups(self, stix_format=True):\n groups = self.get_groups()\n techniques = self.get_techniques()\n group_relationships = list()\n group_techniques_ref = list()\n groups_use_techniques = list()\n filters = [\n Filter(\"type\", \"=\", \"relationship\"),\n Filter('relationship_type','=','uses')\n ]\n relationships = self.COMPOSITE_DS.query(filters)\n \n for rel in relationships:\n if get_type_from_id(rel.source_ref) == 'intrusion-set'\\\n and get_type_from_id(rel.target_ref) == 'attack-pattern':\n group_relationships.append(rel)\n \n for g in groups:\n for rel in group_relationships:\n if g['id'] == rel['source_ref']:\n gs = json.loads(g.serialize())\n gs['technique_ref'] = rel['target_ref']\n gs['relationship_description'] = rel['description']\n gs['relationship_id'] = rel['id']\n group_techniques_ref.append(gs)\n \n for gt in group_techniques_ref:\n for t in techniques:\n if gt['technique_ref'] == t['id']:\n if 'revoked' in t.keys():\n gt['revoked'] = t['revoked']\n tactic_list = list()\n if 'kill_chain_phases' in t.keys():\n tactic_list = t['kill_chain_phases']\n gt['technique'] = t['name']\n if 'description' in t.keys():\n gt['technique_description'] = t['description']\n gt['tactic'] = tactic_list\n gt['technique_id'] = t['external_references'][0]['external_id']\n gt['matrix'] = t['external_references'][0]['source_name']\n if 'x_mitre_platforms' in t.keys():\n gt['platform'] = t['x_mitre_platforms']\n if 'x_mitre_data_sources' in t.keys():\n gt['data_sources'] = t['x_mitre_data_sources']\n if 'x_mitre_permissions_required' in t.keys():\n gt['permissions_required'] = t['x_mitre_permissions_required']\n if 'x_mitre_effective_permissions' in t.keys():\n gt['effective_permissions'] = t['x_mitre_effective_permissions']\n groups_use_techniques.append(gt)\n if not stix_format:\n groups_use_techniques = self.translate_stix_objects(groups_use_techniques)\n return groups_use_techniques",
"def get_age_groups():\n age_groups = db_tools.ezfuncs.query(\"\"\"\n SELECT *\n FROM shared.age_group\n \"\"\", conn_def='cod')\n return age_groups",
"def get_groups(_input):\n group = []\n for idx, line in enumerate(_input):\n if not line and group:\n yield group\n group = []\n continue\n\n group.append(list(line))\n\n if idx + 1 == len(_input) and group:\n yield group",
"def get_group_positions(u, indi):\n positions = []\n for i in indi.correction_groups:\n selstr = 'bynum %d' % i[0]\n for j in i[1:]:\n selstr += ' or bynum %d' % j\n positions.append(u.select_atoms(selstr).positions)\n return positions"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read the Titration Definition
|
def readTitrationDefinition(self):
    mygroups = {}
    filename = TITRATIONFILE
    if not os.path.isfile(TITRATIONFILE):
        raise ValueError("Could not find TITRATION.DAT!")
    file = open(filename)
    while True:
        line = file.readline()
        if line.startswith("//"):
            pass
        elif line == '':
            break
        elif line[0] == '*':
            # Start of a new titratable group definition
            name = ""
            resname = ""
            type = ""
            titrations = []
            name = line[1:].strip()
            line = file.readline()
            if line[:8] != 'Residue:':
                text = "Wrong line found when looking for 'Residue'"
                raise ValueError("%s: %s" % (text, line))
            resname = line.split()[1].strip()
            line = file.readline()
            if line[:10] != 'Grouptype:':
                text = "Wrong line found when looking for 'Grouptype'"
                raise ValueError("%s: %s" % (text, line))
            type = line.split()[1].strip().lower()
            if type != 'acid' and type != 'base':
                raise ValueError('Group type must be acid or base!')
            line = file.readline()
            while True:
                #
                # Find the next transition, skipping comments
                #
                while line[:2] == '//':
                    line = file.readline()
                startstates = []
                endstates = []
                modelpKa = None
                if line[:11] != 'Transition:':
                    text = "Wrong line found when looking for 'Transition:'"
                    raise ValueError("%s: %s" % (text, line))
                split = line[11:].split('->')
                for number in split[0].split(','):
                    startstates.append(number.strip())
                for number in split[1].split(','):
                    endstates.append(number.strip())
                line = file.readline()
                #
                # Skip comments
                #
                while line[:2] == '//':
                    line = file.readline()
                #
                # Must be the model pKa line
                #
                if line[:10] != 'Model_pKa:':
                    text = "Wrong line found when looking for 'Model_pKa'"
                    raise ValueError("%s: %s" % (text, line))
                modelpKa = float(line.split()[1])
                thisTitration = DefTitration(startstates, endstates, modelpKa)
                titrations.append(thisTitration)
                line = file.readline()
                if line.strip() == 'END':
                    break
            thisGroup = pKaGroup(name, resname, type, titrations)
            mygroups[name] = thisGroup
            line = file.readline()
            if line.strip() == 'END OF FILE':
                break
    file.close()
    return mygroups
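For reference, a minimal sketch of the file layout this parser accepts; the residue name, state numbers and model pKa below are illustrative values, not taken from the real TITRATION.DAT:

* ASP
Residue: ASP
Grouptype: Acid
// protonated states 1,2 titrate to the deprotonated state 3
Transition: 1,2 -> 3
Model_pKa: 4.0
END
END OF FILE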
|
[
"def _read_structure_attributes(f):\n\n line = ''\n variogram_info = {}\n while \"end structure\" not in line:\n line = f.readline()\n if line == '':\n raise Exception(\"EOF while reading structure\")\n line = line.strip().lower().split()\n if line[0].startswith('#'):\n continue\n if line[0] == \"nugget\":\n nugget = float(line[1])\n elif line[0] == \"transform\":\n transform = line[1]\n elif line[0] == \"numvariogram\":\n numvariograms = int(line[1])\n elif line[0] == \"variogram\":\n variogram_info[line[1]] = float(line[2])\n elif line[0] == \"end\":\n break\n elif line[0] == \"mean\":\n warning.warn(\"'mean' attribute not supported, skipping\",PyemuWarningF)\n else:\n raise Exception(\"unrecognized line in structure definition:{0}\".\\\n format(line[0]))\n assert numvariograms == len(variogram_info)\n return nugget,transform,variogram_info",
"def read_pronunciation(pronunciation_file):",
"def read_template(self, template, space=None):\n pass",
"def read_from_qe_dos_txt(self):\n raise Exception(\"No function defined to read this quantity \"\n \"from a qe.dos.txt file\")",
"def read_file(self):\n stream_ifdef = ''\n lines = open(self.enum_fn,'r').readlines()\n simple_tuples = []\n density = 'automatic'\n namespace = None\n proto_prefix = ''\n extra_header = []\n cplusplus = False\n for line in lines:\n nline = metaenum_t.comment_pattern.sub('',line).strip()\n if len(nline) == 0:\n continue\n wrds = nline.split()\n if wrds[0] == 'cplusplus':\n cplusplus = True\n elif wrds[0] == 'namespace':\n namespace = wrds[1]\n elif wrds[0] == 'hfn':\n hfn = wrds[1]\n elif wrds[0] == 'cfn':\n cfn = wrds[1]\n elif wrds[0] == 'density':\n density = wrds[1]\n elif wrds[0] == 'prefix':\n prefix = wrds[1]\n elif wrds[0] == 'typename':\n typename = wrds[1]\n elif wrds[0] == 'stream_ifdef':\n stream_ifdef = wrds[1]\n elif wrds[0] == 'proto_prefix':\n proto_prefix = wrds[1]\n elif wrds[0] == 'extra_header':\n extra_header.append(wrds[1])\n else:\n token = wrds[0]\n comment = None\n value = None\n if len(wrds) > 1:\n if metaenum_t.doxygen_comment_pattern.match(wrds[1]):\n comment = ' '.join(wrds[1:])\n else:\n value = wrds[1]\n if len(wrds) > 2:\n comment = ' '.join(wrds[2:])\n simple_tuples.append( (token, value, comment) )\n \n self.tuples = []\n for token,value,comment in simple_tuples:\n self.tuples.append(enumer.enumer_value_t(token,value,comment))\n \n self.cfn = cfn\n self.hfn = hfn\n self.density = density\n self.namespace = namespace\n self.type_name = typename\n self.prefix = prefix\n self.stream_ifdef = stream_ifdef\n self.proto_prefix= proto_prefix\n self.extra_header= extra_header\n self.cplusplus = cplusplus",
"def task_definition(self) -> pulumi.Output['pulumi_aws.ecs.TaskDefinition']:\n return pulumi.get(self, \"task_definition\")",
"def setup_syntax():\n syntax = {}\n with open(syntax_file, \"r\") as s:\n try:\n line = s.readline().strip()\n while line:\n #finds the first ':' going backwards from end of line\n colon_index = line.rfind(':')\n syntax[line[1:colon_index-1]] = line[colon_index+1:].strip()\n line = s.readline()\n except:\n raise IOError(f\"The given syntax file {syntax_file} has faulty formatting.\")\n return syntax",
"def _writeTreatments(self, fout):\n fout.write(\"*TREATMENTS\\r\\n\")\n fout.write(\" 5 1 0 0 140 kg N as urea(2/3 18 D\\r\\n\")",
"def _definition(self):\n if self._current_token.type == FILE:\n result = self._file_assignment()\n else:\n options = self._options()\n value = self._current_token\n self._eat(VALUE)\n if self._current_token.type != LPAREN:\n statement_list = tuple()\n else:\n self._eat(LPAREN)\n statement_list = self._statement_list() \n self._eat(RPAREN)\n result = options, Definition(value,statement_list)\n\n return result",
"def ttInterpret( program ):\n # REMOVE THIS LINE AND THE print STATEMENT WHEN YOU IMPLEMENT THIS FUNCTION.\n\n return",
"def get_magnetomoeter_reading(self):\n self.current_heading = self.magnetometer.degrees(self.magnetometer.heading())\n logging.info(\"heading : %s\", self.current_heading)",
"def _toml(self):\r\n data = {}\r\n with open(self._filename, 'rb') as f:\r\n data = pytoml.load(f)\r\n return self._wrap(data)",
"def test_read_patent(self):\n with open(\"tests/json/patent_class_test1.txt\", 'r') as infile:\n output = infile.read().replace('\\n', '')\n p = Patent(\"tests/patsnap_corpus/EP0049154B2.xml\")\n self.assertEqual(output, str(p.get_data()))",
"def testReadTimelinerDefinition(self):\n test_timeliner_file = yaml_timeliner_file.YAMLTimelinerConfigurationFile()\n\n timeliner_definition = test_timeliner_file._ReadTimelinerDefinition(\n self._FORMATTERS_YAML)\n\n self.assertIsNotNone(timeliner_definition)\n self.assertEqual(timeliner_definition.data_type, 'test:fs:stat')\n\n with self.assertRaises(errors.ParseError):\n test_timeliner_file._ReadTimelinerDefinition({})\n\n with self.assertRaises(errors.ParseError):\n test_timeliner_file._ReadTimelinerDefinition({'bogus': 'error'})",
"def get_alice_trna_170():\n seq_ftr = create_1_part_seqfeature(96431, 96507, 1, \"tRNA\")\n return seq_ftr",
"def lkt_doc(full_decl):\n return '\\n'.join(l.text for l in full_decl.f_doc)",
"def testReadFormatDefinition(self):\n definition_values = {\n u'description': u'Windows Shortcut (LNK) file format',\n u'type': u'format',\n }\n\n definitions_registry = registry.DataTypeDefinitionsRegistry()\n definitions_reader = reader.DataTypeDefinitionsFileReader()\n\n # TODO: implement.\n _ = definitions_registry\n\n data_type_definition = definitions_reader._ReadFormatDefinition(\n definition_values, u'lnk')\n self.assertIsNotNone(data_type_definition)\n self.assertIsInstance(data_type_definition, data_types.FormatDefinition)",
"def word_types(template_filepath):\n with open(template_filepath, 'r') as fh:\n prompts = []\n content = fh.read().split()\n for word in content:\n if word[0] == '{' and word[-1] == '}':\n prompts.append(word[1:-1])\n return prompts\n\n # return fh.read()\n # return fh.readlines()\n # try/except FileNotFoundError:\n # sys.exit('File Not Found. Try again.')",
"def read_runtime_def_file(self, *args):\n with open(os.path.join(self.runtime_def_root, *args)) as fp:\n return fp.read()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test the interface with pKaTool
|
def test_interface():
import pKaTool.pKa_calc
X = pKaTool.pKa_calc.Monte_Carlo_Mult_CPP()
X.intrinsic_pKa = {':0001:ASP': [0.0, 4.0, 5.0]}
X.charged_state = {':0001:ASP': [0, 1, 1]}
X.acid_base = {':0001:ASP': -1}
X.intene_mult = {':0001:ASP': {':0001:ASP': [[0, 0, 0], [0, 0, 0], [0, 0, 0]]}}
X._calc_pKas(0.0, 10.0, 0.5)
return
|
[
"def testSKPCA():\n pass",
"def test_example_azerty():\n azerty.main(test=True)",
"def test_ProjE():\n testing_function('proje_pointwise')",
"def test_pro_bowlers(self):\n pass",
"def test_vicars_get(self):\n pass",
"def test_run_feature_selection(self):",
"def tests(self):\n pass",
"def test_use_qa_initialize():\n client = USEQA2Vec()\n assert True",
"def test_get_checker_results(self):\n pass",
"def test_standings(self):\n pass",
"def test_chores_get(self):\n pass",
"def test_verify_toolkit_pip_install(self):\n topo = Topology()\n streamsx.spl.toolkit.add_toolkit(topo, stu._tk_dir('testtkpy_pip_toolkit'))\n s = topo.source(['a'])\n s = s.as_string()\n\n fp = op.Map(\n \"com.ibm.streamsx.topology.pytest.pypip::find_a_pint_toolkit\",\n s)\n tester = Tester(topo)\n tester.contents(fp.stream, ['RTTK_PintImported'])\n tester.test(self.test_ctxtype, self.test_config)",
"def test_example_textinput():\n textinput.main(test=True)",
"def test_010(self):\n\n HEADING()\n result = run(\"cm key list --source=cloudmesh\")\n print (result)\n assert \"OK.\" in result",
"def test_get_tool_constraints(self):\n\n # When no parameters are given an error is raised\n self.assertRaises(MissingParameterError, portal.get_tool)",
"def test_012(self):\n\n HEADING()\n result = run(\"cm key list --source=cloudmesh\")\n print (result)\n assert \"OK.\" in result",
"def setUp(self):\r\n\r\n self.DUT = Allocation()",
"def main():\n\n # automaticky_testovac()\n\n riadic()",
"def test02_get_kpi_threshold_config(setup):\n config, headers, json_req = setup\n LOGGER.info(\"Validating the GET kpi threshold config..\")\n try:\n response = request(\"GET\", url=config[\"QA\"][\"GET_TH_URL\"], headers=headers, verify=False)\n except Exception as error:\n LOGGER.error(\"HTTPS connection lost :\", error)\n print(\"till here 1\")\n\n assert response.status_code == 200, LOGGER.error(\"kpi threshold config get api is down\")\n print(\"Assert passing..\")\n obj2 = verifier.DataV1()\n print(\"-----mistake------\")\n print(obj2)\n obj2.kpi_name_list()\n\n res = json.loads(response.text)\n start_date = res['config'][0]['startDate']\n assert start_date\n settings = res['config'][0]['settings']\n kpi_names_set = set()\n for name_list in settings:\n kpi_names_set.add(name_list['kpi'])\n kpi_names = list(kpi_names_set)\n assert kpi_names, LOGGER.error(\"V1 Cluster is down, GET calls are failing..\")\n if len(kpi_names) > 0 and len(obj2.kpi_name_list()) > 0:\n assert kpi_names.sort() == obj2.kpi_name_list().sort(), LOGGER.error(\"'kpi names are not matching\"\n \"with database\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculates the angular defect of the given vertex
|
def angular_defect(self, vertex):
defect = 2 * math.pi
for face in self.faces:
if vertex in face:
tmp = list(face)
tmp.remove(vertex)
u, v = tmp
top = self.distance(vertex, u) ** 2 + self.distance(vertex, v) ** 2 - self.distance(u, v) ** 2
bottom = 2 * self.distance(vertex, u) * self.distance(vertex, v)
defect -= math.acos(top / bottom)
return defect
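As a sanity check, here is a minimal standalone sketch applying the same law-of-cosines angle sum to a regular tetrahedron; the points, faces, dist helper and tetra_defect function are invented for illustration and are not part of the mesh class above. At a tetrahedron vertex each of the three incident equilateral triangles contributes an angle of pi/3, so the defect should be exactly pi.

import math

points = {0: (1, 1, 1), 1: (1, -1, -1), 2: (-1, 1, -1), 3: (-1, -1, 1)}
faces = [(0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3)]

def dist(a, b):
    return math.dist(points[a], points[b])

def tetra_defect(vertex):
    # 2*pi minus the interior angles of the incident triangles at the vertex
    defect = 2 * math.pi
    for face in faces:
        if vertex in face:
            u, v = [w for w in face if w != vertex]
            cos_angle = (dist(vertex, u) ** 2 + dist(vertex, v) ** 2 - dist(u, v) ** 2) / (
                2 * dist(vertex, u) * dist(vertex, v))
            defect -= math.acos(cos_angle)
    return defect

print(round(tetra_defect(0), 6))  # 3.141593, i.e. pi = 2*pi - 3*(pi/3)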
|
[
"def get_angularv(self):\n\t\treturn \"{} {} {}\".format(self.data.angular_velocity.x, self.data.angular_velocity.y, self.data.angular_velocity.z)",
"def angleDefect(self):\n if 'angleDefect' in self._cache: return self._cache['angleDefect']\n\n if(self.isBoundary()):\n defect = 0\n\n else:\n\n angSum = 0.0\n vList = [normalize(h.vec) for h in self.adjHalfEdges()]\n # print(\" \" + str(vList))\n for (a,b) in circlePairs(vList):\n oppAngle = acos(np.dot(a,b))\n\n # If the vectors make an angle of more than pi, we would be choosing\n # the wrong inverse of cosine. Need to detect and correct for this case\n if np.dot(cross(b,a), self.normal) < 0:\n oppAngle += pi\n\n # print(\" + \" + str(oppAngle))\n angSum += oppAngle\n\n defect = angSum - 2*pi\n\n if self.staticGeometry: self._cache['angleDefect'] = defect\n return defect",
"def compute_per_vertex_extrusion_vector(merged_vert, region):\n\n polys_at_vert = merged_vert.connected_polys & region\n normals_at_vert = [p.normal.normalized() for p in polys_at_vert]\n avg_poly_normal = sum(normals_at_vert, Vec3()).normalized()\n normals = []\n\n for n in normals_at_vert:\n for other_n in normals:\n if abs(n.dot(other_n)) > .999:\n break\n else:\n normals.append(n)\n\n normals_by_dot = {(avg_poly_normal.dot(n), i): n for i, n in enumerate(normals)}\n normals = [normals_by_dot[d] for d in sorted(normals_by_dot)][:4]\n planes = [Plane(n, Point3() + n) for n in normals]\n point_on_line = Point3()\n line_vec = Vec3()\n intersection_point = Point3()\n\n if len(planes) == 1:\n # there's only one poly at the vertex; the extrusion vector\n # is the normal to that poly\n extrusion_vec = normals[0]\n else:\n if planes[0].intersects_plane(point_on_line, line_vec, planes[1]):\n if len(planes) == 2:\n # there are two polys at the vertex; the extrusion\n # vector is perpendicular to the intersection line of\n # both polygon planes\n extrusion_vec = Vec3(point_on_line)\n extrusion_vec -= extrusion_vec.project(line_vec)\n elif len(planes) == 2:\n extrusion_vec = normals[0]\n\n if len(planes) < 3:\n return extrusion_vec * sign\n\n scale_vec = None\n\n while len(planes) > 2:\n\n if planes.pop(2).intersects_line(intersection_point, point_on_line,\n point_on_line + line_vec):\n tmp_vec = Vec3(intersection_point)\n else:\n tmp_vec = None\n\n if scale_vec and tmp_vec:\n scale_vec = (scale_vec + tmp_vec) * .5\n else:\n scale_vec = tmp_vec\n\n l = scale_vec.length() if scale_vec else 1.\n\n return avg_poly_normal * l * sign",
"def compute_divergence(self, field):\n edges = self.edges().reshape(-1, 3, 2)\n sorted_edges = np.sort(edges, axis=-1)\n vecs = np.diff(self.vertices[sorted_edges], axis=2)[:, :, 0, :]\n inner = util.dot(vecs, field[:, None, :])\n cotan = 1 / np.tan(self.compute_angles())\n vertex_incidence = self.compute_vertex_incidence()\n return vertex_incidence.T * self.remap_edges(inner * cotan) / 2",
"def get_angular(self):\n return self._v_ang.copy()",
"def calc_blade_friction_force():\r\n # return c_a * d * w\r\n return 0",
"def inradius(vertices):\n a = area(vertices)\n s = perimeter(vertices) / 2\n return a / s",
"def f(u):\n \n #h = u[0] # Not used anywhere\n v = u[1]\n \n return numpy.array([v,-g + mDot_p*v_e/(m_s+m_p) - 0.5*rho*v*abs(v)*A*C_D/(m_s+m_p) ]) # ohh abs(v) is sooo much important, for downward velocity, the drag must be up!",
"def set_angular(self, new_v_ang):\n self._v_ang = m3d.Vector(new_v_ang)",
"def update_angular_velocity(self, msg):\n\t\tself.ekf.ang_vel = enu_to_ned(np.array([[msg.twist.angular.x], [msg.twist.angular.y], [msg.twist.angular.z]]))",
"def voronoi_vertex(vy, vx, vertex_y, vertex_x):\n return numpy.argmin(numpy.hypot(vy - vertex_y, vx - vertex_x))",
"def left_integral3D(facets, index, expr, vertices, hp_param, degree):\n value = S.Zero\n facet = facets[index]\n x0 = vertices[facet[0]]\n facet_len = len(facet)\n for i, fac in enumerate(facet):\n side = (vertices[fac], vertices[facet[(i + 1) % facet_len]])\n value += distance_to_side(x0, side, hp_param[0]) * \\\n lineseg_integrate(facet, i, side, expr, degree)\n return value",
"def get_deg_in(self, vertex=None):\n deg = 0\n for element in self._edges.values():\n if vertex in element:\n print(element)\n deg += 1\n return deg",
"def in_triangleplane_coords(vertices, v):\n b = basis(vertices)\n v2 = numpy.zeros(2)\n for i in range(2):\n v2[i] = numpy.dot(v, b[i])\n return v2",
"def Jacvec(y, x, v):\n return torch.autograd.grad(y, x, v, retain_graph=True)",
"def calc_acc(vx,vy,beta):\n\n # Calculate magnitude of velocity:\n vmag = math.sqrt((vx**2)+(vy**2))\n\n # Calculate ax:\n ax = -beta*vmag*vx\n\n # Calculate ay:\n ay = -beta*vmag*vy-9.81\n\n return(ax,ay)",
"def v(self):\n return self.centroid_velocity_tangent / np.linalg.norm(\n self.centroid_velocity_tangent\n )",
"def altitude_vectors(vertices):\n a = area(vertices)\n hv = dual_edges(vertices)\n for i in range(3):\n h = 2 * a / numpy.linalg.norm(vertices[(i + 1) % 3] - vertices[(i + 2) % 3])\n hv[i] = h * hv[i] / numpy.linalg.norm(hv[i])\n return hv",
"def angular_velocity(self):\n return 0.0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the slot at a specific index.
|
def slot(self, i):
if i < 0 or i > 2:
raise ValueError('Only three slots are available')
return self.get_slots()[i]
|
[
"def getSlot(self, index: int) -> InventoryItem:\r\n\t\treturn self._content[index]",
"def _find_slot(self, key):\n hashkey = hash(key) % self._size\n slot = self._slots[hashkey]\n return slot",
"def GetSlot(self, name):\n return next((x for x in self._slots if x.Name == name), None)",
"def get_slot(aMap, key, default=None):\n\tbucket = get_bucket(aMap,key)\n\tfor i,kv in enumerate(bucket):\n\t\tk,v = kv #kv is a tuple\n\t\tif key == k:\n\t\t\treturn i, k, v\n\treturn -1, key, default",
"def get_by_index(self, index):\n return self.tile_list[index]",
"def _get_slot(tracker: Tracker, slot: str) -> str:\n slot = tracker.get_slot(slot)\n\n if slot is None:\n raise SlotNotFound(slot)\n\n return slot",
"def getIndex(self, index: 'int const') -> \"int\":\n return _coin.SoPath_getIndex(self, index)",
"def get_part(self, idx):\n return self.parts[idx]",
"def __getitem__(self, index: int) -> Cell:\n\n if index[0] <= self.N and index[1] <= self.N:\n return self._safe_get(index)\n return None",
"def Map_get_slot(aMap, key, default=None):\n bucket = Map_get_bucket(aMap, key)\n \n for i, kv in enumerate(bucket):\n k, v = kv\n if key == k:\n return i, k, v\n \n return -1, key, default",
"def get(self, index: int) -> int: \n i = 0\n cur = self.head\n while cur is not None:\n if i==index:\n return cur.val\n i+=1\n cur = cur.nextNode\n return -1",
"def get(self, item):\n start_slot = self.hash_function(item)\n\n stop = False\n found = False\n position = start_slot\n while self.slots[position] is not None and not found and not stop:\n if self.slots[position] == item:\n found = True\n else:\n position = self.rehash(position)\n if position == start_slot:\n stop = True\n if found:\n return position\n return -1",
"def _get_slot_for(self, x, y) -> gui.Slot.Slot or None:\n for inventory in G.inventoryhandler.opened_inventorystack:\n dx, dy = inventory._get_position()\n for slot in inventory.get_interaction_slots():\n sx, sy = slot.position\n sx += dx\n sy += dy\n if 0 <= x - sx <= 32 and 0 <= y - sy <= 32:\n return slot\n return None",
"def __getitem__(self, index):\n return self.q[index]",
"def get_slot(self, var, name):\n named_slots = self._slots.get(name, None)\n if not named_slots:\n return None\n slot = named_slots.get(_var_key(var), None)\n if (distribute_utils.is_distributed_variable(slot) and\n not distribute_utils.is_distributed_variable(var)):\n # Make sure var and slot are either both DistributedVariable, or both\n # per replica variables.\n slot = slot._get_on_device_or_primary() # pylint: disable=protected-access\n return slot",
"def __getitem__(self, index=0):\n if index < 0:\n index = len(self) + index\n return self._get(index)",
"def GetIdSlot(self, name):\n return next((x for x in self._slots if x.Name == name + gc.SlotIdSuffix), None)",
"def _safe_get(self, index: int) -> Cell:\n return self._board[index[0] - 1][index[1] - 1]",
"def get(self, index: int) -> int:\n if index >= self.length:\n return -1\n else:\n i = 0\n tmp_node: Node = self.first\n while i < index:\n tmp_node = tmp_node.next\n i += 1\n return tmp_node.val if tmp_node else -1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get alarm slots. Speakers have 3 alarm slots available. This method will return the ones that are set as well as empty ones to use for setting new alarms.
|
def get_slots(self):
alarms = self._api.get_alarm_info()
for alarm in alarms:
index = int(alarm['@index'])
self._slots[index] = AlarmSlot(self._api, index, alarm)
# fill with empty slots
for index in range(3):
if self._slots[index] is None:
self._slots[index] = AlarmSlot(self._api, index)
return self._slots
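To make the slot bookkeeping concrete, a minimal self-contained sketch follows; FakeApi, FakeAlarmSlot and the FakeAlarms wrapper are invented stand-ins for the real speaker API and alarm classes, and it is assumed (not shown in the code above) that self._slots starts out as [None, None, None].

class FakeAlarmSlot:
    def __init__(self, api, index, alarm=None):
        self.index = index
        self.is_set = alarm is not None

class FakeApi:
    def get_alarm_info(self):
        # one alarm configured in slot 1; slots 0 and 2 are free
        return [{'@index': '1', 'time': '07:30'}]

class FakeAlarms:
    def __init__(self, api):
        self._api = api
        self._slots = [None, None, None]

    def get_slots(self):
        # same fill-in behaviour as above: set slots first, then empty ones
        for alarm in self._api.get_alarm_info():
            index = int(alarm['@index'])
            self._slots[index] = FakeAlarmSlot(self._api, index, alarm)
        for index in range(3):
            if self._slots[index] is None:
                self._slots[index] = FakeAlarmSlot(self._api, index)
        return self._slots

print([slot.is_set for slot in FakeAlarms(FakeApi()).get_slots()])  # [False, True, False]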
|
[
"def get_slots(self):\n return set(self._slots.keys())",
"def get_schedule_slots_by_time(self, time):\n return # osid.calendaring.ScheduleSlotList",
"def get_alarms(zone=None):\n alarms = Alarms()\n alarms.update(zone)\n return set(alarms.alarms.values())",
"def get_timeslot_include_exams(self):\n t = Timeslot()\n while t.is_free():\n t = self.get_random_timeslot()\n return t",
"def list_time_slots(self):\n list_of_slots = []\n # index counters\n day = 0\n room = 0\n slot = 0\n\n for each_day in self.days:\n for each_room in each_day.rooms:\n for each_slot in each_room.schedule:\n list_of_slots.append(each_slot)\n each_slot.set_indices(day, room, slot)\n slot += 1\n room += 1\n slot = 0\n day += 1\n room = 0\n return list_of_slots",
"def get(self):\n LOGGER.info(\"Calling GET on the /timeslots\")\n return timeslot_service.get_all_timeslots()",
"def get_available_slots(office: Office, days: [datetime], format_time: bool = True, service: Service = None):\n try:\n available_slots_per_day = {}\n if office.appointments_enabled_ind == 0:\n return available_slots_per_day\n\n # find appointment duration per office and fetch timeslot master data\n appointment_duration = office.appointment_duration\n\n # If user has passed in service and it has duration, use that instead\n if (service and service.timeslot_duration):\n appointment_duration = service.timeslot_duration\n\n service_is_dltk = service and service.is_dlkt == YesNo.YES\n \n # Dictionary to store the available slots per day\n tz = pytz.timezone(office.timezone.timezone_name)\n\n # today's date and time\n today = datetime.datetime.now().astimezone(tz)\n\n # soonest a citizen can book an appointment\n soonest_appointment_date = today + datetime.timedelta(minutes = office.soonest_appointment or 0)\n\n # Find all appointments between the dates\n appointments = Appointment.find_appointment_availability(office_id=office.office_id, first_date=today,\n last_date=days[-1],\n timezone=office.timezone.timezone_name)\n grouped_appointments = AvailabilityService.group_appointments(appointments, office.timezone.timezone_name)\n\n # For each of the day calculate the slots based on time slots\n for day_in_month in days:\n formatted_date = day_in_month.strftime('%m/%d/%Y')\n available_slots_per_day[formatted_date] = []\n for timeslot in office.timeslots:\n # Calculate the slots per day\n timeslot_end_time = timeslot.end_time.replace(tzinfo=tz)\n timeslot_start_time = timeslot.start_time.replace(tzinfo=tz)\n if day_in_month.isoweekday() in day_indexes(timeslot.day_of_week):\n start_time = timeslot_start_time\n end_time = add_delta_to_time(timeslot_start_time, minutes=appointment_duration,\n timezone=office.timezone.timezone_name)\n\n # Cannot exceed office timeslot slots.\n dlkt_slots = office.number_of_dlkt or 0\n \n if ( dlkt_slots > timeslot.no_of_slots):\n dlkt_slots = timeslot.no_of_slots\n \n\n # Limit DLKT slots only for DLKT services.\n no_of_slots = timeslot.no_of_slots\n\n while end_time <= timeslot_end_time:\n slot = {\n 'start_time': start_time,\n 'end_time': end_time,\n 'no_of_slots': no_of_slots,\n 'no_of_dlkt_slots': dlkt_slots\n }\n # Check if today's time is past appointment slot\n # Arc - also check if in office.soonest_appointment\n if ((day_in_month.date() == soonest_appointment_date.date() and start_time >= soonest_appointment_date.time()) or day_in_month.date() > soonest_appointment_date.date()) and slot not in available_slots_per_day[formatted_date]: \n available_slots_per_day[formatted_date].append(slot)\n\n start_time = end_time.replace(tzinfo=tz)\n end_time = add_delta_to_time(end_time, minutes=appointment_duration,\n timezone=office.timezone.timezone_name)\n\n # Sort the slot by time for the day\n available_slots_per_day[formatted_date].sort(key=lambda x: x['start_time'])\n\n # Check if the slots are already booked\n for actual_slot in available_slots_per_day[formatted_date]:\n booked_slots = 0\n booked_dlkt_slots = 0\n for booked_slot in grouped_appointments.get(formatted_date, []):\n if booked_slot.get('start_time') \\\n <= actual_slot.get('start_time') \\\n < booked_slot.get('end_time') \\\n or \\\n actual_slot.get('end_time') \\\n > booked_slot.get('start_time') \\\n >= actual_slot.get('start_time'):\n\n\n if booked_slot.get('blackout_flag', False): # If it's blackout override the no of slots\n actual_slot['no_of_slots'] = 0\n else:\n if booked_slot['is_dlkt']:\n 
booked_dlkt_slots += 1\n else: \n booked_slots += 1 \n if service_is_dltk:\n dlkt_nos = actual_slot['no_of_dlkt_slots'] - booked_dlkt_slots\n if actual_slot['no_of_slots'] <= (booked_slots + booked_dlkt_slots):\n actual_slot['no_of_slots'] = 0\n elif actual_slot['no_of_slots'] - booked_slots >= dlkt_nos:\n actual_slot['no_of_slots'] = dlkt_nos\n else: \n actual_slot['no_of_slots'] = dlkt_nos - (actual_slot['no_of_slots'] - booked_slots) \n else:\n actual_slot['no_of_slots'] = actual_slot['no_of_slots'] - (booked_slots + booked_dlkt_slots)\n\n del actual_slot['no_of_dlkt_slots'] # no need to expose\n \n if format_time: # If true send formatted time\n actual_slot['start_time'] = actual_slot['start_time'].strftime('%H:%M')\n actual_slot['end_time'] = actual_slot['end_time'].strftime('%H:%M')\n\n return AvailabilityService.prune_appointments(available_slots_per_day)\n\n except exc.SQLAlchemyError as exception:\n logging.exception(exception)\n return {'message': 'API is down'}, 500",
"def get_device_alarms(self) -> Dict[str, Any]:\n\n logger.debug(\"Requesting device alarms\")\n\n alarms = []\n devices = self.get_devices()\n for device in devices:\n device_settings = self.get_device_settings(device[\"deviceId\"])\n alarms += device_settings[\"alarms\"]\n return alarms",
"def alarms(self):\n sources = copy.deepcopy(self.sources)\n alarms = []\n idx = 0\n for src in sources:\n if src.device.alarm:\n alarms.append(\"s_\"+str(idx).zfill(2)) #the s_ prefix is generated in pxp.py\n idx+=1\n return alarms",
"def get_schedule_slots_by_calendar(self, calendar_id):\n return # osid.calendaring.ScheduleSlotList",
"def find_empty_time_slots(self):\n empty_slots = []\n for each_day in self.days:\n for each_room in each_day.rooms:\n for each_slot in each_room.schedule:\n if each_slot.course is None:\n empty_slots.append(each_slot)\n return empty_slots",
"def axon_slots(self):\n return self.slots_per_axon * self.n_axons",
"def list_alarms(self):\r\n return self.manager.list_alarms(self)",
"def get_slots_set(employee=None, candidate=None):\n result = set()\n slots = Slot.objects.filter(employee=employee, candidate=candidate)\n for slot in slots:\n result.add(datetime.strptime(slot.code, '%Y%m%d%H').strftime('%Y-%m-%d %H:00'))\n return result",
"def all_button_alarms():\n return [ pin_alarm_button(board.BUTTON_A), pin_alarm_button(board.BUTTON_B), pin_alarm_button(board.BUTTON_C), pin_alarm_button(board.BUTTON_D) ]",
"def get_schedule_slots_by_weekdays(self, weekdays):\n return # osid.calendaring.ScheduleSlotList",
"def get_reserve_slots(self):\n slots = []\n for spot in self:\n slots += spot.get_reserve_slots()\n return slots",
"def appointments(self):\n appointments = []\n if self.show == 'forms':\n appointments = [self.appointment]\n else:\n # or filter appointments for the current membership categories\n # schedule_group__membership_form\n codes = []\n for category in self.membership_form_category:\n codes.extend(MembershipForm.objects.codes_for_category(membership_form_category=category))\n appointments = Appointment.objects.filter(\n registered_subject=self.registered_subject,\n visit_definition__code__in=codes).order_by(\n 'visit_definition__time_point', 'visit_instance', 'appt_datetime')\n return appointments",
"def get_schedule_slots_by_calendars(self, calendar_ids):\n return # osid.calendaring.ScheduleSlotList"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Delete this alarm and set alarm settings to defaults.
|
def delete(self):
self._api.del_alarm(self._index)
self._set_defaults()
|
[
"def delete_alarm(self, alarm):\r\n return self.manager.delete_alarm(self, alarm)",
"def reset(self):\n self.clear()\n dict.update(self, self.defaults)",
"def remove(self):\n result = self.zone.alarmClock.DestroyAlarm([(\"ID\", self.alarm_id)])\n alarms = Alarms()\n alarms.alarms.pop(self.alarm_id, None)\n self._alarm_id = None\n return result",
"def instance_clear_alarm(alarm_list):\n for alarm_data in alarm_list:\n alarm.alarm_clear(alarm_data.alarm_uuid)",
"def delete_alarms(self, req):\r\n self._enforce(req, 'DeleteAlarms')\r\n return exception.HeatAPINotImplementedError()",
"def clear_alarm(self, clear_history=True):\n self._alarm_state = False\n if clear_history:\n self._alarm_trace = list()",
"def execAlarm(self):\n self._alarm = None\n self.onAlarm()",
"async def ClearReminders(self, ctx: commands.Context):\n await self.config.user(ctx.author).set_raw('reminders', value={})\n await ctx.send(\"Okay, I've removed all your upcoming reminders, except for those\"\n \" set to go off in %d seconds.\" % (2 * self.monitoring_interval))",
"def reset_effimat_alarm(self, effimat_number=0):\n self.effimat_list[effimat_number].reset_alarm()",
"def delete_alarm(self, entity, alarm):\r\n uri = \"/%s/%s/alarms/%s\" % (self.uri_base, utils.get_id(entity),\r\n utils.get_id(alarm))\r\n resp, resp_body = self.api.method_delete(uri)",
"def clear_settings(self):\n self.settings.remove('recent_files_list')\n self.load_files_from_settings()\n self.update_actions()",
"def clear_all_alarms(self):\n self.navigate_to(self.MONITOR, self.MONITOR_ALL_ALARMS)\n\n #JLIN@20081112 add delay time for ZD implement alarms clear\n self.s.click_and_wait(self.info['loc_mon_alarms_clearall_button'], 5)",
"def clear_device_image_alarm(self, context):\n return self.cast(context, self.make_msg('clear_device_image_alarm'))",
"def clear_reminder(user_id=None):",
"def restore_defaults(self):\n\n pass",
"async def _alarm_room(self):\n await self._remind(alarm=True)",
"def reset(self):\n self._opts.update(self._defaults)",
"def ClearInterruptFlag(self,alarm):\n\t\tif (alarm == Alarm.ZERO):\n\t\t\ttemp = self.readRegister(ALM0WDAY)\t\t\t#Read WKDAY register for ALRAM 0 \n\t\t\ttemp &= (~ALMx_IF)\t\t\t\t\t\t\t#Clear 4-th bit \n\t\t\tself.writeRegister(ALM0WDAY,temp)\t\t\t#Enable backup battery mode\n\t\telse:\n\t\t\ttemp = self.readRegister(ALM1WDAY)\t\t\t#Read WKDAY register for ALRAM 1\n\t\t\ttemp &= (~ALMx_IF)\t\t\t\t\t\t\t#Clear 4-th bit\n\t\t\tself.writeRegister(ALM1WDAY,temp)\t\t\t#Enable backup battery mode",
"async def clear(self, ctx):\n\n await db.execute(\"Delete from Reminders where MemberID = ?\", (ctx.author.id,))\n await db.commit()\n\n await send_embed(ctx, \"Cleared all reminders.\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get station_data by providing a playlist of compatible items.
|
def _get_station_data_from_playlist(self, playlist):
for radio in playlist:
if radio.object_type not in ['tunein_radio']:
continue
station_data = self._api.get_station_data(radio.object_id)
return {
'title': station_data['title'] or '',
'description': station_data['description'] or '',
'thumbnail_url': station_data['thumbnail'] or '',
'station_url': station_data['stationurl'] or '',
}
raise ValueError('No compatible playlist items. Object type must be tunein_radio.')
|
[
"def get_similar(self):\n\n similar_url = 'http://songza.com/api/1/station/%s/similar'\n\n HEADER = {\"User-Agent\":\n \"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0)\"}\n\n\n REQUEST_KWARGS = {'headers':HEADER, 'timeout':10.0, 'allow_redirects':False}\n\n similar = requests.get(similar_url%str(self.playlistid), **REQUEST_KWARGS)\n if similar.status_code != 200:\n return None\n else:\n similar = similar.json()\n\n similar_ids = [station['id'] for station in similar]\n similar = Playlist.objects.filter(pk__in=similar_ids)\n\n return similar",
"async def get_station(station: avwx.Station) -> dict:\n await app.station.add(station.icao, \"station\")\n return asdict(station)",
"def get_playlist():\n return _playlist",
"def get_playlist_data(url):\n sourceCode = requests.get(url).content\n sourceCode = sourceCode.decode('utf-8')\n print(\"url: \", url)\n strat_idx = sourceCode.index('{\"responseContext\"')\n # end_idx = sourceCode.index(';</script><link rel=\"canonical')\n ori_texts = sourceCode[strat_idx:]\n playlist = []\n while True:\n if ori_texts.find('playlistPanelVideoRenderer') == -1:\n break\n texts = ori_texts[ori_texts.index('playlistPanelVideoRenderer'):]\n texts.index('webPageType')\n texts = texts[texts.index('{\"webCommandMetadata\":{\"url\":\"/watch'):]\n texts = texts[texts.index('/watch?'):texts.index('\",\"webPageType')]\n playlist.append(texts)\n ori_texts = ori_texts[ori_texts.index('playlistPanelVideoRenderer')+10:]\n return playlist",
"def get_playlist_data(self, playlist_id: str):\n playlist_data = self.sp.playlist(playlist_id) \n return (playlist_id, playlist_data['name'])",
"async def _get_station(self) -> Any:\n url = URL_STATION.format(self.station_id)\n return await self._async_get(url)",
"def get_stations_stream_url(station_id: int, session=None) -> TrackList:\n tracks = []\n response = call_api_tunein(station_id, session=session)\n playlist = response.get('playlist')\n api_track_list = playlist.get('trackList')\n\n api_tracks = api_track_list.get('track', [])\n\n if not isinstance(api_tracks, list):\n return TrackList([Track(api_tracks)])\n\n for item in api_tracks:\n tracks.append(Track(item))\n\n return TrackList(tracks)",
"def station_id_2_podcast_list(self, station_id, api_version=\"v1\", environment=STAGING):\n podcast_list = []\n station_id = str(station_id)\n r = \"NO DATA\"\n try:\n\n # Call out to the the API using the correct environment base url\n # and the correct api version string\n\n if api_version == \"v1\":\n\n base_url = api_base_urls[environment]\n api_call_url = \"%s/%s/podcasts?filter[station_id]=%s&page[size]=100\" % (base_url, api_version, station_id)\n r = \"NO DATA\"\n self.commLogTextArea.append(\"Calling: %s\\n----------------\\n\" % api_call_url)\n r = requests.get(api_call_url)\n if r == \"NO DATA\":\n raise ValueError(\"No data from %s\" % api_call_url)\n else:\n if r.status_code == requests.codes.ok:\n response = r.text\n self.commLogTextArea.append(\"Response:\\n%s\\n----------------\\n\" % response)\n python_data = json.loads(response)\n podcast_list = python_data[\"data\"]\n else:\n raise ValueError(\"Bad Response (%d) from %s \" % (r.status_code, api_call_url))\n\n if api_version == \"v2\":\n # V2 calls use a special header and return data differently\n # than v1 calls.\n pass\n\n # *** ********************************************************** ***\n # *** PODCAST FILTER BY STATION ID NOT YET IMPLEMENTED IN V2 API ***\n # *** ********************************************************** ***\n\n \"\"\" \n base_url = api_base_urls[environment]\n api_call_url = \"%s/%s/podcasts?filter[station_id]=%s&page[size]=100\" % (base_url, api_version, station_id)\n r = \"NO DATA\"\n r = requests.get(api_call_url, headers=api_header)\n if r == \"NO DATA\":\n raise ValueError(\"No data from %s\" % api_call_url)\n else:\n if r.status_code == requests.codes.ok:\n response = r.text\n python_data = json.loads(response)\n station_attributes = python_data[\"station\"]\n else:\n raise ValueError(\"Bad Response (%d) from %s \" % (r.status_code, api_call_url))\n \"\"\"\n\n else:\n pass\n\n except Exception as e:\n sys.stderr.write(\"ERROR -- Unable to obtain podcast information\\n\")\n sys.stderr.write(\"---------------------\\n%s\\n---------------------\\n\" % str(e))\n podcast_list = []\n finally:\n return podcast_list",
"def fetch_playlists_metadata():\n sp = get_client()\n\n from span.tasks.library import get_playlists_metadata\n\n playlist_metas = get_playlists_metadata(sp)\n\n # export data\n sys.stdout.write(jsonpickle.encode(playlist_metas))",
"def get_station_data(station_code, date=None, as_dataframe=False):\n\n station_dict = {}\n if date is None:\n date_str = 'current'\n year = datetime.date.today().year\n else:\n date = util.convert_date(date)\n date_str = date.strftime('%Y%m%d')\n year = date.year\n\n filename = '{}.{}.html'.format(station_code, date_str)\n data_url = 'http://www.swt-wc.usace.army.mil/webdata/gagedata/' + filename\n\n # requests without User-Agent header get rejected\n headers = {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'\n }\n resp = requests.get(data_url, headers=headers)\n soup = BeautifulSoup(resp.content)\n pre = soup.find('pre')\n if pre is None:\n error_msg = 'no data could be found for station code %(station_code)s and date %(date)s (url: %(data_url)s)' % {\n 'date': date,\n 'data_url': data_url,\n 'station_code': station_code,\n }\n raise ValueError(error_msg)\n sio = StringIO.StringIO(str(pre.text.strip()))\n\n first_line = sio.readline()\n split = first_line[8:].strip().split()\n\n station_dict['code'] = split[0]\n station_dict['description'] = ' '.join(split[1:])\n\n second_line = sio.readline()\n station_dict['station_type'] = second_line.strip().split(':')[1].strip()\n\n notes = []\n\n while 1:\n next_line = sio.readline()\n if ':' in next_line:\n notes.append(next_line.strip())\n else:\n break\n\n if len(notes):\n station_dict['notes'] = '\\n'.join(notes)\n\n variable_names = _split_line(sio.readline()[11:], 10)\n variable_units = _split_line(sio.readline()[11:], 10)\n variable_sources = _split_line(sio.readline()[11:], 10)\n\n station_dict['variables'] = dict([\n (name, {'unit': unit, 'source': source})\n for name, unit, source in zip(\n variable_names, variable_units, variable_sources)\n ])\n\n station_dict['timezone'] = sio.readline().strip().strip('()')\n column_names = ['datetime'] + variable_names\n widths = [14] + ([10] * len(variable_names))\n converters = dict([\n (variable_name, lambda x: float(x) if x != '----' else np.nan)\n for variable_name in variable_names\n ])\n date_parser = lambda x: _convert_datetime(x, year)\n dataframe = pandas.read_fwf(\n sio, names=column_names, widths=widths, index_col=['datetime'],\n na_values=['----'], converters=converters, parse_dates=True,\n date_parser=date_parser)\n\n # parse out rows that are all nans (e.g. end of \"current\" page)\n dataframe = dataframe[~np.isnan(dataframe.T.sum())]\n\n if as_dataframe:\n station_dict['values'] = dataframe\n else:\n station_dict['values'] = util.dict_from_dataframe(dataframe)\n\n return station_dict",
"def download_songs(self, playlists, output_folder:str):\n if self.token :\n #init file and spotipy and create an output folder for all of the song files\n self.create_json_folder(output_folder)\n sp = spotipy.Spotify(auth =self.token)\n song_data = open('songs.json', 'w')\n songs_arr = []\n songs_dict = {}\n\n #iteratre through playlists, get tracks\n for playlist in playlists :\n print('playlist: ', playlist['name'])\n tracks = sp.user_playlist_tracks(self.username, playlist['id'])\n for i, track in enumerate(tracks['items']):\n if track['track']['id'] not in songs_dict:\n features = sp.audio_features(track['track']['id'])[0]\n song_info = {'album' : track['track']['album']['name'],\n 'artists' : track['track']['artists'][0]['name'],\n 'duration_ms' : track['track']['duration_ms'],\n 'episode' : track['track']['episode'],\n 'external_urls' : track['track']['external_urls'],\n 'id' : track['track']['id'],\n 'name' : track['track']['name'],\n 'popularity' : track['track']['popularity'],\n 'type' : track['track']['type'],\n 'playlists' : [playlist['id']],\n 'key' : features['key'],\n 'mode': features['mode'],\n 'acousticness' : features['acousticness'],\n 'danceability' : features['danceability'],\n 'energy' : features['energy'],\n 'instrumentalness' : features['instrumentalness'],\n 'liveness' : features['liveness'],\n 'speechiness' : features['speechiness'],\n 'valence': features['valence'],\n 'tempo' : features['tempo']}\n if not song_info['id'] :\n print (song_info['name'])\n songs_dict[track['track']['id']] = song_info\n print(i, ' ', track['track']['artists'][0]['name'], track['track']['name'])\n else:\n songs_dict[track['track']['id']]['playlists'].append(playlist['id'])\n print(i, ' ', track['track']['artists'][0]['name'], track['track']['name'])\n\n #place in dict, convert to json, save json\n print(len(songs_dict))\n # Write each individual song to its own json file\n song_file_array = self.write_indiv_json(output_folder, songs_dict)\n print(\"Number of files we got: \", len(song_file_array))\n return None\n else :\n print(\"Can't get token for\", self.username)\n return None",
"def get_station_data_sets(\n self,\n stations: list,\n ):\n result = None\n if stations is not None:\n sql = \"\"\"\n select ds.id, ds.expocode,\n count(distinct st.id) as station_count,\n min(d.date_and_time) as first_station\n from d2qc_stations st\n inner join d2qc_data_sets ds on (st.data_set_id = ds.id)\n inner join d2qc_casts c on (c.station_id = st.id)\n inner join d2qc_depths d on (d.cast_id = c.id)\n where st.id in ({})\n group by ds.id\n order by first_station\n \"\"\".format(\n DataSet._in_stations(stations)\n )\n result = DataSet._fetchall_query(sql)\n\n return result",
"def plays_by_session_and_item():\n\n cluster = Cluster()\n session = cluster.connect()\n\n rows = session.execute(\n 'SELECT artist, song_title, song_len FROM sparkify.plays_by_session_and_item WHERE session_id=338 AND item_in_session=4')\n\n session.shutdown()\n cluster.shutdown()\n\n # We are only expecting one row here\n for row in rows:\n return {\"artist\": row[0], \"song_title\": row[1], \"song_len\": row[2]}\n\n return {}",
"def get_spotify_playlist(self, title):\n\n # Check if playlist already exists\n users_playlists = self.sp.user_playlists(self.username)\n for playlist in users_playlists['items']:\n if playlist['name'] == title:\n return playlist['external_urls']['spotify'] #Return URL not URI so that it can be passed to the user. playlist['uri'] also works.\n\n # Create new playlist if needed\n playlist = self.sp.user_playlist_create(self.username, title)\n return playlist['external_urls']['spotify'] #Return URL not URI so that it can be passed to the user. playlist['uri'] also works.",
"def generatePlaylistsAndSongs(self) -> tuple:\n playlistsDict = {}\n songsDict = {}\n for path in glob(\"{}/*.wav\".format(WAV_DIRECTORY_PATH)):\n title = Path(path).stem\n if title in self.collectedData[\"Songs data\"]:\n data = self.collectedData[\"Songs data\"][title]\n addedDate = data[\"Added date\"]\n timesPlayed = data[\"Number of times played\"]\n hoursPlayed = data[\"Number of hours played\"]\n numberOfPlaylist = data[\"Number of playlists it is in\"]\n else:\n # If a new .wav file has been added to the directory ./wavFiles, its stats will be initialized\n addedDate = str(date.today())\n timesPlayed = 0\n hoursPlayed = 0\n numberOfPlaylist = 0\n self.playlistsData[\"All\"].append(title)\n\n songsDict[title] = Song(title, addedDate, timesPlayed, hoursPlayed, numberOfPlaylist)\n\n for title in self.playlistsData:\n songs = self.playlistsData[title]\n songsObjects = [songsDict[name] for name in songs]\n data = self.collectedData[\"Playlists data\"][title]\n creationDate = data[\"Creation date\"]\n timesPlayed = data[\"Number of times played\"]\n hoursPlayed = data[\"Number of hours played\"]\n firstSong = data[\"First song\"]\n changesHistory = data[\"Changes history\"]\n playlistsDict[title] = Playlist(title, creationDate, timesPlayed, hoursPlayed,\n songsObjects, firstSong, changesHistory)\n\n return playlistsDict, songsDict",
"def read_single_station(d, station_info, date):\n nztz = timezone('Pacific/Auckland')\n date_nz = nztz.localize(datetime.datetime(date.year, date.month,\n date.day, 6, 0, 0))\n timeshift = int(date_nz.utcoffset().seconds/3600.)\n datestr = '{:d}-{:02d}-{:02d}'.format(date.year, date.month, date.day)\n\n # Read the raw data\n if station_info['files']['raw'] is None:\n # There is no point continuing if we don't have any raw data\n msg = \"INFO 01: No raw data for:\\n\"\n msg += \"-->Station: {}\\n\".format(station_info['stationID'])\n msg += \"-->Date: {}\\n\".format(str(date))\n logging.info(msg)\n return\n\n e0 = d.read(station_info['files']['raw'],\n ftype='minidoas-raw', timeshift=timeshift)\n ib = InstrumentBuffer(name=station_info['stationID'],\n location=station_info['stationLoc'],\n no_bits=16,\n type='MiniDOAS')\n i = d.new(ib)\n try:\n rdt = d.elements['RawDataType'][0]\n except:\n rdt = d.new(e0['RawDataTypeBuffer'])\n rb = e0['RawDataBuffer']\n rb.type = rdt\n rb.instrument = i\n rb.target = station_info['target']\n lat = np.ones(rb.d_var.shape[0])*station_info['lat']\n lon = np.ones(rb.d_var.shape[0])*station_info['lon']\n elev = np.ones(rb.d_var.shape[0])*station_info['elev']\n bearing = np.ones(rb.d_var.shape[0])*np.rad2deg(station_info['bearing'])\n rb.position = np.array([lon, lat, elev]).T\n rb.bearing = bearing\n rb.inc_angle_error = np.ones(rb.d_var.shape[0])*0.013127537*180./np.pi\n rr = d.new(rb)\n\n # Read the concentration\n if station_info['files']['spectra'] is None:\n msg = \"INFO 02: No concentration (i.e. spectra) data for:\\n\"\n msg += \"-->Station: {}\\n\".format(station_info['stationID'])\n msg += \"-->Date: {}\\n\".format(str(date))\n logging.info(msg)\n return\n\n # First read in the smoothed version of the concentration\n # which the subsequent computation of flux values is\n # based on\n e1 = d.read(station_info['files']['spectra'],\n date=datestr, ftype='minidoas-spectra',\n timeshift=timeshift, model=True)\n cb = e1['ConcentrationBuffer']\n idxs = np.zeros(cb.value.shape)\n for i in range(cb.value.shape[0]):\n idx = np.argmin(np.abs(rr.datetime[:].astype('datetime64[ms]')\n - cb.datetime[i].astype('datetime64[ms]')))\n idxs[i] = idx\n cb.rawdata = [rr]\n cb.rawdata_indices = idxs\n cb.method = station_info['widpro_method']\n cb.user_notes = 'smoothed path concentration'\n cc = d.new(cb)\n\n # Now read in the original path concentration\n # to keep as a reference\n e2 = d.read(station_info['files']['spectra'],\n date=datestr, ftype='minidoas-spectra',\n timeshift=timeshift)\n cb2 = e2['ConcentrationBuffer']\n idxs = np.zeros(cb2.value.shape)\n for i in range(cb.value.shape[0]):\n idx = np.argmin(np.abs(rr.datetime[:].astype('datetime64[ms]')\n - cb2.datetime[i].astype('datetime64[ms]')))\n idxs[i] = idx\n cb2.rawdata = [rr]\n cb2.rawdata_indices = idxs\n cb2.method = station_info['widpro_method']\n cb2.user_notes = 'original path concentration'\n\n # Read in the flux estimates for assumed height\n if station_info['files']['flux_ah'] is None:\n msg = \"INFO 03: No assumed height flux data for:\\n\"\n msg += \"-->Station: {}\\n\".format(station_info['stationID'])\n msg += \"-->Date: {}\\n\".format(str(date))\n logging.info(msg)\n else:\n e3 = d.read(station_info['files']['flux_ah'],\n date=datestr, ftype='minidoas-scan',\n timeshift=timeshift)\n fb = e3['FluxBuffer']\n dt = fb.datetime[:].astype('datetime64[s]')\n indices = []\n for _dt in dt:\n idx = np.argmin(np.abs(cc.datetime[:].astype('datetime64[us]')\n - _dt))\n idx0 = idx\n while True:\n angle = 
rr.inc_angle[cc.rawdata_indices[idx]+1]\n if angle > 180.:\n break\n idx += 1\n idx1 = idx\n indices.append([idx0, idx1+1])\n fb.concentration = cc\n fb.concentration_indices = indices\n\n gfb1 = e3['GasFlowBuffer']\n\n m2 = None\n for _m in d.elements['Method']:\n if _m.name[:] == 'WS2PV':\n m2 = _m\n if m2 is None:\n mb2 = e3['MethodBuffer']\n m2 = d.new(mb2)\n\n gfb1.methods = [m2]\n gf1 = d.new(gfb1)\n fb.gasflow = gf1\n f = d.new(fb)\n # Now read in preferred flux values for assumed\n # height downloaded from FITS\n if station_info['files']['fits_flux_ah'] is None:\n msg = \"ERROR 01: No preferred flux for assumed height in FITS:\\n\"\n msg += \"-->Station: {}\\n\".format(station_info['stationID'])\n msg += \"-->Date: {}\\n\".format(str(date))\n logging.error(msg)\n else:\n data_ah = np.loadtxt(station_info['files']['fits_flux_ah'],\n dtype=np.dtype([('date', 'S19'),\n ('val', np.float),\n ('err', np.float)]),\n skiprows=1, delimiter=',', ndmin=1)\n dates = data_ah['date'].astype('datetime64[s]')\n indices = []\n values = []\n val_err = []\n ndates = []\n for i, dt in enumerate(dates):\n min_tdiff = np.min(np.abs(f.datetime[:].astype('datetime64[s]')\n - dt))\n if min_tdiff.astype('int') > 1:\n msg = \"ERROR 02: No assumed height flux estimate can be\"\n msg += \" found for FITS value:\\n\"\n msg += \"-->Station: {}\\n\".format(station_info['stationID'])\n msg += \"-->Date: {}\\n\".format(str(dt))\n msg += \"-->FITS value: {}\\n\".format(data_ah['val'][i])\n logging.error(msg)\n else:\n idx = np.argmin(np.abs(f.datetime[:].\n astype('datetime64[s]') - dt))\n indices.append(idx)\n values.append(data_ah['val'][i])\n val_err.append(data_ah['err'][i])\n ndates.append(str(dt))\n if len(indices) > 0:\n pfb = PreferredFluxBuffer(fluxes=[f],\n flux_indices=[indices],\n value=values,\n value_error=val_err,\n datetime=ndates)\n d.new(pfb)\n\n # Read in the flux estimates for calculated height\n if station_info['files']['flux_ch'] is None:\n msg = \"INFO 04: No calculated height flux data for:\\n\"\n msg += \"-->Station: {}\\n\".format(station_info['stationID'])\n msg += \"-->Date: {}\\n\".format(str(date))\n logging.info(msg)\n else:\n e4 = d.read(station_info['files']['flux_ch'],\n date=datestr, ftype='minidoas-scan',\n station=station_info['wp_station_id'],\n timeshift=timeshift)\n fb1 = e4['FluxBuffer']\n dt = fb1.datetime[:].astype('datetime64[s]')\n indices = []\n for _dt in dt:\n idx = np.argmin(np.abs(cc.datetime[:].astype('datetime64[us]')\n - _dt))\n idx0 = idx\n while True:\n angle = rr.inc_angle[cc.rawdata_indices[idx]+1]\n if angle > 180.:\n break\n idx += 1\n idx1 = idx\n indices.append([idx0, idx1])\n fb1.concentration = cc\n fb1.concentration_indices = indices\n\n m3 = None\n for _m in d.elements['Method']:\n if _m.name[:] == 'WS2PVT':\n m3 = _m\n if m3 is None:\n mb3 = e4['MethodBuffer']\n new_description = mb3.description[0]\n new_description += '; plume geometry inferred from triangulation'\n mb3.description = new_description\n mb3.name = 'WS2PVT'\n m3 = d.new(mb3)\n\n gfb2 = e4['GasFlowBuffer']\n gfb2.methods = [m3]\n gf2 = d.new(gfb2)\n fb1.gasflow = gf2\n f1 = d.new(fb1)\n\n # Now read in preferred flux values for calculated\n # height downloaded from FITS\n if station_info['files']['fits_flux_ch'] is None:\n msg = \"ERROR 01: No preferred flux for\"\n msg = \" calculated height in FITS:\\n\"\n msg += \"-->Station: {}\\n\".format(station_info['stationID'])\n msg += \"-->Date: {}\\n\".format(str(date))\n logging.error(msg)\n else:\n data_ch = 
np.loadtxt(station_info['files']['fits_flux_ch'],\n dtype=np.dtype([('date', 'S19'),\n ('val', np.float),\n ('err', np.float)]),\n skiprows=1, delimiter=',', ndmin=1)\n dates = data_ch['date'].astype('datetime64[s]')\n indices = []\n values = []\n val_err = []\n ndates = []\n for i, dt in enumerate(dates):\n min_tdiff = np.min(np.abs(f1.datetime[:].\n astype('datetime64[s]') - dt))\n if min_tdiff.astype('int') > 1:\n msg = \"ERROR 02: No calculated height flux estimate can be\"\n msg = \" found for FITS value:\\n\"\n msg += \"-->Station: {}\\n\".format(station_info['stationID'])\n msg += \"-->Date: {}\\n\".format(str(dt))\n msg += \"-->FITS value: {}\\n\".format(data_ah['val'][i])\n logging.error(msg)\n else:\n idx = np.argmin(np.abs(f1.datetime[:].\n astype('datetime64[s]') - dt))\n indices.append(idx)\n values.append(data_ch['val'][i])\n val_err.append(data_ch['err'][i])\n ndates.append(str(dt))\n if len(indices) > 0:\n pfb1 = PreferredFluxBuffer(fluxes=[f1],\n flux_indices=[indices],\n value=values,\n value_error=val_err,\n datetime=ndates)\n d.new(pfb1)",
"def fetch_playlist(id: str):\n sp = get_client()\n\n from span.tasks.library import get_playlist_from_id\n\n playlist = get_playlist_from_id(sp, id)\n\n # export data\n sys.stdout.write(jsonpickle.encode(playlist))",
"def get_playlists(self, search, start=0, max_items=100):\r\n return self.get_music_service_information('playlists', search, start,\r\n max_items)",
"def get_stations(self):\n\n status, data = self.http_client.get_json(\n STATIONS_URI,\n params={'appid': self.API_key},\n headers={'Content-Type': 'application/json'})\n return [Station.from_dict(item) for item in data]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Helper to convert the speaker's hex representation of weekdays into a list of integers representing weekdays.
|
def hexweek_to_weekday_list(hexweek):
intweek = int(hexweek, 16)
# Mon, Tue, Wed, Thu, Fri, Sat, Sun
weekday_bits = [32, 16, 8, 4, 2, 1, 64]
return [weekday for weekday, weekday_bit in enumerate(weekday_bits) if intweek & weekday_bit]
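# Illustrative check, assuming the bit layout above: Mon-Fri is
# 32+16+8+4+2 = 62 = 0x3e, and Sunday alone is the 64 bit.
assert hexweek_to_weekday_list('0x3e') == [0, 1, 2, 3, 4]
assert hexweek_to_weekday_list('0x40') == [6]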
|
[
"def weekday_list_to_hexweek(weekday_list):\n # Mon, Tue, Wed, Thu, Fri, Sat, Sun\n weekday_bits = [32, 16, 8, 4, 2, 1, 64]\n weekday_list = set(weekday_list)\n\n return hex(sum([weekday_bits[weekday] for weekday in weekday_list]))",
"def get_weekday_time() -> list:\n return [DAYS[date.today().weekday()], get_utc_time()]",
"def convert_to_hexadecimal_list(list): \n for i in range (0, len(list)):\n list[i] = hex(int(list[i]))\n return list",
"def get_daysOfWeek_enum(self, days_of_week):\n\n days_of_week_enum = []\n for day in days_of_week:\n if day in utils.DayOfWeekEnum.__members__:\n days_of_week_enum.append(utils.DayOfWeekEnum[day])\n else:\n errormsg = \"Invalid choice {0} for days of week\".format(day)\n LOG.error(errormsg)\n self.module.fail_json(msg=errormsg)\n return days_of_week_enum",
"def dayListNums(self):\n NthDays = self.genSeq()\n daysList = []\n start = datetime.datetime(self.year,self.month,self.day)\n for x in range(len(NthDays)):\n nth = datetime.timedelta(days=NthDays[x])\n newDate = start + nth\n daysList += [datetime.date(newDate.year,newDate.month,newDate.day).weekday()]\n return daysList",
"def convert_hex_to_list(inputstring):\n output = []\n if not inputstring:\n inputstring = \"000000\"\n inputstring = str(inputstring).zfill(6)\n for i in xrange(0, len(inputstring), 2): # step through every 2 chars\n output.append(int(inputstring[i:i+2], 16))\n # convert from base 16 (hex) to int\n return output",
"def get_weekdays(calendar_output: str):\n cal = {}\n for line in calendar_output.splitlines():\n if not line_contains_int_days(line):\n continue\n f = reversed if line[1] == \" \" else lambda x: x\n days = get_int_days(line)\n weekday_cycle = cycle(f(WEEKDAYS))\n for day in f(days):\n cal[day] = next(weekday_cycle)\n return cal",
"def normalize_fen(fen):\n result = []\n for char in fen:\n if char in const.piece_legend:\n result.append(const.piece_legend[char])\n elif is_int(char):\n for _ in range(int(char)):\n result.append(0)\n elif char == '/':\n continue\n else:\n raise ValueError(\"Not a valid character!\")\n return result",
"def get_days_abbrs():\n \n result = []\n result = ['Пн', 'Вт', 'Ср', 'Чт', 'Пт', 'Сб', 'Вс']\n # If it possible get days abbrs in system language\n try:\n with TimeEncoding(\"%s.%s\" % getdefaultlocale()) as time_enc:\n for i in range(7):\n result.append(day_abbr[i].decode(time_enc)) \n except:\n for i in range(7):\n result.append(day_abbr[i])\n \n return result",
"def weekday_ordinal(wkday):\n done = False\n while not done:\n try:\n rval = weekday_ordinal.spread.index(wkday[0:2])\n done = True\n except AttributeError:\n weekday_ordinal.spread = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su']\n return rval + 1",
"def iterweekdays(self):\r\n for i in xrange(self.firstweekday, self.firstweekday + 7):\r\n yield i%7",
"def get_attendances_for_weekdays(self, weekdays):\n\n calendar = self.browse(1)\n return [att for att in calendar.attendance_ids\n if int(att.dayofweek) in weekdays]",
"def bytes_to_hex_list(data: bytes, bpl: int) -> List[str]:\n j = len(data)\n k: int = (j + bpl - 1) // bpl\n return [bytes_to_hex(data[bpl * i: min(j, bpl * (i + 1))]) for i in range(k)]",
"def get_schedule_slots_by_weekdays(self, weekdays):\n return # osid.calendaring.ScheduleSlotList",
"def _get_weekday_str(weekdays):\n if isinstance(weekdays, int):\n days = [int_to_weekday(weekdays)]\n else:\n days = [int_to_weekday(day) for day in weekdays]\n\n weekday_str = u''\n\n for i, d in enumerate(days):\n weekday_str += d\n\n if i == 0 and len(days) == 1:\n weekday_str += u' '\n\n if i < len(days) - 2:\n weekday_str += u', '\n elif i == len(days) - 2:\n weekday_str += _(' and ')\n\n return weekday_str",
"def weekdays(self):\n return self.filter(lambda date: date.weekday() not in [5, 6])",
"def get_weekdays(day):\r\n \r\n day_info = day.split(\"/\")\r\n anyday = datetime(int(day_info[2]), int(day_info[1]), int(day_info[0])).strftime(\"%w\")\r\n # print(anyday)\r\n \r\n return anyday",
"def __dates2days(self):\n\n days = []\n holidays = self.holidays.copy()\n for date_ in self.dates:\n holiday = None\n if len(holidays) > 0 and holidays[0].day == str(date_.day):\n holiday = holidays[0]\n del holidays[0]\n\n # weekday 5 means Saturday\n dayoff = holiday is not None or date_.weekday() >= 5\n days.append(Day(date_, holiday, dayoff))\n return days",
"def convert_str(message: str) -> [int]:\r\n new_list = []\r\n for character in message:\r\n new_list.append(ord(character))\r\n return new_list"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Helper to convert a list of integers representing weekdays into the speaker's hex representation of weekdays.
|
def weekday_list_to_hexweek(weekday_list):
# Mon, Tue, Wed, Thu, Fri, Sat, Sun
weekday_bits = [32, 16, 8, 4, 2, 1, 64]
weekday_list = set(weekday_list)
return hex(sum([weekday_bits[weekday] for weekday in weekday_list]))
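# Illustrative round trip with the companion hexweek_to_weekday_list, assuming
# the same bit layout: Mon(0), Wed(2), Sat(5) -> 32 + 8 + 1 = 41 = 0x29.
assert weekday_list_to_hexweek([0, 2, 5]) == '0x29'
assert hexweek_to_weekday_list('0x29') == [0, 2, 5]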
|
[
"def convert_to_hexadecimal_list(list): \n for i in range (0, len(list)):\n list[i] = hex(int(list[i]))\n return list",
"def hexweek_to_weekday_list(hexweek):\n intweek = int(hexweek, 16)\n\n # Mon, Tue, Wed, Thu, Fri, Sat, Sun\n weekday_bits = [32, 16, 8, 4, 2, 1, 64]\n\n return [weekday for weekday, weekday_bit in enumerate(weekday_bits) if intweek & weekday_bit]",
"def uwp_hex (uwp_list): #input UWP list\r\n\tuwp=[]\r\n\tuwp.append(uwp_list[0])\r\n\tuwp.append(stellagama.pseudo_hex(uwp_list[1]))\r\n\tuwp.append(stellagama.pseudo_hex(uwp_list[2]))\r\n\tuwp.append(stellagama.pseudo_hex(uwp_list[3]))\r\n\tuwp.append(stellagama.pseudo_hex(uwp_list[4]))\r\n\tuwp.append(stellagama.pseudo_hex(uwp_list[5]))\r\n\tuwp.append(stellagama.pseudo_hex(uwp_list[6]))\r\n\tuwp.append(stellagama.pseudo_hex(uwp_list[7]))\r\n\tuwp_string =\"%s%s%s%s%s%s%s-%s \" % (uwp[0],uwp[1],uwp[2],uwp[3],uwp[4],uwp[5],uwp[6],uwp[7])\r\n\treturn uwp_string #output Cepheus-style UWP string\r",
"def to_Hex(rgb_list):\n return \"\".join(map(lambda x: hex(x)[2:].rjust(2, \"0\"), rgb_list)).upper()",
"def hexdump(l: list):\n return ' '.join('{:02x}'.format(x) for x in l)",
"def rgb_list_to_hex_list(rgb_list):\n\n return [rgb_to_hex(rgb_value) for rgb_value in rgb_list]",
"def int_to_text_converter(int_list):\n return [int_to_char[num] for num in int_list]",
"def _get_weekday_str(weekdays):\n if isinstance(weekdays, int):\n days = [int_to_weekday(weekdays)]\n else:\n days = [int_to_weekday(day) for day in weekdays]\n\n weekday_str = u''\n\n for i, d in enumerate(days):\n weekday_str += d\n\n if i == 0 and len(days) == 1:\n weekday_str += u' '\n\n if i < len(days) - 2:\n weekday_str += u', '\n elif i == len(days) - 2:\n weekday_str += _(' and ')\n\n return weekday_str",
"def to_hex(arr):\n return ''.join(chr(b) for b in arr)",
"def bin2hex(data):\n data = re.findall(r'[0-1]{4}',''.join(data))\n return map(lambda x: '{0:X}'.format(int(x,2)) , data )",
"def wordlist_to_hex(wlist):\n\n wlist = str(wlist)\n if int(sys.version_info.major) == 2:\n wlist = unicode(wlist)\n wlist = unicodedata.normalize('NFC',wlist)\n wlist = str(wlist).lower()\n if \" \" in wlist:\n wlist = wlist.replace(\" \",\"zzzzzzzz\").replace(\" \",\"\").replace(\"zzzzzzzz\",\" \")\n try:\n wordlistarray = str(wlist).split(\" \")\n if wordlistarray[0] == \"\":\n wordlistarray.pop(0)\n except:\n raise TypeError(\"Please make sure the input is a str of words, each separated by a single space, with no punctuation.\")\n if len(wordlistarray) != 12:\n raise TypeError(\"Electrum version 1 word lists are exactly 12 words long, your list has a length of \" + str(len(wordlistarray)))\n for word in wordlistarray:\n if word not in ElectrumWallet_V1.ELECTRUM_ENG_V1_WORDLIST:\n raise TypeError(\"Word: '\" + str(word) + \"' is not in the Electrum V1 wordlist. Check spelling maybe.\")\n wlist = str(wlist).replace(\"\\n\",\"\").replace(\"\\r\",\"\")\n wlist = wlist.split()\n output = ''\n for i in range(int(len(wlist) // 3)):\n word1, word2, word3 = wlist[3*i:3*i+3]\n w1 = ElectrumWallet_V1.ELECTRUM_ENG_V1_WORDLIST.index(word1)\n w2 = (ElectrumWallet_V1.ELECTRUM_ENG_V1_WORDLIST.index(word2)) % ElectrumWallet_V1.NUMBER_OF_WORDS\n w3 = (ElectrumWallet_V1.ELECTRUM_ENG_V1_WORDLIST.index(word3)) % ElectrumWallet_V1.NUMBER_OF_WORDS\n x = w1 + ElectrumWallet_V1.NUMBER_OF_WORDS*((w2-w1) % ElectrumWallet_V1.NUMBER_OF_WORDS) + ElectrumWallet_V1.NUMBER_OF_WORDS*ElectrumWallet_V1.NUMBER_OF_WORDS*((w3-w2) % ElectrumWallet_V1.NUMBER_OF_WORDS)\n output += '%08x'%x\n output = hexlify_(binascii.unhexlify(output))\n assert len(output) == 32\n return str(output)",
"def getId(l: list) -> str:\n h = \"0x\"+\"\".join(l)\n i = int(h, 16)\n return f\"{h} ({str(i)})\"",
"def hex(self: bitlist) -> str:\n return self.to_bytes().hex()",
"def format_bytes_as_hex(_bytes):\n out = \"\"\n for _int in _bytes:\n out = out + f\"{_int:02x} \"\n return out.upper().strip()",
"def array_to_hex(byte_arr):\n\n # Uses list comprehension which is a fractionally faster implementation than\n # the alternative, more readable, implementation below\n # \n # hex = []\n # for aChar in byte_arr:\n # hex.append( \"%02X \" % aChar )\n #\n # return ''.join( hex ).strip() \n\n return ''.join([\"%02X\" % x for x in byte_arr]).strip()",
"def hex_in_string(bytes_to_show):\n return ''.join('0x{:02x} '.format(letter) for letter in bytes_to_show)",
"def form_hex(dense_hash):\n return ''.join([format(number, '02x') for number in dense_hash])",
"def hex_list_to_bytes(hlist: List[str]) -> bytes:\n return b''.join([hex_to_bytes(h) for h in hlist])",
"def inttohex(int_):\n\tif int_ >= 0:\n\t\treturn (\"{0:0>4s}\".format(hex(int_ % (1 << 16))[2:])).upper()\n\telse:\n\t\treturn (hex((int_ + (1 << 16)) % (1 << 16)).upper()[2:]).upper()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sorts the grid by a specified metric
|
def sort_grid(self, metric=''):
Point.sort_by = metric
self.grid = sorted(self.grid)
self.sorted_by = metric
return True
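# A minimal sketch of the Point contract sort_grid relies on: the class-level
# sort_by name selects the attribute that __lt__ compares (the attribute names
# used here are hypothetical).
class Point:
    sort_by = ''

    def __init__(self, mmi, pga):
        self.mmi = mmi
        self.pga = pga

    def __lt__(self, other):
        return getattr(self, Point.sort_by) < getattr(other, Point.sort_by)

points = [Point(5.1, 0.30), Point(4.2, 0.12)]
Point.sort_by = 'mmi'
assert [p.mmi for p in sorted(points)] == [4.2, 5.1]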
|
[
"def _sort(self, units, param):\n return np.array(sorted(units, key=lambda unit: unit.get(param),\n reverse=not self.winner))",
"def _sort_by(self, criteria):\n log.info('Sorting kernels by {}')\n assert self._select_drop_down('sort', criteria)",
"def sort(self):",
"def sort_species(species):\n from .. import periodic_table as pt\n def sortme(args):\n if args not in pt.__dict__: return 20\n specie = pt.__dict__[args]\n return (specie.column + specie.row * 0.01) \n return sorted(species, key=sortme)",
"def topo_sort(self):",
"def sql_rank_order(metric):\n if greater_is_better(metric):\n return \"desc\"\n else:\n return \"asc\"",
"def handle_sorting(self) -> None:\n self.subtrees.sort(key=lambda x: x.weight, reverse=True)",
"def sort_by_fitness(self):\n self.island.sort_all_animals_by_fitness()",
"def _sort_plots(self):\n pass",
"def sort_boxes_on_y_cm(self):\n self.boxes.sort( key=lambda box: box.y_cm)",
"def sortby(tree, col, descending):\n # grab values to sort\n data = [(tree.set(child, col), child) \\\n for child in tree.get_children('')]\n # if the data to be sorted is numeric change to float\n #data = change_numeric(data)\n # now sort the data in place\n data.sort(reverse=descending)\n for ix, item in enumerate(data):\n tree.move(item[1], '', ix)\n # switch the heading so it will sort in the opposite direction\n tree.heading(col, command=lambda col=col: sortby(tree, col, \\\n int(not descending)))",
"def sort(self,attr):\n reverse = True\n if attr == 'log2_fold_change':\n # Sorts largest to smallest\n reverse = True\n elif attr == 'p_value':\n # Sorts smallest to largest\n reverse = False\n else:\n # Unknown attribue\n raise AttributeError(\"Unknown attribute '%s'\" % attr)\n self.exons.sort(key=attrgetter(attr),reverse=reverse)",
"def _sort(self):\n\t\tfor node in self.nodes_by_size:\n\t\t\tnode.resort()\n\t\tself.nodes_by_size.sort(\n\t\t\tkey=lambda node: node.used / node.capacity,\n\t\t\treverse=True)",
"def _sort_plots(self):\n self.plots.sort(_cmp_plot)",
"def clickon(self, event):\n self._sort_by(self.columns.index(event.widget['text']))",
"def sort_by_fitness(self):\n self.individuals = sorted(self.individuals, key=lambda ii: self.IndividualClass.get_fitness(ii), reverse=True)",
"def sort_by_weights(self, increasing = True):\r\n self.neighbors.sort(key = lambda x: x[1], reverse = not increasing)",
"def do_sort(pixels, sort_func, reverse=False):\n pos = [pixel[\"pos\"] for pixel in pixels]\n col = [pixel[\"col\"] for pixel in pixels]\n\n sorted_col = sort_func(col, reverse)\n\n #combine back to positions\n out = [{\"pos\":pos, \"col\":col} for (pos,col) in zip(pos,sorted_col)]\n return out",
"def sort_by_crit(self):\r\n prev_ind = 0\r\n crit_prev = 0\r\n crit_curr = 0\r\n tasklen = len(self.taskset)\r\n prio_indices = []\r\n new_taskset = copy(self.taskset)\r\n new_taskset.sort(lambda x: x.crit, reverse=True)\r\n for i in range(tasklen):\r\n crit_curr = new_taskset[tasklen].crit\r\n if crit_curr != crit_prev:\r\n prio_indices.append((prev_ind, i))\r\n crit_prev = crit_curr\r\n prev_ind = i\r\n for ind in prio_indices:\r\n new_taskset[ind[0]:ind[1]] = sorted(new_taskset[ind[0]:ind[1]], key=lambda x: x.dl_lo, reverse=True)\r\n return new_taskset",
"def sort_items(self):\n print('Sorting items')\n for timestamp, border_measures in self.report_dict.items():\n new_border_measures = OrderedDict(sorted(border_measures.items(),\n key=lambda x: [x[1]['sum'], x[0][1], x[0][0]],\n reverse=True)\n )\n self.report_dict[timestamp] = new_border_measures"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates a grid object from a specific ShakeMap
|
def create_grid(shakemap=None):
grid = ShakeMapGrid()
grid_location = os.path.join(shakemap.directory_name, 'grid.xml')
grid.load(grid_location)
return grid
|
[
"def _add_manual_grid_mapping(): \n data = {}\n data['crs'] = ncout.createVariable('crs', np.dtype('c').char)\n utils.addGridMappingVars(data['crs'], locationLat, locationLong, rotation)",
"def make_snakes(map,coords,surface=None):\r\n elements = {}\r\n snakes = []\r\n n = len(coords[0])\r\n for x in xrange(n):\r\n for y in xrange(n):\r\n v = coords[x][y]\r\n set_elements = SNAKE_VALUES\r\n if v in set_elements:\r\n if v not in elements:\r\n elements[v] = []\r\n se = SnakeElement(x, y)\r\n elements[v].append(se)\r\n for v in elements:\r\n elms = elements[v]\r\n s = Snake(map,v,elms,surface=surface)\r\n for e in elms:\r\n e.set_snake(s)\r\n snakes.append(s)\r\n # reorder the elements\r\n for s in snakes:\r\n for e in s.elements:\r\n e.reorder() # assign them an 'order'\r\n s.elements = sorted(s.elements, key=lambda se: se.order) # sort by order\r\n del elements # is this our leak\r\n return snakes",
"def gridworld_gen_objects_terrain(file, grass_file, road_file, right_end, left_end, upper_end, lower_end, grid_space):\r\n x_state_num = int((right_end - left_end)/grid_space)\r\n y_state_num = int((upper_end - lower_end)/grid_space)\r\n gridworld = np.zeros((x_state_num*y_state_num,1))\r\n point_cloud = open(file,'r')\r\n grass_image = np.load(grass_file)\r\n road_image = np.load(road_file)\r\n for j in range(grass_image.shape[0]):\r\n for i in range(grass_image.shape[1]):\r\n if grass_image[j,i] == 100:\r\n jth = int((i*0.2-307.7+198.75-21+300)//2.5)\r\n ith = int((j*0.2-109.3+473.75+23)//2.5)\r\n if ith >= 0 and ith < x_state_num and jth >=0 and jth < y_state_num:\r\n gridworld[jth*x_state_num + ith] = 8\r\n for line in point_cloud:\r\n line_split = line.split(\" \")\r\n if line_split[0] == \"g\":\r\n if line_split[1].startswith('Barn1'):\r\n home1 = True\r\n home2 = False\r\n boat = False\r\n villa1 = False\r\n villa2 = False\r\n brick = False\r\n wood = False\r\n elif line_split[1].startswith('Cabin1_DM'):\r\n home1 = False\r\n home2 = True\r\n boat = False\r\n villa1 = False\r\n villa2 = False\r\n brick = False\r\n wood = False\r\n elif line_split[1].startswith('Prop_Ship_A'):\r\n boat = True\r\n home1 = False\r\n home2 = False\r\n villa1 = False\r\n villa2 = False\r\n brick = False\r\n wood = False\r\n elif line_split[1].startswith('Villa1'):\r\n boat = False\r\n home1 = False\r\n home2 = False\r\n villa1 = True\r\n villa2 = False\r\n brick = False\r\n wood = False\r\n elif line_split[1].startswith('Villa2'):\r\n boat = False\r\n home1 = False\r\n home2 = False\r\n villa1 = False\r\n villa2 = True\r\n brick = False\r\n wood = False\r\n elif line_split[1].startswith('BrickHouse'):\r\n boat = False\r\n home1 = False\r\n home2 = False\r\n villa1 = False\r\n villa2 = False\r\n brick = True\r\n wood = False\r\n elif line_split[1].startswith('Struct_WoodPath') or line_split[1].startswith('Pavement') or line_split[1].startswith('Struct_Fence1_Gate') or line_split[1].startswith('Struct_Docking') or line_split[1].startswith('TreeCreator_Crinkly'):\r\n boat = False\r\n home1 = False\r\n home2 = False\r\n villa1 = False\r\n villa2 = False\r\n brick = False\r\n wood = True\r\n else:\r\n boat = False\r\n home1 = False\r\n home2 = False\r\n villa1 = False\r\n villa2 = False\r\n brick = False\r\n wood = False\r\n elif line_split[0] == \"v\":\r\n x = float(line_split[1])\r\n z = float(line_split[3])\r\n if ((x < right_end and x>left_end) and\r\n (z < upper_end and z > lower_end)):\r\n ith = int((x - left_end) // grid_space)\r\n jth = int((z - lower_end) // grid_space)\r\n if home1:\r\n gridworld[jth*x_state_num + ith] = 2\r\n elif home2:\r\n gridworld[jth*x_state_num + ith] = 3\r\n elif boat:\r\n gridworld[jth*x_state_num + ith] = 4\r\n elif villa1:\r\n gridworld[jth*x_state_num + ith] = 5\r\n elif villa2:\r\n gridworld[jth*x_state_num + ith] = 6\r\n elif brick:\r\n gridworld[jth*x_state_num + ith] = 7\r\n elif wood:\r\n gridworld[jth*x_state_num + ith] = 0\r\n else:\r\n gridworld[jth*x_state_num + ith] = 1\r\n\r\n for j in range(road_image.shape[0]):\r\n for i in range(road_image.shape[1]):\r\n if road_image[j,i] == 100:\r\n jth = int((i*0.2-307.7+198.75-21+300)//2.5)\r\n ith = int((j*0.2-109.3+473.75+23)//2.5)\r\n if ith >= 0 and ith < x_state_num and jth >=0 and jth < y_state_num:\r\n gridworld[jth*x_state_num + ith] = 9\r\n df = pd.DataFrame(gridworld.reshape((x_state_num,y_state_num)))\r\n plt.imshow(gridworld.reshape((x_state_num,y_state_num)))\r\n plt.show()\r\n filepath = 
'excel_withobjects.xlsx'\r\n df.to_excel(filepath, index=False)\r\n return gridworld",
"def _set_netcdf_grid_mapping_variable(root, grid_mapping):\n name = grid_mapping.pop(\"name\")\n var = root.createVariable(name, \"S1\", dimensions=())\n for attr in grid_mapping.keys():\n setattr(var, attr, grid_mapping[attr])",
"def __defineMap(self):\n from bokeh.models import WMTSTileSource\n url = 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{Z}/{Y}/{X}.jpg'\n wmts = WMTSTileSource(url=url)\n mapTiles = gv.WMTS(wmts)\n return mapTiles",
"def __init__(self, scene_map):\n self.scene_map = scene_map",
"def convert_map(rosmap):\n global grid\n global grid_res\n global grid_frame, grid_transform\n global map_lock\n global tf_list\n global pub\n debug(\"converting map\")\n grid_res = rosmap.info.resolution\n\n # Get map origin in coordinates of the '/map' tf frame.\n grid_zero = rosmap.info.origin\n grid_frame = rosmap.header.frame_id\n grid_stamped = PoseStamped()\n grid_stamped.pose = grid_zero\n grid_stamped.header.frame_id = rosmap.header.frame_id\n transform_pose = tf_list.transformPose('map', grid_stamped).pose\n position = transform_pose.position\n orientation = transform_pose.orientation\n new_grid_transform = tf.transformations.quaternion_matrix(\n [orientation.x, orientation.y, orientation.z, orientation.w])\n new_grid_transform[0, 3] = position.x\n new_grid_transform[1, 3] = position.y\n new_grid_transform[2, 3] = position.z\n #grid_zero_transform = numpy.array([[1, 0, 0, grid_zero.position.x],\n # [0, 1, 0, grid_zero.position.y],\n # [0, 0, 1, 0],\n # [0, 0, 0, 1]])\n #grid_transform = numpy.dot(grid_transform, grid_zero_transform)\n\n # Go through and put clearance around the walls.\n data = rosmap.data\n new_data = []\n diff = 4#int((.4 / 2) / grid_res) + 1 # Add one to provide buffer\n width = rosmap.info.width\n height = rosmap.info.height\n if rospy.get_param('~obstacle_expansion', True):\n print \"EXPANDING OBSTACLES\"\n for y in xrange(height):\n for x in xrange(width):\n new_data.append(get_surround(data, (x, y), diff, width, height))\n else: new_data = data\n #rosmap.data = new_data\n #rospy.sleep(10.)\n #pub.publish(rosmap)\n new_grid = Map(rosmap.info.width, rosmap.info.height, new_data)\n\n # Perform actual update of grid.\n if map_lock:\n while map_lock: continue\n map_lock = True\n grid_transform = new_grid_transform\n grid = new_grid\n map_lock = False\n\n\n debug(\"map converted\")\n # TODO: Convert between absolute coordinates and map.\n origin = rosmap.info.origin # Pose msg",
"def test_grid_layer_get_maps_by_grid_layer(self):\n pass",
"def load_new_map(self):\n self.map = gamemap.GameMap(self.datamap)",
"def create_worldmap( self, grid_size ):\n self.worldmap = [ [ ((y in [0, grid_size - 1] or x in [0, grid_size - 1]) and 'wall') or 'ground'\n for x in range(grid_size)] for y in range(grid_size)]\n self.worldmap[1][1] = 'pond'\n # First put out the stones randomly\n for i in range(int((grid_size - 2) ** 2 / self.STONE_PROBABILITY)):\n ok = False\n while not ok: \n (x, y) = random.randint(1, grid_size - 1), random.randint(1, grid_size - 1)\n if self.worldmap[y][x] == 'ground':\n count_stones = 0\n count_walls = 0\n # Check that the stone will not be adjacent to two other stones, \n # or one other stone and a wall.\n # This is to prevent the appearance of inaccessible areas.\n for dx in [-1, 0, 1]:\n for dy in [-1, 0, 1]:\n if self.worldmap[y + dy][x + dx] == 'stone':\n count_stones += 1\n if self.worldmap[y + dy][x + dx] == 'wall':\n count_walls += 1\n if count_stones == 0 or (count_stones <= 1 and count_walls == 0):\n self.worldmap[y][x] = 'stone'\n ok = True\n elif random.random() <= 0.1:\n ok = True\n # Then put out the lettuces randomly\n for i in range(int((grid_size - 2) ** 2 / self.LETTUCE_PROBABILITY)):\n ok = False\n while not ok: \n (x, y) = random.randint(1, grid_size - 1), random.randint(1, grid_size - 1)\n if self.worldmap[y][x] == 'ground':\n self.worldmap[y][x] = 'lettuce'\n self.lettuce_count += 1;\n ok = True\n # Finally put out the water ponds randomly\n for i in range(int((grid_size - 2) ** 2 / self.WATER_PROBABILITY)):\n ok = False\n while not ok: \n (x, y) = random.randint(1, grid_size - 1), random.randint(1, grid_size - 1)\n if self.worldmap[y][x] == 'ground':\n self.worldmap[y][x] = 'pond'\n ok = True",
"def make_map(self):\n map_options = GMapOptions(lat=MAP_LAT, lng=MAP_LON, map_type=MAP_TYPE, zoom=MAP_ZOOM)\n plot = GMapPlot(\n x_range=Range1d(), y_range=Range1d(), map_options=map_options,\n plot_width = MAP_WIDTH, plot_height = MAP_HEIGHT\n )\n\n plot.title.text = MAP_TITLE\n plot.title.text_font_size = TITLE_TEXT_SIZE\n plot.api_key = MAP_API_KEY\n\n #show unselected stations in grey\n # second column data source for map stn/lat/long only, single point per stn\n circle2 = Circle(x=\"long_single\", y=\"lat_single\", fill_alpha=0.8, size=15, fill_color=\"color\", line_color=\"black\")\n plot.add_glyph(self.map_datasource, circle2)\n # had to convert to tap tool for simplicity, so we can only select one station at a time. V2 may be able to figure out how to select multiple stations using box select\n\n plot.add_tools(PanTool(), WheelZoomTool())\n\n return plot",
"def grid_mapping(pollon, pollat, mapping_name=None):\n if mapping_name is None:\n mapping_name = cf.DEFAULT_MAPPING_NCVAR\n da = xr.DataArray(np.zeros((), dtype=cf.grid_mapping_dtype))\n attrs = cf.mapping.copy()\n attrs[\"grid_north_pole_longitude\"] = pollon\n attrs[\"grid_north_pole_latitude\"] = pollat\n da.attrs = attrs\n da.name = mapping_name\n return da",
"def inflate_map(self, grid_map):\n\n\n \"\"\"\n Fill in your solution here\n \"\"\"\n\n width = grid_map.get_width()\n height = grid_map.get_height()\n radius = self.radius\n #fill in the C space cells whose distance to occupied cells <= robot radius\n for x_grid in range(width):\n for y_grid in range(height):\n\n if grid_map[x_grid, y_grid] == self.occupied_space:\n x_0 = x_grid - radius\n y_0 = y_grid - radius\n\n for delta_x in range(2 * radius + 1):\n for delta_y in range(2 * radius + 1):\n x_check = x_0 + delta_x\n y_check = y_0 + delta_y\n if sqrt((x_check - x_grid)**2 + (y_check - y_grid)**2) <= radius and grid_map[x_check, y_check] != self.occupied_space:\n self.add_to_map(grid_map, x_check, y_check, self.c_space)\n\n\n # Return the inflated map\n return grid_map",
"def load_map(self, map_path):\n\n # Read file\n map_str = []\n with open(map_path, 'r') as file:\n for row in file.readlines():\n map_str.append(row.strip().split(\" \"))\n\n # Create Tiles with map position as key\n for i, row in enumerate(map_str):\n for j, tile_str in enumerate(row):\n pos = (i, j)\n tile = make_tile(tile_str, pos)\n tile.exploded_signal.connect(self.change_tile)\n self.tiles[pos] = tile\n self.everything[tile.id] = tile",
"def seafloor_grid(depths, lat, lon):",
"def _map_making(self):\n log.info(\"Creating geometry.\")\n geom = self._create_geometry(self.settings[\"datasets\"][\"geom\"])\n\n if \"geom-irf\" in self.settings[\"datasets\"]:\n geom_irf = self._create_geometry(self.settings[\"datasets\"][\"geom-irf\"])\n else:\n geom_irf = geom.to_binsz(binsz=BINSZ_IRF)\n\n offset_max = Angle(self.settings[\"datasets\"][\"offset-max\"])\n stack_datasets = self.settings[\"datasets\"][\"stack-datasets\"]\n log.info(\"Creating datasets.\")\n\n maker = MapDatasetMaker(\n geom=geom,\n geom_true=geom_irf,\n offset_max=offset_max,\n )\n if stack_datasets:\n stacked = MapDataset.create(geom=geom, geom_irf=geom_irf, name=\"stacked\")\n for obs in self.observations:\n dataset = maker.run(obs)\n stacked.stack(dataset)\n self._extract_irf_kernels(stacked)\n datasets = [stacked]\n else:\n datasets = []\n for obs in self.observations:\n dataset = maker.run(obs)\n self._extract_irf_kernels(dataset)\n datasets.append(dataset)\n\n self.datasets = Datasets(datasets)",
"def parse_map(self, map_text):\n cols = None\n rows = None\n agents_per_player = None\n agents = []\n num_players = None\n count_row = 0\n grid = [[]]\n for line in map_text.split(\"\\n\"):\n line = line.strip()\n\n # ignore blank lines and comments\n if not line or line[0] == \"#\":\n continue\n\n key, value = line.split(\" \", 1)\n key = key.lower()\n\n if key == \"cols\":\n cols = int(value)\n self.width = cols\n if rows != None:\n grid = self.init_grid(rows, cols)\n elif key == \"rows\":\n rows = int(value)\n self.height = rows\n if cols != None:\n grid = self.init_grid(rows, cols)\n\n elif key == 'p':\n loc = value.split()\n p_num = int(loc[0])\n p_row = int(loc[1])\n p_col = int(loc[2])\n self.players[p_num].row = p_row\n self.players[p_num].col = p_col\n grid[p_row][p_col].append(p_num)\n\n elif key == 'm':\n if len(value) != cols:\n raise Exception(\"map\",\n \"Incorrect number of cols in row %s. \"\n \"Got %s, expected %s.\"\n %(row, len(value), width))\n for count_col, c in enumerate(value):\n if c == MAP_OBJECT[WATER]:\n# print(\"len grid = \" + str(len (grid)))\n# print(\"len grid[0] = \" + str(len (grid[0])))\n grid[count_row][count_col].append(WATER)\n# elif c == MAP_OBJECT[LAND]:\n# grid[count_row][count_col].append(LAND)\n elif c not in MAP_OBJECT:\n raise Exception(\"map\",\n \"Invalid character in map: %s\" % c)\n count_row += 1\n\n elif key == 's': # server room\n loc = value.split()\n p_row = int(loc[0])\n p_col = int(loc[1])\n self.server.append((p_row, p_col))\n\n if count_row != rows:\n raise Exception(\"map\",\n \"Incorrect number of rows in map \"\n \"Got %s, expected %s.\"\n %(count_row, rows))\n self.field = grid\n return {\n \"size\": (rows, cols),\n \"grid\" : grid }",
"def test_populate_map(self):\n map1 = \"\"\"\\\n OOOOOO\n OJDJJO\n OSJJOO\n OOOOOO\"\"\"\n m = Ma.Map(map1)\n pos = (1, 1)\n pop = [Fa.Carnivore(), Fa.Herbivore(), Fa.Carnivore()]\n m.populate_map(pos, pop)\n assert m.island[1, 1].total_pop == 3\n assert m.island[1, 1].carnivore_pop == 2",
"def initiate_grid(self):\n\n self.set_goal_location() # determine the location of the goal\n self.add_goal() # adds the goal to the grid\n self.number_of_traps_chests() # determine the number of traps\n self.determine_trap_location() # determine the location of the traps\n self.determine_chest_location() # determine the location of the chests\n self.add_traps() # add traps to the map\n self.add_chests() # add chests to the map"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the name / code snippet pair for each Lua function in the file under file_name.
|
def _split_lua_file_into_funcs(self, file_name):
with open(self._get_lua_path(file_name)) as f:
for func in f.read().strip().split("function "):
if func:
bits = func.split("\n", 1)
name = bits[0].split("(")[0].strip()
snippet = bits[1].rsplit("end", 1)[0].strip()
yield name, snippet
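# Self-contained sketch of the same parsing logic run on an in-memory string
# (the Lua source below is hypothetical).
LUA_SOURCE = '''
function get(key)
    return redis.call("GET", KEYS[1])
end

function ping()
    return "PONG"
end
'''

def split_lua_source(source):
    for func in source.strip().split("function "):
        if func:
            bits = func.split("\n", 1)
            name = bits[0].split("(")[0].strip()
            snippet = bits[1].rsplit("end", 1)[0].strip()
            yield name, snippet

assert dict(split_lua_source(LUA_SOURCE)) == {
    'get': 'return redis.call("GET", KEYS[1])',
    'ping': 'return "PONG"',
}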
|
[
"def _get_lua_funcs(self):\n with open(\"bitwise.lua\", \"r\") as f:\n for func in f.read().strip().split(\"local function \"):\n if func:\n bits = func.split(\"\\n\", 1)\n name = bits[0].split(\"(\")[0].strip()\n snippet = bits[1].rsplit(\"end\", 1)[0].strip()\n yield name, snippet",
"def GetFuncNames(filepath):\n\t# Get a list of all of the 'C' files.\n\tfilelist = glob.glob(filepath)\n\tfilelist.sort()\n\n\tfiledata = []\n\n\n\tfor fname in filelist:\n\t\twith open(fname, 'r') as f:\n\t\t\t# Search to see if the desired string is in the file.\n\t\t\tif 'PyDoc_STRVAR' in f.read():\n\t\t\t\t# Split the file name from the rest of the path, and then\n\t\t\t\t# split the file prefix from the file extension ('.c') to\n\t\t\t\t# get the function name.\n\t\t\t\tfuncname = os.path.split(os.path.basename(fname))[1].split('.')[0]\n\t\t\t\t# We exclude 'simdsupport', as we document it separately.\n\t\t\t\tif funcname != 'simdsupport':\n\t\t\t\t\tfiledata.append(funcname)\n\n\treturn filedata",
"def get_func(self, name):\n if name not in self.funcs:\n return None\n sym = self.get_symbol(name)\n addr = sym.rebased_addr\n end_addr = addr + sym.size\n self.log.debug('extracting function %s at %#x', name, addr)\n\n body = []\n for i in range(addr, end_addr, 4):\n instr = self.get_format_instr(i)\n if instr is None:\n instr = (i, '', '', self.get_data(i, 4))\n body.append(instr)\n return body",
"def parse_code_file(fname):\n with open(fname) as f:\n lineiter = f\n # Remove all leading comments (useful for specifying vim or emacs\n # settings without confusing things in the presentation).\n # Also remove leading blank lines. In short, stop when we find a\n # non-comment non-blank.\n for line in lineiter:\n if line.strip() and not re.match(r'^\\s*#.*$', line):\n break\n remaining_text = line + ''.join(lineiter)\n match = re.match(r'(?ms)\\s*\"\"\"(.*?)\\n\\s*(.*?)\"\"\"\\s+(.*)$',\n remaining_text)\n if match:\n # Eval to make sure we handle escaping properly.\n title = ast.literal_eval('\"\"\"%s\"\"\"' % match.group(1))\n description = ast.literal_eval('\"\"\"%s\"\"\"' % match.group(2))\n # Strip out '__doc__ =' from the remaining source.\n src = re.sub(r'^(\\s*)__doc__\\s*=\\s*', r'\\1', match.group(3))\n return title, description, src\n return \"\", \"\", \"\"",
"def parser(file_name, function_name):\n\tast = parse_file(file_name)\n\tfor ext in ast.ext:\n\t\tif type(ext).__name__ == 'FuncDef':\n\t\t\tfunction_decl = ext.decl\n\t\t\tfunc_name = function_decl.name\n\t\t\tif func_name == function_name:\n\t\t\t\treturn function_decl, ext.body\n\tprint('\\33[101m' + '[error][parser]: Function: '+ function_name + ' does not exist in ' + file_name + ' .\\033[0m')\n\texit(1)",
"def list_all_js_function_names(pfile):\n with open(pfile) as dataFile:\n data = dataFile.readlines()\n\n # searched = re.search(\"function\", data)\n\n for line in data:\n if 'function' in line:\n print ('found')\n\n else:\n print ('did not find')",
"def get_raw_func(self, name):\n if name not in self.funcs:\n return []\n sym = self.get_symbol(name)\n addr = sym.rebased_addr\n end_addr = addr + sym.size\n self.log.debug('extracting raw function %s at %#x', name, addr)\n\n body = []\n for i in range(addr, end_addr, 4):\n instr = self.get_instr(i)\n if instr is None:\n continue\n body.append(instr)\n return body",
"def get_code_lines(fname):\n print(\"@@@ FILE: \" + fname)\n return preprocessor.preprocess_file(fname)",
"def functions(func_name):\n\n def open(star_name):\n if star_name not in loaded_stars:\n with __builtins__.open('{}.txt'.format(star_name), 'r') as reader:\n lista = reader.readlines()\n return lista\n # completa aqui: debe leer el archivo\n # y cargarlo a un diccionario\n # TIP: desde el scope de esta funcion open,\n # puedes acceder al builtin \"open\" como\n # __builtins__.open\n\n def mean(star_name):\n # Modifica esto para que\n # no se abra el archivo nuevamente\n # sino que se trabaje con el diccionario\n # de estrellas ya cargadas\n lines = loaded_stars[star_name]\n ans = sum(map(lambda l: float(l), lines)) / len(lines)\n Worker.mean_data[star_name] = ans\n return ans\n\n def var(star_name):\n prom = Worker.mean_data[star_name]\n # modifica esto para que\n # no se abra el archivo nuevamente\n # sino que se trabaje con el diccionario\n # de estrellas ya cargadas\n lines = loaded_stars[star_name]\n n = len(lines)\n suma = sum(map(lambda l: (float(l) - prom)**2, lines))\n return suma / (n - 1)\n\n return locals()[func_name]",
"def find_methods_in_python_file(self, file_path):\n\n methods = []\n o = open(file_path, \"r\", encoding='utf-8')\n text = o.read()\n p = ast.parse(text)\n for node in ast.walk(p):\n if isinstance(node, ast.FunctionDef):\n methods.append(node.name)\n\n print(methods)\n return methods",
"def do_list_funcs(self, arg):\n # Check if file exists as .py\n if not (os.path.isfile(arg)\n and arg[-3:] == \".py\"\n and arg in os.listdir()):\n print(\"list_funcs: %s: Not a .py file\" % arg)\n return False\n # Search file contents for top-level function declarations\n file_contents = open(arg, mode=\"r\").read()\n for match in re.finditer(self.fun_pat, file_contents):\n # Don't return private methods\n if match.group(1)[:2] != \"__\":\n print(\"\\t\" + match.group(1))",
"def test_find_functions(self):\n self.filename = \"parser_tests/ruby_functions.txt\"\n expected_functions = ['multiply', 'method_name']\n self.run_parser()\n self.assertListEqual(expected_functions, self.p.scanner.functions_calls)",
"def get_script(qrunes_file):\n startStr = '@script:'\n endStr = '@end'\n if not fetch(startStr,endStr,qrunes_file,[]) :\n return \"\"\n newLi = []\n if 'Python' == get_language(qrunes_file):\n input_arr = get_import_file_name(qrunes_file)\n if input_arr:\n newLi.append('import sys\\n')\n newLi.append('sys.path.append(\"'+os.path.dirname(qrunes_file).replace('\\\\', '\\\\\\\\')+'\")\\n')\n newLi.append('from qcodes import *\\n')\n for import_path in input_arr:\n import_path = os.path.splitext(import_path)[0]\n newLi.append('from '+import_path+'_python.script import *\\n')\n newLi.append('from '+import_path+'_python.qcodes import *\\n')\n\n if is_autoimport(qrunes_file):\n newLi.append('from pyqpanda import *\\n')\n newLi.append('from pyqpanda.utils import *\\n')\n if 'C++' == get_language(qrunes_file):\n input_arr = get_import_file_name(qrunes_file)\n for import_path in input_arr:\n import_path = os.path.splitext(import_path)[0]\n newLi.append('#include \"'+os.path.dirname(qrunes_file).replace('\\\\', '\\\\\\\\')+'\\\\\\\\'+import_path+'_cpp\\\\\\\\qcodes.h\"\\n')\n\n if is_autoimport(qrunes_file):\n newLi.append('#include \"qcodes.h\"\\n')\n \n info = fetch(startStr,endStr,qrunes_file,newLi)\n script_content = ''.join(info)\n return script_content",
"def process_file(self, filename):\n results = [None, None]\n NORMAL, START_MEMORIZE, STOP_MEMORIZE = range(3)\n OCCUR, DESCRIPTION = 0, 1\n state = NORMAL\n function = \"\"\n funcs = collections.defaultdict(list)\n # Define valid previous symbols.\n pre_symbols = \" (!@&\\n\\t\\r\"\n comments = (\"/**\", \"/*\", \"*\", \"//\")\n # Define state of the php class.\n php_class = 0\n f = None\n try:\n f = open(filename, encoding=\"utf8\")\n for index, line in enumerate(f, start=1):\n # Skip all comments.\n if line.strip().startswith(comments):\n continue\n # If we entrance in php class,\n # then change its state.\n if line.strip().startswith(\"class\"):\n php_class = 1\n continue\n # If function defined in one of the processed files,\n # then it's a core's function.\n if line.strip().startswith(\"function\") and not php_class:\n func = re.findall(\"function[\\s+]([&\\w]+)(?=\\()\", line)\n if func:\n func = func.pop()\n description = \"core function - defined in \" + filename + \" on line \" + str(index)\n try:\n funcs[func + \"()\"][DESCRIPTION] = description\n except IndexError:\n funcs[func + \"()\"] = [0, description]\n continue\n for i, c in enumerate(line):\n if php_class:\n if c == \"{\":\n php_class += 1\n elif c == \"}\":\n php_class -= 1\n if state == NORMAL:\n pre_symbol = line[i - 1]\n # If character is a valid first character of the php functions name,\n # and previous symbol is a valid previous symbol.\n if is_valid_func_name(c) and pre_symbol in pre_symbols:\n function += c\n state = START_MEMORIZE\n continue\n if state == START_MEMORIZE:\n if is_valid_char(c):\n function += c\n elif c == \"(\":\n state = STOP_MEMORIZE\n else:\n function = \"\"\n state = NORMAL\n if state == STOP_MEMORIZE:\n if is_valid_func_name(function):\n try:\n funcs[function + \"()\"][OCCUR] += 1\n except IndexError:\n funcs[function + \"()\"] = [1, \"\"]\n function = \"\"\n state = NORMAL\n if funcs:\n results.insert(0, funcs)\n except (IOError, OSError, UnicodeDecodeError) as err:\n time_error = time.time()\n error = \"{0} -- {1} in file: {2}\".format(time.strftime(\"%Y-%m-%d %H:%M:%S\",\n time.gmtime(time_error)), err, filename)\n results.insert(1, error)\n finally:\n if f is not None:\n f.close()\n self.results_queue.put(results)",
"def show_registered_snippets() -> None:\n vscode_snippet_dir = get_vscode_snippet_dirpath()\n snippet_path = vscode_snippet_dir + 'python.json'\n if not os.path.isfile(snippet_path):\n print('No snippet file exists.')\n return\n snippets = None\n with open(snippet_path, 'r') as f:\n snippets = json.loads(f.read())\n for name, content in snippets.items():\n section_bar = '-'*(len(name))\n print(section_bar)\n print('{}'.format(name))\n print(section_bar)\n print('[prefix]\\n{}'.format(content['prefix']))\n print('[description]\\n{}'.format(content.get('description')))\n print('[body]')\n if isinstance(content['body'], list):\n print('\\n'.join(content['body']).replace('\\t', ' '))\n else:\n print(content['body'])\n print()",
"def process_file(fname):\n # Get a list of pre-processed lines contained within the file\n code_lines = get_code_lines(fname)\n # Process these code lines one by one and get the results\n results = process_lines(fname, code_lines)\n return results",
"def _getFunctionDefinition(self, mainSource):\n\t\tparts = [self.getLateSetupCode(self._boundNames)]\n\t\tparts.append(mainSource)\n\t\tbody = \"\\n\".join(parts)\n\t\tif not body.strip():\n\t\t\tbody = \" pass\"\n\t\treturn \"def %s(%s):\\n%s\"%(self.name, self.formalArgs,\n\t\t\tbody)",
"def format_function_name(fname):\n # if default_lambda_var_name in fname:\n # return \"lambda function\"\n return fname",
"def get_functions(text, startswith='def '):\n return get_definition(text, startswith)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Registers the code snippet as a Lua script, and binds the script to the client as a method that can be called with the same signature as regular client methods, e.g. with a single key arg.
|
def _bind_lua_method(self, name, code):
script = self._client.register_script(code)
method = lambda key, *a, **k: script(keys=[key], args=a, **k)
setattr(self, name, method)
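# A minimal usage sketch, assuming redis-py and a reachable Redis server, and
# reusing _bind_lua_method above as a plain function; the script name and Lua
# body are hypothetical.
import redis

INCRBY_LUA = "return redis.call('INCRBY', KEYS[1], ARGV[1])"

class HotClient:
    def __init__(self):
        self._client = redis.Redis()
        _bind_lua_method(self, "incrby_lua", INCRBY_LUA)

client = HotClient()
client.incrby_lua("counter", 5)  # runs the script with keys=["counter"], args=(5,)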
|
[
"def _bind_private_lua_script(self, name, code):\n script = self._client.register_script(code)\n setattr(self, '_' + name, script)",
"def register_script(self, script):\n from aredis.scripting import Script\n return Script(self, script)",
"def add_script(self, script, raw=False):\n if raw:\n self.raw_scripts.append(script)\n\n else:\n self.scripts.append(script)",
"def RegisterScript(self, script_path, script):\n\n self.__scripts[script_path] = script",
"def run_script(self, src, *args, **kwargs):\n return self.scala((src,), *args, **kwargs)",
"def set_code(self, key, value):\n self._code[key] = value",
"def hookSnippetInjecting(self):\n if self.pathInvalid():\n return 1\n\n # TODO if hooked then not hook\n with open(self.dalvikEntry) as f:\n codeSnippet = f.read()\n for index,types in enumerate(self.onCreateIns):\n try:\n insertLocation = codeSnippet.index(types)\n break\n except:\n continue\n insertLocation = insertLocation + codeSnippet[insertLocation:].index('.locals')\n # insertLocation = insertLocation + codeSnippet[insertLocation:].index('onCreate') # should not before system start (NO PRIVILIDGE)\n insertLocation = insertLocation + codeSnippet[insertLocation:].index('\\n')\n codeSnippet = codeSnippet[:insertLocation] + self.hookCodesEntry[index%2] + codeSnippet[insertLocation:]\n with open(self.dalvikEntry,'w') as f:\n f.write(codeSnippet)\n logger.info(\"[+]Finish hooking\")",
"def make_script(self):\n pass",
"def set_script(self, new_script_code):\n\n self.language_name = \"Unknown\"\n self.language_code_1 = None\n self.language_code_3 = \"unknown\"\n self.is_supported = True\n self.is_script = True\n self.script_code = new_script_code\n # Get the name of the script\n self.script_name = lang_code_search(new_script_code, True)[0]",
"def setScript(self, script: str):\n self.__script = script",
"def call_remote_command(self, key, payload=None):\n if key in self.code:\n self.code[key](payload)",
"def script(pyfunc):\n def wrapped_func(func, *args, **kwargs):\n from .util import _enter_hybrid_runtime, _restore_runtime, _is_tvm_arg_types\n if _is_tvm_arg_types(args):\n return parse(func, args)\n\n intersect = _enter_hybrid_runtime(func)\n value = func(*args, **kwargs)\n _restore_runtime(func, intersect)\n return value\n return decorate(pyfunc, wrapped_func)",
"def handle_jedi_command(self, method, uid):\r\n\r\n kwargs = {}\r\n if 'settings' in self.data:\r\n kwargs.update({'settings': self.data.pop('settings')})\r\n\r\n kwargs['script'] = self.jedi_script(**self.data)\r\n getattr(self, method)(uid, **kwargs)",
"def add(self, scriptclass, key=None, autostart=True):\r\n if self.obj.dbobj.__class__.__name__ == \"PlayerDB\":\r\n # we add to a Player, not an Object\r\n script = create.create_script(scriptclass, key=key, player=self.obj,\r\n autostart=autostart)\r\n else:\r\n # the normal - adding to an Object\r\n script = create.create_script(scriptclass, key=key, obj=self.obj,\r\n autostart=autostart)\r\n if not script:\r\n logger.log_errmsg(\"Script %s could not be created and/or started.\" % scriptclass)\r\n return False\r\n return True",
"def add_javascript(self, code_id, code):\n if self.javascript_code is None:\n self.javascript_code = {code_id: code}\n elif not self.javascript_code.has_key(code_id):\n self.javascript_code[code_id] = code",
"def scriptNode(attributeList, ignoreReferenceEdits=bool, scriptType=int, executeBefore=bool, afterScript=\"string\", beforeScript=\"string\", sourceType=\"string\", executeAfter=bool, name=\"string\"):\n pass",
"def run_python_script(script_filepath, host, port):\n command = \"script.run '%s'\" % script_filepath\n print(command)\n command = '@\"%s\"' % script_filepath\n lx = modosock.ModoSock(host, port)\n lx.eval(command)\n lx.close()",
"def bind(self, modifiers, key, handler, param=None):\r\n keys = modifiers+key\r\n return keybinder.bind(keys, handler, param)",
"def _execute_lua(self, keys, args, client):\n lua, lua_globals = Script._import_lua(self.load_dependencies)\n lua_globals.KEYS = self._python_to_lua(keys)\n lua_globals.ARGV = self._python_to_lua(args)\n\n def _call(*call_args):\n # redis-py and native redis commands are mostly compatible argument\n # wise, but some exceptions need to be handled here:\n nrm_cmd = str(call_args[0]).lower()\n if nrm_cmd == 'lrem':\n response = client.call(\n call_args[0], call_args[1],\n call_args[3], # \"count\", default is 0\n call_args[2])\n elif nrm_cmd == 'hmset':\n # redis-py hmset takes key value pairs in a dictionary and not as a flat list of arguments.\n call_iter = iter(call_args)\n response = client.call(next(call_iter), next(call_iter), dict(izip(call_iter, call_iter)))\n elif nrm_cmd == 'zadd':\n score_values = call_args[2:]\n mappings = {score_values[index + 1]: score\n for index, score in enumerate(score_values) if index % 2 == 0}\n response = client.call('zadd', call_args[1], mappings)\n else:\n response = client.call(*call_args)\n return self._python_to_lua(response)\n\n lua_globals.redis = {\"call\": _call,\n # TODO wrap _call with try to implement \"pcall\": _pcall,\n \"status_reply\": lambda status: self._python_to_lua({\"ok\": status}),\n \"error_reply\": lambda error: self._python_to_lua({\"err\": error}),\n \"log\": client._log,\n \"LOG_DEBUG\": logging.DEBUG,\n \"LOG_VERBOSE\": logging.INFO,\n \"LOG_NOTICE\": logging.WARNING,\n \"LOG_WARNING\": logging.ERROR\n }\n return self._lua_to_python(lua.execute(self.script), return_status=True)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Registers the code snippet as a Lua script, and binds the script to the client as a private method (e.g. some_lua_func becomes a _some_lua_func method of HotClient) that can be later wrapped in public methods with better argument and error handling.
|
def _bind_private_lua_script(self, name, code):
script = self._client.register_script(code)
setattr(self, '_' + name, script)
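# Sketch of the "later wrapped in a public method" pattern the description
# refers to, assuming redis-py, a reachable Redis server, and the helper above;
# the script, its return value, and the error handling are hypothetical.
import redis

RANK_MEMBER_LUA = "return redis.call('ZRANK', KEYS[1], ARGV[1])"

class HotClient:
    def __init__(self):
        self._client = redis.Redis()
        _bind_private_lua_script(self, "rank_member", RANK_MEMBER_LUA)

    def rank_member(self, key, member):
        result = self._rank_member(keys=[key], args=[member])
        if result is None:
            raise KeyError(member)
        return int(result)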
|
[
"def _bind_lua_method(self, name, code):\n script = self._client.register_script(code)\n method = lambda key, *a, **k: script(keys=[key], args=a, **k)\n setattr(self, name, method)",
"def register_script(self, script):\n from aredis.scripting import Script\n return Script(self, script)",
"def add_script(self, script, raw=False):\n if raw:\n self.raw_scripts.append(script)\n\n else:\n self.scripts.append(script)",
"def run_script(self, src, *args, **kwargs):\n return self.scala((src,), *args, **kwargs)",
"def hookSnippetInjecting(self):\n if self.pathInvalid():\n return 1\n\n # TODO if hooked then not hook\n with open(self.dalvikEntry) as f:\n codeSnippet = f.read()\n for index,types in enumerate(self.onCreateIns):\n try:\n insertLocation = codeSnippet.index(types)\n break\n except:\n continue\n insertLocation = insertLocation + codeSnippet[insertLocation:].index('.locals')\n # insertLocation = insertLocation + codeSnippet[insertLocation:].index('onCreate') # should not before system start (NO PRIVILIDGE)\n insertLocation = insertLocation + codeSnippet[insertLocation:].index('\\n')\n codeSnippet = codeSnippet[:insertLocation] + self.hookCodesEntry[index%2] + codeSnippet[insertLocation:]\n with open(self.dalvikEntry,'w') as f:\n f.write(codeSnippet)\n logger.info(\"[+]Finish hooking\")",
"def make_script(self):\n pass",
"def RegisterScript(self, script_path, script):\n\n self.__scripts[script_path] = script",
"def set_script(self, new_script_code):\n\n self.language_name = \"Unknown\"\n self.language_code_1 = None\n self.language_code_3 = \"unknown\"\n self.is_supported = True\n self.is_script = True\n self.script_code = new_script_code\n # Get the name of the script\n self.script_name = lang_code_search(new_script_code, True)[0]",
"def set_py_bridge(\n self, language:str, bridge_code, bridge_parameters=None, environment=None\n ):\n if not isinstance(language, str):\n raise TypeError(language)\n\n if callable(bridge_code):\n bridge_code = inspect.getsource(bridge_code)\n if bridge_code is None:\n raise ValueError(\"Cannot obtain source code for bridge_code\")\n if not isinstance(bridge_code, str):\n raise TypeError(type(bridge_code))\n if self._py_bridges is None:\n self._py_bridges = self._get_py_bridges()\n bridge = {\n \"code\": bridge_code\n }\n self._py_bridges[language] = bridge\n if bridge_parameters is not None:\n self.set_py_bridge_parameters(language, bridge_parameters)\n if environment is not None:\n self.set_py_bridge_environment(language, environment)\n self._update()",
"def add_handler(self, f):\n if f.__name__.startswith('rpc_'):\n raise ValueError(\"Server method names cannot start with rpc_.\")\n self._json_rpc_methods[f.__name__] = f\n return f",
"def addHandler(identifier, handler): #@NoSelf",
"def register(self):\n ExecuteJavascript('registerListener(\"%(id)s\", executeCell);'\n % {'id': self.id})",
"def hook(self, addr, code):\n code = \"{\\n%s\\n } \" % code\n linear = Addr16(str=str(addr)).linear\n\n if linear in self._hooks:\n self._hooks[linear] += code\n else:\n self._hooks[linear] = code",
"def _importCode(code, name, add_to_sys_modules=0):\n import imp\n module = imp.new_module(name)\n \n if add_to_sys_modules:\n sys.modules[name] = module\n \n exec code in module.__dict__\n return module",
"def setScript(self, script: str):\n self.__script = script",
"def addScript(self, state: 'ScXMLScriptElt') -> \"void\":\n return _coin.ScXMLScxmlElt_addScript(self, state)",
"def handle_recv_codelet(self, user_id, codelet_id, string):\n\n # Find the code_id\n\n codelet = self.sharedspace.codelets.get(codelet_id, None)\n\n if codelet is not None:\n\n codelet.update(user_id, string, self.socket.next_order_id())\n\n else:\n\n codelet = Codelet(self.socket.next_codelet_id(), user_id, string, self.socket.next_order_id())\n\n self.sharedspace.add_codelet(codelet)\n\n # Store the fact that the user isn't currently working on a codelet\n\n self.users[user_id].clear_codelet()\n\n # Evaluate the code\n\n self.evaluate_codelet(codelet)\n\n # Send back to clients\n\n self.socket.send_to_all(MESSAGE_UPDATE(user_id, codelet.get_id(), string, codelet.get_order_id()))\n\n return",
"def runAsNimbleScript(self, **kwargs):\n\n self.kwargs = kwargs if kwargs else nimble.getRemoteKwargs(globals())\n self.response = nimble.createRemoteResponse(globals())\n self.run()",
"def add_javascript(self, code_id, code):\n if self.javascript_code is None:\n self.javascript_code = {code_id: code}\n elif not self.javascript_code.has_key(code_id):\n self.javascript_code[code_id] = code"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the value of the other type instance to use in an operator method, namely when the method's instance is on the left side of the expression.
|
def value_left(self, other):
return other.value if isinstance(other, self.__class__) else other
|
[
"def op_left(op):\n\n def method(self, other):\n return op(self.value, value_left(self, other))\n\n return method",
"def __call__(self, other):\n return Type.engine.apply(self, other)",
"def _cast_other(binary_op):\r\n def cast_op(self, other):\r\n \"\"\"A wrapped binary operator that can handle non-Expression arguments.\r\n \"\"\"\r\n other = self.cast_to_const(other)\r\n return binary_op(self, other)\r\n return cast_op",
"def op_right(op):\n\n def method(self, other):\n return op(value_left(self, other), value_right(self, other))\n\n return method",
"def __call__(self, other):\n if isinstance(other, (int)):\n other = FieldElement(other)\n if isinstance(other, FieldElement):\n return self.eval(other)\n if isinstance(other, Polynomial):\n return self.compose(other)\n raise NotImplementedError()",
"def _handle_left_operand_function(self, other, function):\n if self._is_valid_subscriptable(other):\n return Vector2D(function(self.x, other[0]),\n function(self.y, other[1]))\n return Vector2D(function(self.x, other), function(self.y, other))",
"def __or__(self,other):\n if isinstance(other,(float,int,complex)): return self*field_traits.conjugate(other)\t\t# calls __mul__ below (handles \"0\" case)\n elif isinstance(other,_operator_base): return self.space.traits.back_act_on_vec(self,other)\n else: return self.space.traits.dot(self,other)\t\t# checks that both are _member class",
"def __asExpression(self, other):\n\n if isinstance(other, Expression):\n return other\n else:\n return Constant(other)",
"def __mod__(self, other):\n return Type.engine.compose(self, other)",
"def _of_proc(left, right):\n left = _normalize_type(left)\n if left in ('list', 'tuple', 'array', 'matrix', 'seq'):\n return left\n return None",
"def binary_operator(op):\n def _binary_operator(self, other):\n return_type = binop_return_type(op)\n if isinstance(self, NumExprFactor):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other\n )\n return return_type(\n \"({left} {op} {right})\".format(\n left=self_expr,\n op=op,\n right=other_expr\n ),\n binds=new_inputs\n )\n elif isinstance(other, NumExprFactor):\n commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))\n return commuted_method_getter(other)(self)\n elif isinstance(other, Term):\n if self is other:\n return return_type(\n \"x_0 {op} x_0\".format(op=op),\n binds=(self,)\n )\n return return_type(\n \"x_0 {op} x_1\".format(op=op),\n binds=(self, other)\n )\n elif isinstance(other, Number):\n return return_type(\n \"x_0 {op} ({constant)\".format(op=op, constant=other),\n binds=(self,)\n )\n raise BadBinaryOperator(op, self, other)\n return _binary_operator",
"def __deref__(self):\n return _handle.OperatorHandle___deref__(self)",
"def __mod__(self, other):\r\n\r\n # Tensor product between the two operators\r\n if isinstance(other, Operator):\r\n result = Operator(self.n_qubits + other.n_qubits)\r\n result.matrix = csc_matrix(kron(self.matrix, other.matrix))\r\n return result\r\n else:\r\n raise TypeError(\r\n 'Operation not defined between operator and {}.'.format(type(other))\r\n )",
"def get_son(self):\n return self.left",
"def _apply_operator(self, other, op):\n symbols = {operator.add: \"+\", operator.sub: \"-\", operator.mul: \"*\", operator.truediv: \"/\", operator.pow: \"**\"}\n get_symbol = lambda op: symbols[op] if op in symbols.keys() else \"?\"\n other = var2link(other)\n return PartialLink(vars=self.vars.union(other.vars),\n fn=lambda values: op(self.fn(values), other.fn(values)),\n links=self.links.union(other.links),\n string=\"(\" + str(self) + get_symbol(op) + str(other) + \")\")",
"def binary_op(cls, operator, a, b):\n return cls.binary_operators[operator](a, b)",
"def currentOperator(self):\n return self.currentOp",
"def _math_op(left: \"Union[Quantity, Number]\",\n right: \"Union[Quantity, Number]\",\n op: \"Callable[[T, T], T]\") -> \"Quantity\":\n left, right = Quantity.validate(left, right)\n size = op(left.size, right.size)\n return Quantity(left.instrument, size, left.path_id)",
"def get_left(self):\n return self.left_child",
"def op(operator):\n return a(Token('OP', operator)) >> tok_to_value"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a type instance method for the given operator, applied when the instance appears on the left side of the expression.
|
def op_left(op):
def method(self, other):
return op(self.value, value_left(self, other))
return method
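# Worked example tying op_left to the companion value_left helper; the Metric
# wrapper class is hypothetical.
import operator

def value_left(self, other):
    return other.value if isinstance(other, self.__class__) else other

class Metric:
    def __init__(self, value):
        self.value = value

    __add__ = op_left(operator.add)
    __sub__ = op_left(operator.sub)

assert Metric(10) + Metric(3) == 13   # the right operand is unwrapped
assert Metric(10) - 4 == 6            # plain numbers pass straight through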
|
[
"def binary_operator(op):\n def _binary_operator(self, other):\n return_type = binop_return_type(op)\n if isinstance(self, NumExprFactor):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other\n )\n return return_type(\n \"({left} {op} {right})\".format(\n left=self_expr,\n op=op,\n right=other_expr\n ),\n binds=new_inputs\n )\n elif isinstance(other, NumExprFactor):\n commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))\n return commuted_method_getter(other)(self)\n elif isinstance(other, Term):\n if self is other:\n return return_type(\n \"x_0 {op} x_0\".format(op=op),\n binds=(self,)\n )\n return return_type(\n \"x_0 {op} x_1\".format(op=op),\n binds=(self, other)\n )\n elif isinstance(other, Number):\n return return_type(\n \"x_0 {op} ({constant)\".format(op=op, constant=other),\n binds=(self,)\n )\n raise BadBinaryOperator(op, self, other)\n return _binary_operator",
"def get_operator(cls, exp: Expression):\n if callable(exp.operator):\n return exp.operator\n\n if isinstance(exp.operator, str):\n try:\n return cls.OPERATORS[exp.operator.lower()]\n except KeyError:\n raise InvalidOperator(f\"No such operator {exp.operator!r}!\")\n\n raise InvalidOperator(\n f\"Please provide a valid operator or callable, not {exp.operator!r}!\"\n )",
"def add_unary_numeric_operator(self, operator, result_cppclass=None, left_cppclass=None):\n operator = utils.ascii(operator)\n if not isinstance(operator, str):\n raise TypeError(\"expected operator name as string\")\n if operator not in ['-']:\n raise ValueError(\"The operator %r is invalid or not yet supported by PyBindGen\" % (operator,))\n try:\n l = self.unary_numeric_operators[operator]\n except KeyError:\n l = []\n self.unary_numeric_operators[operator] = l\n if result_cppclass is None:\n result_cppclass = self\n if left_cppclass is None:\n left_cppclass = self\n op = (result_cppclass, left_cppclass)\n if op not in l:\n l.append(op)",
"def unary_op(cls, operator, operand):\n return cls.unary_operators[operator](operand)",
"def _add_logical_operator(self, operator):\n if not self.c_oper:\n raise QueryExpressionError(\"Logical operators must be preceded by a expression\")\n\n self.current_field = None\n self.c_oper = None\n\n self.l_oper = inspect.currentframe().f_back.f_code.co_name\n self._query.append(operator)\n return self",
"def add_binary_numeric_operator(self, operator, result_cppclass=None,\n left_cppclass=None, right=None):\n operator = utils.ascii(operator)\n if not isinstance(operator, str):\n raise TypeError(\"expected operator name as string\")\n if operator not in ['+', '-', '*', '/']:\n raise ValueError(\"The operator %r is invalid or not yet supported by PyBindGen\" % (operator,))\n try:\n l = self.binary_numeric_operators[operator]\n except KeyError:\n l = []\n self.binary_numeric_operators[operator] = l\n if result_cppclass is None:\n result_cppclass = self\n if left_cppclass is None:\n left_cppclass = self\n\n if right is None:\n right = self\n elif isinstance(right, CppClass):\n pass\n else:\n if isinstance(right, str):\n right = utils.param(right, 'right')\n try:\n right = utils.eval_param(right, None)\n except utils.SkipWrapper:\n return\n\n op = (result_cppclass, left_cppclass, right)\n if op not in l:\n l.append(op)",
"def op_right(op):\n\n def method(self, other):\n return op(value_left(self, other), value_right(self, other))\n\n return method",
"def binary_op(cls, operator, a, b):\n return cls.binary_operators[operator](a, b)",
"def _cast_other(binary_op):\r\n def cast_op(self, other):\r\n \"\"\"A wrapped binary operator that can handle non-Expression arguments.\r\n \"\"\"\r\n other = self.cast_to_const(other)\r\n return binary_op(self, other)\r\n return cast_op",
"def _apply_binary_operator(\n self,\n op: Callable[[Any, Any], Any],\n other: Any,\n right: bool = False,\n ) -> \"InitialCondition\":\n fn = lambda v: op(v, other)\n if right:\n fn = lambda v: op(other, v)\n\n return self.apply(fn)",
"def transform_unary_operator(self, node):\n # supported operators list\n operators_list = ['+', '-', '++', '--', '!']\n tokens = list(node.get_tokens())\n\n # it can be either pre increment/decrement or any other operator from the list\n if tokens[0].spelling in operators_list:\n child = self.transform(next(node.get_children()))\n # (decl_ref) e.g.; int a = ++b; or simply ++b;\n if isinstance(child, str):\n if tokens[0].spelling == '+':\n return Symbol(child)\n if tokens[0].spelling == '-':\n return Mul(Symbol(child), -1)\n if tokens[0].spelling == '++':\n return PreIncrement(Symbol(child))\n if tokens[0].spelling == '--':\n return PreDecrement(Symbol(child))\n if tokens[0].spelling == '!':\n return Not(Symbol(child))\n # e.g.; int a = -1; or int b = -(1 + 2);\n else:\n if tokens[0].spelling == '+':\n return child\n if tokens[0].spelling == '-':\n return Mul(child, -1)\n if tokens[0].spelling == '!':\n return Not(sympify(bool(child)))\n\n # it can be either post increment/decrement\n # since variable name is obtained in token[0].spelling\n elif tokens[1].spelling in ['++', '--']:\n child = self.transform(next(node.get_children()))\n if tokens[1].spelling == '++':\n return PostIncrement(Symbol(child))\n if tokens[1].spelling == '--':\n return PostDecrement(Symbol(child))\n else:\n raise NotImplementedError(\"Dereferencing operator, \"\n \"Address operator and bitwise NOT operator \"\n \"have not been implemented yet!\")",
"def get_operator(operator_name):\n if operator_name == \"gt\":\n return Gt\n elif operator_name == \"gte\":\n return Gte\n elif operator_name == \"eq\":\n return Eq\n elif operator_name == \"in\":\n return In\n elif operator_name == \"range\":\n return Range\n elif operator_name == \"contains\":\n return Contains\n elif operator_name == \"startswith\":\n return StartsWith\n elif operator_name == \"endswith\":\n return EndsWith\n\n raise Exception(\"Invalid operator name {0}\".format(operator_name))",
"def op(operator):\n return a(Token('OP', operator)) >> tok_to_value",
"def get_operator(self, operator_id):\n return self.operators.get_operator(operator_id, self)",
"def create_operation(model, fieldname, operator, argument, relation=None):\n opfunc = OPERATORS.get(operator)\n field = getattr(model, relation or fieldname, None)\n if opfunc and field:\n return opfunc(field, argument)",
"def create_unary_operator(operator_name, left_op, lineno, col_offset):\n operator_symbol = operator_name_to_symbol(operator_name)\n op_name = core_language_copy.create_str(operator_symbol)\n operation_comment = create_src_comment(\"Applying the '{0}' unary operator\".format(operator_symbol), lineno)\n operator_call, result_var = create_temp_Assign(\n operators_copy.create_unary_operator(op_name, left_op, lineno, col_offset), lineno, col_offset)\n\n return flatten_lists(operation_comment, operator_call), result_var",
"def test_operator_get_operator(self):\n pass",
"def _lookup_operator(self, column_is_array, operator):\n if not column_is_array:\n return self._operators_scalar.get(operator) or self._extra_scalar_ops[operator]\n else:\n return self._operators_array.get(operator) or self._extra_array_ops[operator]",
"def _binary_op(self, operator, other):\n if isinstance(other, list):\n other = np.asarray(other)\n if isinstance(other, self.__class__):\n if self.check_dimensions(other):\n inner_res = operator(self._raw_ws, other._raw_ws)\n else:\n raise RuntimeError(\"workspaces must have same dimensionality for binary operations (+, -, *, /)\")\n elif isinstance(other, np.ndarray):\n inner_res = self._binary_op_array(operator, other)\n else:\n inner_res = operator(self._raw_ws, other)\n return self.rewrap(inner_res)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a type instance method for the given operator, applied when the instance appears on the right side of the expression.
|
def op_right(op):
def method(self, other):
return op(value_left(self, other), value_right(self, other))
return method
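
A companion sketch for op_right, under the same assumptions (Wrapped, value_left, and value_right are hypothetical fill-ins): Python invokes these reflected methods when the instance sits on the right-hand side and the left operand defers.

import operator

def value_left(self, other):
    # Hypothetical helpers (not in the snippet): pick out the left/right values.
    return other.value if isinstance(other, type(self)) else other

def value_right(self, other):
    return self.value

def op_right(op):
    def method(self, other):
        return op(value_left(self, other), value_right(self, other))
    return method

class Wrapped:
    def __init__(self, value):
        self.value = value

    # Reflected dunders: Python calls these when the left operand returns NotImplemented.
    __rsub__ = op_right(operator.sub)
    __rtruediv__ = op_right(operator.truediv)

print(10 - Wrapped(4))  # 6
print(9 / Wrapped(3))   # 3.0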
|
[
"def binary_operator(op):\n def _binary_operator(self, other):\n return_type = binop_return_type(op)\n if isinstance(self, NumExprFactor):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other\n )\n return return_type(\n \"({left} {op} {right})\".format(\n left=self_expr,\n op=op,\n right=other_expr\n ),\n binds=new_inputs\n )\n elif isinstance(other, NumExprFactor):\n commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))\n return commuted_method_getter(other)(self)\n elif isinstance(other, Term):\n if self is other:\n return return_type(\n \"x_0 {op} x_0\".format(op=op),\n binds=(self,)\n )\n return return_type(\n \"x_0 {op} x_1\".format(op=op),\n binds=(self, other)\n )\n elif isinstance(other, Number):\n return return_type(\n \"x_0 {op} ({constant)\".format(op=op, constant=other),\n binds=(self,)\n )\n raise BadBinaryOperator(op, self, other)\n return _binary_operator",
"def binary_op(cls, operator, a, b):\n return cls.binary_operators[operator](a, b)",
"def add_binary_numeric_operator(self, operator, result_cppclass=None,\n left_cppclass=None, right=None):\n operator = utils.ascii(operator)\n if not isinstance(operator, str):\n raise TypeError(\"expected operator name as string\")\n if operator not in ['+', '-', '*', '/']:\n raise ValueError(\"The operator %r is invalid or not yet supported by PyBindGen\" % (operator,))\n try:\n l = self.binary_numeric_operators[operator]\n except KeyError:\n l = []\n self.binary_numeric_operators[operator] = l\n if result_cppclass is None:\n result_cppclass = self\n if left_cppclass is None:\n left_cppclass = self\n\n if right is None:\n right = self\n elif isinstance(right, CppClass):\n pass\n else:\n if isinstance(right, str):\n right = utils.param(right, 'right')\n try:\n right = utils.eval_param(right, None)\n except utils.SkipWrapper:\n return\n\n op = (result_cppclass, left_cppclass, right)\n if op not in l:\n l.append(op)",
"def _invoke_binop_inference(instance, opnode, op, other, context, method_name):\n methods = dunder_lookup.lookup(instance, method_name)\n context = contextmod.bind_context_to_node(context, instance)\n method = methods[0]\n inferred = next(method.infer(context=context))\n if inferred is util.Uninferable:\n raise exceptions.InferenceError\n return instance.infer_binary_op(opnode, op, other, context, inferred)",
"def get_operator(cls, exp: Expression):\n if callable(exp.operator):\n return exp.operator\n\n if isinstance(exp.operator, str):\n try:\n return cls.OPERATORS[exp.operator.lower()]\n except KeyError:\n raise InvalidOperator(f\"No such operator {exp.operator!r}!\")\n\n raise InvalidOperator(\n f\"Please provide a valid operator or callable, not {exp.operator!r}!\"\n )",
"def _binary_op(self, operator, other):\n if isinstance(other, list):\n other = np.asarray(other)\n if isinstance(other, self.__class__):\n if self.check_dimensions(other):\n inner_res = operator(self._raw_ws, other._raw_ws)\n else:\n raise RuntimeError(\"workspaces must have same dimensionality for binary operations (+, -, *, /)\")\n elif isinstance(other, np.ndarray):\n inner_res = self._binary_op_array(operator, other)\n else:\n inner_res = operator(self._raw_ws, other)\n return self.rewrap(inner_res)",
"def _cast_other(binary_op):\r\n def cast_op(self, other):\r\n \"\"\"A wrapped binary operator that can handle non-Expression arguments.\r\n \"\"\"\r\n other = self.cast_to_const(other)\r\n return binary_op(self, other)\r\n return cast_op",
"def _apply_binary_operator(\n self,\n op: Callable[[Any, Any], Any],\n other: Any,\n right: bool = False,\n ) -> \"InitialCondition\":\n fn = lambda v: op(v, other)\n if right:\n fn = lambda v: op(other, v)\n\n return self.apply(fn)",
"def add_inplace_numeric_operator(self, operator, right=None):\n operator = utils.ascii(operator)\n if not isinstance(operator, str):\n raise TypeError(\"expected operator name as string\")\n if operator not in ['+=', '-=', '*=', '/=']:\n raise ValueError(\"The operator %r is invalid or not yet supported by PyBindGen\" % (operator,))\n try:\n l = self.inplace_numeric_operators[operator]\n except KeyError:\n l = []\n self.inplace_numeric_operators[operator] = l\n if right is None:\n right = self\n else:\n if isinstance(right, str):\n right = utils.param(right, 'right')\n try:\n right = utils.eval_param(right, None)\n except utils.SkipWrapper:\n return\n if right not in l:\n l.append((self, self, right))",
"def _bin_op(instance, opnode, op, other, context, reverse=False):\n if reverse:\n method_name = protocols.REFLECTED_BIN_OP_METHOD[op]\n else:\n method_name = protocols.BIN_OP_METHOD[op]\n return functools.partial(\n _invoke_binop_inference,\n instance=instance,\n op=op,\n opnode=opnode,\n other=other,\n context=context,\n method_name=method_name,\n )",
"def binary_op(type_spec: computation_types.Type) -> computation_types.Type:\n return reduction_op(type_spec, type_spec)",
"def _create_binary_operator(operator_func, description, list_kword=None):\n\n class _BinaryOperatorImpl(_BinaryOperator):\n \"\"\"Implements a binary operator specfication.\"\"\"\n\n def __init__(self, rvalue, key=None, description=description,\n default=DEFAULT_NOT_SET):\n _BinaryOperator.__init__(self, description, default, operator_func,\n rvalue, key, list_kword)\n return _BinaryOperatorImpl",
"def _lookup_operator(self, column_is_array, operator):\n if not column_is_array:\n return self._operators_scalar.get(operator) or self._extra_scalar_ops[operator]\n else:\n return self._operators_array.get(operator) or self._extra_array_ops[operator]",
"def op(operator):\n return a(Token('OP', operator)) >> tok_to_value",
"def operator_type(self) -> Optional[str]:\n return pulumi.get(self, \"operator_type\")",
"def evaluate(self, define_op, bin_op=_BINARY_OPERATOR_MAP, un_op=_UNARY_OPERATOR_MAP):\n \n if self.right:\n # binary or implicit operator\n op_text = self.operator[1] if self.operator else ''\n if op_text not in bin_op:\n raise DefinitionSyntaxError('missing binary operator \"%s\"' % op_text)\n left = self.left.evaluate(define_op, bin_op, un_op)\n return bin_op[op_text](left, self.right.evaluate(define_op, bin_op, un_op))\n elif self.operator:\n # unary operator\n op_text = self.operator[1]\n if op_text not in un_op:\n raise DefinitionSyntaxError('missing unary operator \"%s\"' % op_text)\n return un_op[op_text](self.left.evaluate(define_op, bin_op, un_op))\n else:\n # single value\n return define_op(self.left)",
"def get_operator(self, operator_id):\n return self.operators.get_operator(operator_id, self)",
"def typeof(self, expr):\n return self.generalize(self.w(expr))",
"def get_operator(operator_name):\n if operator_name == \"gt\":\n return Gt\n elif operator_name == \"gte\":\n return Gte\n elif operator_name == \"eq\":\n return Eq\n elif operator_name == \"in\":\n return In\n elif operator_name == \"range\":\n return Range\n elif operator_name == \"contains\":\n return Contains\n elif operator_name == \"startswith\":\n return StartsWith\n elif operator_name == \"endswith\":\n return EndsWith\n\n raise Exception(\"Invalid operator name {0}\".format(operator_name))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates an ics.Event object from the provided entry
|
def make_event( self, entry ):
e = ics.Event()
e.name = entry.name
e.begin = '%s %s' % (entry.date, entry.start)
e.end = '%s %s' % (entry.date, entry.end)
return e
|
[
"def add_event( self, entry: CalendarHelpers.DataStructures.Entry ):\n event = self.make_event( entry )\n self.calendar.events.append( event )",
"def _create_event_entry(event, originator, data):\n data = CaseLogger._format_data(data)\n event = Event(\n type=event.event_type.name,\n originator=originator,\n message=event.value.message,\n data=data)\n return event",
"def __create_event_copy(self, event):\n title = event.title\n when = event.when\n where = event.where\n content = event.content\n \n new_event = gdata.calendar.CalendarEventEntry()\n new_event.title = title\n new_event.content = content\n new_event.where = where\n new_event.when = when\n \n return new_event",
"def from_event(cls, event):\n return cls(\n uuid=event['uuid'],\n job_type=event['job_type'],\n event_type=event['type'],\n hostname=event['hostname'],\n pid=event['pid'],\n name=event['name'],\n workflow_id=event['workflow_id'],\n event_time=event['time'],\n duration=event['duration']\n )",
"def create_event(sub_event) -> any:\n content = {\n 'messagekey': sub_event,\n 'chat_id': 'None',\n 'timestampkey': 0\n }\n return content",
"def create_and_tag_entry(self, entry):\n return self._make_post_request(\"v3/entries\", data=entry)",
"def create_event(self, *args, **kwargs):\n return asyncio.Event(*args, **kwargs)",
"def create_event(neo_event,segment):\n event = models.Event()\n if neo_event.name is not None:\n event.name = neo_event.name\n if neo_event.description is not None:\n event.description = neo_event.description\n if neo_event.file_origin is not None:\n event.file_origin = neo_event.file_origin\n event.annotations = clean_annotations(neo_event.annotations)\n\n event.time = float(neo_event.time.rescale('s'))\n try:\n event.duration = float(neo_event.duration.rescale('s'))\n except AttributeError:\n pass\n\n event_type, created = models.EventType.objects.get_or_create(name=neo_event.label)\n\n event.label = event_type\n event.segment = segment\n\n return event",
"def create_event_from_msg(msg: EventMsgDict) -> Event:\n return create_event(msg)",
"def make_event(self, syncer):\n return modeldb_types.ExperimentEvent(self.experiment)",
"def _create_entry(self, start_time, end_time=None, user=None):\r\n data = {\r\n 'user': user or self.user,\r\n 'project': self.project,\r\n 'activity': self.activity,\r\n 'location': self.location,\r\n 'status': self.status,\r\n 'start_time': start_time,\r\n }\r\n if end_time:\r\n data['end_time'] = end_time\r\n return factories.Entry(**data)",
"def from_agile_dict(event):\n name = event['Name']\n showtimes = []\n for showing in event['CurrentShowings']:\n showtime = dateutil.parser.parse(showing['StartDate'])\n showtimes.append(showtime)\n\n return AgileEvent(name, showtimes)",
"def from_dict(event_dict):\n return CallbackEvent(callback_id=event_dict['callbackId'],\n name=event_dict['name'],\n creation_time=event_dict['time'],\n data=event_dict['data'])",
"def get_calendar_event(cal):\n for component in cal.walk():\n if component.name == 'VEVENT':\n summary = component.get('summary', ' ')\n description = component.get('description', ' ')\n location = component.get('location', ' ')\n startdt = component.get('dtstart').dt\n enddt = component.get('dtend').dt\n enddt = startdt if enddt.day > startdt.day else enddt\n return {\n 'summary': '{} - {}'.format(summary, location),\n 'location': '{} === {}'.format(location, description),\n 'description': description,\n 'start': {\n 'dateTime': parse_event_time(startdt),\n 'timeZone': 'America/Los_Angeles',\n },\n 'end': {\n 'dateTime': parse_event_time(enddt),\n 'timeZone': 'America/Los_Angeles',\n },\n }\n # there should always be a VEVENT in the icalendar event\n raise ValueError('No VEVENT component found in icalendar event.')",
"def createEvent(self):\n\n raise NotImplementedError( \"Should have implemented this\" )",
"def event_from_row(row_soup, day):\n ev = Event()\n ev.add('dtstart', day)\n ev.add('dtend', day+oneday)\n insider = row_soup.find('img', {'class' : 'CalendarDnDIImage'}) is not None\n prefix = \"\"\n span = row_soup.find('span', {'class' : 'CalendarPrefix'})\n if span is not None:\n prefix += span.contents[0] + ' '\n a = row_soup.find('a', {'class' : 'CalendarEvent'})\n if a is not None:\n url = base_url + a['href']\n ev.add(\"url\", url)\n ev.add(\"description\", url)\n else:\n a = row_soup.find('a', {'class' : 'CalendarEventNoLink'})\n \n title = a.contents[0]\n ev.add(\"summary\", prefix+title)\n return ev",
"def from_json(cls, event):\n # type: (Any) -> EventGridEvent\n dict_event = _get_json_content(event)\n return cast(EventGridEvent, EventGridEvent.from_dict(dict_event))",
"def build(cls, rule):\n\n if isinstance(rule, cls):\n return rule\n\n rule = parse(rule)\n assert isinstance(rule, dict), f'Not a valid rule: {rule}'\n type = get_event_class_by_type(rule.pop('type') if 'type' in rule else 'Event')\n\n args = {}\n for key, value in rule.items():\n args[key] = value\n\n return cls(type=type, **args)",
"def load_cal(entry: dict) -> Calendar:\n\n if \"cache\" in entry and entry[\"cache\"]:\n print(\"Getting\", entry[\"name\"], \"from cache\")\n try:\n return get_from_cache(entry)\n except FileNotFoundError:\n return Calendar()\n\n else:\n print(\"Getting\", entry[\"name\"], \"from remote\")\n r = requests.get(entry[\"url\"], allow_redirects=True)\n if \"encoding\" in entry:\n cal = Calendar(imports=r.content.decode(encoding=entry[\"encoding\"]))\n else:\n cal = Calendar(imports=r.content.decode())\n\n cal = horodate(cal, 'Downloaded at')\n return cal"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates an event from the entry and adds it to the calendar
|
def add_event( self, entry: CalendarHelpers.DataStructures.Entry ):
event = self.make_event( entry )
self.calendar.events.append( event )
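
A rough sketch of the surrounding workflow, assuming the third-party ics package. The snippet's events.append() implies a list-like container (older ics releases or a custom calendar wrapper); ics 0.7 exposes a set and uses events.add(), which is what this sketch follows. Entry is a hypothetical stand-in for CalendarHelpers.DataStructures.Entry.

from collections import namedtuple
import ics

# Hypothetical stand-in for CalendarHelpers.DataStructures.Entry.
Entry = namedtuple("Entry", ["name", "date", "start", "end"])

class ScheduleExporter:
    def __init__(self):
        self.calendar = ics.Calendar()

    def make_event(self, entry):
        e = ics.Event()
        e.name = entry.name
        e.begin = '%s %s' % (entry.date, entry.start)
        e.end = '%s %s' % (entry.date, entry.end)
        return e

    def add_event(self, entry):
        event = self.make_event(entry)
        self.calendar.events.add(event)  # ics 0.7 stores events in a set

exporter = ScheduleExporter()
exporter.add_event(Entry("Standup", "2024-01-15", "09:00", "09:15"))
exporter.add_event(Entry("Review", "2024-01-15", "14:00", "15:00"))

# An ics 0.7 Calendar iterates over its serialized lines.
with open("schedule.ics", "w") as f:
    f.writelines(exporter.calendar)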
|
[
"def _create_entry(self, start_time, end_time=None, user=None):\r\n data = {\r\n 'user': user or self.user,\r\n 'project': self.project,\r\n 'activity': self.activity,\r\n 'location': self.location,\r\n 'status': self.status,\r\n 'start_time': start_time,\r\n }\r\n if end_time:\r\n data['end_time'] = end_time\r\n return factories.Entry(**data)",
"def make_event( self, entry ):\n e = ics.Event()\n e.name = entry.name\n e.begin = '%s %s' % (entry.date, entry.start)\n e.end = '%s %s' % (entry.date, entry.end)\n return e",
"def create_and_tag_entry(self, entry):\n return self._make_post_request(\"v3/entries\", data=entry)",
"def add_entry(data):\r\n text = input('Type your entry, <enter> to exit: \\n')\r\n date = today()\r\n timestamp = timestamp_now()\r\n journal.add_entry(text, data)\r\n journal.add_entry(date, data)\r\n journal.add_entry(timestamp, data)\r\n entries = []\r\n entries.extend(data)",
"def add_entry(self, entry):\n self.entries.update(entry)\n self.save()",
"def make_entry(self, **kwargs):\r\n valid = kwargs.get('valid', True)\r\n if valid:\r\n default_start = self.good_start\r\n default_end = self.good_end\r\n else:\r\n default_start = self.bad_start\r\n default_end = self.bad_end\r\n user = kwargs.get('user', self.user)\r\n start = kwargs.get('start_time', default_start)\r\n if 'end_time' in kwargs:\r\n end = kwargs.get('end_time', default_end)\r\n else:\r\n if 'start_time' in kwargs:\r\n end = start + relativedelta(minutes=10)\r\n else:\r\n end = default_end\r\n data = self.default_data\r\n data.update({\r\n 'user': user,\r\n 'start_time': start,\r\n 'end_time': end,\r\n })\r\n factories.Entry(**data)",
"def __create_event_copy(self, event):\n title = event.title\n when = event.when\n where = event.where\n content = event.content\n \n new_event = gdata.calendar.CalendarEventEntry()\n new_event.title = title\n new_event.content = content\n new_event.where = where\n new_event.when = when\n \n return new_event",
"def _create_event_entry(event, originator, data):\n data = CaseLogger._format_data(data)\n event = Event(\n type=event.event_type.name,\n originator=originator,\n message=event.value.message,\n data=data)\n return event",
"def create_log_entry(self, log_entry_form):\n return # osid.logging.LogEntry",
"def import_entry(self, entry, feed_obj):\n self.logger.debug(\"Importing entry... %s\" % feed_obj.feed_url)\n\n fields = self.post_fields_parsed(entry, feed_obj)\n post = Post.objects.update_or_create(feed_obj, **fields)\n\n if self.include_enclosures:\n post.enclosures.add(*(self.get_enclosures(entry) or []))\n if self.include_categories:\n post.categories.add(*(self.get_categories(entry) or []))\n\n self.logger.debug(\"Post successfully imported... %s\" % (\n feed_obj.feed_url))\n\n return post",
"def add_entry(self, entry_object):\n self.entries.append(entry_object)",
"def createCalendar(self, account):\n if self.calendar:\n cal = calendar.get_calendar(account, self.calendar)\n\n if cal:\n return cal \n\n # we need to create a calendar for this item and then save the name\n # We will use the name lab.equipment_type.equipment_name, using the IDStrings\n cal_name = \"%s.%s.%s\" % (self.laboratory,self.equipment_type,name_to_idstring(self.name))\n\n try:\n cal = calendar.add_calendar(account, cal_name)\n\n except calendar.DuplicateCalendarError:\n # we have already made this calendar :-)\n cal = calendar.get_calendar_by_name(account, name)\n\n if cal:\n self.calendar = cal.idstring\n item = self._getFromDB()\n if item:\n item.calendar = cal.idstring\n item.put()\n else:\n raise calendar.ConnectionError(\"\"\"Failed to create the calendar '%s' for equipment item '%s'\"\"\" % \\\n (cal_name,self.name), detail=self)\n\n return cal",
"def get_new_entry(self):",
"def test_new_entry(new_entry):\n\n assert new_entry.title == 'Laptops'\n assert new_entry.body == 'Because they are portable'\n assert isinstance(new_entry.created_at, dt)",
"def insertException(self, calException):\n date = calException.day.date()\n if (calException is None or\n date < self.valid_from.date() or\n date > self.valid_until.date()):\n return\n # First delete the old CalendarEntry, then add the new corresponding one\n self.deleteBaseCalendarEntries(from_date=date, to_date=date)\n chg_ctrl = CalendarEntry.MakeChangeControlAttributes()\n CalendarEntry.Create(day=calException.day,\n day_type_id=calException.day_type_id,\n description=calException.description,\n calendar_profile_id=self.cdb_object_id,\n weekday=date2weekday(date),\n cdb_cpersno=chg_ctrl['cdb_cpersno'],\n cdb_mpersno=chg_ctrl['cdb_mpersno'],\n cdb_cdate=chg_ctrl['cdb_cdate'],\n cdb_mdate=chg_ctrl['cdb_mdate'])",
"def _add_entry ( self, new_entry ):\n self._entries [ ( new_entry.filetype, new_entry.filename ) ] = new_entry\n self.dirty = True",
"def add(self):\n self.clear()\n task_name = self.take_task()\n minutes = self.take_min()\n notes = self.take_notes()\n\n date = datetime.datetime.now().strftime(\"%m/%d/%y\")\n# Check if csv exists yet\n if os.path.exists('./entries.csv'):\n row_count = 0\n with open('entries.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n row_count += 1\n\n with open('entries.csv', 'a', newline='') as csvfile:\n\n entrywriter = csv.DictWriter(csvfile,\n fieldnames=self.fieldnames)\n entrywriter.writerow({'row_number': row_count,\n 'date': date, 'task': task_name,\n 'minutes': minutes, 'notes': notes})\n else:\n with open('entries.csv', 'a', newline='') as csvfile:\n row_count = 0\n entrywriter = csv.DictWriter(csvfile,\n fieldnames=self.fieldnames)\n entrywriter.writeheader()\n entrywriter.writerow({'row_number': row_count,\n 'date': date, 'task': task_name,\n 'minutes': minutes, 'notes': notes})\n alert = \"Entry Added\\n\"\n self.menu(alert)",
"def add_table_entry(self, table_id, table_entry):",
"def create_entries(self, date, status):\r\n factories.Entry(**{\r\n 'user': self.user,\r\n 'start_time': date,\r\n 'end_time': date + relativedelta(hours=1),\r\n 'status': status\r\n })\r\n factories.Entry(**{\r\n 'user': self.user,\r\n 'start_time': date + relativedelta(hours=2),\r\n 'end_time': date + relativedelta(hours=3),\r\n 'status': status\r\n })"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read the content of a Parquet file into a Pandas DataFrame.
|
def read_parquet(filename, columns=None, index=None):
pf = ParquetFile(filename)
return pf.to_pandas(columns=columns, index=index)
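
A hedged round-trip example, assuming ParquetFile here comes from the fastparquet package (its import is not shown in the snippet); pandas.read_parquet would be the higher-level alternative.

import pandas as pd
from fastparquet import ParquetFile, write

def read_parquet(filename, columns=None, index=None):
    pf = ParquetFile(filename)
    return pf.to_pandas(columns=columns, index=index)

# Write a small frame with fastparquet, then read back a column subset.
df = pd.DataFrame({"id": [1, 2, 3], "name": ["a", "b", "c"], "score": [0.1, 0.2, 0.3]})
write("example.parquet", df)

subset = read_parquet("example.parquet", columns=["id", "name"])
print(subset)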
|
[
"def read(\n cls, file_path: str, unflatten_kwargs: dict = None, **read_kwargs\n ) -> pd.DataFrame:\n return pd.read_parquet(path=file_path, **read_kwargs)",
"def read_dataframe_from_bucket(file_path: str) -> pd.DataFrame:\n return pd.read_parquet(file_path)",
"def read_file(self, file_name):\r\n # full_path = os.path.join(self.corpus_path +'/' + file_name)\r\n df = pd.read_parquet(self.corpus_path+'/'+file_name, engine=\"pyarrow\")\r\n return df",
"def _data_from_parquet(path):\n data = pd.read_parquet(path)\n\n # Load back LISA metadata into \"df.attrs\", as they were written in\n # _data_to_parquet()\n if isinstance(data, pd.DataFrame):\n schema = pyarrow.parquet.read_schema(path)\n attrs = schema.metadata.get(b'lisa', '{}')\n data.attrs = json.loads(attrs)\n\n return data",
"def read_sample_dataframe():\n if not hasattr(read_sample_dataframe, \"df\"):\n parquet_file = Path(__file__).parent / \"data\" / \"parquet\" / \"singlefile.parquet\"\n backend = dframeio.ParquetBackend(str(parquet_file.parent))\n read_sample_dataframe.df = backend.read_to_pandas(parquet_file.name)\n return read_sample_dataframe.df.copy()",
"def read_parquet(cls, path, *, columns=[], strings_as_object=inf, dtypes={}):\n import pyarrow.parquet as pq\n columns = columns or None\n data = pq.read_table(path, columns=columns)\n return cls.from_arrow(data, strings_as_object=strings_as_object, dtypes=dtypes)",
"def read_parquet_file_from_ADLS(\n adls_connection_client, container: str, file_name: str, base_path: str = \"/\"\n):\n downloaded_bytes = download_from_ADLS(\n adls_connection_client, container, file_name, base_path\n )\n processed_df = get_parquet_df_from_contents(downloaded_bytes)\n return processed_df",
"def load_file_to_dataframe(self, file_path: str) -> pd.DataFrame:\n file_extension = os.path.splitext(file_path)[-1].lower()\n if file_extension == \".json\":\n return pd.read_json(file_path)\n elif file_extension == \".jsonl\":\n return pd.read_json(file_path, lines=True)\n elif file_extension == \".tsv\":\n return pd.read_table(file_path)\n elif file_extension in {\".csv\", \".data\"}:\n return pd.read_csv(file_path)\n elif file_extension in {\".parquet\", \".pq\", \".pqt\"}:\n return pd.read_parquet(file_path)\n else:\n raise ValueError(f\"Unsupported dataset file type: {file_extension}\")",
"def get_raw_dataset(path):\n all_files = glob.glob(path + \"/*.parquet\")\n files = []\n for filename in tqdm(all_files):\n df = pd.read_parquet(filename, engine=\"fastparquet\")\n files.append(df)\n df = pd.concat(files, axis=0, ignore_index=True)\n return df",
"def read_parquet( # noqa: PLR0913\n filename,\n class_name=\"PandasDataset\",\n module_name=\"great_expectations.dataset\",\n dataset_class=None,\n expectation_suite=None,\n profiler=None,\n *args,\n **kwargs,\n):\n import pandas as pd\n\n df = pd.read_parquet(filename, *args, **kwargs)\n if dataset_class is not None:\n return _convert_to_dataset_class(\n df=df,\n dataset_class=dataset_class,\n expectation_suite=expectation_suite,\n profiler=profiler,\n )\n else:\n return _load_and_convert_to_dataset_class(\n df=df,\n class_name=class_name,\n module_name=module_name,\n expectation_suite=expectation_suite,\n profiler=profiler,\n )",
"def read_parquet(cls, paths):\n return cls(Table._read_parquet(paths))",
"def convert_to_df(path):\n return pd.read_csv(path, sep='\\t')",
"def convert_to_parquet(self, df: pd.DataFrame) -> pd.DataFrame:\n self.logger.info(\n \"Start: Convert dataframe into parquet format\")\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n df.to_parquet(df.name + '_pq.parquet.gzip', compression='gzip')\n df_pq = pd.read_parquet(df.name + '_pq.parquet.gzip')\n self.logger.info(\"Finished: Converted dataframe into parquet format.\")\n return df_pq",
"def _read_into_dataframe(self):\n if(self._filename.endswith('.csv') or self._filename.endswith('.tsv')):\n separator = define_separator(self._filename)\n self._data = read_csv(self._filename, sep=separator)\n else:\n raise NotImplementedError(\"File formats different from ['csv', 'tsv'] are not implemented yet.\")",
"def read_avro(filepath: str, encoding='rb') -> pd.DataFrame:\n with open(filepath, encoding) as fp:\n reader = fastavro.reader(fp)\n records = [r for r in reader]\n return pd.DataFrame.from_records(records)",
"def from_parquet(self,columns,partitioning,filters,filesystem):\n table = pq.read_table(\n source = self.dirPaths,\n columns = columns,\n partitioning = partitioning,\n filters=filters,\n filesystem =filesystem \n )\n return from_arrow_table(table,self.timeSeriesCol,self.mainCategoryCol)",
"def from_parquet(dirPath, timeSeriesCol, mainCategoryCol,columns=None,partitioning='hive',filters=None,filesystem=None):\n pio = Parquet_IO(dirPath,None, timeSeriesCol, mainCategoryCol)\n return pio.from_parquet(columns,partitioning,filters,filesystem)",
"def read_metadata_table(path: str) -> Optional[pd.DataFrame]:\n FILE_EXT_TO_PD_READ_FUNC = {\n '.tab': pd.read_table,\n '.tsv': pd.read_table,\n '.csv': pd.read_csv\n }\n _, file_ext = os.path.splitext(os.path.basename(path))\n file_ext = file_ext.lower()\n if file_ext not in FILE_EXT_TO_PD_READ_FUNC:\n logging.error('File extension of metadata file \"{}\" not one of the expected \"{}\"'.format(\n path,\n list(FILE_EXT_TO_PD_READ_FUNC.keys())\n ))\n return None\n dfmd: pd.DataFrame = FILE_EXT_TO_PD_READ_FUNC[file_ext](path)\n assert np.any(dfmd.columns == 'subtype'), 'Column with name \"subtype\" expected in metadata file \"{}\"'.format(path)\n dfmd.subtype.fillna('#N/A', inplace=True)\n dfmd.subtype = dfmd.subtype.astype(str)\n logging.info('Read scheme metadata file \"{}\" into DataFrame with shape {}'.format(path, dfmd.shape))\n return dfmd",
"def test_read_to_pandas_some_columns(sample_data_path):\n backend = dframeio.ParquetBackend(str(sample_data_path.parent))\n df = backend.read_to_pandas(sample_data_path.name, columns=[\"id\", \"first_name\"])\n SampleDataSchema.to_schema().select_columns([\"id\", \"first_name\"]).validate(df)\n assert len(df) == SampleDataSchema.length()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Overwrite current damage and return self.
|
def with_attack(self, damage: int) -> object:
self.damage = damage
return self
|
[
"def take_damage(self, damage):\n # self.current_health -= self.defend(damage)\n # return self.current_health",
"def setDamage(self, damage):\n getHandle().setDamage(float(damage))",
"def harm(self, damage):\n\n print \"%d damage was done to %s\" % (damage, self.id)\n self.health -= damage\n if self.health <= 0:\n self.health = 0\n self.die()",
"def damage(self):\n return self._damage",
"def _INVALID_setDamage(self, damage):\n self.setDamage(damage)",
"def take_damage(self, damage):\n if damage <= 0: return\n self.hit_points[0] = max( self.hit_points[0] - damage, 0 )",
"def set_damage(self, damage: PixmanRegion32) -> None:\n lib.wlr_output_set_damage(self._ptr, damage._ptr)",
"def take_damage(self, damage: int):\n if random.random() >= self.dodge_change:\n super().take_damage(damage)",
"def causeDamage(self, damage):\n self.setCondition(self.getCondition() - damage)\n if self.getCondition() <=0:\n raise ShipSunk",
"def takeDamage(self):\n\t\tif self._invincible_time <= 0:\n\t\t\tself.health -= 1\n\t\t\t# make player invincible\n\t\t\tself._invincible_time = 2500 # invincible for 2.5s",
"def apply_damage_modifier(self, modifier):\r\n if not self.ignores_damage_mod:\r\n self.damage_this_tick *= modifier",
"def take_damage(self, damage_amt):\n self.health -= damage_amt\n if self.health <= 0:\n self.deaths += 1",
"def sting(self, dragon):\n dragon.reduce_armor(self.damage)",
"def clear_damage(self):\n\n self.damaged.fill(False)\n self.all_damaged = False",
"def getDamage(self):\n return getHandle().getDamage()",
"def apply_damage_to(self, target):\r\n target.apply_damage(self.damage_this_tick)\r\n self.average_damage += self.damage_this_tick\r\n self.num_casts += 1",
"def calculate_damage(self, bird):\n dmg = bird.mass * bird.speed()**2\n self.strength-=dmg\n if self.strength<0:\n self.strength=0",
"def attack(self,enemy):\n enemy.takeDamage(self.damage)",
"def damage(self) -> float:\n operators_experience = sum([operator.experience\n for operator in self._operators])\n return 0.1 + operators_experience / 100"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Overwrite current cooldown and return self.
|
def with_cooldown(self, cooldown: int) -> object:
self.cooldown = cooldown
return self
|
[
"def cooldown(self, value):\n\n pass",
"def other_cooldown(self, value):\n\n pass",
"def other_cooldown(self):\n\n if self._other == None:\n return None\n\n return self._other_cooldown",
"def start_cooldown(self):\r\n self.cooldown_timer = 0\r\n self.cast_timer = 1\r\n self.casting = False",
"def on_update(self) -> None:\n if self.__cooldown_tick == self.cooldown:\n self.__cooldown_tick = 0\n else:\n self.__cooldown_tick += 1",
"def tick_cooldowns(self):\n\n if self.bullet_cooldown > 0:\n self.bullet_cooldown -= 1",
"def reset_cooldown(self, inter: ApplicationCommandInteraction) -> None:\n if self._buckets.valid:\n bucket = self._buckets.get_bucket(inter) # type: ignore\n bucket.reset()",
"async def ccooldown(self, ctx, delay : int = None):\r\n \r\n channel = ctx.message.channel\r\n author = ctx.message.author\r\n server = ctx.message.guild\r\n\r\n # Only allow owner\r\n isOwner = self.settings.isOwner(ctx.author)\r\n if isOwner == None:\r\n return\r\n elif isOwner == False:\r\n msgText = [\"Hus hus, jangan main main sama command ini\",\r\n \"Command ini bahaya loh dek, jangan main main!\",]\r\n msg = random.choice(msgText)\r\n em = discord.Embed(color = 0XFF8C00, description = msg)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n await ctx.send(embed = em)\r\n return\r\n\r\n # Get current delay\r\n currDelay = self.settings.getGlobalStat(\"CommandCooldown\",self.commandCooldown)\r\n \r\n if delay == None:\r\n if currDelay == 1:\r\n msg = 'Cooldown untuk semua command telah dirubah menjadi *1 Detik*'\r\n em = discord.Embed(color = 0XFF8C00, description = msg)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n await ctx.channel.send(embed = em)\r\n else:\r\n msg = 'Cooldown untuk semua command telah dirubah menjadi *{} Detik.*'.format(currDelay)\r\n em = discord.Embed(color = 0XFF8C00, description = msg)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n await ctx.channel.send(embed = em)\r\n return\r\n \r\n try:\r\n delay = int(delay)\r\n except Exception:\r\n msg = 'Cooldown harus berupa angka.'\r\n em = discord.Embed(color = 0XFF8C00, description = msg)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n await ctx.channel.send(embed = em)\r\n return\r\n \r\n if delay < 0:\r\n msg = 'Cooldown harus lebih dari *0 detik*.'\r\n em = discord.Embed(color = 0XFF8C00, description = msg)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n await ctx.channel.send(embed = em)\r\n return\r\n\r\n if delay > self.maxCooldown:\r\n if self.maxCooldown == 1:\r\n msg = 'Cooldown tidak dapat lebih dari *1 detik*.'\r\n em = discord.Embed(color = 0XFF8C00, description = msg)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n await ctx.channel.send(embed = em)\r\n else:\r\n msg = 'Cooldown tidak dapat lebih dari *{} detik*.'.format(self.maxCooldown)\r\n em = discord.Embed(color = 0XFF8C00, description = msg)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n await ctx.channel.send(embed = em)\r\n return\r\n \r\n self.settings.setGlobalStat(\"CommandCooldown\",delay)\r\n if delay == 1:\r\n msg = 'Cooldown untuk semua command telah dirubah menjadi *1 Detik*.*'\r\n em = discord.Embed(color = 0XFF8C00, description = msg)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n await ctx.channel.send(embed = em)\r\n else:\r\n msg = 'Cooldown untuk semua command telah dirubah menjadi *{} Detik.*'.format(delay)\r\n em = discord.Embed(color = 0XFF8C00, description = msg)\r\n em.set_footer(text = \"{}\".format(ctx.author), icon_url = \"{}\".format(ctx.author.avatar_url))\r\n await ctx.channel.send(embed = em)",
"def add_in_cooldown(self, author: discord.User, cmd: Command):\n if author.id in self._cooldown.keys():\n self._cooldown[author.id].append(cmd.name)\n else:\n self._cooldown[author.id] = [cmd.name]\n threading.Timer(cmd.cooldown, self.delete_of_cooldown, [author, cmd.name]).start()",
"def reset_remaining(self):\n self.time_remaining = self.lifetime",
"def attack(self, target: Health) -> None:\n if self.__cooldown_tick == 0:\n target.apply_damage(self.damage)\n if self.dot > 0: target.apply_dot(self.dot, self.dot_ticks)",
"def reset_cooldown(channel_name: str) -> None:\n if channel_name[0] == \"#\":\n channel_name = channel_name[1:]\n log.error(\"Someplace in the code is using channels with #.\")\n cooldowns[channel_name] = datetime.datetime.utcnow()",
"def cd(self, mask, target, args):\n if not (yield from self.__isNickservIdentified(mask.nick)):\n return\n get, set, timer, time = args.get('get'), args.get('set'), args.get('<timer>'), args.get('<time>')\n global TIMERS, DEFAULTCD\n if get:\n if timer:\n self.bot.privmsg(mask.nick, 'The cooldown for \"' + timer + '\" is set to ' + str(TIMERS.get(timer, DEFAULTCD)))\n else:\n for key in TIMERS.keys():\n self.bot.privmsg(mask.nick, 'The cooldown for \"' + key + '\" is set to ' + str(TIMERS.get(key, DEFAULTCD)))\n if set:\n TIMERS[timer] = int(time)\n self.__dbAdd(['timers'], timer, TIMERS[timer], save=True)\n self.bot.privmsg(mask.nick, 'The cooldown for \"' + timer + '\" is now changed to ' + str(TIMERS[timer]))",
"def copy(self) -> 'Retry':\n return Retry(max_tries=self.max_tries, delay=self.delay, backoff=self.backoff,\n max_jitter=self.max_jitter / 100.0, max_delay=int(self.max_delay), sleep_func=self.sleep_func,\n deadline=self.deadline, retry_exceptions=self.retry_exceptions)",
"def new_delay(*args, **kwargs):\n if self.TIMESTAMP_KWARG not in kwargs:\n kwargs[self.TIMESTAMP_KWARG] = datetime.datetime.utcnow()\n self._original_delay(*args, **kwargs)",
"async def cooldown_user(\n self, guild_id: int, giver_id: int, receiver_id: int\n ) -> None:\n self._members_on_cooldown[guild_id][giver_id].append(receiver_id)\n single_action_timer = KarmaSingleActionTimer(\n self.remove_from_cooldown,\n int(config[\"cooldown\"]),\n guild_id,\n giver_id,\n receiver_id,\n )\n self._running_timers[guild_id][giver_id][receiver_id] = single_action_timer\n await single_action_timer.start()",
"async def timeout(self, ctx, *, members = None, cooldown = None, reason = None):\r\n await self.mute_timeout(ctx,members,\"timeout\")",
"def hurt_player(self):\n \n self.health = self.health - 1\n self._check_if_dead()",
"def takeDamage(self):\n\t\tif self._invincible_time <= 0:\n\t\t\tself.health -= 1\n\t\t\t# make player invincible\n\t\t\tself._invincible_time = 2500 # invincible for 2.5s"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Overwrite current dot and dot_ticks. Return self.
|
def with_dot(self, damage: int, ticks: int) -> object:
self.dot = damage
self.dot_ticks = ticks
return self
|
[
"def draw(self):\n super().draw()\n dot(self.prop['dotSize'], self.prop['dotColor'])",
"def plot_dot(self, frame_index):\n\n if self.dot is not None:\n self.dot.remove()\n self.dot = plt.scatter(self.x[frame_index], self.y[frame_index], s=20, color='red')\n self.fig.canvas.draw()\n self.fig.canvas.flush_events()",
"def reset_dash_and_dot_structures(self):\n\n dash_len = self._compute_a_smallish_size_in_math_units()\n dash_len_px = self._rescale_length(dash_len)\n dot_sep = max((dash_len_px / 2, 2 * self.stroke_width))\n self.set_dash_dash_structure(dash_len_px, dash_len_px, units='svg')\n self.set_dash_dot_structure(dot_sep, units='svg')\n self.dasharray_dasharray_svgpx = [ dash_len_px, 1 ]",
"def setup_ticks(self):\r\n ndana = self.zavrsnoVrijeme - self.pocetnoVrijeme\r\n #major ticks\r\n majorLocator = HourLocator(interval=ndana.days+1)\r\n majorFormat = DateFormatter('%H:%M')\r\n #minor ticks\r\n minorLocator = AutoMinorLocator(n=4)\r\n minorFormat = NullFormatter()\r\n\r\n self.axes.xaxis.set_major_locator(majorLocator)\r\n self.axes.xaxis.set_major_formatter(majorFormat)\r\n self.axes.xaxis.set_minor_locator(minorLocator)\r\n self.axes.xaxis.set_minor_formatter(minorFormat)\r\n\r\n self.fig.autofmt_xdate()\r\n allXLabels = self.axes.get_xticklabels(which='both') #dohvati sve labele\r\n for label in allXLabels:\r\n #label.set_rotation(30)\r\n label.set_fontsize(8)",
"def reset_points(self):\n\n self.deleted_lines = 0\n self.lines_text.set(f\"Deleted lines: {self.deleted_lines}\")\n\n self.points = 0\n self.points_text.set(f\"Points: {self.points}\")\n\n self.level = 1\n self.delay = 500",
"def set_dayu_dot(self, show):\n self._dot = show\n self._badge_button.setText(\"\")\n self._badge_button.setVisible(show)\n self.style().polish(self)",
"def __set_trig_plot(self):\n if self.interval % 2 == 1 or self.interval <= 0:\n raise Exception('interval must be an even number > 0.')\n\n y_ticks = []\n for i in range(-5, 6):\n y_ticks.append(i)\n self.ax.set_yticks(y_ticks)\n self.ax.set_xticks(self.__trig_x_ticks())\n self.ax.set_xticklabels(self.__trig_x_ticklabels())\n return",
"def reset_interface(self):\n self.data.data = {'y': np.array([]), 'height': np.array([]), 'right': np.array([])}\n if self.include_nums:\n self.label_source.data = {'x': np.array([]), 'y': np.array([]), 'text': np.array([])}\n self.fig.yaxis.ticker.ticks = []",
"def get_tick(self):\n try:\n self['md_tick'] = self['md_bottom'] - self['md_top']\n except:\n pass\n\n try:\n self['tvd_tick'] = self['tvd_bottom'] - self['tvd_top']\n except:\n pass\n \n return self",
"def set_dash_dot_structure(self, dot_sep, units = 'math'):\n\n self.dot_dasharray_svgpx = (self.stroke_width, self._convert_length_to_svg(units, dot_sep))",
"def __set_exp_plot(self):\n y_ticks = []\n for i in range(-5, 6):\n y_ticks.append(i)\n self.ax.set_yticks(y_ticks)",
"def _copy_tick_props(self, src, dest):\n if src is None or dest is None:\n return\n super()._copy_tick_props(src, dest)\n\n # Ensure that tick transforms are independent so that padding works.\n trans = dest._get_text1_transform()[0]\n dest.label1.set_transform(trans + dest._text1_translate)\n trans = dest._get_text2_transform()[0]\n dest.label2.set_transform(trans + dest._text2_translate)",
"def update_plot_tick_labels(plot, data):\n plot.set_xticks(np.arange(0, len(data[0]), 5))\n plot.set_xticklabels(np.arange(1, len(data[0]) + 1, 5))\n plot.set_yticks(np.arange(0, len(data), 2))\n plot.set_yticklabels(np.arange(1, len(data) + 1, 2))",
"def ChangeDefaultUnits(self, inst=None):\n # convert plot units to new units\n units = [FormatUnits(x.units) for x in self.labels]\n for x in units:x._magnitude=1.0\n \n # create new labels\n labels = [x.copy() for x in self.labels]\n for n in range(len(labels)):\n labels[n].units = units[n]\n \n # convert plot data to new units\n for x in self.plots:\n x.array.Rescale(labels,self.labels)\n x.SetData(x.array)\n \n # set new labels\n self.labels = labels\n \n # update\n wx.CallAfter(self.Update)",
"def standardize_ticks(self, xbase=None, ybase=None):\n if xbase is None:\n if self.axs[0].xaxis.get_scale() == \"log\":\n xbase = max(\n ax.xaxis.get_ticklocs()[1] / ax.xaxis.get_ticklocs()[0]\n for ax in self.axs\n if ax.get_subplotspec().is_last_row()\n )\n else:\n xbase = max(\n ax.xaxis.get_ticklocs()[1] - ax.xaxis.get_ticklocs()[0]\n for ax in self.axs\n if ax.get_subplotspec().is_last_row()\n )\n if ybase is None:\n if self.axs[0].yaxis.get_scale() == \"log\":\n ybase = max(\n ax.yaxis.get_ticklocs()[1] / ax.yaxis.get_ticklocs()[0]\n for ax in self.axs\n if ax.get_subplotspec().is_first_col()\n )\n else:\n ybase = max(\n ax.yaxis.get_ticklocs()[1] - ax.yaxis.get_ticklocs()[0]\n for ax in self.axs\n if ax.get_subplotspec().is_first_col()\n )\n\n for ax in self.axs:\n if ax.get_subplotspec().is_first_col():\n if ax.yaxis.get_scale() == \"log\":\n ax.yaxis.set_major_locator(ticker.LogLocator(ybase))\n else:\n ax.yaxis.set_major_locator(ticker.MultipleLocator(ybase))\n if ax.get_subplotspec().is_last_row():\n if ax.xaxis.get_scale() == \"log\":\n ax.xaxis.set_major_locator(ticker.LogLocator(xbase))\n else:\n ax.xaxis.set_major_locator(ticker.MultipleLocator(xbase))",
"def _update_ticks(\n self, x, *, grid=None, gridminor=None,\n color=None, gridcolor=None, ticklen=None,\n tickloc=None, ticklabelloc=None, labelloc=None,\n tickdir=None, ticklabeldir=None, rotation=None,\n ):\n # Initial stuff\n axis = getattr(self, x + 'axis')\n sides = ('bottom', 'top') if x == 'x' else ('left', 'right')\n sides_active = tuple(side for side in sides if self.spines[side].get_visible())\n\n # Tick and grid settings for major and minor ticks separately\n # Override is just a \"new default\", but user can override this\n for which, igrid in zip(('major', 'minor'), (grid, gridminor)):\n # Tick properties\n # NOTE: This loads xtick.major.size, xtick.major.width,\n # xtick.major.pad, xtick.major.bottom, and xtick.major.top\n # For all the x/y major/minor tick types\n kwticks = rc.category(x + 'tick.' + which, context=True)\n if kwticks is None:\n kwticks = {}\n else:\n kwticks.pop('visible', None) # invalid setting\n if ticklen is not None:\n kwticks['size'] = units(ticklen, 'pt')\n if which == 'minor':\n kwticks['size'] *= rc['tick.lenratio']\n\n # Grid style and toggling\n name = 'grid' if which == 'major' else 'gridminor'\n if igrid is not None:\n axis.grid(igrid, which=which)\n kwgrid = rc.fill(\n {\n 'grid_color': name + '.color',\n 'grid_alpha': name + '.alpha',\n 'grid_linewidth': name + '.linewidth',\n 'grid_linestyle': name + '.linestyle',\n },\n context=True,\n )\n if gridcolor is not None: # override for specific x/y axes\n kwgrid['grid_color'] = gridcolor\n axis.set_tick_params(which=which, **kwgrid, **kwticks)\n\n # Tick and ticklabel properties that apply equally for major/minor lines\n # Weird issue causes set_tick_params to reset/forget grid is turned on if\n # you access tick.gridOn directly, instead of passing through tick_params.\n # Since gridOn is undocumented feature, don't use it. So calling _format_axes\n # a second time will remove the lines. 
First determine tick sides, avoiding\n # situation where we draw ticks on top of invisible spine.\n kw = {}\n loc2sides = {\n None: None,\n 'both': sides,\n 'none': (),\n 'neither': (),\n }\n if tickloc not in sides and any(self.spines[_].get_bounds() is not None for _ in sides): # noqa: E501\n tickloc = sides[0] # override to just one side\n ticklocs = loc2sides.get(tickloc, (tickloc,))\n if ticklocs is not None:\n kw.update({side: side in ticklocs for side in sides})\n kw.update({side: False for side in sides if side not in sides_active})\n\n # Tick label sides\n # Will override to make sure only appear where ticks are\n ticklabellocs = loc2sides.get(ticklabelloc, (ticklabelloc,))\n if ticklabellocs is not None:\n kw.update({'label' + side: (side in ticklabellocs) for side in sides})\n kw.update(\n {\n 'label' + side: False for side in sides\n if side not in sides_active\n or (ticklocs is not None and side not in ticklocs)\n }\n )\n\n # The axis label side\n if labelloc is None:\n if ticklocs is not None:\n options = tuple(\n side for side in sides if side in ticklocs and side in sides_active\n )\n if len(options) == 1:\n labelloc = options[0]\n elif labelloc not in sides:\n raise ValueError(\n f'Got labelloc {labelloc!r}, valid options are '\n + ', '.join(map(repr, sides)) + '.'\n )\n\n # Apply\n axis.set_tick_params(which='both', **kw)\n if labelloc is not None:\n axis.set_label_position(labelloc)\n\n # Tick label settings\n kw = rc.fill(\n {\n 'labelcolor': 'tick.labelcolor', # new props\n 'labelsize': 'tick.labelsize',\n 'color': x + 'tick.color',\n },\n context=True,\n )\n if color:\n kw['color'] = color\n kw['labelcolor'] = color\n\n # Tick label direction and rotation\n if tickdir == 'in': # ticklabels should be much closer\n kw['pad'] = 1.0\n if ticklabeldir == 'in': # put tick labels inside the plot\n tickdir = 'in'\n kw['pad'] = -rc[f'{x}tick.major.size'] - rc[f'{x}tick.major.pad']\n kw['pad'] -= rc._scale_font(rc[f'{x}tick.labelsize'])\n if tickdir is not None:\n kw['direction'] = tickdir\n axis.set_tick_params(which='both', **kw)\n\n # Settings that can't be controlled by set_tick_params\n # Also set rotation and alignment here\n kw = rc.fill(\n {\n 'fontfamily': 'font.family',\n 'weight': 'tick.labelweight'\n },\n context=True,\n )\n if rotation is not None:\n kw = {'rotation': rotation}\n if x == 'x':\n self._datex_rotated = True\n if rotation not in (0, 90, -90):\n kw['ha'] = ('right' if rotation > 0 else 'left')\n for t in axis.get_ticklabels():\n t.update(kw)",
"def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(LogLocator(self.base))\n axis.set_major_formatter(LogFormatterSciNotation(self.base))\n axis.set_minor_locator(LogLocator(self.base, self.subs))\n axis.set_minor_formatter(\n LogFormatterSciNotation(self.base,\n labelOnlyBase=(self.subs is not None)))",
"def addDotMarkAnchor(self):\n glyph = self.getGlyph(M_DOTA + 'comb')\n bounds = glyph.boundingBox()\n xpos = round((bounds[2] + bounds[0]) / 2)\n ypos = round((bounds[3] + bounds[1]) / 2)\n glyph.addAnchorPoint(ANCH_TOP_RIGHT, 'mark', xpos, ypos)",
"def states_dot(self) -> OptimizationVariableList:\n self._nlp.states_dot.node_index = self.node_index\n out = self._nlp.states_dot.unscaled\n out.current_cx_to_get = self.cx_index_to_get\n return out"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Attack target and apply dot if applicable.
|
def attack(self, target: Health) -> None:
if self.__cooldown_tick == 0:
target.apply_damage(self.damage)
if self.dot > 0: target.apply_dot(self.dot, self.dot_ticks)
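
A rough end-to-end sketch tying the builder methods (with_attack, with_cooldown, with_dot) to attack(); the Health class, its apply_damage/apply_dot methods, and the cooldown-tick initialization are assumptions reconstructed from the surrounding snippets, not part of the original.

class Health:
    def __init__(self, hp: int):
        self.hp = hp
        self.dots = []  # pending (damage, ticks) effects

    def apply_damage(self, damage: int) -> None:
        self.hp -= damage

    def apply_dot(self, damage: int, ticks: int) -> None:
        self.dots.append((damage, ticks))


class Weapon:
    def __init__(self):
        self.damage = 0
        self.cooldown = 0
        self.dot = 0
        self.dot_ticks = 0
        self.__cooldown_tick = 0  # assumption: 0 means the weapon is ready to fire

    def with_attack(self, damage: int) -> object:
        self.damage = damage
        return self

    def with_cooldown(self, cooldown: int) -> object:
        self.cooldown = cooldown
        return self

    def with_dot(self, damage: int, ticks: int) -> object:
        self.dot = damage
        self.dot_ticks = ticks
        return self

    def attack(self, target: Health) -> None:
        if self.__cooldown_tick == 0:
            target.apply_damage(self.damage)
            if self.dot > 0:
                target.apply_dot(self.dot, self.dot_ticks)


# Fluent construction, then one attack against a target.
sword = Weapon().with_attack(12).with_cooldown(3).with_dot(2, 4)
enemy = Health(100)
sword.attack(enemy)
print(enemy.hp, enemy.dots)  # 88 [(2, 4)]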
|
[
"def attack(self, data, target):\n B, K = data.shape[:2]\n data = data.float().cuda().detach()\n data = data.transpose(1, 2).contiguous()\n ori_data = data.clone().detach()\n ori_data.requires_grad = False\n\n # points and normals\n if ori_data.shape[1] == 3:\n normal = None\n else:\n normal = ori_data[:, 3:, :]\n ori_data = ori_data[:, :3, :]\n target = target.long().cuda().detach()\n\n # init variables with small perturbation\n adv_data = ori_data.clone().detach() + \\\n torch.randn((B, 3, K)).cuda() * 1e-7\n adv_data.requires_grad_()\n opt = optim.Adam([adv_data], lr=self.attack_lr, weight_decay=0.)\n\n adv_loss = torch.tensor(0.).cuda()\n dist_loss = torch.tensor(0.).cuda()\n\n total_time = 0.\n forward_time = 0.\n backward_time = 0.\n clip_time = 0.\n\n # there is no binary search in this attack\n # just longer iterations of optimization\n for iteration in range(self.num_iter):\n t1 = time.time()\n\n # forward passing\n logits = self.model(adv_data) # [B, num_classes]\n if isinstance(logits, tuple): # PointNet\n logits = logits[0]\n\n t2 = time.time()\n forward_time += t2 - t1\n\n # print\n pred = torch.argmax(logits, dim=1) # [B]\n success_num = (pred == target).sum().item()\n if iteration % (self.num_iter // 5) == 0:\n print('Iteration {}/{}, success {}/{}\\n'\n 'adv_loss: {:.4f}, dist_loss: {:.4f}'.\n format(iteration, self.num_iter, success_num, B,\n adv_loss.item(), dist_loss.item()))\n\n # compute loss and backward\n adv_loss = self.adv_func(logits, target).mean()\n\n # in the official tensorflow code, they use sum instead of mean\n # so we multiply num_points as sum\n dist_loss = self.dist_func(\n adv_data.transpose(1, 2).contiguous(),\n ori_data.transpose(1, 2).contiguous()).mean() * K\n\n loss = adv_loss + dist_loss\n opt.zero_grad()\n loss.backward()\n opt.step()\n\n t3 = time.time()\n backward_time += t3 - t2\n\n # clipping and projection!\n adv_data.data = self.clip_func(adv_data.clone().detach(),\n ori_data, normal)\n\n t4 = time.time()\n clip_time = t4 - t3\n total_time += t4 - t1\n\n if iteration % 100 == 0:\n print('total time: {:.2f}, for: {:.2f}, '\n 'back: {:.2f}, clip: {:.2f}'.\n format(total_time, forward_time,\n backward_time, clip_time))\n total_time = 0.\n forward_time = 0.\n backward_time = 0.\n clip_time = 0.\n torch.cuda.empty_cache()\n\n # end of CW attack\n with torch.no_grad():\n logits = self.model(adv_data) # [B, num_classes]\n if isinstance(logits, tuple): # PointNet\n logits = logits[0]\n pred = torch.argmax(logits, dim=-1) # [B]\n success_num = (pred == target).\\\n sum().detach().cpu().item()\n\n # return final results\n print('Successfully attack {}/{}'.format(success_num, B))\n\n # in their implementation, they estimate the normal of adv_pc\n # we don't do so here because it's useless in our task\n adv_data = adv_data.transpose(1, 2).contiguous() # [B, K, 3]\n adv_data = adv_data.detach().cpu().numpy() # [B, K, 3]\n return adv_data, success_num",
"def attack(self):\n\n self.check_unresolved_actions()\n messages = self.conflict_check()\n if len(self.args) == 0:\n raise Exception('No target identified for your attack action')\n search = self.args[0]\n chars = []\n if self.engagement and self.engagement.characters:\n chars.extend(list(Character().filter(id__in=[c for c in self.engagement.characters]).all()))\n targets = [c for c in chars if search.lower() in c.name.lower()]\n if not targets and self.sc and self.sc.characters:\n chars.extend(list(Character().filter(id__in=[c for c in self.sc.characters]).all()))\n targets = [c for c in chars if search.lower() in c.name.lower()]\n if not targets:\n raise Exception(f'No target match for _{search}_ found in the ***{self.sc.name}*** scene.')\n if len(targets) > 1:\n names = '\\n '.join([f'***{m.name}***' for m in targets])\n raise Exception(f'Multiple targets matched _{search}_ in the ***{self.sc.name}*** scene. Please specify which:{names}')\n self.target = targets[0]\n self.target.active_target_by = str(self.char.id)\n self.save_char(self.target)\n self.char.active_action = 'Attack'\n self.char.active_target = str(self.target.id)\n self.save_char(self.char)\n messages.extend(self.add_chars_to_engagement())\n self.command = 'roll'\n # Allow for exact roll designation\n if self.args[1] == 'exact' and len(self.args) > 2:\n exact_roll = self.args[2]\n self.args = self.args[3:] if len(self.args) > 3 else tuple()\n self.invoke_index = [i for i in range(0, len(self.args)) if self.args[i] in ['invoke', 'i']]\n self.compel_index = [i for i in range(0, len(self.args)) if self.args[i] in ['compel', 'c']]\n roll_str = self.roll(exact_roll)\n else:\n self.args = self.args[1:]\n roll_str = self.roll()\n messages.extend(roll_str)\n return messages",
"def attack(self):\r\n pass",
"def attack(self, target: Target) -> None:\n logger = logging.getLogger(\"attack-multiple attacks\")\n logger.info(\"Running attacks\")\n file_contents = \"\"\n with open(self.config_filename, \"r+\", encoding=\"utf-8\") as f:\n file_contents = f.read()\n\n if file_contents != \"\":\n config_file_data = json.loads(file_contents)\n for config_obj in config_file_data:\n params = config_file_data[config_obj]\n attack_name = config_obj.split(\"-\")[0]\n attack_obj = None\n if attack_name == \"worst_case\":\n attack_obj = WorstCaseAttack(**params)\n elif attack_name == \"lira\":\n attack_obj = LIRAAttack(**params)\n elif attack_name == \"attribute\":\n attack_obj = AttributeAttack(**params)\n else:\n attack_names = \"'worst_case', 'lira' and 'attribute'\"\n logger.error(\n \"\"\"attack name is %s whereas supported attack names are %s: \"\"\",\n attack_name,\n attack_names,\n )\n\n if attack_obj is not None:\n attack_obj.attack(target)\n\n if attack_obj is not None:\n _ = attack_obj.make_report()\n logger.info(\"Finished running attacks\")",
"def apply_damage_to(self, target):\r\n target.apply_damage(self.damage_this_tick)\r\n self.average_damage += self.damage_this_tick\r\n self.num_casts += 1",
"def with_dot(self, damage: int, ticks: int) -> object:\n self.dot = damage\n self.dot_ticks = ticks\n return self",
"def approachTarget(self, amount):\n if amount == 0:\n # If amount is zero, do nothing.\n return\n \n if self.t.sub(self.p).mag()*(1 - amount) > 2.0*self.tolerance:\n # If 'self.approachTarget()' will not take the view within twice the\n # tolerance distance, approach the target by given amount:\n self.p = self.p.add(self.t.sub(self.p).scale(amount))",
"def __setAttack(self):\n\t\tself.attack = self.attack + int(floor(sqrt(self.xp)))",
"def use(self, target):\n\t\tpass",
"def set_target(self, target, slew_only=False):\n target = katpoint.Target(target)\n target.body.compute(self.observer)\n return target",
"def attack(self, model: nn.Module, inputs: torch.Tensor, labels: torch.Tensor,\n targeted: bool = False) -> torch.Tensor:\n batch_size = inputs.shape[0]\n tinputs = self._arctanh((inputs - self.boxplus) / self.boxmul)\n\n # set the lower and upper bounds accordingly\n lower_bound = torch.zeros(batch_size, device=self.device)\n CONST = torch.full((batch_size,), self.initial_const, device=self.device)\n upper_bound = torch.full((batch_size,), 1e10, device=self.device)\n\n o_best_l2 = torch.full((batch_size,), 1e10, device=self.device)\n o_best_score = torch.full((batch_size,), -1, dtype=torch.long, device=self.device)\n o_best_attack = inputs.clone()\n\n # setup the target variable, we need it to be in one-hot form for the loss function\n labels_onehot = torch.zeros(labels.size(0), self.num_classes, device=self.device)\n labels_onehot.scatter_(1, labels.unsqueeze(1), 1)\n labels_infhot = torch.zeros_like(labels_onehot).scatter_(1, labels.unsqueeze(1), float('inf'))\n\n for outer_step in range(self.binary_search_steps):\n\n # setup the modifier variable, this is the variable we are optimizing over\n modifier = torch.zeros_like(inputs, requires_grad=True)\n\n # setup the optimizer\n optimizer = optim.Adam([modifier], lr=self.learning_rate, betas=(0.9, 0.999), eps=1e-8)\n best_l2 = torch.full((batch_size,), 1e10, device=self.device)\n best_score = torch.full((batch_size,), -1, dtype=torch.long, device=self.device)\n\n # The last iteration (if we run many steps) repeat the search once.\n if self.repeat and outer_step == (self.binary_search_steps - 1):\n CONST = upper_bound\n\n prev = float('inf')\n for iteration in range(self.max_iterations):\n # perform the attack\n adv, logits, l2, logit_dists, loss = self._step(model, optimizer, inputs, tinputs, modifier,\n labels, labels_infhot, targeted, CONST)\n\n if self.callback and (iteration + 1) % self.log_interval == 0:\n self.callback.scalar('logit_dist_{}'.format(outer_step), iteration + 1, logit_dists.mean().item())\n self.callback.scalar('l2_norm_{}'.format(outer_step), iteration + 1, l2.sqrt().mean().item())\n\n # check if we should abort search if we're getting nowhere.\n if self.abort_early and iteration % (self.max_iterations // 10) == 0:\n if loss > prev * 0.9999:\n break\n prev = loss\n\n # adjust the best result found so far\n predicted_classes = (logits - labels_onehot * self.confidence).argmax(1) if targeted else \\\n (logits + labels_onehot * self.confidence).argmax(1)\n\n is_adv = (predicted_classes == labels) if targeted else (predicted_classes != labels)\n is_smaller = l2 < best_l2\n o_is_smaller = l2 < o_best_l2\n is_both = is_adv * is_smaller\n o_is_both = is_adv * o_is_smaller\n\n best_l2[is_both] = l2[is_both]\n best_score[is_both] = predicted_classes[is_both]\n o_best_l2[o_is_both] = l2[o_is_both]\n o_best_score[o_is_both] = predicted_classes[o_is_both]\n o_best_attack[o_is_both] = adv[o_is_both]\n\n # adjust the constant as needed\n adv_found = (best_score == labels) if targeted else ((best_score != labels) * (best_score != -1))\n upper_bound[adv_found] = torch.min(upper_bound[adv_found], CONST[adv_found])\n adv_not_found = ~adv_found\n lower_bound[adv_not_found] = torch.max(lower_bound[adv_not_found], CONST[adv_not_found])\n is_smaller = upper_bound < 1e9\n CONST[is_smaller] = (lower_bound[is_smaller] + upper_bound[is_smaller]) / 2\n CONST[(~is_smaller) * adv_not_found] *= 10\n\n # return the best solution found\n return o_best_attack",
"def attack(self):\r\n last_attacker = self.db.last_attacker\r\n players = [obj for obj in self.location.contents\r\n if utils.inherits_from(obj, BASE_CHARACTER_TYPECLASS) and not obj.is_superuser]\r\n if players:\r\n\r\n # find a target\r\n if last_attacker in players:\r\n # prefer to attack the player last attacking.\r\n target = last_attacker\r\n else:\r\n # otherwise attack a random player in location\r\n target = players[random.randint(0, len(players) - 1)]\r\n\r\n # try to use the weapon in hand\r\n attack_cmds = (\"thrust\", \"pierce\", \"stab\", \"slash\", \"chop\")\r\n cmd = attack_cmds[random.randint(0, len(attack_cmds) - 1)]\r\n self.execute_cmd(\"%s %s\" % (cmd, target))\r\n\r\n # analyze result.\r\n if target.db.health <= 0:\r\n # we reduced enemy to 0 health. Whisp them off to\r\n # the prison room.\r\n tloc = search_object(self.db.defeat_location)\r\n tstring = self.db.defeat_text\r\n if not tstring:\r\n tstring = \"You feel your conciousness slip away ... you fall to the ground as \"\r\n tstring += \"the misty apparition envelopes you ...\\n The world goes black ...\\n\"\r\n target.msg(tstring)\r\n ostring = self.db.defeat_text_room\r\n if tloc:\r\n if not ostring:\r\n ostring = \"\\n%s envelops the fallen ... and then their body is suddenly gone!\" % self.key\r\n # silently move the player to defeat location\r\n # (we need to call hook manually)\r\n target.location = tloc[0]\r\n tloc[0].at_object_receive(target, self.location)\r\n elif not ostring:\r\n ostring = \"%s falls to the ground!\" % target.key\r\n self.location.msg_contents(ostring, exclude=[target])\r\n # Pursue any stragglers after the battle\r\n self.battle_mode = False\r\n self.roam_mode = False\r\n self.pursue_mode = True\r\n else:\r\n # no players found, this could mean they have fled.\r\n # Switch to pursue mode.\r\n self.battle_mode = False\r\n self.roam_mode = False\r\n self.pursue_mode = True",
"def _attack_success(self, adv_x, x, target_class):\n predicted_class = np.argmax(self.estimator.predict(self._perturb_image(adv_x, x))[0])\n return bool(\n (self.targeted and predicted_class == target_class)\n or (not self.targeted and predicted_class != target_class)\n )",
"def _attack(\n self, image: np.ndarray, target_class: np.ndarray, limit: int, max_iter: int\n ) -> Tuple[bool, np.ndarray]:\n bounds, initial = self._get_bounds(image, limit)\n\n def predict_fn(x):\n predictions = self.estimator.predict(self._perturb_image(x, image))[:, target_class]\n return predictions if not self.targeted else 1 - predictions\n\n def callback_fn(x, convergence=None):\n if self.es == 0:\n if self._attack_success(x.result[0], image, target_class):\n raise Exception(\"Attack Completed :) Earlier than expected\")\n else:\n return self._attack_success(x, image, target_class)\n\n if self.es == 0:\n from cma import CMAOptions\n\n opts = CMAOptions()\n if not self.verbose:\n opts.set(\"verbose\", -9)\n opts.set(\"verb_disp\", 40000)\n opts.set(\"verb_log\", 40000)\n opts.set(\"verb_time\", False)\n\n opts.set(\"bounds\", bounds)\n\n if self.type_attack == 0:\n std = 63\n else:\n std = limit\n\n from cma import CMAEvolutionStrategy\n\n strategy = CMAEvolutionStrategy(initial, std / 4, opts)\n\n try:\n strategy.optimize(\n predict_fn,\n maxfun=max(1, 400 // len(bounds)) * len(bounds) * 100,\n callback=callback_fn,\n iterations=1,\n )\n except Exception as exception:\n if self.verbose:\n print(exception)\n\n adv_x = strategy.result[0]\n else:\n strategy = differential_evolution(\n predict_fn,\n bounds,\n disp=self.verbose,\n maxiter=max_iter,\n popsize=max(1, 400 // len(bounds)),\n recombination=1,\n atol=-1,\n callback=callback_fn,\n polish=False,\n )\n adv_x = strategy.x\n\n if self._attack_success(adv_x, image, target_class):\n return True, self._perturb_image(adv_x, image)[0]\n else:\n return False, image",
"def update(self, obstacles, move_limit=1000):\n if self.alive():\n self.move(move_limit)\n # check if that move killed the dot\n if (self.posXY[0] < 3 or self.posXY[1] < 3 or\n self.posXY[0] > WIDTH-3 or self.posXY[1] > HEIGHT-3):\n self.dead = True\n # or hit an abstacle\n elif obstacles.collision(self.posXY):\n self.dead = True\n # or has the dot reached the target?\n elif distance(self.posXY, TARGET_XY) < 4:\n self.reached_goal = True",
"def attack(self,enemy):\n enemy.takeDamage(self.damage)",
"async def love(self, ctx, *, target=None):\r\n if ctx.author.nick is None:\r\n member = ctx.author\r\n else:\r\n member = ctx.author.nick\r\n\r\n if not target:\r\n return await ctx.send(f'{member} loves ... nothing')\r\n\r\n await ctx.send(f\"{member} gives {target} some good ol' fashioned lovin'.\")",
"def apply_before(self, target):\n return target",
"def make_adversarial_attack(x, y_target, model, max_iter=100, verbose=True):\n # Initialize our adversarial attack to the input image, and make it require gradient\n \n \n ##############################################################################\n # TODO: Generate an adversarial attack X_adv that the model will classify #\n # as the class target_y. You should perform gradient ascent on the score #\n # of the target class, stopping when the model is fooled. #\n # When computing an update step, first normalize the gradient: #\n # dX = learning_rate * g / ||g||_2 #\n # #\n # You should write a training loop. #\n # #\n # HINT: For most examples, you should be able to generate an adversarial #\n # attack in fewer than 100 iterations of gradient ascent. #\n # You can print your progress over iterations to check your algorithm. #\n ##############################################################################\n loss_fn = nn.CrossEntropyLoss()\n num_steps = 6\n step_size=0.01\n eps=0.3\n clamp=(0,1)\n x_adv = x.clone().detach().requires_grad_(True).to(x.device)\n num_channels = x.shape[1]\n y_target = torch.tensor(y_target).unsqueeze(0).to(x.device)\n for i in range(num_steps):\n _x_adv = x_adv.clone().detach().requires_grad_(True)\n prediction = model(_x_adv)\n print(torch.argmax(prediction))\n loss = loss_fn(prediction, y_target)\n loss.backward()\n with torch.no_grad():\n gradients = _x_adv.grad.sign() * step_size\n x_adv -= gradients\n x_adv = torch.max(torch.min(x_adv, x + eps), x - eps) \n x_adv = x_adv.clamp(*clamp)\n return x_adv"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks if attack has completed cooldown upon update.
|
def on_update(self) -> None:
if self.__cooldown_tick == self.cooldown:
self.__cooldown_tick = 0
else:
self.__cooldown_tick += 1
|
[
"def tick_cooldowns(self):\n\n if self.bullet_cooldown > 0:\n self.bullet_cooldown -= 1",
"def can_shoot(self):\n\n return (self._cooldown <= 0)",
"def check_cooldown(db: Database, channel_name: str) -> bool:\n if channel_name[0] == \"#\":\n channel_name = channel_name[1:]\n log.error(\"Someplace in the code is using channels with #.\")\n cooldown_time = cooldowns[channel_name] if channel_name in cooldowns else None\n if cooldown_time is None:\n return False # no cooldown found.\n cooldown = db.get_cd(channel_name)\n if cooldown is None:\n cooldown = int(settings[\"default_cooldown\"])\n return not datetime.datetime.utcnow() - cooldown_time > timedelta(seconds=cooldown)",
"def check_invincibility(self):\n if not self.hittable and self.time_hit + 1200 <= pygame.time.get_ticks():\n self.hittable = True\n else:\n pass",
"def is_on_cooldown(self, inter: ApplicationCommandInteraction) -> bool:\n if not self._buckets.valid:\n return False\n\n bucket = self._buckets.get_bucket(inter) # type: ignore\n dt = inter.created_at\n current = dt.replace(tzinfo=datetime.timezone.utc).timestamp()\n return bucket.get_tokens(current) == 0",
"def is_in_cooldown(self, author: discord.User, cmd: str):\n return (author.id in self._cooldown.keys()) and (cmd in self._cooldown.get(author.id))",
"def cooldown(self, value):\n\n pass",
"def attack(self, target: Health) -> None:\n if self.__cooldown_tick == 0:\n target.apply_damage(self.damage)\n if self.dot > 0: target.apply_dot(self.dot, self.dot_ticks)",
"def start_cooldown(self):\r\n self.cooldown_timer = 0\r\n self.cast_timer = 1\r\n self.casting = False",
"def in_cool_down(self):\n in_cool_down = False\n\n if self.last_played_match:\n in_cool_down = (now() - self.last_played_match.played) <= timedelta(hours=4)\n\n return in_cool_down",
"def can_attack(self):\n return False",
"def other_can_shoot(self):\n\n if self._other == None:\n return None\n\n return self.other_cooldown <= 0",
"def _check_if_dead(self):\n if self.health <= 0:\n self.is_dead = True",
"def check_auto_attack_statue(self):\n\n if self.check_stone_box_statue() is False:\n return True if self.pic[843,422].item(0) == self.pic[843,500].item(0) == 18 else False\n else:\n return None",
"def check_status(self):\n print(\"Yamcha: Let go!\")\n print(\"Piccolo: It's over\")\n print(\"*Loud explosion*\")\n self.is_dead = True",
"def use_healthpotion(self):\n has_attacked = False\n if self.potions >= 1:\n self.potions -= 1\n self.health += 250\n if self.health > self.max_health:\n self.health = self.max_health\n self.app.write(self.name + \" uses a potion!\")\n time.sleep(1)\n self.app.write(self.name + \" has \" + str(self.health) + \" hit points.\")\n self.app.write(\"\")\n time.sleep(1)\n has_attacked = True\n return has_attacked\n else:\n self.app.write(\"You have no potions left!\")\n self.app.write(\"\")\n return has_attacked",
"def do_defense(self):\n for pirate in self.living_pirates:\n # if defense expiration is full and defense was activated this turn, start counting defense reload time\n if pirate.defense_expiration_turns == pirate.max_defense_turns and pirate.defense_turns[-1] == self.turn:\n pirate.defense_reload_turns = self.defense_reload_turns\n else:\n if pirate.defense_reload_turns > 0:\n pirate.defense_reload_turns -= 1\n # count defense expiration\n if pirate.defense_expiration_turns > 0:\n pirate.defense_expiration_turns -= 1",
"def check_update(self):\n\n if time.time() - self._last_update_check >= self.frametime:\n # A framerate occurs! Check if it was too long ago\n if time.time() - self._last_update_check >= self._reset_timeout:\n # Reset it\n self._last_update_check = time.time()\n else:\n self._last_update_check += self.frametime\n return True\n return False",
"def check_health_need_decrease(self): #If happiness/fullness are low enough and enough time has passed,\n if (self.fullness == 0 or self.happiness == 0) and ((time.time() - self.__time_health_last_decreased) >= self.CHANGE_STATE_TIME):\n self.make_sick()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Respond to iOS notification to empty vacuum.
|
def response_from_push_notification(
self, event_name: str, data: dict, kwargs: dict) -> None:
self.hass.log('Responding to iOS request that vacuum is empty')
self.hass.manager_app.bin_state = (
self.hass.manager_app.BinStates.empty)
target = self.hass.notification_manager.get_target_from_push_id(
data['sourceDevicePermanentID'])
self.hass.notification_manager.send(
'Vacuum Emptied',
'{0} emptied the vacuum.'.format(target),
target='not {0}'.format(target))
|
[
"def vacuum_empty(self):\n return self._vacuum.is_empty()",
"def vacuum_full(self):\n return self._vacuum.is_full()",
"def listen_unallocated(self):\n\n pass",
"async def test_no_fan_vacuum(opp, mqtt_mock):\n config = deepcopy(DEFAULT_CONFIG)\n del config[mqttvacuum.CONF_FAN_SPEED_LIST]\n config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(\n mqttvacuum.DEFAULT_SERVICES, SERVICE_TO_STRING\n )\n\n assert await async_setup_component(opp, vacuum.DOMAIN, {vacuum.DOMAIN: config})\n\n message = \"\"\"{\n \"battery_level\": 54,\n \"state\": \"cleaning\"\n }\"\"\"\n async_fire_mqtt_message(opp, \"vacuum/state\", message)\n state = opp.states.get(\"vacuum.mqtttest\")\n assert state.state == STATE_CLEANING\n assert state.attributes.get(ATTR_FAN_SPEED) is None\n assert state.attributes.get(ATTR_FAN_SPEED_LIST) is None\n assert state.attributes.get(ATTR_BATTERY_LEVEL) == 54\n assert state.attributes.get(ATTR_BATTERY_ICON) == \"mdi:battery-50\"\n\n message = \"\"\"{\n \"battery_level\": 54,\n \"state\": \"cleaning\",\n \"fan_speed\": \"max\"\n }\"\"\"\n async_fire_mqtt_message(opp, \"vacuum/state\", message)\n state = opp.states.get(\"vacuum.mqtttest\")\n\n assert state.state == STATE_CLEANING\n assert state.attributes.get(ATTR_FAN_SPEED) is None\n assert state.attributes.get(ATTR_FAN_SPEED_LIST) is None\n\n assert state.attributes.get(ATTR_BATTERY_LEVEL) == 54\n assert state.attributes.get(ATTR_BATTERY_ICON) == \"mdi:battery-50\"\n\n message = \"\"\"{\n \"battery_level\": 61,\n \"state\": \"docked\"\n }\"\"\"\n\n async_fire_mqtt_message(opp, \"vacuum/state\", message)\n state = opp.states.get(\"vacuum.mqtttest\")\n assert state.state == STATE_DOCKED\n assert state.attributes.get(ATTR_BATTERY_ICON) == \"mdi:battery-charging-60\"\n assert state.attributes.get(ATTR_BATTERY_LEVEL) == 61",
"def try_vacuum(self):\n cur = self.con.cursor()\n page_count = self.get_pragma(\"page_count\")[0][0]\n freelist_count = self.get_pragma(\"freelist_count\")[0][0]\n page_size = self.get_pragma(\"page_size\")[0][0]\n\n #print(page_count, freelist_count, page_count - freelist_count)\n # 25% freepage and 1MB wasted space\n if (float(freelist_count)/page_count > .25\n and freelist_count * page_size > 1024*1024):\n cur.execute(\"VACUUM;\")\n self.commit()",
"async def test_discovery_removal_vacuum(opp, mqtt_mock):\n entry = MockConfigEntry(domain=mqtt.DOMAIN)\n await async_start(opp, \"openpeerpower\", {}, entry)\n\n data = '{ \"name\": \"Beer\",' ' \"command_topic\": \"test_topic\"}'\n\n async_fire_mqtt_message(opp, \"openpeerpower/vacuum/bla/config\", data)\n await opp.async_block_till_done()\n\n state = opp.states.get(\"vacuum.beer\")\n assert state is not None\n assert state.name == \"Beer\"\n\n async_fire_mqtt_message(opp, \"openpeerpower/vacuum/bla/config\", \"\")\n await opp.async_block_till_done()\n\n state = opp.states.get(\"vacuum.beer\")\n assert state is None",
"def _send_empty(self, status_code, message=None):\n self.send_response(status_code, message=message)\n self.end_headers()",
"def vacuum(self):\n\t\tself._exec_sql_and_commit(\"VACUUM\")",
"async def test_vacuum(opp: OpenPeerPower, mock_account):\n await setup_integration(opp, mock_account, PLATFORM_DOMAIN)\n assert opp.services.has_service(DOMAIN, SERVICE_RESET_WASTE_DRAWER)\n\n vacuum = opp.states.get(VACUUM_ENTITY_ID)\n assert vacuum\n assert vacuum.state == STATE_DOCKED\n assert vacuum.attributes[\"is_sleeping\"] is False",
"def on_heartbeat(self):\n raise NotImplementedError()",
"def unsubscribedReceived(self, presence):\n # This is just a confirmation. Don't respond.\n pass",
"async def on_resumed(self):\n\t print('[INFO] Resumed...')",
"def vacuum(self):\n self._check_mode_is_write('vacuum')\n\n if isinstance(self._col, Chunks):\n self._col.vacuum()",
"def vacuum_stats():\n query_vacuum_stats(current_app.extensions['sqlalchemy'].db)",
"def empty(self):\n super(Bay, self).empty()\n self.__remaining_capacity += self.__aggregate_task_processing_time\n self.__aggregate_task_processing_time = 0",
"def __actionArmEmpty(self):\n Log.d(f\"Arm is empty :: {self.__planningStack}\")\n exit(1)",
"def acknowledge_schedule_slot_notification(self, notification_id):\n pass",
"def empty_bin(self):\n self.fruit_count = 0",
"def handle_halt(self):\n self.byte_buffer.clear()\n self.comm_val.value = ClientAudioMsg.HALT\n while True:\n if self.comm_val.value == ClientAudioMsg.HALT_RSP:\n break\n time.sleep(SLEEP_INTERVAL)\n msg_code = ClientServerMsg.HALT_RSP\n duration_bytes = pack(DURATION_FORMAT,self.comm_arr[0])\n msg = self.encode_message(msg_code, duration_bytes)\n self.sock.sendall(msg)\n while not self.comm_queue.empty():\n self.comm_queue.get()\n self.state = ClientState.INACTIVE"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Define a property to get the bin state.
|
def bin_state(self) -> Enum:
return self.BinStates(self.get_state(self.entities['bin_state']))
|
[
"def flag_property(flag):\n def getter(self):\n return (self._flags & flag) != 0\n def setter(self, value):\n if value:\n self._flags |= flag\n else:\n self._flags &= ~flag\n return property(getter, setter)",
"def readStateAttribute(self):\r\n return _osgDB.Input_readStateAttribute(self)",
"def value(self) -> t.Any:\n return getattr(self.obj, self.statemanager.propname)",
"def binary_state(value):\n if value == \"on\":\n return True\n elif value == \"off\":\n return False\n else:\n return float(\"nan\")",
"def state(self):\n return self._state.value",
"def SoOverrideElement_getShininessOverride(state: 'SoState') -> \"SbBool\":\n return _coin.SoOverrideElement_getShininessOverride(state)",
"def SoOverrideElement_getNormalBindingOverride(state: 'SoState') -> \"SbBool\":\n return _coin.SoOverrideElement_getNormalBindingOverride(state)",
"def state(self): # State\n self._get_bus_status()\n return self._bus_state",
"def device_state_attributes(self):",
"def __int__(self):\n return self.bits",
"def getShininessOverride(state: 'SoState') -> \"SbBool\":\n return _coin.SoOverrideElement_getShininessOverride(state)",
"def example_property(self):",
"def _propertyToBool(self, propertyName):\n\n return stringToBool(self.__stack[-1][propertyName])",
"def state(self, bulb_state):\n print(bulb_state)\n print(self.BULB_STATE_ON)\n print(self.BULB_STATE_OFF)\n if bulb_state == self.BULB_STATE_ON:\n bulb_state = 1\n elif bulb_state == self.BULB_STATE_OFF:\n bulb_state = 0\n else:\n raise ValueError\n \n light_state = {\n \"on_off\": bulb_state,\n }\n return self.set_light_state(light_state)",
"def _create_mmio_property(addr):\n def _get(self):\n return self.read(addr)\n \n def _set(self, value):\n self.write(addr, value)\n \n return property(_get, _set)",
"def v(self):\n return self.state",
"def getNormalBindingOverride(state: 'SoState') -> \"SbBool\":\n return _coin.SoOverrideElement_getNormalBindingOverride(state)",
"def get_bprop_fn(prim, get_closure=False):\n out = bprop_getters.get(prim, None)\n if out is None:\n return bprops.get(prim, None)\n if get_closure:\n return out\n return out(prim)",
"def get_bin(self, n):\n return self.bins[n]",
"def getBool(cls, prop):\n \n value = cls.getInt(prop)\n \n if value not in (0, 1):\n raise ValueError(\"Expected 0 or 1, but got %r in config property %s\" % (\n value, prop))\n \n return bool(value)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Helper function for validating an ABI
|
def validate_abi(abi):
if not is_list_like(abi):
raise TypeError("'abi' is not a list")
for e in abi:
if not is_dict(e):
raise TypeError("The elements of 'abi' are not all dictionaries")
|
[
"def validate_abi(abi: ABI) -> None:\n if not is_list_like(abi):\n raise ValueError(\"'abi' is not a list\")\n\n if not all(is_dict(e) for e in abi):\n raise ValueError(\"'abi' is not a list of dictionaries\")\n\n functions = filter_by_type(\"function\", abi)\n selectors = groupby(compose(encode_hex, function_abi_to_4byte_selector), functions)\n duplicates = valfilter(lambda funcs: len(funcs) > 1, selectors)\n if duplicates:\n raise ValueError(\n \"Abi contains functions with colliding selectors. \"\n f\"Functions {_prepare_selector_collision_msg(duplicates)}\"\n )",
"def validate_abi_value(abi_type: TypeStr, value: Any) -> None:\n if is_array_type(abi_type) and is_list_like(value):\n # validate length\n specified_length = length_of_array_type(abi_type)\n if specified_length is not None:\n if specified_length < 1:\n raise TypeError(\n f\"Invalid abi-type: {abi_type}. Length of fixed sized \"\n \"arrays must be greater than 0.\"\n )\n if specified_length != len(value):\n raise TypeError(\n \"The following array length does not the length specified\"\n f\"by the abi-type, {abi_type}: {value}\"\n )\n\n # validate sub_types\n sub_type = sub_type_of_array_type(abi_type)\n for v in value:\n validate_abi_value(sub_type, v)\n return\n elif is_bool_type(abi_type) and is_boolean(value):\n return\n elif is_uint_type(abi_type) and is_integer(value) and value >= 0:\n return\n elif is_int_type(abi_type) and is_integer(value):\n return\n elif is_address_type(abi_type):\n validate_address(value)\n return\n elif is_bytes_type(abi_type):\n if is_bytes(value):\n return\n elif is_string(value):\n if is_0x_prefixed(value):\n return\n else:\n raise TypeError(\n \"ABI values of abi-type 'bytes' must be either\"\n \"a python3 'bytes' object or an '0x' prefixed string.\"\n )\n elif is_string_type(abi_type) and is_string(value):\n return\n\n raise TypeError(f\"The following abi value is not a '{abi_type}': {value}\")",
"def validate_abi_type(abi_type: TypeStr) -> None:\n if not is_recognized_type(abi_type):\n raise ValueError(f\"Unrecognized abi_type: {abi_type}\")",
"def test_valid_app(valid_app_definition):\n validator = AppDefinitionValidator()\n try:\n validator.validate(valid_app_definition)\n except ValidationError as e:\n pytest.fail(e)",
"def validate_arguments(args):\n if not args.input_file[-4:] == \".pdb\":\n exit(\"ERROR: Input file should be in PDB format\")\n if args.cutoff < 0:\n exit(\"ERROR: Cut-off must be a non-negative value\")\n if args.n_cycles < 0:\n exit(\"ERROR: Number of cycles must be a non-negative value\")\n if args.blocked_region != None:\n assert len(args.blocked_region.split(\"-\")) == 2, \\\n \"Blocked region should be in format 'int-int'\"",
"def validate_address():\n def validate(address, typ, currency):\n try:\n ver = currencies.validate_bc_address(address)\n except InvalidAddressException:\n return False\n\n if typ == 'buyable':\n lst = currencies.buyable_currencies\n elif typ == 'sellable':\n lst = currencies.sellable_currencies\n elif typ == 'unsellable':\n lst = currencies.unsellable_currencies\n elif typ == 'unbuyable':\n lst = currencies.unbuyable_currencies\n else:\n abort(400)\n\n for curr in lst:\n if ver in curr.address_version:\n if curr.key == currency or currency == 'Any':\n return True\n return False\n\n data = request.json\n if validate(data['address'], data['type'], data['currency']):\n return jsonify({data['currency']: True})\n else:\n return jsonify({data['currency']: False})",
"def test_fail_signature_fragment_address_wrong(self):\n # noinspection SpellCheckingInspection\n self.bundle[2].address =\\\n Address(\n b'QHEDFWZULBZFEOMNLRNIDQKDNNIELAOXOVMYEI9P'\n b'GNFDPEEZCWVYLKZGSLCQNOFUSENIXRHWWTZFBXMPS'\n )\n\n validator = BundleValidator(self.bundle)\n\n self.assertFalse(validator.is_valid())\n\n self.assertListEqual(\n validator.errors,\n\n [\n 'Unable to find signature fragment 2 for transaction 1.'\n ],\n )",
"def validate_bundle(bundle):\n try:\n unpack(bundle)\n return (True, None)\n except Exception as e:\n return (False, e)",
"def test_invalid_app(invalid_app_definition):\n validator = AppDefinitionValidator()\n with pytest.raises(ValidationError):\n validator.validate(invalid_app_definition)",
"def is_valid_manifest_format(\n manifest_path,\n column_names_to_enums=None,\n allowed_protocols=[\"s3\", \"gs\"],\n allow_base64_encoded_md5=False,\n error_on_empty_url=False,\n line_limit=None,\n):\n logging.info(f'validating \"{manifest_path}\" manifest')\n warnings.filterwarnings(\"error\")\n\n enums_to_validators = _init_enums_to_validators(\n allowed_protocols, allow_base64_encoded_md5, error_on_empty_url\n )\n with open(manifest_path, \"r\", encoding=\"utf-8-sig\") as dsv_file:\n dsv_reader = _get_dsv_reader(dsv_file)\n manifest_column_names = dsv_reader.fieldnames\n manifest_column_names_to_validators = _get_manifest_column_names_to_validators(\n manifest_column_names, enums_to_validators, column_names_to_enums\n )\n _log_manifest_column_names_to_validators(manifest_column_names_to_validators)\n manifest_is_valid = _validate_manifest_column_names(\n manifest_column_names_to_validators, enums_to_validators, error_on_empty_url\n )\n if line_limit is None or line_limit > 1:\n manifest_is_valid = (\n _validate_rows(\n dsv_reader, manifest_column_names_to_validators, line_limit\n )\n and manifest_is_valid\n )\n\n _log_summary(manifest_is_valid, manifest_path, line_limit)\n return manifest_is_valid",
"def _validate_metadata(metadata, module):\n\n for field in module_metadata.FIELDS:\n if not field in metadata:\n raise UnpackerPackageError(\"module did not pass sanity validation\", reason=\"Missing mandatory field [{}]\".format(field))\n\n # Empty module name\n if metadata[\"module_name\"] == \"\":\n raise UnpackerPackageError(\"module did not pass sanity validation\", reason=\"Empty module name\")\n\n # Empty module file name\n if metadata[\"module_file\"] == \"\":\n raise UnpackerPackageError(\"module did not pass sanity validation\", reason=\"Empty module file name\")\n\n # Architecture must 64 bits\n if metadata[\"architecture\"] != \"x86_64\":\n raise UnpackerPackageError(\"module did not pass sanity validation\", reason=\"Architecture must 64 bits\")\n\n # Missing version\n if not semantic_version.validate(metadata[\"semantic_version\"]):\n raise UnpackerPackageError(\"module did not pass sanity validation\", reason=\"Invalid semantic version\")\n\n if StrictVersion(metadata[\"min_redis_pack_version\"]) < StrictVersion(module_metadata.MIN_REDIS_PACK_VERSION):\n raise UnpackerPackageError(\"module did not pass sanity validation\", reason=\"Min redis pack version is too low\")\n\n # wrong signature\n # TODO: this check should be deffered to a later stage\n # As _sha256_checksum will read entire module file\n # And we're unable to seek it back to its starting point.\n # if _sha256_checksum(module) != metadata[\"sha256\"]:\n # raise UnpackerPackageError(\"module did not pass sanity validation\", reason=\"Wrong signature\")\n\n return True",
"def test_invalid_change_baseregnum():\n statement = copy.deepcopy(CHANGE_STATEMENT)\n statement['baseRegistrationNumber'] = 'B0000123456789'\n\n is_valid, errors = validate(statement, 'changeStatement', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid",
"def test_version_valid():\n assert type(packaging.version.parse(ertai.__version__)) is packaging.version.Version",
"def is_signature_contract(script: bytes) -> bool:\n if len(script) != 40:\n return False\n\n if (script[0] != vm.OpCode.PUSHDATA1\n or script[1] != 33\n or script[35] != vm.OpCode.SYSCALL\n or int.from_bytes(script[36:40], 'little') != contracts.syscall_name_to_int(\n \"System.Crypto.CheckSig\")):\n return False\n return True",
"def _validate_version(version: int) -> bool:\n return version in [4]",
"def check_script(script):\n\n if not isinstance(script, str):\n raise Exception(\"Wrong script format.\")\n elif len(script)/2 != 20:\n raise Exception(\"Wrong signature length \" + str(len(script)/2))\n else:\n return True",
"def app_validate(data):\n required_keys = ['name', 'will_end', 'priority', 'requires_binary', 'version']\n for k in required_keys:\n if k not in data:\n raise InvalidApplicationDescription(msg=\"Missing required key: %s\" % k)\n\n try:\n ver = int(data[\"version\"])\n if ver != zoe_lib.version.ZOE_APPLICATION_FORMAT_VERSION:\n raise InvalidApplicationDescription(msg=\"This version of Zoe supports only version {} for application descriptions\".format(zoe_lib.version.ZOE_APPLICATION_FORMAT_VERSION))\n except ValueError:\n raise InvalidApplicationDescription(msg=\"version field should be an int\")\n\n try:\n bool(data['will_end'])\n except ValueError:\n raise InvalidApplicationDescription(msg=\"will_end field must be a boolean\")\n\n try:\n bool(data['requires_binary'])\n except ValueError:\n raise InvalidApplicationDescription(msg=\"requires_binary field must be a boolean\")\n\n try:\n priority = int(data['priority'])\n except ValueError:\n raise InvalidApplicationDescription(msg=\"priority field must be an int\")\n if priority < 0 or priority > 1024:\n raise InvalidApplicationDescription(msg=\"priority must be between 0 and 1024\")\n\n if 'services' not in data:\n raise InvalidApplicationDescription(msg='the application should contain a list of services')\n\n for p in data['services']:\n _service_check(p)\n\n found_monitor = False\n for p in data['services']:\n if p['monitor']:\n found_monitor = True\n break\n if not found_monitor:\n raise InvalidApplicationDescription(msg=\"at least one process should have monitor set to True\")",
"def validate_syntax(self):\n self._validate_prefix()\n self._validate_zero_address()\n self._validate_zero_network()\n self._validate_address()\n self._validate_address_type()\n self._validate_name()",
"def validate_manifest(manifest):\n obj = convert_manifest_to_object(manifest)\n result = obj.to_dict()\n\n # We use the same logic as `kubernetes.client` for converting attribute\n # names.\n desired = _cast_dict_keys(manifest, key_cast=_convert_attribute_name)\n\n # We only check fields provided in the source, since the conversion\n # may add empty or default values for optional fields\n dictdiff = recursive_diff(desired, result)\n return not dictdiff.removed() and not dictdiff.changed()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Helper function for validating an address EIP55 checksum
|
def validate_address_checksum(address):
if is_checksum_formatted_address(address):
if not is_checksum_address(address):
raise ValueError("'address' has an invalid EIP55 checksum")
|
[
"def isAddress(check):\n\timport re\n\tif re.search('^[13][a-zA-Z0-9]{26,33}$', check):\n\t\treturn True\n\telse:\n\t\treturn False",
"def is_valid_base58_address(value: str) -> bool:\n if 25 > len(value) > 35:\n return False\n\n try:\n abytes = base58check.b58decode(value)\n except (ValueError):\n return False\n\n if not abytes[0] in (0x00, 0x05):\n return False\n\n checksum = hashlib.sha256(hashlib.sha256(abytes[:-4]).digest()).digest()[:4]\n if abytes[-4:] != checksum:\n return False\n\n return value == base58check.b58encode(abytes).decode()",
"def verify_address(self,address):\n if isinstance(address,int) and address in range(0,256):\n return (0,'{:02X}'.format(address))\n return ('E',-25)",
"def isAddress(address):\n assert (len(address) == 20 and address != ZERO_ADDRESS)\n return True",
"def validateNetworkAddress(netAddress):\n\t\n\t# from the link here: https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml\n\t # 0.0.0.0/8 - This host on this network\n\t # 100.64.0.0/10 - Shared address space\n\t # 127.0.0.0/8 - Loopback\n\t # 169.254.0.0/16 - Link Local\n\t # 192.0.0.0/24 - IETF protocol assignments\n\t # 192.0.2.0/24 - Documentation (test-net-1)\n\t # 192.31.196.0/24 - AS112-v4\n\t # 192.52.193.0/24 - AMT\n\t # 192.175.48.0/24 - Direct delegation AS112 service\n\t # 192.18.0.0/15 - Benchmarking\n\t # 192.51.100.0/24 - Documentation (test-net-2)\n\t# 203.0.113.0/24 - Documentation (test-net-3)\n\t\n\t# the subnet mask lengths to consider here are 8 10 15 16 24\n\t# goal is to take the network address calculated, bit-and it with the subnet mask to consider, and make sure that the resultant network address is\n\t# not one of the reserved ones\n\t\n\t# make a dictionary of the reserved spaces\n\treservedSpaces = {8:['0.0.0.0','127.0.0.0'], \\\n\t\t\t\t\t 10:['100.64.0.0'], \\\n\t\t\t\t\t 15:['192.18.0.0'], \\\n\t\t\t\t\t 16:['169.254.0.0'], \\\n\t\t\t\t\t 24:['192.0.0.0', '192.0.2.0', '192.31.196.0', '192.52.193.0', '192.175.48.0', '192.51.100.0', '203.0.113.0'] }\n\t\t\t\t\t \n\tfor smLength in reservedSpaces.keys():\n\t\t\n\t\t# first calculate the network address anded with the subnet mask\n\t\tif calculateNetworkAddress(dottedToBinary(netAddress),'1'*smLength+'0'*(32 - smLength)) in reservedSpaces[smLength]:\n\t\t\tprint(\"This fits within one of the IANA reserved IPv4 Spaces: \" + calculateNetworkAddress(dottedToBinary(netAddress),'1'*smLength+'0'*(32 - smLength)) + \\\n\t\t\t\"\\nNo further processing required.\\nSee https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml for further details.\")\n\t\t\tsys.exit()",
"def is_address(addr: str) -> bool:\n return Address.is_valid(addr.strip())",
"def _ipv6_validator(address: Union[str, int], strict: bool = True) -> bool:\n\n if isinstance(address, str):\n\n address, *port = address.split(']:') # Try split on closing bracket and port separator\n if port:\n address = address[1:] # Gets rid of the opening bracket that contained the address\n try:\n port_num = int(port[0])\n except ValueError:\n # Port number wasn't a valid integer\n return False\n\n if strict:\n if not 0 <= port_num <= PORT_NUMBER_MAX_VALUE: # 2**16\n # Port number was too high or too low to be strictly valid\n return False\n\n halves = address.split('::')\n segments = []\n\n if len(halves) == 2:\n # Address with zero-skip part\n left, right = map(lambda x: x.split(':'), halves)\n total_length = len(left) + len(right)\n\n if halves[0]:\n segments.extend(left)\n else:\n segments.append('0000')\n \n segments.extend(['0000' for _ in range(IPV6_MAX_SEGMENT_COUNT - total_length)])\n\n if halves[1]:\n segments.extend(right)\n else:\n segments.append('0000')\n\n elif len(halves) == 1:\n # Full address\n segments.extend(halves[0].split(':'))\n\n else:\n # More than one zero-skip\n return False\n\n try:\n processed_segments: List[int] = list(map(lambda x: int(x, 16) if x else 0, segments))\n except ValueError:\n # IPv6 address was not made of valid hexadecimal numbers\n return False\n\n if len(processed_segments) != IPV6_MAX_SEGMENT_COUNT:\n # Invalid number of segments\n return False\n\n if strict:\n for seg in processed_segments:\n if not 0 <= seg <= IPV6_MAX_SEGMENT_VALUE:\n # Segment value was too high or too low to be strictly valid\n return False\n\n return True\n\n elif isinstance(address, int):\n return IPV6_MIN_VALUE <= address <= IPV6_MAX_VALUE\n\n return False",
"def validateaddress(self, address):\n return Address.is_valid(address)",
"def check_address(btc_addr, network='test'):\n\n if network in ['test', \"testnet\"] and btc_addr[0] not in ['m', 'n']:\n raise Exception(\"Wrong testnet address format.\")\n elif network in ['main', 'mainnet'] and btc_addr[0] != '1':\n raise Exception(\"Wrong mainnet address format.\")\n elif network not in ['test', 'testnet', 'main', 'mainnet']:\n raise Exception(\"Network must be test/testnet or main/mainnet\")\n elif len(btc_addr) not in range(26, 35+1):\n raise Exception(\n \"Wrong address format, Bitcoin addresses should be 27-35 hex char long.\")\n else:\n return True",
"def check_address(btc_addr, network='test'):\n\n if network in ['test', \"testnet\"] and btc_addr[0] not in ['m', 'n']:\n raise Exception(\"Wrong testnet address format.\")\n elif network in ['main', 'mainnet'] and btc_addr[0] != '1':\n raise Exception(\"Wrong mainnet address format.\")\n elif network not in ['test', 'testnet', 'main', 'mainnet']:\n raise Exception(\"Network must be test/testnet or main/mainnet\")\n elif len(btc_addr) not in range(26, 35+1):\n raise Exception(\"Wrong address format, Bitcoin addresses should be 27-35 hex char long.\")\n else:\n return True",
"def validate_ipv46_address(value: Any):\n try:\n validate_ipv4_address(value)\n except ValidationError:\n try:\n validate_ipv6_address(value)\n except ValidationError:\n raise ValidationError(f\"'{value}' is not a valid IPv4 or IPv6 address.\")",
"def validate_ip_addresses(value: str) -> str:\n if len(value) > 10:\n return \"have length less than or equal to 10\"\n return \"\"",
"def test_valid_ipv4(self):\n self.assertEqual(is_valid_ip_address(\"192.168.0.55\"), True)",
"def is_valid(cls, addr):\n try:\n MAC(addr)\n except Exception:\n return False\n return True",
"def verify_ip(val):\n if not isinstance(val, str):\n raise Exception(f'Value is not a string. Type: {type(val)}')\n\n if not IP_PATTERN.fullmatch(val):\n raise Exception('Value does not seem to be an IPv4 address')",
"def is_valid_pxeboot_address(self, ip_address):\n if ip_address.version != 4:\n print(\"Invalid IP version - only IPv4 supported\")\n return False\n elif ip_address == self.pxeboot_subnet.network:\n print(\"Cannot use network address\")\n return False\n elif ip_address == self.pxeboot_subnet.broadcast:\n print(\"Cannot use broadcast address\")\n return False\n elif ip_address.is_multicast():\n print(\"Invalid network address - multicast address not allowed\")\n return False\n elif ip_address.is_loopback():\n print(\"Invalid network address - loopback address not allowed\")\n return False\n elif ip_address not in self.pxeboot_subnet:\n print(\"Address must be in the PXEBoot subnet\")\n return False\n else:\n return True",
"def validate_address():\n def validate(address, typ, currency):\n try:\n ver = currencies.validate_bc_address(address)\n except InvalidAddressException:\n return False\n\n if typ == 'buyable':\n lst = currencies.buyable_currencies\n elif typ == 'sellable':\n lst = currencies.sellable_currencies\n elif typ == 'unsellable':\n lst = currencies.unsellable_currencies\n elif typ == 'unbuyable':\n lst = currencies.unbuyable_currencies\n else:\n abort(400)\n\n for curr in lst:\n if ver in curr.address_version:\n if curr.key == currency or currency == 'Any':\n return True\n return False\n\n data = request.json\n if validate(data['address'], data['type'], data['currency']):\n return jsonify({data['currency']: True})\n else:\n return jsonify({data['currency']: False})",
"def validate_ip_addr(addr, version=None):\n if version == 4:\n return netaddr.valid_ipv4(addr)\n elif version == 6:\n return netaddr.valid_ipv6(addr)\n else:\n return netaddr.valid_ipv4(addr) or netaddr.valid_ipv6(addr)",
"def sanitizeMAC(addr, ciscoFormat=False):\n\tif ciscoFormat:\n\t\tchar = '.'\n\t\tlen0 = 3\n\t\tlen1 = 4\n\t\ttop = 65536\n\telse:\n\t\tchar = ':'\n\t\tlen0 = 6\n\t\tlen1 = 2\n\t\ttop = 256\n\taddr = addr.split(char)\n\tif len(addr) != len0:\n\t\treturn False\n\tfor part in addr:\n\t\tif len(part) != len1:\n\t\t\treturn False\n\t\ttry:\n\t\t\tif not int(part, 16) < top:\n\t\t\t\treturn False\n\t\texcept ValueError:\n\t\t\treturn False\n\treturn True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|