query
stringlengths 9
9.05k
| document
stringlengths 10
222k
| negatives
listlengths 19
20
| metadata
dict |
---|---|---|---|
Calculate the price with a discount and return the sum
|
def calculate_total_price(prices, discount):
    """Return the sum of *prices* after applying a percentage discount.

    Args:
        prices: Iterable of numeric prices.
        discount: Discount percentage to apply to each price (e.g. 25 for 25%).

    Returns:
        int: The discounted total, rounded down with ``math.floor``.
    """
    import math  # local import so the function is self-contained

    # The percentage-to-fraction conversion is loop-invariant; compute it once.
    rate = discount / 100
    sum_prices = 0
    for price in prices:
        discounted = price - price * rate
        print(discounted)
        sum_prices += discounted
    # Bug fix: the original printed the builtin ``sum`` function object
    # instead of the accumulated total.
    print(sum_prices)
    return math.floor(sum_prices)
|
[
"def apply_discount(price, discount):\n return (money_to_float(price)\n .fold(lambda cost:\n (percent_to_float(discount)\n .fold(lambda savings: cost * (1 - savings)))))",
"def get_final_price(price, discount_percentage=10):\n return price-( price* discount_percentage / 100)",
"def price(self):\n return self._price * (100 - self.discount) / 100",
"def final_price(self):\n return self.price - self.price * self.discount",
"def discount_price(product, discount):\n if config_value('TAX', 'DEFAULT_VIEW_TAX'):\n return taxed_discount_price(product, discount)\n else:\n return untaxed_discount_price(product, discount)",
"def discount(self, discount: float) -> None:\n self.price = self.price * discount",
"def calculate_final_price(self):\n final_price = self.price\n if self.discount.specify_discount_status() == 'Active':\n if self.discount.type == '$':\n final_price = self.price - self.discount.value\n elif self.discount.type == '%':\n if self.discount.value != 0:\n final_price = self.price - ((self.discount.value / 100) * self.price)\n else:\n pass\n return int(final_price)",
"def taxed_discount_price(product, discount):\n price = untaxed_discount_price(product, discount)\n taxer = satchmo_tax._get_taxprocessor()\n return price + taxer.by_price(product.taxClass, price)",
"def you_save(self):\n if self.discount_price:\n return self.price - self.discount_price\n else:\n return self.price - self.calc_discount_price()",
"def _value_discount(base_price):\n if base_price <= 1000.0:\n return .03\n elif base_price < 3000.0:\n return .05\n elif base_price < 10000.0:\n return .07\n elif base_price < 50000.0:\n return .1\n else:\n return .15",
"def discounted_return(self, discount):\n\n discounted_return = 0\n factor = 1\n for step_record in self:\n discounted_return += factor * step_record.reward\n factor *= discount\n return discounted_return",
"def fuelPrice(litres, price):\n if litres < 2:\n discount = 0\n elif litres < 4:\n discount = .5*litres\n elif litres < 6:\n discount = .10*litres\n elif litres < 8:\n discount = .15*litres\n elif litres < 10:\n discount = .20*litres\n else:\n discount = .25*litres\n return round(litres*price - discount, 2)",
"def taxed_discount_line_total(cartitem, discount):\n price = untaxed_discount_line_total(cartitem, discount)\n taxer = satchmo_tax._get_taxprocessor()\n price = price + taxer.by_price(cartitem.product.taxClass, price)\n\n return price",
"def taxed_discount_cart_total(cart, discount):\n total = Decimal('0.00')\n\n for item in cart:\n total += taxed_discount_line_total(item, discount)\n\n return total",
"def get_total_discount(basket, offers, catalogue):\n discount = 0.0\n\n for item, quantity in basket.items():\n offer_type = offers.get(item)\n if offer_type:\n offer_type = offers[item][0]\n offer_value = offers[item][1]\n item_price = catalogue[item]\n if offer_type == \"PERCENT_OFFER\":\n discount += quantity * item_price * int(offer_value) / 100\n elif offer_type == \"MULTI_OFFER\":\n charge_for_quantity = float(offer_value.split(\",\")[0])\n free_quantity = float(offer_value.split(\",\")[1])\n bundles, remainder = divmod(\n quantity, charge_for_quantity + free_quantity)\n if remainder > charge_for_quantity:\n bundles += 1\n remainder = 0\n charge_quantity = (bundles * charge_for_quantity) + remainder\n discount += (quantity - charge_quantity) * item_price\n\n return round(discount, 2)",
"def total(anItem):\r\n\r\n if anItem.price <= 0:\r\n raise ValueError(\"total does not compute prices at or below 0 cent\")\r\n\r\n if anItem.necessary:\r\n tax = anItem.price * 0.01\r\n else:\r\n tax = anItem.price * 0.09\r\n return anItem.price + tax",
"def calculate_discount(offer, delivery_fee):\n if offer.get('offer_type') == 'FLAT':\n discount = offer.get('offer_val')\n elif offer.get('offer_type') == 'DELIVERY':\n discount = delivery_fee\n else:\n discount = 0\n\n return discount",
"def discount(t,r):\r\n return (1+r)**(-t)",
"def compute_sale_price(self, perc_margin=0.0):\n new_sale_price = self.standard_price * ((100 + perc_margin) / 100)\n return new_sale_price"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Takes a list of dictionaries with "width", "height", and "method" keys and creates a map from image media types to the thumbnail size, thumbnailing method, and thumbnail media type to precalculate
|
def parse_thumbnail_requirements(
    thumbnail_sizes: List[JsonDict],
) -> Dict[str, Tuple[ThumbnailRequirement, ...]]:
    """Precompute thumbnail requirements per source media type.

    Takes a list of dicts with "width", "height" and "method" keys and builds
    a map from each supported source media type to the tuple of
    ``ThumbnailRequirement``s (size, method, output media type) to generate.

    Raises:
        Exception: if ``THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP`` maps to an
            output format other than "jpeg" or "png".
    """
    by_media_type: Dict[str, List[ThumbnailRequirement]] = {}
    for size in thumbnail_sizes:
        width, height, method = size["width"], size["height"], size["method"]
        for format, thumbnail_format in THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP.items():
            # Resolve the output media type first, failing fast on an
            # unsupported mapping.
            if thumbnail_format == "jpeg":
                media_type = "image/jpeg"
            elif thumbnail_format == "png":
                media_type = "image/png"
            else:
                raise Exception(
                    "Unknown thumbnail mapping from %s to %s. This is a Synapse problem, please report!"
                    % (format, thumbnail_format)
                )
            by_media_type.setdefault(format, []).append(
                ThumbnailRequirement(width, height, method, media_type)
            )
    # Freeze each requirement list into a tuple for the returned mapping.
    return {
        media_type: tuple(thumbnails)
        for media_type, thumbnails in by_media_type.items()
    }
|
[
"def create_qrcode_thumbnails(original_image):\n thumbnail_kwargs = {\"format\": \"PNG\"}\n sizes = {\"small\": \"64x64\", \"medium\": \"256x256\", \"large\": \"490x490\"}\n images = {}\n for size, resolution in sizes.items():\n if size == \"square\":\n thumbnail = get_thumbnail(\n original_image, resolution, crop=\"center center\", **thumbnail_kwargs\n )\n else:\n thumbnail = get_thumbnail(original_image, resolution, **thumbnail_kwargs)\n images[size] = {}\n images[size][\"url\"] = thumbnail.url\n try:\n images[size][\"width\"] = thumbnail.width\n images[size][\"height\"] = thumbnail.height\n except TypeError: # NOTE: Happens when thumbnail isn't really made yet\n pass\n return images",
"def create_thumbnails(original_image):\n thumbnail_kwargs = {\n \"format\": \"JPEG\",\n \"progressive\": True,\n \"orientation\": True,\n \"quality\": 95,\n \"upscale\": True,\n }\n sizes = {\n \"square\": \"640x640\",\n \"small\": \"640x360\",\n \"medium\": \"1280x720\",\n \"large\": \"1920x1080\",\n }\n images = {}\n for size, resolution in sizes.items():\n if size == \"square\":\n thumbnail = get_thumbnail(\n original_image, resolution, crop=\"center center\", **thumbnail_kwargs\n )\n else:\n thumbnail = get_thumbnail(original_image, resolution, **thumbnail_kwargs)\n images[size] = {}\n images[size][\"url\"] = thumbnail.url\n try:\n images[size][\"width\"] = thumbnail.width\n images[size][\"height\"] = thumbnail.height\n except TypeError: # NOTE: Happens when thumbnail isn't really made yet\n pass\n return images",
"def thumbnail_settings(self):\n return {\n \"dimension\": \"300x200\",\n \"original_field\": \"photo\",\n \"thumbnail_field\": \"thumbnail\"\n }",
"def get_thumbnail_map():\n thumbnail_regex = re.compile(r'thumbnail: (.*)')\n thumbnails = []\n for file in os.listdir(join(ROOT, '_posts')):\n with open(join(ROOT, '_posts', file)) as in_file:\n content = in_file.read()\n thumbnails.append(thumbnail_regex.search(content).group(1))\n return thumbnails",
"def get_image_sizes(metadata):\n image_sizes = {}\n with open(metadata.image_sizes) as f:\n for line in f.readlines():\n image_id, ws, hs = line.strip('\\n').split(',')\n w, h = int(ws), int(hs)\n image_sizes[image_id] = (w, h)\n return image_sizes",
"def _resize_images(images: List) -> List:\n return list(\n map(\n lambda i: i.resize((64, 64)),\n images\n )\n )",
"def generate_thumbnails(self, fieldname='image'):\n original = getattr(self.context, fieldname, None)\n if not original:\n return False\n \n an_key = \"%s.%s\" % (self.annotation_prefix, fieldname)\n thumbs = dict()\n\n for format, size in self.thumbnails_scales.iteritems():\n data = StringIO(str(original))\n image = Image.open(data)\n image.thumbnail(size, Image.ANTIALIAS)\n tfd = StringIO()\n image.save(tfd, image.format, quality=90)\n thumbs[format] = tfd.getvalue()\n\n an = IAnnotations(self.context)\n an[an_key] = thumbs\n return True",
"def get_image_sizes(page, templ_vars):\n if 'type' in page.meta and page.meta['type'] == 'album':\n album = page.meta\n\n srcs = []\n\n # get absolute paths of images in album for each file type\n for file_type in FILE_TYPES:\n image_list = glob.glob(GALLERY_DIR + album['slug'] + '/*.' + file_type)\n srcs += image_list\n\n # split full srcs and thumb srcs from srcs into two lists\n full_sizes = []\n thumb_sizes = []\n for src in sorted(srcs):\n image = Image.open(src)\n width = image.size[0]\n height = image.size[1]\n size = [width, height]\n\n if src.split('/')[-1].startswith(THUMB_PREFIX):\n thumb_sizes.append(size)\n else:\n full_sizes.append(size)\n\n # bind to template via json\n templ_vars['site']['sizes'] = simplejson.dumps(full_sizes)\n templ_vars['site']['thumb_sizes'] = simplejson.dumps(thumb_sizes)",
"def create_photos_web_images(self,kwargs):\n\n files = json.load(kwargs['mappings'])\n print('[+] Processing {} files'.format(len(files)))\n\n names = cfg.WEB_IMAGE_NAMES\n sizes = cfg.WEB_IMAGE_SIZES\n\n # resize image\n for f in tqdm(files[:1]):\n # construct source filepath\n sha256 = f['sha256']\n ext = f['ext']\n sha256_tree = fiox.sha256_tree(sha256)\n fp_src = join(kwargs['input'],sha256_tree,'{}{}'.format(sha256,ext))\n try:\n im = cv.imread(fp_src)\n except:\n print('[-] Could not load: {}'.format(fp_src))\n continue\n if im is None or im.shape[0] == 0:\n print('[-] Bad file: {}'.format(fp_src))\n continue\n\n # make all sizes\n for abbr,w in zip(names,sizes):\n fp_dir_out = join(kwargs['output'],sha256_tree,sha256,abbr)\n fiox.ensure_dir(fp_dir_out)\n fp_im = join(fp_dir_out,'index.jpg')\n im_pil = imx.ensure_pil(im,bgr2rgb=True)\n w_orig,h_orig = im_pil.size\n h = int((w / w_orig) * h_orig)\n im_pil = im_pil.resize((w,h), Image.ANTIALIAS)\n #im_pil.save(fp_im, 'PNG', quality=)\n im_pil.save(fp_im, 'JPEG', quality=kwargs['quality'])",
"def each(thumb_map):\n return (ScaleFingering(thumb_map, i) for i in range(7))",
"def create_thumbnails(self):\n \n initial = log_time(\"create_thumbnails\")\n sizes = sorted(self.sizes.items(), key=lambda x: x[1][0], reverse=True)\n last = None\n \n for name, dimension in sizes:\n self.create_thumbnail(name, last=last)\n last = name\n assert self.thumbnail_exists(name)\n\n log_time(\"end create_thumbnails\", initial=initial)",
"def _resize_img(self, results):\n for key in ['image'] if 'image' in results else []:\n if self.keep_ratio:\n img, scale_factor = imrescale(\n results[key],\n results['scale'],\n return_scale=True,\n interpolation=self.interpolation,\n backend=self.backend)\n # the w_scale and h_scale has minor difference\n # a real fix should be done in the imrescale in the future\n new_h, new_w = img.shape[:2]\n h, w = results[key].shape[:2]\n w_scale = new_w / w\n h_scale = new_h / h\n else:\n img, w_scale, h_scale = imresize(\n results[key],\n results['scale'],\n return_scale=True,\n interpolation=self.interpolation,\n backend=self.backend)\n\n scale_factor = np.array(\n [w_scale, h_scale, w_scale, h_scale], dtype=np.float32)\n results['im_shape'] = np.array(img.shape)\n # in case that there is no padding\n results['pad_shape'] = img.shape\n results['scale_factor'] = scale_factor\n results['keep_ratio'] = self.keep_ratio\n # img_pad = self.impad(img, shape=results['scale'])\n results[key] = img",
"def resize(event: Dict) -> List[Image.Image]:\n # Read the images urls passed:\n images_urls = event[\"data_url\"]\n\n # Initialize an empty list for the resized images:\n resized_images = []\n\n # Go through the images urls and read and resize them:\n for image_url in images_urls:\n # Get the image:\n urllib.request.urlretrieve(image_url, \"temp.png\")\n image = Image.open(\"temp.png\")\n # Resize it:\n image = image.resize((224, 224))\n # Collect it:\n resized_images.append(image)\n\n return resized_images",
"def getPhotoSizes(self, photo_id):\n result = self.flickr.photos_getSizes(photo_id=photo_id)\n sizes = dict()\n # Set defaults to None\n for label in ('Square','Thumbnail','Small','Medium','Large','Original'):\n sizes[label] = {'width': None, 'height': None}\n # Set values given by flickr\n for el in result.sizes[0].size:\n size = el['label']\n if 'Medium' in size:\n size = 'Medium'\n elif 'Small' in size:\n size = 'Small'\n elif 'Large' in size:\n size = 'Large'\n sizes[size]['width'] = el['width']\n sizes[size]['height'] = el['height']\n return sizes",
"def _rescaleObjDict(OrigImageDict,OrijObjectList,resizeWidth,resizeHeight):\n \n alteredObjectList = copy.deepcopy(OrijObjectList)\n \n ratioX = resizeWidth/OrigImageDict['width']\n ratioY = resizeHeight/OrigImageDict['height']\n \n for eachObj in alteredObjectList:\n\n eachObj['xmin'] = int(eachObj['xmin'] * ratioX)\n eachObj['ymin'] = int(eachObj['ymin'] * ratioY)\n eachObj['xmax'] = int(eachObj['xmax'] * ratioX)\n eachObj['ymax'] = int(eachObj['ymax'] * ratioY)\n \n \n return alteredObjectList",
"def create_image_manifest(name,\n uris_list, dt_list, crs_list,\n id_list, md_list, pp_list, tsbi_list,\n properties_dict,\n start_time, end_time,\n footprint=None, pyramiding_policy=None, uri_prefix=None,\n missing_data=None\n ):\n\n # Check case (mosaic or band per tileset?)\n uril = len(uris_list)\n print(\"Number of tilesets\", uril)\n bl = len(id_list)\n print(\"Number of bands\", bl)\n\n # Create timestamps\n st = create_timestamp(start_time)\n et = create_timestamp(end_time)\n\n # Create bands\n bands = create_band_list(id_list, md_list, pp_list, tsbi_list)\n\n # Create tilesets\n if uril == 1:\n tilesets = create_tilesets_list(\n uris_list, dt_list, crs_list, id_list=None)\n else:\n tilesets = create_tilesets_list(\n uris_list, dt_list, crs_list, id_list=id_list)\n\n # Create properties\n props = create_properties_dict(properties_dict)\n\n # Name\n name = manifest.Name(name)\n\n # Create manifest\n out = manifest.ImageManifest(**{\n \"name\": name,\n \"bands\": bands,\n \"tilesets\": tilesets,\n \"properties\": props,\n \"start_time\": st,\n \"end_time\": et,\n \"footprint\": footprint,\n \"pyramiding_policy\": pyramiding_policy,\n \"uri_prefix\": uri_prefix,\n \"missing_data\": missing_data\n })\n\n return(out)",
"def make_images_dict(directory=IMG_PATH, max_dict_size=9000, is_color=False):\n list_name = {}\n list_dirs = os.listdir(directory)\n list_dirs.sort()\n for cur_dir in list_dirs:\n list_file = os.listdir(os.path.join(directory, cur_dir))\n list_file.sort()\n for cur_file in list_file:\n if not is_color:\n img = Image.open(os.path.join(\n directory, cur_dir, cur_file)).convert(\"L\")\n else:\n img = Image.open(os.path.join(directory, cur_dir, cur_file))\n list_name[cur_file] = img\n if len(list_name) == max_dict_size:\n return list_name\n return list_name",
"def thumbnail_dict(self) -> dict:\n return self.video_data.get('thumbnails')",
"def thumbnail_dict(self) -> dict:\n return self._snippet.get('thumbnails')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that trial specific restrictions apply
|
def test_trial_only_restriction(self):
    """Test that trial-specific restriction rules apply."""
    # Install a single trial-scoped rule keyed by "123".
    # NOTE(review): the expectations below suggest the rule matches IDs by
    # prefix ("1234" and "1235" restricted, "1335" not) — confirm against
    # the implementation of ``given``/``expect``.
    self.given({
        "rules:trial": {
            "123": "restrict",
        }
    })
    # IDs starting with "123" resolve to "restrict"; a non-matching ID
    # resolves to None (no restriction).
    self.expect((
        ("1234", "restrict"),
        ("1235", "restrict"),
        ("1335", None),
    ))
|
[
"def visitTrial(self, testSuite):",
"def test_all_same():\n assert meets_criteria(111111) == True, \"Should be True\"",
"def test_conformance(self):\n self._request_valid(\"conformance\")",
"def test_getinvestmentrequirements(self):\n pass",
"def test_pos_2():\n assert meets_criteria(111123) == True, \"Should be True\"",
"def pre_create_trial(self):",
"def run_trial(self, trial):\n pass",
"def test_all_scenarios(self):\n\n exr_bash = self.prep_exr()\n percents = [1, 50, 90]\n # TODO: Don't use a for loop, use the trials kwarg\n for i in range(0, 2):\n Simulator().run(attack_types=Attack.runnable_attacks,\n adopt_policies=list(Non_Default_Policies.__members__.values()),\n percents=percents,\n exr_bash=exr_bash)",
"def test_06_allow_forbid_negative_recharges(self):\n\tprint \"...starting test 2.09\"\n\tself.testHandler.handle_maxwell_request(\"voltage:0\")\n\tself.testHandler.handle_maxwell_request(\"phase_load:10\")\n\ttime.sleep(5)\n\tpower=float((self.testHandler.handle_network_request(\"get_active_power\", validity_level=\"medium\")).split(\" \")[0])\n print \"active power = \",power,\" watts\"\n\tself.testHandler.handle_network_request(\"set_credit_limit:-100\")\n\t#test for allow or forbid negative recharges\n\tprint\"clearing account\"\n\tself.testHandler.handle_network_request(\"clear_account\", validity_level=\"high\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tassert_equal(a,0.0)\n\tprint\"allowing negative credits\"\n\tself.testHandler.handle_network_request(\"allow_negative_credits\")\n\tself.testHandler.handle_network_request(\"recharge:100\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tself.testHandler.handle_network_request(\"recharge:-120\")\n\ttime.sleep(2)\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tassert_equal(a,-20)\n\tprint\"test for forbid negative recharge strarted\"\n\tprint\"clearing account\"\n\tself.testHandler.handle_network_request(\"clear_account\", validity_level=\"high\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = 
\",a\n\tassert_equal(a,0.0)\n\tprint\"forbiding negative credits and recharging with 100\"\n\tself.testHandler.handle_network_request(\"forbid_negative_credits\")\n\tself.testHandler.handle_network_request(\"recharge:100\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tself.testHandler.handle_network_request(\"recharge:-120\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tassert(a!=-20.0)\n\tassert(a==100.0)\n\tprint'it should not accept the recharge and then print credits'\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a",
"def test_actual_requirements(self):\n expected = (set(self.__subject.default_requirements()) | {self.__added}) - {self.__removed}\n self.assertEqual(expected, self.__subject.requirements())",
"def can_perform_forbidden_test():\n string = [Variable(\"A\"), Variable(\"B\"), Variable(\"C\"), Variable(\"D\"), Variable(\"S\")]\n assert not test_production.can_perform(string)",
"def test_accessing_predefined_strength() -> None:\n assert strength.weak < strength.medium\n assert strength.medium < strength.strong\n assert strength.strong < strength.required",
"def visitTrialAfter(self, testSuite):",
"def test_standings(self):\n pass",
"def test_09_credit_limit(self):\n\tprint \"...starting test 2.09\"\n\tprint \"putting load off\"\n\tself.testHandler.handle_maxwell_request(\"voltage:0\")\n\tself.testHandler.handle_maxwell_request(\"phase_load:10\")\n\ttime.sleep(5)\n\tself.testHandler.handle_network_request(\"forbid_negative_credits\")\n\tprint \"restriction of negative credits\"\n\tprint\"clearing account\"\n\tself.testHandler.handle_network_request(\"clear_account\", validity_level=\"medium\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"high\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n \tassert_equal(a,0.0)\n\tprint \"recharging with 10\"\n\tself.testHandler.handle_network_request(\"recharge:10\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"high\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tself.testHandler.handle_network_request(\"slab_disable\")\n\tprint \"slab disabled\"\n\tself.testHandler.handle_network_request(\"fd_disable\")\n\tprint \" fd disabled\"\n\tself.testHandler.handle_network_request(\"set_credit_limit:-2147483648 \",validity_level=\"medium\")\n\tcredit_limit=int((self.testHandler.handle_network_request(\"get_credit_limit\")).split(\" \")[0])\n\tprint \"credit limit = \",credit_limit\t\n\tprint\"checking for huge positive recharge\"\n\tprint \"recharging with 2147483660\"\n\tself.testHandler.handle_network_request(\"recharge:2147483660\",validity_level=\"medium\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"high\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"recharge should not be accepted the ceredits should remain same \"\n\tprint \"accurate credits = \",a\n\tassert_equal(a,10.0)\n\tprint \"passed for huge positive value 
test\" \n\n\tprint \"checking for huge negative values\"\n\tprint\"clearing account\"\n\tself.testHandler.handle_network_request(\"clear_account\", validity_level=\"medium\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"high\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n \tassert_equal(a,0.0)\n\tprint \"recharging with 10\"\n\tself.testHandler.handle_network_request(\"recharge:10\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"high\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tprint \"recharging with -2147483660\"\n\tself.testHandler.handle_network_request(\"recharge:-2147483660\",validity_level=\"medium\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"high\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"recharge should not be accepted the ceredits should remain same ie.. 10\"\n\tprint \"accurate credits = \",a\n\tif(a>10.0):\n\t\tprint \"the value has got a jump towards positve end so test is about to fail\"\n\telif(a==10.0):\n\t\tprint \"the test should pass\"\n\telse:\n\t\tprint\"the price has decreased from 10 check the load\"\n\tassert_equal(a,10.0)",
"def testAccessAllowedWithPhonyInputs(self):\n access_checker = access.AllAllowedAccessChecker()\n access_checker.checkAccess(Explosive(), Explosive())",
"def test_resource_details(self):\r\n\r\n # Check anonymous user and logged in user with no permissions\r\n for user in ['anonymous', 'registered']:\r\n response = self.client[user].get(self.urls['resource_private'])\r\n self.failUnlessEqual(response.status_code, 403)\r\n\r\n # Check people who should have access to the private project\r\n for user in ['maintainer', 'team_coordinator', 'team_member',\r\n 'reviewer']: # 'writer',\r\n response = self.client[user].get(self.urls['resource_private'])\r\n self.failUnlessEqual(response.status_code, 200)",
"def test6_evaluation(self):\n self.data = clam.common.data.ParameterCondition(x=True,\n then=clam.common.data.SetMetaField('x','yes'),\n )\n parameters = {}\n out = self.data.evaluate(parameters)\n self.assertTrue(out == False)",
"def test_get_unusual_activity(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Performs pairwise matching of nans between ``x`` and ``y``.
|
def match_nans(x, y):
    """Performs pairwise matching of nans between ``x`` and ``y``.

    Wherever either input holds a NaN, both outputs hold a NaN at that
    position. Inputs without missing values are returned unchanged.
    """
    # Nothing to do when neither array has missing values.
    if not (has_missing(x) or has_missing(y)):
        return x, y
    # Copy to avoid mutating the originals and to avoid writeable errors
    # with ``xr.apply_ufunc`` with vectorize turned on.
    x = x.copy()
    y = y.copy()
    nan_mask = np.isnan(x) | np.isnan(y)
    # Integer arrays cannot hold NaN, so promote them to float first.
    if x.dtype == "int":
        x = x.astype("float")
    if y.dtype == "int":
        y = y.astype("float")
    x[nan_mask] = np.nan
    y[nan_mask] = np.nan
    return x, y
|
[
"def nmi(X, Y):\n # remove pairs with a missing value in comparison\n\n new_X , new_Y = remove_pairs_with_a_missing(X, Y, missing_char=0)\n return normalized_mutual_info_score(new_X, new_Y), None #return NormalizedMutualInformation(pData1, pData2).get_distance() ",
"def distance_SNN(x, y):\n # Neighborhood size of each point\n s_x = x.shape[0]\n s_y = y.shape[0]\n\n # Size of neighborhood overlap.\n # Using loops since the numpy functions such as `numpy.isin` and `numpy.intersect1d` are not supported by numba\n s_xy = 0.\n for i in x:\n for j in y:\n if i == j:\n s_xy += 1.\n break\n\n cs = s_xy / ((s_x * s_y) ** 0.5)\n # Clip values to the range `[-1, 1]`, the domain of arc-cosine\n dist = np.arccos(max(-1., min(1., cs)))\n\n return dist",
"def _check_nans(self, other=None, context=None):\r\n\r\n self_is_nan = self._isnan()\r\n if other is None:\r\n other_is_nan = False\r\n else:\r\n other_is_nan = other._isnan()\r\n\r\n if self_is_nan or other_is_nan:\r\n if context is None:\r\n context = getcontext()\r\n\r\n if self_is_nan == 2:\r\n return context._raise_error(InvalidOperation, 'sNaN',\r\n self)\r\n if other_is_nan == 2:\r\n return context._raise_error(InvalidOperation, 'sNaN',\r\n other)\r\n if self_is_nan:\r\n return self._fix_nan(context)\r\n\r\n return other._fix_nan(context)\r\n return 0",
"def alternating_nones(self, x, y):\n for v1, v2 in zip(self.data[x], self.data[y]):\n if (v1 is not None) and (v2 is not None):\n return False\n return True",
"def calculate_non_mask_overlaps(x_mask, y_mask):\n x_is_not_nan = 1 * ~x_mask\n y_is_not_nan = 1 * ~y_mask\n\n r = np.dot(x_is_not_nan.T, y_is_not_nan)\n return r",
"def match2d(x1, y1, x2, y2, rad):\n from scipy.spatial import cKDTree\n xx1 = numpy.stack([x1, y1], axis=1)\n xx2 = numpy.stack([x2, y2], axis=1)\n tree1 = cKDTree(xx1)\n tree2 = cKDTree(xx2)\n res = tree1.query_ball_tree(tree2, rad)\n lens = [len(r) for r in res]\n m1 = numpy.repeat(numpy.arange(len(x1), dtype='i4'), lens)\n if sum([len(r) for r in res]) == 0:\n m2 = m1.copy()\n else:\n m2 = numpy.concatenate([r for r in res if len(r) > 0])\n d12 = numpy.sqrt(numpy.sum((xx1[m1, :]-xx2[m2, :])**2, axis=1))\n return m1, m2, d12",
"def test_skipna_returns_same_value_as_dropped_pairwise_nans(\n a_1d_fixed_nan, b_1d_fixed_nan, metric\n):\n a_dropped, b_dropped, _ = drop_nans(a_1d_fixed_nan, b_1d_fixed_nan)\n with raise_if_dask_computes():\n res_with_nans = metric(a_1d_fixed_nan, b_1d_fixed_nan, \"time\", skipna=True)\n res_dropped_nans = metric(a_dropped, b_dropped, \"time\")\n assert_allclose(res_with_nans, res_dropped_nans)",
"def wilcoxon_exact(x, y=None, alternative=\"two-sided\"):\n if alternative not in [\"two-sided\", \"less\", \"greater\"]:\n raise ValueError(\"Alternative must be either 'two-sided'\", \"'greater' or 'less'\")\n\n x = np.array(x)\n if x.ndim > 1:\n raise ValueError(\"Sample x must be one-dimensional\")\n\n if y is not None:\n y = np.array(y)\n if y.ndim > 1:\n raise ValueError(\"Sample y must be one-dimensional\")\n if x.shape != y.shape:\n raise ValueError(\"Sample x and y must have the same length.\")\n diff = x - y\n else:\n diff = x\n\n if np.unique(np.abs(diff)).size != diff.size:\n if y is None:\n raise ValueError(\"abs(x) values must be unique\")\n else:\n raise ValueError(\"abs(x - y) values must be unique\")\n\n ranks = scipy.stats.rankdata(np.abs(diff))\n signs = np.sign(diff)\n T = (signs*ranks).sum()\n\n n = diff.size\n if n > 30:\n print(\"warning: sample size is large for exact calculation\\n\" + \n \" calculation may be slow\", file=sys.stderr)\n rank_sum, pmf = compute_pmf(n)\n\n if alternative == \"less\":\n idx = rank_sum <= T\n p = pmf[idx].sum()\n elif alternative == \"greater\":\n idx = rank_sum >= T\n p = pmf[idx].sum()\n else:\n idx = np.logical_or(rank_sum <= -np.abs(T), rank_sum >= np.abs(T))\n p = pmf[idx].sum()\n\n return T, p",
"def _match_tuples(y_true, y_pred):\n n_true = len(y_true)\n n_pred = len(y_pred)\n\n iou_matrix = np.empty((n_true, n_pred))\n\n for i in range(n_true):\n for j in range(n_pred):\n iou_matrix[i, j] = iou(y_true[i], y_pred[j])\n\n idxs_true, idxs_pred = linear_sum_assignment(1 - iou_matrix)\n\n if (not idxs_true.size) or (not idxs_pred.size):\n ious = np.array([])\n else:\n ious = iou_matrix[idxs_true, idxs_pred]\n return idxs_true, idxs_pred, ious",
"def test_nan_values(self):\n f_1d = self.loader.load(find(\"nans_in_1d_data.dat\"))[0]\n f_2d = self.loader.load(find(\"nans_in_2d_data.DAT\"))[0]\n for i in range(0, len(f_1d.x) - 1):\n self.assertFalse(math.isnan(f_1d.x[i]))\n self.assertFalse(math.isnan(f_1d.y[i]))\n self.assertFalse(math.isnan(f_1d.dy[i]))\n self.assertTrue(isinstance(f_2d, Data2D))\n f_2d.data = f_2d.data.flatten()\n f_2d.qx_data = f_2d.qx_data.flatten()\n f_2d.qy_data = f_2d.qy_data.flatten()\n for i in range(0, len(f_2d.data) - 1):\n self.assertFalse(math.isnan(f_2d.data[i]))\n self.assertFalse(math.isnan(f_2d.qx_data[i]))\n self.assertFalse(math.isnan(f_2d.qy_data[i]))",
"def match(x1, y1, m1, x2, y2, m2, dr_tol, dm_tol=None):\n \n x1 = np.array(x1, copy=False)\n y1 = np.array(y1, copy=False)\n m1 = np.array(m1, copy=False)\n x2 = np.array(x2, copy=False)\n y2 = np.array(y2, copy=False)\n m2 = np.array(m2, copy=False)\n \n if x1.shape != y1.shape:\n raise ValueError('x1 and y1 do not match!')\n if x2.shape != y2.shape:\n raise ValueError('x2 and y2 do not match!')\n \n # Setup coords1 pairs and coords 2 pairs\n # this is equivalent to, but faster than just doing np.array([x1, y1])\n coords1 = np.empty((x1.size, 2))\n coords1[:, 0] = x1\n coords1[:, 1] = y1\n \n # this is equivalent to, but faster than just doing np.array([x1, y1])\n coords2 = np.empty((x2.size, 2))\n coords2[:, 0] = x2\n coords2[:, 1] = y2\n\n # Utimately we will generate arrays of indices.\n # idxs1 is the indices for matches into catalog 1. This\n # is just a place holder for which stars actually\n # have matches.\n idxs1 = np.ones(x1.size, dtype=int) * -1\n idxs2 = np.ones(x1.size, dtype=int) * -1\n\n # The matching will be done using a KDTree.\n kdt = KDT(coords2)\n\n # This returns the number of neighbors within the specified\n # radius. We will use this to find those stars that have no or one\n # match and deal with them easily. The more complicated conflict\n # cases will be dealt with afterward.\n i2_match = kdt.query_ball_point(coords1, dr_tol)\n Nmatch = np.array([len(idxs) for idxs in i2_match])\n\n # What is the largest number of matches we have for a given star?\n Nmatch_max = Nmatch.max()\n\n\n # Loop through and handle all the different numbers of matches.\n # This turns out to be the most efficient so we can use numpy\n # array operations. Remember, skip the Nmatch=0 objects... 
they\n # already have indices set to -1.\n for nn in range(1, Nmatch_max+1):\n i1_nn = np.where(Nmatch == nn)[0]\n\n if len(i1_nn) == 0:\n continue\n\n if nn == 1:\n i2_nn = np.array([i2_match[mm][0] for mm in i1_nn])\n if dm_tol != None:\n dm = np.abs(m1[i1_nn] - m2[i2_nn])\n keep = dm < dm_tol\n idxs1[i1_nn[keep]] = i1_nn[keep]\n idxs2[i1_nn[keep]] = i2_nn[keep]\n else:\n idxs1[i1_nn] = i1_nn\n idxs2[i1_nn] = i2_nn\n else:\n i2_tmp = np.array([i2_match[mm] for mm in i1_nn])\n\n # Repeat star list 1 positions and magnitudes\n # for nn times (tile then transpose) \n x1_nn = np.tile(x1[i1_nn], (nn, 1)).T\n y1_nn = np.tile(y1[i1_nn], (nn, 1)).T\n m1_nn = np.tile(m1[i1_nn], (nn, 1)).T\n\n # Get out star list 2 positions and magnitudes\n x2_nn = x2[i2_tmp]\n y2_nn = y2[i2_tmp]\n m2_nn = m2[i2_tmp]\n dr = np.abs(x1_nn - x2_nn, y1_nn - y2_nn)\n dm = np.abs(m1_nn - m2_nn)\n\n if dm_tol != None:\n # Don't even consider stars that exceed our\n # delta-mag threshold. \n dr_msk = np.ma.masked_where(dm > dm_tol, dr)\n dm_msk = np.ma.masked_where(dm > dm_tol, dm)\n\n # Remember that argmin on masked arrays can find\n # one of the masked array elements if ALL are masked.\n # But our subsequent \"keep\" check should get rid of all\n # of these.\n dm_min = dm_msk.argmin(axis=1)\n dr_min = dr_msk.argmin(axis=1)\n\n # Double check that \"min\" choice is still within our\n # detla-mag tolerence.\n dm_tmp = np.choose(dm_min, dm.T)\n\n keep = (dm_min == dr_min) & (dm_tmp < dm_tol)\n else:\n dm_min = dm.argmin(axis=1)\n dr_min = dr.argmin(axis=1)\n\n keep = (dm_min == dr_min)\n\n i2_keep_2D = i2_tmp[keep]\n dr_keep = dr_min[keep] # which i2 star for a given i1 star\n ii_keep = np.arange(len(dr_keep)) # a running index for the i2 keeper stars.\n\n idxs1[i1_nn[keep]] = i1_nn[keep]\n idxs2[i1_nn[keep]] = i2_keep_2D[ii_keep, dr_keep]\n\n idxs1 = idxs1[idxs1 >= 0]\n idxs2 = idxs2[idxs2 >= 0] \n\n dr = np.hypot(x1[idxs1] - x2[idxs2], y1[idxs1] - y2[idxs2])\n dm = m1[idxs1] - m2[idxs2]\n\n # 
Deal with duplicates\n duplicates = [item for item, count in Counter(idxs2).iteritems() if count > 1]\n print 'Found {0:d} out of {1:d} duplicates'.format(len(duplicates), len(dm))\n # for dd in range(len(duplicates)):\n # dups = np.where(idxs2 == duplicates[dd])[0]\n\n # # Handle them in brightness order -- brightest first in the first starlist\n # fsort = m1[dups].argsort()\n\n # # For every duplicate, match to the star that is closest in space and \n # # magnitude. HMMMM.... this doesn't seem like it will work optimally.\n\n \n return idxs1, idxs2, dr, dm",
"def check_pairwise_arrays(X, Y):\r\n if Y is X or Y is None:\r\n X = Y = atleast2d_or_csr(X)\r\n else:\r\n X = atleast2d_or_csr(X)\r\n Y = atleast2d_or_csr(Y)\r\n if X.shape[1] != Y.shape[1]:\r\n raise ValueError(\"Incompatible dimension for X and Y matrices: \"\r\n \"X.shape[1] == %d while Y.shape[1] == %d\" % (\r\n X.shape[1], Y.shape[1]))\r\n\r\n if not (X.dtype == Y.dtype == np.float32):\r\n if Y is X:\r\n X = Y = X.astype(np.float)\r\n else:\r\n X = X.astype(np.float)\r\n Y = Y.astype(np.float)\r\n return X, Y",
"def test_lookup_na_points():\n import math\n points = [[0,1], [float('nan'), 3], [4, float('nan')], [5, 6]] * 3\n r = xy.lookup(points, shoredistance=True, grids=False, areas=False, asdataframe=False)\n assert len(r) == len(points)\n for i in [1,2,5,6,9,10]:\n assert r[i] == {}\n r = xy.lookup(points, shoredistance=True, grids=True, areas=True, asdataframe=True)\n assert r.shape[0] == len(points)\n assert r.shape[1] >= 1\n for i in [1,2,5,6,9,10]:\n assert math.isnan(r[\"shoredistance\"][i])",
"def xnor(x, y):\r\n return (not xor(x, y))",
"def nk_similarity(A, B):\n\n A = triangular_corrcoef(A)\n B = triangular_corrcoef(B)\n\n rho = spearman(A, B)\n\n return rho",
"def accelerated_matching(features1, features2, x1, y1, x2, y2):\n\n #############################################################################\n # TODO: YOUR CODE HERE #\n #############################################################################\n matches = []\n confidences = []\n alpha = 0.8\n k1,d = features1.shape\n features = np.vstack((features1, features2))\n knn = NearestNeighbors(n_neighbors=3,algorithm='kd_tree',leaf_size=100,radius=20)\n knn.fit(features2)\n neigh_dist, neigh_ind = knn.kneighbors(features1, return_distance=True)\n for k in range(k1):\n ratio = neigh_dist[k][0]/neigh_dist[k][1]\n if ratio<=alpha:\n matches.append([k,neigh_ind[k][0]])\n confidences.append(ratio)\n\n matches = np.array(matches)\n confidences = np.array(confidences)\n idx = np.argsort(confidences)\n matches = matches[idx]\n confidences = confidences[idx]\n\n # raise NotImplementedError('`accelerated_matching` function in ' +\n # '`student_feature_matching.py` needs to be implemented')\n \n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return matches, confidences",
"def calc_indices(self, x, y):\n i1, i2 = self.calc_fractional_indices(x, y)\n # Use np.round to ensure that returned type is numpy array or scalar.\n nx = np.round(i1).astype(int)\n ny = np.round(i2).astype(int)\n return nx, ny",
"def nan_inds(x):\n\n nans = np.isnan(x)\n\n return nans",
"def findNearestNeighbors(X, Y, numberOfNearestPoints=1):\n\t\n\tfrom sklearn.neighbors import NearestNeighbors\n\n\tneigh = NearestNeighbors(n_neighbors=numberOfNearestPoints)\n\tneigh.fit(X)\n\tradii,indices=neigh.kneighbors(Y)\n\tpoints=X[indices]\n\t\n\treturn points, indices, radii"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Publishes a list of events. The events in the list 'events' are sent to the server in a new HTTP request.
|
def publish_events(self, events):
body = ztreamy.serialize_events(events)
logging.info("Connecting to " + self.hostname + " on port " + str(self.port))
conn = httplib.HTTPConnection(self.hostname, self.port)
conn.request('POST', self.path, body, ZtreamyClient._headers)
response = conn.getresponse()
if response.status == 200:
logging.info("Got 200 status from " + self.path)
logging.info("Sent :" + body)
return True
else:
logging.error(str(response.status) + ' ' + response.reason)
return False
|
[
"def send_events_batch(self, data):\n return self._write_request(self._base_url, 'track/', data, batch=True)",
"def events(self, events: List[DeploymentEvent]):\n\n self._events = events",
"def add_events(self, events: List[Event]):\n self.events.extend(events)",
"def publisher():\n backend = get_backend(\"school_backends\", BACKEND, CHANNEL, \"my.app\")\n for x in range(0, 100):\n data = {\"foo\": \"bar\", \"nested\": [{\"foo\": \"baz\"}]}\n\n print(\"-----------------------\")\n publish(backend, random.choice(events), data)\n sleep_time = random.choice(range(0, 10))\n print(\"Next publication in {}\".format(sleep_time))\n time.sleep(sleep_time)",
"def upload_to_google_calendar(events):\n batch = MyCalendarBatchInsert()\n\n for event in events:\n batch.add(event.to_gcal_event())\n\n return batch.execute()",
"def send_to_syslog(events, syslog):\r\n for cnt, event in enumerate(events, start=1):\r\n syslog.send(json.dumps(event))\r\n logging.debug('Event %s sent to syslog: %s.', cnt, json.dumps(event))\r\n logging.debug('Total Events: %s ', cnt)",
"def list_event(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_event\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/api/v1/events'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='V1EventList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def make_event_list(events):\n event_list = []\n for event in events:\n json_event = event.to_json()\n event_list.append(json_event)\n return event_list",
"async def process_events(self, events: List[EventData]):\n pass",
"def add_events(self, events):\n\n raise NotImplementedError # pragma: no cover",
"async def manage_events(self, events: Optional[List[List[str]]] = None) -> Union[list, None]:\n\n # Build the request data\n url: str = self.EVENTS_URL % self.server\n data: dict = {'id': self.client_id}\n\n # If events weren't selected, get them\n if not events:\n events = await self._request(url, data)\n\n # If we got events back\n if isinstance(events, list):\n\n # Handle the events and return their results\n # TODO Allow underscores, case insensitivity for method names in handler\n return [self.loop.create_task(self._handle_event(event)) for event in events]\n\n # If we got a dict back, there was a system error & we didn't get anything back at all\n if isinstance(events, dict):\n await self._handle_event(['systemError', events])",
"def bulk_publish(self, list_message, list_action):\n message = ''\n msg_list = []\n if self.sns_client:\n for i, one_message in enumerate(list_message):\n full_message = self._add_more_data(one_message, list_action[i])\n temp_message = json.dumps(full_message)\n # check max size of the message to publish under the limit\n if (len(message) + len(temp_message)) > 256000:\n self.sns_client.publish(message, self.PC_SNS_TOPIC)\n msg_list = [full_message]\n else:\n msg_list.append(one_message)\n message = json.dumps(msg_list)\n\n if message:\n self.sns_client.publish(message, self.PC_SNS_TOPIC)",
"def events(self):\n r = requests.get(self.uri+'events')\n r.raise_for_status()\n return r.json()",
"def watch_event_list(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method watch_event_list\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/api/v1/watch/events'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='JsonWatchEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def public_events(self, number=-1, etag=None):\n url = self._build_url('events', base_url=self._api)\n return self._iter(int(number), url, Event, etag=etag)",
"def schedule_events(self, events: list, location='last'):\n\n # hack\n event_queue_was_empty = not self._event_queue\n\n if location == 'immediately':\n for event in events:\n self._push_event(event)\n elif location == 'next up':\n self._event_queue = events + self._event_queue\n elif location == 'last':\n self._event_queue += events\n\n # hack: this gets checked every time an event is scheduled\n if self.processing and event_queue_was_empty and (location == 'next up' or location == 'last'):\n self._push_next_event()",
"def send_to_delivery_stream(events, stream_name):\n if not events:\n logger.info(\"No events provided: nothing delivered to Firehose\")\n return\n\n records = []\n for event in events:\n if not isinstance(event, str):\n # csv events already have a newline\n event = json.dumps(event) + \"\\n\"\n records.append({\"Data\": event})\n firehose = boto3.client(\"firehose\")\n logger.info(\"Delivering %s records to Firehose stream '%s'\",\n len(records), stream_name)\n resp = firehose.put_record_batch(\n DeliveryStreamName=stream_name,\n Records=records)\n return resp",
"def get_all_events(request):\n events = Event.objects.all()\n data = serializers.serialize(\"json\", events)\n return HttpResponse(data, content_type=\"application/json\")",
"def get_events():\n\n #immplementation\n\n return json.dumps(events)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create `self.perf_patterns` and units only in `self.reference`.
|
def add_metrics(self):
for metric in self.METRICS:
#getlogger().info('creating metric %s', metric.label)
self.perf_patterns[metric.label] = reduce(self.stdout, self.num_tasks, metric.column, metric.function)
self.reference[metric.label] = (0, None, None, metric.unit) # oddly we don't have to supply the "*" scope key??
|
[
"def __init__(self, patterns):\n\n self.wildcard_patterns = {}\n literals = []\n \n for j, pattern in enumerate(patterns):\n pattern = pattern.strip()\n wildcards = pattern.count('*')\n \n if wildcards == 0:\n literals.append(pattern)\n\n elif wildcards == 1:\n if not pattern.endswith('*'):\n ve = \"Pattern %i invalid: * can only appear at end of pattern.\" % j\n raise ValueError(ve)\n\n self.add_wildcard_pattern(pattern)\n \n else:\n ve = \"Pattern %i invalid: * can only appear once.\" % j\n raise ValueError(ve)\n\n self.literals = self.make_literal_set(literals)\n self.optimize_wildcard_patterns()\n self.pattern_lengths = sorted(self.wildcard_patterns.keys())",
"def createAttrPatterns(patternFile=\"string\", patternDefinition=\"string\", patternType=\"string\"):\n pass",
"def _generate_patterns(self):\n\t\tpatterns = self._PatternList() #Create linked list\n\t\tpattern_split = lambda series : split(series, len(series)/self._cycle_length) #lambda to perform splitting\n\t\tload = pattern_split(self._deseasonalized)\n\t\ttemp = pattern_split(self._temp_series)\n\t\tsolar = pattern_split(self._solar_series)\n\t\tholiday = pattern_split(self._holiday_series)\n\t\tlockdown = pattern_split(self._lockdown_series)\n\t\tfor ld, t, s, h, lk in zip(load, temp, solar, holiday, lockdown):\n\t\t\tnew_node = self._Pattern(ld, t, s, h, lk) #Create new pattern\n\t\t\tpatterns.append_after_tail(new_node) #Append to linked list\n\t\treturn patterns",
"def generate_patterns(self, num_patterns):\n self.num_patterns = num_patterns\n\n # Duration of the spike pattern and buckets.\n self.num_buckets = math.floor(self.duration / self.pattern_duration)\n self.free_buckets = np.arange(self.num_buckets - 1)\n\n # TODO: Handle error more gracefully.\n if np.sum(self.spike_trains) == 0:\n print \"WARNING! Generating empty pattern. \" \\\n \"Please generate spike trains first.\"\n\n for i in range(self.num_patterns):\n print \"Generating pattern...\"\n # Generate pattern from spike trains.\n self.generate_pattern()\n\n return self.patterns",
"def _build_measure(self) -> None:\n assert self.working_instructions is not None\n instr = self.working_instructions[self.index]\n assert isinstance(instr, Measurement)\n assert self.diagram is not None\n self.diagram.append(instr.qubit.index, TIKZ_MEASURE())\n self.index += 1",
"def generateDrumsFromPatterns(audio, tracks, beatStrings, start, stop):\n numMeasures = stop-start\n \n for i in range(0,len(beatStrings)):\n makeBeat(audio[i], tracks[i], start, beatStrings[i] * numMeasures)",
"def __call__(self, pitches, durations):\n import abjad\n if isinstance(pitches, str):\n pitches = pitches.split()\n if not isinstance(pitches, collections.Iterable):\n pitches = [pitches]\n if isinstance(durations, (numbers.Number, tuple)):\n durations = [durations]\n nonreduced_fractions = [abjad.NonreducedFraction(_) for _ in durations]\n size = max(len(nonreduced_fractions), len(pitches))\n nonreduced_fractions = abjad.sequence(nonreduced_fractions)\n nonreduced_fractions = nonreduced_fractions.repeat_to_length(size)\n pitches = abjad.sequence(pitches).repeat_to_length(size)\n Duration = abjad.Duration\n duration_groups = Duration._group_by_implied_prolation(\n nonreduced_fractions\n )\n result = []\n for duration_group in duration_groups:\n # get factors in denominator of duration group other than 1, 2.\n factors = abjad.mathtools.factors(duration_group[0].denominator)\n factors = set(factors)\n factors.discard(1)\n factors.discard(2)\n current_pitches = pitches[0:len(duration_group)]\n pitches = pitches[len(duration_group):]\n if len(factors) == 0:\n for pitch, duration in zip(current_pitches, duration_group):\n leaves = self._make_leaf_on_pitch(\n pitch,\n duration,\n decrease_monotonic=self.decrease_monotonic,\n forbidden_duration=self.forbidden_duration,\n skips_instead_of_rests=self.skips_instead_of_rests,\n use_multimeasure_rests=self.use_multimeasure_rests,\n repeat_ties=self.repeat_ties,\n )\n result.extend(leaves)\n else:\n # compute tuplet prolation\n denominator = duration_group[0].denominator\n numerator = abjad.mathtools.greatest_power_of_two_less_equal(\n denominator)\n multiplier = (numerator, denominator)\n ratio = 1 / abjad.Duration(*multiplier)\n duration_group = [\n ratio * abjad.Duration(duration)\n for duration in duration_group\n ]\n # make tuplet leaves\n tuplet_leaves = []\n for pitch, duration in zip(current_pitches, duration_group):\n leaves = self._make_leaf_on_pitch(\n pitch,\n duration,\n 
decrease_monotonic=self.decrease_monotonic,\n skips_instead_of_rests=self.skips_instead_of_rests,\n use_multimeasure_rests=self.use_multimeasure_rests,\n repeat_ties=self.repeat_ties,\n )\n tuplet_leaves.extend(leaves)\n tuplet = abjad.Tuplet(multiplier, tuplet_leaves)\n result.append(tuplet)\n return abjad.select(result)",
"def _insert_patterns(self):\n self._add_finders()\n self._add_separators()\n self._add_alignment_patterns()\n self._add_timing_pattern()\n self._add_reserved_areas()\n self._add_version_info()",
"def benchmark_to_format_zero(self):\n for name in (\"numpy\", \"dict\", \"records\", \"df\", \"arrow\"):\n test_meta = make_meta(\"to_format\", \"to_{}\".format(name))\n func = Benchmark(\n lambda: getattr(self._view, \"to_{0}\".format(name))(), meta=test_meta\n )\n setattr(self, \"to_format_{0}\".format(name), func)",
"def compile_patterns(self):\n self.patterns_cmp = [self.re_compiler(x) for x in self.patterns]\n if self.exclusions:\n self.exclusions_cmp = [self.re_compiler(x) for x in self.exclusions]",
"def measurement_rule(gate, platform):\n sequence = PulseSequence()\n for qubit in gate.target_qubits:\n MZ_pulse = platform.create_MZ_pulse(qubit, start=0)\n sequence.add(MZ_pulse)\n return sequence, {}",
"def build_ms_pattern(ms_loc, ms_pat):\n # Create a list the names of the measurement set files\n search_path = '{}/*/{}'.format(ms_loc, ms_pat)\n print (\"Find measurement sets matching\", search_path)\n file_list = glob.glob(search_path)\n if len(file_list) == 0:\n search_path = '{}/{}'.format(ms_loc, ms_pat)\n print (\"Find measurement sets matching\", search_path)\n file_list = glob.glob(search_path)\n\n # Find which parts of the file names vary between files\n mismatches=find_mismatches(file_list)\n\n # Idenitfy if the mismatches are for the beam number or interleave name \n # and build up definitions of the varying regions\n regions = []\n for start,end in sorted(mismatches):\n if start==end:\n continue\n if file_list[0][start:end].isnumeric():\n print(\"beam {} to {} is {}\".format(start, end, file_list[0][start:end]))\n regions.append((\"{1}\", start, end))\n else:\n print(\"interleave {} to {} is {}\".format(start, end, file_list[0][start:end]))\n regions.append((\"{0}\", start, end))\n regions.reverse()\n\n # Build a pattern for the measurement sets by replacing the varying regions in a sample path with placeholders\n pattern = str(file_list[0])\n for region in regions:\n pattern = pattern[0:region[1]]+region[0]+pattern[region[2]:]\n return pattern",
"def init_patterns(self):\n ##########################################\n # load patterns\n if self.verbose:\n print(\"init patterns:\")\n\n self.pattern_list = []\n self.pattern_list.append('stop')\n self.pattern_list.extend(pattern.load_all_submodules())\n\n # init all patterns:\n self.pattern = {}\n for pattern_class in pattern.Pattern.__subclasses__():\n full_module_name = pattern_class.__module__\n pattern_name = full_module_name.replace(\"pattern.\", \"\")\n self.add_pattern(pattern_name, pattern_class)",
"def prepare_usage_metrics():\n data = get_all_profiler_metrics_data(USAGE_DATA_FOLDER)\n data['Method'] = data['Method'].apply(ctor_to_class_name)\n return data",
"def update_patterns(self, patterns):\n\t\tpass",
"def compilePatterns(self):\n if self.verbose > 0:\n print(\"compile patterns...\"); sys.stdout.flush()\n for tagId in self.tags:\n tag = self.tags[tagId]\n self.patterns[tagId] = re.compile(tag + self.regexpMotif)",
"def __init__(self, name, class_name, formats=None, *args, **kwargs):\n super(AudioTrackListRule, self).__init__(name, *args, **kwargs)\n self.class_name = class_name\n\n self.formats = []\n if formats:\n self.formats.extend(formats)\n\n (json_template, js_template) = _get_tracklist_template_paths()\n self._append_dependent_paths([\n json_template,\n js_template])",
"def __init__(self, name, size=50, sector=0, cadence=None):\n super(Source_cut_pseudo, self).__init__()\n if cadence is None:\n cadence = []\n self.name = name\n self.size = size\n self.sector = sector\n self.camera = 0\n self.ccd = 0\n self.wcs = []\n self.time = np.arange(10)\n self.flux = 20 * np.ones((100, 50, 50)) + np.random.random(size=(100, 50, 50))\n star_flux = np.random.random(100) * 1000 + 200\n star_x = np.random.random(100) * 50 - 0.5\n star_y = np.random.random(100) * 50 - 0.5\n star_x_round = np.round(star_x)\n star_y_round = np.round(star_y)\n for j in range(100):\n for i in range(100):\n self.flux[j, int(star_y_round[i]), int(star_x_round[i])] += star_flux[i]\n try:\n self.flux[j, int(star_y_round[i]), int(star_x_round[i]) + 1] += star_flux[i]\n except:\n continue\n self.flux_err = []\n self.gaia = []\n self.cadence = cadence\n self.quality = []\n self.mask = np.ones(np.shape(self.flux[0]))\n\n # t_tic = Table()\n # t_tic[f'tic'] = tic_id[in_frame]\n t = Table()\n t[f'tess_mag'] = - star_flux\n t[f'tess_flux'] = star_flux\n t[f'tess_flux_ratio'] = star_flux / np.max(star_flux)\n t[f'sector_{self.sector}_x'] = star_x\n t[f'sector_{self.sector}_y'] = star_y\n gaia_targets = t # TODO: sorting not sorting all columns\n gaia_targets.sort('tess_mag')\n self.gaia = gaia_targets",
"def test_pattern_step():\n ol.download()\n num_samples = 128\n loopback_sent = build_random_pattern(num_samples)\n pattern_generator = PatternGenerator(mb_info)\n pattern_generator.trace(use_analyzer=True,\n num_analyzer_samples=num_samples)\n pattern_generator.setup(loopback_sent,\n stimulus_group_name='stimulus',\n analysis_group_name='analysis',\n frequency_mhz=100)\n\n for _ in range(num_samples):\n pattern_generator.step()\n\n loopback_recv = pattern_generator.waveform.waveform_dict\n list1 = list2 = list3 = list()\n for wavelane_group in loopback_sent['signal']:\n if wavelane_group and wavelane_group[0] == 'stimulus':\n for i in wavelane_group[1:]:\n temp = deepcopy(i)\n temp['wave'] = wave_to_bitstring(i['wave'])\n list1.append(temp)\n\n for wavelane_group in loopback_recv['signal']:\n if wavelane_group and wavelane_group[0] == 'stimulus':\n for i in wavelane_group[1:]:\n temp = deepcopy(i)\n temp['wave'] = wave_to_bitstring(i['wave'])\n list2.append(temp)\n elif wavelane_group and wavelane_group[0] == 'analysis':\n for i in wavelane_group[1:]:\n temp = deepcopy(i)\n temp['wave'] = wave_to_bitstring(i['wave'])\n list3.append(temp)\n assert list1 == list2, \\\n 'Stimulus not equal in generated and captured patterns.'\n assert list2 == list3, \\\n 'Stimulus not equal to analysis in captured patterns.'\n\n pattern_generator.stop()\n pattern_generator.reset()\n del pattern_generator"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test cycle task allow_change_state value by user position.
|
def test_change_state_by_user(self, user, expected_values):
all_models.Cycle.query.filter(
all_models.Cycle.id == self.cycle_id
).update({
all_models.Cycle.is_current: True,
})
db.session.commit()
user = all_models.Person.query.filter_by(email=user).one()
self.api.set_user(user)
response = self.api.get_query(all_models.CycleTaskGroupObjectTask,
"__sort=id")
self.assert200(response)
cycle_tasks = (response.json
.get("cycle_task_group_object_tasks_collection", {})
.get("cycle_task_group_object_tasks", []))
# relies on same order of ids of TGT and CTGOT
self.assertListEqual(
[cycle_task["allow_change_state"] for cycle_task in cycle_tasks],
expected_values,
)
|
[
"def test_change_state_by_is_current(self, cycle_is_current):\n all_models.Cycle.query.filter(\n all_models.Cycle.id == self.cycle_id\n ).update({\n all_models.Cycle.is_current: cycle_is_current,\n })\n db.session.commit()\n user_mail = self.WORKFLOW_OWNER\n user = all_models.Person.query.filter_by(email=user_mail).one()\n self.api.set_user(user)\n response = self.api.get_query(all_models.CycleTaskGroupObjectTask,\n \"__sort=id\")\n self.assert200(response)\n\n cycle_tasks = (response.json\n .get(\"cycle_task_group_object_tasks_collection\", {})\n .get(\"cycle_task_group_object_tasks\", []))\n # relies on same order of ids of TGT and CTGOT\n states = [ct[\"allow_change_state\"] for ct in cycle_tasks]\n if cycle_is_current:\n self.assertTrue(all(states))\n else:\n self.assertFalse(any(states))",
"def testChangeState(self):\n\n a = LedSwitcher(\"../test/testinputs/test1LineItem.txt\")\n a.parseFile()\n self.assertTrue(a.changeState(0, 0, True) == True)",
"def alter_state(self, state):\n state_map = dict(start = DAQmx_Val_Task_Start,\n stop = DAQmx_Val_Task_Stop,\n verify = DAQmx_Val_Task_Verify,\n commit = DAQmx_Val_Task_Commit,\n reserve = DAQmx_Val_Task_Reserve,\n unreserve = DAQmx_Val_Task_Unreserve,\n abort = DAQmx_Val_Task_Abort)\n state_val = self._get_map_value ('state', state_map, state)\n return CALL('TaskControl', self, state_val) == 0",
"def test_update_workflow_status(self):\n pass",
"def test_set_user_state(self):\n pass",
"def test_time_valid_change(generic_task):\n generic_task.set_time_valid('0000')\n assert generic_task.get_time_valid() == '0000'",
"def check(self, state, val):\n return",
"def before_state_change(self, source, target):",
"def test_disable_running_transition():\n\n def assert_new(instance):\n \"\"\"\n ensure the state is still the original state\n \"\"\"\n assert instance.state == \"new\"\n\n x = get_thing()\n x.disable_running_state(assert_new)",
"def test_set_transition_state():\n\n def assert_state(instance):\n \"\"\"\n ensure the running state is set\n \"\"\"\n assert instance.state == \"do_thing_running\"\n\n x = get_thing()\n x.do_thing(assert_state)\n\n # ensure the target transition is set when the process is done\n assert x.state == x.CHOICES.done",
"async def test_switch_change_alarm_state(hass, utcnow):\n helper = await setup_test_component(hass, create_security_system_service)\n\n await hass.services.async_call(\n \"alarm_control_panel\",\n \"alarm_arm_home\",\n {\"entity_id\": \"alarm_control_panel.testdevice\"},\n blocking=True,\n )\n assert helper.characteristics[TARGET_STATE].value == 0\n\n await hass.services.async_call(\n \"alarm_control_panel\",\n \"alarm_arm_away\",\n {\"entity_id\": \"alarm_control_panel.testdevice\"},\n blocking=True,\n )\n assert helper.characteristics[TARGET_STATE].value == 1\n\n await hass.services.async_call(\n \"alarm_control_panel\",\n \"alarm_arm_night\",\n {\"entity_id\": \"alarm_control_panel.testdevice\"},\n blocking=True,\n )\n assert helper.characteristics[TARGET_STATE].value == 2\n\n await hass.services.async_call(\n \"alarm_control_panel\",\n \"alarm_disarm\",\n {\"entity_id\": \"alarm_control_panel.testdevice\"},\n blocking=True,\n )\n assert helper.characteristics[TARGET_STATE].value == 3",
"def test_settingRunning(self):\n self.flags.running = True\n self.assertTrue(self.flags.running, \"The Running flag should be True\")",
"async def test_temp_change_ac_on_outside_tolerance(\n hass: HomeAssistant, setup_comp_3\n) -> None:\n calls = _setup_switch(hass, False)\n await common.async_set_temperature(hass, 25)\n _setup_sensor(hass, 30)\n await hass.async_block_till_done()\n assert len(calls) == 1\n call = calls[0]\n assert call.domain == HASS_DOMAIN\n assert call.service == SERVICE_TURN_ON\n assert call.data[\"entity_id\"] == ENT_SWITCH",
"def sof_changed(self, state):\n self.skip_optical_flow_new = (state == QtCore.Qt.Checked)",
"def test_stage_three_die_pass(self):\n self.game.currentStage = 3\n self.game.angryDieA.currentValue = \"5\"\n self.game.angryDieB.currentValue = \"5\"\n self.game.invalidFlagA = False\n self.game.invalidFlagB = False\n self.game.valid_check()\n self.assertFalse(self.game.invalidFlagA,\"Cheating flag for die A was set\")\n self.assertFalse(self.game.invalidFlagB, \"Cheating flag for die B was set\")",
"def test_date_valid_change(generic_task):\n generic_task.set_date_valid('2018-01-01')\n assert generic_task.get_date_valid() == '2018-01-01'",
"async def test_state_triggers(hass: HomeAssistant) -> None:\n hass.states.async_set(\"sensor.test_monitored\", STATE_OFF)\n\n config = {\n \"binary_sensor\": {\n \"name\": \"Test_Binary\",\n \"platform\": \"bayesian\",\n \"observations\": [\n {\n \"platform\": \"state\",\n \"entity_id\": \"sensor.test_monitored\",\n \"to_state\": \"off\",\n \"prob_given_true\": 0.9999,\n \"prob_given_false\": 0.9994,\n },\n ],\n \"prior\": 0.2,\n \"probability_threshold\": 0.32,\n }\n }\n await async_setup_component(hass, \"binary_sensor\", config)\n await hass.async_block_till_done()\n\n assert hass.states.get(\"binary_sensor.test_binary\").state == STATE_OFF\n\n events = []\n async_track_state_change_event(\n hass, \"binary_sensor.test_binary\", callback(lambda event: events.append(event))\n )\n\n context = Context()\n hass.states.async_set(\"sensor.test_monitored\", STATE_ON, context=context)\n await hass.async_block_till_done()\n await hass.async_block_till_done()\n\n assert events[0].context == context",
"def test_notify_run_status(self):\n pass",
"def testBypassStatus(self):\n dummyTask = Task.create('checksum')\n crawlers = [FsCrawler.createFromPath(self.__jsonConfig)]\n\n taskHolder = TaskHolder(dummyTask, Template(\"{filePath}\"))\n dummyTask2 = Task.create('checksum')\n taskHolder2 = TaskHolder(dummyTask2, Template(\"{filePath}\"))\n taskHolder.addSubTaskHolder(taskHolder2)\n self.assertEqual(len(taskHolder.run(crawlers)), len(crawlers) * 2)\n\n taskHolder.setStatus(\"bypass\")\n self.assertEqual(len(taskHolder.run(crawlers)), len(crawlers))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test cycle task allow_change_state value by Cycle is_current value.
|
def test_change_state_by_is_current(self, cycle_is_current):
all_models.Cycle.query.filter(
all_models.Cycle.id == self.cycle_id
).update({
all_models.Cycle.is_current: cycle_is_current,
})
db.session.commit()
user_mail = self.WORKFLOW_OWNER
user = all_models.Person.query.filter_by(email=user_mail).one()
self.api.set_user(user)
response = self.api.get_query(all_models.CycleTaskGroupObjectTask,
"__sort=id")
self.assert200(response)
cycle_tasks = (response.json
.get("cycle_task_group_object_tasks_collection", {})
.get("cycle_task_group_object_tasks", []))
# relies on same order of ids of TGT and CTGOT
states = [ct["allow_change_state"] for ct in cycle_tasks]
if cycle_is_current:
self.assertTrue(all(states))
else:
self.assertFalse(any(states))
|
[
"def test_change_state_by_user(self, user, expected_values):\n all_models.Cycle.query.filter(\n all_models.Cycle.id == self.cycle_id\n ).update({\n all_models.Cycle.is_current: True,\n })\n db.session.commit()\n user = all_models.Person.query.filter_by(email=user).one()\n self.api.set_user(user)\n response = self.api.get_query(all_models.CycleTaskGroupObjectTask,\n \"__sort=id\")\n self.assert200(response)\n\n cycle_tasks = (response.json\n .get(\"cycle_task_group_object_tasks_collection\", {})\n .get(\"cycle_task_group_object_tasks\", []))\n # relies on same order of ids of TGT and CTGOT\n self.assertListEqual(\n [cycle_task[\"allow_change_state\"] for cycle_task in cycle_tasks],\n expected_values,\n )",
"def _needs_to_track_change(self, instance, value) -> bool:\n try:\n current_value = instance.__dict__[self._name]\n except KeyError:\n return True\n return value != current_value",
"def alter_state(self, state):\n state_map = dict(start = DAQmx_Val_Task_Start,\n stop = DAQmx_Val_Task_Stop,\n verify = DAQmx_Val_Task_Verify,\n commit = DAQmx_Val_Task_Commit,\n reserve = DAQmx_Val_Task_Reserve,\n unreserve = DAQmx_Val_Task_Unreserve,\n abort = DAQmx_Val_Task_Abort)\n state_val = self._get_map_value ('state', state_map, state)\n return CALL('TaskControl', self, state_val) == 0",
"def testChangeState(self):\n\n a = LedSwitcher(\"../test/testinputs/test1LineItem.txt\")\n a.parseFile()\n self.assertTrue(a.changeState(0, 0, True) == True)",
"def test_update_workflow_status(self):\n pass",
"def _can_be_changed(self, graph):\n return not graph.is_raw_data or (graph.is_raw_data and self._active == 'MOVE')",
"def goal_test(self, current):\n\n if current.state == self.goal_state:\n return True\n else:\n return False",
"def test_disable_running_transition():\n\n def assert_new(instance):\n \"\"\"\n ensure the state is still the original state\n \"\"\"\n assert instance.state == \"new\"\n\n x = get_thing()\n x.disable_running_state(assert_new)",
"def check(self, state, val):\n return",
"def sof_changed(self, state):\n self.skip_optical_flow_new = (state == QtCore.Qt.Checked)",
"async def test_switch_change_alarm_state(hass, utcnow):\n helper = await setup_test_component(hass, create_security_system_service)\n\n await hass.services.async_call(\n \"alarm_control_panel\",\n \"alarm_arm_home\",\n {\"entity_id\": \"alarm_control_panel.testdevice\"},\n blocking=True,\n )\n assert helper.characteristics[TARGET_STATE].value == 0\n\n await hass.services.async_call(\n \"alarm_control_panel\",\n \"alarm_arm_away\",\n {\"entity_id\": \"alarm_control_panel.testdevice\"},\n blocking=True,\n )\n assert helper.characteristics[TARGET_STATE].value == 1\n\n await hass.services.async_call(\n \"alarm_control_panel\",\n \"alarm_arm_night\",\n {\"entity_id\": \"alarm_control_panel.testdevice\"},\n blocking=True,\n )\n assert helper.characteristics[TARGET_STATE].value == 2\n\n await hass.services.async_call(\n \"alarm_control_panel\",\n \"alarm_disarm\",\n {\"entity_id\": \"alarm_control_panel.testdevice\"},\n blocking=True,\n )\n assert helper.characteristics[TARGET_STATE].value == 3",
"def test_change(self):\n bandwidth_value = random.randint(0, 10000000)\n self._bw.change(bandwidth_value)\n self.assertEqual(self._bw._current, bandwidth_value)",
"def test_set_transition_state():\n\n def assert_state(instance):\n \"\"\"\n ensure the running state is set\n \"\"\"\n assert instance.state == \"do_thing_running\"\n\n x = get_thing()\n x.do_thing(assert_state)\n\n # ensure the target transition is set when the process is done\n assert x.state == x.CHOICES.done",
"def __damaged_state_change(start, adv, final):\n return start.damaged != adv.damaged or adv.damaged != final.damaged",
"def _check(self, target, current):\n raise NotImplementedError('Do not call the base Goal directly.')",
"def before_state_change(self, source, target):",
"def check(self):\n new_state = io.input(self.pin)\n if new_state != self.state:\n self.changed = time()\n self.state = new_state\n return new_state",
"def state_changed(self, details):\n logger.info(\"* Run State Change:\")\n logger.info(`details`)\n\n if details.state == controller.TC_STATE_STOPPING:\n logger.info('Hardware shutdown in progress, telescope decelerating.')\n self.running = False\n elif details.state == controller.TC_STATE_EXCEPTION:\n self.running = False\n logger.debug('acq in state_changed:')\n self.lock.acquire()\n logger.debug('acq in state_changed() success')\n d = self.host.get_exception()\n d.addCallback(self._get_exception_completed)",
"def test_date_valid_change(generic_task):\n generic_task.set_date_valid('2018-01-01')\n assert generic_task.get_date_valid() == '2018-01-01'"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert a TSV string from the sentences_input table to a list, optionally applying a fn to each element
|
def tsv_string_to_list(s, func=lambda x : x, sep='|^|'):
if s.strip() == "":
return []
# Auto-detect separator
if re.search(r'^\{|\}$', s):
split = re.split(r'\s*,\s*', re.sub(r'^\{\s*|\s*\}$', '', s))
else:
split = s.split(sep)
# split and apply function
return [func(x) for x in split]
|
[
"def tsv_string_to_listoflists(s, func=lambda x : x, sep1='|^|', sep2='|~|'):\n return tsv_string_to_list(s, func=lambda x : tsv_string_to_list(x, func=func, sep=sep1), sep=sep2)",
"def tab_split(line: str, converter: Callable[[str], Any]=str) -> List[Any]:\n return [converter(x) for x in line.split('\\t')]",
"def read_tsv(filename, text_column, annotation_column, remap_fn=None, skip_comments=True, keep_broken_tags=False):\n with open(filename, encoding=\"utf-8\") as fin:\n lines = fin.readlines()\n\n lines = [x.strip() for x in lines]\n\n sentences = []\n current_sentence = []\n for line_idx, line in enumerate(lines):\n if not line:\n if current_sentence:\n sentences.append(current_sentence)\n current_sentence = []\n continue\n if skip_comments and line.startswith(\"#\"):\n continue\n\n pieces = line.split(\"\\t\")\n try:\n word = pieces[text_column]\n except IndexError as e:\n raise IndexError(\"Could not find word index %d at line %d\" % (text_column, line_idx)) from e\n if word == '\\x96':\n # this happens in GermEval2014 for some reason\n continue\n try:\n tag = pieces[annotation_column]\n except IndexError as e:\n if keep_broken_tags:\n tag = None\n else:\n raise IndexError(\"Could not find tag index %d at line %d\" % (annotation_column, line_idx)) from e\n if remap_fn:\n tag = remap_fn(tag)\n\n current_sentence.append((word, tag))\n\n if current_sentence:\n sentences.append(current_sentence)\n\n return sentences",
"def convert(self, token_tml):\n sents = []\n cur_sent = []\n last_sent = -1\n for line in open(token_tml):\n line = line.strip()\n if not line:\n continue\n fn, sent_id, tok_id, \\\n surface_form, tmlTag, tmlTagId, tmlTagLoc = [eval(v) for v in line.split('|||')]\n cur_ent = [tok_id,\n surface_form,\n self.consolidate_fact_value(fn, sent_id, tmlTagId) \\\n if (tmlTag == 'EVENT')\\\n else \"_\"]\n\n if sent_id != last_sent:\n if cur_sent:\n toks = nlp(unicode(\" \".join([word[1] for word in cur_sent])))\n dep_feats = self.get_dep_feats(toks, cur_sent)\n sents.append([fb_feat + dep_feat\n for (fb_feat, dep_feat) in zip(cur_sent, dep_feats)])\n cur_sent = [cur_ent]\n else:\n cur_sent.append(cur_ent)\n last_sent = sent_id\n\n return '\\n\\n'.join(['\\n'.join(['\\t'.join(map(str, word))\n for word in sent])\n for sent in sents\n if len(sent) > self.sentence_threshold]) + \"\\n\\n\" # filter short sentences",
"def run_main_tsv(row_parser, row_fn):\n for line in sys.stdin:\n for line_out in row_fn(row_parser(line)):\n print_tsv_output(line_out)",
"def read_ptsv(line):\n return map(read_ptsv_element, line.rstrip().split('\\t'))",
"def _line_to_list(line, map_func=int) -> list:\n return list(map(map_func, line.strip().split(' ')))",
"def _read_torchtext_tabular(cls, input_file):\n return open_split(input_file, lower_case=False)",
"def sent_to_binary_features(sentence, funcs):\n if not sentence.is_tokenized:\n raise Exception(\"Sentence not tokenized\")\n matrix = np.zeros((len(sentence), len(funcs)))\n for i, token in enumerate(sentence):\n for j, func in enumerate(funcs):\n if func(token):\n matrix[i, j] = 1\n return matrix.tolist()",
"def split_on_attribute_values(tester,rows):\n ts,fs = [],[]\n def helper(r):\n if tester(r): ts.append(r)\n else: fs.append(r)\n\n map(helper,rows)\n return ts,fs",
"def recurse_tsv(contents: str, fnc: Callable, delim: str = \"\\t\") -> Tuple[str, bool]:\n modified = False\n new_contents = []\n for line in contents.split(\"\\n\"):\n new_values = []\n for v in line.split(delim):\n new_val, modified_ = fnc(v)\n modified |= modified_\n if modified_:\n new_values.append(new_val)\n else:\n new_values.append(v)\n new_contents.append(delim.join(new_values))\n\n return \"\\n\".join(new_contents), modified",
"def feature_function_segments(segments, **kwargs):\n l = []\n segments = add_segment_final_space(ensure_unicode(segments))\n parse(segments)\n for segment in generate_items_segments(segments):\n l.append(feature_function_for_segments(segments, segment, **kwargs))\n return l",
"def preprocess(list_of_sentences):\n ret_list = []\n for f in list_of_sentences:\n f = f.lower()\n f= f.replace('\\n', '')\n f= f.replace('?','')\n ret_list.append(f)\n return ret_list",
"def transform(self, *inputs: Table) -> List[Table]:\n pass",
"def pipeline(column_name):\n vectorizer = TfidfVectorizer()\n vectorized_text = vectorizer.fit_transform(\n training_data[column_name].values.astype('U')).toarray()\n return vectorized_text",
"def split_by_lines(dataset):\n def my_fn(text):\n lines = tf.strings.split([text], sep='\\n').values\n return tf.strings.strip(lines)\n\n dataset = dataset.map(my_fn, num_parallel_calls=AUTOTUNE)\n dataset = dataset.unbatch()\n return dataset.filter(lambda x: tf.strings.length(x) > 0)",
"def lineages_to_columns(lineages, tax_tsv):\n return [lineage_to_columns(lineage, tax_tsv) for lineage in lineages]",
"def convert_custom_csv_to_tsv(input, output_path, label_col, text_col, id_col=None, skip_header=True,\r\n output_format=DEFAULT_OUT_FORMAT):\r\n convert_custom_input_to_tsv(input, \",\", output_path, label_col, text_col, id_col=id_col, skip_header=skip_header,\r\n output_format=output_format)\r\n return None",
"def get_table_from_output(self, output: str) -> torch.Tensor:\n search_result = re.search(\n r\".*function\\(\\*\\(_,_\\), \\[(.*)]\\)\\..*\", output, re.DOTALL\n )\n if search_result is None:\n raise ValueError(\"wrong mace4 output file format!\")\n input_lines = search_result.groups()[0]\n cayley_table = torch.tensor(\n list(\n map(\n int,\n input_lines.translate(\n str.maketrans(\"\", \"\", \" \\t\\n])\")\n ).split(\",\"),\n )\n )\n ).view(self.cardinality, self.cardinality)\n return cayley_table"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert a TSV string from sentences_input table to a list of lists
|
def tsv_string_to_listoflists(s, func=lambda x : x, sep1='|^|', sep2='|~|'):
return tsv_string_to_list(s, func=lambda x : tsv_string_to_list(x, func=func, sep=sep1), sep=sep2)
|
[
"def tsv_string_to_list(s, func=lambda x : x, sep='|^|'):\n \n if s.strip() == \"\":\n return []\n\n # Auto-detect separator\n if re.search(r'^\\{|\\}$', s):\n split = re.split(r'\\s*,\\s*', re.sub(r'^\\{\\s*|\\s*\\}$', '', s))\n else:\n split = s.split(sep)\n\n # split and apply function\n return [func(x) for x in split]",
"def read_ptsv(line):\n return map(read_ptsv_element, line.rstrip().split('\\t'))",
"def _read_torchtext_tabular(cls, input_file):\n return open_split(input_file, lower_case=False)",
"def load_dataset():\n sents = []\n part_size = FLAGS.tgt_len\n\n data_in_f = open(FLAGS.sentences_file, 'r')\n\n for line in data_in_f:\n line = line.strip()\n if line == \"\":\n continue\n symbols = line.split()\n\n if FLAGS.backwards:\n symbols = symbols[::-1]\n # annoying arithmetic\n num_sent_parts = len(symbols) // part_size if \\\n len(symbols) % part_size == 0 else len(symbols) // part_size + 1\n\n # split sentence into partitions\n partitions = []\n cur = 0\n for _ in range(num_sent_parts):\n if cur+part_size > len(symbols):\n partitions.append(symbols[cur:])\n else:\n partitions.append(symbols[cur:cur+part_size])\n cur += part_size\n sents.append(partitions)\n\n return sents",
"def read_tsv(filename, text_column, annotation_column, remap_fn=None, skip_comments=True, keep_broken_tags=False):\n with open(filename, encoding=\"utf-8\") as fin:\n lines = fin.readlines()\n\n lines = [x.strip() for x in lines]\n\n sentences = []\n current_sentence = []\n for line_idx, line in enumerate(lines):\n if not line:\n if current_sentence:\n sentences.append(current_sentence)\n current_sentence = []\n continue\n if skip_comments and line.startswith(\"#\"):\n continue\n\n pieces = line.split(\"\\t\")\n try:\n word = pieces[text_column]\n except IndexError as e:\n raise IndexError(\"Could not find word index %d at line %d\" % (text_column, line_idx)) from e\n if word == '\\x96':\n # this happens in GermEval2014 for some reason\n continue\n try:\n tag = pieces[annotation_column]\n except IndexError as e:\n if keep_broken_tags:\n tag = None\n else:\n raise IndexError(\"Could not find tag index %d at line %d\" % (annotation_column, line_idx)) from e\n if remap_fn:\n tag = remap_fn(tag)\n\n current_sentence.append((word, tag))\n\n if current_sentence:\n sentences.append(current_sentence)\n\n return sentences",
"def tab_split(line: str, converter: Callable[[str], Any]=str) -> List[Any]:\n return [converter(x) for x in line.split('\\t')]",
"def csv_to_list(csv_str, dem = ','):\n lines = csv_str.strip('\\n').split('\\n')\n csv_data = []\n for l in lines:\n csv_data.append(l.strip('\\r').split(dem))\n return csv_data",
"def createListFromTable(self, table):\n\t\tlistOfData = []\n\n\t\tfor data in table:\n\t\t\tlistOfData.append(data)\n\n\t\treturn listOfData",
"def parse_tsv(path):\n\twith open(path, newline = '') as handle:\n\t\treader = csv.DictReader(handle, delimiter = '\\t')\n\t\tdata = []\n\t\t\n\t\t# get info from rows\n\t\tfor row in reader:\n\t\t\t\n\t\t\tdata.append(row)\n\t\t\t\n\treturn data",
"def get_sentence_data(data_file):\n # Read sentences from file\n sents = []\n with open(data_file) as file:\n for line in file:\n # add them as arrays to make expansion easier\n sents.append(line.strip().split())\n \n # Get binary feature vects (d) and labels (l) from sents\n d = []\n l = []\n for line in sents:\n vect = numpy.zeros(feature_size)\n for i in line[1:]:\n i = i.split(\":\")\n word = i[0]\n value = i[1]\n #print word, value\n try:\n vect[features_index[word]] = float(value)\n except:\n pass\n l.append(line[0])\n d.append(vect)\n \n return d, l",
"def parse(csvfilename):\n table = []\n with open(csvfilename, \"r\") as csvfile:\n for line in csvfile:\n line = line.rstrip()\n columns = line.split(',')\n table.append(columns)\n return table",
"def _line_to_list(line, map_func=int) -> list:\n return list(map(map_func, line.strip().split(' ')))",
"def split_rows(sentences, column_names):\r\n new_sentences = []\r\n texts=[]\r\n root_values = ['0', 'ROOT', 'ROOT', 'ROOT', 'ROOT', 'ROOT', '0', 'ROOT', '0', 'ROOT']\r\n start = [dict(zip(column_names, root_values))]\r\n for sentence in sentences:\r\n info=[]\r\n rows = sentence.split('\\n')\r\n sentence = [dict(zip(column_names, row.split())) for row in rows if row[0] != '#']\r\n sentence = start + sentence\r\n new_sentences.append(sentence)\r\n if \"newdoc id\" in rows[0]: # beginnings of new docs\r\n info.append(rows[1])\r\n info.append(rows[2])\r\n texts.append(info)\r\n else:\r\n info.append(rows[0])\r\n info.append(rows[1])\r\n texts.append(info)\r\n return new_sentences, texts",
"def get_sentences(input_file):\n \n with open(input_file) as f:\n \treturn [line.rstrip('\\n') for line in f if line.rstrip('\\n')]",
"def read_input_data(input_data):\n\tsentences = []\n\tsentences_tokenised = []\n\tif len(input_data) > 0:\n\t\tinput_data.strip()\n\t\tif list(input_data)[-1] != \".\":\n\t\t\tinput_data += '.'\n\t\tsentences = input_data.split(\".\")[:-1]\n\t\tfor sentence in sentences:\n\t\t\tsentence = re.findall(r\"[\\w']+|[.,!?;]\", sentence)\n\n\t\t\tsentence_new = []\n\t\t\tfor token in sentence:\n\t\t\t\ttoken = [token]\n\t\t\t\tsentence_new.append(token)\n\t\t\tsentence_new.append(['.'])\n\t\t\tsentences_tokenised.append(sentence_new)\n\tprint(sentences_tokenised)\n\treturn sentences_tokenised",
"def lines_to_list_of_lists(lines):\n #create and fill list of lists with float numbers\n return [ [float(strnum) for strnum in line.split()] for line in lines ]",
"def load_matchlist(filepath='example_for_matching.tsv'):\n\tmatchlist = []\n\twith open(filepath, 'r', encoding='utf8') as tsvin:\n\t\ttsvin = csv.reader(tsvin, delimiter='\\t')\n\t\ttry:\n\t\t\tfor row in tsvin:\n\t\t\t\tmatchlist += [row[0]]\n\t\texcept UnicodeDecodeError:\n\t\t\tprint('{time} unicode error. q={q}'.format(time=datetime.datetime.now(), q=row[0]))\n\n\treturn matchlist",
"def record_to_sentences(record):\r\n from nltk.tokenize import sent_tokenize\r\n\r\n record_id = record['record_id']\r\n sentences = sent_tokenize(record[wordcount.TEXT_FIELD])\r\n sentences = [Row(record_id=record_id, Sentences_t=sentence.encode('utf-8')) for sentence in sentences]\r\n return sentences",
"def make_linelist_from_dataframe(df):\n lst = []\n for values in df.head().values:\n lst.append('\\t'.join([str(v) for v in values]))\n return lst"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a list of ddlib.Word objects from input row.
|
def create_ddlib_sentence(row):
sentence = []
for i, word in enumerate(row.words):
sentence.append(ddlib.Word(
begin_char_offset=None,
end_char_offset=None,
word=word,
lemma=row.lemmas[i],
pos=row.poses[i],
ner=row.ners[i],
dep_par=row.dep_parents[i],
dep_label=row.dep_paths[i]))
return sentence
|
[
"def generate_word_count(self, row):\n\tword_counter = Counter(row[1])\n\treturn [ (word, [ (row[0], word_counter[word]) ] ) \\\n for word in word_counter ]",
"def create_boggle_lst(row):\n\t# Read row to lst\n\trow_lst = row.split(' ')\n\trow_lst = case_insensitive(row_lst)\n\t# Create boggle list\n\tboggle_lst.append(row_lst)\n\t# Create boggle_ch_lst\n\tfor ch in row_lst:\n\t\tboggle_ch_lst.append(ch)",
"def doc_transform(doc_batch):\n docs = []\n for d in doc_batch:\n words = []\n for s in d:\n words += s\n docs.append(words)\n # nw = len(words)\n return docs",
"def loadWordsToList(self):\n dataFile = open('wordDb.txt', 'r');\n list = [];\n varI = 0;\n while varI < 288:\n strArr = dataFile.readline().strip('\\n').split(\" \");\n list.append(strArr[0:]);\n varI+=1;\n\n for i in list:\n for j in i:\n stringDb.listOfWords.append(j);",
"def read_machine_word_array(self, a, count):\n return [idc.Dword(x) for x in range(a, a + count * self.machine_word_size, self.machine_word_size)]",
"def import_words(cursor):\n print(\"*** Inserting Words ***\")\n base_dir = os.path.dirname(os.path.realpath(__file__))\n id = 1\n dataset = DataParser.get_dataset()\n with open(os.path.join(os.path.dirname(base_dir), \"data\", dataset, \"mult.dat\")) as\\\n bag, open(os.path.join(os.path.dirname(base_dir), \"data\", dataset, \"vocabulary.dat\")) as vocab:\n for entry in bag:\n entry = entry.strip()\n splitted = entry.split(\" \")\n num_words = int(splitted[0])\n for i in range(1, num_words + 1):\n article_to_count = splitted[i].split(\":\")\n word_id = str(int(article_to_count[0]) + 1)\n count = article_to_count[1]\n cursor.execute(\"insert into words_articles(article_id, count, word_id) \\\n values (%s, %s, %s)\", (id, count, word_id))\n id += 1\n current_word = 1\n for word in vocab:\n word = word.strip()\n cursor.execute(\"insert ignore into words(id, word) values(%s, %s)\", (current_word, word))\n current_word += 1",
"def return_row(self, row):\n if isinstance(row, int):\n row = number_to_rowname(row)\n return [self.wells[wellname] for wellname in self.rows[row]]",
"def get_all_words(board: Sequence[Sequence[str]]) -> List[List[str]]:\n all_words = []\n for row in board:\n word = []\n for num in row:\n num = str(num)\n if not int(num):\n if len(word) > 1:\n all_words.append(word)\n word = []\n else:\n word.append(num)\n if len(word) > 1:\n all_words.append(word)\n\n return all_words",
"def fetch_wordlist() -> list[tuple[str]]:\n\n logger.info(f\"Fetch wordlist from {WORDLIST_URL}\")\n\n response = httpx.get(WORDLIST_URL)\n word_list = response.text.split(\"\\n\")\n return [(word.strip(),) for word in word_list[2:]]",
"def parse_row(row):\n cells = row.getchildren()\n\n data = [tc(cells[0]), int(tc(cells[1]))]\n data.extend(float(tc(c)) for c in cells[2:])\n\n return data",
"def __getWords(self,line):\n\n l=string.expandtabs(string.lower(line),1)\n words=string.split(string.lstrip(l))\n \n return words",
"def extractStrings(self):\n def extractRow(row):\n return [entry.text for entry in row]\n return [extractRow(row) for row in self.array2d]",
"def extractTranslations(row, list, languages=None):\n\t# Locations of the translations.\n\tlocations = [\n\t\t[6 + 3 * i for i in range(10)], # First row.\n\t\t[4 + 3 * i for i in range(10)], # Second row.\n\t\t[5 + 3 * i for i in range(10)] # Third row.\n\t]\n\n\t# Translations object.\n\ttranslation = {}\n\tindex = 0\n\n\t# For each translation line.\n\tfor j in range(0, 3):\n\t\t# For each language of the translation line.\n\t\tfor i in locations[j]:\n\t\t\tif languages:\n\t\t\t\t# Use languages as keys.\n\t\t\t\ttranslation[languages[index]] = row[i]\n\t\t\telse:\n\t\t\t\t# Use index as keys.\n\t\t\t\ttranslation[index] = row[i]\n\n\t\t\t# Next language.\n\t\t\tindex += 1\n\n\t\t# Next row.\n\t\tif j < 2:\n\t\t\trow = next(list, None)\n\n\t# Translations object.\n\treturn translation",
"def convert_document(document: list[Union[str, tuple[str, int]]]) -> list[str]:\n return [\n word\n for code in document\n for word in ([code] if isinstance(code, str) else [code[0]] * code[1])\n ]",
"def get_word_list(self)->list:\n return self.word_list",
"def convertRowIntoOutput(gtfRow):\n toRet=[gtfRow.geneName,gtfRow.classification,gtfRow.start,gtfRow.stop,gtfRow.chromosome,gtfRow.orientation]\n return(toRet)",
"def createRow(columns, row):\n return [voxie.Variant(column.dbusType, column.converter(row.get(column.name, column.default))) for column in columns]",
"def hex_to_wordlist(hexinput):\n\n hexinput = str(hexinput)\n if int(sys.version_info.major) == 2:\n hexinput = unicode(hexinput)\n hexinput = unicodedata.normalize('NFC',hexinput)\n hexinput = str(hexinput).replace(\"L\",\"\").replace(\"0x\",\"\")\n for char in hexinput:\n if char not in '0123456789abcdefABCDEF':\n raise TypeError(\"Input contains non-hex chars.\")\n if len(hexinput) % 2:\n raise Exception(\"Hex input is odd-length. Although many functions in this module auto-correct that, because of the high importance of not altering your Electrum seed, this error is thrown instead. Please make sure the input hex is exactly 32 hex chars.\")\n try:\n test1 = binascii.unhexlify(hexinput)\n test2 = int(hexinput,16)\n test1, test2 = None, None\n except:\n raise TypeError(\"Input does not appear to be hex.\")\n assert len(hexinput) == 32\n output = []\n for i in range(int(len(hexinput) // 8)):\n word = hexinput[8*i:8*i+8]\n x = int(word,16)\n w1 = (x % ElectrumWallet_V1.NUMBER_OF_WORDS)\n w2 = ((x // ElectrumWallet_V1.NUMBER_OF_WORDS) + w1) % ElectrumWallet_V1.NUMBER_OF_WORDS\n w3 = ((x // ElectrumWallet_V1.NUMBER_OF_WORDS // ElectrumWallet_V1.NUMBER_OF_WORDS) + w2) % ElectrumWallet_V1.NUMBER_OF_WORDS\n output += [ ElectrumWallet_V1.ELECTRUM_ENG_V1_WORDLIST[w1], ElectrumWallet_V1.ELECTRUM_ENG_V1_WORDLIST[w2], ElectrumWallet_V1.ELECTRUM_ENG_V1_WORDLIST[w3] ]\n return str(str(output).replace(\",\",\"\").replace(\"[ \",\"\").replace(\" ]\",\"\").replace(\"[\",\"\").replace(\"]\",\"\").replace(\"u'\",\"\").replace(\"'\",\"\"))",
"def fetch(self, whereclauses: Union[List[str], None] = None) -> List[Word]:\n word_list = []\n if self.db_conn is not None:\n cursor = self.db_conn.cursor()\n sql_statement = f'SELECT * FROM {self.__WORD_TABLE_NAME}'\n if whereclauses is not None:\n if len(whereclauses) is 1:\n sql_statement += f' WHERE {whereclauses[0]}'\n else:\n sql_statement += f' WHERE {whereclauses[0]}'\n for clause in whereclauses[1:]:\n sql_statement += f' AND {clause}'\n cursor.execute(sql_statement)\n raw_word_list = cursor.fetchall()\n cursor.close()\n for word in raw_word_list:\n # word[0] is omitted because it is merely the primary ID key of the database.\n # Kanji and Note are not guaranteed to exist, in which case they are 'None'\n kanji = None if word[self.WORD_KANJI_FIELD] is 'None' else word[self.WORD_KANJI_FIELD]\n note = None if word[self.WORD_NOTE_FIELD] is 'None' else word[self.WORD_NOTE_FIELD]\n eng = word[self.WORD_ENGLISH_FIELD].split('_')\n grammar = word[self.WORD_GRAMMAR_TYPES_FIELD].split('_')\n new_word = Word(eng, word[self.WORD_ROMAJI_FIELD], kanji, word[self.WORD_CHAPTER_FIELD], grammar, note)\n word_list.append(new_word)\n return word_list\n else:\n raise exceptions.DatabaseError('Can\\'t fetch words without a connection!')",
"def get_wordlist(self):\n return [w for w in self.words]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Escape a string that's meant to be in a Postgres array. We doublequote the string and escape backslashes and doublequotes.
|
def pg_array_escape(tok):
return '"%s"' % str(tok).replace('\\', '\\\\').replace('"', '\\\\"')
|
[
"def _quote_escape(item):\n\n rex_sqlquote = re.compile(\"'\", re.M)\n\n return rex_sqlquote.sub(\"''\", item)",
"def escape_sql_values(field_value):\n\treturn field_value.replace(\"'\",\"''\")",
"def escape( *args ):\n cmd = ''\n for s in args:\n if cmd: cmd += ' '\n if not s:\n cmd += '\"\"'\n else:\n cmd += pipes.quote(s)\n return cmd",
"def stringdb_escape_text(text):\n return text.replace('\\\\', '\\\\\\\\').replace('\\t', '\\\\t')",
"def esc_quotes(strng):\n\n return strng.replace('\"','\\\\\"').replace(\"'\",\"\\\\'\")",
"def _escape_influx_special_char(string):\n influx_special_char = re.compile(r'([,=\\s])')\n return influx_special_char.sub(r'\\\\\\1', string)",
"def _quote(s):\n return b\"'%s'\" % stringutil.escapestr(pycompat.bytestr(s))",
"def escape_argument(text):\n\n re_no_need_escaping = re.compile(r'^[0-9a-zA-Z_\\.-]+$')\n if re_no_need_escaping.match(text):\n return text\n\n replacements = {\n '\\\\': '\\\\\\\\',\n '\\n': '\\\\n',\n '\\r': '\\\\r',\n '\\t': '\\\\t',\n '\"': '\\\\\"',\n }\n\n def _escape_char(char):\n if char in replacements:\n return replacements[char]\n return char\n\n return '\"{}\"'.format(''.join(_escape_char(x) for x in text))",
"def esc(self, value):\n return value.replace('\\\\', '\\\\\\\\').replace(\"'\", \"\\\\'\").replace('\"', '\\\\\"')",
"def csv_escape(self, line):\n\n in_quote = False\n result = \"\"\n for p in re.split('(\")', line):\n if p == '\"':\n in_quote = not in_quote\n if in_quote:\n p = re.sub(r',', r'\\\\054', p)\n result += p\n\n return result",
"def escape_for_query_string(query_str):\n query_string_reserved_characters = ['\\\\',\n ' ', '+', '-', '=', '&&', '||', '>', '<', '!', '(', ')',\n '{', '}', '[', ']', '^', '\"', '~', '*', '?', ':', '/'\n ]\n\n query_str = ''.join(['\\\\%s' % c if c in query_string_reserved_characters else c for c in query_str])\n return query_str",
"def escape_for_c(rawstring):\n out = rawstring\n out = out.replace(\"\\\\\", \"\\\\\\\\\")\n out = out.replace('\"', '\\\\\"')\n return out",
"def dbescape(s):\n\tif s:\n\t\timport MySQLdb\n\t\treturn MySQLdb.escape_string(s)\n\t\t#return s.replace(\"\\\"\",\"\\\\\\\"\").replace(\"'\",\"\\'\")\n\telse:\n\t\treturn \"NULL\"",
"def _quote_field(data, field):\n if data is None:\n return None\n\n # embedded quotes require escaping - but only if not escaped already\n # note that semi-colons do not need escaping here since we are putting it\n # inside of a quoted string\n fieldBuf = \"\"\n escape = False\n for c in data[field]:\n if c == '\"':\n fieldBuf += '\\\\\"'\n escape = False\n elif c == '\\\\':\n if escape:\n fieldBuf += '\\\\\\\\'\n escape = False\n else:\n escape = True\n else:\n if escape:\n fieldBuf += '\\\\'\n fieldBuf += c\n escape = False\n\n data[field] = '\"%s\"' % fieldBuf\n\n return data",
"def escape_quotes(self, str): \n return str.replace(\"\\\"\", \"\\\\\\\"\")",
"def _EscapeValueForCsv(v):\r\n return '\"%s\"' % v.replace('\"', '\"\"')",
"def lua_escape(s: str) -> str:\n return ''.join(LUA_ESCAPES[i] for i in s.encode('utf8'))",
"def shellquote(s):\n return \"'\" + re.sub(r'(?<![\\'\\\"])\\'(?![\\'\\\"])', r\"'\\''\",\n re.sub(r\"'\\\\''\", \"'\\\"'\\\\''\\\"'\", s)) + \"'\"",
"def escape_string(self, s): # real signature unknown; restored from __doc__\n pass",
"def quoted_paths(paths: Iterable[Path]) -> str:\n return \" \".join([f'\"{p}\"' for p in paths])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Print a tuple as output of TSV extractor.
|
def print_tsv_output(out_record):
values = []
for x in out_record:
if isinstance(x, list) or isinstance(x, tuple):
cur_val = list_to_pg_array(x)
elif x is None:
cur_val = '\N'
else:
cur_val = x
values.append(cur_val)
print '\t'.join(str(x) for x in values)
|
[
"def print_aln(tuple):\n print tuple[0]\n print tuple[1]\n print \"Score:\", tuple[2]",
"def tsv_line(value_list):\n return '\\t'.join([str(x) for x in value_list])",
"def get_tsv(self):\n msg = ''\n for stmt in self.get_statements():\n if not stmt.evidence:\n logger.warning('Statement %s without evidence' % stmt.uuid)\n txt = ''\n pmid = ''\n else:\n txt = stmt.evidence[0].text if stmt.evidence[0].text else ''\n pmid = stmt.evidence[0].pmid if stmt.evidence[0].pmid else ''\n line = '%s\\t%s\\t%s\\n' % (stmt, txt, pmid)\n msg += line\n return msg",
"def write_tsv(outfname, rows, colnames=None):\n with tabio.safe_write(outfname or sys.stdout) as handle:\n if colnames:\n header = \"\\t\".join(colnames) + \"\\n\"\n handle.write(header)\n handle.writelines(\"\\t\".join(map(str, row)) + \"\\n\" for row in rows)",
"def print_output(header, str, result):\n log_write(header)\n print header\n for index, row in enumerate(result, 1):\n output = str.format(index, row[0], row[1])\n log_write(output)\n print output",
"def _tab_print_ ( t , title = '' , prefix = '' , alignment = 'll' , xfmt = '%+.5g' , yfmt = '%+-.5g' ) :\n rows = [ ('Abscissa' , 'Value' ) ] \n for i in range ( t.size() ) :\n x = t.x ( i )\n y = t.y ( i )\n row = xfmt % x, yfmt % y\n rows.append ( row )\n \n if not title : title = 'Interpolation Table' \n import ostap.logger.table as T\n return T.table ( rows , title = title , prefix = prefix , alignment = alignment )",
"def create_tsv(output, data):\n if not output:\n output = open('evaluation.dat', 'w')\n i = 0\n for item in data:\n path, label = item.split(':')\n xml_file = open(path).read()\n completion_rate = calculate_completion_rate(xml_file)\n output.write('{0}\\t{1}\\t{2}\\r\\n'.format(str(i), label, str(completion_rate)))\n i += 1\n output.close()",
"def to_tsv(self, output_file):\n csvw = csv.writer(output_file, delimiter=\"\\t\", quoting=csv.QUOTE_NONE)\n for row in self.summary:\n csvw.writerow(row)",
"def write_tsv(self, filename):\n\n output = StringIO()\n\n # Add the header line\n output.write('model_name\\t')\n output.write('\\t'.join([r.func_name for r in self.reporters]))\n output.write('\\n')\n\n # Transpose the results list\n results = zip(*self.results)\n\n for model_name, result_row in zip(self.names, results):\n output.write(model_name + '\\t')\n output.write('\\t'.join([r.get_text() for r in result_row]))\n output.write('\\n')\n\n with open(filename, 'w') as f:\n f.write(output.getvalue())",
"def get_tupledesc(self):",
"def run_main_tsv(row_parser, row_fn):\n for line in sys.stdin:\n for line_out in row_fn(row_parser(line)):\n print_tsv_output(line_out)",
"def GetTsvScreenPrinter():\r\n writer = UnicodeWriter(sys.stdout, dialect='excel-tab')\r\n return ExportPrinter(writer)",
"def write_tsv(obo_records_dict, output_stream, separator=\", \"):\n\n header = [\"id\", \"name\", \"is_a\", \"namespace\", \"def\"]\n records = yield_all(obo_records_dict)\n\n for record in records:\n row = []\n for tag in header:\n value = record.get(tag)\n if value is None:\n row.append(\"\")\n elif isinstance(value, list):\n row.append(separator.join(map(str, value)))\n else:\n row.append(str(value))\n output_stream.write(\"\\t\".join(row))\n output_stream.write(\"\\n\")",
"def write_to_tsv_file(data_across_features, info_cols, output_handle, close=False):\n for info_batch, preds_batch in zip(info_cols, data_across_features):\n for info, preds in zip(info_batch, preds_batch):\n preds_str = '\\t'.join(\n probabilities_to_string(list(preds)))\n info_str = '\\t'.join([str(i) for i in info])\n output_handle.write(\"{0}\\t{1}\\n\".format(info_str, preds_str))\n if close:\n output_handle.close()",
"def write_tsv(outfile, header, rows, *,\n _open=open,\n _get_writer=csv.writer,\n ):\n if isinstance(outfile, str):\n with _open(outfile, 'w', newline='') as outfile:\n return write_tsv(outfile, header, rows,\n _open=_open,\n _get_writer=_get_writer,\n )\n\n if isinstance(header, str):\n header = header.split('\\t')\n writer = _get_writer(outfile, delimiter='\\t')\n writer.writerow(header)\n for row in rows:\n writer.writerow('' if v is None else str(v)\n for v in row)",
"def printTextCloud(sortedListTuple):\n \n print('Here is the text cloud for your web page:')\n for i in range (0, len(sortedListTuple[0])):\n print(sortedListTuple[0][i] + ' (' + str(sortedListTuple[1][i]) + ')')",
"def printTestResult(self):\n splitter = \"=================================================================================================\"\n print(\"\\n\" + splitter)\n print(\"%-3s%-60s%11s\" % ('ID', 'Testcase Name', 'Test Result'))\n for i in range(len(self)):\n print(\"%-3d%-60s%11s\" % (i + 1, self[i].name, self[i].result))\n print(splitter + \"\\n\")",
"def print_table(self, limit=None):\n old_version_tuples = []\n most_recent_tuples = []\n\n for row in self.tuples:\n if len(row) > 0:\n most_recent_tuples.append(row[-1])\n\n for version in row[:-1]:\n old_version_tuples.append(version)\n\n self._print(limit, most_recent_tuples, False)\n\n if len(old_version_tuples) > 0:\n print('\\n\\n')\n self._print(limit, old_version_tuples, True)",
"def print_verbose_report(tests):\n for t in tests:\n if t.result in ('*', 'f'):\n print( \"// === %s (result: %s) ===\" % (t.name, t.result) )\n print( \"// --- input ---\" )\n print( t.input )\n print( \"// --- expected output ---\" )\n print( t.output )\n print( \"// --- obtained output ---\" )\n print( t.stdoutdata )\n if t.result not in ('.', '*', 'f'):\n print( \"// === %s (result: %s) ===\" % (t.name, t.result) )\n print( \"// --- obtained stderr ---\" )\n print( t.stderrdata )",
"def tsv_from_query(rows, descriptions):\n tsv = u\"\\t\".join([escape_for_tsv(x) for x in descriptions]) + u\"\\n\"\n for row in rows:\n tsv += u\"\\t\".join([escape_for_tsv(x) for x in row]) + u\"\\n\"\n return tsv"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Runs through lines in sys.stdin, applying row_fn(row_parser(line)) Assumes that this outputs a list of rows, which get printed out in tsv format Has standard error handling for malformed rows optimally row_fn returns object with pretty print
|
def run_main_tsv(row_parser, row_fn):
for line in sys.stdin:
for line_out in row_fn(row_parser(line)):
print_tsv_output(line_out)
|
[
"def _read_format_line(line, format):\r\n rows = line.strip().split(\"\\t\")\r\n return _read_format_rows(rows, format)",
"def rows(self):\n def parse_result_row(row):\n return row.split(\"\\t\")\n\n for row in self.results.data:\n yield parse_result_row(row)",
"def process_rows(self, row_fn, init_fn=None, final_fn=None):\n self._impl.process_rows(row_fn, init_fn, final_fn)",
"def parse(f):\n result = []\n schema = []\n\n # lno is current line number\n # raw is the text as read from f without any alterations\n for lno, raw in enumerate(f):\n # Skip comment lines, middle lines, and empty lines.\n if raw.startswith('%') or raw == '---+---+---\\n' or raw == '\\n':\n continue\n\n line = []\n try:\n # Strip visual splitting characters away and convert to integers.\n line = list(map(int, raw.strip().replace('|', '').replace('.', '0')))\n except:\n print('Invalid input on line {}!'.format(lno))\n exit(1)\n\n # Note that MAX_DIGIT is the width of the schema.\n if len(line) != MAX_DIGIT:\n print('Expected {} cells on line {} but got {}'.format(MAX_DIGIT, lno, len(line)))\n exit(1)\n\n schema.append(line)\n\n # Note that MAX_DIGIT is the height of the schema.\n if len(schema) == MAX_DIGIT:\n result.append(schema)\n schema = []\n\n return result",
"def preprocess_tsv(line,\n field_delim='\\t',\n num_fields=2,\n inputs_format='{0}',\n targets_format='{1}',\n field_names=None,\n use_quote_delim=False):\n def _format_part_with_field_numbers(part, field_values):\n found = re.findall(r'{(\\d+)}', part)\n if found:\n return field_values[int(found[0])]\n else:\n return part\n\n def _format_part_with_field_names(part, field_names, field_values):\n field_names_re = '|'.join(['{{({})}}'.format(x) for x in field_names])\n found = re.findall(field_names_re, part)\n if found:\n pos = field_names.index(''.join(found[0]))\n return field_values[int(pos)]\n else:\n return part\n\n def _format(format_string, field_names, field_values):\n if field_names is None:\n parts = [\n _format_part_with_field_numbers(p, field_values)\n for p in re.split(r'({\\d+})', format_string)\n ]\n else:\n field_names_re = '(' + '|'.join(['{{{}}}'.format(x) for x in field_names\n ]) + ')'\n parts = [\n _format_part_with_field_names(p, field_names, field_values)\n for p in re.split(field_names_re, format_string)\n ]\n return tf.strings.join(parts)\n\n field_values = tf.io.decode_csv(\n line,\n record_defaults=[''] *\n (num_fields if field_names is None else len(field_names)),\n field_delim=field_delim,\n use_quote_delim=use_quote_delim)\n return {\n 'inputs': _format(inputs_format, field_names, field_values),\n 'targets': _format(targets_format, field_names, field_values)\n }",
"def process_line(line, sqrub, prefix=None, schema=None):\n\n indent = sqrub.indent\n # test if end of line has end of block\n if re.search(r'\\);$', line):\n sqrub.indent = False\n # remove noise lines from parse\n if re.search(r'^--', line) or line == '' or line == ');':\n return line\n # remove \\' and replace with ''\n if re.search(r'\\'', line.upper()):\n line = line.replace('\\\\\\'', '\\'\\'')\n # CASE: INSERT INTO\n if re.search(r'^INSERT INTO', line.upper()):\n sqrub.indent = True\n return split_insert_line(line, prefix, schema)\n # CASE: VALUES or sub-line\n if re.search(r'VALUES\\s?\\((E?\\'|NULL|\\d+,)', line.upper()):\n return ' ' + line\n if re.search(r'\\s?\\((E?\\'|NULL|\\d+,)', line.upper()):\n return ' ' + line\n # special DDL line with no name\n for tok in DDL_OTHER_KEYWORDS:\n if re.search(r''.join(tok), line.lower()):\n return line\n # set up initial values of name and remain for existence test later\n name = None\n remain = None\n for tok in DDL_KEYWORDS:\n if tok in line.lower():\n if ' '.join((tok, 'if exists')) in line.lower():\n tok = ' '.join((tok, 'if exists'))\n name, remain = split_line_with_token(line, tok)\n name = standardize_name(name, prefix, schema)\n sqrub.indent = True\n return ''.join((tok.upper(), ' ', name, ' ', remain)).replace(' ;', ';')\n # no token at start of line - column declaration\n for tok in DDL_TYPES:\n if tok in line.lower():\n name, remain = split_line_with_column_name(line)\n name = standardize_name(name, prefix=None, schema=None)\n remain = remain.strip()\n if not name or not remain:\n return\n if indent:\n return ' '.join((INDENT, name, remain.upper()))\n else:\n return ' '.join((name, remain.upper()))",
"def parse_row(line):\n\n if type(line) is not str:\n raise TypeError('line must be a non-empty string.')\n if not line.strip():\n raise ValueError('line must be a non-empty string.')\n\n row_instructions = line[line.index(':') + 2 :]\n\n number = _find_first_num(line)\n\n side = None\n if re.search('rs|right side', line, re.IGNORECASE):\n side = 'RS'\n elif re.search('ws|wrong side', line, re.IGNORECASE):\n side = 'WS'\n\n row = Row([Annotation(row_instructions)], number, side)\n\n if re.search(IN_ROW_REPEAT_REGEX, line, re.IGNORECASE):\n return Row(parse_in_row_repeat(row_instructions), number, side)\n\n return row",
"def csvtab():\n parser = _default_arguments()\n\n args, remainder = parser.parse_known_args()\n\n informat = getattr(parsers, args.informat)(designation='inparser')\n outformat = parsers.table()\n\n informat.parse_args(remainder)\n outformat.parse_args(remainder)\n\n outformat.rows = csvutils.tabulate(informat.file,\n parser=informat,\n maxw=outformat.column_maxwidth,\n pad=outformat.padding)\n\n outformat.write(outformat.file)",
"def parse_file(fname):\n for table in tabula.read_pdf(fname, pages=\"all\", multiple_tables=True, pandas_options={'dtype': str}):\n table = table.apply(parse_row, axis=1).apply(pd.Series)\n yield table",
"def each_csv_row(csv_file: str, func, skip_header=False, encoding='utf-8'):\n log.info('parse CSV file %s', csv_file)\n with open(csv_file, 'r', encoding=encoding, newline='\\n') as f:\n reader = csv.reader(f)\n i = 0\n if skip_header:\n next(reader)\n i += 1\n for row in reader:\n r = [v.strip() for v in row]\n func(r, i)\n i += 1",
"def rows(rows, col, find, file):\n\n print(f'___file {file}')\n df = pd.read_csv(file)\n\n parsed_r = []\n parsed_c = []\n sel_row = []\n sel_col = []\n\n range_start = 0\n range_end = 0\n\n if ',' in rows:\n parsed_r = rows.split(',')\n print(f'parsed_r {parsed_r}')\n\n if ',' in col:\n parsed_c = col.split(',')\n print(f'parsed_c {parsed_c}')\n\n print(f\"my rows {rows} and col {col}\")\n if parsed_r:\n for i in parsed_r:\n sel_row.append(int(i))\n print(i)\n\n if col == 'all':\n sel_col = 'all'\n\n print(f\"my col {rows} and col {col}\")\n if len(col) == 1:\n sel_col.append(int(col))\n if parsed_c:\n for i in parsed_c:\n sel_col.append(int(i))\n print(i)\n\n my_rows(sel_row, sel_col, sel_find, df)",
"def print_rows(rows):\n # All of the function body is a todo task\n for row in rows:\n print(row)",
"def read_tsv(input_file):\n try:\n with open(input_file, 'r') as input:\n # first line in a tsv (the header)\n header_text = input.readline()\n # ordered list of header keys\n key_list = header_text.strip('\\n').split('\\t')\n # starts on next line\n for line in input.readlines():\n line_json = {}\n # ordered list of line values\n line_list = line.strip('\\n').split('\\t')\n # map values to keys\n index = 0\n for key in key_list:\n value = line_list[index]\n # change empty strings to None (null)\n if value == '':\n value = None\n line_json.update({key:value})\n index += 1\n yield line_json\n except IOError as e:\n raise Exception('File {} invalid. Error: {}'.format(input_file, e))",
"def row_mangler(self, source, row_gen, row):\n if row_gen._header and len(row_gen._header) < len(row):\n\n if '\\t' in row:\n raise Exception() # tabs should be handled by filetype in the sources.\n\n i = len(row_gen._header) - 1\n row = row[:i] + [','.join(row[i:])]\n return row",
"def tb_iterator(path: Path) -> Generator[TbRow, None, None]:\n with path.open(mode=\"r\") as fp:\n for line in fp:\n yield TbRow.from_line(line)",
"def parse(self, source, sink, **kwargs):\n\t\trow_num = 1\n\t\tfURI = kwargs.get(\"csv_file_URI\", \"\")\n\t\tself._add_table(fURI, sink)\n\t\ttry:\n\t\t\tf = source.getByteStream()\n\t\t\trows = csv.reader(f, delimiter=',', quoting=csv.QUOTE_ALL)\n\t\t\tfor row in rows:\n\t\t\t\tif row_num == 1:\n\t\t\t\t\tcolumns = self._add_header(fURI, row, row_num, sink)\n\t\t\t\telse:\n\t\t\t\t\tself._add_row(fURI, columns, row, row_num, sink)\n\t\t\t\trow_num = row_num + 1\n\t\t\tf.close()\n\t\texcept csv.Error, e:\n\t\t\tsys.exit('%s' %e)",
"def parse(self):\n reader_args = (self.filename,\n self.fs,\n self.header,\n self.max_lines,\n self.field_pre_filter,\n self.record_pre_filter)\n\n with Reader(*reader_args) as reader:\n for nr, record in enumerate(reader, 1): # line numbers start from 1\n record = self.record_func(nr, self._parse_fields(record))\n if self.record_post_filter(nr, record):\n yield record",
"def input_pre_processor():\n\n # alternate table list name may have been provided as a model argument\n table_list_name = inject.get_step_arg('table_list', default='input_table_list')\n table_list = setting(table_list_name)\n assert table_list is not None, \"table list '%s' not in settings.\" % table_list_name\n\n data_dir = data_dir_from_settings()\n\n for table_info in table_list:\n\n tablename = table_info['tablename']\n\n logger.info(\"input_pre_processor processing %s\" % tablename)\n\n # read the csv file\n data_filename = table_info.get('filename', None)\n data_file_path = os.path.join(data_dir, data_filename)\n if not os.path.exists(data_file_path):\n raise RuntimeError(\"input_pre_processor %s - input file not found: %s\"\n % (tablename, data_file_path, ))\n\n logger.info(\"Reading csv file %s\" % data_file_path)\n df = read_csv_with_fallback_encoding(data_file_path)\n\n logger.info(\"input file columns: %s\" % df.columns.values)\n\n drop_columns = table_info.get('drop_columns', None)\n if drop_columns:\n for c in drop_columns:\n logger.info(\"dropping column '%s'\" % c)\n del df[c]\n\n # rename columns\n column_map = table_info.get('column_map', None)\n if column_map:\n df.rename(columns=column_map, inplace=True)\n\n # set index\n index_col = table_info.get('index_col', None)\n if index_col is not None:\n if index_col in df.columns:\n assert not df.duplicated(index_col).any()\n df.set_index(index_col, inplace=True)\n else:\n df.index.names = [index_col]\n\n # read expression file\n # expression_filename = table_info.get('expression_filename', None)\n # if expression_filename:\n # assert False\n # expression_file_path = os.path.join(configs_dir, expression_filename)\n # if not os.path.exists(expression_file_path):\n # raise RuntimeError(\"input_pre_processor %s - expression file not found: %s\"\n # % (table, expression_file_path, ))\n # spec = assign.read_assignment_spec(expression_file_path)\n #\n # df_alias = table_info.get('df_alias', table)\n #\n # 
locals_d = {}\n #\n # results, trace_results, trace_assigned_locals \\\n # = assign.assign_variables(spec, df, locals_d, df_alias=df_alias)\n # # for column in results.columns:\n # # orca.add_column(table, column, results[column])\n #\n # df = pd.concat([df, results], axis=1)\n\n logger.info(\"adding table %s\" % tablename)\n\n # add (or replace) pipeline table\n repop = inject.get_step_arg('repop', default=False)\n inject.add_table(tablename, df, replace=repop)",
"def row_parser(prv_file: str) -> (List[str], List[str], List[str]):\n cpu_list = []\n node_list = []\n thread_list = []\n\n row_file = prv_file[:-4] + '.row'\n try:\n opened_row_file = open(row_file, 'r')\n except FileNotFoundError:\n print(f'==WARNING== Could not open .row file {row_file}')\n return cpu_list, node_list, thread_list\n\n lines = opened_row_file.read().split(\"\\n\")\n opened_row_file.close()\n\n lines_generator = (i for i, s in enumerate(lines) if (\"LEVEL CPU SIZE\" or \"LEVEL TASK SIZE\") in s)\n index = next(lines_generator)\n cpu_size = int(lines[index].split()[3])\n cpu_list = lines[index + 1: index + cpu_size + 1]\n # HDF5 for python only supports string in ASCII code, thus we need to reencode the string\n cpu_list = [name.encode(\"ascii\", \"ignore\") for name in cpu_list]\n lines_generator = (i for i, s in enumerate(lines) if \"LEVEL NODE SIZE\" in s)\n index = next(lines_generator)\n node_size = int(lines[index].split()[3])\n node_list = lines[index + 1: index + node_size + 1]\n node_list = [name.encode(\"ascii\", \"ignore\") for name in node_list]\n lines_generator = (i for i, s in enumerate(lines) if \"LEVEL THREAD SIZE\" in s)\n index = next(lines_generator)\n thread_size = int(lines[index].split()[3])\n thread_list = lines[index + 1: index + thread_size + 1]\n thread_list = [name.encode(\"ascii\", \"ignore\") for name in thread_list]\n\n return cpu_list, node_list, thread_list"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set the desired capacity for the group.
|
def set_capacity(self, capacity):
params = {'AutoScalingGroupName': self.name,
'DesiredCapacity': capacity}
req = self.connection.get_object('SetDesiredCapacity', params,
Request)
self.connection.last_request = req
return req
|
[
"def set_course_capacity(self, capacity: int) -> None:\n self.course_capacity = capacity",
"def resize(self, capacity: int) -> None:\n self.capacity = capacity",
"def set_nominal_capacity(self, capacity_ah=30):\n\n self.master_payloads['BattCap_Write'][4] = capacity_ah",
"def set_capacity(self, scheme, nr_connections):\n self._limit[scheme] = nr_connections\n self._enforce_limits(scheme)",
"def setBandwidthCapacity(self, lb, ub):\n self._setEdgesPropertyRandomly(lb, ub, \"capacity\")",
"def set_capacity(self, capacity):\n return _ldns.ldns_buffer_set_capacity(self, capacity)\n #parameters: ldns_buffer *, size_t,\n #retvals: bool",
"def set_nominal_capacity(self, capacity_ah=30):\n\n self.master_payloads['BattCap_Write'][4] = int(capacity_ah / 2.0)",
"def storage_capacity(self, value: bool):\n self._properties[\"storageCapacity\"] = value",
"def set_battery_design_capacity(self, capacity, timeout=RESPONSE_DELAY):\n\n command.create_set_command(\n command.PROTOCOL_COMMAND_SET_BATTERY_DESIGN_CAPACITY, capacity, 2\n )\n command.send_command()\n delay_ms(timeout)\n raw = command.receive_command(COMMAND_SIZE_FOR_UINT8)\n\n status = raw[PROTOCOL_HEADER_SIZE]\n return status",
"def license_capacity(self, license_capacity):\n\n self._license_capacity = license_capacity",
"def resize(self, new_capacity: int) -> None:\n\n if not isinstance(new_capacity, int) or new_capacity < 1:\n return\n\n if new_capacity < self.size:\n return\n\n #1_a.1 creates list, references, alternative data via copy\n val = 0\n alt_data = [None] * new_capacity\n for i in range(self.size):\n alt_data[i] = self.data[i]\n self.capacity = new_capacity\n self.data = alt_data\n return",
"def set_cost_limit(self, cost):\n self.cost_limit = cost",
"def capacity(self):\n return",
"def tank_size(self, fuel_capacity):\n self.fuel_capacity = fuel_capacity",
"def _ReduceCapacity(self, thickness):\n self.__available_capacity -= thickness",
"def Rfc3918groupCapacity(self):\n\t\tfrom ixnetwork_restpy.testplatform.sessions.ixnetwork.quicktest.rfc3918groupcapacity.rfc3918groupcapacity import Rfc3918groupCapacity\n\t\treturn Rfc3918groupCapacity(self)",
"def transcoding_capacity(self, transcoding_capacity):\n\n self._transcoding_capacity = transcoding_capacity",
"def test_resize_eip_group_bandwidth(self):\n self.client.resize_eip_group_bandwidth(id=EIP_GRP_ID,\n bandwidth_in_mbps=40)",
"def update_capacity(user, male_max, female_max):\n capacity = user.party_capacity\n if capacity is None:\n new_capacity = Capacity.create(male_max=male_max,\n female_max=female_max)\n user.party_capacity = new_capacity\n user.save()\n else:\n user.party_capacity.update(male_max=male_max,\n female_max=female_max)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sync local changes with AutoScaling group.
|
def update(self):
return self.connection._update_group('UpdateAutoScalingGroup', self)
|
[
"def aws_update_autoscaler():\r\n ami_id = aws_create_ami_from()\r\n cur_date = time.strftime('%Y%m%d', time.gmtime())\r\n lcName = 'ns11-%s' % cur_date\r\n lc = LaunchConfiguration(name=lcName, \r\n image_id=ami_id, instance_type=env.aws.get('instance_type'),\r\n key_name=env.aws.get('key_pair'), \r\n security_groups=env.aws.get('security_groups'))\r\n env.asConn.create_launch_configuration(lc)\r\n print \"Created launchConfiguration %s\" % lcName\r\n \r\n ag = AutoScalingGroup(\r\n connection=env.asConn,\r\n launch_config=lc, \r\n group_name=env.aws.get('as_group'), load_balancers=env.aws.get('balancers'),\r\n availability_zones=env.aws.get('availability_zones'))\r\n # min_size=env.aws.get('min_size'), max_size=env.aws.get('max_size'))\r\n ag.update()\r\n # env.asConn.create_auto_scaling_group(ag) \r\n print \"Added launchConfiguration %s to group %s (updated AutoScaleGroup)\" % (lcName, env.aws.get('as_group'))",
"def test_update_group_config(self):\r\n self._setup_test_stack()\r\n\r\n resource = self.stack['my_group']\r\n new_template = copy.deepcopy(resource.parsed_template())\r\n new_template['Properties']['groupConfiguration']['minEntities'] = 5\r\n scheduler.TaskRunner(resource.update, new_template)()\r\n\r\n self.assertEqual(1, len(self.fake_auto_scale.groups))\r\n self.assertEqual(\r\n 5, self.fake_auto_scale.groups['0'].kwargs['min_entities'])",
"def test_instance_group_update_replace_with_adjusted_capacity(self):\r\n updt_template = json.loads(ig_tmpl_with_updt_policy)\r\n grp = updt_template['Resources']['JobServerGroup']\r\n policy = grp['UpdatePolicy']['RollingUpdate']\r\n policy['MinInstancesInService'] = '8'\r\n policy['MaxBatchSize'] = '4'\r\n config = updt_template['Resources']['JobServerConfig']\r\n config['Properties']['ImageId'] = 'bar'\r\n\r\n self.update_instance_group(ig_tmpl_with_updt_policy,\r\n json.dumps(updt_template),\r\n num_updates_expected_on_updt=8,\r\n num_creates_expected_on_updt=2,\r\n num_deletes_expected_on_updt=2,\r\n update_replace=True)",
"def update(self, scaling_group, name=None, cooldown=None, min_entities=None,\r\n max_entities=None, metadata=None):\r\n return self._manager.update(scaling_group, name=name, cooldown=cooldown,\r\n min_entities=min_entities, max_entities=max_entities,\r\n metadata=metadata)",
"def dupli_group_swap(self, obj, new_group):\n obj.dupli_group = new_group \n obj.name = new_group.name",
"async def upgrade(ctx: MigrationContext):\n async with AsyncSession(ctx.pg) as session:\n async for old_group in ctx.mongo.groups.find({}):\n group = (\n await session.execute(\n select(SQLGroup).where(SQLGroup.legacy_id == old_group[\"_id\"])\n )\n ).one_or_none()\n\n if not group:\n session.add(\n SQLGroup(\n legacy_id=old_group[\"_id\"],\n name=old_group[\"name\"],\n permissions=old_group[\"permissions\"],\n )\n )\n\n await session.commit()",
"def test_instance_group_update_replace_huge_min_in_service(self):\r\n updt_template = json.loads(ig_tmpl_with_updt_policy)\r\n group = updt_template['Resources']['JobServerGroup']\r\n policy = group['UpdatePolicy']['RollingUpdate']\r\n policy['MinInstancesInService'] = '20'\r\n policy['MaxBatchSize'] = '1'\r\n policy['PauseTime'] = 'PT0S'\r\n config = updt_template['Resources']['JobServerConfig']\r\n config['Properties']['ImageId'] = 'bar'\r\n\r\n self.update_instance_group(ig_tmpl_with_updt_policy,\r\n json.dumps(updt_template),\r\n num_updates_expected_on_updt=9,\r\n num_creates_expected_on_updt=1,\r\n num_deletes_expected_on_updt=1,\r\n update_replace=True)",
"def test_autoscaling_group_update_replace_huge_batch_size(self):\r\n updt_template = json.loads(asg_tmpl_with_updt_policy)\r\n group = updt_template['Resources']['WebServerGroup']\r\n policy = group['UpdatePolicy']['AutoScalingRollingUpdate']\r\n policy['MinInstancesInService'] = '0'\r\n policy['MaxBatchSize'] = '20'\r\n config = updt_template['Resources']['LaunchConfig']\r\n update_image = 'F17-x86_64-cfntools'\r\n config['Properties']['ImageId'] = update_image\r\n\r\n self.update_autoscaling_group(asg_tmpl_with_updt_policy,\r\n json.dumps(updt_template),\r\n num_updates_expected_on_updt=10,\r\n num_creates_expected_on_updt=0,\r\n num_deletes_expected_on_updt=0,\r\n num_reloads_expected_on_updt=3,\r\n update_replace=True,\r\n update_image_id=update_image)",
"def replace(self, scaling_group, name, cooldown, min_entities,\r\n max_entities, metadata=None):\r\n body = self._create_group_config_body(name, cooldown, min_entities,\r\n max_entities, metadata=metadata)\r\n group_id = utils.get_id(scaling_group)\r\n uri = \"/%s/%s/config\" % (self.uri_base, group_id)\r\n resp, resp_body = self.api.method_put(uri, body=body)",
"def sync_images():\n logging.info(\"Synchronising images using glance-simplestreams-sync\")\n generic_utils.assertActionRanOK(\n zaza_model.run_action_on_leader(\n \"glance-simplestreams-sync\",\n \"sync-images\",\n raise_on_failure=True,\n action_params={},\n )\n )",
"def test_instance_group_update_replace_huge_batch_size(self):\r\n updt_template = json.loads(ig_tmpl_with_updt_policy)\r\n group = updt_template['Resources']['JobServerGroup']\r\n policy = group['UpdatePolicy']['RollingUpdate']\r\n policy['MinInstancesInService'] = '0'\r\n policy['MaxBatchSize'] = '20'\r\n config = updt_template['Resources']['JobServerConfig']\r\n config['Properties']['ImageId'] = 'bar'\r\n\r\n self.update_instance_group(ig_tmpl_with_updt_policy,\r\n json.dumps(updt_template),\r\n num_updates_expected_on_updt=10,\r\n num_creates_expected_on_updt=0,\r\n num_deletes_expected_on_updt=0,\r\n update_replace=True)",
"def test_update_device_group(self):\n pass",
"def sync(self):\n self.info.sync()",
"def updated(self, group, **payload):\n pass",
"def update_inplace(self):\n for resource_name in self.all_custom_ami_resources():\n ami = self.resources[resource_name]\n self.load_latest_ami_name_pattern(ami)\n self.update_ami(resource_name, ami)",
"def replace(self, scaling_group, name, cooldown, min_entities,\r\n max_entities, metadata=None):\r\n return self._manager.replace(scaling_group, name, cooldown,\r\n min_entities, max_entities, metadata=metadata)",
"def sync_gcp_organizations(\n neo4j_session: neo4j.Session, crm_v1: Resource, gcp_update_tag: int,\n common_job_parameters: Dict,\n) -> None:\n logger.debug(\"Syncing GCP organizations\")\n data = get_gcp_organizations(crm_v1)\n load_gcp_organizations(neo4j_session, data, gcp_update_tag)\n cleanup_gcp_organizations(neo4j_session, common_job_parameters)",
"def update(self, api_client, **kwargs):\n cmd = {'id': self.id}\n cmd.update(kwargs)\n return api_client.updateInstanceGroup(**cmd)",
"def sync_instances(self, ctxt, project_id, updated_since, deleted):\n self.msg_runner.sync_instances(ctxt, project_id, updated_since,\n deleted)",
"def _add_auto_scaling(self):\n auto_scaling_group = self.fargate_service.service.auto_scale_task_count(\n min_capacity=2,\n max_capacity=10\n )\n auto_scaling_group.scale_on_cpu_utilization(\n 'CpuScaling',\n target_utilization_percent=50,\n scale_in_cooldown=core.Duration.seconds(60),\n scale_out_cooldown=core.Duration.seconds(60)\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Delete this autoscaling group if no instances attached or no scaling activities in progress.
|
def delete(self, force_delete=False):
return self.connection.delete_auto_scaling_group(self.name,
force_delete)
|
[
"def destroy(self):\r\n return self.driver.ex_destroy_group(self)",
"def delete(name, force=False, region=None, key=None, keyid=None, profile=None):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n retries = 30\n while True:\n try:\n conn.delete_auto_scaling_group(name, force)\n msg = \"Deleted autoscale group {}.\".format(name)\n log.info(msg)\n return True\n except boto.exception.BotoServerError as e:\n if retries and e.code == \"Throttling\":\n log.debug(\"Throttled by AWS API, retrying in 5 seconds...\")\n time.sleep(5)\n retries -= 1\n continue\n log.error(e)\n msg = \"Failed to delete autoscale group {}\".format(name)\n log.error(msg)\n return False",
"def test_delete_without_backing_group(self):\r\n self._setup_test_stack()\r\n resource = self.stack['my_group']\r\n del self.fake_auto_scale.groups['0']\r\n scheduler.TaskRunner(resource.delete)()\r\n self.assertEqual({}, self.fake_auto_scale.groups)",
"def handle_delete(self):\r\n if self.resource_id is None:\r\n return\r\n asclient = self.stack.clients.auto_scale()\r\n args = self._get_group_config_args(\r\n self.properties[self.GROUP_CONFIGURATION])\r\n args['min_entities'] = 0\r\n args['max_entities'] = 0\r\n try:\r\n asclient.replace(self.resource_id, **args)\r\n except NotFound:\r\n pass",
"def DeleteInstanceGroup(tag, try_count=0):\n clovis_logger.info('Instance group destruction for tag: ' + tag)\n if not instance_helper.DeleteInstanceGroup(tag):\n clovis_logger.info('Instance group destruction failed for: ' + tag)\n if try_count <= 5:\n deferred.defer(DeleteInstanceGroup, tag, try_count + 1, _countdown=60)\n return\n clovis_logger.error('Giving up group destruction for: ' + tag)\n clovis_logger.info('Scheduling instance template destruction for tag: ' + tag)\n # Wait a little before deleting the instance template, because it may still be\n # considered in use, causing failures.\n deferred.defer(DeleteInstanceTemplate, tag, _countdown=30)",
"def delete(self):\n if self.container is not None:\n self.container.stop()\n # Delete the container\n # Actually, this function removes all stopped containers with the old container's id, which does the same\n self.docker.containers.prune(filters={\"id\": self.container.id})\n # TODO : smarter image retrieval. image[0].tags[0] will probably always work, but still\n self.docker.images.remove(self.image[0].tags[0])",
"def test_destroy_not_deployed_group(self):\r\n location = self.driver.list_locations()[0]\r\n group = self.driver.ex_list_groups(location)[1]\r\n self.assertTrue(group.destroy())",
"def test_ex_create_and_delete_empty_group(self):\r\n group = self.driver.ex_create_group('libcloud_test_group')\r\n group.destroy()",
"def delete(self, group_id):\r\n group = self._get_group(group_id)\r\n if (group.kwargs['min_entities'] > 0\r\n or group.kwargs['max_entities'] > 0):\r\n raise Exception(\"Can't delete yet!\")\r\n del self.groups[group_id]",
"def delete_all(self):\n to_delete = list(self.instances.keys())\n if len(to_delete) > 0: # Only delete stuff if there's stuff to\n # delete.\n self.delete(to_delete)",
"def nuke(self, older_than_seconds: float) -> None:\n for scaling in self.list_asg(older_than_seconds):\n try:\n self.asg.delete_auto_scaling_group(\n AutoScalingGroupName=scaling, ForceDelete=True\n )\n print(\"Nuke Autoscaling Group {0}\".format(scaling))\n except ClientError as exc:\n nuke_exceptions(\"autoscaling group\", scaling, exc)\n\n for launch_conf in self.list_launch_confs(older_than_seconds):\n try:\n self.asg.delete_launch_configuration(\n LaunchConfigurationName=launch_conf\n )\n print(\"Nuke Launch Configuration {0}\".format(launch_conf))\n except ClientError as exc:\n nuke_exceptions(\"launch configuration\", launch_conf, exc)",
"def destroy(self, request, *args, **kwargs):\n group = self.get_object()\n if group.user_set.all():\n logger.info(\"Trying to delete a not empty group\")\n return Response(\n {\"message\": \"Group has users. Can not be deleted.\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n group.delete()\n logger.info(\"Group deleted\")\n return Response(\n {\"message\": \"Group deleted successfully\"},\n status=status.HTTP_200_OK,\n )",
"def test_destroy_deployed_group(self):\r\n location = self.driver.list_locations()[0]\r\n group = self.driver.ex_list_groups(location)[0]\r\n self.assertTrue(group.destroy())",
"def delete(self):\n url = f'{self._okta.api}/groups/{self.id}'\n response = self._okta.session.delete(url)\n return response.ok",
"def schedule_drained_instance_cleanup():\n for instance_group_manager_key in (\n instance_group_managers.get_drained_instance_group_managers()):\n instance_group_manager = instance_group_manager_key.get()\n if instance_group_manager:\n for instance_key in instance_group_manager.instances:\n instance = instance_key.get()\n if instance and not instance.cataloged:\n utilities.enqueue_task('cleanup-drained-instance', instance.key)",
"def delete_instance_group_manager(key):\n instance_group_manager = yield key.get_async()\n if not instance_group_manager:\n logging.warning('InstanceGroupManager does not exist: %s', key)\n return\n\n if instance_group_manager.url or instance_group_manager.instances:\n return\n\n instance_template_revision = yield key.parent().get_async()\n if not instance_template_revision:\n logging.warning('InstanceTemplateRevision does not exist: %s', key.parent())\n return\n\n instance_template = yield instance_template_revision.key.parent().get_async()\n if not instance_template:\n logging.warning(\n 'InstanceTemplate does not exist: %s',\n instance_template_revision.key.parent(),\n )\n return\n\n # If the InstanceGroupManager is drained, we can delete it now.\n for i, drained_key in enumerate(instance_template_revision.drained):\n if key.id() == drained_key.id():\n instance_template_revision.drained.pop(i)\n yield instance_template_revision.put_async()\n yield key.delete_async()\n return\n\n # If the InstanceGroupManager is implicitly drained, we can still delete it.\n if instance_template_revision.key in instance_template.drained:\n for i, drained_key in enumerate(instance_template_revision.active):\n if key.id() == drained_key.id():\n instance_template_revision.active.pop(i)\n yield instance_template_revision.put_async()\n yield key.delete_async()",
"def delete(self, *args, **kwargs):\n try:\n self.terminate_task()\n self.periodic_task.delete()\n except:\n pass\n return super(ShoalScrapeTask, self).delete(*args, **kwargs)",
"def _cleanup_allocations(self, context, instance_uuids):\n if not instance_uuids:\n return\n\n LOG.debug(\"Cleaning up allocations for %s\", instance_uuids)\n for uuid in instance_uuids:\n self.placement_client.delete_allocation_for_instance(\n context, uuid, force=True)",
"def test_destroy_deployed_group_failed(self):\r\n self.driver = AbiquoNodeDriver('muten', 'roshi',\r\n 'http://dummy.host.com/api')\r\n location = self.driver.list_locations()[0]\r\n group = self.driver.ex_list_groups(location)[0]\r\n self.assertFalse(group.destroy())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Suspends Auto Scaling processes for an Auto Scaling group.
|
def suspend_processes(self, scaling_processes=None):
return self.connection.suspend_processes(self.name, scaling_processes)
|
[
"def delete(name, force=False, region=None, key=None, keyid=None, profile=None):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n retries = 30\n while True:\n try:\n conn.delete_auto_scaling_group(name, force)\n msg = \"Deleted autoscale group {}.\".format(name)\n log.info(msg)\n return True\n except boto.exception.BotoServerError as e:\n if retries and e.code == \"Throttling\":\n log.debug(\"Throttled by AWS API, retrying in 5 seconds...\")\n time.sleep(5)\n retries -= 1\n continue\n log.error(e)\n msg = \"Failed to delete autoscale group {}\".format(name)\n log.error(msg)\n return False",
"def _add_auto_scaling(self):\n auto_scaling_group = self.fargate_service.service.auto_scale_task_count(\n min_capacity=2,\n max_capacity=10\n )\n auto_scaling_group.scale_on_cpu_utilization(\n 'CpuScaling',\n target_utilization_percent=50,\n scale_in_cooldown=core.Duration.seconds(60),\n scale_out_cooldown=core.Duration.seconds(60)\n )",
"def pause(self, scaling_group):\r\n # NOTE: This is not yet implemented. The code is based on the docs,\r\n # so it should either work or be pretty close.\r\n return self._manager.pause(scaling_group)",
"def resume(self, scaling_group):\r\n # NOTE: This is not yet implemented. The code is based on the docs,\r\n # so it should either work or be pretty close.\r\n return self._manager.resume(scaling_group)",
"def pause(self, scaling_group):\r\n uri = \"/%s/%s/pause\" % (self.uri_base, utils.get_id(scaling_group))\r\n resp, resp_body = self.api.method_post(uri)\r\n return None",
"def aws_update_autoscaler():\r\n ami_id = aws_create_ami_from()\r\n cur_date = time.strftime('%Y%m%d', time.gmtime())\r\n lcName = 'ns11-%s' % cur_date\r\n lc = LaunchConfiguration(name=lcName, \r\n image_id=ami_id, instance_type=env.aws.get('instance_type'),\r\n key_name=env.aws.get('key_pair'), \r\n security_groups=env.aws.get('security_groups'))\r\n env.asConn.create_launch_configuration(lc)\r\n print \"Created launchConfiguration %s\" % lcName\r\n \r\n ag = AutoScalingGroup(\r\n connection=env.asConn,\r\n launch_config=lc, \r\n group_name=env.aws.get('as_group'), load_balancers=env.aws.get('balancers'),\r\n availability_zones=env.aws.get('availability_zones'))\r\n # min_size=env.aws.get('min_size'), max_size=env.aws.get('max_size'))\r\n ag.update()\r\n # env.asConn.create_auto_scaling_group(ag) \r\n print \"Added launchConfiguration %s to group %s (updated AutoScaleGroup)\" % (lcName, env.aws.get('as_group'))",
"def resume_processes(self, scaling_processes=None):\n return self.connection.resume_processes(self.name, scaling_processes)",
"def _kill_all(self):\n try:\n os.killpg(self._jobid, signal.SIGKILL)\n except (ProcessLookupError, PermissionError):\n # The process group may already be dead or assigned to a different\n # group, so ignore this error\n pass",
"def _sleep(self):\n self.kill()",
"def restart_group(self, groupname):\n self._apply_group_func(groupname, self.restart_process)",
"def suspend(self):\n if 'suspend' in self.data.links:\n self.make_request(\n ActionCommandFailed,\n method='update',\n etag=self.etag,\n resource='suspend')\n self._del_cache()\n else:\n raise ActionCommandFailed('Task is already suspended. Call activate '\n 'to reactivate.')",
"def test_autoscaling_group_update_replace_huge_batch_size(self):\r\n updt_template = json.loads(asg_tmpl_with_updt_policy)\r\n group = updt_template['Resources']['WebServerGroup']\r\n policy = group['UpdatePolicy']['AutoScalingRollingUpdate']\r\n policy['MinInstancesInService'] = '0'\r\n policy['MaxBatchSize'] = '20'\r\n config = updt_template['Resources']['LaunchConfig']\r\n update_image = 'F17-x86_64-cfntools'\r\n config['Properties']['ImageId'] = update_image\r\n\r\n self.update_autoscaling_group(asg_tmpl_with_updt_policy,\r\n json.dumps(updt_template),\r\n num_updates_expected_on_updt=10,\r\n num_creates_expected_on_updt=0,\r\n num_deletes_expected_on_updt=0,\r\n num_reloads_expected_on_updt=3,\r\n update_replace=True,\r\n update_image_id=update_image)",
"def delete(self, force_delete=False):\n return self.connection.delete_auto_scaling_group(self.name,\n force_delete)",
"def _kill_after_delay() -> None:\n time.sleep(5)\n proc.kill()",
"def can_drain_instances(session):\n for asg in session.asgs:\n asg_result = session.clients[\"autoscaling\"].describe_auto_scaling_groups(\n AutoScalingGroupNames=[asg]\n )\n\n min_num = asg_result[\"AutoScalingGroups\"][0][\"MinSize\"]\n\n if not len(asg_result[\"AutoScalingGroups\"][0][\"Instances\"]) >= min_num:\n return False\n\n for instance in asg_result[\"AutoScalingGroups\"][0][\"Instances\"]:\n if (\n not instance[\"LifecycleState\"] == \"InService\"\n or not instance[\"HealthStatus\"] == \"Healthy\"\n ):\n return False\n\n list_result = session.clients[\"ecs\"].list_container_instances(\n cluster=session.cluster, filter=f\"attribute:ecs.ami-id == {session.ami}\"\n )\n\n if not len(list_result[\"containerInstanceArns\"]) >= min_num:\n return False\n\n return True",
"def test_scaledown(self):\n pool = self.ThreadPool(TestThread, initial_size=10, keywordarg=\"keywordstring\")\n \n try:\n self.assertEqual(threading.activeCount(), 11)\n pool.scale_down(timeout=1)\n self.assertEqual(threading.activeCount(), 10)\n pool.scale_down(timeout=1)\n self.assertEqual(threading.activeCount(), 9)\n pool.scale_down(timeout=1)\n self.assertEqual(threading.activeCount(), 8)\n pool.scale_down(timeout=1)\n self.assertEqual(threading.activeCount(), 7)\n pool.scale_down(timeout=1)\n finally:\n pool.safe_terminate()",
"def test_kill_a_pd(self):\n\n n = 1\n self._update_policy_params_and_assert({'preserve_n': n})\n self._assert_n_processes(n)\n\n n = 2\n self._update_policy_params_and_assert({'preserve_n': n})\n for pd in self.pd_names:\n self._assert_n_processes(1, only_pd=pd)\n\n upids_before_kill = list(self.haservice.core.managed_upids)\n\n killed_pd = self.pd_names.pop()\n self.epuharness.stop(services=[killed_pd])\n\n timeout = 30\n while timeout >= 0 and upids_before_kill == self.haservice.core.managed_upids:\n # Waiting for HA Service to notice\n print \"Managed UPIDs: %s\" % self.haservice.core.managed_upids\n time.sleep(1)\n timeout -= 1\n if timeout <= 0:\n assert \"Took too long for haservice to notice missing upid\"\n\n assert upids_before_kill != self.haservice.core.managed_upids\n\n n = 2\n self._assert_n_processes(n)",
"def auto_terminate_cluster(self):\n if self.is_aws:\n stats = self.aws_util.ec2_get_kube_stats()\n if stats and stats < config.AUTO_TERMINATE_CPU:\n msg = \"Low CPU utilization detected in k8s cluster! Auto-terminating...\"\n print(glog.red(msg))\n self.depth.on_terminate_cluster()",
"def kill_subprocesses(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Resumes Auto Scaling processes for an Auto Scaling group.
|
def resume_processes(self, scaling_processes=None):
return self.connection.resume_processes(self.name, scaling_processes)
|
[
"def resume(self, scaling_group):\r\n # NOTE: This is not yet implemented. The code is based on the docs,\r\n # so it should either work or be pretty close.\r\n return self._manager.resume(scaling_group)",
"def resume(self, scaling_group):\r\n uri = \"/%s/%s/resume\" % (self.uri_base, utils.get_id(scaling_group))\r\n resp, resp_body = self.api.method_post(uri)\r\n return None",
"def resume(self):\n assert self.running\n\n self._paused = False\n\n for process in self.processes:\n process.resume()",
"def aws_update_autoscaler():\r\n ami_id = aws_create_ami_from()\r\n cur_date = time.strftime('%Y%m%d', time.gmtime())\r\n lcName = 'ns11-%s' % cur_date\r\n lc = LaunchConfiguration(name=lcName, \r\n image_id=ami_id, instance_type=env.aws.get('instance_type'),\r\n key_name=env.aws.get('key_pair'), \r\n security_groups=env.aws.get('security_groups'))\r\n env.asConn.create_launch_configuration(lc)\r\n print \"Created launchConfiguration %s\" % lcName\r\n \r\n ag = AutoScalingGroup(\r\n connection=env.asConn,\r\n launch_config=lc, \r\n group_name=env.aws.get('as_group'), load_balancers=env.aws.get('balancers'),\r\n availability_zones=env.aws.get('availability_zones'))\r\n # min_size=env.aws.get('min_size'), max_size=env.aws.get('max_size'))\r\n ag.update()\r\n # env.asConn.create_auto_scaling_group(ag) \r\n print \"Added launchConfiguration %s to group %s (updated AutoScaleGroup)\" % (lcName, env.aws.get('as_group'))",
"def _add_auto_scaling(self):\n auto_scaling_group = self.fargate_service.service.auto_scale_task_count(\n min_capacity=2,\n max_capacity=10\n )\n auto_scaling_group.scale_on_cpu_utilization(\n 'CpuScaling',\n target_utilization_percent=50,\n scale_in_cooldown=core.Duration.seconds(60),\n scale_out_cooldown=core.Duration.seconds(60)\n )",
"def resume(self):\n self._call(\"resume\")",
"def restart_group(self, groupname):\n self._apply_group_func(groupname, self.restart_process)",
"def resume(self, as_policy_id):\n url = \"/scaling_policy/%s/action\" % as_policy_id\n return self._create(url, json=dict(action=\"resume\"), raw=True)",
"def resume_instance(\n self,\n request: gpdb_20160503_models.ResumeInstanceRequest,\n ) -> gpdb_20160503_models.ResumeInstanceResponse:\n runtime = util_models.RuntimeOptions()\n return self.resume_instance_with_options(request, runtime)",
"def resume(taskID):\r\n _activeScheduler.resume(taskID)",
"def resume(self, pid):\n pass",
"def resumeVpgProtection(self, vpgid):\n\n return requests.post(self.zvmip + self.endPoint + '/' + vpgid + '/resume', headers=self.headerwithkey, verify=False)",
"def resume(self):\n self.r2api.frida_continue()",
"def resume(self, group_id, topic_partitions):\n if isinstance(topic_partitions, TopicPartition):\n topic_partitions = [topic_partitions]\n self.consumers[group_id].resume(topic_partitions)",
"def __resume(self):\n # thread should be paused to resume\n if self.paused:\n # Notify so thread will wake after lock released\n self.pause_cond.notify()\n # Now release the lock\n self.pause_cond.release()\n self.paused = False\n # notify app\n to_json({\n \"resumed\": True\n })\n # user triggered pause (through play button) through GUI and self.paused is still false means\n # GA is too slow on generating the next generation, than when the user clicked play (for resume)\n # it just turns self.__pause_now to false to prevent GA from pausing.\n elif self.__pause_now:\n self.__pause_now = False",
"def resume(self):\n ret = libvirtmod.virDomainResume(self._o)\n if ret == -1: raise libvirtError ('virDomainResume() failed', dom=self)\n return ret",
"def resume_instance(self, ctxt, instance):\n self.msg_runner.resume_instance(ctxt, instance)",
"def resume(shelf=None):\n\n _act_on_guests(shelf, \"resume\")",
"def _resume_paused_producer(self) -> None:\n if self._paused_producer and self._producer:\n self._paused_producer = False\n self._producer.resumeProducing()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initializes the rule with a specified queue to push the tag changes to.
|
def __init__(self, queue: Queue):
self._queue = queue
|
[
"def __init__(self, queue):\n self.queue = queue\n super(QueueHandler, self).__init__()",
"def init_queue(self):\n empty_graph = Hybrid_structure(5, 6)\n score = self.__score(empty_graph)\n self._queue[empty_graph] = score",
"def queue(self, queue_):\n self._queue = queue_",
"def create_queue(self, queue):",
"def __setup_kombu_queue(self, config):\n configs = config[u'config']\n for item in configs:\n if item[u'group'] == u'queue':\n value = item[u'value']\n queue = value[u'queue']\n uri = value[u'uri']\n manager = RedisManager(uri)\n manager.server.set(u'_kombu.binding.%s' % queue, value)",
"def set_queue(self, queue: Dict[str, QueueItem]):\n self.updater.dispatcher.bot_data['queue'] = queue",
"def __init__(self, conf):\n self.conf = conf\n self.q_route_spec = Queue.Queue()",
"def __init__(self):\n super().__init__()\n self._binqueue = []",
"def requeue_changes(cls, queue):\n for c in sorted(cls.get_changes(), cmp=lambda c0, c1: 1 if fnmatch.fnmatch(c0, \"*mini-buildd-build*\") else -1):\n LOG.info(\"Incoming: Re-queuing: {c}\".format(c=c))\n queue.put(c)",
"def _set_queue(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=True)\"\"\",\n })\n\n self.__queue = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_queue(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=six.text_type, is_leaf=True, yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queue must be of a type compatible with leafref\"\"\",\n 'defined-type': \"leafref\",\n 'generated-type': \"\"\"YANGDynClass(base=six.text_type, is_leaf=True, yang_name=\"queue\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='leafref', is_config=False)\"\"\",\n })\n\n self.__queue = t\n if hasattr(self, '_set'):\n self._set()",
"def __init__(self, in_queue, cfg, name=None):\n super(BroadcastReassembled, self).__init__(event_stopper=mp.Event(), name=name)\n self.in_queue = in_queue\n self.yolo3_rtt = None\n self.crnn_rtt = None\n self.detections = 0\n self.current_detections = 0\n self.recognitions = 0\n self.current_recognitions = 0\n self.buffer = []\n self.oldest_broadcasted_frame = 0\n\n for key, value in cfg.items():\n setattr(self, key, value)",
"def __init__(self, queue, threshold, name=None):\n super(Flusher, self).__init__(event_stopper=td.Event(), name=name)\n self.queue = queue\n self.threshold = threshold\n self.runnable = self.flush_pipe",
"def __init__(self, *args, **kwargs):\n # Call the super class' __init__\n super().__init__(*args, **kwargs)\n\n # Set the queue's prefix\n plugin_queue.prefix = self.prefix",
"def __init__(self):\r\n try:\r\n self.queue = beanstalkc.Connection(host=Config.get('beanstalk')['address'], port=Config.get('beanstalk')['port'])\r\n except Exception, e:\r\n log.warning(\"Could not create queue.\")\r\n self.queue = None",
"def _initialize_message_queues(self):\n self._inc_mq = MessageQueue()\n self._inc_mq_b = MessageQueue()\n self._out_mq = MessageQueue()\n self._out_mq_b = MessageQueue()",
"def subscribe_trajectory_queue(\n self, trajectory_queue: AgentManagerQueue[Trajectory]\n ) -> None:\n super().subscribe_trajectory_queue(trajectory_queue)\n parsed_behavior_id = self._name_to_parsed_behavior_id[\n trajectory_queue.behavior_id\n ]\n if parsed_behavior_id.team_id == self.wrapped_trainer_team:\n # With a future multiagent trainer, this will be indexed by 'role'\n internal_trajectory_queue: AgentManagerQueue[\n Trajectory\n ] = AgentManagerQueue(parsed_behavior_id.brain_name)\n\n self._internal_trajectory_queues[\n parsed_behavior_id.brain_name\n ] = internal_trajectory_queue\n self.trainer.subscribe_trajectory_queue(internal_trajectory_queue)",
"def declare_queue(self, queue_name):\n if queue_name not in self.queues:\n self.emit_before(\"declare_queue\", queue_name)\n self.queues[queue_name] = Queue()\n self.emit_after(\"declare_queue\", queue_name)\n\n delayed_name = dq_name(queue_name)\n self.queues[delayed_name] = Queue()\n self.delay_queues.add(delayed_name)\n self.emit_after(\"declare_delay_queue\", delayed_name)",
"def __init__(self, **kwargs):\n queue = \"workflow-submission\"\n super(WorkflowSubmissionPublisher, self).__init__(\n queue,\n MQ_DEFAULT_QUEUES[queue][\"routing_key\"],\n durable=MQ_DEFAULT_QUEUES[queue][\"durable\"],\n max_priority=MQ_DEFAULT_QUEUES[queue][\"max_priority\"],\n **kwargs,\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Processes the resource with the rule. The resource will first be checked to see if the rule should be run, using 'check_condition'.
|
def process(self, resource: Resource):
    """Run this rule against *resource*.

    The resource is first screened with ``check_condition``; when it
    matches, a JSON payload containing the serialized resource and its
    tags is pushed onto the rule's queue.

    Args:
        resource: The resource to evaluate.

    Returns:
        True when the condition held and the payload was queued,
        False otherwise.
    """
    # Guard clause: bail out early when the rule does not apply.
    if not self.check_condition(resource):
        return False

    payload = json.dumps({
        "resource": resource.to_dict(),
        "tags": self.get_tags(resource),
    })
    self._queue.push(payload)
    return True
|
[
"def run_evaluate(self, instance):\n # return None if not true, or rule_status id if True (for reporting)?\n if not self.active:\n return\n\n hits = 0\n condition = None\n\n for condition in self.condition_set.all():\n result = condition.run_evaluate(instance)\n if result:\n hits += 1\n elif condition.join_condition == 'AND':\n return\n\n if hits == 0:\n return\n\n # what is the instances identity?\n identity = getattr(instance, self.instance_identifier)\n\n # check to see if action can be performed more than once\n if self.perform_action_once and self.rulestatus_set.filter(\n identity=identity,\n content_type=self.process_model).count() > 0:\n return\n\n # last condition is used to generate rule status\n rule_status, create = RuleStatus.objects.get_or_create(\n rule=self,\n condition=condition,\n content_type=self.process_model,\n object_id=instance.id,\n identity=identity\n )\n\n # Perform action - rule must have passed, and there must be an action\n # class if no action class, then obviously don't do anything, carry on\n # the ruleset sequence.\n if self.action_class:\n madule = self.action_class[:self.action_class.rindex(\".\")]\n klass = self.action_class[self.action_class.rindex(\".\") + 1:]\n action = str_to_class(madule, klass)\n\n # prepare kwargs\n parameters = self.parameter_names.split(\",\")\n values = self.parameter_values.split(\",\")\n kwargs = dict(zip(parameters, values))\n\n # add instance specific id\n kwargs[self.instance_identifier] = identity\n\n # hand over rule id, so that any rule field(s) can be used in\n # payment description\n kwargs['rule_id'] = self.id\n\n # perform action as rule passed, and no limit to\n performed_result = action.perform(**kwargs)\n ActionStatus.objects.get_or_create(\n action=self.action_class,\n rule_status=rule_status,\n performed=performed_result[:255] # avoid overflow\n )\n\n return rule_status.id",
"def process(self, x):\n match_not_found = True\n if self.before_processing:\n self.before_processing(x)\n for rule in self._rules:\n if rule.matches(x):\n rule.action(x)\n match_not_found = False\n if rule.last:\n break\n if match_not_found:\n for action in self._defaults:\n action(x)\n if self.after_processing:\n self.after_processing(x)",
"def execute_resource(self, resource=None, context=None, mode=None, handlers=False):\n # we only care about processing leaf node objects\n if self.is_collection(resource):\n return\n\n # if in handler mode we do not process the handler unless it was signaled\n if handlers and not context.has_seen_any_signal(resource.all_handles()):\n context.on_skipped(resource, is_handler=handlers)\n return\n\n # tell the callbacks we are about to process a resource\n # they may use this to print information about the resource\n context.on_resource(resource, handlers)\n\n # plan always, apply() only if not in check mode, else assume\n # the plan was executed.\n provider = self.plan(resource, context)\n assert provider is not None\n if mode == APPLY:\n self.do_apply(provider, context, handlers)\n else:\n self.do_simulate(provider, context)\n\n # if anything has changed, let the callbacks know about it\n self.signal_changes(provider=provider, resource=resource, context=context)",
"def perform_checks(self) -> None:",
"def execute_rule(self, rule, environment):\n\n if self.build_ids.check_and_add(rule, environment):\n return\n\n values = ParameterValues(\n self.parameters, environment, rule.label.path, self.loader)\n\n for dep in rule.all_dependencies:\n\n # Evaluate conditional dependency.\n if dep.when and not dep.when(values):\n continue\n\n if dep.parameters:\n if dep.parameters is REMOVE:\n # The last map is the parameters from command-line\n # and we should preserve it\n next_env = ChainMap(environment.maps[-1])\n else:\n next_env = environment.new_child()\n next_env.update(dep.parameters)\n else:\n next_env = environment\n\n self.execute_rule(self.rules[dep.label], next_env)\n\n if LOG.isEnabledFor(logging.INFO):\n if environment.maps[0] is not environment.maps[-1]:\n current_env = environment.maps[0]\n LOG.info('execute rule %s with %s', rule.label, ', '.join(\n '%s = %r' % (label, current_env[label])\n for label in sorted(current_env)\n ))\n else:\n LOG.info('execute rule %s', rule.label)\n if not self.dry_run and rule.build:\n with Context(self.loader, rule.label.path):\n rule.build(values)",
"def dispatch(self, event, context):\n\n if self.RESOURCE_PROPERTIES_SCHEMA is not None:\n validator = jsonschema.Draft4Validator(self.RESOURCE_PROPERTIES_SCHEMA)\n try:\n for key in \"ResourceProperties\", \"OldResourceProperties\":\n if key in event:\n validator.validate(event[key])\n except jsonschema.ValidationError as exc:\n physical_resource_id = event.get(\"PhysicalResourceId\", DEFAULT_PHYSICAL_RESOURCE_ID)\n return Failed(physical_resource_id, reason=unicode(exc))\n\n event_type_handler = self._event_type_handlers[event[\"RequestType\"]]\n return event_type_handler(event, context)",
"def start_rule(self, context):\n pass",
"def resource_cmd_execute(self, cmd_args):\n if cmd_args.subcommand == 'add':\n return self.cmd_invoker.resource_add(cmd_args.device_name)\n elif cmd_args.subcommand == 'remove':\n return self.cmd_invoker.resource_remove(cmd_args.device_name)\n elif cmd_args.subcommand == 'check':\n return self.cmd_invoker.resource_check(cmd_args.device_name)\n else:\n return CommandResult(1, \"Invalid resource command entered.\")",
"def testRule(self):\n rule_path = self.ruleMap['RULE_PATHS']['TEST_RULE']\n self.log.info(\"exec TEST rule on : \"+self.digitObjProperty['file'])\n\n try:\n myvalue = self.irods._ruleExec(rule_path)\n except Exception as ex:\n self.log.error(\"Could not execute a rule\")\n self.log.error(ex)\n pass",
"def check_conditions(self, part=None):\n assert part is not None, 'must specify what to check'\n\n # check the BSP itself?\n if part == 'bright_star_pipeline':\n # force redo requested?\n _force_redo = self.db_entry['pipelined'][self.name]['status']['force_redo']\n # pipeline done?\n _done = self.db_entry['pipelined'][self.name]['status']['done']\n # how many times tried?\n _num_tries = self.db_entry['pipelined'][self.name]['status']['retries']\n\n go = _force_redo or ((not _done) and (_num_tries <= self.config['misc']['max_retries']))\n\n return go\n\n # Preview generation for the results of BSP processing?\n elif part == 'bright_star_pipeline:preview':\n\n # pipeline done?\n _pipe_done = self.db_entry['pipelined'][self.name]['status']['done']\n\n # failed?\n _pipe_failed = self.db_entry['pipelined'][self.name]['classified_as'] == 'failed'\n\n # preview generated?\n _preview_done = self.db_entry['pipelined'][self.name]['preview']['done']\n\n # last_modified == pipe_last_modified?\n _outdated = abs((self.db_entry['pipelined'][self.name]['preview']['last_modified'] -\n self.db_entry['pipelined'][self.name]['last_modified']).total_seconds()) > 1.0\n\n # how many times tried?\n _num_tries = self.db_entry['pipelined'][self.name]['preview']['retries']\n\n # if self.db_entry['_id'] == '3_J1144+6946_VIC_Si_o_20170607_043349.042103':\n # print(_pipe_done, _pipe_failed, _preview_done, _outdated)\n # input('WAIT!!')\n\n # print(_pipe_done, _pipe_failed, _preview_done, _outdated)\n go = (_pipe_done and (not _pipe_failed)) and ((not _preview_done) or _outdated) \\\n and (_num_tries <= self.config['misc']['max_retries'])\n\n return go\n\n # Strehl calculation for the results of BSP processing?\n elif part == 'bright_star_pipeline:strehl':\n\n # pipeline done?\n _pipe_done = self.db_entry['pipelined'][self.name]['status']['done']\n\n # failed?\n _pipe_failed = self.db_entry['pipelined'][self.name]['classified_as'] == 'failed'\n\n # Strehl calculated?\n _strehl_done = 
self.db_entry['pipelined'][self.name]['strehl']['status']['done']\n\n # last_modified == pipe_last_modified?\n _outdated = abs((self.db_entry['pipelined'][self.name]['strehl']['last_modified'] -\n self.db_entry['pipelined'][self.name]['last_modified']).total_seconds()) > 1.0\n\n # how many times tried?\n _num_tries = self.db_entry['pipelined'][self.name]['strehl']['status']['retries']\n\n # print(_pipe_done, _pipe_failed, _preview_done, _outdated)\n go = (_pipe_done and (not _pipe_failed)) and ((not _strehl_done) or _outdated) \\\n and (_num_tries <= self.config['misc']['max_retries'])\n\n return go\n\n # Run PCA high-contrast processing pipeline?\n elif part == 'bright_star_pipeline:pca':\n\n # pipeline done?\n _pipe_done = self.db_entry['pipelined'][self.name]['status']['done']\n\n # failed?\n _pipe_failed = self.db_entry['pipelined'][self.name]['classified_as'] == 'failed'\n\n # pca done?\n _pca_done = self.db_entry['pipelined'][self.name]['pca']['status']['done']\n\n # last_modified == pipe_last_modified?\n _outdated = abs((self.db_entry['pipelined'][self.name]['pca']['last_modified'] -\n self.db_entry['pipelined'][self.name]['last_modified']).total_seconds()) > 1.0\n\n # how many times tried?\n _num_tries = self.db_entry['pipelined'][self.name]['pca']['status']['retries']\n\n # print(_pipe_done, _pipe_failed, _preview_done, _outdated)\n go = (_pipe_done and (not _pipe_failed)) and ((not _pca_done) or _outdated) \\\n and (_num_tries <= self.config['misc']['max_retries'])\n\n return go\n\n elif part == 'bright_star_pipeline:pca:preview':\n\n # pipeline done?\n _pipe_done = self.db_entry['pipelined'][self.name]['status']['done']\n\n # failed?\n _pipe_failed = self.db_entry['pipelined'][self.name]['classified_as'] == 'failed'\n\n # pca done?\n _pca_done = self.db_entry['pipelined'][self.name]['pca']['status']['done']\n\n # pca preview done?\n _pca_preview_done = self.db_entry['pipelined'][self.name]['pca']['preview']['done']\n\n # last_modified == 
pipe_last_modified? (or old DB entry)\n _outdated = 'last_modified' not in self.db_entry['pipelined'][self.name]['pca']['preview'] or \\\n (abs((self.db_entry['pipelined'][self.name]['pca']['preview']['last_modified'] -\n self.db_entry['pipelined'][self.name]['last_modified']).total_seconds()) > 1.0)\n\n # how many times tried?\n _num_tries = self.db_entry['pipelined'][self.name]['pca']['preview']['retries']\n\n go = (_pipe_done and (not _pipe_failed) and _pca_done) and ((not _pca_preview_done) or _outdated) \\\n and (_num_tries <= self.config['misc']['max_retries'])\n\n return go",
"def update_rules(self):\n print(\"Checking existing rules and subscription for (phedex) dataset %s \" %\n self.phedex_dataset)\n\n for block, sub in self.subscriptions.items():\n if 'request' not in sub:\n print(\"subscription for block %s missing 'request' field\" % block)\n sub['request'] = 'unknown'\n\n if 'group' not in sub:\n print(\"subscription for block %s missing 'group' field\" % block)\n sub['group'] = 'unknown'\n\n submd = json.dumps({'type': 'phedex_sync', 'rid': sub['request'],\n 'group': sub['group']})\n\n if block not in self.rules:\n print(\"No rule found for %s, creating one\" % block)\n self.add_rule(names=[block], rse_exp='rse='+self.rse,\n comment=submd)\n # For the moment ignoring this: ISSUE ..\n #elif submd != self.rules[block]['comments']:\n # print(\"Rule for %s has wrong comment, re-creating\" % block)\n # self.del_rule(self.rules[block]['id'])\n # self.add_rule(names=[block], rse_exp='rse='+self.rse,\n # comment=submd)\n elif self.cli.account != self.rules[block]['account']:\n print(\"Rule for %s belongs to the wrong account, modifying\" % block)\n self.update_rule(self.rules[block]['id'], {'account': self.account})\n\n for block, rule in self.rules.items():\n if block not in self.subscriptions:\n print(\"Rule for %s correspond to no subscription, deleting\" % block)\n self.del_rule(rule['id'])",
"def do_check(self):\n with self.lock:\n bV, bN = self.need_process_v, self.need_process_n\n self.need_process_v, self.need_process_n = False, False\n if bV:\n self.process_verifs() # rate_limited call (1 per second)\n if bN:\n self.process_notifs() # rate_limited call (1 per 15 seconds)",
"def determineRelevantRule(self, monitoringData, configuration):\n\n typeData = configuration.get(\"data\")\n cpuRules = configuration.get(\"CPU\")\n memRules = configuration.get(\"Mem\")\n rulesMet = []\n self.rulesMetDict = {}\n returnedValue = None\n wl = Whitelist()\n self.whitelist = wl.readWhitelist()\n\n # check if monitoring data is of type list since tcp connection monitoring data\n # is the only monitoring data that is passed as a list.\n if type(monitoringData) is list:\n connectionDataDict = {}\n monDataType = 'Connections'\n # first converts the relevant components of the connection monitoring data\n # into a dictionary.\n connectionDataDict = self.convertToDict(monitoringData)\n\n # calls the correct rules for connection monitoring data based on the configuration.yaml file.\n for monKey in connectionDataDict:\n # checks the process name isn't in the whitelist.\n if monKey.lower() not in self.whitelist:\n for key, value in configuration.items():\n if key == monDataType:\n for rule in value:\n classToCall = getattr(sys.modules[__name__], rule)\n methodToCall = getattr(classToCall, 'check')\n functionArgs = list(\n signature(methodToCall).parameters.keys())\n\n for IP in connectionDataDict[monKey]:\n tempReturnedValue = methodToCall(\n IP)\n if tempReturnedValue == True:\n returnedValue = True\n\n if(returnedValue == True):\n if monKey in self.rulesMetDict:\n self.rulesMetDict[monKey].append(rule)\n else:\n self.rulesMetDict[monKey] = [rule]\n\n print(monKey + ': [' + rule + \" returnedValue: \" + str(returnedValue)+']')\n else:\n pass\n\n return self.rulesMetDict\n\n else:\n # calls instance of rules determined by parameter set by configuration file.\n try:\n for monKey in monitoringData:\n monDataType = monitoringData.get(monKey)[0]\n monDataValues = monitoringData.get(monKey)\n\n for key, value in configuration.items():\n if key == monDataType:\n for rule in value:\n classToCall = getattr(sys.modules[__name__], rule)\n methodToCall = 
getattr(classToCall, 'check')\n functionArgs = list(signature(methodToCall).parameters.keys())\n\n if \"process\" in functionArgs:\n returnedValue = methodToCall(monKey)\n else:\n returnedValue = methodToCall(monDataValues)\n\n if(returnedValue == True):\n if monKey in self.rulesMetDict:\n self.rulesMetDict[monKey].append(rule)\n else:\n self.rulesMetDict[monKey] = [rule]\n\n print(monKey + ': [' + rule + \" returnedValue: \" + str(returnedValue)+']')\n return self.rulesMetDict\n except Exception as e:\n return self.rulesMetDict",
"def __init__(__self__,\n resource_name: str,\n args: RuleArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def add_rule(self, action_type, role, resource):\n\n if not self.validate(role, resource):\n return\n permission = (role, resource)\n if permission not in self.ACTION_DICT.get(action_type):\n self.ACTION_DICT.get(action_type).append(permission)",
"def _child_running(self, child_resource_id):\n\n with self._children_being_validated_lock:\n if child_resource_id in self._children_being_validated:\n return\n\n if child_resource_id in self._ia_clients and \\\n self._ia_clients[child_resource_id] == _INVALIDATED_CHILD:\n\n self._children_being_validated.add(child_resource_id)\n log.info(\"%r: OOIION-1077 starting _validate_child_greenlet for \"\n \"instrument: %r\", self._platform_id, child_resource_id)\n Greenlet.spawn(self._validate_child_greenlet, child_resource_id, True)\n return\n\n if child_resource_id in self._pa_clients and \\\n self._pa_clients[child_resource_id] == _INVALIDATED_CHILD:\n\n self._children_being_validated.add(child_resource_id)\n log.info(\"%r: OOIION-1077 starting _validate_child_greenlet for \"\n \"platform: %r\", self._platform_id, child_resource_id)\n Greenlet.spawn(self._validate_child_greenlet, child_resource_id, False)\n return\n\n if log.isEnabledFor(logging.TRACE): # pragma: no cover\n if not child_resource_id in self._ia_clients and \\\n not child_resource_id in self._ia_clients:\n log.trace(\"%r: OOIION-1077 _child_running: %r is not a direct child\",\n self._platform_id, child_resource_id)",
"def do_execute(self, line):\n if self.active_module == None or \\\n (self.active_payload == None and not self.modules[self.active_module].payload_custom) or \\\n (self.active_device == None and self.modules[self.active_module].require_device()):\n various.print_warning(\"Please set all components before executing. See help use\")\n return\n dev = action_manager.device_map[self.active_device]\n mod = self.modules[self.active_module]\n if self.modules[self.active_module].payload_custom:\n pay = None\n else:\n pay = self.payloads[self.active_payload]\n e = dev.validate()\n if e != \"\":\n various.print_warning(\"Validation error in device: %s\" % e)\n return\n if not self.modules[self.active_module].payload_custom:\n e = pay.validate()\n if pay != None and e != \"\":\n various.print_warning(\"Validation error in payload: %s\" % e)\n return\n # payload and device is ready let's set payload and make validation\n if not self.modules[self.active_module].payload_custom:\n pay_c = pay.get_payload()\n mod.payload = pay_c\n else:\n pay_c = self.modules[self.active_module].payload\n # and validate in module\n e = mod.validate()\n if e != \"\":\n various.print_warning(\"Validation error in module: %s\" % e)\n return\n if self.modules[self.active_module].payload_custom or \\\n pay.do_execute():\n e = mod.execute(pay_c, dev)\n if e != \"\":\n various.print_warning(\"Error executing module: %s\" % e)\n return",
"def _checks(resource: t.Any) -> None:\n\n # check if endpoint exists already\n check = [i for i, _ in self.all_endpoints] \n if (endpoint in check):\n raise EndpointExistsException(f\"Endpoint \\\"{endpoint}\\\" already exists\")\n \n # check that resource is not None, if it is, did not return class\n if (resource is None):\n raise TypeError(\"function that the decorator is above must return a class\")\n \n # check if resource is subclass of ConnectionResource\n if (not issubclass(resource, ConnectionResource)):\n raise TypeError(\"resource has to extend com_server.ConnectionResource\")\n \n # check if resource name is taken, if so, change it (flask_restful interperets duplicate names as multiple endpoints)\n names = [i.__name__ for _, i in self.all_endpoints]\n if (resource.__name__ in names):\n s = f\"{resource.__name__}\"\n\n while (s in names):\n # append underscore until no matching \n s += \"_\"\n \n resource.__name__ = s",
"def authorize(self, actor, action, resource, *, check_read=True):\n if not self.policy.query_rule_once(\"allow\", actor, action, resource):\n is_not_found = False\n if action == self.read_action:\n is_not_found = True\n elif check_read and not self.policy.query_rule_once(\n \"allow\", actor, self.read_action, resource\n ):\n is_not_found = True\n raise self._get_error(is_not_found)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Measures the performance of bigint bitwise binops
|
def bench_ak_bitwise_binops(benchmark, op):
    """Measures the performance of bigint bitwise binops (and / or / shift)."""
    cfg = ak.get_config()
    num_elems = pytest.prob_size * cfg["numLocales"]

    def _random_bigint():
        # Two uint64 limbs combined into one bigint array; with a fixed
        # seed both randint calls are deterministic.
        lo = ak.randint(0, 2**32, num_elems, dtype=ak.uint64, seed=pytest.seed)
        hi = ak.randint(0, 2**32, num_elems, dtype=ak.uint64, seed=pytest.seed)
        return ak.bigint_from_uint_arrays([lo, hi], max_bits=pytest.max_bits)

    a = _random_bigint()
    b = _random_bigint()

    # Bytes per bigint array: N * 16 since it's made of 2 uint64 arrays,
    # but if max_bits is in [0, 64] it's essentially a single uint64 array.
    if pytest.max_bits != -1 and pytest.max_bits <= 64:
        nbytes = num_elems * 8
    else:
        nbytes = num_elems * 8 * 2

    # Dispatch table: op name -> (callable, positional args).
    runners = {
        "and": (_perform_and_binop, [a, b]),
        "or": (_perform_or_binop, [a, b]),
        "shift": (_perform_shift_binop, [a]),
    }
    if op in runners:
        fn, fn_args = runners[op]
        benchmark.pedantic(fn, args=fn_args, rounds=pytest.trials)

    benchmark.extra_info["description"] = "Measures the performance of bigint bitwise binops"
    benchmark.extra_info["problem_size"] = pytest.prob_size
    benchmark.extra_info["transfer_rate"] = "{:.4f} GiB/sec".format(
        (nbytes / benchmark.stats["mean"]) / 2**30
    )
    benchmark.extra_info["max_bit"] = pytest.max_bits  # useful when looking at bigint
|
[
"def mul_bin(b1,b2):\r\n\r\n n1 = bin_to_dec(b1) \r\n n2 = bin_to_dec(b2)\r\n\r\n b = dec_to_bin(n1*n2)\r\n\r\n return b",
"def test_nth_bit_set():\n for _ in range(0, 10000):\n number = random.randint(0, 100000000)\n bits = bin(number)[2:]\n for i, b in enumerate(reversed(bits)):\n assert has_nth_bit_set(number, i) == (int(b) == 1)",
"def _get_bits_spent(self, random_objects: List[Any]) -> int:",
"def _2bit_inner_loop(in_: BytesLike, out: MutableBytesLike, scale: int) -> None:\n for n, val in enumerate(in_):\n out[4 * n] = int((val >> 6) * scale)\n out[4 * n + 1] = int(((val >> 4) & 0b11) * scale)\n out[4 * n + 2] = int(((val >> 2) & 0b11) * scale)\n out[4 * n + 3] = int((val & 0b11) * scale)",
"def test_conditional_superop_64bit(self, method, device):\n shots = 100\n cases = ref_conditionals.conditional_cases_64bit()\n backend = self.backend(method=method, device=device)\n backend.set_options(max_parallel_experiments=0)\n circuits = ref_conditionals.conditional_circuits_nbit(\n 64, cases, final_measure=True, conditional_type=\"superop\"\n )\n targets = ref_conditionals.condtional_counts_nbit(64, cases, shots, hex_counts=False)\n\n result = backend.run(circuits, shots=shots).result()\n self.assertSuccess(result)\n self.compare_counts(result, circuits, targets, hex_counts=False, delta=0)",
"def performance():\n n = 1024\n while n < 50000000:\n sorted = range(n)\n now = time()\n\n # by using -1 the worst case scenario is evaluated\n #contains(sorted, -1)\n binary_search_contains(sorted, -1)\n\n\n done = time()\n\n print n, (done-now)*1000\n n *= 2",
"def power_digit_sum():\n\ts = datetime.utcnow()\n\tresult = 0\n\tn = 2**1000\n\tfor i in str(n):\n\t\tresult += int(i)\n\te = datetime.utcnow()\n\tprint(\"time: \" + str(e - s))\n\treturn Helpers.pr(result)",
"def _get_big_op_bitcount_expr():\n expr = delay_model_pb2.DelayExpression()\n _set_max_expression(expr)\n _set_operand_bit_count_expression_factor(expr.lhs_expression, 0)\n _set_operand_bit_count_expression_factor(expr.rhs_expression, 1)\n return expr",
"def Solution48():\n return sum(x**x for x in range(1, 1001)) % 10000000000",
"def bitness():\n pass",
"def countBits(x):\n # return bin(n).count(\"1\")\n n, res = \"\", 0\n while x > 0:\n y = str(x % 2)\n res += 1 if y == '1' else 0\n n = y + n\n x = int(x / 2)\n return res",
"def cbin(n):\n if n < 0:\n n = UMAX + n\n return '{0:0>32}'.format(bin(n)[2:])",
"def __mul__(self, other):\n from datatypes.integers.UInt import UInt\n # if (isinstance(other, UInt8)):\n # arrayX = self.bits\n # arrayY = other.bits\n # return UInt8(bits = multRecurrence(arrayX, arrayY))\n if (isinstance(other, UInt8)):\n result = []\n for i in xrange(7, -1, -1):\n arrayTemp = []\n result += [arrayTemp]\n for k in xrange (0, 7-i):\n result[7-i].insert(0, PlainBit(False))\n for j in xrange(7, -1, -1):\n result[7-i].insert(0, self.bits[i] * other.bits[j])\n\n for l in xrange(0, 8):\n while (len(result[l]) < 16):\n result[l].insert(0, PlainBit(False))\n\n\n # We split results in two array of size 8. The form is (resultXb append resultXa) . Where X is the line number\n result1a = UInt8(bits=result[0][8:])\n result1b = UInt8(bits=result[0][:8])\n result1UInt = UInt(ints = [result1b, result1a])\n\n result2a = UInt8(bits=result[1][8:])\n result2b = UInt8(bits=result[1][:8])\n result2UInt = UInt(ints = [result2b, result2a])\n\n result3a = UInt8(bits=result[2][8:])\n result3b = UInt8(bits=result[2][:8])\n result3UInt = UInt(ints = [result3b, result3a])\n\n result4a = UInt8(bits=result[3][8:])\n result4b = UInt8(bits=result[3][:8])\n result4UInt = UInt(ints = [result4b, result4a])\n\n result5a = UInt8(bits=result[4][8:])\n result5b = UInt8(bits=result[4][:8])\n result5UInt = UInt(ints = [result5b, result5a])\n\n result6a = UInt8(bits=result[5][8:])\n result6b = UInt8(bits=result[5][:8])\n result6UInt = UInt(ints = [result6b, result6a])\n\n result7a = UInt8(bits=result[6][8:])\n result7b = UInt8(bits=result[6][:8])\n result7UInt = UInt(ints = [result7b, result7a])\n\n result8a = UInt8(bits=result[7][8:])\n result8b = UInt8(bits=result[7][:8])\n result8UInt = UInt(ints = [result8b, result8a])\n\n resultFinal = result1UInt + result2UInt + result3UInt + result4UInt + result5UInt + result6UInt + result7UInt + result8UInt\n\n return resultFinal",
"def bin_fast(self, x, y, statistic='mean'):\n binned,_,_ = binned_statistic(x, y, statistic=statistic, bins=self.bin_edges)\n return binned",
"def _count_ones(byte):\n return sum([1 for i in (1, 2, 4, 8, 16, 32, 64, 128) if i & byte])",
"def count_one_bits(integer: int) -> int:\n return sum(iter_bits(integer))",
"def num_bits(n):\n return math.floor(math.log2(abs(N))) + 1",
"def Solution16():\n return sum_digits(2**1000)",
"def convert_bit_index(x):\n if x == 666666666:#if x is a non data value\n return 255\n x_string = str(x)\n sum = 0\n for i in range(1,6):\n if str(i) in x_string:\n sum += 2**i\n return sum"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Decodes the given one-hot vector into its value.
|
def decode(self, one_hot_vector):
    """Decode a one-hot vector into its tag value.

    Returns the tag at the position of the first entry equal to 1, or
    None when no entry is set (preserves the original fall-through
    behavior).

    Args:
        one_hot_vector: Sequence of 0/1 entries, aligned with self.tags.

    Returns:
        The corresponding tag, or None if the vector contains no 1.
    """
    # enumerate replaces the original manual index counter.
    for index, entry in enumerate(one_hot_vector):
        if entry == 1:
            return self.tags[index]
    return None
|
[
"def decode(self, one_hot_vector):\n # TODO: Implement this method\n for i in range(len(self.tags)):\n if one_hot_vector[i] == 1:\n return self.tags[i]",
"def reverse_one_hot_vector_encoding(self,\n df_one_hot_vector:pd.DataFrame=None):\n\n assert(isinstance(df_one_hot_vector, pd.DataFrame))\n\n colnames = self.one_hot_vector_category_column_names\n\n df_categorical = None\n for colname in colnames:\n # create a dataframe that only contains the columns that were created by\n # the one_hot vector application\n df_only_one_hot_vector_columns = df_one_hot_vector.filter(regex=(f'^{colname}_')).copy()\n if df_only_one_hot_vector_columns.shape[1] == 0:\n # no column has been found\n break\n # rename the one hot vector columns such that they only contain the values of the original categorical\n # variable\n df_only_one_hot_vector_columns.rename(columns=lambda x: re.sub(f'^{colname}_','',x),inplace=True)\n\n\n # create a series that contains the categorical variables\n series_with_categorical_variable = df_only_one_hot_vector_columns.idxmax(axis=1)\n # convert the series to a dataframe\n df_with_categorical_variable = series_with_categorical_variable.to_frame()\n # rename the column name to the original column name\n df_with_categorical_variable.columns=[colname]\n\n # drop the one hot vector columns from the dataframe\n df_everything_except_one_hot_vector_columns = df_one_hot_vector.filter(regex=(f'^(?!^{colname}_)')).copy()\n\n # join the two to yield the subpart of the dataframe that is categorical for the colname\n df_categorical_subpart = pd.concat(\n [df_with_categorical_variable, df_everything_except_one_hot_vector_columns], axis=1)\n\n if df_categorical is None:\n df_categorical = df_categorical_subpart\n else:\n df_categorical = pd.concat([df_categorical, df_categorical_subpart],axis=1)\n\n\n return df_categorical",
"def to_one_hot(vector, one_hot_size):\n import numpy as np\n squeezed_vector = np.squeeze(vector, axis=-1)\n\n one_hot = np.zeros((squeezed_vector.size, one_hot_size))\n\n one_hot[np.arange(squeezed_vector.size), squeezed_vector] = 1\n\n return one_hot",
"def one_hot_decoding(image):\n np_image = np.array(image)\n im_shape = np_image.shape\n phases = im_shape[1]\n res = np.zeros([im_shape[0]] + list(im_shape[2:]))\n\n # the assumption is that each pixel has exactly one 1 in its phases\n # and 0 in all other phases:\n for i in range(phases):\n if i == 0:\n continue # the res is already 0 in all places..\n phase_image = np_image[:, i, ...]\n res[phase_image == 1] = i\n return res",
"def one_hot_encode(vec_dim, index):\n out = torch.zeros(vec_dim)\n out[index] = 1\n return out",
"def onehot(self,sym):\n return self.constantVector(sym,self.db.onehot(sym))",
"def one_hot_vec(label):\r\n\tvec = np.zeros(10)\r\n\tvec[label] = 1\r\n\treturn vec",
"def element_from_one_hot(solver, values, indicators, max_val):\n H = solver.NumVar(lb=0)\n\n for ind, Hi in zip(indicators, values):\n solver.Add(H >= Hi - max_val * (1 - ind))\n solver.Add(H <= Hi + max_val * (1 - ind))\n\n return H",
"def onehot_to_index(onehot_seq):\n return np.where(onehot_seq)[1]",
"def index_to_one_hot(index):\n\tvector = np.zeros(64, dtype = float)\n\tvector[index] = 1.\n\treturn vector",
"def one_hot_encode(digit):\n y = np.array([0] * 10)\n y[digit] = 1\n return y",
"def one_hot_single_value(cur_val, total_vals):\n x = [0] * total_vals\n x[cur_val] = 1\n return x",
"def apply_one_hot_vector_encoding(self,\n df_categorical:pd.DataFrame=None):\n\n assert(isinstance(df_categorical, pd.DataFrame))\n # apply one hot vector enncoding only to colnames in this list\n colnames = self.one_hot_vector_category_column_names\n df_one_hot_vector = pd.get_dummies(df_categorical, columns=colnames)\n\n\n return df_one_hot_vector",
"def one_hot(indices, on_value, off_value, depth, axis, dtype):\n return cpp.one_hot(indices, on_value, off_value, depth, axis, dtype)",
"def to_onehot(c):\r\n onehot = torch.zeros((1, 1000))\r\n onehot[:, c] = 1.0\r\n return onehot",
"def get_label_one_hot(example):\n one_hot_vector = np.zeros(NUM_CLASSES)\n np.put(one_hot_vector, get_label(example), 1)\n return one_hot_vector",
"def one_hot_encoding(class_list, num_classes):\r\n\r\n # Returns true for the class index, false otherwise\r\n booleans = (np.arange(num_classes) == class_list[:, None])\r\n\r\n # Converts all false entries to 0, and all true entries to 1\r\n encoded = booleans.astype(float)\r\n return encoded",
"def one_hot_encoder(y):\n\n letter = np.zeros((10, 1))\n letter[int(y)] = 1\n return letter",
"def oneHot(lentgh, hot):\n vec = np.zeros((1, lentgh))\n vec[0, hot] = 1\n return vec"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute the md5 hash of a file.
|
def _get_file_md5sum(file_name):
hash_obj = hashlib.md5()
with open(file_name, 'rb') as f:
hash_obj.update(f.read())
return hash_obj.hexdigest().encode('utf-8')
|
[
"def filehash(file):\n hasher = hashlib.md5()\n f = open(file, 'rb')\n buf = f.read()\n hasher.update(buf)\n return hasher.hexdigest()",
"def compute_md5(file):\n md5 = hashlib.md5()\n while True:\n buf = file.read(8192)\n if not buf:\n break\n md5.update(buf)\n return md5",
"def calcFileMd5sum(filename): \n\n m = hashlib.md5()\n\n # Read file in as 128 byte chunks\n with open(filename) as f: m.update(f.read(128))\n \n return m.hexdigest()",
"def calc_md5( path_filename ):\n hash_md5 = hashlib.md5()\n with open( path_filename , \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()",
"def calculate_md5_checksum(filename):\n\n length = io.DEFAULT_BUFFER_SIZE\n md5 = hashlib.md5()\n\n with io.open(filename, mode=\"rb\") as fd:\n for chunk in iter(lambda: fd.read(length), b''):\n md5.update(chunk)\n\n return md5.hexdigest()",
"def checksum_md5 (filename) :\n fname = filename\n block_size = 0x10000\n fd = open(fname, \"rb\")\n try:\n block = [ fd.read(block_size) ]\n while len(block[-1]) > 0 :\n block.append ( fd.read(block_size) )\n contents = block\n zero = hashlib.md5()\n i = 0 \n for el in contents :\n i += 1\n zero.update( el )\n m = zero\n return m.hexdigest()\n finally:\n fd.close()\n return None",
"def file_md5(filename):\r\n file_o = read_file(filename)\r\n file_str = file_o.read()\r\n file_o.close()\r\n return string_md5(file_str)",
"def md5sum_file(filename):\n import hashlib\n \n infile = open(filename, 'rb')\n content = infile.read()\n infile.close()\n m = hashlib.md5() \n m.update(content)\n md5 = m.hexdigest() # now the md5 variable contains the MD5 sum\n \n return md5",
"def MD5Sum(klass, filename):\n return hashlib.md5(path(filename).text()).hexdigest()[:8]",
"def md5_filelike(filelike):\n m = hashlib.md5()\n while True:\n s = filelike.read()\n if len(s) == 0:\n break\n else:\n m.update(s)\n return m.hexdigest()",
"def compute_md5_sum(self, resource: GenomicResource, filename: str) -> str:\r\n logger.debug(\r\n \"compute md5sum for %s in %s\", filename, resource.resource_id)\r\n\r\n with self.open_raw_file(resource, filename, \"rb\") as infile:\r\n md5_hash = hashlib.md5()\r\n while chunk := infile.read(self.CHUNK_SIZE):\r\n md5_hash.update(chunk)\r\n return md5_hash.hexdigest()",
"def compute_file_hash(file_path, alg='md5'):\n if alg == 'md5':\n md5_obj = hashlib.md5()\n block_size = 65536\n # read chunk by chunk for big file\n with open(file_path, 'r+b') as f:\n for block in iter(lambda: f.read(block_size), \"\"):\n md5_obj.update(block)\n local_md5 = md5_obj.hexdigest()\n file_hash = local_md5\n\n else:\n raise NotImplementedError(\"ALGORITHM {0} NOT IMPLEMENTED!\".format(alg))\n return file_hash",
"def md5sum():\r\n hashSum = None\r\n try:\r\n # Open as read binary\r\n config = _ConfigFile._open('rb', yaml=False)\r\n\r\n # pipe contents of the file through\r\n hashSum = md5(config).hexdigest()\r\n except:\r\n hashSum = 'none'\r\n\r\n return hashSum",
"def _get_md5sum(self, fpath):\n try:\n current_md5 = hashlib.md5()\n if isinstance(fpath, six.string_types) and os.path.exists(fpath):\n with open(fpath, \"rb\") as fh:\n for chunk in self._read_chunks(fh):\n current_md5.update(chunk)\n\n elif (fpath.__class__.__name__ in [\"StringIO\", \"StringO\"] or\n isinstance(fpath, IOBase)):\n for chunk in self._read_chunks(fpath):\n current_md5.update(chunk)\n else:\n return \"\"\n return current_md5.hexdigest()\n except Exception:\n msg = (\"Failed to calculate the image's md5sum\")\n LOG.error(msg)\n raise exception.SDKImageOperationError(rs=3)",
"def md5_checksum(self) -> str:\n file_hash = FileHash(hashlib.md5())\n file_hash.add_file(self.archive_file)\n return base64.b64encode(file_hash.digest).decode()",
"def md5sum(str_or_file):\r\n md5_hash = \"?\"\r\n if os.path.isfile(str_or_file):\r\n md5_hash = file_md5(str_or_file)\r\n else:\r\n md5_hash = string_md5(str_or_file)\r\n return md5_hash",
"def md5_for_file(f, block_size=2**20):\n m = hashlib.md5()\n with open(f , \"rb\" ) as f:\n while True:\n buf = f.read(block_size)\n if not buf:\n break\n m.update( buf )\n return m.hexdigest()",
"def md5files(files):\n m = hashlib.md5()\n for key, path in sorted(files, key=lambda x: x[0]):\n m.update(six.b(key))\n if os.path.isdir(path):\n m.update(md5files([\n (os.path.join(key, filename), os.path.join(path, filename))\n for filename in os.listdir(path)\n if not filename.startswith('.')]))\n else:\n with open(path, 'rb') as f:\n m.update(f.read())\n return m.hexdigest()",
"def hash_file(path):\n\n md5_hash = hashlib.md5()\n sha256_hash = hashlib.sha256()\n with open(path, 'rb') as f:\n while True:\n data = f.read(BUF_SIZE)\n if not data:\n break\n md5_hash.update(data)\n sha256_hash.update(data)\n return md5_hash.hexdigest(), sha256_hash.hexdigest()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Mark old IDs as inactive; ensure that new ID users have the same id. Used when a user creates multiple accounts and only wants to keep one.
|
def deactivate_user(new_id: int, old_ids: list[int]):
# deactivate old user ids, and reassign assessments for current user
new_user = HAWCUser.objects.get(id=new_id)
for old_user in HAWCUser.objects.filter(id__in=old_ids):
for assessment in old_user.assessment_pms.all():
if assessment not in new_user.assessment_pms.all():
new_user.assessment_pms.add(assessment)
old_user.assessment_pms.clear()
for assessment in old_user.assessment_teams.all():
if assessment not in new_user.assessment_teams.all():
new_user.assessment_teams.add(assessment)
old_user.assessment_teams.clear()
for assessment in old_user.assessment_reviewers.all():
if assessment not in new_user.assessment_reviewers.all():
new_user.assessment_reviewers.add(assessment)
old_user.assessment_reviewers.clear()
old_user.is_active = False
old_user.save()
|
[
"def set_as_inactive(self):\n with transaction.atomic():\n self.is_member = False\n self.is_secretary = False\n self.is_treasurer = False\n self.is_president = False\n self.is_inactive = True",
"def update_ids(self):\n self.uid += 1",
"def invalidate_previous_tokens(sender, instance, created, **kwargs):\n\n if instance.user.pk in api_settings.PASSWORDLESS_DEMO_USERS.keys():\n return\n\n if isinstance(instance, CallbackToken):\n CallbackToken.objects.active().filter(user=instance.user, type=instance.type).exclude(id=instance.id).update(is_active=False)",
"def set_active_admission(self, hadm_id):\n if not self.active_adm_id is None:\n for attr_key in self.temp_attr_map[self.active_adm_id].keys():\n self.attributes.pop(attr_key, None)\n for (attr_key,val) in self.temp_attr_map[hadm_id].iteritems():\n self.add_attr(attr_key,val)\n\n old_adm = self.active_adm_id\n self.active_adm_id = hadm_id\n return old_adm",
"def is_identity_previously_claimed(self):\n # The settings override to allow previously claimed IDs to be claimed again.\n # Should only work on testing.\n if settings.GIDX_ALLOW_PREVIOUSLY_CLAIMED_ID:\n logger.warning('Allowing previously claimed ID to be claimed again.')\n return False\n\n return 'ID-EX' in self.json['ReasonCodes']",
"def reset_id(cls):\n cls.id_index = 0",
"def reclaim_id(self,object_id):\n self.recycled_ids.add(object_id)\n return self.recycled_ids",
"def test_inactive_users_are_not_counted_in_active(self):\n this_user = self.users[0]\n this_user.is_active = False\n this_user.save()\n self.assertTrue(UserProfile.active.count() == User.objects.count() - 1)",
"def create_inactive_account(self, username, email, firstname, lastname, idnumber):\n return self.command('create_account', username, email, firstname, lastname, idnumber)",
"def bulk_expire(cls, older_than):\n cls.objects.filter(status=cls.InviteStatus.SENT, sent_at__lt=older_than).update(\n status=cls.InviteStatus.EXPIRED\n )",
"def setTournamentAsActive(id):\n\n with get_cursor() as cursor:\n cursor.execute(\"UPDATE tournaments SET active = 0\")\n cursor.execute(\"UPDATE tournaments SET active = 1 where id = %s\", (id,))",
"def participants_deregister(id):\n query = \"UPDATE user SET registered=0 WHERE user_id='{0}'\".format(id)\n connection = app.config[\"PYMYSQL_CONNECTION\"]\n\n # submit query and retrieve values\n with connection.cursor() as cursor:\n cursor.execute(query)\n\n return \"done.\", 200",
"def _make_alert_active(self, alert_id: str):\n alert = self.pending.pop(alert_id)\n alert[\"active\"] = True\n self.active[alert_id] = alert\n self.alerts_cache[\"pending\"] = self.pending\n self.alerts_cache.store()\n # self.alerts_cache.update_yaml_file(\"pending\", value=self.pending, final=True)",
"def inactive_lost_accounts_last_month(self):\n if not hasattr(self, '_inactive_lost_accounts_last_month'):\n accounts = []\n thirty_one_days_ago = datetime.datetime.now() - datetime.timedelta(31)\n events = LifecycleEvent.objects.filter(\n Q(account__in=self.accounts, type=3, date_created__gte=thirty_one_days_ago) | Q(\n account__in=self.accounts, type=5, date_created__gte=thirty_one_days_ago))\n\n for event in events:\n if event.account not in accounts and event.account.status != 1:\n accounts.append(event.account)\n self._inactive_lost_accounts_last_month = accounts\n\n return self._inactive_lost_accounts_last_month",
"def flash_id_led(self):\r\n logging.info('Flashing ID LED')\r\n\r\n if self.live:\r\n for i in range(5):\r\n self._id_led.on()\r\n time.sleep(0.1)\r\n self._id_led.off()\r\n time.sleep(0.1)",
"def setLastId(self):\n\t\tself.lastId = self.cursor.fetchone()[0]",
"def assign_to_me(self):\n for ticket in self:\n if ticket.assigned_user_id != self.env.user:\n ticket.write({'assigned_user_id': self.env.user.id})",
"def mark_as_unread(self, message_ids: List[int]):\n self.imap.remove_flags(message_ids, [SEEN])",
"def create_inactive_user(self, form):\r\n new_user = form.save(commit=False)\r\n new_user.is_active = False\r\n new_user.save()\r\n\r\n self.send_activation_email(new_user)\r\n\r\n return new_user"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generate report on the physical structure of the target SpiNNaker \ machine.
|
def generate_machine_report(report_directory, machine, connections):
file_name = report_directory + os.sep + "machine_structure.rpt"
f_machine_struct = None
try:
f_machine_struct = open(file_name, "w")
except IOError:
logger.error("Generate_placement_reports: Can't open file {} for "
"writing.".format(file_name))
f_machine_struct.write("\t\tTarget SpiNNaker Machine Structure\n")
f_machine_struct.write("\t\t==================================\n\n")
time_date_string = time.strftime("%c")
f_machine_struct.write("Generated: %s" % time_date_string)
f_machine_struct.write(" for target machine '{}'".format(connections))
f_machine_struct.write("\n\n")
x_dim = machine.max_chip_x + 1
y_dim = machine.max_chip_y + 1
f_machine_struct.write("Machine dimensions (in chips) x : {} y : {}\n\n"
.format(x_dim, y_dim))
# TODO: Add further details on the target machine.
f_machine_struct.write("\t\tMachine router information\n")
f_machine_struct.write("\t\t==========================\n\n")
chips = machine.chips
for x in range(machine.max_chip_x + 1):
for y in range(machine.max_chip_y + 1):
chip = machine.get_chip_at(x, y)
if chip:
f_machine_struct.write("Information for chip {}:{}\n"
.format(chip.x, chip.y))
f_machine_struct.write(
"Neighbouring chips \n{}\n"
.format(chip.router.get_neighbouring_chips_coords()))
f_machine_struct.write("Router list of links for this chip"
" are: \n")
for link in chip.router.links:
f_machine_struct.write("\t{}\n".format(link))
f_machine_struct.write("\t\t==========================\n\n")
# Close file:
f_machine_struct.close()
|
[
"def writeSegmentDetailsKml(outPath,singleSimulation,nodes):",
"def generate_report(self, report_path: str):\n\n # Dictionary mapping type names to facts. Each type name is mapped\n # to a dictionary which maps sources to a list of facts. This makes\n # organizing the output report easier.\n report_data: Dict[str, Dict[str, List[pwncat.db.Fact]]] = {}\n system_details = []\n\n try:\n # Grab hostname\n hostname = pwncat.victim.enumerate.first(\"system.hostname\").data\n system_details.append([\"Hostname\", util.escape_markdown(hostname)])\n except ValueError:\n hostname = \"[unknown-hostname]\"\n\n # Not provided by enumerate, but natively known due to our connection\n system_details.append(\n [\"Primary Address\", util.escape_markdown(pwncat.victim.host.ip)]\n )\n system_details.append(\n [\"Derived Hash\", util.escape_markdown(pwncat.victim.host.hash)]\n )\n\n try:\n # Grab distribution\n distro = pwncat.victim.enumerate.first(\"system.distro\").data\n system_details.append(\n [\n \"Distribution\",\n util.escape_markdown(\n f\"{distro.name} ({distro.ident}) {distro.version}\"\n ),\n ]\n )\n except ValueError:\n pass\n\n try:\n # Grab the architecture\n arch = pwncat.victim.enumerate.first(\"system.arch\").data\n system_details.append([\"Architecture\", util.escape_markdown(arch.arch)])\n except ValueError:\n pass\n\n try:\n # Grab kernel version\n kernel = pwncat.victim.enumerate.first(\"system.kernel.version\").data\n system_details.append(\n [\n \"Kernel\",\n util.escape_markdown(\n f\"Linux Kernel {kernel.major}.{kernel.minor}.{kernel.patch}-{kernel.abi}\"\n ),\n ]\n )\n except ValueError:\n pass\n\n try:\n # Grab SELinux State\n selinux = pwncat.victim.enumerate.first(\"system.selinux\").data\n system_details.append([\"SELinux\", util.escape_markdown(selinux.state)])\n except ValueError:\n pass\n\n try:\n # Grab ASLR State\n aslr = pwncat.victim.enumerate.first(\"system.aslr\").data\n system_details.append(\n [\"ASLR\", \"disabled\" if aslr.state == 0 else \"enabled\"]\n )\n except ValueError:\n pass\n\n try:\n # Grab 
init system\n init = pwncat.victim.enumerate.first(\"system.init\").data\n system_details.append([\"Init\", util.escape_markdown(str(init.init))])\n except ValueError:\n pass\n\n try:\n # Check if we are in a container\n container = pwncat.victim.enumerate.first(\"system.container\").data\n system_details.append([\"Container\", util.escape_markdown(container.type)])\n except ValueError:\n pass\n\n # Build the table writer for the main section\n table_writer = MarkdownTableWriter()\n table_writer.headers = [\"Property\", \"Value\"]\n table_writer.column_styles = [\n pytablewriter.style.Style(align=\"right\"),\n pytablewriter.style.Style(align=\"center\"),\n ]\n table_writer.value_matrix = system_details\n table_writer.margin = 1\n\n # Note enumeration data we don't need anymore. These are handled above\n # in the system_details table which is output with the table_writer.\n ignore_types = [\n \"system.hostname\",\n \"system.kernel.version\",\n \"system.distro\",\n \"system.init\",\n \"system.arch\",\n \"system.aslr\",\n \"system.container\",\n ]\n\n # This is the list of known enumeration types that we want to\n # happen first in this order. Other types will still be output\n # but will be output in an arbitrary order following this list\n ordered_types = [\n # Sudo privileges\n \"sudo\",\n # Possible kernel exploits - very important\n \"system.kernel.exploit\",\n # Enumerated user passwords - very important\n \"system.user.password\",\n # Enumerated possible user private keys - very important\n \"system.user.private_key\",\n # Directories in our path that are writable\n \"writable_path\",\n ]\n\n # These types are very noisy. They are important for full enumeration,\n # but are better suited for the end of the list. These are output last\n # no matter what in this order.\n noisy_types = [\n # System services. There's normally a lot of these\n \"system.service\",\n # Installed packages. 
There's *always* a lot of these\n \"system.package\",\n ]\n\n with Progress(\n \"enumerating report data\",\n \"•\",\n \"[cyan]{task.fields[status]}\",\n transient=True,\n console=console,\n ) as progress:\n task = progress.add_task(\"\", status=\"initializing\")\n for fact in pwncat.victim.enumerate():\n progress.update(task, status=str(fact.data))\n if fact.type in ignore_types:\n continue\n if fact.type not in report_data:\n report_data[fact.type] = {}\n if fact.source not in report_data[fact.type]:\n report_data[fact.type][fact.source] = []\n report_data[fact.type][fact.source].append(fact)\n\n try:\n with open(report_path, \"w\") as filp:\n filp.write(f\"# {hostname} - {pwncat.victim.host.ip}\\n\\n\")\n\n # Write the system info table\n table_writer.dump(filp, close_after_write=False)\n filp.write(\"\\n\")\n\n # output ordered types first\n for typ in ordered_types:\n if typ not in report_data:\n continue\n self.render_section(filp, typ, report_data[typ])\n\n # output everything that's not a ordered or noisy type\n for typ, sources in report_data.items():\n if typ in ordered_types or typ in noisy_types:\n continue\n self.render_section(filp, typ, sources)\n\n # Output the noisy types\n for typ in noisy_types:\n if typ not in report_data:\n continue\n self.render_section(filp, typ, report_data[typ])\n\n console.log(f\"enumeration report written to [cyan]{report_path}[/cyan]\")\n except OSError as exc:\n console.log(f\"[red]error[/red]: [cyan]{report_path}[/cyan]: {exc}\")",
"def generate_report():",
"def generate_device_info_file():\n new_device_info = DeviceInfo(generate_initial_values=True)\n write_device_info_file(new_device_info)",
"def test_get_compute_physical_summary_list(self):\n pass",
"def show(self):\n print(\"%s: %u devices, %u links\" % (self.name, len(self.devices_by_address), len(self.links)))\n #return\n print(\"Platform: %s\" % self.name)\n # List devices\n for d in self:\n if d.name is not None:\n name = d.name\n else:\n name = None\n print(\"%20s \" % d.address_str(), end=\"\")\n print(\" %20s\" % name, end=\"\")\n print(\" %12s\" % (d.type_str()), end=\"\")\n if d.is_affine_to_cpu():\n if d.affine_cpu is not None:\n print(\"%5s\" % str(d.affine_cpu), end=\"\")\n else:\n print(\"%5u\" % d.cpu_number, end=\"\")\n else:\n print(\" \", end=\"\")\n if d.is_hidden:\n print(\" hidden\", end=\"\")\n if d.sysfs_path is not None:\n print(\" %s\" % d.sysfs_path, end=\"\")\n print()\n if False:\n for cfk in d.config:\n print(\" %s: %s\" % (cfk, d.config[cfk]))\n print(\"Links:\")\n for ln in self.links:\n print(\" %s\" % str(ln))\n print(\"CPUs:\")\n for c in range(0, self.max_cpu_number+1):\n cd = self.device_by_cpu(c, type=CS_DEVTYPE_TRACE_CORE)\n if cd is None:\n continue\n print(\" CPU #%u:\" % c)\n for sink in self.devices:\n if sink.type in [CS_DEVTYPE_PORT, CS_DEVTYPE_FIFO, CS_DEVTYPE_BUFFER, CS_DEVTYPE_ROUTER]:\n p = cd.get_path_to(sink)\n print(\" %s: %s\" % (sink, p))",
"def main():\n power_system = PowerSystem(\"normal_with_pevs_case9a.raw\",\"normal_with_pevs_case9a.dyr\")\n print power_system._nbus\n print \"LOADS\"\n print len(power_system._loads)\n print \"PEVS\"\n print len(power_system._pevs)\n print \"GENERATORS\"\n print len(power_system._generators)\n print \"BRANCHES\"\n print len(power_system._branches)",
"def _master_info_basic(self):\n out_str = \"\"\n (status, dev_id) = self.__tx_dev.rd(0x400008)\n self.__tx_dev.decode_error_status(status, cmd='rd(0x400008)', print_on_error=True)\n if(status == 0x01):\n dev_id = (dev_id & 0x0f)\n (status, gmd) = self.__tx_dev.get_master_descriptor()\n self.__tx_dev.decode_error_status(status, cmd='get_master_descriptor()', print_on_error=True)\n md = gmd.moduleDescriptor\n if(status == 0x01):\n out_str = \":\".join([\"%.2X\" % i for i in md.macAddress]) + \" \"\n major = md.firmwareVersion >> 5 # (Upper 11-bits)\n minor = md.firmwareVersion & 0x1f # (Lower 5-bits)\n out_str += \"- v%d.%d \" % (major, minor)\n module_id = (md.moduleID & 0xff)\n out_str += \"- %s (0x%.2X) \" % (dec.module_id.get(module_id, \"Unknown moduleID\"), module_id)\n out_str += \"- %s (0x%.2X) \" % (dec.hardware_type.get(md.hardwareType, \"Unknown hardwareType\"), md.hardwareType)\n print(out_str)",
"def get_hwinfo(ns):\n tf = TableFormatter(stdout, 0, True, {0: FIRST_COLUMN_MIN_SIZE})\n\n # Chassis\n try:\n chassis = get_single_instance(ns, 'LMI_Chassis')\n except Exception:\n result = [(get_colored_string('error:', RED_COLOR),\n 'Missing class LMI_Chassis. Is openlmi-hardware package installed on the server?')]\n tf.produce_output(result)\n return []\n\n hwinfo = chassis.Manufacturer\n if chassis.Model and chassis.Model != 'Not Specified' \\\n and chassis.Model != chassis.Manufacturer:\n hwinfo += ' ' + chassis.Model\n elif chassis.ProductName and chassis.ProductName != 'Not Specified' \\\n and chassis.ProductName != chassis.Manufacturer:\n hwinfo += ' ' + chassis.ProductName\n virt = getattr(chassis, 'VirtualMachine', None)\n if virt and virt != 'No':\n hwinfo += ' (%s virtual machine)' % virt\n chassis_res = [\n ('Hardware:', hwinfo),\n ('Serial Number:', chassis.SerialNumber),\n ('Asset Tag:', chassis.Tag)]\n tf.produce_output(chassis_res)\n\n # CPUs\n try:\n cpus = get_all_instances(ns, 'LMI_Processor')\n cpu_caps = get_all_instances(ns, 'LMI_ProcessorCapabilities')\n except Exception:\n cpus = None\n cpu_caps = None\n if cpus and cpu_caps:\n cores = 0\n threads = 0\n for i in cpu_caps:\n cores += i.NumberOfProcessorCores\n threads += i.NumberOfHardwareThreads\n cpus_res = [\n ('CPU:', '%s, %s arch' % (cpus[0].Name, cpus[0].Architecture)),\n ('CPU Topology:', '%d cpu(s), %d core(s), %d thread(s)' % \\\n (len(cpus), cores, threads))]\n else:\n cpus_res = [('CPU:', 'N/A')]\n tf.produce_output(cpus_res)\n\n # Memory\n try:\n memory = get_single_instance(ns, 'LMI_Memory')\n except Exception:\n memory = None\n if memory:\n memory_size = format_memory_size(memory.NumberOfBlocks)\n else:\n memory_size = 'N/A GB'\n tf.produce_output([('Memory:', memory_size)])\n\n return []",
"def write_system_info():\n\n # get system information, and write them into the log file\n system, node, release, version, machine, processor = platform.uname()\n\n if system in ['Linux']:\n # find how many physical processers\n p = subprocess.Popen('grep \"physical id\" /proc/cpuinfo|sort|uniq|wc -l',\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n processor_number = int(p.stdout.readlines()[0])\n\n # find the model name of the processors\n p = subprocess.Popen('grep \"model name\" /proc/cpuinfo|uniq', shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n processor = '; '.join([row.decode('utf-8').split(':')[1].strip()\n for row in p.stdout.readlines()])\n\n # find how many cores\n p = subprocess.Popen('grep \"cpu cores\" /proc/cpuinfo|uniq',shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n cores = int(p.stdout.readlines()[0].decode('utf-8').split(':')[1])\n\n # get the memory\n p = subprocess.Popen('free -mh',shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n row = p.stdout.readlines()[1]\n info = row.split()\n memory = '%s (total); %s (used); %s (free)'%(info[1],info[2],info[3])\n else:\n processor_number = 0\n processor = processor\n cores = 0\n memory = 'Unknown'\n\n\n distribution = ' '.join(platform.dist())\n username = getpass.getuser()\n node = platform.node()\n abspath = os.path.abspath(os.curdir)\n python_version = platform.python_version()\n\n info = ['Start reduction.',\n 'Node: %s'%node,\n 'Processor: %d x %s (%d cores)'%(processor_number, processor, cores),\n 'System: %s %s %s'%(system, release, machine),\n 'Distribution: %s'%distribution,\n 'Memory: %s'%memory,\n 'Username: %s'%username,\n 'Python version: %s'%python_version,\n 'Working directory: %s'%abspath,\n ]\n separator = os.linesep + ' '\n logger.info(separator.join(info))",
"def create_xml_server(self, server, dev_list, server_metadata={}):\n \n #get if operating system is Windows \n windows_os = False\n os_type = server_metadata.get('os_type', None)\n if os_type == None and 'metadata' in dev_list[0]:\n os_type = dev_list[0]['metadata'].get('os_type', None)\n if os_type != None and os_type.lower() == \"windows\":\n windows_os = True\n #get type of hard disk bus \n bus_ide = True if windows_os else False \n bus = server_metadata.get('bus', None)\n if bus == None and 'metadata' in dev_list[0]:\n bus = dev_list[0]['metadata'].get('bus', None)\n if bus != None:\n bus_ide = True if bus=='ide' else False\n \n self.xml_level = 0\n hypervisor = server.get('hypervisor', 'kvm')\n os_type_img = server.get('os_image_type', 'other')\n\n if hypervisor[:3] == 'xen':\n text = \"<domain type='xen'>\"\n else:\n text = \"<domain type='kvm'>\"\n #get topology\n topo = server_metadata.get('topology', None)\n if topo == None and 'metadata' in dev_list[0]:\n topo = dev_list[0]['metadata'].get('topology', None)\n #name\n name = server.get('name', '')[:28] + \"_\" + server['uuid'][:28] #qemu impose a length limit of 59 chars or not start. 
Using 58\n text += self.inc_tab() + \"<name>\" + name+ \"</name>\"\n #uuid\n text += self.tab() + \"<uuid>\" + server['uuid'] + \"</uuid>\" \n \n numa={}\n if 'extended' in server and server['extended']!=None and 'numas' in server['extended']:\n numa = server['extended']['numas'][0]\n #memory\n use_huge = False\n memory = int(numa.get('memory',0))*1024*1024 #in KiB\n if memory==0:\n memory = int(server['ram'])*1024;\n else:\n if not self.develop_mode:\n use_huge = True\n if memory==0:\n return -1, 'No memory assigned to instance'\n memory = str(memory)\n text += self.tab() + \"<memory unit='KiB'>\" +memory+\"</memory>\" \n text += self.tab() + \"<currentMemory unit='KiB'>\" +memory+ \"</currentMemory>\"\n if use_huge:\n text += self.tab()+'<memoryBacking>'+ \\\n self.inc_tab() + '<hugepages/>'+ \\\n self.dec_tab()+ '</memoryBacking>'\n\n #cpu\n use_cpu_pinning=False\n vcpus = int(server.get(\"vcpus\",0))\n cpu_pinning = []\n if 'cores-source' in numa:\n use_cpu_pinning=True\n for index in range(0, len(numa['cores-source'])):\n cpu_pinning.append( [ numa['cores-id'][index], numa['cores-source'][index] ] )\n vcpus += 1\n if 'threads-source' in numa:\n use_cpu_pinning=True\n for index in range(0, len(numa['threads-source'])):\n cpu_pinning.append( [ numa['threads-id'][index], numa['threads-source'][index] ] )\n vcpus += 1\n if 'paired-threads-source' in numa:\n use_cpu_pinning=True\n for index in range(0, len(numa['paired-threads-source'])):\n cpu_pinning.append( [numa['paired-threads-id'][index][0], numa['paired-threads-source'][index][0] ] )\n cpu_pinning.append( [numa['paired-threads-id'][index][1], numa['paired-threads-source'][index][1] ] )\n vcpus += 2\n \n if use_cpu_pinning and not self.develop_mode:\n text += self.tab()+\"<vcpu placement='static'>\" +str(len(cpu_pinning)) +\"</vcpu>\" + \\\n self.tab()+'<cputune>'\n self.xml_level += 1\n for i in range(0, len(cpu_pinning)):\n text += self.tab() + \"<vcpupin vcpu='\" +str(cpu_pinning[i][0])+ \"' cpuset='\" 
+str(cpu_pinning[i][1]) +\"'/>\"\n text += self.dec_tab()+'</cputune>'+ \\\n self.tab() + '<numatune>' +\\\n self.inc_tab() + \"<memory mode='strict' nodeset='\" +str(numa['source'])+ \"'/>\" +\\\n self.dec_tab() + '</numatune>'\n else:\n if vcpus==0:\n return -1, \"Instance without number of cpus\"\n text += self.tab()+\"<vcpu>\" + str(vcpus) + \"</vcpu>\"\n\n #boot\n boot_cdrom = False\n for dev in dev_list:\n if dev['type']=='cdrom' :\n boot_cdrom = True\n break\n if hypervisor == 'xenhvm':\n text += self.tab()+ '<os>' + \\\n self.inc_tab() + \"<type arch='x86_64' machine='xenfv'>hvm</type>\"\n text += self.tab() + \"<loader type='rom'>/usr/lib/xen/boot/hvmloader</loader>\"\n if boot_cdrom:\n text += self.tab() + \"<boot dev='cdrom'/>\" \n text += self.tab() + \"<boot dev='hd'/>\" + \\\n self.dec_tab()+'</os>'\n elif hypervisor == 'xen-unik':\n text += self.tab()+ '<os>' + \\\n self.inc_tab() + \"<type arch='x86_64' machine='xenpv'>xen</type>\"\n text += self.tab() + \"<kernel>\" + str(dev_list[0]['source file']) + \"</kernel>\" + \\\n self.dec_tab()+'</os>'\n else:\n text += self.tab()+ '<os>' + \\\n self.inc_tab() + \"<type arch='x86_64' machine='pc'>hvm</type>\"\n if boot_cdrom:\n text += self.tab() + \"<boot dev='cdrom'/>\" \n text += self.tab() + \"<boot dev='hd'/>\" + \\\n self.dec_tab()+'</os>'\n #features\n text += self.tab()+'<features>'+\\\n self.inc_tab()+'<acpi/>' +\\\n self.tab()+'<apic/>' +\\\n self.tab()+'<pae/>'+ \\\n self.dec_tab() +'</features>'\n if topo == \"oneSocket:hyperthreading\":\n if vcpus % 2 != 0:\n return -1, 'Cannot expose hyperthreading with an odd number of vcpus'\n text += self.tab() + \"<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='2' /> </cpu>\" % (vcpus/2)\n elif windows_os or topo == \"oneSocket\":\n text += self.tab() + \"<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='1' /> </cpu>\" % vcpus\n else:\n text += self.tab() + \"<cpu mode='host-model'></cpu>\"\n text += self.tab() + \"<clock 
offset='utc'/>\" +\\\n self.tab() + \"<on_poweroff>preserve</on_poweroff>\" + \\\n self.tab() + \"<on_reboot>restart</on_reboot>\" + \\\n self.tab() + \"<on_crash>restart</on_crash>\"\n if hypervisor == 'xenhvm':\n text += self.tab() + \"<devices>\" + \\\n self.inc_tab() + \"<emulator>/usr/bin/qemu-system-i386</emulator>\" + \\\n self.tab() + \"<serial type='pty'>\" +\\\n self.inc_tab() + \"<target port='0'/>\" + \\\n self.dec_tab() + \"</serial>\" +\\\n self.tab() + \"<console type='pty'>\" + \\\n self.inc_tab()+ \"<target type='serial' port='0'/>\" + \\\n self.dec_tab()+'</console>' #In some libvirt version may be: <emulator>/usr/lib64/xen/bin/qemu-dm</emulator> (depends on distro)\n elif hypervisor == 'xen-unik':\n text += self.tab() + \"<devices>\" + \\\n self.tab() + \"<console type='pty'>\" + \\\n self.inc_tab()+ \"<target type='xen' port='0'/>\" + \\\n self.dec_tab()+'</console>'\n else:\n text += self.tab() + \"<devices>\" + \\\n self.inc_tab() + \"<emulator>/usr/libexec/qemu-kvm</emulator>\" + \\\n self.tab() + \"<serial type='pty'>\" +\\\n self.inc_tab() + \"<target port='0'/>\" + \\\n self.dec_tab() + \"</serial>\" +\\\n self.tab() + \"<console type='pty'>\" + \\\n self.inc_tab()+ \"<target type='serial' port='0'/>\" + \\\n self.dec_tab()+'</console>'\n if windows_os:\n text += self.tab() + \"<controller type='usb' index='0'/>\" + \\\n self.tab() + \"<controller type='ide' index='0'/>\" + \\\n self.tab() + \"<input type='mouse' bus='ps2'/>\" + \\\n self.tab() + \"<sound model='ich6'/>\" + \\\n self.tab() + \"<video>\" + \\\n self.inc_tab() + \"<model type='cirrus' vram='9216' heads='1'/>\" + \\\n self.dec_tab() + \"</video>\" + \\\n self.tab() + \"<memballoon model='virtio'/>\" + \\\n self.tab() + \"<input type='tablet' bus='usb'/>\" #TODO revisar\n elif hypervisor == 'xen-unik':\n pass\n else:\n text += self.tab() + \"<controller type='ide' index='0'/>\" + \\\n self.tab() + \"<input type='mouse' bus='ps2'/>\" + \\\n self.tab() + \"<input type='keyboard' 
bus='ps2'/>\" + \\\n self.tab() + \"<video>\" + \\\n self.inc_tab() + \"<model type='cirrus' vram='9216' heads='1'/>\" + \\\n self.dec_tab() + \"</video>\"\n\n#> self.tab()+'<alias name=\\'hostdev0\\'/>\\n' +\\\n#> self.dec_tab()+'</hostdev>\\n' +\\\n#> self.tab()+'<input type=\\'tablet\\' bus=\\'usb\\'/>\\n'\n if windows_os:\n text += self.tab() + \"<graphics type='vnc' port='-1' autoport='yes'/>\"\n else:\n #If image contains 'GRAPH' include graphics\n #if 'GRAPH' in image:\n text += self.tab() + \"<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>\" +\\\n self.inc_tab() + \"<listen type='address' address='0.0.0.0'/>\" +\\\n self.dec_tab() + \"</graphics>\"\n\n vd_index = 'a'\n for dev in dev_list:\n bus_ide_dev = bus_ide\n if (dev['type']=='cdrom' or dev['type']=='disk') and hypervisor != 'xen-unik':\n if dev['type']=='cdrom':\n bus_ide_dev = True\n text += self.tab() + \"<disk type='file' device='\"+dev['type']+\"'>\"\n if 'file format' in dev:\n text += self.inc_tab() + \"<driver name='qemu' type='\" +dev['file format']+ \"' cache='writethrough'/>\"\n if 'source file' in dev:\n text += self.tab() + \"<source file='\" +dev['source file']+ \"'/>\"\n #elif v['type'] == 'block':\n # text += self.tab() + \"<source dev='\" + v['source'] + \"'/>\"\n #else:\n # return -1, 'Unknown disk type ' + v['type']\n vpci = dev.get('vpci',None)\n if vpci == None and 'metadata' in dev:\n vpci = dev['metadata'].get('vpci',None)\n text += self.pci2xml(vpci)\n \n if bus_ide_dev:\n text += self.tab() + \"<target dev='hd\" +vd_index+ \"' bus='ide'/>\" #TODO allows several type of disks\n else:\n text += self.tab() + \"<target dev='vd\" +vd_index+ \"' bus='virtio'/>\" \n text += self.dec_tab() + '</disk>'\n vd_index = chr(ord(vd_index)+1)\n elif dev['type']=='xml':\n dev_text = dev['xml']\n if 'vpci' in dev:\n dev_text = dev_text.replace('__vpci__', dev['vpci'])\n if 'source file' in dev:\n dev_text = dev_text.replace('__file__', dev['source file'])\n if 'file format' in 
dev:\n dev_text = dev_text.replace('__format__', dev['source file'])\n if '__dev__' in dev_text:\n dev_text = dev_text.replace('__dev__', vd_index)\n vd_index = chr(ord(vd_index)+1)\n text += dev_text\n elif hypervisor == 'xen-unik':\n pass\n else:\n return -1, 'Unknown device type ' + dev['type']\n\n net_nb=0\n bridge_interfaces = server.get('networks', [])\n for v in bridge_interfaces:\n #Get the brifge name\n result, content = self.db.get_table(FROM='nets', SELECT=('provider',),WHERE={'uuid':v['net_id']} )\n if result <= 0:\n self.logger.error(\"create_xml_server ERROR %d getting nets %s\", result, content)\n return -1, content\n #ALF: Allow by the moment the 'default' bridge net because is confortable for provide internet to VM\n #I know it is not secure \n #for v in sorted(desc['network interfaces'].itervalues()):\n model = v.get(\"model\", None)\n if content[0]['provider']=='default':\n text += self.tab() + \"<interface type='network'>\" + \\\n self.inc_tab() + \"<source network='\" +content[0]['provider']+ \"'/>\"\n elif content[0]['provider'][0:7]=='macvtap':\n text += self.tab()+\"<interface type='direct'>\" + \\\n self.inc_tab() + \"<source dev='\" + self.get_local_iface_name(content[0]['provider'][8:]) + \"' mode='bridge'/>\" + \\\n self.tab() + \"<target dev='macvtap0'/>\"\n if windows_os:\n text += self.tab() + \"<alias name='net\" + str(net_nb) + \"'/>\"\n elif model==None:\n model = \"virtio\"\n elif content[0]['provider'][0:6]=='bridge':\n text += self.tab() + \"<interface type='bridge'>\" + \\\n self.inc_tab()+\"<source bridge='\" +self.get_local_iface_name(content[0]['provider'][7:])+ \"'/>\"\n if windows_os:\n text += self.tab() + \"<target dev='vnet\" + str(net_nb)+ \"'/>\" +\\\n self.tab() + \"<alias name='net\" + str(net_nb)+ \"'/>\"\n elif model==None:\n model = \"virtio\"\n elif content[0]['provider'][0:3] == \"OVS\":\n vlan = content[0]['provider'].replace('OVS:', '')\n text += self.tab() + \"<interface type='bridge'>\" + \\\n 
self.inc_tab() + \"<source bridge='ovim-\" + str(vlan) + \"'/>\"\n if hypervisor == 'xenhvm' or hypervisor == 'xen-unik':\n text += self.tab() + \"<script path='vif-openvswitch'/>\"\n else:\n return -1, 'Unknown Bridge net provider ' + content[0]['provider']\n if model!=None:\n text += self.tab() + \"<model type='\" +model+ \"'/>\"\n if v.get('mac_address', None) != None:\n text+= self.tab() +\"<mac address='\" +v['mac_address']+ \"'/>\"\n text += self.pci2xml(v.get('vpci',None))\n text += self.dec_tab()+'</interface>'\n \n net_nb += 1\n\n interfaces = numa.get('interfaces', [])\n\n net_nb=0\n for v in interfaces:\n if self.develop_mode: #map these interfaces to bridges\n text += self.tab() + \"<interface type='bridge'>\" + \\\n self.inc_tab()+\"<source bridge='\" +self.develop_bridge_iface+ \"'/>\"\n if windows_os:\n text += self.tab() + \"<target dev='vnet\" + str(net_nb)+ \"'/>\" +\\\n self.tab() + \"<alias name='net\" + str(net_nb)+ \"'/>\"\n else:\n text += self.tab() + \"<model type='e1000'/>\" #e1000 is more probable to be supported than 'virtio'\n if v.get('mac_address', None) != None:\n text+= self.tab() +\"<mac address='\" +v['mac_address']+ \"'/>\"\n text += self.pci2xml(v.get('vpci',None))\n text += self.dec_tab()+'</interface>'\n continue\n \n if v['dedicated'] == 'yes': #passthrought\n text += self.tab() + \"<hostdev mode='subsystem' type='pci' managed='yes'>\" + \\\n self.inc_tab() + \"<source>\"\n self.inc_tab()\n text += self.pci2xml(v['source'])\n text += self.dec_tab()+'</source>'\n text += self.pci2xml(v.get('vpci',None))\n if windows_os:\n text += self.tab() + \"<alias name='hostdev\" + str(net_nb) + \"'/>\"\n text += self.dec_tab()+'</hostdev>'\n net_nb += 1\n else: #sriov_interfaces\n #skip not connected interfaces\n if v.get(\"net_id\") == None:\n continue\n text += self.tab() + \"<interface type='hostdev' managed='yes'>\"\n self.inc_tab()\n if v.get('mac_address', None) != None:\n text+= self.tab() + \"<mac address='\" +v['mac_address']+ 
\"'/>\"\n text+= self.tab()+'<source>'\n self.inc_tab()\n text += self.pci2xml(v['source'])\n text += self.dec_tab()+'</source>'\n if v.get('vlan',None) != None:\n text += self.tab() + \"<vlan> <tag id='\" + str(v['vlan']) + \"'/> </vlan>\"\n text += self.pci2xml(v.get('vpci',None))\n if windows_os:\n text += self.tab() + \"<alias name='hostdev\" + str(net_nb) + \"'/>\"\n text += self.dec_tab()+'</interface>'\n\n \n text += self.dec_tab()+'</devices>'+\\\n self.dec_tab()+'</domain>'\n return 0, text",
"def create_xml_server(self, server, dev_list, server_metadata={}):\n \n #get if operating system is Windows \n windows_os = False\n os_type = server_metadata.get('os_type', None)\n if os_type == None and 'metadata' in dev_list[0]:\n os_type = dev_list[0]['metadata'].get('os_type', None)\n if os_type != None and os_type.lower() == \"windows\":\n windows_os = True\n #get type of hard disk bus \n bus_ide = True if windows_os else False \n bus = server_metadata.get('bus', None)\n if bus == None and 'metadata' in dev_list[0]:\n bus = dev_list[0]['metadata'].get('bus', None)\n if bus != None:\n bus_ide = True if bus=='ide' else False\n \n self.xml_level = 0\n\n text = \"<domain type='kvm'>\"\n #get topology\n topo = server_metadata.get('topology', None)\n if topo == None and 'metadata' in dev_list[0]:\n topo = dev_list[0]['metadata'].get('topology', None)\n #name\n name = server.get('name', '')[:28] + \"_\" + server['uuid'][:28] #qemu impose a length limit of 59 chars or not start. Using 58\n text += self.inc_tab() + \"<name>\" + name+ \"</name>\"\n #uuid\n text += self.tab() + \"<uuid>\" + server['uuid'] + \"</uuid>\" \n \n numa={}\n if 'extended' in server and server['extended']!=None and 'numas' in server['extended']:\n numa = server['extended']['numas'][0]\n #memory\n use_huge = False\n memory = int(numa.get('memory',0))*1024*1024 #in KiB\n if memory==0:\n memory = int(server['ram'])*1024;\n else:\n if not self.develop_mode:\n use_huge = True\n if memory==0:\n return -1, 'No memory assigned to instance'\n memory = str(memory/4)\n text += self.tab() + \"<memory unit='KiB'>\" +memory+\"</memory>\" \n text += self.tab() + \"<currentMemory unit='KiB'>\" +memory+ \"</currentMemory>\"\n if use_huge and 1 == 0: # retirar hugepages por enquanto (N)\n text += self.tab()+'<memoryBacking>'+ \\\n self.inc_tab() + '<hugepages/>'+ \\\n self.dec_tab()+ '</memoryBacking>'\n\n #cpu\n use_cpu_pinning=False\n vcpus = int(server.get(\"vcpus\",0))\n cpu_pinning = []\n if 'cores-source' 
in numa:\n use_cpu_pinning=True\n for index in range(0, len(numa['cores-source'])):\n cpu_pinning.append( [ numa['cores-id'][index], numa['cores-source'][index] ] )\n vcpus += 1\n if 'threads-source' in numa:\n use_cpu_pinning=True\n for index in range(0, len(numa['threads-source'])):\n cpu_pinning.append( [ numa['threads-id'][index], numa['threads-source'][index] ] )\n vcpus += 1\n if 'paired-threads-source' in numa:\n use_cpu_pinning=True\n for index in range(0, len(numa['paired-threads-source'])):\n cpu_pinning.append( [numa['paired-threads-id'][index][0], numa['paired-threads-source'][index][0] ] )\n cpu_pinning.append( [numa['paired-threads-id'][index][1], numa['paired-threads-source'][index][1] ] )\n vcpus += 2\n \n if use_cpu_pinning and not self.develop_mode:\n text += self.tab()+\"<vcpu placement='static'>\" +str(len(cpu_pinning)) +\"</vcpu>\" + \\\n self.tab()+'<cputune>'\n self.xml_level += 1\n for i in range(0, len(cpu_pinning)):\n text += self.tab() + \"<vcpupin vcpu='\" +str(cpu_pinning[i][0])+ \"' cpuset='\" +str(cpu_pinning[i][1]) +\"'/>\"\n text += self.dec_tab()+'</cputune>'+ \\\n self.tab() + '<numatune>' +\\\n self.inc_tab() + \"<memory mode='strict' nodeset='\" +str(numa['source'])+ \"'/>\" +\\\n self.dec_tab() + '</numatune>'\n else:\n if vcpus==0:\n return -1, \"Instance without number of cpus\"\n text += self.tab()+\"<vcpu>\" + str(vcpus) + \"</vcpu>\"\n\n #boot\n boot_cdrom = False\n for dev in dev_list:\n if dev['type']=='cdrom' :\n boot_cdrom = True\n break\n text += self.tab()+ '<os>' + \\\n self.inc_tab() + \"<type arch='aarch64' machine='virt-2.9'>hvm</type>\"\n if boot_cdrom:\n \ttext += self.tab() + \"<boot dev='cdrom'/>\" \n text += self.tab() + \"<boot dev='hd'/>\"\n text += self.tab() + \"<loader readonly='yes' type='pflash'>/usr/share/qemu/aavmf-aarch64-code.bin</loader>\" + \\\n\t\tself.tab() + \"<nvram>/var/lib/libvirt/qemu/nvram/debian_VARS.fd</nvram>\" + \\\n\t\t\tself.dec_tab()+'</os>'\n #features\n text += 
self.tab()+'<features>'+\\\n self.inc_tab()+'<acpi/>' +\\\n self.tab()+'<apic/>' +\\\n self.tab()+'<pae/>'+ \\\n self.dec_tab() +'</features>'\n if topo == \"oneSocket:hyperthreading\":\n if vcpus % 2 != 0:\n return -1, 'Cannot expose hyperthreading with an odd number of vcpus'\n text += self.tab() + \"<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='2' /> </cpu>\" % (vcpus/2)\n elif windows_os or topo == \"oneSocket\":\n text += self.tab() + \"<cpu mode='host-model'> <topology sockets='1' cores='%d' threads='1' /> </cpu>\" % vcpus\n else:\n text += self.tab() + \"<cpu mode='host-passthrough'>\" +\\\n \tself.inc_tab() + \"<model fallback='allow'/>\" +\\\n \tself.dec_tab() + \"</cpu>\"\n text += self.tab() + \"<clock offset='utc'/>\" +\\\n self.tab() + \"<on_poweroff>preserve</on_poweroff>\" + \\\n self.tab() + \"<on_reboot>restart</on_reboot>\" + \\\n self.tab() + \"<on_crash>restart</on_crash>\"\n text += self.tab() + \"<devices>\" + \\\n self.inc_tab() + \"<emulator>/usr/bin/qemu-system-aarch64</emulator>\" + \\\n self.tab() + \"<serial type='pty'>\" +\\\n self.inc_tab() + \"<target port='0'/>\" + \\\n self.dec_tab() + \"</serial>\" +\\\n self.tab() + \"<console type='pty'>\" + \\\n self.inc_tab()+ \"<target type='serial' port='0'/>\" + \\\n self.dec_tab()+'</console>'\n if windows_os:\n text += self.tab() + \"<controller type='usb' index='0'/>\" + \\\n self.tab() + \"<controller type='ide' index='0'/>\" + \\\n self.tab() + \"<input type='mouse' bus='ps2'/>\" + \\\n self.tab() + \"<sound model='ich6'/>\" + \\\n self.tab() + \"<video>\" + \\\n self.inc_tab() + \"<model type='cirrus' vram='9216' heads='1'/>\" + \\\n self.dec_tab() + \"</video>\" + \\\n self.tab() + \"<memballoon model='virtio'/>\" + \\\n self.tab() + \"<input type='tablet' bus='usb'/>\" #TODO revisar\n\n#> self.tab()+'<alias name=\\'hostdev0\\'/>\\n' +\\\n#> self.dec_tab()+'</hostdev>\\n' +\\\n#> self.tab()+'<input type=\\'tablet\\' bus=\\'usb\\'/>\\n'\n if windows_os:\n text += 
self.tab() + \"<graphics type='vnc' port='-1' autoport='yes'/>\"\n else:\n #If image contains 'GRAPH' include graphics\n #if 'GRAPH' in image:\n #text += self.tab() + \"<graphics type='vnc' port='-1' autoport='yes' listen='0.0.0.0'>\" +\\\n # self.inc_tab() + \"<listen type='address' address='0.0.0.0'/>\" +\\\n # self.dec_tab() + \"</graphics>\"\n text += self.tab() + \"<video>\" +\\\n \tself.inc_tab() + \"<model type='virtio'/>\" +\\\n \tself.dec_tab() + \"</video>\"\n\n vd_index = 'a'\n for dev in dev_list:\n bus_ide_dev = bus_ide\n if dev['type']=='cdrom' or dev['type']=='disk':\n if dev['type']=='cdrom':\n bus_ide_dev = True\n text += self.tab() + \"<disk type='file' device='\"+dev['type']+\"'>\"\n if 'file format' in dev:\n text += self.inc_tab() + \"<driver name='qemu' type='\" +dev['file format']+ \"' cache='writethrough'/>\"\n if 'source file' in dev:\n text += self.tab() + \"<source file='\" +dev['source file']+ \"'/>\"\n #elif v['type'] == 'block':\n # text += self.tab() + \"<source dev='\" + v['source'] + \"'/>\"\n #else:\n # return -1, 'Unknown disk type ' + v['type']\n vpci = dev.get('vpci',None)\n if vpci == None and 'metadata' in dev:\n vpci = dev['metadata'].get('vpci',None)\n #text += self.pci2xml(vpci)\n #text += self.tab() + \"<address type='drive' controller='0' bus='0' target='0' unit='0'/>\"\n \n if bus_ide_dev:\n text += self.tab() + \"<target dev='hd\" +vd_index+ \"' bus='ide'/>\" #TODO allows several type of disks\n else:\n text += self.tab() + \"<target dev='vd\" +vd_index+ \"' bus='virtio'/>\"\n text += self.dec_tab() + '</disk>'\n vd_index = chr(ord(vd_index)+1)\n elif dev['type']=='xml':\n dev_text = dev['xml']\n if 'vpci' in dev:\n dev_text = dev_text.replace('__vpci__', dev['vpci'])\n if 'source file' in dev:\n dev_text = dev_text.replace('__file__', dev['source file'])\n if 'file format' in dev:\n dev_text = dev_text.replace('__format__', dev['source file'])\n if '__dev__' in dev_text:\n dev_text = dev_text.replace('__dev__', 
vd_index)\n vd_index = chr(ord(vd_index)+1)\n text += dev_text\n else:\n return -1, 'Unknown device type ' + dev['type']\n\n net_nb=0\n bridge_interfaces = server.get('networks', [])\n for v in bridge_interfaces:\n #Get the brifge name\n self.db_lock.acquire()\n result, content = self.db.get_table(FROM='nets', SELECT=('provider',),WHERE={'uuid':v['net_id']} )\n self.db_lock.release()\n if result <= 0:\n self.logger.error(\"create_xml_server ERROR %d getting nets %s\", result, content)\n return -1, content\n #ALF: Allow by the moment the 'default' bridge net because is confortable for provide internet to VM\n #I know it is not secure \n #for v in sorted(desc['network interfaces'].itervalues()):\n model = v.get(\"model\", None)\n if content[0]['provider']=='default':\n text += self.tab() + \"<interface type='network'>\" + \\\n self.inc_tab() + \"<source network='\" +content[0]['provider']+ \"'/>\"\n elif content[0]['provider'][0:7]=='macvtap':\n text += self.tab()+\"<interface type='direct'>\" + \\\n self.inc_tab() + \"<source dev='\" + self.get_local_iface_name(content[0]['provider'][8:]) + \"' mode='bridge'/>\" + \\\n self.tab() + \"<target dev='macvtap0'/>\"\n if windows_os:\n text += self.tab() + \"<alias name='net\" + str(net_nb) + \"'/>\"\n elif model==None:\n model = \"virtio\"\n elif content[0]['provider'][0:6]=='bridge':\n text += self.tab() + \"<interface type='bridge'>\" + \\\n self.inc_tab()+\"<source bridge='\" +self.get_local_iface_name(content[0]['provider'][7:])+ \"'/>\"\n if windows_os:\n text += self.tab() + \"<target dev='vnet\" + str(net_nb)+ \"'/>\" +\\\n self.tab() + \"<alias name='net\" + str(net_nb)+ \"'/>\"\n elif model==None:\n model = \"virtio\"\n elif content[0]['provider'][0:3] == \"OVS\":\n vlan = content[0]['provider'].replace('OVS:', '')\n text += self.tab() + \"<interface type='bridge'>\" + \\\n self.inc_tab() + \"<source bridge='ovim-\" + str(vlan) + \"'/>\"\n else:\n return -1, 'Unknown Bridge net provider ' + 
content[0]['provider']\n if model!=None:\n text += self.tab() + \"<model type='\" +model+ \"'/>\"\n if v.get('mac_address', None) != None:\n text+= self.tab() +\"<mac address='\" +v['mac_address']+ \"'/>\"\n text += self.pci2xml(v.get('vpci',None))\n text += self.tab() + \"<rom file=''/>\"\n text += self.dec_tab()+'</interface>'\n \n net_nb += 1\n\n interfaces = numa.get('interfaces', [])\n\n net_nb=0\n for v in interfaces:\n if self.develop_mode: #map these interfaces to bridges\n text += self.tab() + \"<interface type='bridge'>\" + \\\n self.inc_tab()+\"<source bridge='\" +self.develop_bridge_iface+ \"'/>\"\n if windows_os:\n text += self.tab() + \"<target dev='vnet\" + str(net_nb)+ \"'/>\" +\\\n self.tab() + \"<alias name='net\" + str(net_nb)+ \"'/>\"\n else:\n text += self.tab() + \"<model type='e1000'/>\" #e1000 is more probable to be supported than 'virtio'\n if v.get('mac_address', None) != None:\n text+= self.tab() +\"<mac address='\" +v['mac_address']+ \"'/>\"\n text += self.pci2xml(v.get('vpci',None))\n text += self.dec_tab()+'</interface>'\n continue\n \n if v['dedicated'] == 'yes': #passthrought\n text += self.tab() + \"<hostdev mode='subsystem' type='pci' managed='yes'>\" + \\\n self.inc_tab() + \"<source>\"\n self.inc_tab()\n text += self.pci2xml(v['source'])\n text += self.dec_tab()+'</source>'\n text += self.pci2xml(v.get('vpci',None))\n if windows_os:\n text += self.tab() + \"<alias name='hostdev\" + str(net_nb) + \"'/>\"\n text += self.dec_tab()+'</hostdev>'\n net_nb += 1\n else: #sriov_interfaces\n #skip not connected interfaces\n if v.get(\"net_id\") == None:\n continue\n text += self.tab() + \"<interface type='hostdev' managed='yes'>\"\n self.inc_tab()\n if v.get('mac_address', None) != None:\n text+= self.tab() + \"<mac address='\" +v['mac_address']+ \"'/>\"\n text+= self.tab()+'<source>'\n self.inc_tab()\n text += self.pci2xml(v['source'])\n text += self.dec_tab()+'</source>'\n if v.get('vlan',None) != None:\n text += self.tab() + \"<vlan> 
<tag id='\" + str(v['vlan']) + \"'/> </vlan>\"\n text += self.pci2xml(v.get('vpci',None))\n if windows_os:\n text += self.tab() + \"<alias name='hostdev\" + str(net_nb) + \"'/>\"\n text += self.tab() + \"<rom file=''/>\"\n text += self.dec_tab()+'</interface>'\n\n \n text += self.dec_tab()+'</devices>'+\\\n self.dec_tab()+'</domain>'\n return 0, text",
"def printMetadata(self):\n print (\"************COMMONDATA************\")\n print (\"Setname:\", self.setname, \"PROC:\", self.proc)\n print (\"NDAT:\", self.ndata,\"NSYS:\",self.nsys)",
"def create_slf_file(self):\n mesh = open(self.name, 'w') \n mesh.write('numel numnp nmat nmode (This is for a beam bridge)\\n')\n mesh.write(str(len(self.edge_list))+'\\t'+str(len(self.node_list))\n + '\\t'+str(len(self.beams)) + '\\t0\\n')\n mesh.write('matl no., E mod, Poiss. Ratio,density, Area, Iy, Iz\\n')\n tables = open('./tables/CHSTables.txt', 'r')\n for i,beam in enumerate(self.beams):\n mesh.write(str(i)+' '+str(self.beams[i]['emod'])+'\\t0.3000\\t'\n + str(self.beams[i]['density'])+'\\t'+str(self.beams[i]['area'])\n + '\\t'+str(self.beams[i]['iy'])+'\\t'+str(self.beams[i]['ix']) + '\\n') \n mesh.write('el no.,connectivity, matl no, element type\\n')\n for i, edge in enumerate(self.edge_list): \n mesh.write(str(i)+'\\t'+str(edge['pt_a'])+'\\t'+str(edge['pt_b'])\n + '\\t'+str(edge['material'])+'\\t2 \\n')\n mesh.write('node no., coordinates\\n')\n for node in self.node_list:\n mesh.write(node['id']+'\\t'+str(node['x'])+'\\t'+str(node['y'])+'\\t'+str(node['z'])+\"\\n\")\n mesh.write(\"element with specified local z axis: x, y, z component\\n -10\\n\")\n mesh.write('prescribed displacement x: node disp value\\n')\n for node in self.fixed_list:\n# if node[1] == True: # un-comment when dealing with fixed-roller structures\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nprescribed displacement y: node disp value\\n')\n for node in self.fixed_list:\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nprescribed displacement z: node disp value\\n')\n for node in self.fixed_list:\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nprescribed angle phi x: node angle value\\n')\n for node in self.fixed_list:\n# if node[1] == True: # un-comment when dealing with fixed-roller structures\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nprescribed angle phi y: node angle value\\n')\n for node in self.fixed_list:\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nprescribed angle phi z: node angle value\\n')\n for 
node in self.fixed_list:\n mesh.write(node[0]['id']+\"\\t0.0\\n\")\n mesh.write('-10\\nnode with point load x, y, z and 3 moments phi x, phi y, phi z\\n') \n if self.BROKEN:\n for node in self.nodeselfloads: \n trans = 0\n broken_long = 0\n for thing in self.load_nodes:\n if thing == node[0]:\n node[1] = node[1] + self.vertical_cable_load\n trans = self.transverse_cable_load \n if self.GROUND_BROKEN:\n for thing in self.ground_node:\n if thing == node[0]:\n node[1] = node[1] + self.vertical_ground_load_broken\n trans = self.transverse_ground_load\n broken_long = self.longitudinal_ground_load\n for thing in self.break_node:\n if thing == node[0]:\n node[1] = node[1] + self.vertical_cable_load_broken\n broken_long = self.longitudinal_cable_load\n trans = self.transverse_cable_load\n else:\n for thing in self.ground_node:\n if thing == node[0]:\n node[1] = node[1] + self.vertical_ground_load\n trans = self.transverse_ground_load\n for thing in self.break_node:\n if thing == node[0]:\n node[1] = node[1] + self.vertical_cable_load_broken\n broken_long = self.longitudinal_cable_load \n trans = self.transverse_cable_load\n mesh.write(str(node[0])+'\\t'+str(broken_long)+'\\t'+str(trans)+'\\t-'+str(round(node[1],5))+'\\t0\\t0\\t0\\n')\n else:\n for node in self.nodeselfloads: \n trans = 0\n for yolk in self.load_nodes:\n if yolk == node[0]:\n node[1] = node[1] + self.vertical_cable_load\n trans = self.transverse_cable_load\n for thong in self.ground_node:\n if thong == node[0]:\n node[1] = node[1] + self.vertical_ground_load\n trans = self.transverse_ground_load\n mesh.write(str(node[0])+'\\t0\\t'+str(trans)+'\\t-'+str(round(node[1],5))+'\\t0\\t0\\t0\\n')\n mesh.write('-10\\nelement with distributed load in global beam y and z coordinates\\n') \n mesh.write('-10\\nelement no. and gauss pt. no. with local stress vector xx and moment xx,yy,zz\\n-10')\n mesh.close()",
"def displayBiosInfo(self):\n biosInfo = self.getBiosInfo()\n sysInfo = self.getSysInfo()\n procInfo = self.getProcInfo()\n dimm = self.getDimmInfo()\n if biosInfo:\n self.pprint.bsection('DMI Decode')\n self.pprint.bheader('\\tBIOS')\n self.pprint.blue('\\t\\tVendor : ', biosInfo['Vendor'])\n self.pprint.blue('\\t\\tVersion : ', biosInfo['Version'])\n self.pprint.blue('\\t\\tRelease : ', biosInfo['Release Date'])\n else:\n self.pprint.bred('\\t\\t Could not parse dmidecode')\n\n if sysInfo:\n self.pprint.bheader('\\tSystem')\n self.pprint.blue('\\t\\tVendor : ', sysInfo['Manufacturer'])\n self.pprint.blue('\\t\\tServer : ', sysInfo['Product Name'])\n self.pprint.blue('\\t\\tSerial : ', sysInfo['Serial Number'])\n self.pprint.blue('\\t\\tUUID : ', sysInfo['UUID'])\n\n self.pprint.bheader('\\tCPU')\n\n if procInfo.sockets > 0:\n self.pprint.white(\n '\\t\\t{} sockets - {} cores - {} threads per core'.format(\n procInfo.sockets, procInfo.cores,\n procInfo.threadspercore\n )\n )\n self.pprint.white('\\t\\t{} total cores {} total threads'.format(\n procInfo.cores,\n procInfo.processors\n )\n )\n else:\n self.pprint.white(\n '\\t\\tVirtual Machine with no defined sockets or cores'\n )\n self.pprint.blue('\\t\\tFamily : ',\n procInfo.vendor,\n ' ',\n procInfo.family\n )\n self.pprint.blue('\\t\\tModel : ', procInfo.model.strip())\n self.pprint.bheader('\\tMemory')\n self.pprint.white('\\t\\t{} of {} DIMMs populated'.format(\n (dimm.dimmCount - dimm.emptyDimms),\n dimm.dimmCount\n )\n )\n self.pprint.blue('\\t\\tTotal : ',\n str(dimm.totalMem),\n ' MB',\n ' ({} GB)'.format(\n (dimm.totalMem / 1024)\n )\n )\n self.pprint.blue('\\t\\tMax Mem : ',\n '{} GB'.format(dimm.maxMem)\n )\n self.pprint.green(\n '\\t\\t{} total controllers {} GB maximum per controller'.format(\n dimm.memArrays, dimm.maxMem\n )\n )",
"def gather_chassis_details(self):",
"def generate(self, soc, periph_mappings):\n\n def generate_soc(soc):\n def generate_memory_regions(soc):\n mem_reg_node = Node(\"memory_regions\")\n mem_reg_node.add_property(\"#address-cells\", \"<1>\")\n mem_reg_node.add_property(\"#size-cells\", \"<1>\")\n\n def generate_memory(region):\n mem_node = Node(\"memory\", region.base_addr)\n mem_node.add_property(\"label\", f'\"{region.name.lower()}\"')\n mem_node.add_property(\"device_type\", '\"memory\"')\n mem_node.add_property(\n \"reg\", f\"<0x{region.base_addr:08x} 0x{region.size:08x}>\"\n )\n return mem_node\n\n main_ram_node = generate_memory(soc.main_ram)\n main_ram_node.label = \"main_ram\"\n mem_reg_node.add_node(main_ram_node)\n\n for memory_region in soc.other_memory_regions:\n mem_reg_node.add_node(generate_memory(memory_region))\n\n return mem_reg_node\n\n soc_node = Node(\"soc\")\n soc_node.add_property(\"#address-cells\", \"<1>\")\n soc_node.add_property(\"#size-cells\", \"<1>\")\n soc_node.add_property(\n \"compatible\", f'\"{soc.vendor.lower()},{soc.name.lower()}\"'\n )\n soc_node.add_property(\"ranges\")\n\n mem_reg_node = generate_memory_regions(soc)\n soc_node.add_node(mem_reg_node)\n\n return soc_node\n\n self.logger.info(\"Generating SoC devicetree\")\n\n devicetree_writer = DevicetreeWriter()\n\n soc_node = generate_soc(soc)\n devicetree_writer.add_node(soc_node)\n\n self.logger.info(devicetree_writer.write())",
"def test_print_empty(self):\n wdir = tempfile.mkdtemp()\n print('wdir', wdir)\n os.chdir(wdir)\n shutil.copyfile(abidata.cif_file(\"si.cif\"), os.path.join(wdir, 'si.cif'))\n spec_in = get_spec('GW')\n self.assertIsInstance(spec_in, GWSpecs)\n spec_in.data['source'] = 'cif'\n spec_in.data['converge'] = True\n return_value = spec_in.loop_structures('w')\n self.assertEqual(return_value[0], 0)\n self.assertEqual(return_value[1], 1)\n self.assertEqual(return_value[2], {u'Si_si.cif': 'no convergence data found'})",
"def info(self):\n print(\"RAW DATA\")\n print(\"==================\")\n print(\"File name:\\t\" + str(self.filename))\n print(\"------------------\")\n print(\"Source name:\\t\" + (self.meta[\"source\"] or \"Unknown\"))\n print(\"Observed date:\\t\" + self.meta[\"OBSDATE\"].iso)\n print(\"Description:\\t\" + self.meta[\"obstype\"] or \"Unknown\")\n print(\"Scan number:\\t\" + str(self.meta[\"scan\"] or \"Unknown\"))\n\n print(\"------------------\")\n print(\"No. of KIDS detectors:\\t\", self.ndet)\n print(\"No. of time samples:\\t\", self.nsamples)\n print(\n \"Typical size of fully sampled data (GiB):\\t{:3.1f}\".format(self.nsamples * self.ndet * 32 / 8 / 1024 ** 3)\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return all the EIA plant IDs associated with a given EIA operator ID.
|
def eia_operator_plants(operator_id, pudl_engine):
Session = sa.orm.sessionmaker()
Session.configure(bind=pudl_engine)
session = Session()
pudl_plant_ids = [p.plant_id for p in session.query(models.UtilityEIA923).
filter_by(operator_id=operator_id).
first().util_pudl.plants]
eia923_plant_ids = [p.plant_id for p in
session.query(models.PlantEIA923).
filter(models.
PlantEIA923.
plant_id_pudl.
in_(pudl_plant_ids))]
session.close_all()
return(eia923_plant_ids)
|
[
"def get_mapped_plants_eia():\n mapped_plants_eia = (\n get_plant_map()\n .loc[:, [\"plant_id_eia\", \"plant_name_eia\"]]\n .dropna(subset=[\"plant_id_eia\"])\n .astype({\"plant_id_eia\": int})\n .drop_duplicates(\"plant_id_eia\")\n .sort_values(\"plant_id_eia\")\n )\n return mapped_plants_eia",
"def get_valid_operator_id_list(user) -> list:\n answer = []\n for esId in Operator.objects.values_list('esId', flat=True):\n if user.has_perm('localinfo.{0}'.format(esId)):\n answer.append(esId)\n\n return answer",
"def get_ophys_experiment_ids_for_ophys_container_id(ophys_container_id):\n experiments = get_filtered_ophys_experiment_table()\n ophys_experiment_ids = np.sort(experiments[(experiments.container_id == ophys_container_id)].ophys_experiment_id.values)\n return ophys_experiment_ids",
"def test_plants_eia860(pudl_out_eia):\n print('\\nReading EIA 860 plant data...')\n print(f\" plants_eia860: {len(pudl_out_eia.plants_eia860())} records.\")",
"def get_ids():",
"def get_all_operators(self):\n\n query = \"\"\"SELECT id_op, nombre\n FROM operadores_table\n WHERE estamento != 'invalid'\n ORDER BY nombre\"\"\"\n\n self.cursor.execute(query)\n operadores = [\"{}- {}\".format(op[0], op[1]) for op in self.cursor]\n print(\"OPERADORESSSSSSSSSSSSSSSSSSSSS........: \", operadores)\n\n return operadores",
"def test_operator_get_all_operators(self):\n pass",
"def get_operators(self, country_id):\n if type(country_id) != int:\n raise TypeError(\"arg must be an int\")\n else:\n return self._make_transferto_request(\n action=\"pricelist\", info_type=\"country\", content=country_id\n )",
"def _get_instrument_ids(self):\n return self._pnode.instruments.keys()",
"def __getAllIPpoolIds(self):\n ippoolids_dic=db_main.getHandle().get(\"ippool\",\"true\",0,-1,\"ippool_id\",[\"ippool_id\"])\n return map(lambda x:x[\"ippool_id\"],ippoolids_dic)",
"def getEquipmentID(self):\n return self.equipment",
"def get_equipment_list(zone, zone_ep):\n connections = zone_ep.getreferingobjs(\n iddgroups=[\"Zone HVAC Equipment Connections\"], fields=[\"Zone_Name\"]\n )\n referenced_object = next(iter(connections)).get_referenced_object(\n \"Zone_Conditioning_Equipment_List_Name\"\n )\n # EquipmentList can have 18 objects. Filter out the None objects.\n return filter(\n None,\n [\n referenced_object.get_referenced_object(f\"Zone_Equipment_{i}_Name\")\n for i in range(1, 19)\n ],\n )",
"def getoids(self):\n return self.oidmap.keys()",
"def get_operator(self, operator_id):\n return self.operators.get_operator(operator_id, self)",
"def GET_all_operators(self):\n return list(map(lambda _: _.as_dict(), get_operators()))",
"def id_list(self):\n return numpy.array(self.spiketrains.keys(), int)",
"def getEpisodeIDs(self):\n cursor = self.connection.cursor()\n\n cursor.execute('SELECT episode_id FROM info')\n\n data = cursor.fetchall()\n data = self.extractValues(data)\n\n cursor.close()\n return data",
"def get_wigos_ids(self,primary=True):\n \n xpath = '/wmdr:WIGOSMetadataRecord/wmdr:facility/wmdr:ObservingFacility/gml:identifier'\n wigosid_elem = self.xml_root.xpath(xpath,namespaces=namespaces)\n \n if not wigosid_elem:\n raise ValueError(\"no WIGOS ID element\")\n \n return (wigosid_elem[0].text).split(\",\")",
"def idPerm(self):\n allPerm = self.uniPerm\n permId = [[findIndexPerm(allPerm, self.perm[ihypo][iconfig])\n for iconfig in range(self.nperm[ihypo])]\n for ihypo in range(model.nhypo)]\n return permId"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculate generation vs. expense correlation for FERC Form 1 plants. This function helped us identify which of the expns_ fields in the FERC Form 1 dataset represent production costs, and which are nonproduction costs, for the purposes of modeling marginal cost of electricity from various plants. We expect the difference in expenses vs. generation to be more indicative of production vs. nonproduction costs for plants with higher capacity factors, and since what we're trying to do here is identify which fields in the FERC Form 1 data are production costs, we allow a capacity_factor threshold to be set analysis is only done for those plants with capacity factors larger than the threshold. Additionaly, some types of plants simply do not have some types of expenses, so to keep those plants from dragging down otherwise meaningful correlations, any zero expense values are dropped before calculating the correlations. Returns a dictionary with expns_ field names as the keys, and correlations as the values.
|
def ferc1_expns_corr(pudl_engine, capacity_factor=0.6):
    """Calculate generation vs. expense correlation for FERC Form 1 plants.

    Used to identify which of the expns_* fields in the FERC Form 1 steam
    plants table represent production costs (which should correlate with
    generation) vs. nonproduction costs. Only plants whose capacity factor
    exceeds ``capacity_factor`` are included, since the generation/expense
    relationship is more meaningful for heavily utilized plants. Zero
    expense values are dropped before correlating so that plant types which
    simply lack a given expense category don't dilute the correlation.

    Args:
        pudl_engine: a DB-API / SQLAlchemy connectable that pandas can use
            to read the ``plants_steam_ferc1`` table.
        capacity_factor (float): minimum capacity factor a plant must have
            to be included in the correlation calculation.

    Returns:
        dict: mapping of expns_* column names to their Pearson correlation
        coefficient with annual net generation (MWh).
    """
    steam_df = pd.read_sql('SELECT * FROM plants_steam_ferc1', pudl_engine)
    # Capacity factor = actual generation / maximum possible generation.
    # BUGFIX: the original expression divided by 8760 and then *multiplied*
    # by capacity (a / 8760 * b == a * b / 8760); capacity belongs in the
    # denominator.
    steam_df['capacity_factor'] = (
        steam_df['net_generation_mwh'] /
        (8760 * steam_df['total_capacity_mw'])
    )
    # Limit plants by capacity factor
    steam_df = steam_df[steam_df['capacity_factor'] > capacity_factor]
    expns_corr = {}
    for expns in steam_df.filter(regex='expns').columns.tolist():
        # Drop records with zero expense in this category -- some plant
        # types simply don't incur certain expenses.
        mwh_plants = steam_df.net_generation_mwh[steam_df[expns] != 0]
        expns_plants = steam_df[expns][steam_df[expns] != 0]
        expns_corr[expns] = np.corrcoef(mwh_plants, expns_plants)[0, 1]
    return expns_corr
|
[
"def fuel_ferc1(testing=False):\n pudl_engine = pudl.db_connect_pudl(testing=testing)\n fuel_ferc1_tbl = pt['fuel_ferc1']\n fuel_ferc1_select = sa.sql.select([fuel_ferc1_tbl, ])\n fuel_df = pd.read_sql(fuel_ferc1_select, pudl_engine)\n\n # We have two different ways of assessing the total cost of fuel given cost\n # per unit delivered and cost per mmbtu. They *should* be the same, but we\n # know they aren't always. Calculate both so we can compare both.\n fuel_df['fuel_consumed_total_mmbtu'] = \\\n fuel_df['fuel_qty_burned'] * fuel_df['fuel_avg_mmbtu_per_unit']\n fuel_df['fuel_consumed_total_cost_mmbtu'] = \\\n fuel_df['fuel_cost_per_mmbtu'] * fuel_df['fuel_consumed_total_mmbtu']\n fuel_df['fuel_consumed_total_cost_unit'] = \\\n fuel_df['fuel_cost_per_unit_burned'] * fuel_df['fuel_qty_burned']\n\n pu_ferc = plants_utils_ferc1(testing=testing)\n\n out_df = pd.merge(fuel_df, pu_ferc, on=['respondent_id', 'plant_name'])\n out_df = out_df.drop('id', axis=1)\n\n first_cols = [\n 'report_year',\n 'respondent_id',\n 'util_id_pudl',\n 'respondent_name',\n 'plant_id_pudl',\n 'plant_name'\n ]\n\n out_df = organize_cols(out_df, first_cols)\n\n return(out_df)",
"def fit_reservoir_capacity(effective_range=False):\n #%% compare nordpool reservoir capacities and reservoir capacities from reservoir database\n from power_plants import Database as PlantDatabase\n\n db = PlantDatabase(db='D:/Data/power_plants.db')\n\n\n df = db.select_data(table='reservoirs',select_column='country',\n column_vals=['United Kingdom','Germany','Poland','Sweden','Norway','Finland','Latvia','Lithuania'])\n df = df.loc[df.Hydroelectricity == 'x',:]\n\n df.drop(columns=[c for c in df.columns if c not in ['Country','Name_of_dam','Reservoir_capacity']],inplace=True)\n\n cap = df.groupby(['Country']).sum().loc[:,'Reservoir_capacity']\n cap.index = ['FI','DE','LV','LT','NO','PL','SE','GB']\n\n if effective_range:\n reservoir_capacity = { # GWh\n 'SE1':11326,\n 'SE2':13533,\n 'SE3':1790,\n 'SE4':180,\n 'FI':2952,\n 'NO1':6078,\n 'NO2':21671,\n 'NO3':7719,\n 'NO4':14676,\n 'NO5':14090,\n 'LT':11.8,\n 'LV':9.4,\n }\n else:\n reservoir_capacity = {\n 'NO1':5787,\n 'NO2':32725,\n 'NO3':7809,\n 'NO4':19367,\n 'NO5':16523,\n 'SE1':14810,\n 'SE2':15730,\n 'SE3':2911,\n 'SE4':224,\n 'FI':5530,\n 'LT':12.2,\n 'LV':11.2,\n }\n\n from model_definitions import country_to_areas\n\n df_cap = pd.DataFrame(dtype=float,index=['SE','FI','NO','LV','LT'],columns=['nordpool','reservoir'])\n for c in df_cap.index:\n df_cap.at[c,'nordpool'] = sum(reservoir_capacity[a] for a in country_to_areas[c])\n df_cap.at[c,'reservoir'] = cap.at[c]\n\n p = np.polyfit(x=df_cap.reservoir,y=df_cap.nordpool,deg=1)\n\n xvals = cap.loc[[c for c in cap.index if c not in df_cap.index]]\n yvals = np.polyval(p,xvals)\n\n #%%\n f,ax = plt.subplots()\n plt.plot(df_cap.reservoir,df_cap.nordpool,'*',label='Known')\n plt.plot(xvals,yvals,'o',label='Unknown')\n xx = [min(cap),max(cap)]\n plt.plot(xx,np.polyval(p,xx),'k--',label='fit')\n for x,y,name in zip(np.array(xvals),yvals,xvals.index):\n plt.text(x,y,name)\n for x,y,name in zip(df_cap.reservoir,df_cap.nordpool,df_cap.index):\n plt.text(x,y,name)\n 
plt.ylabel('Nordpool reservoir capacity (GWh)')\n plt.xlabel('Aquastat reservoir capacity (Mm3)')\n plt.legend()\n plt.grid()\n\n res = pd.Series(data=yvals,index=xvals.index)\n res.name = 'GWh'\n if effective_range:\n res.to_excel(Path(data_path) / f'reservoir_capacity_effective.xlsx')\n else:\n res.to_excel(Path(data_path) / f'reservoir_capacity.xlsx')\n return res",
"def get_esp_criteria():\n cd = {'crr_n15':\n {'W': [0.0, 0.15],\n 'M': [0.15, 0.25],\n 'S': [0.25, 0.45],\n 'R': [0.45, 0.60]},\n 'h_liq':\n {'T': [0.5, 3.0],\n 'M': [3.0, 7.0],\n 'L': [7.0, 10.0]},\n 'h_crust':\n {'S': [0.0, 2.0],\n 'M': [3.0, 7.0],\n 'D': [7.0, 10.0]\n }\n }\n\n return cd",
"def __get_pex_constraints(self):\n exch = self.cmodel.get_exchange_reactions()\n ext_comp = [i for i in self.cmodel.get_reaction_compartments(exch[0])][0]\n exch_metas = []\n for reac in exch:\n exch_metas += \\\n self.cmodel.reactions[reac].get_substrates() + \\\n self.cmodel.reactions[reac].get_products()\n pex_reacs = []\n for meta in exch_metas:\n pex_reacs += self.cmodel.get_metabolite_reactions(meta)\n pex_per_comp = {}\n for pex in pex_reacs:\n comps = self.cmodel.get_reaction_compartments(pex)\n for comp in comps:\n if comp != ext_comp:\n if comp not in pex_per_comp:\n pex_per_comp[comp] = [pex]\n elif comp in pex_per_comp:\n pex_per_comp[comp].append(pex)\n\n for model_name in list(self.model_dic.keys()):\n for two_comp_reac in self.cmodel.reactions:\n check_endswith = [compart.endswith(model_name) for\n compart in self.cmodel.get_reaction_compartments(two_comp_reac)]\n if sum(check_endswith) == len(check_endswith):\n if two_comp_reac not in pex_per_comp[self.extracellular_compartment_id + \"_\" + model_name]:\n pex_per_comp[self.extracellular_compartment_id + \"_\" + model_name].append(two_comp_reac)\n\n pex_constraints = {}\n for comp in pex_per_comp:\n pex_constraints[comp] = create_constraints(pex_per_comp[comp])\n return pex_constraints",
"def _create_spatial_rfs_coverage(self):\n\n # Set parameters for all cells\n n_cells = len(self.gc_df)\n spatial_df = self.exp_stat_df[self.exp_stat_df[\"domain\"] == \"spatial\"]\n for param_name, row in spatial_df.iterrows():\n shape, loc, scale, distribution, _ = row\n self.gc_df[param_name] = self._get_random_samples(\n shape, loc, scale, n_cells, distribution\n )\n\n # Calculate RF diameter scaling factor for all ganglion cells\n # Area of RF = Scaling_factor * Random_factor * Area of ellipse(semi_xc,semi_yc), solve Scaling_factor.\n area_of_ellipse = self.ellipse2area(\n self.gc_df[\"semi_xc\"], self.gc_df[\"semi_yc\"]\n ) # Units are pixels for the Chichilnisky data\n\n \"\"\"\n The area_of_rf contains area for all model units. Its sum must fill the whole area (coverage factor = 1).\n We do it separately for each ecc sector, step by step, to keep coverage factor at 1 despite changing gc density with ecc\n \"\"\"\n area_scaling_factors_coverage1 = np.zeros(area_of_ellipse.shape)\n for index, surface_area in enumerate(self.sector_surface_area_all):\n scaling_for_coverage_1 = (surface_area * 1e6) / np.sum(\n area_of_ellipse[self.gc_df[\"ecc_group_idx\"] == index]\n ) # in micrometers2\n\n area_scaling_factors_coverage1[\n self.gc_df[\"ecc_group_idx\"] == index\n ] = scaling_for_coverage_1\n\n # Apply scaling factors to semi_xc and semi_yc. 
Units are micrometers.\n # scale_random_distribution = 0.08 # Estimated by eye from Watanabe and Perry data.\n # Normal distribution with scale_random_distribution 0.08 cover about 25% above and below the mean value\n scale_random_distribution = 0.001\n random_normal_distribution1 = 1 + np.random.normal(\n scale=scale_random_distribution, size=n_cells\n )\n\n semi_xc = (\n np.sqrt(area_scaling_factors_coverage1)\n * self.gc_df[\"semi_xc\"]\n * random_normal_distribution1\n )\n random_normal_distribution2 = 1 + np.random.normal(\n scale=scale_random_distribution, size=n_cells\n ) # second randomization\n\n semi_yc = (\n np.sqrt(area_scaling_factors_coverage1)\n * self.gc_df[\"semi_yc\"]\n * random_normal_distribution2\n )\n\n # Scale from micrometers to millimeters and return to numpy matrix\n self.gc_df[\"semi_xc\"] = semi_xc / 1000\n self.gc_df[\"semi_yc\"] = semi_yc / 1000\n\n # self.gc_df[\"orient_cen\"] = self.gc_df[\n # \"pos_polar_deg\"\n # ] # plus some noise here TODO. See Watanabe 1989 JCompNeurol section Dendritic field orietation",
"def run(self, exposure, catalog):\n bbox = exposure.getBBox()\n\n self.log.info(\"Measuring aperture corrections for %d flux fields\" % (len(self.toCorrect),))\n # First, create a subset of the catalog that contains only selected stars\n # with non-flagged reference fluxes.\n subset1 = [record for record in self.starSelector.selectStars(exposure, catalog).starCat\n if not record.get(self.refFluxKeys.flag)]\n\n apCorrMap = ApCorrMap()\n\n # Outer loop over the fields we want to correct\n for name, keys in self.toCorrect.iteritems():\n fluxName = name + \"_flux\"\n fluxSigmaName = name + \"_fluxSigma\"\n\n # Create a more restricted subset with only the objects where the to-be-correct flux\n # is not flagged.\n subset2 = [record for record in subset1 if not record.get(keys.flag)]\n\n # Check that we have enough data points that we have at least the minimum of degrees of\n # freedom specified in the config.\n if len(subset2) - 1 < self.config.minDegreesOfFreedom:\n raise RuntimeError(\"Only %d sources for calculation of aperture correction for '%s'; \"\n \"require at least %d.\"\n % (len(subset2), name, self.config.minDegreesOfFreedom+1))\n apCorrMap[fluxName] = ChebyshevBoundedField(bbox, numpy.ones((1,1), dtype=float))\n apCorrMap[fluxSigmaName] = ChebyshevBoundedField(bbox, numpy.zeros((1,1), dtype=float))\n continue\n\n # If we don't have enough data points to constrain the fit, reduce the order until we do\n ctrl = self.config.fitConfig.makeControl()\n while len(subset2) - ctrl.computeSize() < self.config.minDegreesOfFreedom:\n if ctrl.orderX > 0:\n ctrl.orderX -= 1\n if ctrl.orderY > 0:\n ctrl.orderY -= 1\n\n # Fill numpy arrays with positions and the ratio of the reference flux to the to-correct flux\n x = numpy.zeros(len(subset2), dtype=float)\n y = numpy.zeros(len(subset2), dtype=float)\n apCorrData = numpy.zeros(len(subset2), dtype=float)\n indices = numpy.arange(len(subset2), dtype=int)\n for n, record in enumerate(subset2):\n x[n] = record.getX()\n y[n] = 
record.getY()\n apCorrData[n] = record.get(self.refFluxKeys.flux)/record.get(keys.flux)\n\n for _i in range(self.config.numIter):\n\n # Do the fit, save it in the output map\n apCorrField = ChebyshevBoundedField.fit(bbox, x, y, apCorrData, ctrl)\n\n # Compute errors empirically, using the RMS difference between the true reference flux and the\n # corrected to-be-corrected flux.\n apCorrDiffs = apCorrField.evaluate(x, y)\n apCorrDiffs -= apCorrData\n apCorrErr = numpy.mean(apCorrDiffs**2)**0.5\n\n # Clip bad data points\n apCorrDiffLim = self.config.numSigmaClip * apCorrErr\n keep = numpy.fabs(apCorrDiffs) <= apCorrDiffLim\n x = x[keep]\n y = y[keep]\n apCorrData = apCorrData[keep]\n indices = indices[keep]\n\n # Final fit after clipping\n apCorrField = ChebyshevBoundedField.fit(bbox, x, y, apCorrData, ctrl)\n\n self.log.info(\"Aperture correction for %s: RMS %f from %d\" %\n (name, numpy.mean((apCorrField.evaluate(x, y) - apCorrData)**2)**0.5, len(indices)))\n\n # Save the result in the output map\n # The error is constant spatially (we could imagine being\n # more clever, but we're not yet sure if it's worth the effort).\n # We save the errors as a 0th-order ChebyshevBoundedField\n apCorrMap[fluxName] = apCorrField\n apCorrErrCoefficients = numpy.array([[apCorrErr]], dtype=float)\n apCorrMap[fluxSigmaName] = ChebyshevBoundedField(bbox, apCorrErrCoefficients)\n\n # Record which sources were used\n for i in indices:\n subset2[i].set(keys.used, True)\n\n return Struct(\n apCorrMap = apCorrMap,\n )",
"def calcEfPowRCCISPR(Prsa, Lcable, Linsertion, Glna, Aeff, Freq):\n\teps0 = 8.854E-12\n\tmu0 = 4*np.pi*1E-7\n\t#c0 = 1./np.sqrt(eps0 * mu0)\n\tZo= np.sqrt(mu0/eps0)\n\t#Zo =377 # Free Space Impendance\n\tLinsertion = (Linsertion - 10*np.log(Aeff)) * -1.00 #Given as a loss in negative dB of the ACF of the Chamber\n\tPlessloss = Prsa + Lcable + Linsertion - Glna\n\tEfield = np.ones(Plessloss.size)\n\tfor i,F in enumerate(Freq):\n\t\tif F < 1000:\n\t\t\tr = 10\n\t\t\tCFactor = 10*np.log10(Zo/(4*(np.pi)*(r**2))) + 90 #Conversion factor (from dBm to dBuV/m)\n\t\t\tEfield[i] = Plessloss[i] + CFactor\n\t\telse:\n\t\t\tr = 3\n\t\t\tCFactor = 10*np.log10(Zo/(4*(np.pi)*(r**2))) + 90 #Conversion factor (from dBm to dBuV/m)\n\t\t\tEfield[i] = Plessloss[i] + CFactor\n\t#print(CFactor)\n\treturn(Efield, Plessloss)",
"def corrected_cation_calculation(df_exc_fe, df_inc_fe):\n corrected_df = df_exc_fe.iloc[:, :-1].copy()\n for i in range(df_exc_fe.shape[0]):\n corrected_df.iloc[i, :] = np.array(df_exc_fe.iloc[i, :-1]) * 8 / df_exc_fe[\"total_cation_formula\"][i]\n fe3 = []\n fe2 = []\n for j in range(df_inc_fe.shape[0]):\n fe3_temp = 24 * (1 - 8 / df_inc_fe[\"total_cation_formula\"][j])\n fe3.append(fe3_temp)\n fe2_temp = df_inc_fe[\"Fe\"][j] * 8 / df_inc_fe[\"total_cation_formula\"][j] - fe3_temp\n fe2.append(fe2_temp)\n corrected_df[\"Fe2+\"] = np.array(fe2).reshape(-1, 1)\n corrected_df[\"Fe3+\"] = np.array(fe3).reshape(-1, 1)\n return corrected_df",
"def read_organic_carbon_growth_factors(ffoc_gfdir):\n\n gf_ffoc_raw = eu.csv_read(ffoc_gfdir + 'GF_fossilFuelOC_calcS.csv')\n gf_ffoc_raw = np.array(gf_ffoc_raw)[1:, :] # skip header\n gf_ffoc = {'RH_frac': np.array(gf_ffoc_raw[:, 0], dtype=float),\n 'GF': np.array(gf_ffoc_raw[:, 1], dtype=float)}\n\n return gf_ffoc",
"def update_covered_fields(cls, fields):\n fields = copy.deepcopy(fields)\n frac, depth = cls.covered(fields)\n done = (fields['PRIORITY'] == DONE)\n print(\"Found %i exposures already done.\"%done.sum())\n\n wide = np.char.endswith(fields['PROGRAM'],'-wide')\n teff_min_wide = pd.DataFrame(fields).merge(TEFF_MIN_WIDE,on='FILTER',how='left').to_records()['TEFF']\n covered_wide = depth > teff_min_wide*fields['TILING']*fields['EXPTIME']\n done_wide = wide & covered_wide\n print('Found %i WIDE exposures newly done.'%(done_wide & ~done).sum())\n\n mc = np.char.endswith(fields['PROGRAM'],'-mc')\n teff_min_mc = pd.DataFrame(fields).merge(TEFF_MIN_MC,on='FILTER',how='left').to_records()['TEFF']\n covered_mc = depth > teff_min_mc*fields['TILING']*fields['EXPTIME']\n done_mc = mc & covered_mc\n print('Found %i MC exposures newly done.'%(done_mc & ~done).sum())\n\n extra = np.char.endswith(fields['PROGRAM'],'-extra')\n teff_min_extra = pd.DataFrame(fields).merge(TEFF_MIN_EXTRA,on='FILTER',how='left').to_records()['TEFF']\n covered_extra = depth > teff_min_extra*fields['TILING']*fields['EXPTIME']\n done_extra = extra & covered_extra\n print('Found %i EXTRA exposures newly done.'%(done_extra & ~done).sum())\n\n fields['PRIORITY'][done_wide & ~done] = DONE\n fields['PRIORITY'][done_mc & ~done] = DONE\n fields['PRIORITY'][done_extra & ~done] = DONE\n\n return fields",
"def eRates(self, Gp, Gm, eDiffp, eDiffm, corrDiffpm, tp, tm):\r\n #For now we take the derivative of the function fp and fm, because the \r\n #measured difference doesn't change anything in the derivative. So we put \r\n #zero for the measured difference in the functions.\r\n \r\n eDiffp_2 = eDiffp*eDiffp\r\n eDiffm_2 = eDiffm*eDiffm\r\n \r\n #Error in gamma+, including the correlation\r\n ap = 1/self.ddiffpdGp(tp, Gp, Gm, self.dGp)\r\n bp = 1/self.ddiffmdGp(tm, Gp, Gm, self.dGp)\r\n eGp = np.sqrt( eDiffp_2*ap**2 + eDiffm_2*bp**2 + 2*ap*bp*corrDiffpm )\r\n \r\n #Error in gamma-, including the correlation\r\n am = 1/self.ddiffpdGm(tp, Gp, Gm, self.dGm)\r\n bm = 1/self.ddiffmdGm(tm, Gp, Gm, self.dGm)\r\n eGm = np.sqrt( eDiffp_2*am**2 + eDiffm_2*bm**2 + 2*am*bm*corrDiffpm ) \r\n \r\n #Correlation between gamma+ and gamma-\r\n corrGpm = (ap*am*eDiffp_2 + bp*bm*eDiffm_2 +\r\n (ap*bm + am*bp)*corrDiffpm )\r\n \r\n return (eGp, eGm, corrGpm)",
"def get_CE_rate_coe(_CE_fac, _Te, _gi, _dEij, _type):\n\n _kT = Cst.k_ * _Te\n\n if _type == \"ECS\":\n\n _CEij = (8.63E-06 * _CE_fac) / (_gi * _Te**0.5) * np.exp( - _dEij / _kT )\n\n else:\n return None\n\n return _CEij",
"def fuel_receipts_costs_eia923(freq=None, testing=False,\n start_date=None, end_date=None):\n pudl_engine = pudl.db_connect_pudl(testing=testing)\n # Most of the fields we want come direclty from Fuel Receipts & Costs\n frc_tbl = pt['fuel_receipts_costs_eia923']\n frc_select = sa.sql.select([frc_tbl, ])\n\n # Need to re-integrate the MSHA coalmine info:\n cmi_tbl = pt['coalmine_info_eia923']\n cmi_select = sa.sql.select([cmi_tbl, ])\n cmi_df = pd.read_sql(cmi_select, pudl_engine)\n\n if start_date is not None:\n frc_select = frc_select.where(\n frc_tbl.c.report_date >= start_date)\n if end_date is not None:\n frc_select = frc_select.where(\n frc_tbl.c.report_date <= end_date)\n\n frc_df = pd.read_sql(frc_select, pudl_engine)\n frc_df = frc_df.rename(columns={'plant_id': 'plant_id_eia'})\n\n frc_df = pd.merge(frc_df, cmi_df,\n how='left',\n left_on='coalmine_id',\n right_on='id')\n\n cols_to_drop = ['fuel_receipt_id', 'coalmine_id', 'id']\n frc_df = frc_df.drop(cols_to_drop, axis=1)\n\n # Calculate a few totals that are commonly needed:\n frc_df['total_heat_content_mmbtu'] = \\\n frc_df['heat_content_mmbtu_per_unit'] * frc_df['fuel_quantity']\n frc_df['total_fuel_cost'] = \\\n frc_df['total_heat_content_mmbtu'] * frc_df['fuel_cost_per_mmbtu']\n\n by = ['plant_id_eia', 'fuel_type_pudl']\n if freq is not None:\n # Create a date index for temporal resampling:\n frc_df = frc_df.set_index(pd.DatetimeIndex(frc_df.report_date))\n by = by + [pd.Grouper(freq=freq)]\n # Sum up these values so we can calculate quantity weighted averages\n frc_df['total_ash_content'] = \\\n frc_df['ash_content_pct'] * frc_df['fuel_quantity']\n frc_df['total_sulfur_content'] = \\\n frc_df['sulfur_content_pct'] * frc_df['fuel_quantity']\n frc_df['total_mercury_content'] = \\\n frc_df['mercury_content_ppm'] * frc_df['fuel_quantity']\n\n frc_gb = frc_df.groupby(by=by)\n frc_df = frc_gb.agg({\n 'fuel_quantity': np.sum,\n 'total_heat_content_mmbtu': np.sum,\n 'total_fuel_cost': np.sum,\n 
'total_sulfur_content': np.sum,\n 'total_ash_content': np.sum,\n 'total_mercury_content': np.sum,\n })\n\n frc_df['fuel_cost_per_mmbtu'] = \\\n frc_df['total_fuel_cost'] / frc_df['total_heat_content_mmbtu']\n frc_df['heat_content_mmbtu_per_unit'] = \\\n frc_df['total_heat_content_mmbtu'] / frc_df['fuel_quantity']\n frc_df['sulfur_content_pct'] = \\\n frc_df['total_sulfur_content'] / frc_df['fuel_quantity']\n frc_df['ash_content_pct'] = \\\n frc_df['total_ash_content'] / frc_df['fuel_quantity']\n frc_df['mercury_content_ppm'] = \\\n frc_df['total_mercury_content'] / frc_df['fuel_quantity']\n frc_df = frc_df.reset_index()\n frc_df = frc_df.drop(['total_ash_content',\n 'total_sulfur_content',\n 'total_mercury_content'], axis=1)\n\n # Bring in some generic plant & utility information:\n pu_eia = plants_utils_eia(start_date=start_date,\n end_date=end_date,\n testing=testing)\n out_df = analysis.merge_on_date_year(frc_df, pu_eia, on=['plant_id_eia'])\n\n # Drop any records where we've failed to get the 860 data merged in...\n out_df = out_df.dropna(subset=['operator_id', 'operator_name'])\n\n if freq is None:\n # There are a couple of invalid records with no specified fuel.\n out_df = out_df.dropna(subset=['fuel_group'])\n\n first_cols = ['report_date',\n 'plant_id_eia',\n 'plant_id_pudl',\n 'plant_name',\n 'operator_id',\n 'util_id_pudl',\n 'operator_name', ]\n\n # Re-arrange the columns for easier readability:\n out_df = organize_cols(out_df, first_cols)\n\n # Clean up the types of a few columns...\n out_df['plant_id_eia'] = out_df.plant_id_eia.astype(int)\n out_df['plant_id_pudl'] = out_df.plant_id_pudl.astype(int)\n out_df['operator_id'] = out_df.operator_id.astype(int)\n out_df['util_id_pudl'] = out_df.util_id_pudl.astype(int)\n\n return(out_df)",
"def influence_df(df, G):\n\n core = trim_degrees(G)\n deg = nx.degree(G) # Degree centrality\n ds = sorted_map(deg)\n cent = nx.closeness_centrality(core) # Closeness centrality\n cs = sorted_map(cent)\n bet = nx.betweenness_centrality(core) #Betweenness centrality\n bs = sorted_map(bet)\n eig = nx.eigenvector_centrality_numpy(core) #Eigenvector centrality\n es = sorted_map(eig)\n\n degree_names = [x[0] for x in ds[0:250]]\n close_names = [x[0] for x in cs[0:]]\n bet_names = [x[0] for x in bs[0:]]\n eig_names = [x[0] for x in es[0:]]\n\n union_names = list(set(degree_names) | set(close_names) | set(bet_names) | set(eig_names))\n influence_table = [[name, deg[name], cent[name], bet[name], eig[name]] for name in union_names]\n\n df_influence = pd.DataFrame(\n influence_table, columns=['company_name', 'degree_centrality',\n 'closeness_centrality', 'betweenness_centrality', 'eigenvector_centrality'])\n company_list = set(df['company_name']) #v Old Version\n df_influence['type'] = [0 if x in company_list else 1 for x in df_influence['company_name']]\n\n # investor_list = set(df['investor_name'])\n # df_influence['type'] = [1 if x in investor_list else 0 for x in df_influence['company_name']]\n\n df_influence = df_influence[(df_influence['type'] == 1)]\n df_influence['eigen_and_close'] = df_influence['closeness_centrality'] + df_influence['eigenvector_centrality']\n df_influence.sort_values('eigen_and_close', ascending=False, inplace=True)\n df_influence.reset_index(inplace=True)\n df_influence['influence_rank'] = [x/500 for x in df_influence.index]\n return df_influence",
"def _calculate_coexpression(self, significance_thresh=3):\n # 1. Calculate the PCCs\n self.log(\"Calculating Coexpression\")\n num_bytes_needed = comb(self.shape()[0], 2) * 8\n if num_bytes_needed > psutil.virtual_memory().available:\n raise MemoryError(\"Not enough RAM to calculate co-expression network\")\n # pass in a contigious array to the cython function to calculate PCCs\n pccs = PCCUP.pair_correlation(\n np.ascontiguousarray(\n # PCCUP expects floats\n self._expr.as_matrix().astype(\"float\")\n )\n )\n\n self.log(\"Applying Fisher Transform\")\n pccs[pccs >= 1.0] = 0.9999999\n pccs[pccs <= -1.0] = -0.9999999\n pccs = np.arctanh(pccs)\n gc.collect()\n\n # Do a PCC check to make sure they are not all NaNs\n if not any(np.logical_not(np.isnan(pccs))):\n raise ValueError(\n \"Not enough data is available to reliably calculate co-expression, \"\n \"please ensure you have more than 10 accessions to calculate correlation coefficient\"\n )\n\n self.log(\"Calculating Mean and STD\")\n # Sometimes, with certain datasets, the NaN mask overlap\n # completely for the two genes expression data making its PCC a nan.\n # This affects the mean and std fro the gene.\n pcc_mean = np.ma.masked_array(pccs, np.isnan(pccs)).mean()\n self._global(\"pcc_mean\", pcc_mean)\n gc.collect()\n pcc_std = np.ma.masked_array(pccs, np.isnan(pccs)).std()\n self._global(\"pcc_std\", pcc_std)\n gc.collect()\n\n # 2. Calculate Z Scores\n self.log(\"Finding adjusted scores\")\n pccs = (pccs - pcc_mean) / pcc_std\n gc.collect()\n\n # 3. Build the dataframe\n self.log(\"Build the dataframe and set the significance threshold\")\n self._global(\"significance_threshold\", significance_thresh)\n raw_coex = self._raw_coex(pccs, significance_thresh)\n del pccs\n gc.collect()\n\n # 4. 
Calculate Gene Distance\n self.log(\"Calculating Gene Distance\")\n raw_coex.addcol(\n self.refgen.pairwise_distance(\n gene_list=self.refgen.from_ids(self._expr.index)\n ),\n pos=1,\n name=\"distance\",\n )\n gc.collect()\n\n # 5. Cleanup\n raw_coex.flush()\n del raw_coex\n gc.collect()\n\n # 6. Load the new table into the object\n self.coex = self._bcolz(\"coex\", blaze=True)\n self.set_sig_edge_zscore(float(self._global(\"significance_threshold\")))\n self.log(\"Done\")\n return self",
"def generation_fuel_all_eia923(gf: pd.DataFrame, gfn: pd.DataFrame) -> pd.DataFrame:\n primary_key = [\n \"report_date\",\n \"plant_id_eia\",\n \"prime_mover_code\",\n \"energy_source_code\",\n ]\n sum_cols = [\n \"fuel_consumed_for_electricity_mmbtu\",\n \"fuel_consumed_for_electricity_units\",\n \"fuel_consumed_mmbtu\",\n \"fuel_consumed_units\",\n \"net_generation_mwh\",\n ]\n other_cols = [\n \"nuclear_unit_id\", # dropped in the groupby / aggregation.\n \"fuel_mmbtu_per_unit\", # recalculated based on aggregated sum_cols.\n ]\n # Rather than enumerating all of the non-data columns, identify them by process of\n # elimination, in case they change in the future.\n non_data_cols = list(set(gfn.columns) - set(primary_key + sum_cols + other_cols))\n\n gfn_gb = gfn.groupby(primary_key)\n # Ensure that all non-data columns are homogeneous within groups\n if not (gfn_gb[non_data_cols].nunique() == 1).all(axis=None):\n raise ValueError(\n \"Found inhomogeneous non-data cols while aggregating nuclear generation.\"\n )\n gfn_agg = pd.concat(\n [\n gfn_gb[non_data_cols].first(),\n gfn_gb[sum_cols].sum(min_count=1),\n ],\n axis=\"columns\",\n )\n # Nuclear plants don't report units of fuel consumed, so fuel heat content ends up\n # being calculated as infinite. However, some nuclear plants report using small\n # amounts of DFO. Ensure infite heat contents are set to NA instead:\n gfn_agg = gfn_agg.assign(\n fuel_mmbtu_per_unit=np.where(\n gfn_agg.fuel_consumed_units != 0,\n gfn_agg.fuel_consumed_mmbtu / gfn_agg.fuel_consumed_units,\n np.nan,\n )\n ).reset_index()\n return pd.concat([gfn_agg, gf]).sort_values(primary_key).reset_index(drop=True)",
"def calc_r2eff(self):\n\n # Assemble param vector.\n self.params = self.assemble_param_vector(r2=self.r2, r2a=self.r2a, r2b=self.r2b, dw=self.dw, pA=self.pA, kex=self.kex, spins_params=self.spins_params)\n\n # Make nested list arrays of data. And return them.\n values, errors, cpmg_frqs, missing, frqs, exp_types, relax_times, offsets = self.return_r2eff_arrays()\n\n # Unpack the parameter values.\n # Initialise the post spin parameter indices.\n end_index = []\n # The spin and frequency dependent R2 parameters.\n end_index.append(len(self.exp_type) * self.num_spins * len(self.fields))\n if self.model in [\"CR72 full\"]:\n end_index.append(2 * len(self.exp_type) * self.num_spins * len(self.fields))\n # The spin and dependent parameters (phi_ex, dw, padw2).\n end_index.append(end_index[-1] + self.num_spins)\n\n # Unpack the parameter values.\n R20 = self.params[:end_index[1]].reshape(self.num_spins*2, len(self.fields))\n R20A = R20[::2].flatten()\n R20B = R20[1::2].flatten()\n dw = self.params[end_index[1]:end_index[2]]\n pA = self.params[end_index[2]]\n kex = self.params[end_index[2]+1]\n\n # Copy value structure\n self.back_calc = deepcopy(values)\n\n # Setup special numpy array structures, for higher dimensional computation.\n # Get the shape of back_calc structure.\n back_calc_shape = list( asarray(self.back_calc).shape )[:4]\n\n # Find which frequency has the maximum number of disp points.\n # To let the numpy array operate well together, the broadcast size has to be equal for all shapes.\n self.max_num_disp_points = max(self.num_disp_points)\n\n # Create numpy arrays to pass to the lib function.\n # All numpy arrays have to have same shape to allow to multiply together.\n # The dimensions should be [ei][si][mi][oi][di]. [Experiment][spins][spec. 
frq][offset][disp points].\n # The number of disp point can change per spectrometer, so we make the maximum size.\n self.R20A_a = ones(back_calc_shape + [self.max_num_disp_points])\n self.R20B_a = ones(back_calc_shape + [self.max_num_disp_points])\n self.dw_frq_a = ones(back_calc_shape + [self.max_num_disp_points])\n self.cpmg_frqs_a = ones(back_calc_shape + [self.max_num_disp_points])\n self.num_disp_points_a = ones(back_calc_shape + [self.max_num_disp_points])\n self.back_calc_a = ones(back_calc_shape + [self.max_num_disp_points])\n\n # Loop over the spins.\n for si in range(self.num_spins):\n # Loop over the spectrometer frequencies.\n for mi in range(len(self.fields)):\n # Extract number of dispersion points.\n num_disp_points = self.num_disp_points[mi]\n\n # Extract cpmg_frqs and num_disp_points from lists.\n self.cpmg_frqs_a[0][si][mi][0][:num_disp_points] = cpmg_frqs[0][mi][0]\n self.num_disp_points_a[0][si][mi][0][:num_disp_points] = self.num_disp_points[mi]\n\n # Now calculate.\n\n # Loop over the spins.\n for si in range(self.num_spins):\n # Loop over the spectrometer frequencies.\n for mi in range(len(self.fields)):\n # Extract number of dispersion points.\n num_disp_points = len(cpmg_frqs[0][mi][0])\n\n # The R20 index.\n r20_index = mi + si*len(self.fields)\n\n # Store r20a and r20b values per disp point.\n self.R20A_a[0][si][mi][0] = array( [R20A[r20_index]] * self.max_num_disp_points, float64)\n self.R20B_a[0][si][mi][0] = array( [R20B[r20_index]] * self.max_num_disp_points, float64)\n\n # Convert dw from ppm to rad/s.\n dw_frq = dw[si] * frqs[0][si][mi]\n\n # Store dw_frq per disp point.\n self.dw_frq_a[0][si][mi][0] = array( [dw_frq] * self.max_num_disp_points, float64)\n\n ## Back calculate the R2eff values.\n r2eff_CR72(r20a_orig=self.R20A_a, r20b_orig=self.R20B_a, dw_orig=self.dw_frq_a, r20a=self.R20A_a, r20b=self.R20B_a, pA=pA, dw=self.dw_frq_a, kex=kex, cpmg_frqs=self.cpmg_frqs_a, back_calc=self.back_calc_a)\n\n # Now return the values back to 
the structure of self.back_calc object.\n ## For all missing data points, set the back-calculated value to the measured values so that it has no effect on the chi-squared value.\n # Loop over the spins.\n for si in range(self.num_spins):\n # Loop over the spectrometer frequencies.\n for mi in range(len(self.fields)):\n # Extract number of dispersion points.\n num_disp_points = self.num_disp_points[mi]\n\n # Extract the value\n self.back_calc[0][si][mi][0][:] = self.back_calc_a[0][si][mi][0][:num_disp_points]\n\n # Check values.\n for di in range(num_disp_points):\n self.assertAlmostEqual(self.back_calc[0][si][mi][0][di], self.R20A_a[0][si][mi][0][di])",
"def return_r2eff_arrays(self):\n\n # Initialise the data structures for the target function.\n exp_types = []\n values = []\n errors = []\n missing = []\n frqs = []\n frqs_H = []\n relax_times = []\n offsets = []\n for ei in range(len(self.exp_type)):\n values.append([])\n errors.append([])\n missing.append([])\n frqs.append([])\n frqs_H.append([])\n relax_times.append([])\n offsets.append([])\n for si in range(self.num_spins):\n values[ei].append([])\n errors[ei].append([])\n missing[ei].append([])\n frqs[ei].append([])\n frqs_H[ei].append([])\n offsets[ei].append([])\n for mi in range(len(self.fields)):\n values[ei][si].append([])\n errors[ei][si].append([])\n missing[ei][si].append([])\n frqs[ei][si].append(0.0)\n frqs_H[ei][si].append(0.0)\n offsets[ei][si].append([])\n for oi in range(len(self.offset)):\n values[ei][si][mi].append([])\n errors[ei][si][mi].append([])\n missing[ei][si][mi].append([])\n offsets[ei][si][mi].append([])\n for mi in range(len(self.fields)):\n relax_times[ei].append(None)\n\n cpmg_frqs = []\n for ei in range(len(self.exp_type)):\n cpmg_frqs.append([])\n for mi in range(len(self.fields)):\n cpmg_frqs[ei].append([])\n for oi in range(len(self.offset)):\n #cpmg_frqs[ei][mi].append(self.points)\n cpmg_frqs[ei][mi].append([])\n\n\n # Pack the R2eff/R1rho data.\n si = 0\n for spin_index in range(self.num_spins):\n data_flag = True\n\n for ei in range(len(self.exp_type)):\n exp_type = self.exp_type[ei]\n # Add the experiment type.\n if exp_type not in exp_types:\n exp_types.append(exp_type)\n\n for mi in range(len(self.fields)):\n # Get the frq.\n frq = self.fields[mi]\n\n # The Larmor frequency for this spin (and that of an attached proton for the MMQ models) and field strength (in MHz*2pi to speed up the ppm to rad/s conversion).\n frqs[ei][si][mi] = 2.0 * pi * frq / g1H * g15N * 1e-6\n\n # Get the cpmg frq.\n cpmg_frqs[ei][mi][oi] = self.points[mi]\n\n for oi in range(len(self.offset)):\n for di in range(len(self.points[mi])):\n\n 
missing[ei][si][mi][oi].append(0)\n\n # Values\n values[ei][si][mi][oi].append(self.value[mi][di])\n # The errors.\n errors[ei][si][mi][oi].append(self.error[mi][di])\n\n # The relaxation times.\n # Found.\n relax_time = self.relax_times[mi]\n\n # Store the time.\n relax_times[ei][mi] = relax_time\n\n # Increment the spin index.\n si += 1\n\n # Convert to numpy arrays.\n relax_times = array(relax_times, float64)\n for ei in range(len(self.exp_type)):\n for si in range(self.num_spins):\n for mi in range(len(self.fields)):\n for oi in range(len(self.offset)):\n values[ei][si][mi][oi] = array(values[ei][si][mi][oi], float64)\n errors[ei][si][mi][oi] = array(errors[ei][si][mi][oi], float64)\n missing[ei][si][mi][oi] = array(missing[ei][si][mi][oi], int32)\n\n # Return the structures.\n return values, errors, cpmg_frqs, missing, frqs, exp_types, relax_times, offsets",
"def get_clean_epc():\n epc = get_epc()\n #\n # Calculate median energy rating for each LA:\n epc_medians = (\n epc.groupby(\"LOCAL_AUTHORITY\")[\"CURRENT_ENERGY_EFFICIENCY\"]\n .apply(np.median)\n .reset_index(name=\"median_energy_efficiency\")\n )\n #\n # Calculate proportions of 'improvable' social housing\n # (socially rented dwellings that are currently EPC D or below,\n # and have the potential to be C or above)\n #\n # There are two different strings signifying socially rented\n # in the TENURE column of the EPC data:\n epc_social = epc.loc[epc[\"TENURE\"].isin([\"rental (social)\", \"Rented (social)\"])]\n #\n epc_social[\"is_improvable\"] = (\n epc_social[\"CURRENT_ENERGY_RATING\"].isin([\"G\", \"F\", \"E\", \"D\"])\n ) & (epc_social[\"POTENTIAL_ENERGY_RATING\"].isin([\"C\", \"B\", \"A\"]))\n #\n # Find the numbers of improvable / not improvable social houses in each LA\n potential_counts = (\n epc_social.groupby([\"LOCAL_AUTHORITY\", \"is_improvable\"])[\n [\"LOCAL_AUTHORITY\", \"is_improvable\"]\n ]\n .size()\n .reset_index(name=\"count\")\n .pivot(index=\"LOCAL_AUTHORITY\", columns=\"is_improvable\", values=\"count\")\n .rename(columns={True: \"total_improvable\", False: \"total_not_improvable\"})\n )\n # Calculate proportions\n potential_counts.columns.name = None\n potential_counts[\"total_social\"] = potential_counts.sum(axis=1)\n potential_counts[\"prop_improvable\"] = (\n potential_counts[\"total_improvable\"] / potential_counts[\"total_social\"]\n )\n potential_counts = potential_counts.reset_index()[\n [\"LOCAL_AUTHORITY\", \"total_improvable\", \"prop_improvable\"]\n ]\n # Join to medians\n clean_epc = epc_medians.merge(potential_counts, on=\"LOCAL_AUTHORITY\").rename(\n columns={\"LOCAL_AUTHORITY\": \"code\"}\n )\n #\n return clean_epc"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generate a dataframe with the proportion of generation for each generator.
|
def generator_proportion_eia923(g):
# Set the datetimeindex
g = g.set_index(pd.DatetimeIndex(g['report_date']))
# groupby plant_id and by year
g_yr = g.groupby([pd.TimeGrouper(freq='A'), 'plant_id', 'generator_id'])
# sum net_gen by year by plant
g_net_generation_per_generator = pd.DataFrame(
g_yr.net_generation_mwh.sum())
g_net_generation_per_generator = \
g_net_generation_per_generator.reset_index(level=['generator_id'])
# groupby plant_id and by year
g_net_generation_per_plant = g.groupby(
[pd.TimeGrouper(freq='A'), 'plant_id'])
# sum net_gen by year by plant and convert to datafram
g_net_generation_per_plant = pd.DataFrame(
g_net_generation_per_plant.net_generation_mwh.sum())
# Merge the summed net generation by generator with the summed net
# generation by plant
g_gens_proportion = g_net_generation_per_generator.merge(
g_net_generation_per_plant, how="left", left_index=True,
right_index=True)
g_gens_proportion['proportion_of_generation'] = (
g_gens_proportion.net_generation_mwh_x /
g_gens_proportion.net_generation_mwh_y)
# Remove the net generation columns
g_gens_proportion = g_gens_proportion.drop(
['net_generation_mwh_x', 'net_generation_mwh_y'], axis=1)
return(g_gens_proportion)
|
[
"def initialize_genomes_df():\n # Genomes attribution:\n # Weights for use of Evolving Algorithms to evaluate moves.\n weights = ['w_highest tile',\n 'w_score',\n 'w_number of zeros',\n 'w_potential two step score',\n 'w_distance from right',\n 'w_distance from corner']\n # additional attribute for easier management.\n cols = weights + ['generation', 'max tile', 'final score']\n # Store genomes in a pd.DataFrame\n genomes = pd.DataFrame(columns=cols)\n # Initialize genome population with random weights\n generation = 1\n for _ in range(POPULATION_SIZE):\n rand_weights = list(np.random.rand(len(weights)) - WEIGHT_CONST)\n genomes.loc[len(genomes)] = rand_weights + [generation, None, None]\n return genomes",
"def process(df, return_metrics = True, pathway_generator_df = pd.DataFrame()):\r\n ngenes=[]\r\n explained_ratios =[]\r\n pathway_generator_df = read_reactome(reactome_file)\r\n pathways= pathway_generator_df.index\r\n pathway_id = pathway_generator_df[\"pathway_id\"]\r\n pathway_name = pathway_generator_df[\"pathway_name\"]\r\n for i in pathways: #loop the pathways to embed ngene and ratio info\r\n genes = pathway_generator_df.loc[i]\r\n test = [x in df.columns for x in genes.tolist()[0]] #gene number calculation\r\n ngene = sum(test)\r\n test = any(test)\r\n if test:\r\n sub_df = df.loc[:,genes.tolist()[0]].transpose()\r\n components, explained_ratio = analyze.my_pca(sub_df) #ratio calculation\r\n #key function from analyze file, can be modified when data updated.\r\n explained_ratio = np.array(explained_ratio)\r\n else:\r\n explained_ratio = float('nan')\r\n ngene = 0\r\n ngenes.append(ngene)\r\n explained_ratios = np.append(explained_ratios, explained_ratio)\r\n out_df = pd.DataFrame(columns = [\"pathways\",'pathway_id', 'pathway_name', \"explained_ratios\",\"ngenes\"]) #set up columns beforehands\r\n if return_metrics:\r\n for i in range(len(pathways)): #fill up the column values from each lists\r\n out_df = out_df.append({\"pathways\":pathways[i],'pathway_id':pathway_id[i], 'pathway_name': pathway_name[i], \"explained_ratios\":explained_ratios[i], \"ngenes\":ngenes[i]}, ignore_index=True)\r\n return out_df",
"def test_get_proportions_data():\n test_data = {'YEAR': [2012, 2012, 2012, 2012, 2013, 2013, 2013, 2013,\n 2014, 2014, 2014, 2014, 2015, 2015, 2015, 2015,\n 2016, 2016, 2016, 2016, 2017, 2017, 2017, 2017,\n 2018, 2018, 2018, 2018],\n 'TREAT_EARLY': [1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1,\n 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0],\n 'TREAT_LATE': [0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0,\n 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0],\n 'GOOD_GENHLTH': [0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1,\n 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1],\n 'PHYS_DISTRESS': [0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0,\n 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1],\n 'MENT_DISTRESS': [1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1,\n 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0],\n 'POOR_OVR_HLTH': [0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0,\n 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1],\n 'HLTHPLN': [0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1,\n 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0],\n 'HAS_PERSDOC': [1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1,\n 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1],\n 'MEDCOST': [0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1,\n 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0],\n 'ANNUAL_CHECKUP': [1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1,\n 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0]}\n\n expected_data = {'YEAR': [2012, 2012, 2012, 2012, 2013, 2013, 2013, 2013,\n 2014, 2014, 2014, 2014, 2015, 2015, 2015, 2015,\n 2016, 2016, 2016, 2016, 2017, 2017, 2017, 2017,\n 2018, 2018, 2018, 2018],\n 'TREAT_EARLY': [1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0,\n 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0],\n 'TREAT_LATE': [0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1,\n 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0],\n 'GOOD_GENHLTH': [0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0,\n 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0,\n 0, 1],\n 'PHYS_DISTRESS': [0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1,\n 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0,\n 0, 1],\n 'MENT_DISTRESS': [1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1,\n 1, 1, 1, 0, 0, 0, 1, 1, 
0, 0, 1, 1, 1,\n 0, 0],\n 'POOR_OVR_HLTH': [0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1,\n 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0,\n 0, 1],\n 'HLTHPLN': [0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1,\n 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0],\n 'HAS_PERSDOC': [1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1,\n 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1],\n 'MEDCOST': [0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0,\n 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0],\n 'ANNUAL_CHECKUP': [1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0,\n 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0,\n 1, 0],\n 'PROP_GOOD_GENHLTH': [1/3, 1/1, 1/3, 1/3, 0/2, 1/1, 1/1,\n 0/2, 1/2, 1/2, 0/1, 1/1, 1/2, 1/2,\n 1/2, 1/2, 1/1, 1/2, 1/2, 1/1, 1/2,\n 1/2, 1/2, 1/2, 1/2, 1/2, 0/1, 1/1],\n 'PROP_PHYS_DISTRESS': [1/3, 0/1, 1/3, 1/3, 1/2, 0/1, 0/1,\n 1/2, 1/2, 1/2, 0/1, 1/1, 1/2, 1/2,\n 1/2, 1/2, 1/1, 1/2, 1/2, 0/1, 2/2,\n 1/2, 1/2, 2/2, 0/2, 0/2, 0/1, 1/1],\n 'PROP_MENT_DISTRESS': [2/3, 0/1, 2/3, 2/3, 0/2, 1/1, 1/1,\n 0/2, 0/2, 0/2, 1/1, 0/1, 2/2, 2/2,\n 2/2, 2/2, 0/1, 0/2, 0/2, 1/1, 2/2,\n 0/2, 0/2, 2/2, 2/2, 2/2, 0/1, 0/1],\n 'PROP_POOR_OVR_HLTH': [2/3, 1/1, 2/3, 2/3, 1/2, 0/1, 0/1,\n 1/2, 1/2, 1/2, 0/1, 0/1, 1/2, 1/2,\n 1/2, 1/2, 1/1, 1/2, 1/2, 0/1, 1/2,\n 1/2, 1/2, 1/2, 1/2, 1/2, 0/1, 1/1],\n 'PROP_HLTHPLN': [2/3, 0/1, 2/3, 2/3, 2/2, 1/1, 0/1, 2/2,\n 1/2, 1/2, 1/1, 1/1, 1/2, 1/2, 1/2, 1/2,\n 1/1, 1/2, 1/2, 0/1, 0/2, 2/2, 2/2, 0/2,\n 2/2, 2/2, 0/1, 0/1],\n 'PROP_HAS_PERSDOC': [3/3, 0/1, 3/3, 3/3, 2/2, 1/1, 0/1,\n 2/2, 1/2, 1/2, 1/1, 1/1, 2/2, 1/2,\n 2/2, 1/2, 0/1, 1/2, 1/2, 1/1, 0/2,\n 2/2, 2/2, 0/2, 0/2, 0/2, 1/1, 1/1],\n 'PROP_MEDCOST': [2/3, 1/1, 2/3, 2/3, 1/2, 0/1, 1/1, 1/2,\n 1/2, 1/2, 0/1, 0/1, 1/2, 2/2, 1/2, 2/2,\n 1/1, 0/2, 0/2, 1/1, 1/2, 1/2, 1/2, 1/2,\n 2/2, 2/2, 0/1, 0/1],\n 'PROP_ANNUAL_CHECKUP': [1/3, 0/1, 1/3, 1/3, 1/2, 1/1,\n 0/1, 1/2, 1/2, 1/2, 1/1, 0/1,\n 0/2, 2/2, 0/2, 2/2, 1/1, 0/2,\n 0/2, 2/2, 1/2, 1/2, 1/2, 1/2,\n 1/2, 1/2, 1/1, 0/1]}\n\n test_df = pd.DataFrame(test_data)\n expected_df = pd.DataFrame(expected_data)\n 
actual_df = get_proportions_data(test_df)\n actual_df = actual_df.drop(columns=['TREAT', 'ALL_PROP_GOOD_GENHLTH',\n 'ALL_PROP_PHYS_DISTRESS',\n 'ALL_PROP_MENT_DISTRESS',\n 'ALL_PROP_POOR_OVR_HLTH',\n 'ALL_PROP_HLTHPLN',\n 'ALL_PROP_HAS_PERSDOC',\n 'ALL_PROP_MEDCOST',\n 'ALL_PROP_ANNUAL_CHECKUP'])\n assert_frame_equal(expected_df, actual_df)",
"def generate_pivot_table(self):\n if self.aggregation == \"count\":\n table = pd.DataFrame(self.data.groupby([self.row])[\n self.column].value_counts()).unstack(level=-1).fillna(0)\n for i in table.columns.values:\n table[i] = table[i].astype('int64')\n table['Total'] = table.sum(axis=1).values\n table_dict = table.to_dict()\n col_sum = table.sum(axis=0).values\n idx = 0\n for i in table_dict:\n table_dict[i]['Total'] = col_sum[idx]\n idx = idx + 1\n return pd.DataFrame(table_dict)\n\n else:\n raise Exception(\"Other aggregations are not yet supported\")",
"def population_stats(df):\n df = df.transpose()\n\n num_nonnull = df.notna().sum(axis=1)\n pct_nonnull = num_nonnull / df.shape[1]\n num_distinct = df.nunique(axis=1, dropna=True)\n pct_distinct = num_distinct / num_nonnull\n\n return pd.DataFrame({'num_nonnull': num_nonnull, 'pct_nonnull': pct_nonnull,\n 'num_distinct': num_distinct,\n 'pct_distinct': pct_distinct})",
"def resample_table_by_fraction(count_tab:pd.DataFrame, fraction:float, processors=1,\n index_name='guide') -> pd.DataFrame:\n\n str_cols = count_tab.columns[count_tab.iloc[0, :].apply(type) == str]\n str_series = {c:count_tab[c] for c in str_cols}\n\n starting_cols = list(count_tab.columns)\n\n #count_tab.index = range(count_tab.shape[0])\n\n count_tab.drop(str_cols, 1, inplace=True)\n\n # First resamples number of reads per physical sample, then guide counts per sample\n sz = int(count_tab.sum().sum() * fraction)\n weights = count_tab.sum() / count_tab.sum().sum()\n colinds = np.random.choice(range(count_tab.shape[1]), sz, p=weights)\n colcounts = np.bincount(colinds)\n\n resamped_tab = {}\n with mp.Pool(processors) as pool:\n for smp_total, smp in zip(colcounts, count_tab.columns):\n resamped_tab[smp] = pool.apply_async(_resamp, args=(smp_total, count_tab[smp]))\n resamped_tab = {k:p.get() for k, p in resamped_tab.items()}\n resamped_tab = pd.DataFrame(resamped_tab, columns=count_tab.columns, index=count_tab.index)\n # resamped_tab.insert(0, index_name, count_tab.index)\n # resamped_tab.set_index(index_name, inplace=True)\n for col in str_cols:\n # position should work because we're going left to right\n pos = starting_cols.index(col)\n resamped_tab.insert(pos, col, str_series[col], )\n\n #resamped_tab.set_index('guide', inplace=True)\n\n return resamped_tab",
"def run_stats(df: pd.DataFrame, col: str) -> pd.DataFrame:\n pw = PairwisePermutationTest(\n \"cluster\",\n col,\n df,\n order=snakemake.params.cluster_order,\n threads=THREADS,\n ).fit()\n\n return pw.results.assign(chrom_ratio=col).set_index(\n [\"chrom_ratio\", \"name1\", \"name2\"]\n )",
"def produce(self, df):\n result = df.copy()\n for p in self.producers:\n result = p.produce(result)\n\n return result",
"def test_proportions_prep():\n test_one = {'STATE': [2, 2, 23, 23, 4],\n 'GENHLTH': [5, 4, 3, 4, 1],\n 'PHYSHLTH': [1, 20, 15, 16, 18],\n 'MENTHLTH': [29, 2, 21, 8, 5],\n 'POORHLTH': [7, 9, 17, 23, 21],\n 'PERSDOC': [1, 3, 2, 3, 2],\n 'CHECKUP': [2, 1, 4, 1, 8]}\n\n expected_one = {'STATE': [2, 2, 23, 23, 4],\n 'GENHLTH': [5, 4, 3, 4, 1],\n 'PHYSHLTH': [1, 20, 15, 16, 18],\n 'MENTHLTH': [29, 2, 21, 8, 5],\n 'POORHLTH': [7, 9, 17, 23, 21],\n 'PERSDOC': [1, 3, 2, 3, 2],\n 'CHECKUP': [2, 1, 4, 1, 8],\n 'GOOD_GENHLTH': [0, 0, 1, 0, 1],\n 'PHYS_DISTRESS': [0, 1, 1, 1, 1],\n 'MENT_DISTRESS': [1, 0, 1, 0, 0],\n 'POOR_OVR_HLTH': [0, 0, 1, 1, 1],\n 'HAS_PERSDOC': [1, 0, 1, 0, 1],\n 'ANNUAL_CHECKUP': [0, 1, 0, 1, 0]}\n\n test_df = pd.DataFrame(test_one)\n expected_df = pd.DataFrame(expected_one)\n actual_df = proportions_prep_for_outcomes(test_df)\n assert_frame_equal(actual_df, expected_df)",
"def _make_probs(self, df: pd.DataFrame) -> pd.DataFrame:\n\n def fn(item):\n probabilities = np.zeros(len(self.train_labels))\n pairs = [\n (self.parse_label(item[f\"Pred{i}\"]), item[f\"Pred{i}Score\"])\n for i in range(MAX_PREDS)\n if f\"Pred{i}\" in item\n ]\n if pairs:\n idx, probs = zip(*pairs)\n if probs == (\"\",):\n probs = 1.0\n probabilities[np.array(idx)] = np.array(probs)\n else:\n # If we have no pred, we get the label\n probabilities[item[self.azimuth_config.columns.label]] = 1.0\n if not np.isclose(0.0, diff := 1.0 - probabilities.sum()):\n probabilities[probabilities == 0.0] = diff / (probabilities == 0.0).sum()\n return probabilities\n\n df[\"probs\"] = df.apply(fn, axis=1)\n return df",
"def _make_df(recipe: MyRecipe) -> pd.DataFrame:\n df = pd.DataFrame()\n res = recipe.res = FitResults(recipe)\n df[\"name\"] = [\"Rw\", \"half_chi2\"] + res.varnames\n df[\"val\"] = [res.rw, res.chi2 / 2] + res.varvals.tolist()\n df[\"std\"] = [0, 0] + res.varunc\n df = df.set_index(\"name\")\n return df",
"def train_data_generator_hist(self):\n def __gen__():\n while True:\n idxs = list(self.df_train.visit_id)\n np.random.shuffle(idxs)\n df_train = self.df_train.set_index('visit_id')\n train_visits = self.train_visits.set_index('visit_id')\n for idx in idxs:\n prev_idxs = self.find_prev_nvisits(idx, numhistories=FLAGS.max_num_histories)\n assert idx == prev_idxs[-1]\n prev_idxs = [None for _ in range(FLAGS.max_num_histories - len(prev_idxs))] + prev_idxs\n output1, output2, output3, output4 = [], [], [], []\n for pidx in prev_idxs[:FLAGS.max_num_histories]:\n try:\n visit = train_visits.loc[pidx]\n label = df_train.loc[pidx]\n output1 = np.append(output1, visit['visit_indices'])\n output2 = np.append(output2, visit['area_indices'])\n output3 = np.append(output3, [visit[ft] for ft in self.handcrafted_features])\n output4 = np.append(output4, [label[ft] for ft in ['revisit_intention', 'suppress_time']])\n except TypeError:\n output1 = np.append(output1, [self.pad_val_visit])\n output2 = np.append(output2, np.full(shape=self.num_area_thres, fill_value=self.pad_val_area))\n output3 = np.append(output3, np.zeros(len(self.handcrafted_features)))\n output4 = np.append(output4, np.array([-1, -1]))\n\n yield np.hstack((output1.reshape(-1, 1), output2.reshape(-1, len(visit['area_indices'])),\n output3.reshape(-1, len(self.handcrafted_features)))), output4[-2:] # only the last visit's labels (Last two elements)\n\n gen = __gen__()\n\n while True:\n batch = [np.stack(x) for x in zip(*(next(gen) for _ in range(FLAGS.batch_size)))]\n # moke_data = [batch[0].reshape(-1, 1), batch[1], batch[2]], batch[-1]\n # moke_data = [batch[0], batch[1]], batch[-1]\n moke_data = batch[0], batch[-1]\n yield moke_data",
"def show_prop(df, target_col='target'):\n return df \\\n .groupby(target_col) \\\n .agg(num=(target_col, 'size')) \\\n .assign(prop=lambda x: x.num / x.num.sum()) \\\n .style \\\n .format(dict(\n num='{:,.0f}',\n prop='{:.2%}'))",
"def compute_class_df(self):\n \n if ((self.the_class) and (isinstance(self.the_class, int))):\n\n # Create the bins from the classes\n self.data['the_class'] = LogReg.create_the_class(self, self.data.iloc[:,0])\n \n # Compute the probability\n the_sum = self.data.iloc[:,1:].groupby('the_class').sum()\n the_count = self.data.iloc[:,1:].groupby('the_class').count()\n self.class_prob = (the_sum / the_count).reset_index()\n \n # Remove classes from the main dataframe\n self.data.drop('the_class', axis=1, inplace=True)\n \n else:\n self.class_prob = None",
"def create_employee_distributions(reps, input_choices):\n ratios = []\n for i in range(reps):\n employee_list = create_employee_list(1000, input_choices)\n counts = Counter(employee_list)\n ratio = counts['F'] / 1000\n ratios.append(ratio)\n return ratios",
"def population_stats(df):\n\n return ...",
"def get_generation_process_df(use_alt_gen_process=None, regions=None, **kwargs):\n if use_alt_gen_process is None:\n use_alt_gen_process = model_specs['use_alt_gen_process']\n if regions is None:\n regions = model_specs['regional_aggregation']\n\n if use_alt_gen_process is True:\n try:\n upstream_df = kwargs['upstream_df']\n except KeyError:\n print(\n \"A kwarg named 'upstream_dict' must be included if use_alt_gen_process \"\n \"is True\"\n )\n # upstream_df = get_upstream_process_df()\n if model_specs['include_upstream_processes'] is True:\n upstream_dict = write_upstream_process_database_to_dict(\n upstream_df\n )\n upstream_dict = write_upstream_dicts_to_jsonld(upstream_dict)\n gen_df = get_alternate_gen_plus_netl()\n combined_df, canadian_gen = combine_upstream_and_gen_df(\n gen_df, upstream_df\n )\n gen_plus_fuels = add_fuels_to_gen(\n gen_df, upstream_df, canadian_gen, upstream_dict\n )\n else:\n gen_df = get_alternate_gen_plus_netl()\n upstream_df=pd.DataFrame(columns=gen_df.columns)\n upstream_dict={}\n gen_plus_fuels=gen_df\n #This change has been made to accomodate the new method of generating\n #consumption mixes for FERC regions. They now pull BAs to provide\n #a more accurate inventory. 
The tradeoff here is that it's no longer possible\n #to make a FERC region generation mix and also provide the consumption mix.\n #Or it could be possible but would requir running through aggregate twice.\n# generation_process_df = aggregate_gen(\n# gen_plus_fuels, subregion=regions\n# )\n generation_process_df = aggregate_gen(\n gen_plus_fuels, subregion=\"BA\"\n )\n return generation_process_df\n\n else:\n from electricitylci.egrid_filter import (\n electricity_for_selected_egrid_facilities,\n egrid_facilities_to_include,\n emissions_and_waste_for_selected_egrid_facilities,\n )\n from electricitylci.eia923_generation import build_generation_data\n from electricitylci.generation import create_generation_process_df\n from electricitylci.model_config import replace_egrid\n\n if replace_egrid is True:\n # This is a dummy function that doesn't exist yet\n # updated_emissions = build_new_emissions(year)\n\n generation_data = build_generation_data()\n generation_process_df = create_generation_process_df(\n generation_data,\n emissions_and_waste_for_selected_egrid_facilities,\n subregion=regions,\n )\n\n else:\n electricity_for_selected_egrid_facilities[\"Year\"] = model_specs[\"egrid_year\"]\n generation_process_df = create_generation_process_df(\n electricity_for_selected_egrid_facilities,\n emissions_and_waste_for_selected_egrid_facilities,\n subregion=regions,\n )\n return generation_process_df",
"def compute_probability(self):\n N = self.number_of_products\n for i in range(N):\n self.df[f'exp U{i+1}'] = np.exp(C*self.df[f'U{i+1}'])\n \n self.df[f'Total Exp'] = self.df.iloc[:, -N:].sum(axis=1)\n\n for i in range(N):\n self.df[f'Prob{i+1}'] = self.df[f'exp U{i+1}'] / self.df[f'Total Exp']",
"def _sample_proportional(self):\n indices = []\n p_total = self.sum_tree.sum(0, len(self)-1)\n\n segment = p_total / self.batch_size\n\n for i in range(self.batch_size):\n a = segment * i\n b = segment * (i+1)\n upperbound = random.uniform(a, b)\n idx = self.sum_tree.retrieve(upperbound)\n indices.append(idx)\n\n return indices"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Extract data relevant to the calculation of a power plant's MCOE. Given a PUDL utility_id and a PUDL plant_id, return several data series relevant to the calculation of the plant's marginal cost of electricity (MCOE). Both utility_id and plant_id are required because the same plants are reported by multiple FERC respondents in cases where ownership is shared. Including the utility_id allows us to pull only a single instance of the plant, rather than duplicates,w hich would result in incorrect total fuel consumption, etc.
|
def mcoe_by_plant(utility_id, plant_id, pudl_engine, years):
# For testing purposes right now...
utility_id = 272 # PSCo's PUDL utility_id
plant_id = 122 # Comanche's PUDL plant_id
# Grab the tables that we're going to need to work with from FERC.
pudl_tables = models.PUDLBase.metadata.tables
utilities_ferc1 = pudl_tables['utilities_ferc1']
plants_ferc1 = pudl_tables['plants_ferc1']
fuel_ferc1 = pudl_tables['fuel_ferc1']
steam_ferc1 = pudl_tables['plants_steam_ferc1']
# We need to pull the fuel information separately, because it has several
# entries for each plant for each year -- we'll groupby() plant before
# merging it with the steam plant info
fuel_ferc1_select = sa.sql.select([
fuel_ferc1.c.report_year,
utilities_ferc1.c.respondent_id,
utilities_ferc1.c.util_id_pudl,
utilities_ferc1.c.respondent_name,
plants_ferc1.c.plant_id_pudl,
fuel_ferc1.c.plant_name,
fuel_ferc1.c.fuel,
fuel_ferc1.c.fuel_qty_burned,
fuel_ferc1.c.fuel_avg_mmbtu_per_unit,
fuel_ferc1.c.fuel_cost_per_unit_burned,
fuel_ferc1.c.fuel_cost_per_unit_delivered,
fuel_ferc1.c.fuel_cost_per_mmbtu,
fuel_ferc1.c.fuel_cost_per_mwh,
fuel_ferc1.c.fuel_mmbtu_per_mwh]).\
where(sa.sql.and_(
utilities_ferc1.c.respondent_id == fuel_ferc1.c.respondent_id,
plants_ferc1.c.respondent_id == fuel_ferc1.c.respondent_id,
plants_ferc1.c.plant_name == fuel_ferc1.c.plant_name))
fuel_df = pd.read_sql(fuel_ferc1_select, pudl_engine)
# Pull relevant cost/expense data from the FERC large plant table:
steam_ferc1_select = sa.sql.select([
steam_ferc1.c.report_year,
utilities_ferc1.c.respondent_id,
utilities_ferc1.c.util_id_pudl,
utilities_ferc1.c.respondent_name,
plants_ferc1.c.plant_id_pudl,
steam_ferc1.c.plant_name,
steam_ferc1.c.total_capacity_mw,
steam_ferc1.c.net_generation_mwh,
steam_ferc1.c.expns_operations,
steam_ferc1.c.expns_fuel,
steam_ferc1.c.expns_coolants,
steam_ferc1.c.expns_steam,
steam_ferc1.c.expns_steam_other,
steam_ferc1.c.expns_transfer,
steam_ferc1.c.expns_electric,
steam_ferc1.c.expns_misc_power,
steam_ferc1.c.expns_rents,
steam_ferc1.c.expns_allowances,
steam_ferc1.c.expns_engineering,
steam_ferc1.c.expns_structures,
steam_ferc1.c.expns_boiler,
steam_ferc1.c.expns_plants,
steam_ferc1.c.expns_misc_steam,
steam_ferc1.c.expns_production_total,
steam_ferc1.c.expns_per_mwh]).\
where(sa.sql.and_(
utilities_ferc1.c.respondent_id == steam_ferc1.c.respondent_id,
plants_ferc1.c.respondent_id == steam_ferc1.c.respondent_id,
plants_ferc1.c.plant_name == steam_ferc1.c.plant_name))
steam_df = pd.read_sql(steam_ferc1_select, pudl_engine)
# Add some columns with totals so we can sum things up...
fuel_df['fuel_burned_mmbtu_total'] = \
fuel_df['fuel_qty_burned'] * fuel_df['fuel_avg_mmbtu_per_unit']
fuel_df['fuel_burned_cost_total'] = \
fuel_df['fuel_qty_burned'] * fuel_df['fuel_cost_per_unit_burned']
fuel_merge = fuel_df[['report_year', 'plant_id_pudl', 'plant_name']]
fuel_merge = fuel_merge.drop_duplicates(
subset=['report_year', 'plant_id_pudl'])
gb_plant_yr = fuel_df.groupby(['plant_id_pudl', 'report_year'])
# Create single column data frames with year and plant as the index,
# and the field summed up by plant that we're trying to bring into our
# output data frame...
mmbtu_sum = pd.DataFrame(gb_plant_yr['fuel_burned_mmbtu_total'].sum())
cost_sum = pd.DataFrame(gb_plant_yr['fuel_burned_cost_total'].sum())
# Merge the total heat and total cost into our output dataframe
fuel_merge = fuel_merge.merge(mmbtu_sum,
left_on=['plant_id_pudl', 'report_year'],
right_index=True)
fuel_merge = fuel_merge.merge(cost_sum,
left_on=['plant_id_pudl', 'report_year'],
right_index=True)
# Calculate correlation of expenses to net power generation. Require a
# minimum plant capacity factor of 0.6 so we the signal will be high,
# but we'll still have lots of plants to look at:
expns_corr = ferc1_expns_corr(pudl_engine, capacity_factor=0.6)
# These are columns that pertain to the plant, and are not expenses.
steam_common_cols = ['report_year',
'plant_id_pudl',
'plant_name',
'total_capacity_mw']
# These aren't individual total expense fields, and should be left out
steam_cols_to_remove = ['expns_per_mwh',
'expns_production_total']
# Remove the expns_* columns that we don't want
for key in steam_cols_to_remove:
x = expns_corr.pop(key, None)
# For now using correlation with net_generation > 0.5 as indication of
# "production expenses" (px) vs. "non-production expenses" (npx)
nonfuel_px = [k for k in expns_corr.keys() if expns_corr[k] >= 0.5]
npx = [k for k in expns_corr.keys() if expns_corr[k] < 0.5]
# Grab the common columns for our output:
steam_out = steam_df[steam_common_cols].copy()
# 3 categories of expense that we are pulling together:
# - fuel production expenses
# - non-fuel production expenses
# - non-production expenses
steam_out['total_fuel_px'] = steam_df['expns_fuel']
steam_out['net_generation_mwh'] = steam_df['net_generation_mwh']
steam_out['total_nonfuel_px'] = steam_df[nonfuel_px].copy().sum(axis=1)
steam_out['total_npx'] = steam_df[npx].copy().sum(axis=1)
steam_out['fuel_expns_per_mwh'] = \
steam_out['total_fuel_px'] / steam_out['net_generation_mwh']
steam_out['total_nonfuel_px'] = \
steam_out['total_nonfuel_px'] / steam_out['net_generation_mwh']
steam_out['npx_per_mwh'] = \
steam_out['total_npx'] / steam_out['net_generation_mwh']
steam_prod_gb = steam_out.groupby(['plant_id_pudl', 'report_year'])
return(output)
|
[
"def get_mfcc_matrices_for_each_speaker(data, winlen=0.025, numcep=13, nfilt=26, nfft=512, appendEnergy=True, delta_=True, deltadelta_=True):\n ret = defaultdict(lambda: [])\n for el in data:\n speaker_id = el[0].split('_')[0]\n digit = el[0].split('_')[1]\n assert(len(speaker_id) == 5 and len(digit) == 1)\n signal = el[1]\n sample_rate = el[2]\n mfcc_ = get_mfcc(signal, sample_rate, winlen, numcep, nfilt, nfft, appendEnergy, delta_, deltadelta_)\n ret[speaker_id].append((mfcc_, digit))\n return ret",
"def DD_CAPE_CALC(data, sfc_press, upper_press, ES):\n # Trim data to only consider the levels within the identified layer\n # Flip order of the data to capture the descending motion of the parcel\n DD_layer = data.loc[(data['press'] <= sfc_press) & (data['press'] >= upper_press)].sort_values(by='press')\n ## Create the parcel profile for decent along a moist adiabat\n # # # # # # # # # # # # # # # # # # # # # # # # # # # \n #calc parcel path temps (aka moist adiabtic descent) \n parcel_temp = [DD_layer.tempK.values[0]]\n for i in range(1, len(DD_layer.index)):\n dz= DD_layer.hght[i]-DD_layer.hght[i-1] #new height - previous height\n new_temp=moist_lapse(parcel_temp[i-1], DD_layer.press.values[i-1], dz, ES)\n parcel_temp.append(new_temp)\n \n #convert to Celcius \n pa_t=[x - 273.15 for x in parcel_temp] \n #attach a new column of the parcel temps to the pandas dataframe\n DD_layer['p_tempC'], DD_layer['p_tempK'] = pa_t, parcel_temp\n DD_layer['TV_env'] = Virtual_Temp(DD_layer['press'], DD_layer['tempK'], ES)\n DD_layer['TV_par'] = Virtual_Temp(DD_layer['press'], DD_layer['p_tempK'], ES)\n ############\n \n ## Calculate the difference in profile and environmental temperature to integrate\n DD_layer['evn_par_diff']= DD_layer['TV_env'] - DD_layer['TV_par']\n with pd.option_context( 'display.max_columns', None): # more options can be specified also\n DD_layer = DD_layer.drop(columns=['hght', 'dewC', 'dewK', 'tempK', 'p_tempK'])\n try: DD_layer = DD_layer.drop(columns=['layer_group'])\n except: pass\n # print(DD_layer)\n \n # Calculate DCAPE\n dcape = ((mpconsts.Rd) * (np.trapz(DD_layer['evn_par_diff'], x=np.log(DD_layer['press'].values)) * units.kelvin)).to('J/kg')\n return dcape",
"def get_consumer_unit_electric_data_raw(\n electric_data_name,\n cu_id,\n start,\n end\n):\n electric_data_raw = []\n try:\n consumer_unit = c_center.models.ConsumerUnit.objects.get(pk=cu_id)\n\n except c_center.models.ConsumerUnit.DoesNotExist:\n return electric_data_raw\n\n current_timezone = django.utils.timezone.get_current_timezone()\n start_localtime = current_timezone.localize(start)\n start_utc = start_localtime.astimezone(django.utils.timezone.utc)\n end_localtime = current_timezone.localize(end)\n end_utc = end_localtime.astimezone(django.utils.timezone.utc)\n\n param = data_warehouse_extended.models.ElectricalParameter.objects.get(\n name=electric_data_name\n )\n electric_data_name = param.name_transactional\n\n electric_data_values = c_center.models.ElectricDataTemp.objects.filter(\n profile_powermeter=consumer_unit.profile_powermeter,\n medition_date__gte=start_utc,\n medition_date__lte=end_utc\n ).order_by(\n 'medition_date'\n ).values(\n 'medition_date',\n electric_data_name\n )\n\n def get_first_two(electric_data_values):\n cont = 0\n first_m = electric_data_values[cont]['medition_date']\n second_m = electric_data_values[cont + 1]['medition_date']\n while first_m == second_m:\n cont += 1\n second_m = electric_data_values[cont]['medition_date']\n return first_m, second_m\n\n if electric_data_values:\n consumer_unit_data_len = len(electric_data_values)\n first_m, second_m = get_first_two(electric_data_values)\n #first_m = electric_data_values[0]['medition_date']\n #second_m = electric_data_values[1]['medition_date']\n\n delta_m = second_m - first_m\n\n time_m = start_localtime\n cont = 0\n while time_m < end_localtime:\n #difference between readings default to delta_m\n adj_time = delta_m\n try:\n #real difference between readings\n adj_time = electric_data_values[cont]['medition_date'] - time_m\n except IndexError:\n adj_time += delta_m\n\n time_m += delta_m\n #add a margin of 3 seconds between readings\n if adj_time > (delta_m + 
datetime.timedelta(seconds=3)):\n #probably an empty spot\n electric_data_raw.append(\n dict(datetime=int(time.mktime(\n django.utils.timezone.localtime(time_m).timetuple())),\n value=None,\n certainty=False))\n elif cont < consumer_unit_data_len:\n electric_data = abs(\n electric_data_values[cont][electric_data_name])\n if electric_data_name == \"PF\" and electric_data > 1:\n electric_data = 1\n medition_date = electric_data_values[cont]['medition_date']\n\n electric_data_raw.append(\n dict(datetime=\n int(time.mktime(\n django.utils.timezone.localtime(\n medition_date).timetuple())),\n value=abs(electric_data),\n certainty=True))\n cont += 1\n else:\n break\n return electric_data_raw",
"def buildMaterialData(material=\"SS-316L\"):\n if(material==\"SS-316L\"):\n # starter temperature, room temperature\n T0 = CelciusToK(23.0)\n\n # melting temperature of 316L\n Tm = 1390 # C\n Tm_K = CelciusToK(Tm) # K\n\n ### ALL TEMPS IN C\n #temperature resolution for [0-20] C or [0 - 293.15] K \n starter_res = params[\"starter_res\"]\n \n K_data,K_temp_range = genKdata()\n # temperature range to test fitting on\n T_test_data = np.linspace(0,CelciusToK(1500),params[\"res_scale\"]*(1500/20))\n\n ### interpolate data\n ## thermal conductivity\n # returns a function that can be used later\n # s parameter is the min square distance between interpolation and data\n K_spline = UnivariateSpline(K_temp_range,K_data)\n K_ispline = InterpolatedUnivariateSpline(K_temp_range,K_data)\n \n Kp_data = Kp(K_data,K_temp_range,Tm_K)\n Kp_spline = UnivariateSpline(K_temp_range,Kp_data)\n Kp_ispline = InterpolatedUnivariateSpline(K_temp_range,Kp_data)\n\n ## specific heat capacity\n C_data,C_temp_range = genCdata(material)\n\n C_spline = UnivariateSpline(C_temp_range,C_data)\n C_ispline = InterpolatedUnivariateSpline(C_temp_range,C_data)\n\n ## volumetric expansion\n Ev_data, Ev_temp_range = genEvData(material)\n\n Ev_spline = UnivariateSpline(Ev_temp_range,Ev_data)\n Ev_ispline = InterpolatedUnivariateSpline(Ev_temp_range,Ev_data)\n\n ## Density\n p_data,p_temp_range = genDensityData(Ev_ispline)\n\n p_spline = UnivariateSpline(p_temp_range,p_data)\n p_ispline = InterpolatedUnivariateSpline(p_temp_range,p_data)\n\n # thermal diffusivity of the solid material\n Ds_data = genThermalDiff(K_ispline,p_ispline,C_ispline,T_test_data)\n\n Ds_spline = UnivariateSpline(T_test_data,Ds_data)\n Ds_ispline = InterpolatedUnivariateSpline(T_test_data,Ds_data)\n\n # thermal diffusivity using thermal conductivity scaling approximation\n Dp_data = genThermalDiff_powder(Kp_ispline,p_ispline,C_ispline,T_test_data,Tm)\n\n Dp_spline = UnivariateSpline(T_test_data,Dp_data)\n Dp_ispline = 
InterpolatedUnivariateSpline(T_test_data,Dp_data)\n # tests have shown that the model best performs with the univariate spline for\n # thermal conductivity powder version and the univariate spline of thermal diffusivity\n # solid version\n return 0.53,Kp_spline,Ds_ispline\n else:\n return None",
"def investigate_emcee_result(which_line):\n # pixel_name = \"bluest-component\"\n pixel_name = \"wide-profile\"\n result_chisq = np.loadtxt(f\"/home/ramsey/Pictures/2021-12-15-work/emcee-3p-{pixel_name}_{which_line}.txt\")\n # Only load in CII if we need it\n if which_line == 'cii':\n cii_cube = cube_utils.CubeData(\"sofia/M16_CII_pillar1_BGsubtracted.fits\").data\n regrid = True # Just in case I want to switch back to regular resolution? doesn't hurt\n fn = f\"carma/M16.ALL.hcop.sdi.cm.subpv{'.SOFIAbeam.regrid' if regrid else ''}.fits\"\n hcop_cube = cube_utils.CubeData(fn).convert_to_K().data.with_spectral_unit(kms)\n hcop_flat_wcs = hcop_cube[0, :, :].wcs\n\n # sky_regions = regions.Regions.read(catalog.utils.search_for_file(\"catalogs/pillar1_emissionpeaks.hcopregrid.moreprecise.reg\")) # order appears to be [HCO+, CII]\n # sky_regions = regions.Regions.read(catalog.utils.search_for_file(\"catalogs/p1_threads_pathsandpoints.reg\")) # order appears to be North-E, North-W, South-E, South-W\n sky_regions = regions.Regions.read(catalog.utils.search_for_file(\"catalogs/pillar1_pointsofinterest.reg\")) # order is wide profile, blue tail, north of west thread, bluest component\n pixel_coords = [tuple(round(x) for x in reg.to_pixel(hcop_flat_wcs).center.xy[::-1]) for reg in sky_regions] # converted to (i, j) tuples\n selected_pixel = pixel_coords[0]\n\n # Start with one pixel, just fit that first\n assert regrid\n if which_line == 'cii':\n cii_spectrum = cii_cube[(slice(None), *selected_pixel)].to_value()\n cii_x = cii_cube.spectral_axis.to(kms).to_value()\n hcop_spectrum = hcop_cube[(slice(None), *selected_pixel)].to_value()\n hcop_x = hcop_cube.spectral_axis.to(kms).to_value()\n # Noise\n noise_cii = 1 # 1 K has been my estimate for a while\n noise_hcop = 0.12 # estimated from the cube, lower than the original 0.5 due to smoothing to CII beam and rebinning to CII channels\n # The input/output for the fitting\n if which_line == 'cii':\n x_arr, y_arr = cii_x, 
cii_spectrum\n e_arr = np.full(cii_spectrum.size, noise_cii)\n e_level = noise_cii\n elif which_line == 'hcop':\n x_arr, y_arr = hcop_x, hcop_spectrum\n e_arr = np.full(hcop_spectrum.size, noise_hcop)\n e_level = noise_hcop\n # Options for the models\n fixedstd = True\n tiestd = True\n untieciistd = True\n fixedmean = True\n stddev_hcop = 0.55\n\n # Setup models\n g0 = cps2.models.Gaussian1D(amplitude=10, mean=24.5, stddev=stddev_hcop,\n bounds={'amplitude': (0, None), 'mean': (20, 30)})\n g1 = g0.copy()\n g1.mean = 25.5\n g2 = g0.copy()\n g2.mean = 26\n g = g0 + g1 + g2\n fitter = cps2.fitting.LevMarLSQFitter(calc_uncertainties=True)\n if tiestd:\n cps2.tie_std_models(g)\n if fixedstd:\n cps2.fix_std(g)\n g_fit_hcop = fitter(g, hcop_x, hcop_spectrum, weights=np.full(hcop_spectrum.size, 1.0/noise_hcop))\n if which_line == 'cii':\n # Now do the rest of the CII fitting with emcee using info from HCO+ fit\n if fixedmean:\n g = g_fit_hcop.copy()\n cps2.fix_mean(g)\n if untieciistd:\n cps2.tie_std_models(g, untie=True)\n cps2.unfix_std(g)\n g_fit_cii = fitter(g, cii_x, cii_spectrum, weights=np.full(cii_spectrum.size, 1.0/noise_cii))\n g = g_fit_cii.copy()\n elif which_line == 'hcop':\n g = g_fit_hcop.copy()\n fittable_param_names = get_fittable_param_names(g)\n # Now grab an emcee sample\n\n sorted_chisqs = np.argsort(result_chisq[:, 6])\n chosen_chisqs = sorted_chisqs[:25]\n print(chosen_chisqs)\n print(result_chisq[chosen_chisqs, 6])\n # p_sample = np.where(result_chisq[:, 6] < 1.5)\n # print(p_sample)\n # p_sample = result_chisq[p_sample][0]\n # p_sample = result_chisq[current_index]\n for current_chisq_idx in chosen_chisqs:\n p_sample = result_chisq[current_chisq_idx]\n print(p_sample)\n # print(fittable_param_names)\n # print(p_sample)\n set_fittable_parameters(p_sample, g, fittable_param_names)\n # print(g)\n fig = plt.figure(figsize=(15, 9))\n ax = plt.subplot(111)\n cps2.plot_noise_and_vlims(ax, e_level, (23, 26))\n cps2.plot_everything_about_models(ax, x_arr, 
y_arr, g, dy=-0.08, noise=e_level, dof=(y_arr.size - len(fittable_param_names)))\n ax.set_xlim((20, 30))\n y_model = g(x_arr)\n chisq = np.sum((y_model - y_arr)**2 / (e_arr**2))\n # print(y_model - y_arr)\n # print(\"000\")\n # print(e_arr)\n dof = y_arr.size - len(fittable_param_names)\n print(\"CHISQ\", chisq)\n print(\"REDUCED CHISQ\", chisq/dof)\n # plt.show()\n # plt.savefig(f'/home/ramsey/Pictures/2021-12-15-work/all_cii_emcee_results/emcee-3p-investigation-{pixel_name}_{which_line}_chisqlow_IDX{current_chisq_idx:05d}.png')",
"def extractMongeData(pi,mu,nu,posNu,zeroThresh=1E-16):\n \n # compute marginals of coupling pi\n pi0=np.array(pi.sum(axis=1)).flatten()\n pi1=np.array(pi.sum(axis=0)).flatten()\n \n # length barycenter measure\n shapeBarycenter=mu.shape[0]\n\n # detect zero locations\n sptMu=(pi0>=zeroThresh)\n sptNu=(pi1>=zeroThresh)\n # complements (for computing perp measures)\n sptMuPerp=(sptMu==False)\n sptNuPerp=(sptNu==False)\n muPerp=mu*sptMuPerp\n nuPerp=nu*sptNuPerp\n\n # density of first marginal\n u0=np.zeros_like(mu)\n u0[sptMu]=mu[sptMu]/pi0[sptMu]\n # density of second marginal\n u1Full=np.zeros_like(nu)\n u1Full[sptNu]=nu[sptNu]/pi1[sptNu]\n\n\n # but in the end we are interested in u1Full(T(x)), i.e. the composition with the Monge map\n # this will be stored in variable u1 below and approximated by \"barycentric projection\"\n \n # reserve empty arrays for Monge map and second marginal density at location of Monge map\n # so u1 is supposed to become nu/pi1 evaluated at T, and for this we also use barycentric averaging\n T=np.zeros((shapeBarycenter,posNu.shape[1]),dtype=np.double)\n u1=np.zeros((shapeBarycenter,),dtype=np.double)\n \n # go through points in barycenter\n for j in range(shapeBarycenter):\n # check if current row is empty\n if pi.indptr[j+1]==pi.indptr[j]:\n continue\n\n # extract masses in that row of the coupling (based on csr format)\n piRow=pi.data[pi.indptr[j]:pi.indptr[j+1]]\n # normalize masses\n piRow=piRow/np.sum(piRow)\n # extract indices non-zero entries (based on csr format)\n piIndRow=pi.indices[pi.indptr[j]:pi.indptr[j+1]]\n \n # compute averages of u1 and T wrt this row of pi\n u1[j]=np.sum(u1Full[piIndRow]*piRow)\n # need einsum for averaging along first (\"zeroth\") axis\n T[j,:]=np.einsum(posNu[piIndRow],[0,1],piRow,[0],[1])\n \n return (u0,u1,T,muPerp,nuPerp)",
"def test_cp02pmci(self):\n self.create_sample_data_set_dir('CP02PMCI.DAT', RECOV_DIR, RECOV_FILE_ONE)\n self.create_sample_data_set_dir('CP02PMCI.DAT', TELEM_DIR, TELEM_FILE_ONE)\n self.assert_initialize()\n\n self.get_samples(DataParticleType.START_TIME_RECOVERED, 1, 10)\n self.get_samples(DataParticleType.ENGINEERING_RECOVERED, 53, 40)\n self.get_samples(DataParticleType.STATUS_RECOVERED, 7, 10)\n self.get_samples(DataParticleType.START_TIME_TELEMETERED, 1, 10)\n self.get_samples(DataParticleType.ENGINEERING_TELEMETERED, 53, 40)\n self.get_samples(DataParticleType.STATUS_TELEMETERED, 7, 10)",
"def test_create_cpd_info():\n\tdf_master = pd.DataFrame(['C([C@@H]1[C@H]([C@@H]([C@H](C(O1)O)O)O)O)O',\n\t\t 'C([C@@H]1[C@@H]([C@@H]([C@H]([C@H](O1)O)O)O)O)O',\n\t\t 'C([C@H]([C@H]([C@@H](C(=O)CO)O)O)O)O',\n\n'C[C@@H]1CC[C@H]2C[C@@H](/C(=C/C=C/C=C/[C@H](C[C@H](C(=O)[C@@H]([C@@H](/C(=C/[C@H](C(=O)C[C@H](OC(=O)[C@@H]3CCCCN3C(=O)C(=O)[C@@]1(O2)O)[C@H](C)C[C@@H]4CC[C@H]([C@@H](C4)OC)O)C)/C)O)OC)C)C)/C)OC']\n, columns=['SMILES'])\n\ttest = cheminform.create_cpd_info(df_master)\n\n\tassert test['n_C'][0] == 6, \"ValueError: Carbon count is incorrect\"\n\tassert test['DoU'][3] == 13, \"ValueError: Degree of Unsaturation in inaccurate\"\n\tassert type(test['MW'][2]) == type(test['n_C'][0]), \"TypeError: MW should be float\"\n\tassert type(test['n_H'][3]) == type(test['n_C'][0]), \"TypeError: All data should be float\"\n\n\treturn '3/3 Tests successful'",
"def pwr_core():\n model = openmc.model.Model()\n\n # Define materials.\n fuel = openmc.Material(1, name='UOX fuel')\n fuel.set_density('g/cm3', 10.062)\n fuel.add_nuclide('U234', 4.9476e-6)\n fuel.add_nuclide('U235', 4.8218e-4)\n fuel.add_nuclide('U238', 2.1504e-2)\n fuel.add_nuclide('Xe135', 1.0801e-8)\n fuel.add_nuclide('O16', 4.5737e-2)\n\n clad = openmc.Material(2, name='Zircaloy')\n clad.set_density('g/cm3', 5.77)\n clad.add_nuclide('Zr90', 0.5145)\n clad.add_nuclide('Zr91', 0.1122)\n clad.add_nuclide('Zr92', 0.1715)\n clad.add_nuclide('Zr94', 0.1738)\n clad.add_nuclide('Zr96', 0.0280)\n\n cold_water = openmc.Material(3, name='Cold borated water')\n cold_water.set_density('atom/b-cm', 0.07416)\n cold_water.add_nuclide('H1', 2.0)\n cold_water.add_nuclide('O16', 1.0)\n cold_water.add_nuclide('B10', 6.490e-4)\n cold_water.add_nuclide('B11', 2.689e-3)\n cold_water.add_s_alpha_beta('c_H_in_H2O')\n\n hot_water = openmc.Material(4, name='Hot borated water')\n hot_water.set_density('atom/b-cm', 0.06614)\n hot_water.add_nuclide('H1', 2.0)\n hot_water.add_nuclide('O16', 1.0)\n hot_water.add_nuclide('B10', 6.490e-4)\n hot_water.add_nuclide('B11', 2.689e-3)\n hot_water.add_s_alpha_beta('c_H_in_H2O')\n\n rpv_steel = openmc.Material(5, name='Reactor pressure vessel steel')\n rpv_steel.set_density('g/cm3', 7.9)\n rpv_steel.add_nuclide('Fe54', 0.05437098, 'wo')\n rpv_steel.add_nuclide('Fe56', 0.88500663, 'wo')\n rpv_steel.add_nuclide('Fe57', 0.0208008, 'wo')\n rpv_steel.add_nuclide('Fe58', 0.00282159, 'wo')\n rpv_steel.add_nuclide('Ni58', 0.0067198, 'wo')\n rpv_steel.add_nuclide('Ni60', 0.0026776, 'wo')\n rpv_steel.add_nuclide('Mn55', 0.01, 'wo')\n rpv_steel.add_nuclide('Cr52', 0.002092475, 'wo')\n rpv_steel.add_nuclide('C0', 0.0025, 'wo')\n rpv_steel.add_nuclide('Cu63', 0.0013696, 'wo')\n\n lower_rad_ref = openmc.Material(6, name='Lower radial reflector')\n lower_rad_ref.set_density('g/cm3', 4.32)\n lower_rad_ref.add_nuclide('H1', 0.0095661, 'wo')\n 
lower_rad_ref.add_nuclide('O16', 0.0759107, 'wo')\n lower_rad_ref.add_nuclide('B10', 3.08409e-5, 'wo')\n lower_rad_ref.add_nuclide('B11', 1.40499e-4, 'wo')\n lower_rad_ref.add_nuclide('Fe54', 0.035620772088, 'wo')\n lower_rad_ref.add_nuclide('Fe56', 0.579805982228, 'wo')\n lower_rad_ref.add_nuclide('Fe57', 0.01362750048, 'wo')\n lower_rad_ref.add_nuclide('Fe58', 0.001848545204, 'wo')\n lower_rad_ref.add_nuclide('Ni58', 0.055298376566, 'wo')\n lower_rad_ref.add_nuclide('Mn55', 0.0182870, 'wo')\n lower_rad_ref.add_nuclide('Cr52', 0.145407678031, 'wo')\n lower_rad_ref.add_s_alpha_beta('c_H_in_H2O')\n\n upper_rad_ref = openmc.Material(7, name='Upper radial reflector / Top plate region')\n upper_rad_ref.set_density('g/cm3', 4.28)\n upper_rad_ref.add_nuclide('H1', 0.0086117, 'wo')\n upper_rad_ref.add_nuclide('O16', 0.0683369, 'wo')\n upper_rad_ref.add_nuclide('B10', 2.77638e-5, 'wo')\n upper_rad_ref.add_nuclide('B11', 1.26481e-4, 'wo')\n upper_rad_ref.add_nuclide('Fe54', 0.035953677186, 'wo')\n upper_rad_ref.add_nuclide('Fe56', 0.585224740891, 'wo')\n upper_rad_ref.add_nuclide('Fe57', 0.01375486056, 'wo')\n upper_rad_ref.add_nuclide('Fe58', 0.001865821363, 'wo')\n upper_rad_ref.add_nuclide('Ni58', 0.055815129186, 'wo')\n upper_rad_ref.add_nuclide('Mn55', 0.0184579, 'wo')\n upper_rad_ref.add_nuclide('Cr52', 0.146766614995, 'wo')\n upper_rad_ref.add_s_alpha_beta('c_H_in_H2O')\n\n bot_plate = openmc.Material(8, name='Bottom plate region')\n bot_plate.set_density('g/cm3', 7.184)\n bot_plate.add_nuclide('H1', 0.0011505, 'wo')\n bot_plate.add_nuclide('O16', 0.0091296, 'wo')\n bot_plate.add_nuclide('B10', 3.70915e-6, 'wo')\n bot_plate.add_nuclide('B11', 1.68974e-5, 'wo')\n bot_plate.add_nuclide('Fe54', 0.03855611055, 'wo')\n bot_plate.add_nuclide('Fe56', 0.627585036425, 'wo')\n bot_plate.add_nuclide('Fe57', 0.014750478, 'wo')\n bot_plate.add_nuclide('Fe58', 0.002000875025, 'wo')\n bot_plate.add_nuclide('Ni58', 0.059855207342, 'wo')\n bot_plate.add_nuclide('Mn55', 0.0197940, 
'wo')\n bot_plate.add_nuclide('Cr52', 0.157390026871, 'wo')\n bot_plate.add_s_alpha_beta('c_H_in_H2O')\n\n bot_nozzle = openmc.Material(9, name='Bottom nozzle region')\n bot_nozzle.set_density('g/cm3', 2.53)\n bot_nozzle.add_nuclide('H1', 0.0245014, 'wo')\n bot_nozzle.add_nuclide('O16', 0.1944274, 'wo')\n bot_nozzle.add_nuclide('B10', 7.89917e-5, 'wo')\n bot_nozzle.add_nuclide('B11', 3.59854e-4, 'wo')\n bot_nozzle.add_nuclide('Fe54', 0.030411411144, 'wo')\n bot_nozzle.add_nuclide('Fe56', 0.495012237964, 'wo')\n bot_nozzle.add_nuclide('Fe57', 0.01163454624, 'wo')\n bot_nozzle.add_nuclide('Fe58', 0.001578204652, 'wo')\n bot_nozzle.add_nuclide('Ni58', 0.047211231662, 'wo')\n bot_nozzle.add_nuclide('Mn55', 0.0156126, 'wo')\n bot_nozzle.add_nuclide('Cr52', 0.124142524198, 'wo')\n bot_nozzle.add_s_alpha_beta('c_H_in_H2O')\n\n top_nozzle = openmc.Material(10, name='Top nozzle region')\n top_nozzle.set_density('g/cm3', 1.746)\n top_nozzle.add_nuclide('H1', 0.0358870, 'wo')\n top_nozzle.add_nuclide('O16', 0.2847761, 'wo')\n top_nozzle.add_nuclide('B10', 1.15699e-4, 'wo')\n top_nozzle.add_nuclide('B11', 5.27075e-4, 'wo')\n top_nozzle.add_nuclide('Fe54', 0.02644016154, 'wo')\n top_nozzle.add_nuclide('Fe56', 0.43037146399, 'wo')\n top_nozzle.add_nuclide('Fe57', 0.0101152584, 'wo')\n top_nozzle.add_nuclide('Fe58', 0.00137211607, 'wo')\n top_nozzle.add_nuclide('Ni58', 0.04104621835, 'wo')\n top_nozzle.add_nuclide('Mn55', 0.0135739, 'wo')\n top_nozzle.add_nuclide('Cr52', 0.107931450781, 'wo')\n top_nozzle.add_s_alpha_beta('c_H_in_H2O')\n\n top_fa = openmc.Material(11, name='Top of fuel assemblies')\n top_fa.set_density('g/cm3', 3.044)\n top_fa.add_nuclide('H1', 0.0162913, 'wo')\n top_fa.add_nuclide('O16', 0.1292776, 'wo')\n top_fa.add_nuclide('B10', 5.25228e-5, 'wo')\n top_fa.add_nuclide('B11', 2.39272e-4, 'wo')\n top_fa.add_nuclide('Zr90', 0.43313403903, 'wo')\n top_fa.add_nuclide('Zr91', 0.09549277374, 'wo')\n top_fa.add_nuclide('Zr92', 0.14759527104, 'wo')\n 
top_fa.add_nuclide('Zr94', 0.15280552077, 'wo')\n top_fa.add_nuclide('Zr96', 0.02511169542, 'wo')\n top_fa.add_s_alpha_beta('c_H_in_H2O')\n\n bot_fa = openmc.Material(12, name='Bottom of fuel assemblies')\n bot_fa.set_density('g/cm3', 1.762)\n bot_fa.add_nuclide('H1', 0.0292856, 'wo')\n bot_fa.add_nuclide('O16', 0.2323919, 'wo')\n bot_fa.add_nuclide('B10', 9.44159e-5, 'wo')\n bot_fa.add_nuclide('B11', 4.30120e-4, 'wo')\n bot_fa.add_nuclide('Zr90', 0.3741373658, 'wo')\n bot_fa.add_nuclide('Zr91', 0.0824858164, 'wo')\n bot_fa.add_nuclide('Zr92', 0.1274914944, 'wo')\n bot_fa.add_nuclide('Zr94', 0.1319920622, 'wo')\n bot_fa.add_nuclide('Zr96', 0.0216912612, 'wo')\n bot_fa.add_s_alpha_beta('c_H_in_H2O')\n\n # Define the materials file.\n model.materials = (fuel, clad, cold_water, hot_water, rpv_steel,\n lower_rad_ref, upper_rad_ref, bot_plate,\n bot_nozzle, top_nozzle, top_fa, bot_fa)\n\n # Define surfaces.\n s1 = openmc.ZCylinder(r=0.41, surface_id=1)\n s2 = openmc.ZCylinder(r=0.475, surface_id=2)\n s3 = openmc.ZCylinder(r=0.56, surface_id=3)\n s4 = openmc.ZCylinder(r=0.62, surface_id=4)\n s5 = openmc.ZCylinder(r=187.6, surface_id=5)\n s6 = openmc.ZCylinder(r=209.0, surface_id=6)\n s7 = openmc.ZCylinder(r=229.0, surface_id=7)\n s8 = openmc.ZCylinder(r=249.0, surface_id=8, boundary_type='vacuum')\n\n s31 = openmc.ZPlane(z0=-229.0, surface_id=31, boundary_type='vacuum')\n s32 = openmc.ZPlane(z0=-199.0, surface_id=32)\n s33 = openmc.ZPlane(z0=-193.0, surface_id=33)\n s34 = openmc.ZPlane(z0=-183.0, surface_id=34)\n s35 = openmc.ZPlane(z0=0.0, surface_id=35)\n s36 = openmc.ZPlane(z0=183.0, surface_id=36)\n s37 = openmc.ZPlane(z0=203.0, surface_id=37)\n s38 = openmc.ZPlane(z0=215.0, surface_id=38)\n s39 = openmc.ZPlane(z0=223.0, surface_id=39, boundary_type='vacuum')\n\n # Define pin cells.\n fuel_cold = openmc.Universe(name='Fuel pin, cladding, cold water',\n universe_id=1)\n c21 = openmc.Cell(cell_id=21, fill=fuel, region=-s1)\n c22 = openmc.Cell(cell_id=22, fill=clad, 
region=+s1 & -s2)\n c23 = openmc.Cell(cell_id=23, fill=cold_water, region=+s2)\n fuel_cold.add_cells((c21, c22, c23))\n\n tube_cold = openmc.Universe(name='Instrumentation guide tube, '\n 'cold water', universe_id=2)\n c24 = openmc.Cell(cell_id=24, fill=cold_water, region=-s3)\n c25 = openmc.Cell(cell_id=25, fill=clad, region=+s3 & -s4)\n c26 = openmc.Cell(cell_id=26, fill=cold_water, region=+s4)\n tube_cold.add_cells((c24, c25, c26))\n\n fuel_hot = openmc.Universe(name='Fuel pin, cladding, hot water',\n universe_id=3)\n c27 = openmc.Cell(cell_id=27, fill=fuel, region=-s1)\n c28 = openmc.Cell(cell_id=28, fill=clad, region=+s1 & -s2)\n c29 = openmc.Cell(cell_id=29, fill=hot_water, region=+s2)\n fuel_hot.add_cells((c27, c28, c29))\n\n tube_hot = openmc.Universe(name='Instrumentation guide tube, hot water',\n universe_id=4)\n c30 = openmc.Cell(cell_id=30, fill=hot_water, region=-s3)\n c31 = openmc.Cell(cell_id=31, fill=clad, region=+s3 & -s4)\n c32 = openmc.Cell(cell_id=32, fill=hot_water, region=+s4)\n tube_hot.add_cells((c30, c31, c32))\n\n # Set positions occupied by guide tubes\n tube_x = np.array([5, 8, 11, 3, 13, 2, 5, 8, 11, 14, 2, 5, 8, 11, 14,\n 2, 5, 8, 11, 14, 3, 13, 5, 8, 11])\n tube_y = np.array([2, 2, 2, 3, 3, 5, 5, 5, 5, 5, 8, 8, 8, 8, 8,\n 11, 11, 11, 11, 11, 13, 13, 14, 14, 14])\n\n # Define fuel lattices.\n l100 = openmc.RectLattice(name='Fuel assembly (lower half)', lattice_id=100)\n l100.lower_left = (-10.71, -10.71)\n l100.pitch = (1.26, 1.26)\n l100.universes = np.tile(fuel_cold, (17, 17))\n l100.universes[tube_x, tube_y] = tube_cold\n\n l101 = openmc.RectLattice(name='Fuel assembly (upper half)', lattice_id=101)\n l101.lower_left = (-10.71, -10.71)\n l101.pitch = (1.26, 1.26)\n l101.universes = np.tile(fuel_hot, (17, 17))\n l101.universes[tube_x, tube_y] = tube_hot\n\n # Define assemblies.\n fa_cw = openmc.Universe(name='Water assembly (cold)', universe_id=5)\n c50 = openmc.Cell(cell_id=50, fill=cold_water, region=+s34 & -s35)\n 
fa_cw.add_cell(c50)\n\n fa_hw = openmc.Universe(name='Water assembly (hot)', universe_id=7)\n c70 = openmc.Cell(cell_id=70, fill=hot_water, region=+s35 & -s36)\n fa_hw.add_cell(c70)\n\n fa_cold = openmc.Universe(name='Fuel assembly (cold)', universe_id=6)\n c60 = openmc.Cell(cell_id=60, fill=l100, region=+s34 & -s35)\n fa_cold.add_cell(c60)\n\n fa_hot = openmc.Universe(name='Fuel assembly (hot)', universe_id=8)\n c80 = openmc.Cell(cell_id=80, fill=l101, region=+s35 & -s36)\n fa_hot.add_cell(c80)\n\n # Define core lattices\n l200 = openmc.RectLattice(name='Core lattice (lower half)', lattice_id=200)\n l200.lower_left = (-224.91, -224.91)\n l200.pitch = (21.42, 21.42)\n l200.universes = [\n [fa_cw]*21,\n [fa_cw]*21,\n [fa_cw]*7 + [fa_cold]*7 + [fa_cw]*7,\n [fa_cw]*5 + [fa_cold]*11 + [fa_cw]*5,\n [fa_cw]*4 + [fa_cold]*13 + [fa_cw]*4,\n [fa_cw]*3 + [fa_cold]*15 + [fa_cw]*3,\n [fa_cw]*3 + [fa_cold]*15 + [fa_cw]*3,\n [fa_cw]*2 + [fa_cold]*17 + [fa_cw]*2,\n [fa_cw]*2 + [fa_cold]*17 + [fa_cw]*2,\n [fa_cw]*2 + [fa_cold]*17 + [fa_cw]*2,\n [fa_cw]*2 + [fa_cold]*17 + [fa_cw]*2,\n [fa_cw]*2 + [fa_cold]*17 + [fa_cw]*2,\n [fa_cw]*2 + [fa_cold]*17 + [fa_cw]*2,\n [fa_cw]*2 + [fa_cold]*17 + [fa_cw]*2,\n [fa_cw]*3 + [fa_cold]*15 + [fa_cw]*3,\n [fa_cw]*3 + [fa_cold]*15 + [fa_cw]*3,\n [fa_cw]*4 + [fa_cold]*13 + [fa_cw]*4,\n [fa_cw]*5 + [fa_cold]*11 + [fa_cw]*5,\n [fa_cw]*7 + [fa_cold]*7 + [fa_cw]*7,\n [fa_cw]*21,\n [fa_cw]*21]\n\n l201 = openmc.RectLattice(name='Core lattice (lower half)', lattice_id=201)\n l201.lower_left = (-224.91, -224.91)\n l201.pitch = (21.42, 21.42)\n l201.universes = [\n [fa_hw]*21,\n [fa_hw]*21,\n [fa_hw]*7 + [fa_hot]*7 + [fa_hw]*7,\n [fa_hw]*5 + [fa_hot]*11 + [fa_hw]*5,\n [fa_hw]*4 + [fa_hot]*13 + [fa_hw]*4,\n [fa_hw]*3 + [fa_hot]*15 + [fa_hw]*3,\n [fa_hw]*3 + [fa_hot]*15 + [fa_hw]*3,\n [fa_hw]*2 + [fa_hot]*17 + [fa_hw]*2,\n [fa_hw]*2 + [fa_hot]*17 + [fa_hw]*2,\n [fa_hw]*2 + [fa_hot]*17 + [fa_hw]*2,\n [fa_hw]*2 + [fa_hot]*17 + [fa_hw]*2,\n [fa_hw]*2 + 
[fa_hot]*17 + [fa_hw]*2,\n [fa_hw]*2 + [fa_hot]*17 + [fa_hw]*2,\n [fa_hw]*2 + [fa_hot]*17 + [fa_hw]*2,\n [fa_hw]*3 + [fa_hot]*15 + [fa_hw]*3,\n [fa_hw]*3 + [fa_hot]*15 + [fa_hw]*3,\n [fa_hw]*4 + [fa_hot]*13 + [fa_hw]*4,\n [fa_hw]*5 + [fa_hot]*11 + [fa_hw]*5,\n [fa_hw]*7 + [fa_hot]*7 + [fa_hw]*7,\n [fa_hw]*21,\n [fa_hw]*21]\n\n # Define root universe.\n root = openmc.Universe(universe_id=0, name='root universe')\n c1 = openmc.Cell(cell_id=1, fill=l200, region=-s6 & +s34 & -s35)\n c2 = openmc.Cell(cell_id=2, fill=l201, region=-s6 & +s35 & -s36)\n c3 = openmc.Cell(cell_id=3, fill=bot_plate, region=-s7 & +s31 & -s32)\n c4 = openmc.Cell(cell_id=4, fill=bot_nozzle, region=-s5 & +s32 & -s33)\n c5 = openmc.Cell(cell_id=5, fill=bot_fa, region=-s5 & +s33 & -s34)\n c6 = openmc.Cell(cell_id=6, fill=top_fa, region=-s5 & +s36 & -s37)\n c7 = openmc.Cell(cell_id=7, fill=top_nozzle, region=-s5 & +s37 & -s38)\n c8 = openmc.Cell(cell_id=8, fill=upper_rad_ref, region=-s7 & +s38 & -s39)\n c9 = openmc.Cell(cell_id=9, fill=bot_nozzle, region=+s6 & -s7 & +s32 & -s38)\n c10 = openmc.Cell(cell_id=10, fill=rpv_steel, region=+s7 & -s8 & +s31 & -s39)\n c11 = openmc.Cell(cell_id=11, fill=lower_rad_ref, region=+s5 & -s6 & +s32 & -s34)\n c12 = openmc.Cell(cell_id=12, fill=upper_rad_ref, region=+s5 & -s6 & +s36 & -s38)\n root.add_cells((c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12))\n\n # Assign root universe to geometry\n model.geometry.root_universe = root\n\n model.settings.batches = 10\n model.settings.inactive = 5\n model.settings.particles = 100\n model.settings.source = openmc.Source(space=openmc.stats.Box(\n [-160, -160, -183], [160, 160, 183]))\n\n plot = openmc.Plot()\n plot.origin = (125, 125, 0)\n plot.width = (250, 250)\n plot.pixels = (3000, 3000)\n plot.color_by = 'material'\n model.plots.append(plot)\n\n return model",
"def genCdata(material=\"SS-316L\"):\n starter_res = params[\"starter_res\"]\n if material==\"SS-316L\":\n # temp range [0-20]\n C_range_1 = np.full(starter_res,450.0)\n # temp range [20-93]\n C_range_2 = np.full(int(starter_res*(73/20)),(485.0+500.0)/2.0)\n # temp range [93-100]\n C_range_3 = np.full(int(starter_res*(7/20)),500.0)\n # specific heat capacity, complete\n C_data = np.concatenate([C_range_1,C_range_2,C_range_3],axis=0)\n # temperature data for plotting data\n C_temp_range = np.linspace(0,CelciusToK(100),C_data.shape[0])\n return C_data,C_temp_range\n else:\n print(material,\" : Unsupported material\")\n return None",
"def calc_complex_occulter(substrate, metal, dielectric, lam, aoi, t_Ti,\n t_metal_map, t_diel_map, d0, pol, flagOPD=False):\n real_nonnegative_scalar(t_Ti, 't_Ti', TypeError)\n twoD_array(t_metal_map, 't_metal_map', TypeError)\n twoD_array(t_diel_map, 't_diel_map', TypeError)\n\n out_map = np.zeros_like(t_metal_map, dtype=complex)\n\n t_Ti_map = np.zeros_like(t_metal_map)\n t_Ti_map[t_metal_map > 10*np.finfo(float).eps] = t_Ti\n # Put each vector as a column in a matrix\n t_unique_mat = np.unique(np.stack((t_diel_map.flatten(),\n t_metal_map.flatten(),\n t_Ti_map.flatten()),\n ),\n axis=1)\n\n t_diel_vec_short = t_unique_mat[0, :]\n t_metal_vec_short = t_unique_mat[1, :]\n t_Ti_vec_short = t_unique_mat[2, :]\n\n Nshort = t_unique_mat.shape[1]\n # tCoefShort = np.zeros(Nshort)\n # rCoefShort = np.zeros(Nshort)\n\n for ii in range(Nshort):\n\n t_diel = t_diel_vec_short[ii]\n t_metal = t_metal_vec_short[ii]\n t_Ti_here = t_Ti_vec_short[ii]\n\n tCoef, rCoef = calc_complex_trans_matrix(\n substrate, metal, dielectric, lam, aoi, t_Ti_here,\n [t_metal, ], [t_diel, ], d0, pol, flagOPD)\n\n thisRegion = np.logical_and(\n np.logical_and(t_metal_map == t_metal, t_diel_map == t_diel),\n t_Ti_map == t_Ti_here)\n\n out_map[thisRegion] = tCoef[0]\n\n return out_map",
"def dcg_from_dc_params(mx_collect_dict):\n\n group = None\n\n try:\n ws_client = Client(_WS_COLLECTION_URL,\n cache = None)\n\n group = \\\n ws_client.factory.create('ns0:dataCollectionGroupWS3VO')\n except:\n raise\n else:\n try: \n group.actualContainerBarcode = \\\n mx_collect_dict['actualContainerBarcode']\n except:\n pass\n\n try: \n group.actualContainerSlotInSC = \\\n mx_collect_dict['actualContainerSlotInSC']\n except KeyError:\n pass\n\n\n try: \n group.actualSampleBarcode = \\\n mx_collect_dict['actualSampleBarcode']\n except KeyError:\n pass\n\n\n try: \n group.actualSampleSlotInContainer = \\\n mx_collect_dict['actualSampleSlotInContiner']\n except KeyError:\n pass\n\n\n try:\n group.blSampleId = \\\n mx_collect_dict['sample_reference']['blSampleId']\n except KeyError,diag:\n pass\n\n\n try:\n group.comments = mx_collect_dict['comment']\n except KeyError,diag:\n pass\n\n group.endTime = datetime.now()\n\n# try:\n# group.crystalClass = mx_collect_dict['crystalClass']\n# except KeyError,diag:\n# pass\n\n# modes=(\"Software binned\", \"Unbinned\", \"Hardware binned\")\n\n# try:\n# det_mode = int(mx_collect_dict['detector_mode'])\n# group.detectorMode = modes[det_mode]\n# except (KeyError, IndexError, ValueError, TypeError):\n# det_mode = 1\n# group.detectorMode = modes[det_mode]\n\n\n try:\n try:\n helical_used = mx_collect_dict['helical']\n except:\n helical_used = False\n else:\n if helical_used:\n mx_collect_dict['experiment_type'] = 'Helical'\n mx_collect_dict['comment'] = 'Helical'\n\n try:\n directory = mx_collect_dict['fileinfo']['directory']\n except:\n directory = ''\n else:\n if 'mesh' in directory:\n mesh_used = True\n else:\n mesh_used = False\n\n if mesh_used:\n mx_collect_dict['experiment_type'] = 'Mesh'\n comment = mx_collect_dict.get(\"comment\", \"\")\n if not comment:\n try:\n mx_collect_dict['comment'] = \\\n 'Mesh: phiz:' + str(mx_collect_dict['motors'].values()[0]) + \\\n ', phiy' + str(mx_collect_dict['motors'].values()[1])\n 
except:\n mx_collect_dict['comment'] = 'Mesh: Unknown motor positions' \n\n group.experimentType = mx_collect_dict['experiment_type']\n except KeyError,diag:\n pass\n\n\n try:\n group.sessionId = mx_collect_dict['sessionId']\n except:\n pass\n\n try:\n start_time = mx_collect_dict[\"collection_start_time\"]\n start_time = datetime.\\\n strptime(start_time , \"%Y-%m-%d %H:%M:%S\")\n group.startTime = start_time\n except:\n pass\n\n try:\n group.dataCollectionGroupId = mx_collect_dict[\"group_id\"]\n except:\n pass\n\n return group",
"def convert(mdi_model):\r\n\r\n timer = timer_m.Timer()\r\n reporter_m.info(\"Converting MDI to MDC ...\")\r\n\r\n mdc_model = mdc_m.MDC()\r\n\r\n # type conversions\r\n for num_surface, mdi_surface in enumerate(mdi_model.surfaces):\r\n\r\n mdi_surface.uv_map_to_type(mdi_m.MDIUVMapBijective)\r\n mdi_surface.shader_to_type(mdi_m.MDIShaderPaths)\r\n mdi_surface.vertices_to_type(mdi_m.MDIMorphVertex, mdi_model)\r\n\r\n mdi_model.tags_to_type(mdi_m.MDIFreeTag)\r\n mdi_model.lod_to_type(mdi_m.MDIDiscreteLOD)\r\n\r\n # mdc frame infos\r\n for num_frame in range(len(mdi_model.bounds.aabbs)):\r\n\r\n mdc_frame_info = MDIToModel._to_mdc_frame_info(mdi_model,\r\n num_frame)\r\n mdc_model.frame_infos.append(mdc_frame_info)\r\n\r\n # mdc tag infos\r\n for num_tag in range(len(mdi_model.tags)):\r\n\r\n mdc_tag_info = MDIToModel._to_mdc_tag_info(mdi_model,\r\n num_tag)\r\n mdc_model.tag_infos.append(mdc_tag_info)\r\n\r\n # mdc frame tags\r\n for mdi_free_tag in mdi_model.tags:\r\n\r\n for num_frame in range(len(mdi_free_tag.locations)):\r\n\r\n mdc_frame_tags = MDIToModel._to_mdc_frame_tags(mdi_model,\r\n num_frame)\r\n mdc_model.tags.append(mdc_frame_tags)\r\n\r\n break\r\n\r\n # mdc surfaces\r\n comp_frame_normals = _calc_comp_frame_normals()\r\n base_frame_indices, comp_frame_indices = \\\r\n MDIToModel._calc_frame_indices(mdi_model)\r\n\r\n for num_surface in range(len(mdi_model.surfaces)):\r\n\r\n mdc_surface = MDIToModel._to_mdc_surface(mdi_model,\r\n num_surface,\r\n comp_frame_normals,\r\n base_frame_indices,\r\n comp_frame_indices)\r\n mdc_model.surfaces.append(mdc_surface)\r\n\r\n # headers\r\n MDIToModel._calc_mdc_headers(mdc_model, mdi_model)\r\n\r\n time = timer.time()\r\n reporter_m.info(\"Converting MDI to MDC DONE (time={})\".format(time))\r\n\r\n return mdc_model",
"def _extract_measurements(\n self,\n csvl_data: list[list[str]],\n csvl_post: list[list[str]],\n verboseprint: typing.Callable[..., typing.Any],\n ) -> tuple[list[str], dict[str, typing.Any]]:\n measurements = self._parse_measurements_metadata(csvl_post, verboseprint)\n header = csvl_data[0]\n if not self._check_header_measurements_keys(header, measurements, verboseprint):\n msg = \"check header and measurements.keys() FAILED.\"\n raise CsvLineError(msg)\n columns = [r.replace(\":\", \"\") for r in header]\n dfdata = pd.DataFrame(csvl_data[1:], columns=columns)\n w = dfdata.drop_duplicates([\"Well\"])\n wells = w.Well.tolist()\n if wells != self._wells_platemap:\n msg = \"well_list from data_list and platemap differ. It might be that you did not export data for all acquired wells\"\n warnings.warn(msg, stacklevel=2)\n\n # Monochromator is expected to be either Exc or Ems\n for k, measurement in measurements.items():\n label = f\"Meas{k}\"\n heading = collections.namedtuple(\"heading\", \"ex em res\")\n head = heading(\n f\"{label}WavelengthExc\", f\"{label}WavelengthEms\", f\"{label}Result\"\n )\n # excitation spectra must have only one emission wavelength\n if measurement[\"metadata\"][\"Monochromator\"] == \"Excitation\":\n x = [r for r in dfdata[head.em] if r]\n c = collections.Counter(x)\n if (\n len(c) != 1\n or next(iter(c.keys())) != measurement[\"metadata\"][\"Wavelength\"]\n ):\n msg = f\"Excitation spectra with unexpected emission in {label}\"\n raise CsvLineError(msg)\n measurement[\"lambda\"] = [\n float(r) for r in dfdata[head.ex][dfdata.Well == wells[0]] if r\n ]\n # emission spectra must have only one excitation wavelength\n elif measurement[\"metadata\"][\"Monochromator\"] == \"Emission\":\n x = [r for r in dfdata[head.ex] if r]\n c = collections.Counter(x)\n if (\n len(c) != 1\n or next(iter(c.keys())) != measurement[\"metadata\"][\"Wavelength\"]\n ):\n msg = f\"Emission spectra with unexpected excitation in {label}\"\n raise CsvLineError(msg)\n 
measurement[\"lambda\"] = [\n float(r) for r in dfdata[head.em][dfdata.Well == wells[0]] if r\n ]\n else:\n msg = f'Unknown \"Monochromator\": {measurement[\"metadata\"][\"Monochromator\"]} in {label}'\n raise CsvLineError(msg)\n for w in wells:\n measurement[w] = [\n float(r) for r in dfdata[head.res][dfdata.Well == w] if r\n ]\n return wells, measurements",
"def extractcompactmodel(data):\n\n\n # Find the value of n_eff, n_g and D at lambda_c\n lambda_c = c/data['f'][5]*1e9\n n_eff = data['real(neff)'][5]\n n_g = data['ng'][5]\n D = data['D'][5]\n\n\n\n return lambda_c, n_eff, n_g, D",
"def get_basic_measurement_circuit(\n string: QubitPauliString,\n) -> Tuple[Circuit, MeasurementInfo]:\n measurement_circuit = Circuit()\n measured_qbs = []\n for qb, p in string.to_dict().items():\n if p == Pauli.I:\n continue\n measured_qbs.append(qb)\n measurement_circuit.add_qubit(qb)\n if p == Pauli.X:\n measurement_circuit.H(qb)\n elif p == Pauli.Y:\n measurement_circuit.Rx(0.5, qb)\n bits = []\n for b_idx, qb in enumerate(measured_qbs):\n unit = Bit(b_idx)\n bits.append(unit)\n measurement_circuit.add_bit(unit, False)\n measurement_circuit.Measure(qb, unit)\n return (measurement_circuit, (string, bits, False))",
"def get_items(self):\n\n self.logger.info(\"Dielectric Builder Started\")\n\n self.logger.info(\"Setting indexes\")\n self.ensure_indicies()\n\n q = dict(self.query)\n q.update(self.materials.lu_filter(self.dielectric))\n q[\"dielectric\"] = {\"$exists\": 1}\n mats = self.materials.distinct(self.materials.key, q)\n\n self.logger.info(\"Found {} new materials for dielectric data\".format(len(mats)))\n\n return self.materials.query(criteria=q, properties=[self.materials.key, \"dielectric\", \"piezo\", \"structure\"])",
"def test_cp02pmui(self):\n self.create_sample_data_set_dir('CP02PMUI.DAT', RECOV_DIR, RECOV_FILE_ONE)\n self.create_sample_data_set_dir('CP02PMUI.DAT', TELEM_DIR, TELEM_FILE_ONE)\n self.assert_initialize()\n\n self.get_samples(DataParticleType.START_TIME_RECOVERED, 1, 10)\n self.get_samples(DataParticleType.ENGINEERING_RECOVERED, 267, 60)\n self.get_samples(DataParticleType.STATUS_RECOVERED, 7, 10)\n self.get_samples(DataParticleType.START_TIME_TELEMETERED, 1, 10)\n self.get_samples(DataParticleType.ENGINEERING_TELEMETERED, 267, 60)\n self.get_samples(DataParticleType.STATUS_TELEMETERED, 7, 10)",
"def calc_pwr( BW_used_rd_1, BW_used_wr_1, p_empty_1, p_miss_1, p_hit_1, t_ppd_1, t_sr_1, P_tot_1, mem_conf_names, BW_used_rd_2, BW_used_wr_2 ):\n\n mem_conf_lookup = {'ddr3-800' : '800',\n 'ddr3-1066' : '1066',\n 'ddr3-1333' : '1333',\n 'ddr3-1600' : '1600'\n }\n\n mem_conf_1 = mem_conf_lookup[mem_conf_names[0]]\n mem_conf_2 = mem_conf_lookup[mem_conf_names[1]]\n\n\n # calculation of different parameters from Micron's power consumption guide TN-41-01\n CASno = 1 / ( 1 - p_hit_1 )\n\n P_read_mem1 = ( (Idd4r[mem_conf_1] - Idd2p0[mem_conf_1]/ranks_no)/1000.0 - Idd3n[mem_conf_1]/ranks_no/1000.0 )*dram_voltage_max*(dram_voltage_normal/dram_voltage_max)**2\n P_read_mem2 = ( (Idd4r[mem_conf_2] - Idd2p0[mem_conf_2]/ranks_no)/1000.0 - Idd3n[mem_conf_2]/ranks_no/1000.0 )*dram_voltage_max*(dram_voltage_normal/dram_voltage_max)**2\n\n P_write_mem1 = ( (Idd4w[mem_conf_1] - Idd2p0[mem_conf_1]/ranks_no)/1000.0 - Idd3n[mem_conf_1]/ranks_no/1000.0 )*dram_voltage_max*(dram_voltage_normal/dram_voltage_max)**2\n P_write_mem2 = ( (Idd4w[mem_conf_2] - Idd2p0[mem_conf_2]/ranks_no)/1000.0 - Idd3n[mem_conf_2]/ranks_no/1000.0 )*dram_voltage_max*(dram_voltage_normal/dram_voltage_max)**2\n\n P_rpmiss_mem1 = ( (Idd0[mem_conf_1] - Idd2p0[mem_conf_1]/ranks_no)/1000.0 - ( Idd3n[mem_conf_1]/ranks_no/1000.0*tRAS[mem_conf_1] \\\n + Idd2n[mem_conf_1]/ranks_no/1000.0*(tRC[mem_conf_1]-tRAS[mem_conf_1])) / tRC[mem_conf_1] + ((Idd4r[mem_conf_1] - Idd2p0[mem_conf_1]/ranks_no)/1000.0 - Idd3n[mem_conf_1]/ranks_no/1000.0 )*BL/2*tCK[mem_conf_1] / tRC[mem_conf_1] ) \\\n * dram_voltage_max*(dram_voltage_normal/dram_voltage_max)**2\n\n P_rpmiss_mem2 = ( (Idd0[mem_conf_2] - Idd2p0[mem_conf_2]/ranks_no)/1000.0 - ( Idd3n[mem_conf_2]/ranks_no/1000.0*tRAS[mem_conf_2] \\\n + Idd2n[mem_conf_2]/ranks_no/1000.0*(tRC[mem_conf_2]-tRAS[mem_conf_2])) / tRC[mem_conf_2] + ((Idd4r[mem_conf_2] - Idd2p0[mem_conf_2]/ranks_no)/1000.0 - Idd3n[mem_conf_2]/ranks_no/1000.0 )*BL/2*tCK[mem_conf_2] / tRC[mem_conf_2] ) \\\n * 
dram_voltage_max*(dram_voltage_normal/dram_voltage_max)**2\n\n P_wpmiss_mem1 = ( (Idd0[mem_conf_1] - Idd2p0[mem_conf_1]/ranks_no)/1000.0 - ( Idd3n[mem_conf_1]/ranks_no/1000.0*tRAS[mem_conf_1] \\\n + Idd2n[mem_conf_1]/ranks_no/1000.0*(tRC[mem_conf_1]-tRAS[mem_conf_1])) / tRC[mem_conf_1] + ((Idd4w[mem_conf_1] - Idd2p0[mem_conf_1]/ranks_no)/1000.0 - Idd3n[mem_conf_1]/ranks_no/1000.0 )*BL/2*tCK[mem_conf_1] / tRC[mem_conf_1] ) \\\n * dram_voltage_max*(dram_voltage_normal/dram_voltage_max)**2\n\n P_wpmiss_mem2 = ( (Idd0[mem_conf_2] - Idd2p0[mem_conf_2]/ranks_no)/1000.0 - ( Idd3n[mem_conf_2]/ranks_no/1000.0*tRAS[mem_conf_2] \\\n + Idd2n[mem_conf_2]/ranks_no/1000.0*(tRC[mem_conf_2]-tRAS[mem_conf_2])) / tRC[mem_conf_2] + ((Idd4w[mem_conf_2] - Idd2p0[mem_conf_2]/ranks_no)/1000.0 - Idd3n[mem_conf_2]/ranks_no/1000.0 )*BL/2*tCK[mem_conf_2] / tRC[mem_conf_2] ) \\\n * dram_voltage_max*(dram_voltage_normal/dram_voltage_max)**2\n\n E_hit_rd_1 = P_read_mem1*BL/2*tCK[mem_conf_1]\n\n E_miss_rd_1 = P_rpmiss_mem1*tRC[mem_conf_1]\n\n E_term_rd_1 = P_term_rd*DQ_rd/1000.0*BL/2*tCK[mem_conf_1]\n\n E_hit_rd_2 = P_read_mem2*BL/2*tCK[mem_conf_2]\n\n E_miss_rd_2 = P_rpmiss_mem2*tRC[mem_conf_2]\n\n E_term_rd_2 = P_term_rd*DQ_rd/1000.0*BL/2*tCK[mem_conf_2]\n\n\n E_hit_wr_1 = P_read_mem1*BL/2*tCK[mem_conf_1]\n\n E_miss_wr_1 = P_wpmiss_mem1*tRC[mem_conf_1]\n\n E_term_wr_1 = P_term_wr*DQ_wr/1000.0*BL/2*tCK[mem_conf_1]\n\n E_hit_wr_2 = P_read_mem2*BL/2*tCK[mem_conf_2]\n\n E_miss_wr_2 = P_wpmiss_mem2*tRC[mem_conf_2]\n\n E_term_wr_2 = P_term_wr*DQ_wr/1000.0*BL/2*tCK[mem_conf_2]\n\n # calculate P_rd and P_wr using equations 28 -- 31\n P_rd_1 = BW_used_rd_1/1000. * ( E_miss_rd_1 * ( p_miss_1 + p_empty_1 ) + E_hit_rd_1 * ( p_hit_1 ) + E_term_rd_1 )/1000.0*16.0*1.024*1.024\n\n P_rd_2 = BW_used_rd_2/1000. * ( E_miss_rd_2 * ( p_miss_1 + p_empty_1 ) + E_hit_rd_2 * ( p_hit_1 ) + E_term_rd_2 )/1000.0*16.0*1.024*1.024\n\n P_wr_1 = BW_used_wr_1/1000. 
* ( E_miss_wr_1 * ( p_miss_1 + p_empty_1 ) + E_hit_wr_1 * ( p_hit_1 ) + E_term_wr_1 )/1000.0*16.0*1.024*1.024\n\n P_wr_2 = BW_used_wr_2/1000. * ( E_miss_wr_2 * ( p_miss_1 + p_empty_1 ) + E_hit_wr_2 * ( p_hit_1 ) + E_term_wr_2 )/1000.0*16.0*1.024*1.024\n\n # Active standby\n P_stb_1 = Idd3n[mem_conf_1]/ranks_no/1000.0*dram_voltage_max*(dram_voltage_normal/dram_voltage_max)**2\n P_stb_2 = Idd3n[mem_conf_2]/ranks_no/1000.0*dram_voltage_max*(dram_voltage_normal/dram_voltage_max)**2\n\n # PPD and SR time percentages are measured per channel, therefore both ranks\n P_ppd_1 = Idd2p1[mem_conf_1]/1000.0*dram_voltage_max*(dram_voltage_normal/dram_voltage_max)**2\n P_ppd_2 = Idd2p1[mem_conf_2]/1000.0*dram_voltage_max*(dram_voltage_normal/dram_voltage_max)**2\n\n P_sr_1 = Idd6[mem_conf_1]/1000.0*dram_voltage_max*(dram_voltage_normal/dram_voltage_max)**2\n P_sr_2 = Idd6[mem_conf_2]/1000.0*dram_voltage_max*(dram_voltage_normal/dram_voltage_max)**2\n\n P_ref_1 = (Idd5b[mem_conf_1] - Idd2p0[mem_conf_1]/ranks_no - Idd3n[mem_conf_1]/ranks_no)/1000.0*tRFC/(tREFI*1000.0)*dram_voltage_max*(dram_voltage_normal/dram_voltage_max)**2\n P_ref_2 = (Idd5b[mem_conf_2] - Idd2p0[mem_conf_2]/ranks_no - Idd3n[mem_conf_2]/ranks_no)/1000.0*tRFC/(tREFI*1000.0)*dram_voltage_max*(dram_voltage_normal/dram_voltage_max)**2\n\n\n # calculate P_tot_2\n P_tot_2 = P_tot_1 - ( P_ppd_1*(t_ppd_1 - t_sr_1) + P_sr_1*(t_sr_1) + ( 1 - t_ppd_1 )*( P_stb_1 + P_ref_1 )*ranks_no )*dimms_no \\\n + ( P_ppd_2*(t_ppd_1 - t_sr_1) + P_sr_2*(t_sr_1) + ( 1 - t_ppd_1 )*( P_stb_2 + P_ref_2 )*ranks_no )*dimms_no \\\n - P_rd_1 - P_wr_1 + P_rd_2 + P_wr_2\n\n return P_tot_2"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
API endpoint returning a JSON dict representing the result of a call to `utils.has_septic()` URL parameters `address` and `zipcode` are required or the view will report failure. If successful, the result of `has_septic()` is stored as a boolean under the `result` key If an error is caught, it is recorded under the `error` key
|
def check_septic(request, api_version=None):
    """Return a JSON dict wrapping the result of `utils.has_septic()`.

    Both `address` and `zipcode` URL parameters are required; when either is
    missing the response carries only an "error" key. Otherwise the boolean
    result of `has_septic()` is stored under "result", and any exception
    raised while checking is recorded as a string under "error".
    """
    address = request.GET.get('address')
    zipcode = request.GET.get('zipcode')
    if address and zipcode:
        payload = {
            "address": address,
            "zipcode": zipcode,
            "result": None,
            "error": None,
        }
        try:
            payload["result"] = has_septic(address, zipcode)
        except Exception as exc:
            # Boundary handler: surface the failure to the API caller
            # instead of letting the view crash.
            payload["error"] = str(exc)
        return JsonResponse(payload)
    return JsonResponse({
        "error": f"Missing one or more required url parameters: address ({address}), zipcode ({zipcode})",
    })
|
[
"def isPincodePresent(arg0, context=None):\n if context.get(\"error\") is True:\n return {\n \"statusCode\": 4001,\n \"statusMessage\": context.get(\"error_response\", \"\")\n }\n response_dict = {\n \"statusCode\": 200,\n \"statusMessage\": \"Success\",\n \"response\": \"\"\n }\n token = context.get(\"token\")\n #url is build for client api to request for pincode serving \"Cash\"\n url = 'https://%s%s?token=%s&cash=Y&filter_codes=%s' %\\\n (settings.APP_API_URL, settings.POSTAL_CODE_API_CLIENT_JSON,\n token, arg0)\n pin_response = requests.get(url,\n headers={\"content-type\":\n \"application/x-www-form-urlencoded\"},\n verify=False)\n pin_dict = json.loads(pin_response.content)\n if pin_dict['delivery_codes']:\n response_dict[\"response\"] = \"True\"\n else:\n response_dict[\"response\"] = \"False\"\n return response_dict",
"def validate_address():\n def validate(address, typ, currency):\n try:\n ver = currencies.validate_bc_address(address)\n except InvalidAddressException:\n return False\n\n if typ == 'buyable':\n lst = currencies.buyable_currencies\n elif typ == 'sellable':\n lst = currencies.sellable_currencies\n elif typ == 'unsellable':\n lst = currencies.unsellable_currencies\n elif typ == 'unbuyable':\n lst = currencies.unbuyable_currencies\n else:\n abort(400)\n\n for curr in lst:\n if ver in curr.address_version:\n if curr.key == currency or currency == 'Any':\n return True\n return False\n\n data = request.json\n if validate(data['address'], data['type'], data['currency']):\n return jsonify({data['currency']: True})\n else:\n return jsonify({data['currency']: False})",
"def address(self, **kwargs):\n valid_address = None\n\n address_dict = OrderedDict()\n address_dict[\"Address1\"] = kwargs.get(\"address1\", \"\")\n address_dict[\"Address2\"] = kwargs.get(\"address2\", \"\")\n address_dict[\"City\"] = kwargs.get(\"city\", \"\")\n address_dict[\"State\"] = kwargs.get(\"state\", \"\")\n address_dict[\"Zip5\"] = kwargs.get(\"zip_code\", \"\")\n address_dict[\"Zip4\"] = kwargs.get(\"plus_four\", \"\")\n\n address_validate_request = OrderedDict()\n address_validate_request[\"@USERID\"] = self.usps_id\n address_validate_request[\"Address\"] = address_dict\n\n address_validate_dict = {\"AddressValidateRequest\": address_validate_request}\n\n resp = self.usps_call(api=\"Verify\", data=address_validate_dict, resp_variable=\"address_validate_response\")\n\n if \"address\" in resp:\n ret_address = resp.get(\"address\")\n valid_address = {\n \"address1\": ret_address.get(\"address2\"),\n \"address2\": ret_address.get(\"address1\"),\n \"city\": ret_address.get(\"city\"),\n \"state\": ret_address.get(\"state\"),\n \"zip_code\": ret_address.get(\"zip5\"),\n \"plus_four\": ret_address.get(\"zip4\"),\n }\n\n return valid_address",
"def location_view(_request, zipcode):\n logging.info(\"Calling Idemia /locations endpoint with zipcode: %s\", zipcode)\n\n # Dummy location info\n location_list = [\n {\n \"title\": \"IdentoGO - TSA PreCheck™\",\n \"address\": \"1 Saarinen Circle\",\n \"address2\": \"IAD International Airport\",\n \"city\": \"Sterling\",\n \"state\": \"VA\",\n \"postalCode\": \"20166-7547\",\n \"distance\": \"10.452655457551472\",\n \"hours\": \"Monday-Friday: 8:00 AM - 9:30 AM & 9:45 AM - 11:30 AM & 12:00 PM - 2:00 PM & 2:15 PM - 4:00 PM\",\n \"phone\": \"855-787-2227\",\n \"geocode\": {\"latitude\": \"38.952809\", \"longitude\": \"-77.447961\"},\n },\n {\n \"title\": \"IdentoGO TSA PreCheck™ Enrollment at Staples\",\n \"address\": \"8387 Leesburg Pike\",\n \"address2\": \"Ste C\",\n \"city\": \"Vienna\",\n \"state\": \"VA\",\n \"postalCode\": \"22182-2420\",\n \"distance\": \"10.452655457551472\",\n \"hours\": \"Monday-Friday: 10:00 AM - 12:00 PM & 1:00 PM - 5:00 PM\",\n \"phone\": \"703-883-0011\",\n \"geocode\": {\"latitude\": \"38.921954\", \"longitude\": \"-77.236917\"},\n },\n {\n \"title\": \"IdentoGO - TSA PreCheck™, TWIC, HAZMAT\",\n \"address\": \"1968 Gallows Rd\",\n \"address2\": \"VA DMV-Tyson's Corner\",\n \"city\": \"Vienna\",\n \"state\": \"VA\",\n \"postalCode\": \"22182-3909\",\n \"distance\": \"20.51593994774416\",\n \"hours\": \"Monday-Friday: 8:00 AM - 1:00 PM & 2:00 PM - 4:30 PM Saturday: 8:00 AM - 12:00 PM\",\n \"phone\": \"807-497-7100\",\n \"geocode\": {\"latitude\": \"38.910709\", \"longitude\": \"-77.225463\"},\n },\n {\n \"title\": \"IdentoGO TSA PreCheck™ Enrollment at Staples\",\n \"address\": \"9890 Liberia Ave\",\n \"address2\": \"\",\n \"city\": \"Manassas\",\n \"state\": \"VA\",\n \"postalCode\": \"20110-5836\",\n \"distance\": \"24.29308762203185\",\n \"hours\": \"Monday-Thursday: 10:00 AM - 12:00 PM & 1:00 PM - 6:00 PM\",\n \"phone\": \"877-783-4187\",\n \"geocode\": {\"latitude\": \"38.743717\", \"longitude\": \"-77.451883\"},\n },\n {\n 
\"title\": \"IdentoGO - State Agency Enrollment\",\n \"address\": \"3139 Duke St\",\n \"address2\": \"\",\n \"city\": \"Alexandria\",\n \"state\": \"VA\",\n \"postalCode\": \"22314-4518\",\n \"distance\": \"30.81106117961712\",\n \"hours\": \"Monday-Thursday: 8:00 AM - 1:00 PM & 1:30 PM - 4:30 PM Friday: 8:00 AM - 1:00 PM & 1:30 PM - 4:00 PM\",\n \"phone\": \"877-783-4187\",\n \"geocode\": {\"latitude\": \"38.808868\", \"longitude\": \"-77.084946\"},\n },\n ]\n\n return Response(location_list)",
"def test_search_by_SSN_with_tester_and_invalid_ssn(self):\n Test_Start()\n\n prn_info = False\n\n # We should search for this\n p = get_object_or_404(PatientProfile, patient_id=VALID_PATIENT_ID)\n\n if prn_info!=False:\n print p\n\n usrname = USERNAME_FOR_TEST\n passwd=PASSWORD_FOR_TEST\n output = []\n post_url = '/intake/search-by-ssn'\n post_parameters = {'last_4_ssn':INVALID_LAST_4_SSN}\n look_for_this = \" 0 Matches For \"\n calling_test_function = inspect.getframeinfo(inspect.currentframe().f_back)[2]\n print \"calling:[\"+calling_test_function+\"]\"\n\n Access_Authorised = test_for_200(self, usrname, passwd, output, post_url,post_parameters, look_for_this, calling_test_function, prn_info )\n if Access_Authorised == None:\n Test_Msg(\"Successful Test for 200\")\n else:\n Test_Msg(\"Test Failed for 200\")\n\n Test_End()\n\n return",
"def verify_city(json_response):\n if json_response['cod'] == '404':\n print('City could not be found, please retry')\n return False\n\n return True",
"def test_search_by_SSN_with_non_Tester_401(self):\n\n Test_Start()\n\n prn_info = False\n\n # We should search for this\n p = get_object_or_404(PatientProfile, patient_id=VALID_PATIENT_ID)\n if prn_info!=False:\n print p\n\n calling_test_function = inspect.getframeinfo(inspect.currentframe().f_back)[2]\n\n if prn_info!=False:\n print \"calling:[\"+calling_test_function+\"]\"\n\n usrname = USERNAME_NOT_TEST\n passwd=PASSWORD_NOT_TEST\n output = []\n post_url = '/intake/search-by-ssn'\n post_parameters = {'last_4_ssn':VALID_LAST_4_SSN}\n\n Access_Authorised = test_for_401(self, usrname, passwd, output, post_url,post_parameters, calling_test_function, prn_info )\n if Access_Authorised == None:\n Test_Msg(\"Successful Test for 401\")\n else:\n Test_Msg(\"Test Failed for 401\")\n\n Test_End()\n\n return",
"def get_zipcode(self, **kwargs):\n valid_address = {\"address1\": \"\", \"address2\": \"\", \"city\": \"\", \"state\": \"XX\"}\n\n address_dict = OrderedDict()\n address_dict[\"Address1\"] = kwargs.get(\"address2\", \"\")\n address_dict[\"Address2\"] = kwargs.get(\"address1\", \"\")\n address_dict[\"City\"] = kwargs.get(\"city\", \"\")\n address_dict[\"State\"] = kwargs.get(\"state\", \"\")\n\n address_validate_request = OrderedDict()\n address_validate_request[\"@USERID\"] = self.usps_id\n address_validate_request[\"Address\"] = address_dict\n\n address_validate_dict = {\"ZipCodeLookupRequest\": address_validate_request}\n\n resp = self.usps_call(api=\"ZipCodeLookup\", data=address_validate_dict)\n\n if \"address\" in resp:\n ret_address = resp.get(\"address\")\n valid_address = {\n \"address1\": ret_address.get(\"address2\"),\n \"address2\": ret_address.get(\"address1\"),\n \"city\": ret_address.get(\"city\"),\n \"state\": ret_address.get(\"state\"),\n \"zip_code\": ret_address.get(\"zip5\"),\n \"plus_four\": ret_address.get(\"zip4\"),\n }\n\n return valid_address",
"def search_by_pin(pincode, date):\n url = f'https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/' \\\n f'calendarByPin?pincode={pincode}&date={date}'\n\n try:\n request = requests.get(url, headers={})\n json_data = request.json()\n if request.status_code == 200:\n return json_data\n else:\n print('Error: ', str(json_data))\n except Exception as e:\n print('Exception: ', str(e))",
"async def _setSkodaAPI(self, endpoint, vin, **data):\n try:\n await self.set_token('connect')\n url = f\"https://api.connect.skoda-auto.cz/api/v1/{endpoint}/operation-requests?vin={vin}\"\n response = await self._data_call(url, **data)\n if not response:\n raise SkodaException('Invalid or no response')\n else:\n request_id = response.get('id', 0)\n request_state = response.get('status', 'unknown')\n _LOGGER.debug(f'Request returned with state \"{request_state}\", request id: {request_id}')\n return dict({'id': str(request_id), 'state': request_state})\n except:\n raise\n return False",
"def __json__(self, request):\n return {\n \"street\": self.street,\n \"city\": self.city,\n \"state\": self.state,\n \"country\": self.country,\n \"post_code\": self.post_code\n }",
"def entrypoint(params, config):\r\n\r\n school_return_data = dict()\r\n\r\n # Calculate Number of Returning Students and Teachers\r\n number_alunos_retornantes, number_professores_retornantes, limite_turmas, salasocupadas, salaslivres, diasletivos, alunoslivres, professoreslivres = get_school_return_projections(\r\n params[\"number_alunos\"],\r\n params[\"number_alunos_naovoltando\"],\r\n params[\"number_professores\"],\r\n params[\"number_professores_naovoltando\"],\r\n params[\"number_salas\"],\r\n params[\"maxalunossalas\"],\r\n params[\"hours_classpresencial\"],\r\n params[\"hours_classpremoto\"],\r\n params[\"turnos\"],\r\n params[\"professorday\"],\r\n params[\"horaaula\"],\r\n )\r\n\r\n # Calculate Amount of Required Protection Equipment\r\n total_masks, total_sanitizer, total_thermometers = get_school_return_supplies(\r\n number_alunos_retornantes,\r\n number_professores_retornantes,\r\n params[\"hours_classpresencial\"],\r\n params[\"maxalunossalas\"],\r\n config,\r\n )\r\n # Build School Return Data Dictionary\r\n return {\r\n \"number_alunos_retornantes\": number_alunos_retornantes,\r\n \"number_professores_retornantes\": number_professores_retornantes,\r\n \"limite_turmas\": limite_turmas,\r\n \"salasocupadas\": salasocupadas,\r\n \"salaslivres\": salaslivres,\r\n \"alunoslivres\": alunoslivres,\r\n \"professoreslivres\": professoreslivres,\r\n \"diasletivos\": diasletivos,\r\n \"total_masks\": total_masks,\r\n \"total_sanitizer\": round(total_sanitizer, 2),\r\n \"total_thermometers\": total_thermometers,\r\n }",
"def location_search(cls, postcode):\n api_url = f\"http://v0.postcodeapi.com.au/suburbs/{postcode}.json\"\n response = requests.get(api_url)\n return json.loads(response.text) or None",
"def is_serp(referring_url, parser=None, use_naive_method=False):\n res = extract(referring_url, parser=parser,\n use_naive_method=use_naive_method)\n return res is not None",
"def validateroute():\n\n # Process hte request's data\n if request.method == 'POST':\n # Get data in JSON format\n req_data = request.get_json()\n\n # If there is request data then process it\n if req_data:\n # Get the route text\n route_text = req_data.get('route')\n \n # If no route text then log an error_msg and return false\n if not route_text:\n current_app.logger.warning(f'No route was posted. URL [{request.url}] ... JSON Data[{request.get_json()}]')\n return json.dumps({'is-route-valid' : False, 'route_points' : [], 'error' : 'No Waypoints in the route'})\n \n # Process the route, returning whether it is valid, route points, and an error_msg message\n route_valid, route_list, error_msg = process_flightplan_points(route_text)\n \n # If the route is not valid, return it\n if route_valid == False:\n return json.dumps({'is_route_valid' : False, 'route_points' : route_list, 'error' : error_msg})\n \n # Route is valid\n # Create a Flightplan object - used to create a GEOJSON object\n fplan = FlightPlan()\n \n # Add the FlightPlanPoints to the route (if route is valis, then route_list is a list of FlightPlanPoint objects\n fplan.FlightPlanPoints = route_list\n # Pass flightplan to generate a GEOJSON object, generating a JSON response showing route is valid \n fpl_geojson = flightplans.generate_flight_geojson(flightplan_object=fplan)\n\n # Then return a JSON string, showing the status and the GEOJSON object\n fp_json = json.dumps({'is_route_valid' : route_valid, 'GEOJSON' : fpl_geojson})\n \n return fp_json \n \n # No request data passed\n return json.dumps({'is_route_valid' : False, 'error' : 'No request data passed'})",
"def address():\n username = request.args.get('username')\n\n # If username is not given, use the logged in username.\n if username is None or username == '':\n username = auth_username()\n\n if username is None or username == '':\n return bad_json_response(\"Bad request: Missing parameter 'username'.\")\n\n if users.exists(username=username):\n server_id = users.export_one('server_id', username=username)\n\n if not servers.exists(id=server_id):\n bad_json_response('Server is not registered.')\n\n name, address = servers.export_one('name', 'address', id=server_id)\n return good_json_response({\n 'name': name,\n 'address': address,\n 'username': username\n })\n else:\n return bad_json_response('User is not found.')",
"def find_pair_details(kwargs):\n\n try:\n msisdn_list = []\n\n chk_primary = Pairing.query.filter(Pairing.msisdn == '{}'.format(kwargs['primary_msisdn']),\n Pairing.is_primary == True,\n Pairing.end_date == None).first()\n\n # to check if request is made from primary-pair\n if chk_primary:\n\n chk_sec = Pairing.query.filter(Pairing.primary_id == '{}'.format(chk_primary.id),\n Pairing.end_date == None).all()\n if chk_sec:\n for m in chk_sec:\n msisdn_list.append(int(m.msisdn))\n return msisdn_list\n else:\n return custom_text_response(_(\"No Pair is associated with %(pm)s\", pm=kwargs['primary_msisdn']),\n status=STATUS_CODES.get('UNPROCESSABLE_ENTITY'),\n mimetype=MIME_TYPES.get('TEXT'))\n else:\n return custom_text_response(_(\"%(pm)s is not registered as Primary-Pair\", pm=kwargs['primary_msisdn']),\n status=STATUS_CODES.get('UNPROCESSABLE_ENTITY'),\n mimetype=MIME_TYPES.get('TEXT'))\n except Exception as e:\n db.session.rollback() # pragma: no cover\n\n finally:\n db.session.close()",
"def prospects_advisor_details():\n try:\n data = request.get_json()\n\n if data is None:\n return jsonify({'error': 'Request JSON was not found'}), Status.BAD\n\n filter_id = data.get('advisorId', None)\n if filter_id is None:\n return jsonify({'error':'No advisorId provided'})\n\n found_advisor = db.session.query(Advisor)\\\n .filter(Advisor.status == 'Active', Advisor.pk_id == filter_id).first()\n\n if found_advisor is not None:\n response = jsonify({\"id\": found_advisor.pk_id,\n \"email\": found_advisor.email,\n \"firstName\": found_advisor.first_name,\n \"lastName\": found_advisor.last_name[0] if found_advisor.last_name else '',\n \"city\": found_advisor.city,\n \"state\": found_advisor.state,\n \"location\": found_advisor.location,\n \"specialty\": [specialty.to_json() for specialty in (found_advisor.specialties or [])],\n \"occupation\": [occ.to_json() for occ in (found_advisor.occupations or [])],\n \"previousFirm\": [firm.to_json() for firm in (found_advisor.previous_firms or [])],\n \"yearsOfExperience\": found_advisor.years_of_experience_range.value if found_advisor.years_of_experience_range else '',\n \"biography\": found_advisor.biography,\n \"currentFirm\": found_advisor.current_firm.name if found_advisor.current_firm else '',\n \"currentFirmSize\": found_advisor.current_firm_size,\n \"currentFirmRevenue\": found_advisor.current_firm_revenue,\n \"undergradEducation\": found_advisor.undergrad_education,\n \"gradEducation\": found_advisor.grad_education,\n \"imageUrl\": found_advisor.linkedin_picture_url,\n \"resumeUrl\": found_advisor.resume_url} if found_advisor is not None else {})\n else:\n response = jsonify({\"error\": \"No active advisor found with that ID\"})\n\n db.session.close()\n\n return response, Status.COMPLETED\n except:\n db.session.rollback()\n db.session.close()\n e = sys.exc_info()[0]\n v = sys.exc_info()[1]\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(e))\n print(str(v))\n return jsonify({'error': str(e),\n 'value': 
str(v),\n 'line' : str(exc_tb.tb_lineno)\n }), Status.BAD",
"def test_shodan(self):\n\n from context import networks\n\n shodan = networks.shodan\n\n # Complete parameters\n conf = {\"enabled\": True,\n \"api_key\": OPTIONS[\"SHODAN_APIKEY\"]}\n results = {}\n self.assertFalse(results)\n shodan(conf, self.ipaddress, results)\n self.assertIn(\"shodan\", results)\n self.assertIsInstance(results[\"shodan\"], six.text_type)\n r = json.loads(results[\"shodan\"])\n self.assertTrue(r)\n self.assertIsInstance(r, dict)\n self.assertIn(\"data\", r)\n\n results = {}\n shodan(conf, \"8.8.8\", results)\n self.assertFalse(results)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test the stations_by_distance function by checking the length and validity of its output
|
def test_stations_by_distance():
    """Test stations_by_distance: output is a non-empty list of
    (MonitoringStation, float distance) tuples."""
    stations = build_station_list()  # real station data for the test
    p = (0, 0)  # reference coordinate; any point works for this test
    output = stations_by_distance(stations, p)
    assert len(output) > 0  # Ensure that it outputs something
    # At least one of the first few stations must be a non-zero distance
    # from p (guards against the unlikely case that a station sits exactly
    # at p). Slicing avoids the IndexError the old output[2][1] check
    # raised when fewer than three stations were returned.
    assert any(distance > 0 for _, distance in output[:3])
    # isinstance (not `type(...) ==`) so subclasses also satisfy the check
    assert isinstance(output[0][0], MonitoringStation)
    assert isinstance(output[0][1], float)
|
[
"def test_stations_within_radius(): # Add test for stations_within_radius function\n stations = build_station_list() # Create list of stations for testing\n centre = (53, -1) # Put the centre (roughly) in the middle of the UK\n # (according to the data from DEFRA, the extent of the stations is between\n # Lat 49.9-55.8 and Long -6.2 - 2.1)\n r = 1500 # Set a large radius to guarantee encompassing all of the stations\n output = stations_within_radius(stations, centre, r) # Use the test function\n\n if len(stations) == 0: # Ensure that there is some data to be tested\n # from the source\n raise ValueError(\"Source list gives no data\")\n else:\n assert len(output) > 0 # Ensure that it outputs some data\n assert type(output[0]) == MonitoringStation # Ensure that it is outputting a list of names\n # in MonitoringStation format\n assert len(output) == len(stations) # Make sure that it includes all of the stations\n # (as r and centre are set so that it should encompass all of the stations)",
"def testDistance(self):\n\n # testList holds a couple 3-tuple (variable1, variable2, result)\n basicTestList = [(chr(170) * 48, chr(85) * 48, long((chr(255) * 48).encode('hex'), 16))]\n\n for test in basicTestList:\n result = Distance(test[0])(test[1])\n self.failIf(result != test[2], 'Result of _distance() should be %s but %s returned' %\n (test[2], result))",
"def test_forming_propositions_by_distance_in_meters_to_all_buildings_of_Infrastructure():",
"def check_flight_length():\n\n apt_a = validate_input(\"Type first IATA\", str.upper, 3, 3)\n apt_b = validate_input(\"Type second IATA code\", str.upper, 3, 3)\n airports_dict = get_airports()\n distance = int(great_circle(airports_dict[apt_a].coord,\n airports_dict[apt_b].coord)\n .nautical)\n print(\"The distance between {0} and {1} is {2} nautical miles.\"\n .format(airports_dict[apt_a].name,\n airports_dict[apt_b].name,\n distance))",
"def test_should_return_distances_for_known_distances(self):\n self.update_distance_by_satellite('kenobi')\n satellites_info = Satellite.objects.all()\n\n result = get_distances(satellites_info)\n \n assert result == {'kenobi': 100.0, 'skywalker': None, 'sato': None}",
"def test_diff_distance_calculation(self):\n location_a = (45.7597, 4.8422)\n location_b = (48.8567, 2.3508)\n distance = self.haversine.distance(location_a, location_b)\n self.assertEqual(distance, 392.2167178065958)",
"def test_river_by_station_number():\n\n stations = build_station_list()\n assert type (geo.rivers_by_station_number(stations,10)[0]) == tuple\n assert len(geo.rivers_by_station_number(stations,10)) >= 10",
"def test_same_distance_calculation(self):\n location_a = (45.7597, 4.8422)\n location_b = (45.7597, 4.8422)\n distance = self.haversine.distance(location_a, location_b)\n message = \"Expect the distance between {} and {} to be 0.0\".format(\n location_a, location_b\n )\n self.assertEqual(self.haversine.get_location_a, location_a)\n self.assertEqual(distance, 0.0, msg=message)",
"def test_valid_stations(self):\n q = self.client.query()\n q.stations('KFTG', 'KTLX')\n assert self.client.validate_query(q), 'Bad validation check'",
"def find_station_distances(station_list):\n output = \"\"\n for i in range(len(station_list)):\n station = station_list[i][0]\n n1 = station_list[i][1]\n e1 = station_list[i][2]\n for j in range(len(station_list[i:-1])):\n next_station = station_list[j][0]\n n2 = station_list[j][1]\n e2 = station_list[j][2]\n distance = dist(n1,e1,n2,e2)\n output += station + \" -> \" + next_station + \"= \" + str(distance) +\" m\\n\"\n out = open(\"distances.txt\",\"w\")\n out.write(output)\n out.close()",
"def filter_one_station(self, station):\n for lat, lon in self.event_locations:\n dist = get_distance(lat, lon, station.latitude, station.longitude)\n if self.distance_range['mindistance'] <= dist <= self.distance_range['maxdistance']:\n return True\n return False",
"def test_one(self):\n self.assertEqual(a1ece650.calculateDistance(4,2,4,4),2)",
"def test_should_return_none_distances_if_no_distance_is_known(self):\n satellites_info = Satellite.objects.all()\n\n result = get_distances(satellites_info)\n \n assert result == {'kenobi': None, 'skywalker': None, 'sato': None}",
"def test_filter_by_distance(self):\n\n threshold = 1\n points = random.uniform(-1,1,size=(100,6))\n points = mathtools.filter_by_distance(points, threshold)\n \n for point in points:\n dif = points[:,0:3]-point[0:3]\n euclidean_distance = sum(dif*dif,1)\n euclidean_distance = euclidean_distance[euclidean_distance>=1e-6] # excluding the evaluated point from list\n nearer_point = argmin(euclidean_distance)\n self.assertTrue(min(euclidean_distance)>=threshold**2,\n msg = \"The points: \"+str(point)+\" and \"+str(points[nearer_point])+\" are too close\"\n )",
"def filter_gps_distance(correct_gps,distorted_gps,match_features):\n\n dist = []\n for i in range(len(correct_gps)):\n (x1,y1) = correct_gps[i]\n (x2,y2) = distorted_gps[i]\n dist.append(metres_between_gps(y1,x1,y2,x2))\n\n mean = np.mean(dist)\n\n print 'mean dist = ', mean\n print 'min dist = ', np.min(dist)\n print 'max dist = ', np.max(dist)\n\n sel_gps_correct = []\n sel_gps_distorted = []\n sel_match = []\n for j in range(len(correct_gps)):\n #if dist[j] <mean +std and dist[j] > mean -std:\n if dist[j] <= 5:\n sel_gps_correct.append(correct_gps[j])\n sel_gps_distorted.append(distorted_gps[j])\n sel_match.append(match_features[j])\n print '#selected matches: %d out of %d'% (len(sel_gps_distorted), len(distorted_gps))\n return sel_gps_correct,sel_gps_distorted,sel_match",
"def test_levenshteinDistance_blank_blank(self):\n distance = util.levenshteinDistance('', '')\n self.assertEqual(distance, 0)",
"def check_radwin_link_distance_invent(item, _no_params, info):\n state = 3\n\n\n infotext = \"unknown output\"\n try:\n logging.debug(\"radwin_link_distance_invent SNMP output %s\", info)\n link_distance = int(info[0][0])\n if link_distance == -1:\n infotex = \"Device Link is not established \"\n state = 0\n elif link_distance > 0:\n state = 0\n infotext = \"%d\" % link_distance\n else:\n infotext = \"unknown value\"\n state = 0\n except ValueError:\n infotext = \"type mismatch\"\n logging.critical(\"radwin_link_distance_invent %s\", infotext, exc_info=True)\n except:\n infotext = \"unknown value\"\n logging.critical(\"radwin_link_distance_invent %s\", infotext, exc_info=True)\n return (state, infotext)",
"def bond_check(distance, minimum_length=0, maximum_length=1.5):\n if distance > minimum_length and distance < maximum_length:\n return True\n else: \n return False",
"def test_invalid_stations(self):\n q = self.client.query()\n q.stations('KFOO', 'KTLX')\n assert not self.client.validate_query(q), 'Bad validation check'"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test stations_within_radius function by having it find all the stations within a large radius and ensuring the output is all of the stations
|
def test_stations_within_radius(): # Add test for stations_within_radius function
stations = build_station_list() # Create list of stations for testing
centre = (53, -1) # Put the centre (roughly) in the middle of the UK
# (according to the data from DEFRA, the extent of the stations is between
# Lat 49.9-55.8 and Long -6.2 - 2.1)
r = 1500 # Set a large radius to guarantee encompassing all of the stations
output = stations_within_radius(stations, centre, r) # Use the test function
if len(stations) == 0: # Ensure that there is some data to be tested
# from the source
raise ValueError("Source list gives no data")
else:
assert len(output) > 0 # Ensure that it outputs some data
assert type(output[0]) == MonitoringStation # Ensure that it is outputting a list of names
# in MonitoringStation format
assert len(output) == len(stations) # Make sure that it includes all of the stations
# (as r and centre are set so that it should encompass all of the stations)
|
[
"def stations_within_radius(stations, centre, r):\n\n # getting sorted stations and carrying forward exceptions\n try:\n sorted_stations = stations_by_distance(stations, centre)\n except Exception as e:\n raise e\n\n # validation for r\n if type(r) != float and type(r) != int:\n raise TypeError(\"invalid radius, radius was type {}\".format(type(r)))\n\n if r < 0:\n raise ValueError(\"invalid raidus, radius was negative\")\n\n index = (\n binary_search_highest_lesser(sorted_stations, 1, r, 0, len(sorted_stations)) + 1\n )\n\n return [stat[0] for stat in sorted_stations[:index]]",
"def test_radius(self):\n dictList = get_dict_list()\n postcodelatlng = [50.827974, -4.543799]\n radius = 0\n actualOutput = filterData(dictList, postcodelatlng, radius)\n expectedOutput = []\n self.assertEqual(actualOutput, expectedOutput)",
"def get_parking_slots_within_radius(self, radius):\n return ParkingSlot.query.filter(func.ST_Distance_Sphere(ParkingSlot.geo, self.geo) < radius).all()",
"def filter_stores(sp, lat_lng_ecef, initial_radius, inc_radius):\n\n matches = []\n radius = initial_radius\n while len(matches) < 1:\n results = sp.query(lat_lng_ecef, radius)\n if results is not None:\n if len(results):\n matches.extend(results)\n radius += inc_radius\n return matches",
"def verifyRadius(radius:float) -> bool:\n return (0 < radius < 6371)",
"def get_points_in_radius(center_x, center_y, radius, box_size, map_width, map_height):\n min_i = max(center_y - box_size, 0)\n max_i = min(center_y + box_size, map_height - 1)\n min_j = max(center_x - box_size, 0)\n max_j = min(center_x + box_size, map_width - 1)\n radius_square = radius * radius\n for x in range(min_j, max_j + 1):\n for y in range(min_i, max_i + 1):\n # we may use function \"euclidean_distance\", but squared comparison seems more efficient\n # (save about 25% time)\n dx = x - center_x\n dy = y - center_y\n if dx * dx + dy * dy <= radius_square:\n yield (x, y)",
"def check_range(lat_user, lon_user, lat_test, lon_test, radius):\n distance = haversine(lon_user,lat_user, lon_test, lat_test)\n if distance <= radius:\n return True\n else:\n return False",
"def test_stations_by_distance(): # Add test for stations_by_distance function\n stations = build_station_list() # Create list of stations for testing\n p = (0, 0) # p can be anything for the test\n output = stations_by_distance(stations, p) # Use the tested function\n assert len(output) > 0 # Ensure that it outputs something\n assert output[0][1] > 0 or output[2][1] > 0 # Ensure that it outputs a non-zero distance\n # (or a non-zero distance for another tuple on the small chance that one station is exactly\n # at p)\n assert type(output[0][0]) == MonitoringStation # Ensure that the type of object in the \"station\" section is\n # a MonitoringStation object\n assert type(output[0][1]) == float # Ensure that the type of object in the \"distance\" section\n # is a float",
"def find_nearby_bike_station(lat, lon, BIKE_STATION, R=0.1):\n for y in BIKE_STATION:\n r = ProcessGeoData.calculate_distance(lat, lon, y[0], y[1])\n BIKE_STATION_available = False\n BIKE_STATION_location = []\n if r <= R:\n BIKE_STATION_available = True\n BIKE_STATION_location = (y[0], y[1])\n break\n return [BIKE_STATION_available, BIKE_STATION_location]",
"def checkWithinRITRadius(latitude,longitude):\n radius_to_check=2 #2 miles radius\n RIT_center=(-77.679955,43.08611833333333)\n if geopy.distance.distance((RIT_center),(latitude,longitude)).miles<=radius_to_check:\n return True\n else:\n return False",
"def find_in_line_stations():\n try:\n in_line_station = line.find_station(station.position())\n except AttributeError:\n pass\n # find next normal pairs\n get_next_station_pair(in_line_station.index)\n # if line is circle\n if in_line_station.alter_station:\n get_next_station_pair(in_line_station.alter_station.index)\n return near_station_list",
"def test_radar_query_multiple_stations():\n q = RadarQuery()\n q.stations('KTLX', 'KFTG')\n assert str(q) == 'stn=KTLX&stn=KFTG'",
"def get_locations_within_box(lat, lng, lat_inc, lng_inc):\n\n results = PrivateLocation.query(PrivateLocation.min_latitude <= lat+lat_inc)\n locations = list()\n for r in results:\n if r.max_latitude >= lat - lat_inc and\\\n r.max_longitude >= lng - lng_inc and\\\n r.min_longitude <= lng + lng_inc:\n locations.append(r)\n return locations",
"def test_river_by_station_number():\n\n stations = build_station_list()\n assert type (geo.rivers_by_station_number(stations,10)[0]) == tuple\n assert len(geo.rivers_by_station_number(stations,10)) >= 10",
"def black_box_full(x,y,z,n,\n stations_local,ordered_threshs,stations_ecef,center_ecef,\n c0,dt_rms,tanp,projl,chi2_filter,min_stations=5):\n # Creates an array of poins at x,y,z of n elements\n points = np.array([np.zeros(n)+x, np.zeros(n)+y, np.zeros(n)+z]).T\n powers = np.empty(n)\n for i in range(len(powers)):\n powers[i] = np.max(1./np.random.uniform(0,1000,2000))\n # Calculates distance and powers retrieved at each station to threshold\n dt, ran = travel_time(points, stations_local, c0, get_r=True)\n pwr = received_power(powers, ran)\n masking = 10.*np.log10(pwr/1e-3) < ordered_threshs[:,np.newaxis]\n pwr = np.ma.masked_where(masking, pwr)\n dt = np.ma.masked_where(masking, dt)\n ran = np.ma.masked_where(masking, ran)\n # Adds error to the retreived times\n dt_e = dt + np.random.normal(scale=dt_rms, size=np.shape(dt))\n dt_mins = np.argmin(dt_e, axis=0)\n # Precalculate some terms\n points_f_ecef = (tanp.fromLocal(points.T)).T \n full_dxvec, full_drsq = precalc_station_terms(stations_ecef)\n # Run the retrieved locations calculation\n # gen_retrieval returns a tuple of four positions, x,y,z,t.\n dtype=[('x', float), ('y', float), ('z', float), \n ('t', float), ('chi2', float), ('terror',float), \n ('stations', float)]\n # Prime the generator function - it pauses at the first yield statement.\n point_gen = gen_retrieval_full(dt_e, dt_mins, full_dxvec, full_drsq, \n center_ecef, stations_ecef, dt_rms, \n min_stations) \n # Suck up all the values produced by the generator, produce named array.\n # retrieved_locations = np.fromiter(point_gen, dtype=dtype)\n # retrieved_locations = np.array([(a,b,c,e,f,g) for (a,b,c,d,e,f,g) in \n # retrieved_locations])\n retrieved_locations = array_from_generator2(point_gen,rows=n)\n retrieved_locations = retrieved_locations[:,[0,1,2,4,5,6]]\n station_count = retrieved_locations[:,5]\n terror = retrieved_locations[:,4]\n chi2 = retrieved_locations[:,3]\n retrieved_locations = retrieved_locations[:,:3]\n retrieved_locations = 
np.ma.masked_invalid(retrieved_locations)\n # Converts to projection\n soluts = tanp.toLocal(retrieved_locations.T)\n good = soluts[2] > 0\n station_count[~good] = np.nan\n # proj_points = projl.fromECEF(points_f_ecef[good,0], \n # points_f_ecef[good,1], \n # points_f_ecef[good,2])\n # proj_soluts = projl.fromECEF(retrieved_locations[good,0], \n # retrieved_locations[good,1], \n # retrieved_locations[good,2])\n # proj_soluts = np.ma.masked_invalid(proj_soluts)\n # ranges = (np.sum((retrieved_locations[good,:3][:,np.newaxis] - \n # stations_ecef)**2, axis=2)**0.5).T\n # cal_pwr = (np.mean(pwr*(4*np.pi*ranges/4.762)**2, axis=0))\n # return proj_points, proj_soluts, chi2[good], terror[good], station_count[good]\n # return powers[good], cal_pwr, pwr[0][good]\n # return station_count",
"def in_circle(radius):\n return lambda z: z.real ** 2 + z.imag ** 2 < radius ** 2",
"def test_forming_propositions_by_distance_in_meters_to_all_buildings_of_Infrastructure():",
"def black_box(x,y,z,n,\n stations_local,ordered_threshs,stations_ecef,center_ecef,\n tanps,\n c0,dt_rms,tanp,projl,chi2_filter,min_stations=5,just_rms=False):\n points = np.array([np.zeros(n)+x, np.zeros(n)+y, np.zeros(n)+z]).T\n powers = np.empty(n)\n \n # # For the old 1/p distribution:\n # powers = np.random.power(2, size=len(points[:,0]))**-2\n \n # # For high powered sources (all stations contributing):\n # powers[:] = 10000\n\n # For the theoretical distribution:\n for i in range(len(powers)):\n powers[i] = np.max(1./np.random.uniform(0,1000,2000))\n \n # Calculate distance and power retrieved at each station and mask\n # the stations which have higher thresholds than the retrieved power\n\n points_f_ecef = (tanp.fromLocal(points.T)).T \n dt, ran = travel_time(points, stations_local, c0, get_r=True)\n pwr = received_power(powers, ran)\n masking = 10.*np.log10(pwr/1e-3) < ordered_threshs[:,np.newaxis]\n masking2 = np.empty_like(masking)\n for i in range(len(stations_ecef[:,0])):\n masking2[i] = tanps[i].toLocal(points_f_ecef.T)[2]<0\n\n masking = masking | masking2\n pwr = np.ma.masked_where(masking, pwr)\n dt = np.ma.masked_where(masking, dt)\n ran = np.ma.masked_where(masking, ran)\n \n # Add error to the retreived times\n dt_e = dt + np.random.normal(scale=dt_rms, size=np.shape(dt))\n dt_mins = np.argmin(dt_e, axis=0)\n # Precalculate some terms in ecef (fastest calculation)\n points_f_ecef = (tanp.fromLocal(points.T)).T \n full_dxvec, full_drsq = precalc_station_terms(stations_ecef)\n # Run the retrieved locations calculation\n # gen_retrieval returns a tuple of four positions, x,y,z,t.\n dtype=[('x', float), ('y', float), ('z', float), ('t', float), \n ('chi2', float)]\n # Prime the generator function - pauses at the first yield statement.\n point_gen = gen_retrieval(dt_e, dt_mins, full_dxvec, full_drsq, \n center_ecef, stations_ecef, dt_rms, \n min_stations)\n # Suck up the values produced by the generator, produce named array.\n # retrieved_locations = 
np.fromiter(point_gen, dtype=dtype)\n # retrieved_locations = np.array([(a,b,c,e) for (a,b,c,d,e) in \n # retrieved_locations])\n retrieved_locations = array_from_generator2(point_gen,rows=n)\n retrieved_locations = retrieved_locations[:,[0,1,2,-1]]\n chi2 = retrieved_locations[:,3]\n retrieved_locations = retrieved_locations[:,:3]\n retrieved_locations = np.ma.masked_invalid(retrieved_locations)\n if just_rms == False:\n # Converts to projection\n # soluts = tanp.toLocal(retrieved_locations.T)\n # good = soluts[2] > 0\n proj_soluts = projl.fromECEF(retrieved_locations[:,0], \n retrieved_locations[:,1], \n retrieved_locations[:,2])\n good = proj_soluts[2] > 0\n proj_soluts = (proj_soluts[0][good],proj_soluts[1][good],\n proj_soluts[2][good])\n proj_points = projl.fromECEF(points_f_ecef[good,0], \n points_f_ecef[good,1], \n points_f_ecef[good,2])\n\n proj_soluts = np.ma.masked_invalid(proj_soluts)\n # Converts to cylindrical coordinates since most errors \n # are in r and z, not theta \n proj_points_cyl = np.array([(proj_points[0]**2+proj_points[1]**2)**0.5,\n np.degrees(np.arctan(proj_points[0]/proj_points[1])),\n proj_points[2]])\n proj_soluts_cyl = np.ma.masked_array([(proj_soluts[1]**2+proj_soluts[0]**2)**0.5,\n np.degrees(np.arctan(proj_soluts[0]/proj_soluts[1])),\n proj_soluts[2]])\n difs = proj_soluts_cyl - proj_points_cyl\n difs[1][difs[1]>150]=difs[1][difs[1]>150]-180\n difs[1][difs[1]<-150]=difs[1][difs[1]<-150]+180\n return np.mean(difs.T[chi2[good]<chi2_filter].T, axis=1\n ), np.std(difs.T[chi2[good]<chi2_filter].T, axis=1\n ), np.ma.count_masked(difs[0])+np.sum(chi2[good]>=chi2_filter\n )+np.sum(~good)\n else:\n #Convert back to local tangent plane\n soluts = tanp.toLocal(retrieved_locations.T)\n proj_soluts = projl.fromECEF(retrieved_locations[:,0], \n retrieved_locations[:,1], \n retrieved_locations[:,2])\n good = proj_soluts[2] > 0\n # good = soluts[2] > 0\n difs = soluts[:,good] - points[good].T\n return 
np.mean((difs.T[chi2[good]<chi2_filter].T)**2, axis=1)**0.5",
"def _query_filter(\n lat: float, lon: float, n: int, d: float, is_airport: bool, reporting: bool\n) -> List[Station]:\n k = n * 20\n last = 0\n stations = []\n while True:\n nodes = _query_coords(lat, lon, k, d)[last:]\n # Ran out of new stations\n if not nodes:\n return stations\n for icao, dist in nodes:\n stn = Station.from_icao(icao)\n if _station_filter(stn, is_airport, reporting):\n stations.append((stn, dist))\n # Reached the desired number of stations\n if len(stations) >= n:\n return stations\n last = k\n k += n * 100"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test plot_on_map function by ensuring it gives some output
|
def test_plot_on_map():
list_of_stations = build_station_list() # Create list of stations to test from
assert plot_on_map(list_of_stations) != 0 # Unsure what the output of this function will
# look like, but should be non-zero (i.e. some output).
|
[
"def test_plotting_functions_with_cmaps(plot_func, cmap):\n plot_func(load_mni152_template(resolution=2), cmap=cmap, colorbar=True)\n plt.close()",
"def test_plot_error_map_backend_v1(self):\n backend = FakeKolkata()\n img_ref = path_to_diagram_reference(\"kolkata_error.png\")\n fig = plot_error_map(backend)\n with BytesIO() as img_buffer:\n fig.savefig(img_buffer, format=\"png\")\n img_buffer.seek(0)\n self.assertImagesAreEqual(Image.open(img_buffer), img_ref, 0.2)\n plt.close(fig)",
"def show_map(map_matrix, clean=False):\n\t# fig = plt.figure()\n\t# ax = fig.add_subplot(111)\n\t# ax.tick_params(axis='both', which='both', length=0)\n\tplt.imshow(map_matrix, cmap=\"gray\", origin='upper', interpolation='none', vmin=-3, vmax=1)\n\tif clean:\n\t\tplt.axis('off')\n\telse:\n\t\tplt.grid('on', linestyle='dotted', linewidth=1, which='major')",
"def overlap_map(self):\n self.method.plot_overlapping()",
"def test_plot_error_map_backend_v2(self):\n backend = FakeKolkataV2()\n img_ref = path_to_diagram_reference(\"kolkata_v2_error.png\")\n fig = plot_error_map(backend)\n with BytesIO() as img_buffer:\n fig.savefig(img_buffer, format=\"png\")\n img_buffer.seek(0)\n self.assertImagesAreEqual(Image.open(img_buffer), img_ref, 0.2)\n plt.close(fig)",
"def test_plot_gate_map(self, backend):\n n = backend.configuration().n_qubits\n img_ref = path_to_diagram_reference(str(n) + \"bit_quantum_computer.png\")\n filename = \"temp.png\"\n fig = plot_gate_map(backend)\n fig.savefig(filename)\n self.assertImagesAreEqual(filename, img_ref, 0.2)\n os.remove(filename)",
"def plot_topology_map(topology_map, func_get_path):\n\n plt.rcParams.update({'font.size': FONT_SIZE * 0.6})\n\n lower_left, upper_right = topology_map.boundary_points\n width, height = topology_map.width_and_height\n grid = np.zeros((height, width))\n\n # populate the grid with our topology\n for x, y, z in topology_map.iter_all_points_xyz():\n # normalize each point so that the lower left point is on 0,0\n normalized_x = x - lower_left.x\n normalized_y = y - lower_left.y\n grid[normalized_y, normalized_x] = z # grid is indexed by rows(y) first, not columns(x)\n\n figtitle = \"Path is numbered in order of visitation. From white square to red triangle\\n\" \\\n \"The triangle will have red text if it didn't need to be scanned\\n\" \\\n \"Press the space bar to cycle through strategies, or click a new point\"\n\n fig = plt.figure(FigureClass=MyFigure, figtitle=figtitle)\n ax = fig.subplots()\n # fig, ax = plt.subplots()\n\n # To center axes labels with cells, enlarge the extent by half a cell width on all sides\n extent = (lower_left.x - .5, upper_right.x + .5, lower_left.y - .5, upper_right.y + .5)\n ax.imshow(grid, origin='lower', extent=extent)\n\n def draw_path(path):\n \"\"\"\n Draws the path. This consists of drawing the endpoints and numbering each cell in the path.\n Also, it indicates cells where a scan was done by making the font color blue for scanned cells,\n otherwise white. Currently it may glitch of drawing a point outside our range. 
For now, I'm\n not worring about it\n :param path: list of (x,y,z, scan_cost)\n \"\"\"\n font_size = FONT_SIZE // 2\n symbol_size = font_size * 0.75\n # Plot start point\n start_x, start_y, start_z, start_cost = path[0]\n plt.plot(start_x, start_y, 'wo', markersize=symbol_size) # start is a white circle\n\n # Plot end points\n end_x, end_y, end_z, end_cost = path[-1]\n plt.plot(end_x, end_y, 'r^', markersize=symbol_size) # endpoint is a red diamond\n\n offset = .2\n for i, point in enumerate(path):\n color = \"blue\" if point[3] else \"white\" # scanned here\n plt.text(point[0] - offset, point[1] - offset, str(i), color=color, fontsize=font_size)\n\n def replot(cx, cy, change_strategy=False):\n \"\"\"\n Queries for new data and plots it. If change_strategy is True, the creator will populate the new path\n using a different strategy, but the same points as before. If it's False, then the current strategy\n will be used for the new point\n :param cx: new starting x\n :param cy: new starting y\n :param change_strategy: True if change strategy. 
If so, x,y are ignored\n :return:\n \"\"\"\n plt.cla()\n ax.imshow(grid, origin='lower', extent=extent)\n\n # Ask our creator for get a new path from the click origin, or with new strategy\n path, strategy_name = func_get_path(cx, cy, change_strategy)\n if not path:\n return # To handle case where we've changed strategy but have no point yet\n\n draw_path(path)\n\n # Calculate cost/scan values from the cost element of the tuple (x,y,z, cost)\n cost = 0\n scans = 0\n for p in path:\n cost += p[3]\n if p[3]:\n scans += 1\n\n plt.title(\"{} strategy\\n({},{}),=> ({},{})\\nScans: {} Cost: {}\".\n format(strategy_name, path[0][0], path[0][1], path[-1][0], path[-1][1], scans, cost))\n\n fig.canvas.draw()\n\n def on_click(event):\n \"\"\"\n Handles clicks on cells by replotting new path\n :param event:\n \"\"\"\n if not event.xdata or not event.ydata: # if not clicked on a cell\n return\n # get closes integer points\n cx = math.floor(round(event.xdata))\n cy = math.floor(round(event.ydata))\n replot(cx, cy)\n\n _cid = fig.canvas.mpl_connect('button_press_event', on_click)\n\n def on_key(event):\n \"\"\"\n Handles keyboard events - spacebar will replot with new strategy\n :param event:\n :return:\n \"\"\"\n if event.key == ' ':\n replot(0, 0, True)\n\n _cid = fig.canvas.mpl_connect('key_press_event', on_key)\n\n plt.title(\"Click a point to navigate from it to high ground\")\n fig.canvas.set_window_title(\"Plot Path Example\")\n\n # make window big\n mng = plt.get_current_fig_manager()\n if hasattr(mng, 'window'):\n mng.resize(*mng.window.maxsize())\n plt.show()",
"def plot_map(self, output='save', save_as='resultMap.png'):\n self.get_ticks()\n fig, axis1 = plt.subplots(figsize=(10, 10))\n axis1.imshow(self.result_image)\n axis1.set_xlabel('Longitude')\n axis1.set_ylabel('Latitude')\n # axis1.set_xticklabels(self.x_ticks)\n # axis1.set_yticklabels(self.y_ticks)\n axis1.grid()\n if output == 'save':\n plt.savefig(save_as)\n else:\n plt.show()",
"def plot_offset_map(pointer_pd: pd.DataFrame, storage_path: str):\n m = 0\n n = 0\n plt.subplots(figsize=(6, 4))\n for i in range(len(pointer_pd)):\n if pointer_pd['frap_filter_optimal'][i] == 0:\n if m == 0:\n plt.plot([0, pointer_pd['x_diff'][i]], [0, pointer_pd['y_diff'][i]], color='#1E90FF', alpha=0.5,\n label='filtered ones')\n m = m + 1\n else:\n plt.plot([0, pointer_pd['x_diff'][i]], [0, pointer_pd['y_diff'][i]], color='#1E90FF', alpha=0.5)\n else:\n if n == 0:\n plt.plot([0, pointer_pd['x_diff'][i]], [0, pointer_pd['y_diff'][i]], color=(0.85, 0.35, 0.25),\n alpha=0.5, label='good ones')\n n = n + 1\n else:\n plt.plot([0, pointer_pd['x_diff'][i]], [0, pointer_pd['y_diff'][i]], color=(0.85, 0.35, 0.25),\n alpha=0.5)\n plt.xlim([-10, 10])\n plt.ylim([-10, 10])\n plt.xlabel('x offset (pixel)')\n plt.ylabel('y offset (pixel)')\n plt.legend(loc=2, bbox_to_anchor=(0.02, 0.99))\n plt.savefig('%s/offset_map.pdf' % storage_path)",
"def test_plot_error_map_over_100_qubit_backend_v2(self):\n backend = FakeWashingtonV2()\n img_ref = path_to_diagram_reference(\"washington_v2_error.png\")\n fig = plot_error_map(backend)\n with BytesIO() as img_buffer:\n fig.savefig(img_buffer, format=\"png\")\n img_buffer.seek(0)\n self.assertImagesAreEqual(Image.open(img_buffer), img_ref, 0.2)\n plt.close(fig)",
"def test_plot_gate_map(self, backend):\n n = backend.configuration().n_qubits\n img_ref = path_to_diagram_reference(str(n) + \"bit_quantum_computer.png\")\n fig = plot_gate_map(backend)\n with BytesIO() as img_buffer:\n fig.savefig(img_buffer, format=\"png\")\n img_buffer.seek(0)\n self.assertImagesAreEqual(Image.open(img_buffer), img_ref, 0.2)\n plt.close(fig)",
"def plt_clean_maps(clean_map, out_dir):\n\n f = Path(clean_map)\n tile, ref, _, _ = f.stem.split(\"_\")\n\n pointings = [\"0\", \"2\", \"4\", \"41\"]\n\n # load data from map .npz file\n tile_data = np.load(f, allow_pickle=True)\n\n for p in pointings:\n\n Path(f\"{out_dir}/tile_maps_clean/clean_plots/{p}/tile_maps\").mkdir(\n parents=True, exist_ok=True\n )\n Path(f\"{out_dir}/tile_maps_clean/clean_plots/{p}/tile_errors\").mkdir(\n parents=True, exist_ok=True\n )\n Path(f\"{out_dir}/tile_maps_clean/clean_plots/{p}/tile_counts\").mkdir(\n parents=True, exist_ok=True\n )\n\n # healpix meadian map\n try:\n tile_map_med = np.asarray(\n [(np.nanmedian(i) if i != [] else np.nan) for i in tile_data[p]]\n )\n\n plt.style.use(\"seaborn\")\n fig = plt.figure(figsize=(10, 10))\n fig.suptitle(f\"Good Map: {tile}/{ref} @ {p}\", fontsize=16)\n plot_healpix(\n data_map=tile_map_med, sub=(1, 1, 1), cmap=jade, vmin=-50, vmax=0\n )\n plt.savefig(\n f\"{out_dir}/tile_maps_clean/clean_plots/{p}/tile_maps/{tile}_{ref}_{p}_clean_map.png\",\n bbox_inches=\"tight\",\n )\n plt.close()\n except Exception as e:\n print(e)\n\n # Plot MAD\n try:\n tile_map_mad = []\n for j in tile_data[p]:\n if j != []:\n j = np.asarray(j)\n j = j[~np.isnan(j)]\n tile_map_mad.append(mad(j))\n else:\n tile_map_mad.append(np.nan)\n\n vmin = np.nanmin(tile_map_mad)\n vmax = np.nanmax(tile_map_mad)\n\n plt.style.use(\"seaborn\")\n fig = plt.figure(figsize=(10, 10))\n fig.suptitle(f\"Good Map MAD: {tile}/{ref} @ {p}\", fontsize=16)\n plot_healpix(\n data_map=np.asarray(tile_map_mad),\n sub=(1, 1, 1),\n cmap=jade,\n vmin=vmin,\n vmax=vmax,\n )\n plt.savefig(\n f\"{out_dir}/tile_maps_clean/clean_plots/{p}/tile_errors/{tile}_{ref}_{p}_clean_map_errors.png\",\n bbox_inches=\"tight\",\n )\n plt.close()\n except Exception as e:\n print(e)\n\n # Plot satellite pass counts in pix\n try:\n tile_map_counts = [len(np.array(i)[~np.isnan(i)]) for i in tile_data[p]]\n\n plt.style.use(\"seaborn\")\n fig = 
plt.figure(figsize=(10, 10))\n fig.suptitle(f\"Good Map Counts: {tile}/{ref} @ {p}\", fontsize=16)\n plot_healpix(\n data_map=np.asarray(tile_map_counts),\n sub=(1, 1, 1),\n cmap=jade,\n vmin=0,\n vmax=80,\n )\n plt.savefig(\n f\"{out_dir}/tile_maps_clean/clean_plots/{p}/tile_counts/{tile}_{ref}_{p}_clean_map_counts.png\",\n bbox_inches=\"tight\",\n )\n plt.close()\n except Exception as e:\n print(e)",
"def PlotMapping(self, maptype, y, mapdims, step, area=None,\n xticker=1, colormap='Reds', alpha=1.0,\n numbered=False, vmin=None, vmax=None, grid=False,\n background='', msize=1, plot_missing=True, **kwargs):\n plot_matrix, missing_matrix, savefile, fitmean = self.CreatePlotMatrices(maptype,\n y, mapdims[::-1], **kwargs)\n\n # create and configure figure for mapping\n matplotlib.rcParams['font.sans-serif'] = \"Liberation Sans\"\n fontsize_int = 14 + 3 * np.sqrt(mapdims[0] * mapdims[1])\n matplotlib.rcParams.update({'font.size': fontsize_int})\n\n def set_size(mapdims, ax=None):\n w = mapdims[0]\n h = mapdims[1]\n \"\"\" w, h: width, height in inches \"\"\"\n if not ax: ax=plt.gca()\n left = ax.figure.subplotpars.left\n right = ax.figure.subplotpars.right\n top = ax.figure.subplotpars.top\n bot = ax.figure.subplotpars.bottom\n figw = float(w)/(right-left)\n figh = float(h)/(top-bot)\n # correct width and hight for non quadratic sizes\n dims = [figw, figh]\n dims.sort(reverse=True)\n correction = dims[0]/dims[1]/10\n figw = figw + correction*2\n figh = figh + correction\n ax.figure.set_size_inches(figw, figh)\n\n fig, ax = plt.subplots(figsize=mapdims)\n ax.set_aspect('equal')\n set_size(mapdims)\n self.ConfigureTicks(mapdims, step, xticker, plt, grid)\n\n # plot mapping, create patch mask and plot it over map\n if grid:\n # create data for plotting\n x = []\n y = []\n x_missing = []\n y_missing = []\n plot_matrix = np.flipud(plot_matrix)\n plot_vector = list(plot_matrix.flatten())\n missing_vector = np.full_like(plot_vector, False, dtype=bool)\n missing_vector = (plot_matrix == fitmean)\n missing_vector = missing_vector.flatten()\n if area is not None:\n area_corr = list(area.flatten())\n else:\n area_corr = None\n\n cor = 1.5\n for i in range(1, mapdims[1]+1):\n for j in range(1, mapdims[0]+1):\n x.append(j-cor)\n y.append(i-cor)\n\n ax.set_xlim(min(x), max(x)+1)\n ax.set_ylim(min(y), max(y)+1)\n\n deletelist = []\n for i, missing in enumerate(missing_vector):\n 
if missing:\n deletelist.append(i)\n deleted = 0\n for i in deletelist:\n x_missing.append(x[i-deleted])\n y_missing.append(y[i-deleted])\n del(x[i-deleted])\n del(y[i-deleted])\n del(plot_vector[i-deleted])\n if area_corr is not None:\n del(area_corr[i-deleted])\n deleted += 1\n\n try:\n img = io.imread(background)\n pos = cor - 2\n plt.imshow(img, zorder=0, cmap='Greys_r',\n extent=[0+pos, mapdims[0]+pos,\n 0+pos, mapdims[1]+pos])\n del img\n except ValueError:\n #traceback.print_exc()\n print('No background given.')\n\n if plot_missing:\n missng_col = scatter(x_missing, y_missing, ax, msize=msize,\n color='black', linewidth=0.5, alpha=alpha)\n\n sclb = scatter(x, y, ax, c=plot_vector, msize=msize, area=area_corr,\n cmap=colormap, linewidth=0.5, alpha=alpha)\n im = sclb.sc\n del sclb\n else:\n im = plt.imshow(plot_matrix, cmap=colormap, vmin=vmin, vmax=vmax)\n\n pc = self.CreatePatchMask(mapdims, fig, missing_matrix)\n ax.add_collection(pc)\n\n def add_colorbar(im, aspect=20, pad_fraction=0.5, **kwargs):\n \"\"\"Add a vertical color bar to an image plot.\"\"\"\n divider = axes_grid1.make_axes_locatable(im.axes)\n #width = axes_grid1.axes_size.AxesY(im.axes, aspect=1./aspect)\n #pad = axes_grid1.axes_size.Fraction(pad_fraction, width)\n current_ax = plt.gca()\n cax = divider.append_axes(\"right\", size='5%', pad=0.05)\n plt.sca(current_ax)\n cbar = im.axes.figure.colorbar(im, cax=cax, **kwargs)\n return cbar\n\n clb = add_colorbar(im)\n\n # number the patches if numbered == True\n def NumberMap(mapdims, ax):\n product = mapdims[0] * mapdims[1]\n for i in range(0, mapdims[0]):\n for j in range(0, mapdims[1]):\n color = 'black'\n if missing_matrix[j][mapdims[0] - i-1]:\n color = 'white'\n ax.text(mapdims[0] - i-1, j,\n product - (j * mapdims[0] + i),\n ha='center', va='center',\n color=color, fontsize=fontsize_int*0.4)\n\n if numbered:\n NumberMap(mapdims, ax)\n\n # configure, save and show the plot\n plotname = re.sub(os.path.join(self.folder, 'results', 'plot', 
''), '', savefile)\n try:\n # remove grid prefix\n if grid:\n gridname = kwargs['name'].split('_')[0]\n peakparameter = re.sub(gridname, '', kwargs['name'])[1:]\n else:\n peakparameter = kwargs['name']\n\n # remove scaled prefix\n if vmin is not None:\n scalename = peakparameter.split('_')[0]\n peakparameter = re.sub(scalename, '', peakparameter)[1:]\n\n # define peak shape and parameter\n peakshape = peakparameter\n peakparameter = peakparameter.split('_')[-1]\n peakshape = re.sub('_' + peakparameter, '', peakshape)\n except KeyError:\n peakshape = 'raw'\n peakparameter = peakshape\n\n zlabel = self.peaknames[peakshape][peakparameter]['name'] + '\\n'\n unit = self.peaknames[peakshape][peakparameter]['unit']\n\n if maptype == 'errs':\n zlabel = 'Relative error of\\n' + zlabel\n\n self.ConfigurePlot(clb, plt,\n peak = peakshape,\n label = zlabel,\n unit = unit)\n try:\n if 'pca' in kwargs['name']:\n clb.set_label('')\n clb.set_ticks([])\n clb.set_label('Clusters')\n except KeyError:\n pass\n\n if isinstance(colormap, str):\n colormap_name = colormap\n else:\n colormap_name = colormap.name\n plt.savefig(f'{savefile}_{colormap_name}.pdf', format='pdf')\n plt.savefig(f'{savefile}_{colormap_name}.png')\n plt.clf()\n plt.close(fig)\n del im, clb\n gc.collect()\n\n print(f'{plotname} {colormap_name} plotted')\n\n return plot_matrix, plotname",
"def plot_asteroid_map(asteroids_map: np.ndarray,\n visible_asteroids: np.ndarray,\n optimal_point: tuple,\n legend_visible: bool = True,\n xlims: tuple = None,\n ylims: tuple = None\n ):\n\n df = create_plotting_dataframe(asteroids_map,\n visible_asteroids,\n optimal_point)\n\n # Plot\n altair_plots.scatter_plot(df,\n sorting=['Optimal',\n 'Visible', 'Non visible'],\n legend_visible=legend_visible,\n xlims=xlims,\n ylims=ylims\n ).display()",
"def error_map(self):\n self.method.plot_error()",
"def TEST_Map_Geoid():\n HC, HS = imp.Fetch_Coef(\"full4\")\n lmax = 10; mins = 600; levels = 70;\n title = f\"Map of Geoid undulation\"\n fig = Map_Geoid(mins, levels, title, lmax, HC, HS)",
"def pf_res_plotly(net, cmap=\"Jet\", use_line_geodata=None, on_map=False, projection=None,\n map_style='basic', figsize=1, aspectratio='auto', line_width=2, bus_size=10,\n climits_volt=(0.9, 1.1), climits_load=(0, 100), cpos_volt=1.0, cpos_load=1.1,\n filename=\"temp-plot.html\", auto_open=True):\n if 'res_bus' not in net or net.get('res_bus').shape[0] == 0:\n logger.warning('There are no Power Flow results. A Newton-Raphson power flow will be executed.')\n runpp(net)\n\n # create geocoord if none are available\n if 'line_geodata' not in net:\n net.line_geodata = pd.DataFrame(columns=['coords'])\n if 'bus_geodata' not in net:\n net.bus_geodata = pd.DataFrame(columns=[\"x\", \"y\"])\n if len(net.line_geodata) == 0 and len(net.bus_geodata) == 0:\n logger.warning(\"No or insufficient geodata available --> Creating artificial coordinates.\" +\n \" This may take some time\")\n create_generic_coordinates(net, respect_switches=True)\n if on_map:\n logger.warning(\"Map plots not available with artificial coordinates and will be disabled!\")\n on_map = False\n for geo_type in [\"bus_geodata\", \"line_geodata\"]:\n dupl_geo_idx = pd.Series(net[geo_type].index)[pd.Series(\n net[geo_type].index).duplicated()]\n if len(dupl_geo_idx):\n if len(dupl_geo_idx) > 20:\n logger.warning(\"In net.%s are %i duplicated \" % (geo_type, len(dupl_geo_idx)) +\n \"indices. That can cause troubles for draw_traces()\")\n else:\n logger.warning(\"In net.%s are the following duplicated \" % geo_type +\n \"indices. 
That can cause troubles for draw_traces(): \" + str(\n dupl_geo_idx))\n\n\n # check if geodata are real geographycal lat/lon coordinates using geopy\n if on_map and projection is not None:\n geo_data_to_latlong(net, projection=projection)\n\n # ----- Buses ------\n # initializating bus trace\n # hoverinfo which contains name and pf results\n precision = 3\n hoverinfo = (\n net.bus.name.astype(str) + '<br />' +\n 'V_m = ' + net.res_bus.vm_pu.round(precision).astype(str) + ' pu' + '<br />' +\n 'V_m = ' + (net.res_bus.vm_pu * net.bus.vn_kv.round(2)).round(precision).astype(str) + ' kV' + '<br />' +\n 'V_a = ' + net.res_bus.va_degree.round(precision).astype(str) + ' deg').tolist()\n hoverinfo = pd.Series(index=net.bus.index, data=hoverinfo)\n bus_trace = create_bus_trace(net, net.bus.index, size=bus_size, infofunc=hoverinfo, cmap=cmap,\n cbar_title='Bus Voltage [pu]', cmin=climits_volt[0], cmax=climits_volt[1],\n cpos=cpos_volt)\n\n # ----- Lines ------\n # if bus geodata is available, but no line geodata\n # if bus geodata is available, but no line geodata\n cmap_lines = 'jet' if cmap == 'Jet' else cmap\n if use_line_geodata is None:\n use_line_geodata = False if len(net.line_geodata) == 0 else True\n elif use_line_geodata and len(net.line_geodata) == 0:\n logger.warning(\"No or insufficient line geodata available --> only bus geodata will be used.\")\n use_line_geodata = False\n # hoverinfo which contains name and pf results\n hoverinfo = (\n net.line.name.astype(str) + '<br />' +\n 'I = ' + net.res_line.loading_percent.round(precision).astype(str) + ' %' + '<br />' +\n 'I_from = ' + net.res_line.i_from_ka.round(precision).astype(str) + ' kA' + '<br />' +\n 'I_to = ' + net.res_line.i_to_ka.round(precision).astype(str) + ' kA' + '<br />').tolist()\n hoverinfo = pd.Series(index=net.line.index, data=hoverinfo)\n line_traces = create_line_trace(net, use_line_geodata=use_line_geodata, respect_switches=True,\n width=line_width,\n infofunc=hoverinfo,\n cmap=cmap_lines,\n 
cmap_vals=net.res_line['loading_percent'].values,\n cmin=climits_load[0],\n cmax=climits_load[1],\n cbar_title='Line Loading [%]',\n cpos=cpos_load)\n\n # ----- Trafos ------\n # hoverinfo which contains name and pf results\n hoverinfo = (\n net.trafo.name.astype(str) + '<br />' +\n 'I = ' + net.res_trafo.loading_percent.round(precision).astype(str) + ' %' + '<br />' +\n 'I_hv = ' + net.res_trafo.i_hv_ka.round(precision).astype(str) + ' kA' + '<br />' +\n 'I_lv = ' + net.res_trafo.i_lv_ka.round(precision).astype(str) + ' kA' + '<br />').tolist()\n hoverinfo = pd.Series(index=net.trafo.index, data=hoverinfo)\n trafo_traces = create_trafo_trace(net, width=line_width * 1.5, infofunc=hoverinfo,\n cmap=cmap_lines, cmin=0, cmax=100)\n\n # ----- Ext grid ------\n # get external grid from create_bus_trace\n marker_type = 'circle' if on_map else 'square'\n ext_grid_trace = create_bus_trace(net, buses=net.ext_grid.bus,\n color='grey', size=bus_size * 2, trace_name='external_grid',\n patch_type=marker_type)\n\n return draw_traces(line_traces + trafo_traces + ext_grid_trace + bus_trace,\n showlegend=False, aspectratio=aspectratio, on_map=on_map,\n map_style=map_style, figsize=figsize, filename=filename, auto_open=auto_open)",
"def PlotTomoMap(fname, dlon=0.5, dlat=0.5, title='', datatype='ph', outfname='', browseflag=False, saveflag=True):\n if title=='':\n title=fname;\n if outfname=='':\n outfname=fname;\n Inarray=np.loadtxt(fname)\n LonLst=Inarray[:,0]\n LatLst=Inarray[:,1]\n ZValue=Inarray[:,2]\n llcrnrlon=LonLst.min()\n llcrnrlat=LatLst.min()\n urcrnrlon=LonLst.max()\n urcrnrlat=LatLst.max()\n Nlon=int((urcrnrlon-llcrnrlon)/dlon)+1\n Nlat=int((urcrnrlat-llcrnrlat)/dlat)+1\n fig=plt.figure(num=None, figsize=(8, 12), dpi=80, facecolor='w', edgecolor='k')\n m = Basemap(llcrnrlon=llcrnrlon, llcrnrlat=llcrnrlat, urcrnrlon=urcrnrlon, urcrnrlat=urcrnrlat, \\\n rsphere=(6378137.00,6356752.3142), resolution='l', projection='merc')\n \n lon = LonLst\n lat = LatLst\n x,y = m(lon, lat)\n xi = np.linspace(x.min(), x.max(), Nlon)\n yi = np.linspace(y.min(), y.max(), Nlat)\n xi, yi = np.meshgrid(xi, yi)\n \n #-- Interpolating at the points in xi, yi\n zi = griddata(x, y, ZValue, xi, yi)\n # m.pcolormesh(xi, yi, zi, cmap='seismic_r', shading='gouraud')\n cmap=matplotlib.cm.seismic_r\n cmap.set_bad('w',1.)\n m.imshow(zi, cmap=cmap)\n m.drawcoastlines()\n m.colorbar(location='bottom',size='2%')\n # m.fillcontinents()\n # draw parallels\n m.drawparallels(np.arange(-90,90,10),labels=[1,1,0,1])\n # draw meridians\n m.drawmeridians(np.arange(-180,180,10),labels=[1,1,1,0])\n plt.suptitle(title,y=0.9, fontsize=22);\n if browseflag==True:\n plt.draw()\n plt.pause(1) # <-------\n raw_input(\"<Hit Enter To Close>\")\n plt.close('all')\n if saveflag==True:\n fig.savefig(outfname+'.ps', format='ps')\n return",
"def plot_map(label='unk'):\n mutation_x = list(range(num_aa))\n mutation_y = list(range(num_gens))\n xx, yy = np.meshgrid(mutation_x, mutation_y)\n plt.figure()\n plt.pcolormesh(xx, yy, protein_mutation, cmap=cm.copper)\n plt.axis([xx.min(), xx.max(), yy.min(), yy.max()])\n plt.colorbar()\n plt.xlabel('amino acid index', fontweight='bold')\n plt.ylabel('generation number', fontweight='bold')\n plt.title('Mutation Heat Map for Homo sapiens\\nHemoglobin subunit $\\\\beta$', fontweight='bold')\n plt.savefig('mutation_map_{}.png'.format(label), dpi=300)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Setter method for value, mapped from YANG variable /network_instances/network_instance/mpls/te_global_attributes/srlgs/srlg/config/value (uint32)
|
def _set_value(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """value must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=True)""",
}
)
self.__value = t
if hasattr(self, "_set"):
self._set()
|
[
"def _set_config(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_config_openconfig_spanning_tree__stp_rstp_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"config must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_config_openconfig_spanning_tree__stp_rstp_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__config = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_config(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_config_openconfig_spanning_tree__stp_global_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"config must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_config_openconfig_spanning_tree__stp_global_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__config = t\n if hasattr(self, '_set'):\n self._set()",
"def config_setting_value_id(self, config_setting_value_id):\n\n self._config_setting_value_id = config_setting_value_id",
"def _set_config(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_config_openconfig_spanning_tree__stp_mstp_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"config must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_config_openconfig_spanning_tree__stp_mstp_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__config = t\n if hasattr(self, '_set'):\n self._set()",
"def getValue(self) -> \"uint32_t\":\n return _coin.SoSFUInt32_getValue(self)",
"def _set_config(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_config_openconfig_qos__qos_queue_management_profiles_queue_management_profile_red_uniform_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"config must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_config_openconfig_qos__qos_queue_management_profiles_queue_management_profile_red_uniform_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__config = t\n if hasattr(self, '_set'):\n self._set()",
"def setValue(self, *args):\n return _coin.SoSFUInt32_setValue(self, *args)",
"def _set_config(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_config_openconfig_qos_elements__qos_queue_management_profiles_queue_management_profile_red_uniform_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"config must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_config_openconfig_qos_elements__qos_queue_management_profiles_queue_management_profile_red_uniform_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__config = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_config(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_config_openconfig_qos__qos_queue_management_profiles_queue_management_profile_wred_uniform_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"config must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_config_openconfig_qos__qos_queue_management_profiles_queue_management_profile_wred_uniform_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__config = t\n if hasattr(self, '_set'):\n self._set()",
"def append_sint32(self, value):\n zigzag_value = wire_format.zig_zag_encode(value)\n self._stream.append_var_uint32(zigzag_value)",
"def config_setting_value(self, config_setting_value):\n\n self._config_setting_value = config_setting_value",
"def uvlo(self, value: int) -> int:\n return self._clib.fxSetUVLO(self.id, value)",
"def set_config(self, value):\n try:\n self.validate(config=value)\n except (KeyError, ValueError) as e:\n print(\"Config not set, encountered error %s\" % e.msg)\n\n self.config = value",
"def _set_config(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_config_openconfig_spanning_tree__stp_mstp_mst_instances_mst_instance_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"config must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_config_openconfig_spanning_tree__stp_mstp_mst_instances_mst_instance_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__config = t\n if hasattr(self, '_set'):\n self._set()",
"def setHSVValue(self, *args):\n return _coin.SbColor_setHSVValue(self, *args)",
"def _set_config(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_config_openconfig_qos_elements__qos_queue_management_profiles_queue_management_profile_wred_uniform_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"config must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_config_openconfig_qos_elements__qos_queue_management_profiles_queue_management_profile_wred_uniform_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__config = t\n if hasattr(self, '_set'):\n self._set()",
"def setHSVValue(self, *args) -> \"SbColor &\":\n return _coin.SbColor_setHSVValue(self, *args)",
"def setHSVValue(self, *args):\n return _coin.SoSFColorRGBA_setHSVValue(self, *args)",
"def setValue(self, *args) -> \"void\":\n return _coin.SoSFUInt32_setValue(self, *args)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Setter method for flooding_type, mapped from YANG variable /network_instances/network_instance/mpls/te_global_attributes/srlgs/srlg/config/flooding_type (mplssrlgfloodingtype)
|
def _set_flooding_type(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"FLOODED_SRLG": {}, "STATIC_SRLG": {}},
),
default=six.text_type("FLOODED_SRLG"),
is_leaf=True,
yang_name="flooding-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="mpls-srlg-flooding-type",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """flooding_type must be of a type compatible with mpls-srlg-flooding-type""",
"defined-type": "openconfig-network-instance:mpls-srlg-flooding-type",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'FLOODED_SRLG': {}, 'STATIC_SRLG': {}},), default=six.text_type("FLOODED_SRLG"), is_leaf=True, yang_name="flooding-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='mpls-srlg-flooding-type', is_config=True)""",
}
)
self.__flooding_type = t
if hasattr(self, "_set"):
self._set()
|
[
"def _set_lsp_type_bypass(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-type-bypass\", rest_name=\"lsp-type-bypass\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_type_bypass must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-type-bypass\", rest_name=\"lsp-type-bypass\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_type_bypass = t\n if hasattr(self, '_set'):\n self._set()",
"def f_type(self, f_type):\n \n self._f_type = f_type",
"def get_flood_extents(data_path, flood_type, return_period):\n extents = []\n if flood_type == 'current_fluvial':\n # EUWATCH\n extents += list(shpreader.Reader(\n os.path.join(\n data_path,\n 'tanzania_flood',\n 'threshold_1',\n 'EUWATCH_{:05d}_mask-1.shp'.format(return_period)\n )\n ).records())\n # SSBN fluvial\n extents += list(shpreader.Reader(\n os.path.join(\n data_path,\n 'tanzania_flood',\n 'threshold_1',\n 'SSBN_FU_{}_mask-1.shp'.format(return_period)\n )\n ).records())\n if flood_type == 'current_pluvial':\n # SSBN pluvial\n extents += list(shpreader.Reader(\n os.path.join(\n data_path,\n 'tanzania_flood',\n 'threshold_1',\n 'SSBN_PU_{}_mask-1.shp'.format(return_period)\n )\n ).records())\n if flood_type == 'future_fluvial':\n # GLOFRIS\n models = [\n 'GFDL-ESM2M',\n 'HadGEM2-ES',\n 'IPSL-CM5A-LR',\n 'MIROC-ESM-CHEM',\n 'NorESM1-M',\n ]\n for model in models:\n extents += list(shpreader.Reader(\n os.path.join(\n data_path,\n 'tanzania_flood',\n 'threshold_1',\n '{}_{:05d}_mask-1.shp'.format(model, return_period)\n )\n ).records())\n return extents",
"def handle_flood_request(self, elem, bstring):\n _from = jid.JID(elem[\"from\"])\n if not re.match(r\"^nws-\", _from.user):\n msg = \"Sorry, you must be NWS to flood a chatroom!\"\n self.send_privatechat(elem[\"from\"], msg)\n return\n tokens = bstring.split()\n if len(tokens) == 1:\n msg = \"Did you specify a room to flood?\"\n self.send_privatechat(elem[\"from\"], msg)\n return\n room = tokens[1].lower()\n for _i in range(60):\n self.send_groupchat(room, self.get_fortune())\n self.send_groupchat(\n room,\n (\n \"Room flooding complete, offending message \"\n \"should no longer appear\"\n ),\n )",
"def hole_type(self):\n self._hole_type = self._hole_params[1]\n return self._hole_type",
"def _flood_fill(self, point):\n fboard = np.array(self.board, copy=True)\n flood_list=[point]\n color = fboard[point]\n fboard[point] = FLOODFILL\n while flood_list:\n current_point = flood_list.pop()\n neighbors = self._neighbors(current_point)\n for n in neighbors :\n if fboard[n] == color:\n fboard[n] = FLOODFILL\n flood_list.append(n)\n return fboard",
"def get_graph_type(light_graph):\n if GraphRunner.is_fully_supported(light_graph):\n return graph_types_pb2.LGFProtobuf\n\n for n in light_graph.nodes():\n if not n.supported and n.HasField(lgf_pb2.LNF.original.DESCRIPTOR.name):\n return n.original.t",
"def bugtracker_type(self, bugtracker_type):\n allowed_values = [undefined, undefined, undefined, ] # noqa: E501\n\n self._bugtracker_type = bugtracker_type",
"def field_type(self, field):\n return _ldns.ldns_rr_descriptor_field_type(self, field)\n #parameters: const ldns_rr_descriptor *, size_t,\n #retvals: ldns_rdf_type",
"def get_form_field_type(self, python_type):\n\n return self._type_map.get(python_type)",
"def flood(fin, T, option, Bc=None):\n\n if Bc is None: Bc = secross()\n raise NotImplementedError, 'pymorph.flood'",
"def _set_lsp_type_dynamic(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-type-dynamic\", rest_name=\"lsp-type-dynamic\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_type_dynamic must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-type-dynamic\", rest_name=\"lsp-type-dynamic\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_type_dynamic = t\n if hasattr(self, '_set'):\n self._set()",
"def get_field_type(self, field):\n return self._get_field_type_data(field)[0]",
"def is_grass_type(self):\n if self.type1 == 'Grass' or self.type2 == 'Grass':\n return 'This is a grass-type pokemon'\n else:\n return 'This is not a grass-type pokemon'",
"def flood_frequency(self):\n return (self.tp + self.fn) / (self.tn + self.fn + self.fp + self.tp)",
"def HasFDG(self):\n return self.__has('FDG')",
"def get_filter_type(self):\n\n choice = None\n if self.ui.radioButton_LP.isChecked():\n choice = \"lowpass\"\n elif self.ui.radioButton_HP.isChecked():\n choice = \"highpass\"\n elif self.ui.radioButton_BP.isChecked():\n choice = \"bandpass\"\n elif self.ui.radioButton_BS.isChecked():\n choice = \"bandstop\"\n elif self.ui.radioButton_AP.isChecked():\n warning(self, '', 'The allpass is not made yet.')\n choice = \"allpass\"\n self.config_dict['filter_type'] = choice\n print(\"picked filter type {}\".format(self.config_dict['filter_type']))",
"def _set_profile_type(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'synthetic-loss-measurement': {'value': 2}, u'delay-measurement': {'value': 1}},), is_leaf=True, yang_name=\"profile-type\", rest_name=\"type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure Profile Type as ETH-DM / ETH-SLM', u'alt-name': u'type', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='y1731-profile-type', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"profile_type must be of a type compatible with y1731-profile-type\"\"\",\n 'defined-type': \"brocade-dot1ag:y1731-profile-type\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'synthetic-loss-measurement': {'value': 2}, u'delay-measurement': {'value': 1}},), is_leaf=True, yang_name=\"profile-type\", rest_name=\"type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure Profile Type as ETH-DM / ETH-SLM', u'alt-name': u'type', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='y1731-profile-type', is_config=True)\"\"\",\n })\n\n self.__profile_type = t\n if hasattr(self, '_set'):\n self._set()",
"def floodfill(image, *args, **kwargs):\n image = Image.from_any(image)\n return FloodfillOperation(image, *args, **kwargs).run()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Setter method for value, mapped from YANG variable /network_instances/network_instance/mpls/te_global_attributes/srlgs/srlg/config/value (uint32)
|
def _set_value(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="value",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """value must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=True)""",
}
)
self.__value = t
if hasattr(self, "_set"):
self._set()
|
[
"def _set_config(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_config_openconfig_spanning_tree__stp_rstp_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"config must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_config_openconfig_spanning_tree__stp_rstp_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__config = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_config(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_config_openconfig_spanning_tree__stp_global_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"config must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_config_openconfig_spanning_tree__stp_global_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__config = t\n if hasattr(self, '_set'):\n self._set()",
"def config_setting_value_id(self, config_setting_value_id):\n\n self._config_setting_value_id = config_setting_value_id",
"def _set_config(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_config_openconfig_spanning_tree__stp_mstp_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"config must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_config_openconfig_spanning_tree__stp_mstp_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__config = t\n if hasattr(self, '_set'):\n self._set()",
"def getValue(self) -> \"uint32_t\":\n return _coin.SoSFUInt32_getValue(self)",
"def _set_config(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_config_openconfig_qos__qos_queue_management_profiles_queue_management_profile_red_uniform_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"config must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_config_openconfig_qos__qos_queue_management_profiles_queue_management_profile_red_uniform_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__config = t\n if hasattr(self, '_set'):\n self._set()",
"def setValue(self, *args):\n return _coin.SoSFUInt32_setValue(self, *args)",
"def _set_config(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_config_openconfig_qos_elements__qos_queue_management_profiles_queue_management_profile_red_uniform_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"config must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_config_openconfig_qos_elements__qos_queue_management_profiles_queue_management_profile_red_uniform_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__config = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_config(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_config_openconfig_qos__qos_queue_management_profiles_queue_management_profile_wred_uniform_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"config must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_config_openconfig_qos__qos_queue_management_profiles_queue_management_profile_wred_uniform_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__config = t\n if hasattr(self, '_set'):\n self._set()",
"def append_sint32(self, value):\n zigzag_value = wire_format.zig_zag_encode(value)\n self._stream.append_var_uint32(zigzag_value)",
"def config_setting_value(self, config_setting_value):\n\n self._config_setting_value = config_setting_value",
"def uvlo(self, value: int) -> int:\n return self._clib.fxSetUVLO(self.id, value)",
"def set_config(self, value):\n try:\n self.validate(config=value)\n except (KeyError, ValueError) as e:\n print(\"Config not set, encountered error %s\" % e.msg)\n\n self.config = value",
"def _set_config(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_config_openconfig_spanning_tree__stp_mstp_mst_instances_mst_instance_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"config must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_config_openconfig_spanning_tree__stp_mstp_mst_instances_mst_instance_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/spanning-tree', defining_module='openconfig-spanning-tree', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__config = t\n if hasattr(self, '_set'):\n self._set()",
"def setHSVValue(self, *args):\n return _coin.SbColor_setHSVValue(self, *args)",
"def _set_config(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_config_openconfig_qos_elements__qos_queue_management_profiles_queue_management_profile_wred_uniform_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"config must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_config_openconfig_qos_elements__qos_queue_management_profiles_queue_management_profile_wred_uniform_config, is_container='container', yang_name=\"config\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__config = t\n if hasattr(self, '_set'):\n self._set()",
"def setHSVValue(self, *args) -> \"SbColor &\":\n return _coin.SbColor_setHSVValue(self, *args)",
"def setHSVValue(self, *args):\n return _coin.SoSFColorRGBA_setHSVValue(self, *args)",
"def setValue(self, *args) -> \"void\":\n return _coin.SoSFUInt32_setValue(self, *args)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Setter method for flooding_type, mapped from YANG variable /network_instances/network_instance/mpls/te_global_attributes/srlgs/srlg/config/flooding_type (mplssrlgfloodingtype)
|
def _set_flooding_type(self, v, load=False):
    """Setter for the YANG leaf ``flooding-type`` under
    /network_instances/network_instance/mpls/te_global_attributes/srlgs/srlg/config.

    Wraps *v* in a YANGDynClass restricted to the enumeration
    {"FLOODED_SRLG", "STATIC_SRLG"} (default "FLOODED_SRLG") and stores it
    on the private ``__flooding_type`` slot.

    NOTE: auto-generated pyangbind code — do not hand-edit the logic.

    :param v: candidate value for the leaf; coerced via its ``_utype`` hook
        when present.
    :param load: pyangbind flag (unused here) indicating the value comes
        from a datastore load rather than a caller.
    :raises ValueError: when *v* is not compatible with
        mpls-srlg-flooding-type (the original TypeError/ValueError from
        YANGDynClass is translated into a structured error dict).
    """
    # Allow typed wrappers to normalise themselves before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=six.text_type,
                restriction_type="dict_key",
                restriction_arg={"FLOODED_SRLG": {}, "STATIC_SRLG": {}},
            ),
            default=six.text_type("FLOODED_SRLG"),
            is_leaf=True,
            yang_name="flooding-type",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="mpls-srlg-flooding-type",
            is_config=True,
        )
    except (TypeError, ValueError):
        # Re-raise with the generated, user-facing error structure.
        raise ValueError(
            {
                "error-string": """flooding_type must be of a type compatible with mpls-srlg-flooding-type""",
                "defined-type": "openconfig-network-instance:mpls-srlg-flooding-type",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'FLOODED_SRLG': {}, 'STATIC_SRLG': {}},), default=six.text_type("FLOODED_SRLG"), is_leaf=True, yang_name="flooding-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='mpls-srlg-flooding-type', is_config=True)""",
            }
        )
    self.__flooding_type = t
    # Give subclasses/wrappers a post-set hook, if they define one.
    if hasattr(self, "_set"):
        self._set()
|
[
"def _set_lsp_type_bypass(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-type-bypass\", rest_name=\"lsp-type-bypass\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_type_bypass must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-type-bypass\", rest_name=\"lsp-type-bypass\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_type_bypass = t\n if hasattr(self, '_set'):\n self._set()",
"def f_type(self, f_type):\n \n self._f_type = f_type",
"def get_flood_extents(data_path, flood_type, return_period):\n extents = []\n if flood_type == 'current_fluvial':\n # EUWATCH\n extents += list(shpreader.Reader(\n os.path.join(\n data_path,\n 'tanzania_flood',\n 'threshold_1',\n 'EUWATCH_{:05d}_mask-1.shp'.format(return_period)\n )\n ).records())\n # SSBN fluvial\n extents += list(shpreader.Reader(\n os.path.join(\n data_path,\n 'tanzania_flood',\n 'threshold_1',\n 'SSBN_FU_{}_mask-1.shp'.format(return_period)\n )\n ).records())\n if flood_type == 'current_pluvial':\n # SSBN pluvial\n extents += list(shpreader.Reader(\n os.path.join(\n data_path,\n 'tanzania_flood',\n 'threshold_1',\n 'SSBN_PU_{}_mask-1.shp'.format(return_period)\n )\n ).records())\n if flood_type == 'future_fluvial':\n # GLOFRIS\n models = [\n 'GFDL-ESM2M',\n 'HadGEM2-ES',\n 'IPSL-CM5A-LR',\n 'MIROC-ESM-CHEM',\n 'NorESM1-M',\n ]\n for model in models:\n extents += list(shpreader.Reader(\n os.path.join(\n data_path,\n 'tanzania_flood',\n 'threshold_1',\n '{}_{:05d}_mask-1.shp'.format(model, return_period)\n )\n ).records())\n return extents",
"def handle_flood_request(self, elem, bstring):\n _from = jid.JID(elem[\"from\"])\n if not re.match(r\"^nws-\", _from.user):\n msg = \"Sorry, you must be NWS to flood a chatroom!\"\n self.send_privatechat(elem[\"from\"], msg)\n return\n tokens = bstring.split()\n if len(tokens) == 1:\n msg = \"Did you specify a room to flood?\"\n self.send_privatechat(elem[\"from\"], msg)\n return\n room = tokens[1].lower()\n for _i in range(60):\n self.send_groupchat(room, self.get_fortune())\n self.send_groupchat(\n room,\n (\n \"Room flooding complete, offending message \"\n \"should no longer appear\"\n ),\n )",
"def hole_type(self):\n self._hole_type = self._hole_params[1]\n return self._hole_type",
"def _flood_fill(self, point):\n fboard = np.array(self.board, copy=True)\n flood_list=[point]\n color = fboard[point]\n fboard[point] = FLOODFILL\n while flood_list:\n current_point = flood_list.pop()\n neighbors = self._neighbors(current_point)\n for n in neighbors :\n if fboard[n] == color:\n fboard[n] = FLOODFILL\n flood_list.append(n)\n return fboard",
"def get_graph_type(light_graph):\n if GraphRunner.is_fully_supported(light_graph):\n return graph_types_pb2.LGFProtobuf\n\n for n in light_graph.nodes():\n if not n.supported and n.HasField(lgf_pb2.LNF.original.DESCRIPTOR.name):\n return n.original.t",
"def bugtracker_type(self, bugtracker_type):\n allowed_values = [undefined, undefined, undefined, ] # noqa: E501\n\n self._bugtracker_type = bugtracker_type",
"def field_type(self, field):\n return _ldns.ldns_rr_descriptor_field_type(self, field)\n #parameters: const ldns_rr_descriptor *, size_t,\n #retvals: ldns_rdf_type",
"def get_form_field_type(self, python_type):\n\n return self._type_map.get(python_type)",
"def flood(fin, T, option, Bc=None):\n\n if Bc is None: Bc = secross()\n raise NotImplementedError, 'pymorph.flood'",
"def _set_lsp_type_dynamic(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-type-dynamic\", rest_name=\"lsp-type-dynamic\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_type_dynamic must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-type-dynamic\", rest_name=\"lsp-type-dynamic\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_type_dynamic = t\n if hasattr(self, '_set'):\n self._set()",
"def get_field_type(self, field):\n return self._get_field_type_data(field)[0]",
"def is_grass_type(self):\n if self.type1 == 'Grass' or self.type2 == 'Grass':\n return 'This is a grass-type pokemon'\n else:\n return 'This is not a grass-type pokemon'",
"def flood_frequency(self):\n return (self.tp + self.fn) / (self.tn + self.fn + self.fp + self.tp)",
"def HasFDG(self):\n return self.__has('FDG')",
"def get_filter_type(self):\n\n choice = None\n if self.ui.radioButton_LP.isChecked():\n choice = \"lowpass\"\n elif self.ui.radioButton_HP.isChecked():\n choice = \"highpass\"\n elif self.ui.radioButton_BP.isChecked():\n choice = \"bandpass\"\n elif self.ui.radioButton_BS.isChecked():\n choice = \"bandstop\"\n elif self.ui.radioButton_AP.isChecked():\n warning(self, '', 'The allpass is not made yet.')\n choice = \"allpass\"\n self.config_dict['filter_type'] = choice\n print(\"picked filter type {}\".format(self.config_dict['filter_type']))",
"def _set_profile_type(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'synthetic-loss-measurement': {'value': 2}, u'delay-measurement': {'value': 1}},), is_leaf=True, yang_name=\"profile-type\", rest_name=\"type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure Profile Type as ETH-DM / ETH-SLM', u'alt-name': u'type', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='y1731-profile-type', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"profile_type must be of a type compatible with y1731-profile-type\"\"\",\n 'defined-type': \"brocade-dot1ag:y1731-profile-type\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'synthetic-loss-measurement': {'value': 2}, u'delay-measurement': {'value': 1}},), is_leaf=True, yang_name=\"profile-type\", rest_name=\"type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Configure Profile Type as ETH-DM / ETH-SLM', u'alt-name': u'type', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='y1731-profile-type', is_config=True)\"\"\",\n })\n\n self.__profile_type = t\n if hasattr(self, '_set'):\n self._set()",
"def floodfill(image, *args, **kwargs):\n image = Image.from_any(image)\n return FloodfillOperation(image, *args, **kwargs).run()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the event(s) associated with the `address_id` given.
|
def get_by_address_id(
    self,
    address_id: UUID,
    *,
    raise_ex: bool = False
) -> tp.List[T]:
    """Return the event(s) associated with *address_id*.

    Abstract stub — concrete repositories are expected to override it;
    this default implementation does nothing and returns None.

    :param address_id: identifier of the address whose events are wanted.
    :param raise_ex: presumably, when True an exception is raised instead
        of returning an empty list on a miss — TODO confirm against a
        concrete implementation.
    :return: list of matching events (``T`` is a project-defined type).
    """
    pass
|
[
"def get_event_detail(self, event_id):\n events = self.facebook.get_object(cat=\"single\", id=event_id, fields=['description', 'name', 'place', 'attending'])\n return events.get('description', None), events.get('name', None), \\\n events.get('place', None), events.get('attending', None)",
"def get_event_by_id(id):\r\n \r\n return Event.query.get(id)",
"def get_offset_events_by_event(self, event_id):\n return # osid.calendaring.OffsetEventList",
"def get_events(event_ids: List[Union[str, int]],\n webserver_address: str = GS_SERVER_ADDRESS,\n geojson: bool = True) -> List[Dict]:\n urls = [get_event_url(event_id, webserver_address, geojson) for event_id in event_ids]\n loop = asyncio.get_event_loop()\n events = loop.run_until_complete(download_all_sites(urls))\n return [json.loads(event) for event in events]",
"def getevent(self, id):\n # Request event details\n senddata = {\"id\": id}\n response = requests.post(f\"{self.api_address}/getevent\", json=senddata)\n recvdata = response.json()\n # Check if event was returned\n if not recvdata.get('id'):\n # Return None\n return None\n else:\n # Return event details\n return recvdata",
"def get_events_by_location(self, location_id):\n return # osid.calendaring.EventList",
"def get_event_detail_by_id(event_id):\n event = Event.objects.get(pk=event_id)\n return event",
"def get_address(address_id):\n session = connect()\n address = session.query(Address).filter_by(id=address_id).one_or_none()\n return address",
"def get_event_by_id(event_id):\n session = _open_session()\n sql = ('''\n SELECT datestamp, device, error_code, error_message, result\n FROM events\n WHERE id=?\n ''')\n event = session.execute(sql, (event_id,)).fetchone()\n session.close()\n return event",
"def get_eventbrite_events():\n eb = Eventbrite(config.EVENTBRITE_TOKEN)\n # me = eb.event_search(**{\"user.id\": eb.get_user()[\"id\"]})\n # print(json.dumps(me))\n\n has_more = True\n events = []\n while has_more:\n search_results = eb.event_search(\n **{\"location.address\": \"New+York\", \"location.within\": \"5mi\"}\n )\n has_more = search_results.get(\"pagination\", \"\").get(\n \"has_more_items\", False\n )\n for i in search_results.get(\"events\", []):\n events.append(\n {\n \"id\": i.get(\"id\"),\n \"name\": i.get(\"name\").get(\"text\"),\n \"description\": i.get(\"description\").get(\"text\"),\n \"summary\": i.get(\"summary\"),\n \"start\": i.get(\"start\").get(\"local\"),\n \"end\": i.get(\"end\").get(\"local\"),\n \"status\": i.get(\"status\"),\n \"url\": i.get(\"url\"),\n }\n )\n\n return search_results[\"events\"]",
"def get_sg_event(event_id):\n\n params = {'client_id': CLIENT_ID,\n 'client_secret': CLIENT_SECRET,\n 'id': event_id}\n\n response = requests.get(SG_URL + 'events', params=params)\n\n return response.json()",
"def _get_event_by_id(self, event_id):\n if self._getevent_warn:\n msg = (\n \"Seeking event by iterating through events.. (potentially long process)\"\n )\n self.log.warning(msg)\n self._getevent_warn = False\n\n self._reset() # Event ids may not be in order, so always reset\n\n for event in self._source:\n if event.index.event_id == event_id:\n return event\n raise IndexError(f\"Event id {event_id} not found in file\")",
"def search_txids_by_address(self, address: str) -> List[str]:\n txs = self.search_txs_by_address(address)\n\n txids = {i.txid for i in txs}\n txids = list(txids)\n return txids",
"def get_source_addresses_by_source_address_id(self, source_address_id, *, fields=None, **kwargs):\n function_endpoint = urljoin(self._baseurl,\n 'source_addresses/{source_address_id}'.format(source_address_id=source_address_id))\n return self._call('GET', function_endpoint, **kwargs)",
"def get_events(self, status):\n\n type_id = (BookmarkType.query\n .filter(BookmarkType.bookmark_type == status)\n .one().bookmark_type_id)\n\n return (Event.query.join(Bookmark)\n .filter(Bookmark.bookmark_type_id == type_id,\n Bookmark.user_id == self.user_id,\n Event.event_id == Bookmark.event_id).all())",
"def get_eventlogs_by_event(event):\n return event.db.EventLog.find({'event':event})",
"def _get_address(self, router, address):\n a_type = 'org.apache.qpid.dispatch.router.address'\n addrs = router.management.query(a_type).get_dicts()\n return [a for a in addrs if address in a['name']]",
"def get_events_by_ids(self, event_ids):\n return # osid.calendaring.EventList",
"def _get_details_by_evid(self, evid):\n event_data = self._router_request(\n self._make_request_data(\n 'detail',\n dict(\n evid=evid,\n )\n )\n )\n\n return event_data['event'][0]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Represent a location given its id
|
def shn_gis_location_represent(id, showlink=True):
    """Represent a gis_location record given its *id*.

    Fetches the row (cached in RAM for 60s) and delegates the formatting
    to shn_gis_location_represent_row(). On any failure the raw id is
    returned when the row was at least fetched, otherwise NONE.

    :param id: primary key of the gis_location record.
    :param showlink: passed through to the row representer; presumably
        controls whether the representation is rendered as a hyperlink.
    :return: the rendered representation, the bare id, or NONE.
    """
    table = db.gis_location
    try:
        location = db(table.id == id).select(table.id,
                                             table.name,
                                             table.level,
                                             table.parent,
                                             table.lat,
                                             table.lon,
                                             cache=(cache.ram, 60),
                                             limitby=(0, 1)).first()
        return shn_gis_location_represent_row(location, showlink)
    # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
    # are no longer swallowed; the best-effort fallback is preserved.
    except Exception:
        try:
            # "Invalid" => data consistency wrong; fall back to the id
            # if the row itself was fetched successfully.
            represent = location.id
        except Exception:
            # Row never fetched (or has no id) => nothing to show.
            represent = NONE
        return represent
|
[
"def test_facebook_get_location_by_id(self):\n location = get_location_info(location_id=120491747748)\n\n self.assertEqual(location.name, 'The Classic Cup')\n self.assertEqual(location.category, 'Restaurant/cafe')\n self.assertEqual(location.country, 'United States')\n self.assertEqual(location.latlong, (39.042173020445, -94.590903251913))\n self.assertEqual(location.line1, '301 W. 47th Street')\n self.assertEqual(location.line2, None)\n self.assertEqual(location.locality, 'Kansas City')\n self.assertEqual(location.phone, '816-753-1840')\n self.assertEqual(location.postal_code, '64112')\n self.assertEqual(location.source, Source.FACEBOOK)\n self.assertEqual(location.subdivision, 'MO')",
"def create_location(id, name, abbr, subtitle=\"\", audio=\"\"):\r\n\r\n location = Location(id=id, \r\n name=name, \r\n abbr=abbr, \r\n default_subtitle = subtitle, \r\n default_audio = audio)\r\n\r\n db.session.add(location)\r\n db.session.commit() \r\n\r\n return location",
"def fromId(cls, locId):\n location = None\n if isinstance(locId, Location):\n location = copy(locId)\n elif isinstance(locId, dict):\n location = cls()\n for tag, value in locId.items():\n location[tag] = value\n elif isinstance(locId, (tuple, list)):\n location = cls()\n for tag, value in locId:\n location[tag] = value\n return location",
"def create(cls, data, id_=None, **kwargs):\n data[\"location\"] = {\n \"$ref\": cls._location_resolver_path.format(\n scheme=current_app.config[\"JSONSCHEMAS_URL_SCHEME\"],\n host=current_app.config[\"JSONSCHEMAS_HOST\"],\n internal_location_pid=data[cls.pid_field],\n )\n }\n return super(InternalLocation, cls).create(data, id_=id_, **kwargs)",
"def Location(self) -> str:",
"def get_location(self, location_id):\n response = self._perform_request('/locations/' + location_id)\n return response",
"def editLoc(loc_id):\n\n item = session.query(Locations).filter_by(id=loc_id).one()\n if login_session['user_id'] != item.user_id:\n flash(\"Sorry, you do not have permissions to edit this item\")\n return redirect(url_for('showAllLocs'))\n form = newLocationForm()\n if request.method == 'POST' and form.validate_on_submit():\n if form.name.data:\n item.name = form.name.data\n if form.description.data:\n item.description = form.description.data\n if form.pic_url.data:\n item.pic_url = form.pic_url.data\n session.add(item)\n session.commit()\n flash('Location %s was edited!' % item.name)\n return redirect(url_for('showOneLoc', loc_id=item.id))\n else:\n return render_template('editlocation.html', form=form, location=item,\n login_session=login_session)",
"def add_location_db(self):\n result = self.db_handle.fetchdata('add_location', [self.location_name,self.location_size,self.notes,])\n self.lid = result[0][0]",
"def location(self):\n return '{}, {}'.format(\n self.city,\n self.state.abbr,\n )",
"def create_location(self):\n return self.client().post('/api/organizations/1/locations/',\n data=self.location_data)",
"def test_location_string(self):\n location = models.Location.objects.create(\n user=sample_user(),\n loc_id=23111,\n loc_name='Johnson Farm',\n )\n self.assertEqual(str(location), str(location.loc_id))",
"def addLocationNode(self, type, contextUid, id,\n description=None, address=None):\n facade = self._getFacade()\n organizer = facade.addLocationOrganizer(contextUid,\n id,\n description,\n address)\n uid = organizer.uid\n\n treeNode = facade.getTree(uid)\n audit('UI.Location.Add', uid, description=description, address=address)\n return DirectResponse.succeed(\"Location added\", nodeConfig=Zuul.marshal(treeNode))",
"def genelocations_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=GENELOCATION_TYPE_URI,\n rdf_type_name=GENELOCATION_TYPE_NAME, \n kls=GeneLocation)",
"def setLocation(self, hexID):\n self.hexLocation = hexID",
"def __parse_location(row):\n loc = Location(\n row['street_address'],\n row['city'],\n row['state'],\n row['zipcode'],\n row['latitude'],\n row['longitude'],\n row['id'],\n row['store_id']\n )\n return loc",
"def _parse_location(self, item):\n if item['location']:\n address = item['location'] + ' Chicago, IL'\n else:\n address = None\n return {'address': address, 'name': '', 'neighborhood': ''}",
"def edit_location(redflag_id):\n data = request.get_json()\n location = data.get('location')\n wrong_location = validators.validate_location(location)\n if wrong_location:\n return jsonify({\"status\": 400, 'error': wrong_location}), 400\n elif redflag.edits_record_location(redflag_id, 'location', location):\n return jsonify({\"status\": 201, \"data\": redflag.edits_record_location(redflag_id, 'location', location)})\n return jsonify({\"status\": 200, \"message\": \"the redflag with redflag_id is not available\"})",
"def location(self) -> LyricLocation:\n return self.coordinator.data.locations_dict[self._location.locationID]",
"def display_location(self):\n self.get()",
"def get_location(self, location=None, location_id=None):\n self._load_locations()\n if location:\n if location in self.locations:\n return self.locations[location]\n return None\n for location in self.locations:\n if location.id == location_id:\n return location\n return None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Helper for gis_config prep and others where gis_config is a component. Hide location hierarchy fields above max allowed. Table definitions may include more levels than a particular site wants to allow. Rather than changing the definitions, hide the extra levels. Set defaults from the site config: the static defaults are sparse (no map size?) and may be annoyingly inappropriate (London?).
|
def gis_config_prep_helper(r):
    """Helper for gis_config prep (and wherever gis_config is a component).

    1. Hides location hierarchy fields ("L0".."Ln") above the site's
       max allowed level, so table definitions may declare more levels
       than a given site wants to expose.
    2. Copies values from the site config row (id == 1) into the table's
       field defaults, because the static defaults are sparse and may be
       inappropriate for the site.

    :param r: the current request object (web2py/S3); only
        r.response.s3.all_meta_field_names is read here.
    """
    table = db.gis_config
    # Highest hierarchy level defined on the table. Parse the numeric
    # suffix instead of taking the max of the 2-char field names, so
    # levels >= 10 ("L10", ...) are handled too — the original
    # len(field) == 2 filter silently ignored them. Also drops the
    # py2-only bare "reduce" builtin.
    table_max_level_num = max(
        int(field[1:]) for field in table.fields
        if field.startswith("L") and field[1:].isdigit()
    )
    if table_max_level_num > gis.max_allowed_level_num:
        for n in range(gis.max_allowed_level_num + 1, table_max_level_num + 1):
            level = "L%d" % n
            table[level].readable = table[level].writable = False
    # Seed field defaults from the site configuration record, if any.
    row = db(table.id == 1).select(limitby=(0, 1)).first()
    if row:
        # Identity / bookkeeping fields must keep their own defaults.
        exclude = ["id", "name", "region_location_id", "show_region_in_menu",
                   "region_changed_timestamp"]
        exclude.extend(r.response.s3.all_meta_field_names)
        for fieldname in table.fields:
            if fieldname in row and fieldname not in exclude:
                table[fieldname].default = row[fieldname]
|
[
"def HideDefaults(self, name, cfg):\n my_default = self.GetDefault()\n my_default['name'] = name\n\n template = cfg.get('_template')\n if template:\n my_default.update(self._templates[template])\n my_default['_template'] = None\n\n d = {}\n for k, v in cfg.iteritems():\n if my_default.get(k) != v:\n if k == 'child_configs':\n d['child_configs'] = [self.HideDefaults(name, child) for child in v]\n else:\n d[k] = v\n\n return d",
"def set_default_values(self):\n self.vmin.set(0)\n self.vmax.set(0)\n self.dq_show.set('184')\n self.segment.set('A')\n self.N_degraded.set(0)\n self.extract.set('None')\n self.draw.set('Modal Gain')\n self.extract_offset.set(0)\n self.cmap.set('gist_yarg')\n self.grid_limits.set(1)",
"def set_defaults():\n hv.opts.defaults(\n hv.opts.BoxWhisker(\n box_cmap=default_cmap,\n box_fill_alpha=0.75,\n box_line_color=\"black\",\n box_width=0.4,\n cmap=default_cmap,\n height=350,\n hooks=[_no_xgrid_hook],\n legend_offset=(10, 100),\n legend_position=\"right\",\n outlier_alpha=0.75,\n outlier_fill_color=None,\n outlier_fill_alpha=0,\n outlier_line_width=2,\n padding=0.05,\n show_grid=True,\n show_legend=False,\n show_title=True,\n toolbar=\"above\",\n whisker_color=\"black\",\n whisker_line_width=1,\n width=450,\n )\n )\n\n hv.opts.defaults(\n hv.opts.Curve(\n color=hv.Cycle(default_cmap),\n height=350,\n line_width=2,\n line_join=\"bevel\",\n muted_line_alpha=0.1,\n padding=0.05,\n show_grid=True,\n toolbar=\"above\",\n width=450,\n )\n )\n\n hv.opts.defaults(\n hv.opts.Histogram(\n fill_alpha=0.3,\n fill_color=hv.Cycle(default_cmap),\n height=450,\n line_alpha=1,\n line_width=2,\n padding=0.05,\n show_grid=True,\n show_legend=True,\n show_title=True,\n toolbar=\"above\",\n width=500,\n )\n )\n\n hv.opts.defaults(\n hv.opts.NdOverlay(\n click_policy=\"hide\",\n fontsize=dict(legend=8),\n height=350,\n legend_offset=(10, 100),\n legend_position=\"right\",\n padding=0.05,\n show_grid=True,\n show_legend=True,\n show_title=True,\n toolbar=\"above\",\n width=450,\n )\n )\n\n hv.opts.defaults(\n hv.opts.Overlay(\n click_policy=\"hide\",\n fontsize=dict(legend=8),\n height=350,\n legend_offset=(10, 100),\n legend_position=\"right\",\n padding=0.05,\n show_grid=True,\n show_legend=True,\n show_title=True,\n toolbar=\"above\",\n width=450,\n )\n )\n\n\n hv.opts.defaults(\n hv.opts.Points(\n alpha=0.75,\n color=hv.Cycle(default_cmap),\n fill_alpha=0,\n fill_color=None,\n fontsize=dict(legend=8),\n height=350,\n legend_offset=(10, 100),\n legend_position=\"right\",\n line_width=2,\n padding=0.05,\n show_grid=True,\n show_legend=True,\n show_title=True,\n size=5,\n toolbar=\"above\",\n width=450,\n )\n )\n\n hv.opts.defaults(\n hv.opts.Scatter(\n alpha=0.75,\n 
cmap=bokeh.palettes.Viridis256,\n color=hv.Cycle(default_cmap),\n fill_alpha=0,\n fill_color=None,\n fontsize=dict(legend=8),\n height=350,\n legend_offset=(10, 100),\n legend_position=\"right\",\n line_width=2,\n muted_line_alpha=0.1,\n padding=0.05,\n show_grid=True,\n size=5,\n toolbar=\"above\",\n width=450,\n )\n )",
"def __SetMissingDefaultConfigValues(self, config={}):\n config = super(DfpClient, self)._SetMissingDefaultConfigValues(config)\n default_config = {\n 'home': DfpClient.home,\n 'log_home': os.path.join(DfpClient.home, 'logs')\n }\n for key in default_config:\n if key not in config:\n config[key] = default_config[key]\n return config",
"def getDefaultConfiguration(nodeType):",
"def HideDefaults(cls, site_params):\n defaults = DefaultSiteParameters()\n return {k: v for k, v in site_params.iteritems() if defaults.get(k) != v}",
"async def test_entity_customization(hass: HomeAssistant) -> None:\n config = {\n CONF_LATITUDE: 50,\n CONF_LONGITUDE: 50,\n CONF_NAME: \"Test\",\n CONF_CUSTOMIZE: {\"test.test\": {\"hidden\": True}},\n }\n\n state = await _compute_state(hass, config)\n\n assert state.attributes[\"hidden\"]",
"def get_default_site_settings():\n return models.SiteSettings(center_gears=None, providers=None)",
"def set_options_frame_defaults(self):\r\n self.population_count.set('50')\r\n self.mutation_rate_percent.set('1')\r\n self.generations.set('-')\r\n self.average_fitness.set('-')\r\n self.best_fitness.set('-')\r\n self.best_generation.set('-')\r\n self.best_distance.set('-')\r\n self.minimizing_factor.set('1')",
"def supports_configuration_hierarchy_design(self):\n return # boolean",
"def system_cfg(cls):\r\n #attention_this_may_be_wrong\r\n #gamma is different from that in system.cfg but...\r\n\r\n cls.model_param_ising= {\r\n \"gamma\": 1.0, \r\n \"h\": 1.0, \r\n \"J_NN\": -1.0, \r\n \"J_NNN\": 1.0, \r\n \"alpha\": 2.0, \r\n \"beta\": 0.0 \r\n }\r\n cls.model_param_heisenberg= {\r\n \"h\": 0.0, \r\n \"Jzz\": 1.0, \r\n \"J_NN\": 1.0, \r\n \"J_NNN\": 0.0, #cls.J_NNN=0.241186\r\n \"alpha\": 2.0, \r\n \"beta\": 1.0 \r\n }\r\n\r\n if cls.MODEL == \"Ising\": \r\n cls.model_param.update(cls.model_param_ising)\r\n\r\n cls.Layer=3\r\n cls.SI_Layer=2\r\n cls.D_max=2\r\n \r\n elif cls.MODEL == \"Heisenberg\":\r\n cls.model_param.update(cls.model_param_heisenberg)\r\n \r\n\r\n cls.D_max=2\r\n cls.Layer=3\r\n else:\r\n print \"error, cls.MODEL is not defined\"\r\n print cls.MODEL",
"def merge_with_master_config(self, config, defaults={}, delete_orphan_fields=False) -> dict:\n if isinstance(config, str):\n import json\n config = json.loads(config)\n properties = self.all_properties()\n config['fields'] = config.get('fields', dict())\n fields = config['fields']\n\n d_color = defaults.get('color', 'white')\n d_icon = defaults.get('icon', 'icons:default')\n\n if delete_orphan_fields:\n exist = {p.name() for p in properties}\n unexist = set(fields.keys()) - exist\n for name in unexist:\n del fields[name]\n\n for p in properties:\n field = fields.get(p.name(), {'show_in_search': False,\n 'combine_fields': False,\n 'number_of_rules': 0,\n 'glossaries': [],\n 'use_in_network_search': False,\n 'case_sensitive': False,\n 'show_as_link': 'text',\n 'blacklists': [],\n 'show_in_result': 'no',\n 'rule_extractor_enabled': False,\n 'search_importance': 1,\n 'group_name': '',\n 'show_in_facets': False,\n 'predefined_extractor': 'none',\n 'rule_extraction_target': ''})\n config['fields'][p.name()] = field\n field['screen_label'] = ' '.join(p.label())\n field['description'] = '\\n'.join(p.definition())\n field['name'] = p.name()\n\n # color\n if 'color' not in field:\n color = self.__merge_close_ancestor_color(p, fields, attr='color')\n field['color'] = color if color else d_color\n # icon\n if 'icon' not in field:\n icon = self.__merge_close_ancestor_color(p, fields, attr='icon')\n field['icon'] = icon if icon else d_icon\n # type\n if isinstance(p, OntologyObjectProperty):\n field['type'] = 'kg_id'\n else:\n try:\n field['type'] = self.__merge_xsd_to_type(next(iter(p.included_ranges())))\n except StopIteration:\n field['type'] = None\n return config",
"def DefaultConfig(self) -> str:\n cfg = \\\n\"\"\"[Location]\nlat = 15.3\nlon = -120.2\nalt = 50\nname = Arayat\ntz = Asia/Manila\n\n[Track]\nsats = AO-92,SO-50,ISS,FO-29,FOX-1B,IO-86,AO-7,AO-27,AO-73,XW-2B,XW-2F,LILACSAT-2\n\n[Pass]\nminalt = 20.0\n\n[Tle]\nfiles = 'https://www.celestrak.com/NORAD/elements/amateur.txt,'\n\"\"\"\n return cfg",
"def map_options(self, configdialog):\n self._config.set('geography.path', config.get('geography.path'))\n self._config.set('geography.zoom_when_center',\n config.get('geography.zoom_when_center'))\n self._config.set('geography.max_places',\n self._config.get('geography.max_places'))\n table = Gtk.Table(n_rows=1, n_columns=1)\n table.set_border_width(12)\n table.set_col_spacings(6)\n table.set_row_spacings(6)\n configdialog.add_text(table,\n _('Where to save the tiles for offline mode.'),\n 0, line_wrap=False)\n configdialog.add_entry(table, '',\n 1, 'geography.path')\n configdialog.add_text(table,\n _('If you have no more space in your file system. '\n 'You can remove all tiles placed in the above path.\\n'\n 'Be careful! If you have no internet, you\\'ll get no map.'),\n 2, line_wrap=False)\n configdialog.add_slider(table,\n _('Zoom used when centering'),\n 3, 'geography.zoom_when_center',\n (2, 16))\n configdialog.add_slider(table,\n _('The maximum number of places to show'),\n 4, 'geography.max_places',\n (1000, 10000))\n return _('The map'), table",
"def resetDefaultConfig_2D(): #-----------------------------------------------\n\tpresetConfig_curv(1)\n\tkeywords2d = {\n\t\t'views_on' : 0,\n\t\t'cams_on' : 0,\n\t\t'lights_on' : 0,\n\t\t'vGroup_on' : 1,\n\t\t'thick_on' : 0,\n\t\t'thick_force': 0,\n\t\t'width_on' : 1,\n\t\t'width_force': 0,\n\t\t'dist_on' : 1,\n\t\t'dist_force': 0,\n\t\t'fill_on'\t: 0,\n\t\t'pl_trim_on': 1,\n\t\t'Z_force_on': 0,\n\t\t'meshSmooth_on': 0,\n\t\t'solids_as' : 2,\n\t\t'blocks_as' : 1,\n\t\t'texts_as' : 1\n\t\t}\n\n\tdrawTypes2d = {\n\t\t'point' : 1,\n\t\t'line' : 1,\n\t\t'arc' : 1,\n\t\t'circle': 1,\n\t\t'ellipse': 1,\n\t\t'mline' : 0,\n\t\t'polyline': 1,\n\t\t'spline': 1,\n\t\t'plmesh': 0,\n\t\t'pline3': 1,\n\t\t'lwpolyline': 1,\n\t\t'text' : 1,\n\t\t'mtext' : 0,\n\t\t'block' : 1,\n\t\t'insert': 1,\n\t\t'solid' : 1,\n\t\t'trace' : 1,\n\t\t'face' : 0,\n#\t\t'view' : 0,\n\t\t}\n\n\tupdateConfig(keywords2d, drawTypes2d)",
"def defaults(self) -> Mapping[str, str]:",
"def default_values(self):\n\n if self.production:\n return {\n 'official_status_index': '5',\n 'official_down_status': 'down_status[official_status_index]',\n 'official_up_status': 'up_status[official_status_index]',\n 'official_invalid_status': 'invalid_status[official_status_index]',\n 'adblock': 'False',\n 'auto_continue': 'True',\n 'command_before_end': \"''\",\n 'custom_ip': \"'0.0.0.0'\",\n 'days_between_db_retest': '1',\n 'debug': 'False',\n 'domain': \"''\",\n 'generate_hosts': 'True',\n 'header_printed': 'False',\n 'to_filter': \"''\",\n 'less': 'False',\n 'logs': 'True',\n 'plain_list_domain': 'False',\n 'quiet': 'False',\n 'referer': \"''\",\n 'seconds_before_http_timeout': '3',\n 'share_logs': 'False',\n 'show_execution_time': 'False',\n 'show_percentage': 'True',\n 'split_files': 'False',\n 'travis': 'False',\n 'travis_autosave_minutes': '15',\n 'travis_autosave_commit': '\"PyFunceble - Autosave\"',\n 'travis_autosave_final_commit': '\"PyFunceble - Results\"',\n 'unified_file': 'True',\n 'link_to_repo': \"'https://github.com/funilrys/PyFunceble'\",\n 'iana_server': \"'whois.iana.org'\",\n 'current_datetime': 'strftime(\"%a %d %b %H:%m:%S %Z %Y\")',\n 'number_of_tested': '0',\n 'number_of_up': '0',\n 'number_of_down': '0',\n 'number_of_invalid': '0',\n 'http_code_status': 'True',\n 'http_code': \"''\",\n 'cleaned_done': 'False',\n 'no_files': 'False',\n 'current_dir': \"'%%current_dir%%'\"}\n return {\n 'current_dir': \"'\" + repr(self.path).strip(\"'\") + \"'\"\n }",
"def ssdb_resetdefaults(_self=None):\n SecondaryStructureDB.addDefaults()",
"def make_default_settings():\n default_settings = {\n 'height': 24, \n 'width': 24, \n 'max_box_height': 7,\n 'max_box_width': 7,\n 'max_container_height': 5,\n 'max_container_width': 9,\n 'default_num_samples': 20,\n 'fixed_floor': False,\n 'floor_height': 3,\n 'infinite_position_domain': False,\n 'frame': False, # indicates presence of PixelWorld frame\n 'frame_color': PURPLE,\n 'padding': 0, # padding around outside edge\n 'colors': COLORS.values(), \n 'check_overlap': True,\n 'allow_pushable': False, # Whether to allow objects the option of being pushable\n 'allow_targets': False, # Whether to allow use of the is_target attribute\n 'add_self': True,\n 'make_self_red_pixel': True,\n 'self_color_is_unique': False,\n 'objects_are_white': False,\n 'objects_are_small_blobs': False,\n 'self_grips': False, # True if the self can grip/ungrip other objects\n }\n return default_settings"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
On Accept for GIS Locations (after DB I/O)
|
def gis_location_onaccept(form):
if session.rcvars and hasattr(name_dummy_element, "onaccept"):
# HTML UI, not XML import
name_dummy_element.onaccept(db, session.rcvars.gis_location, request)
else:
location_id = form.vars.id
table = db.gis_location_name
names = db(table.location_id == location_id).select(table.id)
if names:
ids = [str(name.id) for name in names]
#name_dummy = "|%s|" % "|".join(ids)
name_dummy = "|".join(ids) # That's not how it should be
table = db.gis_location
db(table.id == location_id).update(name_dummy=name_dummy)
# Update the Path
gis.update_location_tree(form.vars.id, form.vars.parent)
return
|
[
"def location():\n\n resource = request.function\n tablename = module + \"_\" + resource\n table = db[tablename]\n\n # Allow prep to pass vars back to the controller\n vars = {}\n \n # Pre-processor\n def prep(r, vars):\n\n # Restrict access to Polygons to just MapAdmins\n if deployment_settings.get_security_map() and not shn_has_role(\"MapAdmin\"):\n table.code.writable = False\n if r.method == \"create\":\n table.code.readable = False\n table.gis_feature_type.writable = table.gis_feature_type.readable = False\n table.wkt.writable = table.wkt.readable = False\n table.marker_id.comment = \"\"\n else:\n table.code.comment = DIV( _class=\"tooltip\", _title=Tstr(\"Code\") + \"|\" + Tstr(\"For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.\"))\n table.wkt.comment = DIV(SPAN(\"*\", _class=\"req\"), DIV( _class=\"tooltip\", _title=\"WKT\" + \"|\" + Tstr(\"The <a href='http://en.wikipedia.org/wiki/Well-known_text' target=_blank>Well-Known Text</a> representation of the Polygon/Line.\")))\n\n if r.http == \"GET\" and r.representation in (\"html\", \"popup\"):\n # Options which are only required in interactive HTML views\n table.level.comment = DIV( _class=\"tooltip\", _title=Tstr(\"Level\") + \"|\" + Tstr(\"If the location is a geographic area, then state at what level here.\"))\n table.parent.comment = DIV(A(ADD_LOCATION,\n _class=\"colorbox\",\n _href=URL(r=request, c=\"gis\", f=\"location\", args=\"create\", vars=dict(format=\"popup\", child=\"parent\")),\n _target=\"top\",\n _title=ADD_LOCATION),\n DIV(\n _class=\"tooltip\",\n _title=Tstr(\"Parent\") + \"|\" + Tstr(\"The Area which this Site is located within.\"))),\n table.name.comment = SPAN(\"*\", _class=\"req\")\n table.osm_id.comment = DIV( _class=\"tooltip\", _title=\"OSM ID\" + \"|\" + Tstr(\"The <a href='http://openstreetmap.org' target=_blank>OpenStreetMap</a> ID. 
If you don't know the ID, you can just say 'Yes' if it has been added to OSM.\"))\n\n # CRUD Strings\n LIST_LOCATIONS = T(\"List Locations\")\n s3.crud_strings[tablename] = Storage(\n title_create = ADD_LOCATION,\n title_display = T(\"Location Details\"),\n title_list = T(\"Locations\"),\n title_update = T(\"Edit Location\"),\n title_search = T(\"Search Locations\"),\n subtitle_create = T(\"Add New Location\"),\n subtitle_list = LIST_LOCATIONS,\n label_list_button = LIST_LOCATIONS,\n label_create_button = ADD_LOCATION,\n label_delete_button = T(\"Delete Location\"),\n msg_record_created = T(\"Location added\"),\n msg_record_modified = T(\"Location updated\"),\n msg_record_deleted = T(\"Location deleted\"),\n msg_list_empty = T(\"No Locations currently available\"))\n\n if r.method in (None, \"list\") and r.record == None:\n # List\n pass\n elif r.method in (\"delete\", \"search_simple\"):\n pass\n else:\n # Add Map to allow locations to be found this way\n config = gis.get_config()\n lat = config.lat\n lon = config.lon\n zoom = config.zoom\n feature_queries = []\n\n if r.method == \"create\":\n add_feature = True\n add_feature_active = True\n else:\n if r.method == \"update\":\n add_feature = True\n add_feature_active = False\n else:\n # Read\n add_feature = False\n add_feature_active = False\n \n location = db(db.gis_location.id == r.id).select(db.gis_location.lat, db.gis_location.lon, limitby=(0, 1)).first()\n if location and location.lat is not None and location.lon is not None:\n lat = location.lat\n lon = location.lon\n # Same as a single zoom on a cluster\n zoom = zoom + 2\n \n _map = gis.show_map(lat = lat,\n lon = lon,\n zoom = zoom,\n feature_queries = feature_queries,\n add_feature = add_feature,\n add_feature_active = add_feature_active,\n toolbar = True,\n collapsed = True)\n\n # Pass the map back to the main controller\n vars.update(_map=_map)\n return True\n response.s3.prep = lambda r, vars=vars: prep(r, vars)\n \n # Options\n _vars = request.vars\n 
filters = []\n # Deprecate!\n #fclass = _vars.get(\"feature_class\", None)\n #if fclass:\n # filters.append((db.gis_location.feature_class_id == db.gis_feature_class.id) &\n # (db.gis_feature_class.name.like(fclass)))\n\n parent = _vars.get(\"parent_\", None)\n # Don't use 'parent' as the var name as otherwise it conflicts with the form's var of the same name & hence this will be triggered during form submission\n if parent:\n # Can't do this using a JOIN in DAL syntax\n # .belongs() not GAE-compatible!\n filters.append((db.gis_location.parent.belongs(db(db.gis_location.name.like(parent)).select(db.gis_location.id))))\n # ToDo: Make this recursive - want descendants not just direct children!\n # Use new gis.get_children() function\n\n # ToDo\n # bbox = _vars.get(\"bbox\", None):\n\n if filters:\n response.s3.filter = reduce(__and__, filters)\n\n caller = _vars.get(\"caller\", None)\n if caller:\n # We've been called as a Popup\n if \"gis_location_parent\" in caller:\n # Populate defaults & hide unnecessary rows\n # Use default Marker for Admin Locations\n table.marker_id.readable = table.marker_id.writable = False\n table.addr_street.readable = table.addr_street.writable = False\n #table.osm_id.readable = table.osm_id.writable = False\n #table.source.readable = table.source.writable = False\n else:\n parent = _vars.get(\"parent_\", None)\n # Don't use 'parent' as the var name as otherwise it conflicts with the form's var of the same name & hence this will be triggered during form submission\n if parent:\n table.parent.default = parent\n \n #fc = None\n # Populate defaults & hide unnecessary rows\n if \"cr_shelter\" in caller:\n #fc = db(db.gis_feature_class.name == \"Shelter\").select(db.gis_feature_class.id, limitby=(0, 1)).first()\n table.level.readable = table.level.writable = False\n table.url.readable = table.url.writable = False\n elif \"hms_hospital\" in caller:\n #fc = db(db.gis_feature_class.name == \"Hospital\").select(db.gis_feature_class.id, limitby=(0, 
1)).first()\n table.level.readable = table.level.writable = False\n table.url.readable = table.url.writable = False\n elif \"irs_ireport\" in caller:\n #fc = db(db.gis_feature_class.name == \"Incident\").select(db.gis_feature_class.id, limitby=(0, 1)).first()\n table.level.readable = table.level.writable = False\n table.url.readable = table.url.writable = False\n elif \"org_office\" in caller:\n #fc = db(db.gis_feature_class.name == \"Office\").select(db.gis_feature_class.id, limitby=(0, 1)).first()\n table.level.readable = table.level.writable = False\n table.url.readable = table.url.writable = False\n elif \"pr_presence\" in caller:\n #fc = db(db.gis_feature_class.name == \"Person\").select(db.gis_feature_class.id, limitby=(0, 1)).first()\n table.level.readable = table.level.writable = False\n table.url.readable = table.url.writable = False\n elif \"assessment_location\" in caller:\n table.level.default = \"L4\"\n #table.feature_class_id.readable = table.feature_class_id.writable = False\n table.marker_id.readable = table.marker_id.writable = False\n table.addr_street.readable = table.addr_street.writable = False\n elif \"school_district\" in caller:\n table.level.default = \"L2\"\n #table.feature_class_id.readable = table.feature_class_id.writable = False\n table.marker_id.readable = table.marker_id.writable = False\n table.addr_street.readable = table.addr_street.writable = False\n elif \"school_report_location\" in caller:\n table.level.default = \"L2\"\n #table.feature_class_id.readable = table.feature_class_id.writable = False\n table.marker_id.readable = table.marker_id.writable = False\n table.addr_street.readable = table.addr_street.writable = False\n elif \"school_report_union\" in caller:\n table.level.default = \"L4\"\n #table.feature_class_id.readable = table.feature_class_id.writable = False\n table.marker_id.readable = table.marker_id.writable = False\n table.addr_street.readable = table.addr_street.writable = False\n \n #try:\n # If we have a 
pre-assigned Feature Class\n #table.feature_class_id.default = fc.id\n #table.feature_class_id.readable = table.feature_class_id.writable = False\n # Use default Marker for Class\n #table.marker_id.readable = table.marker_id.writable = False\n #except:\n #pass\n\n table.osm_id.readable = table.osm_id.writable = False\n table.source.readable = table.source.writable = False\n\n level = _vars.get(\"level\", None)\n if level:\n # We've been called from the Location Selector widget\n table.marker_id.readable = table.marker_id.writable = False\n table.addr_street.readable = table.addr_street.writable = False\n \n # Post-processor\n def user_postp(jr, output):\n shn_action_buttons(jr)\n return output\n response.s3.postp = user_postp\n\n response.s3.pagination = True\n output = shn_rest_controller(module, resource, listadd=False)\n\n _map = vars.get(\"_map\", None)\n if _map and isinstance(output, dict):\n output.update(_map=_map)\n\n return output",
"def list_locations():",
"def start_geo():\r\n database=\"gds\"\r\n keywords=input(\"Keywords to search on GEO?\")\r\n geo_info(database, keywords)",
"def process_request(self, request):\n request.location = None\n request.location_source = u'cookie' # default\n location = None\n http_host = request.get_host()\n #_log.debug(\"HTTP host: %s\" % http_host)\n if http_host:\n cx = http_host.lower().split(\".\")\n if len(cx) > 2:\n subdomain = cx[0]\n if subdomain in settings.LOCATION_SUBDOMAIN_MAP:\n subdomain = settings.LOCATION_SUBDOMAIN_MAP[subdomain]\n if subdomain in settings.LOCATION_DATA.keys():\n location = subdomain.lower()\n request.location_source = u'subdomain'\n if not location:\n location = request.COOKIES.get(settings.LOCATION_COOKIE_NAME, '').lower()\n if location not in settings.LOCATION_DATA.keys():\n if 'HTTP_X_FORWARDED_FOR' in request.META:\n ip_addr = request.META['HTTP_X_FORWARDED_FOR']\n else:\n ip_addr = request.META['REMOTE_ADDR']\n # Location was not found in the cookie. \n # Look it up via GeoIP\n geoip = RiotVineGeoIP()\n location = geoip.nearest_location(ip_addr)\n request.location_source = u'geoip'\n request.location = location\n request.location_name = settings.LOCATION_DATA.get(location)[3]\n other_cities = sorted(settings.LOCATION_DATA.keys())\n other_cities.remove(location)\n request.other_cities = ' / '.join([\n ('<a href=\"http://%s.%s%s%s\">%s</a>' % (settings.LOCATION_SUBDOMAIN_REVERSE_MAP[loc], settings.DISPLAY_SITE_DOMAIN, _SUBDOMAIN_PORT, reverse(\"list_events\"), settings.LOCATION_DATA.get(loc)[3])) for loc in other_cities\n ])\n return None",
"def choose_new_location(self, new_locations):\n\t\tpass",
"def special_cases(location):\n\tloc_words = location.split()\n\tno_stop_words = [w for w in loc_words if w.lower() not in LUCENE_STOP_WORDS] \n\n\tresult = None\n\tif ('IN' in loc_words or 'OR' in loc_words or 'ON' in loc_words) and (len(loc_words) - len(no_stop_words) == 1):\n\t\tresult = yahoo_geocode(location)\n\telif (location.lower().find(\"the netherlands\") > 0):\n\t\tresult = yahoo_geocode(location)\n\treturn result;",
"def lookup(self):",
"def save_location_data(self):\r\n\r\n try:\r\n col = list(self.new_data[0].keys())\r\n table_col = ', '.join(\"'{}'\".format(key) for key in col)\r\n col_count = len(col)\r\n print(\"Adding data to {} columns.\".format(col_count))\r\n\r\n sql_data = []\r\n for d in self.new_data:\r\n if len(list(d.values())) == col_count:\r\n sql_data.append(list(d.values()))\r\n\r\n def insert_data(x=1):\r\n \"\"\"\r\n Local function that allows a wait period if database file is busy, then retries\r\n \"\"\"\r\n try:\r\n self.db.executemany(\"\"\"INSERT OR REPLACE INTO GeoIP {} VALUES ({})\"\"\".format(\r\n tuple(col), \",\".join(\"?\" * col_count)), sql_data)\r\n except AttributeError:\r\n print('ERROR: You must create a table before loading to it. Try initiate_table().')\r\n except sqlite3.OperationalError as e:\r\n if x == 5:\r\n print(\"Renaming GeoIP to GeoIP_old and creating new table to continue sync.\")\r\n self.db.execute(\"\"\"ALTER TABLE GeoIP RENAME TO GeoIP_old;\"\"\")\r\n\r\n n_col = ', '.join(\"'{}' {}\".format(key, val) for key, val in self.columns.items())\r\n\r\n self.db.execute('''CREATE TABLE IF NOT EXISTS GeoIP\r\n ({})'''.format(n_col))\r\n insert_data()\r\n else:\r\n print(\"ERROR: {}\\n Waiting 15 seconds then trying again.\\nTry {} out of 5\".format(e, x))\r\n time.sleep(15)\r\n insert_data(x + 1)\r\n\r\n insert_data()\r\n\r\n print(\"Table has been populated, commit to finalize operation.\")\r\n\r\n except (AttributeError, TypeError):\r\n print(\"ERROR: You must use get_initial_data() or get_sync_data() \"\r\n \"to grab data from Eloqua before writing to a database.\")\r\n exit()",
"def country_custom_handler(country):\n if not country.jurisdiction:\n country.jurisdiction = country",
"def __init__(self, myshp, mydbf, west_lon, east_lon, south_lat, north_lat):\n self.reader = shapefile.Reader(shp = myshp, dbf = mydbf)\n\n# The following four coordinate inputs must be obtained outside of the scope of the program. Go to\n# https://www.engineeringtoolbox.com/utm-latitude-longitude-d_1370.html\n# Then, call the method bounding_box to see the bounding UTM coordinates of the\n# data collected. Enter the bounding coordinates into the calculator to find the\n# corresponding latitude and longitude values. These will be used later to\n# integrate with other parts of the code.\n\n# This step is necessary because accurately location constrained data\n# acquisition is not always possible.\n\n self.west_lon = west_lon\n self.east_lon = east_lon\n self.south_lat = south_lat\n self.north_lat = north_lat\n\n# Get shapes objects which include information such as location and grid size\n self.shapes = self.reader.shapes()\n# Get record objects containing wind data\n self.records = self.reader.records()\n# Get number of datum\n self.length = len(self.shapes)",
"def locations_visited(self,year,month,day):\n\n sql_st = '''\n SELECT lat, lng\n FROM goog_locations\n WHERE\n (goog_locations.yr = ?) and\n (goog_locations.mnth = ?) and\n (goog_locations.dy = ?)\n '''\n\n cur = self.conn.cursor()\n locations = cur.execute(sql_st,(year,month,day)).fetchall()\n\n return locations",
"def doNLLoc(self):\n prog_dict = PROGRAMS['nlloc']\n files = prog_dict['files']\n # determine which model should be used in location\n controlfilename = \"locate_%s.nlloc\" % \\\n str(self.widgets.qComboBox_nllocModel.currentText())\n\n #self.setXMLEventID()\n precall = prog_dict['PreCall']\n precall(prog_dict)\n\n f = open(files['phases'], 'wt')\n #phases_hypo71 = self.dicts2hypo71Phases()\n #f.write(phases_hypo71)\n phases_nlloc = self.dicts2NLLocPhases()\n f.write(phases_nlloc)\n f.close()\n\n print 'Phases for NLLoc:'\n self.catFile(files['phases'])\n\n call = prog_dict['Call']\n (msg, err, returncode) = call(prog_dict, controlfilename)\n print msg\n print >> sys.stderr, err\n print '--> NLLoc finished'\n self.catFile(files['summary'])",
"def prepare_unagi(self):",
"def queryAllRouteInfo(self):\n try:\n self.getLongestSingleFlight()\n self.getShortestSingleFlight()\n self.getAverageDistance()\n self.getBiggestCity()\n self.getSmallestCity()\n self.getAverageSizeOfCity()\n self.getContinentsInformation()\n self.getHubCities()\n except Exception:\n print(\"Error occurred when query route info\")\n print(Exception)",
"def main():\n\n term = getterminal()\n\n FREEGEOIP = 'http://freegeoip.net/json/'\n source_ip = term.session.addrport.rsplit(':', 1)[0]\n\n try:\n source = requests.get(FREEGEOIP + source_ip, timeout=0.5).json()\n except (requests.exceptions.RequestException, ValueError) as e:\n log.error('Unable to query FREEGEOIP, source_ip={0}, error={1}'.format(source_ip, e))\n return\n log.info('source_ip={0}, country_code={1}'.format(source_ip, source.get('country_code')))\n\n if source.get('country_code') in geo_banned_counties:\n log.warn('Source {0} is from banned country \"{1}\".'.format(source_ip, source['country_code']))\n echo(geo_disconnect_message + u'\\r\\n')\n disconnect('country is banned')",
"def describe_location_init(actor, loc, vis_cont, ctxt) :\n ctxt.world[Global(\"describe_location_notables\")] = []\n ctxt.world[Global(\"describe_location_mentioned\")] = []",
"def process_osm_service_areas():\n # Facilities\n # - Stations\n stations = make_path(BASIC_FEATURES, \"SMARTplanStations\")\n station_name = \"Name\"\n # - Parks\n parks = make_path(CLEANED, \"Park_Points.shp\")\n parks_name = \"NAME\"\n\n # For each analysis year, analyze networks (avoid redundant solves)\n solved = []\n solved_years = []\n modes = [\"walk\"] # [\"walk\", \"bike\"]\n dest_grp = [\"stn\", \"parks\"]\n runs = [\"MERGE\", \"NO_MERGE\", \"OVERLAP\", \"NON_OVERLAP\"]\n expected_fcs = [\n f\"{mode}_to_{dg}_{run}\" for mode in modes for dg in dest_grp for run in runs\n ]\n for year in YEARS: # TODO: add appropriate print/logging statements within loop\n out_fds_path = make_path(CLEANED, f\"PMT_{year}.gdb\", \"Networks\")\n out_fds = validate_feature_dataset(out_fds_path, SR_FL_SPF, overwrite=False)\n # Network setup\n net_suffix = prep_conf.NET_BY_YEAR[year][0]\n if net_suffix in solved:\n # Copy from other year if already solved\n # Set a source to copy network analysis results from based on net_by_year\n # TODO: write function for source year setting\n target_net = prep_conf.NET_BY_YEAR[year][0]\n source_year = None\n for solved_year in solved_years:\n solved_net = prep_conf.NET_BY_YEAR[solved_year][0]\n if solved_net == target_net:\n source_year = solved_year\n break\n source_fds = make_path(CLEANED, f\"PMT_{source_year}.gdb\", \"Networks\")\n target_fds = make_path(CLEANED, f\"PMT_{year}.gdb\", \"Networks\")\n p_help.copy_net_result(\n source_fds=source_fds, target_fds=target_fds, fc_names=expected_fcs\n ) # TODO: embellish this function with print/logging\n else:\n # Solve this network\n print(f\"\\n{net_suffix}\")\n for mode in modes:\n # Create separate service area problems for stations and parks\n nd = make_path(NETS_DIR, f\"{mode}{net_suffix}.gdb\", \"osm\", \"osm_ND\")\n stations = ServiceAreaAnalysis(\n name=f\"{mode}_to_stn\",\n network_dataset=nd,\n facilities=stations,\n name_field=station_name,\n net_loader=prep_conf.NET_LOADER,\n )\n 
parks = ServiceAreaAnalysis(\n name=f\"{mode}_to_parks\",\n network_dataset=nd,\n facilities=parks,\n name_field=parks_name,\n net_loader=prep_conf.NET_LOADER,\n )\n # Solve service area problems\n for sa_prob in [stations, parks]:\n print(f\"\\n - {sa_prob.name}\")\n # Set restrictions if needed\n if \"bike\" in sa_prob.name:\n restrictions = prep_conf.BIKE_RESTRICTIONS\n else:\n restrictions = \"\"\n # Solve (exports output to the out_fds)\n sa_prob.solve(\n imped_attr=prep_conf.OSM_IMPED,\n cutoff=prep_conf.OSM_CUTOFF,\n out_ws=out_fds,\n restrictions=restrictions,\n use_hierarchy=False,\n net_location_fields=\"\",\n )\n # Keep track of what's already been solved\n solved.append(net_suffix)\n solved_years.append(year)",
"def loadpostgislayers(self):\n try:\n #set wait cursor\n QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))\n\n # create a new postgis layer\n self.postgislayer = myfunctions.addPostGISLayer(HOST, PORT, DBNAME, USERNAME, PASSWORD, '',\n u\"(%s)\" % self.fun_sql, u\"Στίγματα:%s-%s-%s\" % (\n self.ui.comboBox.currentText(), self.StartDateTime,\n self.EndDateTime), 'the_geom', 'id')\n self.postgislayer_line = myfunctions.addPostGISLayer(HOST, PORT, DBNAME, USERNAME, PASSWORD, '',\n u\"(%s)\" % self.fun_sql_line, u\"Διαδρομή:%s-%s-%s\" % (\n self.ui.comboBox.currentText(), self.StartDateTime,\n self.EndDateTime), 'the_geom', 'device_id')\n except AttributeError:\n print \"No data\"\n finally:\n # set wait cursor\n QApplication.restoreOverrideCursor()\n self.emit(QtCore.SIGNAL('displayonmap'))",
"def gs_saved_handler(sender, instance, created, raw, **kwargs):\n if not created or raw:\n return\n\n local_s = server_models.Server.objects.get_local()\n local_s.groundstations.add(instance)\n local_s.save()",
"def refresh_country_region():\n\n # Delete all records\n TCountryRegion.delete_all_data()\n\n # Get all objects in static database\n d = CountryRegion.objects.all()\n\n # Pass all data to insert function\n TCountryRegion.populate_data(d)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
On Validation for GIS Locations (before DB I/O)
|
def gis_location_onvalidation(form):
# If you need more info from the old location record, add it here.
# Check if this has already been called and use the existing info.
def get_location_info():
if "id" in request:
query = (db.gis_location.id == request.id)
return db(query).select(db.gis_location.level,
limitby=(0, 1)).first()
else:
return None
record_error = T("Sorry, only users with the MapAdmin role are allowed to edit these locations")
field_error = T("Please select another level")
# Shortcuts
level = "level" in form.vars and form.vars.level
parent = "parent" in form.vars and form.vars.parent
lat = "lat" in form.vars and form.vars.lat
lon = "lon" in form.vars and form.vars.lon
members = "members" in form.vars and form.vars.members
# Allowed changes for location groups: Once it's a group, it stays a
# group, and existing non-group locations can't be converted to groups.
# For a new location, set the level to "GR" if members are present.
# If it's already a group, don't allow clearing the members or altering
# the level. Don't allow adding members to an existing location that's
# not a group. Note: We can't rely on checking form.vars.level to tell
# if an existing location was a group, because it might not be available
# in either form.vars or request.vars -- for an interactive form, that
# field was set to not writable, so it's just plain text in the page.
# Note also that many of the errors "available" here are not accessible
# via the interactive form. Note further that permission to *edit* a
# group does not give one permission to *mess up* a group, so the above
# restrictions apply to MapAdmins too.
if "id" in request.vars:
# Existing location? Check attempts to manipulate group or members.
# Note we cannot rely on merely checking form.vars.level -- it might
# not be present, or this request might be attempting to change the
# level to group.
# Is this a location group?
# Use the breadcrumb set in prep if available to avoid a db read.
if "location_is_group" in response.s3:
location_is_group = response.s3.location_is_group
else:
old_location = get_location_info()
location_is_group = old_location.level == "GR"
if location_is_group:
if not _gis.edit_GR:
response.error = record_error
return
# Make sure no-one takes away all members.
if "members" in form.vars and not form.vars.members:
form.errors["members"] = T("A location group must have at least one member.")
return
else:
# Don't allow changing non-group to group.
if members:
form.errors["members"] = T("Location cannot be converted into a group.")
return
if level == "GR":
form.errors["level"] = T("Location cannot be converted into a group.")
return
else:
# New location -- if the location has members, and if permitted to
# make a group, set "group" level. Don't allow also setting a parent.
if members:
if _gis.edit_GR:
if "parent" in form.vars and form.vars.parent:
form.errors["parent"] = T("Location group cannot have a parent.")
return
form.vars.level = "GR"
else:
response.error = T("Sorry, only users with the MapAdmin role are allowed to create location groups.")
return
# Check Permissions
# 'MapAdmin' has all these perms set, no matter what 000_config has
if level == "L0" and not _gis.edit_L0:
response.error = record_error
form.errors["level"] = field_error
return
elif level == "L1" and not _gis.edit_L1:
response.error = record_error
form.errors["level"] = field_error
return
elif level == "L2" and not _gis.edit_L2:
response.error = record_error
form.errors["level"] = field_error
return
elif level == "L3" and not _gis.edit_L3:
response.error = record_error
form.errors["level"] = field_error
return
elif level == "L4" and not _gis.edit_L4:
response.error = record_error
form.errors["level"] = field_error
return
elif level == "L5" and not _gis.edit_L5:
response.error = record_error
form.errors["level"] = field_error
return
if parent:
query = (db.gis_location.id == parent)
_parent = db(query).select(db.gis_location.level,
db.gis_location.gis_feature_type,
db.gis_location.lat_min,
db.gis_location.lon_min,
db.gis_location.lat_max,
db.gis_location.lon_max,
#db.gis_location.level,
limitby=(0, 1),
cache=(cache.ram, 3600)).first()
# Don't allow a group as parent (that way lies madness!).
# (Check not needed here -- enforced in requires validator.)
#if _parent and _parent.level == "GR":
# form.errors["parent"] = T("Location group cannot be a parent.")
# return
# Check Parents are in sane order
if level and parent and _parent:
# Check that parent is of a higher level (http://eden.sahanafoundation.org/ticket/450)
if level[1:] < _parent.level[1:]:
response.error = "%s: %s" % (T("Parent level should be higher than this record's level. Parent level is"),
gis.get_location_hierarchy()[_parent.level])
form.errors["level"] = T("Level is higher than parent's")
return
strict = gis.get_strict_hierarchy()
if strict:
# Check Parents are in exact order
if level == "L1" and len(_gis.countries) == 1:
# Hardcode the Parent
parent = _gis.countries.popitem()[1].id
elif level == "L0":
# Parent is impossible
parent = ""
elif not parent:
# Parent is mandatory
response.error = "%s: %s" % \
(T("Parent needs to be set for locations of level"),
gis.get_location_hierarchy()[level])
form.errors["parent"] = T("Parent needs to be set")
return
elif not level:
# Parents needs to be of level max_hierarchy
max_hierarchy = gis.get_max_hierarchy_level()
if _parent.level != max_hierarchy:
response.error = "%s: %s" % \
(T("Specific locations need to have a parent of level"),
gis.get_location_hierarchy()[max_hierarchy])
form.errors["parent"] = T("Parent needs to be of the correct level")
return
else:
# Check that parent is of exactly next higher order
if (int(level[1:]) - 1) != int(_parent.level[1:]):
response.error = "%s: %s" % \
(T("Locations of this level need to have a parent of level"),
gis.get_location_hierarchy()["L%i" % (int(level[1:]) - 1)])
form.errors["parent"] = T("Parent needs to be of the correct level")
return
# Check within permitted bounds
# (avoid incorrect data entry)
# Points only for now
if not "gis_feature_type" in form.vars or (form.vars.gis_feature_type == "1"):
# Skip if no Lat/Lon provided
if (lat != None) and (lon != None):
if parent and _parent.gis_feature_type == 3:
# Check within Bounds of the Parent
# Rough (Bounding Box)
min_lat = _parent.lat_min
min_lon = _parent.lon_min
max_lat = _parent.lat_max
max_lon = _parent.lon_max
base_error = T("Sorry that location appears to be outside the area of the Parent.")
lat_error = "%s: %s & %s" % (T("Latitude should be between"),
str(min_lat), str(max_lat))
lon_error = "%s: %s & %s" % (T("Longitude should be between"),
str(min_lon), str(max_lon))
if (lat > max_lat) or (lat < min_lat):
response.error = base_error
form.errors["lat"] = lat_error
return
elif (lon > max_lon) or (lon < min_lon):
response.error = base_error
form.errors["lon"] = lon_error
return
# @ToDo: Precise (GIS function)
# (if using PostGIS then don't do a separate BBOX check as this is done within the query)
else:
# Check bounds for the Instance
config = gis.get_config()
min_lat = config.min_lat
min_lon = config.min_lon
max_lat = config.max_lat
max_lon = config.max_lon
base_error = T("Sorry that location appears to be outside the area supported by this deployment.")
lat_error = "%s: %s & %s" % (T("Latitude should be between"),
str(min_lat), str(max_lat))
lon_error = "%s: %s & %s" % (T("Longitude should be between"),
str(min_lon), str(max_lon))
if (lat > max_lat) or (lat < min_lat):
response.error = base_error
form.errors["lat"] = lat_error
return
elif (lon > max_lon) or (lon < min_lon):
response.error = base_error
form.errors["lon"] = lon_error
return
# ToDo: Check for probable duplicates
# http://eden.sahanafoundation.org/ticket/481
# name soundex
# parent
# radius
# response.warning = T("This appears to be a duplicate of ") + xxx (with appropriate representation including hyperlink to view full details - launch de-duplication UI?)
# form.errors["name"] = T("Duplicate?")
# Set flag to say that this has been confirmed as not a duplicate
# Add the bounds (& Centroid for Polygons)
gis.wkt_centroid(form)
# ToDo: Calculate the bounding box
# gis.parse_location(form)
return
|
[
"def validate_location():\n location = request.args.get('location')\n\n try:\n possible_locations = bundle_location_data(location)\n except NoLocationResultsError as e:\n return jsonify({'error': e.message})\n\n return jsonify(possible_locations)",
"def validateLocation(location: str) -> str:\n # check location\n validLocation = \"NULL\" # init to \"NULL\" incase no valid location is found\n locationTokens = location.split()\n for token in locationTokens:\n # token = re.compile(\"[^a-zA-Z]\") # removes all non-alpha chars\n token = str(token)\n # print(\"token = \", token)\n if token in stateInitials:\n # print(\"--found valid: \", stateInitials[token])\n validLocation = stateInitials[token]\n break\n elif token in states:\n # print(\"--found valid: \", token)\n validLocation = token\n break\n return validLocation",
"def ST_IsValid(geos):\n return arctern.ST_IsValid(geos)",
"def special_cases(location):\n\tloc_words = location.split()\n\tno_stop_words = [w for w in loc_words if w.lower() not in LUCENE_STOP_WORDS] \n\n\tresult = None\n\tif ('IN' in loc_words or 'OR' in loc_words or 'ON' in loc_words) and (len(loc_words) - len(no_stop_words) == 1):\n\t\tresult = yahoo_geocode(location)\n\telif (location.lower().find(\"the netherlands\") > 0):\n\t\tresult = yahoo_geocode(location)\n\treturn result;",
"def test_unknown_crs(self):\n self.assertRaises(ValueError, geo_uri, \"geo:0,0,0;crs=SpamEggs\")",
"def test_check_location():\n cases = read_file('test_check_location.json')\n countries = read_file(\"countries.json\")\n\n # home, from known\n assert check_location(cases[0], countries)\n # home unknown\n assert_false(check_location(cases[1], countries))\n # via unknown\n assert_false(check_location(cases[2], countries))\n # all known\n assert check_location(cases[3], countries)",
"def test_user_location(self):\n assert self.user.location == 'Seattle, WA'",
"def location():\n\n resource = request.function\n tablename = module + \"_\" + resource\n table = db[tablename]\n\n # Allow prep to pass vars back to the controller\n vars = {}\n \n # Pre-processor\n def prep(r, vars):\n\n # Restrict access to Polygons to just MapAdmins\n if deployment_settings.get_security_map() and not shn_has_role(\"MapAdmin\"):\n table.code.writable = False\n if r.method == \"create\":\n table.code.readable = False\n table.gis_feature_type.writable = table.gis_feature_type.readable = False\n table.wkt.writable = table.wkt.readable = False\n table.marker_id.comment = \"\"\n else:\n table.code.comment = DIV( _class=\"tooltip\", _title=Tstr(\"Code\") + \"|\" + Tstr(\"For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.\"))\n table.wkt.comment = DIV(SPAN(\"*\", _class=\"req\"), DIV( _class=\"tooltip\", _title=\"WKT\" + \"|\" + Tstr(\"The <a href='http://en.wikipedia.org/wiki/Well-known_text' target=_blank>Well-Known Text</a> representation of the Polygon/Line.\")))\n\n if r.http == \"GET\" and r.representation in (\"html\", \"popup\"):\n # Options which are only required in interactive HTML views\n table.level.comment = DIV( _class=\"tooltip\", _title=Tstr(\"Level\") + \"|\" + Tstr(\"If the location is a geographic area, then state at what level here.\"))\n table.parent.comment = DIV(A(ADD_LOCATION,\n _class=\"colorbox\",\n _href=URL(r=request, c=\"gis\", f=\"location\", args=\"create\", vars=dict(format=\"popup\", child=\"parent\")),\n _target=\"top\",\n _title=ADD_LOCATION),\n DIV(\n _class=\"tooltip\",\n _title=Tstr(\"Parent\") + \"|\" + Tstr(\"The Area which this Site is located within.\"))),\n table.name.comment = SPAN(\"*\", _class=\"req\")\n table.osm_id.comment = DIV( _class=\"tooltip\", _title=\"OSM ID\" + \"|\" + Tstr(\"The <a href='http://openstreetmap.org' target=_blank>OpenStreetMap</a> ID. 
If you don't know the ID, you can just say 'Yes' if it has been added to OSM.\"))\n\n # CRUD Strings\n LIST_LOCATIONS = T(\"List Locations\")\n s3.crud_strings[tablename] = Storage(\n title_create = ADD_LOCATION,\n title_display = T(\"Location Details\"),\n title_list = T(\"Locations\"),\n title_update = T(\"Edit Location\"),\n title_search = T(\"Search Locations\"),\n subtitle_create = T(\"Add New Location\"),\n subtitle_list = LIST_LOCATIONS,\n label_list_button = LIST_LOCATIONS,\n label_create_button = ADD_LOCATION,\n label_delete_button = T(\"Delete Location\"),\n msg_record_created = T(\"Location added\"),\n msg_record_modified = T(\"Location updated\"),\n msg_record_deleted = T(\"Location deleted\"),\n msg_list_empty = T(\"No Locations currently available\"))\n\n if r.method in (None, \"list\") and r.record == None:\n # List\n pass\n elif r.method in (\"delete\", \"search_simple\"):\n pass\n else:\n # Add Map to allow locations to be found this way\n config = gis.get_config()\n lat = config.lat\n lon = config.lon\n zoom = config.zoom\n feature_queries = []\n\n if r.method == \"create\":\n add_feature = True\n add_feature_active = True\n else:\n if r.method == \"update\":\n add_feature = True\n add_feature_active = False\n else:\n # Read\n add_feature = False\n add_feature_active = False\n \n location = db(db.gis_location.id == r.id).select(db.gis_location.lat, db.gis_location.lon, limitby=(0, 1)).first()\n if location and location.lat is not None and location.lon is not None:\n lat = location.lat\n lon = location.lon\n # Same as a single zoom on a cluster\n zoom = zoom + 2\n \n _map = gis.show_map(lat = lat,\n lon = lon,\n zoom = zoom,\n feature_queries = feature_queries,\n add_feature = add_feature,\n add_feature_active = add_feature_active,\n toolbar = True,\n collapsed = True)\n\n # Pass the map back to the main controller\n vars.update(_map=_map)\n return True\n response.s3.prep = lambda r, vars=vars: prep(r, vars)\n \n # Options\n _vars = request.vars\n 
filters = []\n # Deprecate!\n #fclass = _vars.get(\"feature_class\", None)\n #if fclass:\n # filters.append((db.gis_location.feature_class_id == db.gis_feature_class.id) &\n # (db.gis_feature_class.name.like(fclass)))\n\n parent = _vars.get(\"parent_\", None)\n # Don't use 'parent' as the var name as otherwise it conflicts with the form's var of the same name & hence this will be triggered during form submission\n if parent:\n # Can't do this using a JOIN in DAL syntax\n # .belongs() not GAE-compatible!\n filters.append((db.gis_location.parent.belongs(db(db.gis_location.name.like(parent)).select(db.gis_location.id))))\n # ToDo: Make this recursive - want descendants not just direct children!\n # Use new gis.get_children() function\n\n # ToDo\n # bbox = _vars.get(\"bbox\", None):\n\n if filters:\n response.s3.filter = reduce(__and__, filters)\n\n caller = _vars.get(\"caller\", None)\n if caller:\n # We've been called as a Popup\n if \"gis_location_parent\" in caller:\n # Populate defaults & hide unnecessary rows\n # Use default Marker for Admin Locations\n table.marker_id.readable = table.marker_id.writable = False\n table.addr_street.readable = table.addr_street.writable = False\n #table.osm_id.readable = table.osm_id.writable = False\n #table.source.readable = table.source.writable = False\n else:\n parent = _vars.get(\"parent_\", None)\n # Don't use 'parent' as the var name as otherwise it conflicts with the form's var of the same name & hence this will be triggered during form submission\n if parent:\n table.parent.default = parent\n \n #fc = None\n # Populate defaults & hide unnecessary rows\n if \"cr_shelter\" in caller:\n #fc = db(db.gis_feature_class.name == \"Shelter\").select(db.gis_feature_class.id, limitby=(0, 1)).first()\n table.level.readable = table.level.writable = False\n table.url.readable = table.url.writable = False\n elif \"hms_hospital\" in caller:\n #fc = db(db.gis_feature_class.name == \"Hospital\").select(db.gis_feature_class.id, limitby=(0, 
1)).first()\n table.level.readable = table.level.writable = False\n table.url.readable = table.url.writable = False\n elif \"irs_ireport\" in caller:\n #fc = db(db.gis_feature_class.name == \"Incident\").select(db.gis_feature_class.id, limitby=(0, 1)).first()\n table.level.readable = table.level.writable = False\n table.url.readable = table.url.writable = False\n elif \"org_office\" in caller:\n #fc = db(db.gis_feature_class.name == \"Office\").select(db.gis_feature_class.id, limitby=(0, 1)).first()\n table.level.readable = table.level.writable = False\n table.url.readable = table.url.writable = False\n elif \"pr_presence\" in caller:\n #fc = db(db.gis_feature_class.name == \"Person\").select(db.gis_feature_class.id, limitby=(0, 1)).first()\n table.level.readable = table.level.writable = False\n table.url.readable = table.url.writable = False\n elif \"assessment_location\" in caller:\n table.level.default = \"L4\"\n #table.feature_class_id.readable = table.feature_class_id.writable = False\n table.marker_id.readable = table.marker_id.writable = False\n table.addr_street.readable = table.addr_street.writable = False\n elif \"school_district\" in caller:\n table.level.default = \"L2\"\n #table.feature_class_id.readable = table.feature_class_id.writable = False\n table.marker_id.readable = table.marker_id.writable = False\n table.addr_street.readable = table.addr_street.writable = False\n elif \"school_report_location\" in caller:\n table.level.default = \"L2\"\n #table.feature_class_id.readable = table.feature_class_id.writable = False\n table.marker_id.readable = table.marker_id.writable = False\n table.addr_street.readable = table.addr_street.writable = False\n elif \"school_report_union\" in caller:\n table.level.default = \"L4\"\n #table.feature_class_id.readable = table.feature_class_id.writable = False\n table.marker_id.readable = table.marker_id.writable = False\n table.addr_street.readable = table.addr_street.writable = False\n \n #try:\n # If we have a 
pre-assigned Feature Class\n #table.feature_class_id.default = fc.id\n #table.feature_class_id.readable = table.feature_class_id.writable = False\n # Use default Marker for Class\n #table.marker_id.readable = table.marker_id.writable = False\n #except:\n #pass\n\n table.osm_id.readable = table.osm_id.writable = False\n table.source.readable = table.source.writable = False\n\n level = _vars.get(\"level\", None)\n if level:\n # We've been called from the Location Selector widget\n table.marker_id.readable = table.marker_id.writable = False\n table.addr_street.readable = table.addr_street.writable = False\n \n # Post-processor\n def user_postp(jr, output):\n shn_action_buttons(jr)\n return output\n response.s3.postp = user_postp\n\n response.s3.pagination = True\n output = shn_rest_controller(module, resource, listadd=False)\n\n _map = vars.get(\"_map\", None)\n if _map and isinstance(output, dict):\n output.update(_map=_map)\n\n return output",
"def check_maps(self):\n if self.is_spatial:\n Landscape.check_maps(self)\n if (\n self.grid.file_name == \"set\"\n and self.sample_map.x_offset + self.grid.x_size > self.sample_map.x_size\n or self.sample_map.y_offset + self.grid.y_size > self.sample_map.y_size\n ):\n raise ValueError(\"Grid is not within the sample map - please check offsets of sample map.\")\n self.check_dispersal_map()\n self.check_reproduction_map()\n self.check_death_map()",
"def test_geographical_coordinates_with_invalid_address(self):\n invalid_address = \"asdlfjladjkfld\"\n geo_coords = GeographicalCoordinates(invalid_address)\n\n self.assertEqual(geo_coords.latitude, 0.0)\n self.assertEqual(geo_coords.longitude, 0.0)\n self.assertEqual(geo_coords.status, 'ERROR')",
"def _entity_state_is_valid_coordinate_set(state: str) -> bool:\n schema = vol.Schema(cv.gps)\n try:\n coordinates = state.split(\",\")\n schema(coordinates)\n return True\n except (vol.MultipleInvalid):\n return False",
"def verify_location_in_db(location):\n\n loc_tuples = db.session.query(Location.location_name).all()\n loc_list = []\n for tup in loc_tuples:\n loc_list.append(tup[0])\n\n if location in loc_list:\n return True\n else:\n return False",
"def test_default_validate_loc(loc):\n with pytest.raises(SitemapValidationError):\n get_validated(loc=loc)",
"def is_lat_long_easting_northing_schema(self):\n field_names = [name.lower() for name in self.field_names]\n return all([\n 'latitude' in field_names,\n 'longitude' in field_names,\n 'easting' in field_names,\n 'northing' in field_names,\n 'zone' in field_names\n ])",
"def __init__(self, myshp, mydbf, west_lon, east_lon, south_lat, north_lat):\n self.reader = shapefile.Reader(shp = myshp, dbf = mydbf)\n\n# The following four coordinate inputs must be obtained outside of the scope of the program. Go to\n# https://www.engineeringtoolbox.com/utm-latitude-longitude-d_1370.html\n# Then, call the method bounding_box to see the bounding UTM coordinates of the\n# data collected. Enter the bounding coordinates into the calculator to find the\n# corresponding latitude and longitude values. These will be used later to\n# integrate with other parts of the code.\n\n# This step is necessary because accurately location constrained data\n# acquisition is not always possible.\n\n self.west_lon = west_lon\n self.east_lon = east_lon\n self.south_lat = south_lat\n self.north_lat = north_lat\n\n# Get shapes objects which include information such as location and grid size\n self.shapes = self.reader.shapes()\n# Get record objects containing wind data\n self.records = self.reader.records()\n# Get number of datum\n self.length = len(self.shapes)",
"def locn_is_latlong():\n s = read_command(\"g.region\", flags='pu')\n kv = parse_key_val(s, ':')\n if kv['projection'].split(' ')[0] == '3':\n return True\n else:\n return False",
"def validate_lat_in_range(value):\n\t_validate_in_range(value, -90, 90)",
"def test_locations_correct(self):\n location = \"/var/www/foo/\"\n handle_servername = ServerName(**{\n \"domain\" : self.valid_domain,\n }\n )\n handle_location = Location(**{\n \"location\" : location\n }\n )\n handle_servername.locations = handle_location\n self.assertEqual(handle_servername.locations[location].location, location)\n del handle_location\n del handle_servername",
"def verify_country(rec, orig):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a list of Parents for a Location
|
def s3_gis_location_parents(r, **attr):
resource = r.resource
table = resource.table
# Check permission
if not s3_has_permission("read", table):
r.unauthorised()
if r.representation == "html":
# @ToDo
output = dict()
#return output
raise HTTP(501, body=s3xrc.ERROR.BAD_FORMAT)
elif r.representation == "json":
if r.id:
# Get the parents for a Location
parents = gis.get_parents(r.id)
if parents:
_parents = {}
for parent in parents:
_parents[parent.level] = parent.id
output = json.dumps(_parents)
return output
else:
raise HTTP(404, body=s3xrc.ERROR.NO_MATCH)
else:
raise HTTP(404, body=s3xrc.ERROR.BAD_RECORD)
else:
raise HTTP(501, body=s3xrc.ERROR.BAD_FORMAT)
|
[
"def get_location_children(client: GraphqlClient, location_id: str) -> List[Location]:\n result = LocationChildrenQuery.execute(client, id=location_id)\n locations = result.location.children\n\n if len(locations) == 0:\n return []\n\n return [\n Location(name=location.name, id=location.id, externalId=location.externalId)\n for location in locations\n ]",
"def get_parents(self):\n return []",
"def _get_locations(self, location=None):\r\n if location is not None:\r\n yield location\r\n else:\r\n for loc in self.list_locations():\r\n yield loc",
"def get_locations_and_children(self, location_ids):\n locations = self.filter(location_id__in=location_ids)\n return self.get_queryset_descendants(locations, include_self=True)",
"def parent_names(self) -> List[str]:\n return [t.name for t in self.parents]",
"def get_parents(self):\n query = ParentOrGuardian.all(keys_only=True)\n query.filter(\"family = \", self.key())\n keys = query.fetch(100)\n return ParentOrGuardian.get(keys)",
"def list_locations():",
"def parentSites(self):\n return [site.name for site in self.sites.parentSites()]",
"def get_location_hierarchy(self):\n logger.info(\"Getting location hierarchy.\")\n sql_query = \"\"\"SELECT\n location_id,\n level,\n parent_id,\n is_estimate\n FROM\n shared.location_hierarchy_history lhh\n JOIN\n shared.location_set_version_active lsv USING (location_set_version_id)\n WHERE\n lhh.location_set_id = {location_set_id} AND\n lsv.gbd_round_id = {gbd_round_id};\n \"\"\".format(location_set_id=self.location_set_id, gbd_round_id=self.gbd_round_id)\n location_hierarchy_history = db_connect.query(sql_query, conn_def=self.conn_def)\n location_hierarchy_history.drop_duplicates(inplace=True)\n return location_hierarchy_history",
"def parents(self, host):\n return list(self.iter_parents(host))",
"def get_parent_paths(self, depth=None, hints=None):\n #pylint:disable=too-many-nested-blocks\n if depth is not None and depth == 0:\n return [[self]]\n results = []\n parents = PageElement.objects.filter(\n pk__in=RelationShip.objects.filter(\n dest_element=self).values('orig_element_id'))\n if not parents:\n return [[self]]\n if hints:\n for parent in parents:\n if parent.slug == hints[-1]:\n # we found a way to cut the search space early.\n parents = [parent]\n hints = hints[:-1]\n break\n for parent in parents:\n grandparents = parent.get_parent_paths(\n depth=(depth - 1) if depth is not None else None,\n hints=hints)\n if grandparents:\n for grandparent in grandparents:\n term_index = 0\n if hints:\n for node in grandparent:\n if node.slug == hints[term_index]:\n term_index += 1\n if term_index >= len(hints):\n break\n if not hints or term_index >= len(hints):\n # we have not hints or we consumed all of them.\n results += [grandparent + [self]]\n return results",
"def get_parents_list(self, block):\n if isinstance(block, str):\n block = self.blocks[block]\n parents = []\n current = block\n while True:\n if current == self.root_block:\n break\n parents.append(current.parent_block)\n current = current.parent_block\n parents.reverse()\n return parents",
"def get_parents(self, id_):\n return # osid.id.IdList",
"def locations(self):\n return self.__locations",
"def location_list(self):\n \n self._send(\"location_list\")\n return [e2string(x) for x in self._read_json(220)]",
"def get_locations(self, location_type):\n\n locations = []\n for key in self.buildings:\n if self.buildings[key] is location_type:\n locations.append(key)\n\n return locations if len(locations) > 0 else None",
"def get_parent_map_ids(self, map_id):\n return # osid.id.IdList",
"def parents(self):\n # Sort here for determinism\n # return sorted(self._parents.values(), key=lambda edge: str(edge))\n return list(self._parents.values())",
"def parents(self, nodename):\n parents = set(self.node_dict[nodename].parents.values())\n return parents",
"def parent_types(self) -> Iterable[\"Type\"]:\n return (self._hier.get(name) for name in self.parents)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Computes a panorex using internal attribute arch.
|
def compute_panorex(self):
return self.arch_handler.create_panorex(self.arch)
|
[
"def update(self, arch=None):\n if arch is not None:\n self.set_arch(arch)\n self.set_panorex(self.compute_panorex())",
"def _setup_x86_arch(self):\n arch_mode = self.binary.architecture_mode\n\n # Set up architecture information\n self.arch_info = X86ArchitectureInformation(arch_mode)\n self.disassembler = X86Disassembler(architecture_mode=arch_mode)\n self.ir_translator = X86Translator(architecture_mode=arch_mode)",
"def setArch(arch):\n if arch in supportedArchs:\n ArchInfo.currentArch = arch\n #print \"[+] Working under architecture : \" + str(arch)\n if arch == \"X86\":\n ArchInfo.bits = 32\n REGSIZE.size = 32\n ArchInfo.ip = \"eip\"\n ArchInfo.sp = \"esp\"\n elif arch == \"X86_64\":\n ArchInfo.bits = 64\n REGSIZE.size = 64\n ArchInfo.ip = \"rip\"\n ArchInfo.sp = \"rsp\"\n else:\n raise AnalysisException(\"Architecture %s is not supported.\\\n Sorry ! \" % arch)",
"def main_arch(self):\n return self.files[0].arch",
"def _generate_pores(self):\n Nx = self._Nx\n Ny = self._Ny\n Nz = self._Nz\n Lc = self._Lc\n Np = Nx*Ny*Nz\n ind = sp.arange(0,Np)\n self.set_pore_data(prop='numbering',data=ind)\n self.set_pore_info(label='all',locations=sp.ones_like(ind))\n pore_coords = Lc/2+Lc*sp.array(sp.unravel_index(ind, dims=(Nx, Ny, Nz), order='F'),dtype=sp.float64).T\n self.set_pore_data(prop='coords',data=pore_coords)",
"def deploy_octant(self) :\n\n n_dir_oct = self.n_dir/4\n offset = 0\n for i_octant in xrange(0,4) :\n if i_octant != 0 :\n for i in xrange (0,n_dir_oct) :\n# Copy omega and weight \n self.weight[i+offset] = self.weight[i]\n self.omega[i+offset,2] = self.omega[i,2]\n# Correct omega signs\n if i_octant == 1 :\n self.omega[i+offset,0] = self.omega[i,0]\n self.omega[i+offset,1] = -self.omega[i,1]\n elif i_octant == 2 :\n self.omega[i+offset,0] = -self.omega[i,0]\n self.omega[i+offset,1] = -self.omega[i,1]\n else :\n self.omega[i+offset,0] = -self.omega[i,0]\n self.omega[i+offset,1] = self.omega[i,1]\n offset += n_dir_oct\n\n sum_weight = 0.\n for i in xrange(0,n_dir_oct) :\n sum_weight += 4 * self.weight[i]\n self.weight[:] = self.weight[:]/sum_weight",
"def ProcessArchitecture(self) -> Architecture:",
"def test_arch():\n\n arch()\n arch(\"-f\")\n arch(\"--frontend\")\n arch(\"-b\")\n arch(\"--backend\")",
"def affine_transformation(X_unprj, affine_x, affine_y, args, header):\n\tx_pred = np.dot(X_unprj, affine_x)\n\ty_pred = np.dot(X_unprj, affine_y)\n\treturn x_pred, y_pred",
"def get_panorama_done(self):\n self.calculate_ransac_parameters()\n for i in range(0,4,1):\n self.perform_ransac((str(i),str(i+1),str(i)+str(i+1)))\n self.get_product_homography()\n self.get_panorama_image(('02','12','22','32','42'))",
"def update_base_arch(self, param_set):\n for i in range(len(self.pc_arg_val)):\n self.base_arch.config_label[self.param_set_labels[i]] = param_set[i]\n self.pc_arg_val[i][0].comp_args[self.pc_arg_val[i][1]] = param_set[i]\n self.pc_arg_val[i][0].clear_cache()",
"def make_permuterm(self):\n if self.multifield:\n self.ptindex = {}\n\n for field, _ in SAR_Project.fields:\n self.ptindex[field] = []\n\n inv_index = self.index[field]\n\n for term in inv_index.keys():\n permuterms = self.generate_permuterms(term)\n\n for p in permuterms:\n bisect.insort_left(self.ptindex[field], (p, term))\n\n else:\n self.ptindex = []\n\n for term in self.index.keys():\n permuterms = self.generate_permuterms(term)\n\n for p in permuterms:\n bisect.insort_left(self.ptindex, (p, term))",
"def projector(basis):\n\t\n\tK = numpy.column_stack(basis)\n\tP = numpy.dot(K, K.T)\n\treturn P",
"def _assemble(self, modules):\n image_dtype = config[\"SOURCE_PROC_IMAGE_DTYPE\"]\n if self._geom is not None:\n n_pulses = modules.shape[0]\n if self._out_array is None or self._out_array.shape[0] != n_pulses:\n self._out_array = self._geom.output_array_for_position_fast(\n extra_shape=(n_pulses, ), dtype=image_dtype)\n\n try:\n self._geom.position_all_modules(modules, out=self._out_array)\n # EXtra-foam raises ValueError while EXtra-geom raises\n # AssertionError if the shape of the output array does not\n # match the expected one, e.g. after a change of quadrant\n # positions during runtime.\n except (ValueError, AssertionError):\n # recreate the output array\n self._out_array = self._geom.output_array_for_position_fast(\n extra_shape=(n_pulses, ), dtype=image_dtype)\n self._geom.position_all_modules(modules, out=self._out_array)\n\n return self._out_array\n\n # temporary workaround for Pulse resolved JungFrau without geometry\n if config[\"DETECTOR\"] == \"JungFrauPR\":\n shape = modules.shape\n # Stacking modules vertically along y axis.\n return modules.reshape(shape[0], -1, shape[-1])\n\n # For train-resolved detector, assembled is a reference\n # to the array data received from the pyzmq. This array data\n # is only readable since the data is owned by a pointer in\n # the zmq message (it is not copied). However, other data\n # like data['metadata'] is writeable.\n # FIXME: why once a while this takes a few ms???\n return modules.astype(image_dtype)",
"def from_random(cls, arch: Architecture, num_uops_per_insn: int):\n return cls.from_random_with_core(\n arch, num_uops_per_insn=num_uops_per_insn, core_ratio=1.0\n )",
"def test_arch_platform():\n\n arch(\"-p\")\n arch(\"--platform\")\n arch(\"-f\", \"-p\")\n arch(\"-b\", \"-p\")",
"def calc_pore_density_12(path):\n fig, ax = plt.subplots()\n trj = md.load(f'{path}/com.trr', top=f'{path}/com.gro')\n for resname in ['emim', 'tf2n']:\n mean = list()\n il = trj.atom_slice(trj.topology.select(f'resname {resname}'))\n for i in range(0, 4000):\n frame = il[i]\n \n pore1 = np.intersect1d(\n np.intersect1d(np.where(frame.xyz[-1, :, 1] > 1),\n np.where(frame.xyz[-1, :, 1] < (5.467-1))\n ),\n np.intersect1d(np.where(frame.xyz[-1, :, 2] > 0.937),\n np.where(frame.xyz[-1, :, 2] < 2.332)\n ),\n )\n pore2 = np.intersect1d(\n np.intersect1d(np.where(frame.xyz[-1, :, 1] > 1),\n np.where(frame.xyz[-1, :, 1] < (5.467-1))\n ),\n np.intersect1d(np.where(frame.xyz[-1, :, 2] > 3.228),\n np.where(frame.xyz[-1, :, 2] < 4.6222)\n ),\n )\n\n pore_avg = list()\n for pore in (pore1, pore2):\n sliced = frame.atom_slice(pore)\n sliced.unitcell_lengths[:,1] = (5.467-1) - 1\n sliced.unitcell_lengths[:,2] = 1.395\n masses = list()\n for i in sliced.topology.atoms:\n if i.name == 'emim':\n masses.append(111)\n if i.name == 'tf2n':\n masses.append(280)\n\n density = md.density(sliced, masses=masses)\n pore_avg.append(density)\n\n avg_density = np.mean(pore_avg)\n mean.append(avg_density)\n\n if resname == 'emim':\n label = 'EMI'\n elif resname == 'tf2n':\n label = 'TFSI'\n plt.plot(range(0,4000), mean, label=label)\n print(np.mean(mean))\n plt.xlabel(\"MD Frame\")\n plt.ylabel(\"density (kg/m^3)\")\n plt.legend()\n plt.savefig(f'{path}/number_densities.pdf', dpi=400)\n plt.savefig(f'{path}/number_densities.png', dpi=400)",
"def define_transforms(arch):\n\n # inception_v3 requires 299 x 299\n if arch == 'inception_v3':\n data_transforms = {\n 'train': transforms.Compose([\n transforms.RandomRotation(45),\n transforms.RandomResizedCrop(299),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])\n ]),\n 'valid': transforms.Compose([\n transforms.Resize(350),\n transforms.CenterCrop(299),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])\n ]),\n 'test': transforms.Compose([\n transforms.Resize(350),\n transforms.CenterCrop(299),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])\n ]),\n }\n else:\n # All other pretrained models require 224 x 224\n data_transforms = {\n 'train': transforms.Compose([\n transforms.RandomRotation(45),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])\n ]),\n 'valid': transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])\n ]),\n 'test': transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], \n [0.229, 0.224, 0.225])\n ]),\n }\n \n return data_transforms",
"def _make_binary(self, input_topology, input_coordinate,\n output_binary, md_parameters, tag):\n\n output_topology = os.path.splitext(input_topology)[0] + '.mdp'\n self._pipeline.append(\n (\n f'grompp_{tag}',\n Gromacs_grompp(\n command_options={\n '-f': md_parameters,\n '-p': input_topology,\n '-c': input_coordinate,\n '-o': output_binary,\n '-maxwarn': 4,\n '-po': output_topology\n }\n )\n )\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Updates the arch with a new set of points, then recomputes panorex.
|
def update(self, arch=None):
if arch is not None:
self.set_arch(arch)
self.set_panorex(self.compute_panorex())
|
[
"def updatePoints(self, x, y):",
"def compute_panorex(self):\n return self.arch_handler.create_panorex(self.arch)",
"def update_base_arch(self, param_set):\n for i in range(len(self.pc_arg_val)):\n self.base_arch.config_label[self.param_set_labels[i]] = param_set[i]\n self.pc_arg_val[i][0].comp_args[self.pc_arg_val[i][1]] = param_set[i]\n self.pc_arg_val[i][0].clear_cache()",
"def update(self):\n\t\tfor p in self.panes:\n\t\t\tp.update()",
"def transform_points(self):\n for key in self.keypoints:\n if self.keypoints[key].index is not 8:\n self.keypoints[key].x = self.keypoints[key].x - self.keypoints[8].x\n self.keypoints[key].y = self.keypoints[8].y - self.keypoints[key].y\n self.keypoints[8].x = 0\n self.keypoints[8].y = 0",
"def do_transform_cycle(self):\n points_world = self.transform_points(self.camera_points, self.z)\n if points_world:\n self.publish_points(points_world)\n else:\n rospy.logwarn(\"No points published, List object is empty, could not transform\")",
"def update_sweep_points(self):\n\n # update self.sweep_points\n swpts = deepcopy(self.sweep_points)\n par_names = []\n vals = []\n for par in swpts.get_parameters(0):\n values = swpts[par]\n if np.unique(values[:3]).size > 1:\n # the sweep points are not repeated 3 times (for each\n # pair of base_ops)\n par_names += [par]\n vals += [np.repeat(values, 3)]\n self.sweep_points.update_property(par_names, values=vals)\n\n # update sweep points in preprocessed_task_list\n for task in self.preprocessed_task_list:\n swpts = task['sweep_points']\n for par in swpts.get_parameters(0):\n values = swpts[par]\n if np.unique(values[:3]).size > 1:\n swpts.update_property([par], values=[np.repeat(values, 3)])",
"def reset_points(self):\n self._container['points'] = {}",
"def do_transform_cycle(self):\n points_world = self.transform_points()\n if points_world:\n self.publish_points(points_world)\n else:\n rospy.logwarn(\"No points published, List object is empty, could not transform\")",
"def _adjust(self):\n y1, x1, y2, x2 = self.roi\n K = self.full_K.copy()\n P = self.full_P.copy()\n # Adjust K and P for binning and ROI\n if self._target_size is not None:\n self._binning_x = (x2 - x1) / self._target_size[0]\n self._binning_y = (y2 - y1) / self._target_size[1]\n K[0, 0] /= self._binning_x\n K[1, 1] /= self._binning_y\n K[0, 2] = (K[0, 2] - x1) / self._binning_x\n K[1, 2] = (K[1, 2] - y1) / self._binning_y\n P[0, 0] /= self._binning_x\n P[1, 1] /= self._binning_y\n P[0, 2] = (P[0, 2] - x1) / self._binning_x\n P[1, 2] = (P[1, 2] - y1) / self._binning_y\n self.K = K\n self.P = P\n self._width = x2 - x1\n self._height = y2 - y1\n self._aspect = 1.0 * self.width / self.height\n self._fovx = 2.0 * np.rad2deg(np.arctan(self.width / (2.0 * self.fx)))\n self._fovy = 2.0 * np.rad2deg(np.arctan(self.height / (2.0 * self.fy)))\n\n self.mapx = np.ndarray(shape=(self.height, self.width, 1),\n dtype='float32')\n self.mapy = np.ndarray(shape=(self.height, self.width, 1),\n dtype='float32')\n cv2.initUndistortRectifyMap(\n self.K, self.D, self.R, self.P,\n (self.width, self.height),\n cv2.CV_32FC1, self.mapx, self.mapy)",
"def _updateTransform(self):\n\t\tif PointsSetsIsEmpty(self.landmarkPointSets):\n\t\t\treturn\n\n\t\tnumberOfSets = NumberOfSets(self.landmarkPointSets)\n\t\tfixedPoints = vtkPoints()\n\t\tmovingPoints = vtkPoints()\n\t\tfixedPoints.SetNumberOfPoints(numberOfSets)\n\t\tmovingPoints.SetNumberOfPoints(numberOfSets)\n\n\t\tpointsetIndex = 0\n\t\tfor index in range(len(self.landmarkPointSets)):\n\t\t\tpointset = self.landmarkPointSets[index]\n\t\t\tif pointset[0] and pointset[1]:\n\t\t\t\tfixedPoint = pointset[0]\n\t\t\t\tmovingPoint = pointset[1]\n\t\t\t\t# Transform the point from the moving landmark with the original transform\n\t\t\t\ttransPoint = self.originalTransform.TransformPoint(movingPoint)\n\t\t\t\tfixedPoints.SetPoint(pointsetIndex, fixedPoint)\n\t\t\t\tmovingPoints.SetPoint(pointsetIndex, transPoint)\n\t\t\t\tpointsetIndex += 1\n\n\t\tlandmarkTransform = vtkLandmarkTransform()\n\t\tif self.landmarkTransformType == 0:\n\t\t\tlandmarkTransform.SetModeToRigidBody()\n\t\telif self.landmarkTransformType == 1:\n\t\t\tlandmarkTransform.SetModeToSimilarity()\n\t\telif self.landmarkTransformType == 2:\n\t\t\tlandmarkTransform.SetModeToAffine()\n\t\tlandmarkTransform.SetSourceLandmarks(fixedPoints)\n\t\tlandmarkTransform.SetTargetLandmarks(movingPoints)\n\t\tlandmarkTransform.Update()\n\n\t\ttransform = TransformWithMatrix(landmarkTransform.GetMatrix())\n\t\ttransform.Inverse()\n\t\t\n\t\ttransformation = self.multiWidget.transformations[-1]\n\t\tassert transformation.transformType == Transformation.TypeLandmark\n\t\ttransformation.transform = transform\n\t\tself.multiWidget.transformations[-1] = transformation\n\t\tself._updateLandmarkTransforms()",
"def update_pose_estimate(self, timestamp):\n self.normalize_particles()\n mean_x = 0\n mean_y = 0\n mean_theta = 0\n # Calculate the mean of the top se\n particle_cloud_majority = sorted(self.particle_cloud, key=lambda x: x.w, reverse=True)\n for particle in particle_cloud_majority[self.particles_to_incoporate_in_mean:]:\n mean_x += particle.x * particle.w\n mean_y += particle.y * particle.w\n mean_theta = particle.theta * particle.w\n mean_x /= self.particles_to_incoporate_in_mean\n mean_y /= self.particles_to_incoporate_in_mean\n mean_theta /= self.particles_to_incoporate_in_mean\n\n # Use particle methods to convert particle to pose.\n current_pose_particle = Particle(mean_x, mean_y, mean_theta)\n self.current_pose_estimate = current_pose_particle.as_pose()\n\n # Send out next map to odom transform with updated pose estimate.\n self.transform_helper.fix_map_to_odom_transform(self.current_pose_estimate, timestamp)",
"def set_points(self, points):\n self.discard_points()\n self.append_points(points)",
"def upstairs(self, centerX, centerZ, height):\n for i in range(int(self.cell / 2)):\n self.update_pin(centerX + i, centerZ, height)\n self.update_pin(centerX, centerZ + i, height)\n self.update_pin(centerX + int(self.cell / 2), centerZ - i, height)\n self.update_pin(centerX - i, centerZ + int(self.cell / 2), height)",
"def _generate_pores(self):\n Nx = self._Nx\n Ny = self._Ny\n Nz = self._Nz\n Lc = self._Lc\n Np = Nx*Ny*Nz\n ind = sp.arange(0,Np)\n self.set_pore_data(prop='numbering',data=ind)\n self.set_pore_info(label='all',locations=sp.ones_like(ind))\n pore_coords = Lc/2+Lc*sp.array(sp.unravel_index(ind, dims=(Nx, Ny, Nz), order='F'),dtype=sp.float64).T\n self.set_pore_data(prop='coords',data=pore_coords)",
"def fix_nodes_plane(self):\n # get the a1,a2,a3,a4 in terms of XYZ to transform xi to XYZ rep.\n a1 = np.array([2,-1,-1,0]); a2=np.array([-1,2,-1,0]); a3=np.array([-1,-1,2,0]); a4=np.array([0,0,0,1])\n repmat = np.array([self.X,self.Y,self.Z])\n repmat = np.transpose(repmat)\n repmat = np.linalg.inv(repmat) # [X|Y|Z]a = [ ]\n a1xyz = np.dot(repmat,a1); a2xyz=np.dot(repmat,a2); a3xyz=np.dot(repmat,a3); a4xyz=np.dot(repmat,a4)\n print(\"########### DEBUG ###########\")\n print(\"a1 = \"+str(a1xyz))\n print(\"a2 = \"+str(a2xyz))\n print(\"a3 = \"+str(a3xyz))\n print(\"a4 = \"+str(a4xyz))\n print(\"##############################\")\n for seg in self.segments:\n # find xi in XYZ rep\n n = a1xyz*xi[0] + a2xyz*xi[1] + a3xyz*xi[2] + a4xyz*xi[3]\n p0 = np.array([0.,0.,0.])\n for tmp_node in seg.vertices:\n p0 += np.array(tmp_node.coords)\n p0 /= len(seg.vertices)\n for iv in range(len(seg.vertices)):\n p1 = np.array(seg.vertices[iv].coords)\n tmp = p1-p0\n t = -(xi[0]*tmp[0]+xi[1]*tmp[1]+xi[2]*tmp[2])/(np.sum(xi**2))\n seg.vertices[iv].coords = list(p1+xi*t)\n print(\"############ DEBUG - fix_nodes_plane #############\")\n print(\"old = \"+str(p1))\n print(\"new = \"+str(seg.vertices[iv].coords))\n print(\"##################################################\")",
"def set_points(self, points):\n self.clear_points()\n self.add_points(points)\n return",
"def finish_loop(self):\n pub = rospy.Publisher('/ekf_pose', PoseStamped, queue_size=10)\n msg = PoseStamped()\n msg.header.frame_id = '/map'\n msg.header.stamp = rospy.Time().now()\n \n msg.pose.position = Point(self.X_est[0, 0], self.X_est[1, 0], self.X_est[2, 0])\n x, y, z, w = get_quaternion(self.X_est[3, 0], self.X_est[4, 0], self.X_est[5, 0])\n msg.pose.orientation = Quaternion(x, y, z, w)\n pub.publish(msg)\n\n self.X_prev = self.X_est\n self.P_prev = self.P_est",
"def update_objects(self):\n\t\tself.update_projectiles()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Applies offset to this Arch object
|
def offset(self, amount):
offsetted_arch = apply_offset_to_arch(self.arch, amount, self.poly[0])
self.update(offsetted_arch)
|
[
"def update_offset(self, new_offset):\r\n self.offset = new_offset",
"def offset(self, value):\n self._offset = value",
"def test_offset_adjustment(self):\n arch = get_arch('arm')\n obj1 = ObjectFile(arch)\n obj1.get_section('code', create=True).add_data(bytes(59))\n obj2 = ObjectFile(arch)\n obj2.get_section('code', create=True).add_data(bytes(59))\n obj2.add_symbol(1, 'x', 'local', 5, 'code', 'object', 0)\n obj2.debug_info = debuginfo.DebugInfo()\n loc = SourceLocation('a.txt', 1, 1, 22)\n obj2.debug_info.add(\n debuginfo.DebugLocation(\n loc,\n address=debuginfo.DebugAddress(1)))\n obj = link([obj1, obj2], debug=True)\n\n # Take into account alignment! So 60 + 5 = 65.\n self.assertEqual(0, obj.debug_info.locations[0].address.symbol_id)\n self.assertEqual(65, obj.get_symbol_id_value(0))",
"def shift(self, offset):\n new_origin = offset\n delta = (new_origin - self.origin)\n self.origin = new_origin\n for peak in self.peaklist:\n peak.mz += delta\n return self",
"def offset(self, offset):\n\n # If range is empty it can't be offset\n if not self:\n return self\n\n offset_type = self.type if self.offset_type is None else self.offset_type\n\n if offset is not None and not isinstance(offset, offset_type):\n raise TypeError(\n f\"Invalid type for offset '{offset.__class__.__name__!r}'\"\n f\" expected '{offset_type.__name__}'\"\n )\n\n lower = None if self.lower is None else self.lower + offset\n upper = None if self.upper is None else self.upper + offset\n\n return self.replace(lower=lower, upper=upper)",
"def __sub__(self, offset):\n return self + -offset",
"def reset_offset(self):\n self.offset = np.array([0, 0, 0])",
"def __iadd__(self, offset):\n self.ptr.value += offset\n return self",
"def update_netoffset(self, deltaoffset):\n bboxes = self.bboxes.get_value()\n bboxes[:, :, :2] = bboxes[:, :, :2] + deltaoffset",
"def apply_offset(star_catalog, params):\n \n stars = SkyCoord(star_catalog['RA'], star_catalog['DEC'], unit=\"deg\")\n \n new_stars = SkyCoord(stars[:].ra.value + params['delta_ra'], \n stars[:].dec.value + params['delta_dec'], unit=\"deg\")\n \n star_catalog['RA'] = new_stars.ra.value\n star_catalog['DEC'] = new_stars.dec.value\n \n return star_catalog",
"def offset_references(self, offset: int) -> None:\n self.stream_dict.offset_references(offset)\n self.object_number += offset",
"def offset(self):\n # TODO: only works isotropic sigma\n if getattr(self, \"_offset\", None) is None:\n r_AM = self.electrode_separations[\"AM\"]\n r_AN = self.electrode_separations[\"AN\"]\n r_BM = self.electrode_separations[\"BM\"]\n r_BN = self.electrode_separations[\"BM\"]\n self._offset = np.r_[r_AM, r_AN, r_BM, r_BN]\n return self._offset",
"def set_offset(self):\n self.rotor_offset[-1] += 1\n self.rotor_offset = [offset % 26 for offset in self.rotor_offset]",
"def _ahop_compute_offset(self):\n\n with FortranFile(self._fname) as fpu:\n self._headers = fpu.read_attrs(self._header_attributes)\n\n nhalos = self._headers[\"nhalos\"]\n nsubs = self._headers[\"nsubs\"]\n\n Nskip = len(self._halo_attributes)\n if self._read_halo_data:\n Nskip += len(self._halo_attributes_contam)\n\n for _ in range(nhalos + nsubs):\n ipos = fpu.tell()\n fpu.skip(2) # number + ids of parts\n halo_ID = fpu.read_int()\n fpu.skip(Nskip)\n\n # Fill-in data\n dummy = DummyHalo()\n dummy.properties[\"file_offset\"] = ipos\n self._halos[halo_ID] = dummy",
"def apply_annotation(self, annotation):\n\n vs = self.copy()\n vs._merge_si(annotation.region_id, annotation.region_base_addr, annotation.offset)\n return vs",
"def load_by_offset(self, offset, size):\n raise NotImplementedError()",
"def rim_offset(self, rim_offset):\n\n self._rim_offset = rim_offset",
"def apply_offset_event_sequencing(self, offset_event_search_order):\n pass",
"def offset(self, x=None, y=None):\n old_x = self.active.image.offset[0]\n old_y = self.active.image.offset[1]\n changed = False\n\n if x == 0:\n self.active.image.offset[0] = 0\n changed = True\n elif x is not None:\n self.active.image.offset[0] += x\n\n if not config.negative_offset_allowed:\n if self.active.image.offset[0] < 0:\n self.active.image.offset[0] = 0 # Limit to 0\n\n changed = True\n\n if y == 0:\n self.active.image.offset[1] = 0\n changed = True\n elif y is not None:\n self.active.image.offset[1] += y\n\n if not config.negative_offset_allowed:\n if self.active.image.offset[1] < 0:\n self.active.image.offset[1] = 0 # Limit to 0\n\n changed = True\n\n if changed is True:\n logging.debug(\"Active Image offset changed to: %s\" % str(self.active.image.offset))\n self.on_change()\n\n if old_x != self.active.image.offset[0] or old_y != self.active.image.offset[1]:\n return 1\n else:\n return 0\n else:\n return self.active.image.offset"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates a copy of this Arch object
|
def copy(self):
arch = self.arch.copy()
return Arch(self.arch_handler, arch)
|
[
"def copy(self):\n copy = Atom(atom_type=self.type,\n position=self.position,\n if_pos=self.if_pos,\n charge=self.charge) # add more if necessary\n # carries no neighbors, hybridization, etc\n return copy",
"def create_copy(self):\n print('WARNING: Implementation and testing still in progress!!!!')\n\n new_obj = self.__class__()\n new_obj.data = copy.deepcopy(self.data)\n new_obj.topography = copy.deepcopy(self.topography)\n new_obj.electrode_positions = copy.deepcopy(\n self.electrode_positions)\n\n # what about the log?\n print('WARNING: Journal and log is not copied!')\n\n return new_obj",
"def copy_of(self):\n theatom = Atom()\n theatom.atomname = self.atomname\n theatom.residue = self.residue\n theatom.coordinates = self.coordinates.copy_of()\n theatom.element = self.element\n theatom.pdb_index = self.pdb_index\n theatom.line = self.line\n theatom.atomtype = self.atomtype\n theatom.indices_of_atoms_connecting = self.indices_of_atoms_connecting[:]\n theatom.charge = self.charge\n theatom.resid = self.resid\n theatom.chain = self.chain\n theatom.structure = self.structure\n theatom.comment = self.comment\n\n return theatom",
"def copy(self):\n\t\tnewAttr = AtomAttributes(None, None, None, None, None, None)\n\t\tnewAttr.__dict__ = self.__dict__.copy()\n\t\treturn newAttr",
"def copy(self,mode='NORMAL'):\n return Tree(self.tree,self.shot,mode)",
"def __copy__(self):\n out = type(self)(self.name, self.data, self.dimensions[:],\n self.attributes.copy())\n out.id = self.id\n return out",
"def copy(self):\n chart = Chart.__new__(Chart)\n chart.date = self.date\n chart.pos = self.pos\n chart.hsys = self.hsys\n chart.objects = self.objects.copy()\n chart.houses = self.houses.copy()\n chart.angles = self.angles.copy()\n return chart",
"def clone(self):\n return self.__clone(True)",
"def clone(self, name, **attr):\n\t\tobj = copy.deepcopy(self._objects.get(name))\n\t\tobj.__dict__.update(attr)\n\t\treturn obj",
"def copy(self):\n bin_copy = self.bin.copy()\n obj_copy = [o for o in self.objects]\n next_obj_copy = self.next_object.copy()\n # new_state = State(self.bin, self.objects, self.next_object)\n new_state = State(bin_copy, obj_copy, next_obj_copy)\n return new_state",
"def copy(self):\n return Atmosphere(\n wavelength=self.wl.copy(),\n transmission=self.transmission.copy(),\n mask=self.mask.copy(),\n std=self.std.copy(),\n )",
"def copy(self):\n new = Shavtsak(self.soldiers)\n new.days = self.days.copy()\n new.watches = self.watches.copy()\n new.schedule = {day: {watch: self.schedule[day][watch].copy() for watch in self.watches} for day in self.days}\n new.reduced = self.reduced.copy()\n new.name = '' + self.name\n return new",
"def copy(self):\n return Struct(self.__dict__.copy())",
"def copy(self) -> \"Atoms\":\n return deepcopy(self)",
"def copy(self):\r\n U = CatMorphism(self.name,self.source,self.target)\r\n U.set_mapping_matrix(self.get_mapping_matrix())\r\n\r\n return U",
"def copy(self):\n if self.data is not None:\n _data = self.data.copy()\n else:\n _data = None\n return self.__class__(data=_data, header=self.header.copy())",
"def clone(self):\n return Myinfo(self._info, self._build_growth)",
"def __copy__(self):\n assignment = Assignment(self)\n assignment._cached_hash = self._cached_hash\n return assignment",
"def copy(self):\n\t\treturn Account(self._init, self._option_posi, self.option, self._one_side_cost)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the additional arguments of test's process.
|
def SetTestArgs(self, args):
if not args:
return
# The generated xctest is always empty. So set it directly.
self.SetXctestrunField('CommandLineArguments', args)
|
[
"def set_arguments(self):\r\n pass",
"def addArgs(self):\n \n self.createArgument('--fork', self.fork, 1, 'Fork to background', action='store_true')\n self.createArgument('--run', self.run, 1, 'Execute run on remote server (to be used with --client argument)', action='store_true')\n self.createArgument('--stop', self.stop, 1, 'Stop previous job', action='store_true')\n self.createArgument('--debug', self.debug, 1, 'Debugging mode', action='store_true')\n self.createArgument('--socket', self.setSocket, '', 'use TCP or UDP connection over ethernet/wireless, default TCP, available TCP, UDP, RFC (bluetooth)')\n self.createArgument('--client', self.client, 1, 'Connect to comma separated client addresses')\n self.createArgument('--server', self.bindMode, 1, 'turn into a server mode that handles instructions', action='store_true')\n self.createArgument('--target', self.selectTarget, '', 'target adress (bluetooth mac or ip adress over ethernet/wireless)')\n self.createArgument('--port', self.selectPort, 80, 'destination port')\n self.createArgument('--bytes', self.packetSize, 80, 'number of bytes to send in one packet')",
"def _add_arguments(self):\n #the base arguments\n self.parser.add_argument(\"-d\", \"--debug\",\n help=\"Display debugging messages.\",\n action=\"store_true\",\n default=False, dest=\"debug\")\n \n self.parser.add_argument(\"--pudb\",\n help=\"Enable pudb interactive debugging.\",\n action=\"store_true\",\n default=False, dest='pudb')\n\n self.parser.add_argument(\"--pdb\",\n help=\"Enable python's debugger\",\n action=\"store_true\",\n default=False, dest='pdb')\n \n\n self.parser.add_argument(\"-s\", \"--silent\",\n help=\"Turn off screen output.\",\n action=\"store_true\", default=False,\n dest='silent')\n return",
"def add_argument_cmd(self, *args, **kwargs):\n pass",
"def test_add_shared_args(self):\n parser = argparse.ArgumentParser()\n add_shared_args(parser)\n args = parser.parse_args(['--version', '-v'])\n self.assertTrue(args.version)\n self.assertTrue(args.verbose)",
"def _subprocess_popen_arguments(self, argv, **kwargs):\n del argv\n del kwargs\n\n raise NotImplementedError()",
"def setup_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-d\",\n \"--debug\", \n action=\"store_true\", \n help=\"Sets debug mode\"\n ) \n parser.add_argument(\n \"-f\",\n \"--filename\",\n action=\"store\",\n type=str,\n default=LOG_LOC,\n help=\"Location where the log file will be located\"\n )\n return parser.parse_args()",
"def set_script_args(self, args):\n global _env_args_str\n argstr = pickle.dumps(args)\n os.environ[_env_args_str] = argstr",
"def setInitialArguments(self):\r\n self.argparse.add_argument(\r\n '-s', '--start-dir', default=None,\r\n help=\"Directory to start discovery ('.' default)\")\r\n self.argparse.add_argument(\r\n '-t', '--top-level-directory', '--project-directory',\r\n help='Top level directory of project (defaults to start dir)')\r\n self.argparse.add_argument(\r\n '--config', '-c', nargs='?', action='append',\r\n default=['unittest.cfg', 'nose2.cfg'],\r\n help=\"Config files to load, if they exist. ('unittest.cfg' \"\r\n \"and 'nose2.cfg' in start directory default)\")\r\n self.argparse.add_argument(\r\n '--no-user-config', action='store_const',\r\n dest='user_config', const=False, default=True,\r\n help=\"Do not load user config files\")\r\n self.argparse.add_argument(\r\n '--no-plugins', action='store_const',\r\n dest='load_plugins', const=False, default=True,\r\n help=\"Do not load any plugins. Warning: nose2 does not \"\r\n \"do anything if no plugins are loaded\")\r\n self.argparse.add_argument(\r\n '--plugin', action='append',\r\n dest='plugins', default=[],\r\n help=\"Load this plugin module.\")\r\n self.argparse.add_argument(\r\n '--exclude-plugin', action='append',\r\n dest='exclude_plugins', default=[],\r\n help=\"Do not load this plugin module\")\r\n self.argparse.add_argument(\r\n '--verbose', '-v', action='count', default=0, help=\"print test case names and statuses\")\r\n self.argparse.add_argument('--quiet', action='store_const',\r\n dest='verbose', const=0)\r\n self.argparse.add_argument(\r\n '--log-level', default=logging.WARN,\r\n help='Set logging level for message logged to console.')",
"def addCommonArguments(self):\n pass",
"def test_runner_args_only_when_set(runner):\n\n conf = Config(test_runner=runner)\n assert \"runner_args\" not in conf._metadata\n\n conf2 = Config(test_runner=runner, runner_args=[\"-vv\", \"--pdb\"])\n assert conf2._metadata.get(\"runner_args\") == [\"-vv\", \"--pdb\"]",
"def setup_args():\n parser = ParlaiParser(False, False)\n parser_grp = parser.add_argument_group('Browser Chat')\n parser_grp.add_argument(\n '--port', default=35496, type=int, help='Port used by the web socket (run.py)'\n )\n parser_grp.add_argument(\n '--host',\n default='0.0.0.0',\n type=str,\n help='Host from which allow requests, use 0.0.0.0 to allow all IPs',\n )\n parser_grp.add_argument(\n '--serving_port',\n default=8080,\n type=int,\n help='Port used to configure the server',\n )\n\n return parser.parse_args()",
"def AddProcessingOptions(self, argument_group):\n argument_helper_names = ['temporary_directory', 'zeromq']\n if self._CanEnforceProcessMemoryLimit():\n argument_helper_names.append('process_resources')\n helpers_manager.ArgumentHelperManager.AddCommandLineArguments(\n argument_group, names=argument_helper_names)\n\n argument_group.add_argument(\n '--worker_memory_limit', '--worker-memory-limit',\n dest='worker_memory_limit', action='store', type=int,\n metavar='SIZE', help=(\n 'Maximum amount of memory (data segment and shared memory) '\n 'a worker process is allowed to consume in bytes, where 0 '\n 'represents no limit. The default limit is 2147483648 (2 GiB). '\n 'If a worker process exceeds this limit it is killed by the main '\n '(foreman) process.'))\n\n argument_group.add_argument(\n '--worker_timeout', '--worker-timeout', dest='worker_timeout',\n action='store', type=float, metavar='MINUTES', help=(\n 'Number of minutes before a worker process that is not providing '\n 'status updates is considered inactive. The default timeout is '\n '15.0 minutes. If a worker process exceeds this timeout it is '\n 'killed by the main (foreman) process.'))",
"def add_arguments(self, parser):\n parser.add_argument(\n \"--datetime\",\n action=\"store\",\n help=\"ISO datetime used for calculating eligibility. Defaults to now. Currently only used for backdating command runs in tests.\",\n )\n parser.add_argument(\n \"--global_userinfo\",\n action=\"store\",\n help=\"specify Wikipedia global_userinfo data. Defaults to fetching live data. Currently only used for faking command runs in tests.\",\n )",
"def add_standard_args(self):\n self.add_argument(\"-v\", \"--verbose\",\n help=\"Set log verbosity to True, nominal debug level.\", action=\"store_true\")\n self.add_argument(\"--verbosity\",\n help=\"Set log verbosity to a specific level: 0..100.\", type=int, default=0)\n self.add_argument(\"--dump-cmdline\", action=\"store_true\",\n help=\"Dump the command line parameters used to start the script to the log.\")\n self.add_argument(\"-R\", \"--readonly-cache\", action=\"store_true\",\n help=\"Don't modify the CRDS cache. Not compatible with options which implicitly modify the cache.\")\n self.add_argument('-I', '--ignore-cache', action='store_true', dest=\"ignore_cache\",\n help=\"Download required files even if they're already in the cache.\")\n self.add_argument(\"-V\", \"--version\",\n help=\"Print the software version and exit.\", action=\"store_true\")\n self.add_argument(\"-J\", \"--jwst\", dest=\"jwst\", action=\"store_true\",\n help=\"Force observatory to JWST for determining header conventions.\"\"\")\n self.add_argument(\"-H\", \"--hst\", dest=\"hst\", action=\"store_true\",\n help=\"Force observatory to HST for determining header conventions.\"\"\")\n self.add_argument(\"--roman\", dest=\"roman\", action=\"store_true\",\n help=\"Force observatory to Roman for determining header conventions.\"\"\")\n self.add_argument(\"--stats\", action=\"store_true\",\n help=\"Track and print timing statistics.\")\n self.add_argument(\"--profile\",\n help=\"Output profile stats to the specified file.\", type=str, default=\"\")\n self.add_argument(\"--log-time\", action=\"store_true\",\n help=\"Add date/time to log messages.\")\n self.add_argument(\"--pdb\",\n help=\"Run under pdb.\", action=\"store_true\")\n self.add_argument(\"--debug-traps\",\n help=\"Bypass exception error message traps and re-raise exception.\", action=\"store_true\")",
"def add_arguments(self, parser):\n parser.add_argument('start_index', type=int)",
"def change_command_line_arguments(self, accession, params):\n self.invoke('changeCommandLineArguments', accession, params if isinstance(params, list) else [params])",
"def test_argv(self):\n script_name = 'print_argv'\n script_path = join_pwb_tests_path(script_name + '.py')\n script_opts = ['-help']\n command = [script_path] + script_opts\n without_global_args = execute_pwb(command)\n with_no_global_args = execute_pwb(['-maxlag:5'] + command)\n self.assertEqual(without_global_args['stdout'],\n with_no_global_args['stdout'])\n self.assertEqual(without_global_args['stdout'].rstrip(),\n str([script_name] + script_opts))",
"def _cli_extras(self):\n kwargs = self.kwargs or {}\n extras = [\n \"--silent\",\n ]\n for k, v in kwargs.items():\n extras.append(\"--\" + k.replace(\"_\", \"-\"))\n extras.append(str(v))\n\n # For the high/low memory test cases of NTN, SE, etc.\n if self.training_loop_kwargs and \"automatic_memory_optimization\" in self.training_loop_kwargs:\n automatic_memory_optimization = self.training_loop_kwargs.get(\"automatic_memory_optimization\")\n if automatic_memory_optimization is True:\n extras.append(\"--automatic-memory-optimization\")\n elif automatic_memory_optimization is False:\n extras.append(\"--no-automatic-memory-optimization\")\n # else, leave to default\n\n extras += [\n \"--number-epochs\",\n self.train_num_epochs,\n \"--embedding-dim\",\n self.embedding_dim,\n \"--batch-size\",\n self.train_batch_size,\n ]\n extras.extend(self.cli_extras)\n\n # Make sure that inverse triples are created if create_inverse_triples=True\n if self.create_inverse_triples:\n extras.append(\"--create-inverse-triples\")\n\n extras = [str(e) for e in extras]\n return extras",
"def add_simple_args(self):\n self.ctrl_parser.add_argument(\"-V\", \"--version\", action=\"version\", version='0.1.0',\n help='Provides the version of the tool')\n self.ctrl_parser.add_argument(\"-v\", \"--verbosity\", action=\"count\", help=\"increase output verbosity\")\n self.ctrl_parser.add_argument(\"-i\", action=InteractiveCli, nargs=0, help=\"Start in interactive mode\")\n self.ctrl_parser.add_argument(\"-t\", \"--timeout\", type=float,\n help=\"Provides a timeout for the command\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the additional environment variables of app under test's process.
|
def SetAppUnderTestEnvVars(self, env_vars):
    """Sets additional environment variables for the app under test's process.

    Merges the given variables into any already present in the xctestrun
    file; a new value overrides an existing one for the same key.

    Args:
        env_vars: dict. Environment variables to add (key and value are
            strings). A falsy value is a no-op.
    """
    if not env_vars:
        return
    # XCUITest targets carry a dedicated key for the UI-target app; every
    # other test type shares the generic key.
    field = ('UITargetAppEnvironmentVariables'
             if self._test_type == ios_constants.TestType.XCUITEST
             else 'EnvironmentVariables')
    merged = self.GetXctestrunField(field) or {}
    merged.update(env_vars)
    self.SetXctestrunField(field, merged)
|
[
"def prepare_environment_variables(self):\n for env_variable in self.environment_variables:\n for k, v in env_variable.iteritems():\n os.environ[k] = v",
"def setup_environment():\n global GIVEN_ENV\n GIVEN_ENV['env'] = env.copy()",
"def set_envs(self):\n for k, v in self._envs.iteritems():\n os.environ[k] = v",
"def setup_environment():",
"def set_environment_variables(self):\n # pylint:disable=protected-access\n # Need to call sys.__getframe() to get the filename and method/func\n # for logging information.\n\n # Used for logging.\n cur_filename = sys._getframe().f_code.co_filename\n cur_function = sys._getframe().f_code.co_name\n self.logger.info(\"INFO|:\" + cur_function + '|' + cur_filename + '| ' +\n \"Setting all environment variables specified in \"\n \"the MET config file...\")\n\n # Set the environment variables\n self.add_env_var(b'MODEL_NAME', str(self.ps_dict['MODEL_NAME']))\n\n regrid_to_grid = str(self.ps_dict['REGRID_TO_GRID'])\n self.add_env_var(b'REGRID_TO_GRID', regrid_to_grid)\n os.environ['REGRID_TO_GRID'] = regrid_to_grid\n\n # MET accepts a list of values for POINT_STAT_POLY, POINT_STAT_GRID,\n # POINT_STAT_STATION_ID, and POINT_STAT_MESSAGE_TYPE. If these\n # values are not set in the MET+ config file, assign them to \"[]\" so\n # MET recognizes that these are empty lists, resulting in the\n # expected behavior.\n poly_str = str(self.ps_dict['POINT_STAT_POLY'])\n if not poly_str:\n self.add_env_var(b'POINT_STAT_POLY', \"[]\")\n else:\n poly = poly_str.replace(\"\\'\", \"\\\"\")\n self.add_env_var(b'POINT_STAT_POLY', poly)\n\n grid_str = str(self.ps_dict['POINT_STAT_GRID'])\n if not grid_str:\n self.add_env_var(b'POINT_STAT_GRID', \"[]\")\n else:\n # grid = grid_str.replace(\"\\'\", \"\\\"\")\n grid = '\"' + grid_str + '\"'\n self.add_env_var(b'POINT_STAT_GRID', grid)\n\n sid_str = str(self.ps_dict['POINT_STAT_STATION_ID'])\n if not sid_str:\n self.add_env_var(b'POINT_STAT_STATION_ID', \"[]\")\n else:\n sid = sid_str.replace(\"\\'\", \"\\\"\")\n self.add_env_var(b'POINT_STAT_STATION_ID', sid)\n\n tmp_message_type = str(self.ps_dict['POINT_STAT_MESSAGE_TYPE'])\n # Check for \"empty\" POINT_STAT_MESSAGE_TYPE in MET+ config file and\n # set the POINT_STAT_MESSAGE_TYPE environment variable appropriately.\n if not tmp_message_type:\n self.add_env_var('POINT_STAT_MESSAGE_TYPE', 
\"[]\")\n else:\n # Not empty, set the POINT_STAT_MESSAGE_TYPE environment\n # variable to the\n # message types specified in the MET+ config file.\n tmp_message_type = str(tmp_message_type).replace(\"\\'\", \"\\\"\")\n # Remove all whitespace\n tmp_message_type = ''.join(tmp_message_type.split())\n self.add_env_var(b'POINT_STAT_MESSAGE_TYPE', tmp_message_type)\n\n # Retrieve all the fcst and obs field values (name, level, options)\n # from the MET+ config file, passed into the MET config file via\n # the FCST_FIELD and OBS_FIELD environment variables.\n all_vars_list = util.parse_var_list(self.p)\n met_fields = util.reformat_fields_for_met(all_vars_list, self.logger)\n\n self.add_env_var(b'FCST_FIELD', met_fields.fcst_field)\n self.add_env_var(b'OBS_FIELD', met_fields.obs_field)\n\n # Set the environment variables corresponding to the obs_window\n # dictionary.\n self.add_env_var(b'OBS_WINDOW_BEGIN',\n str(self.ps_dict['OBS_WINDOW_BEGIN']))\n self.add_env_var(b'OBS_WINDOW_END', str(self.ps_dict['OBS_WINDOW_END']))\n\n self.logger.debug(\"\")\n self.logger.debug(\"COPYABLE ENVIRONMENT FOR NEXT COMMAND: \")\n self.print_env_copy([\"MODEL_NAME\",\"FCST_FIELD\",\"POINT_STAT_MESSAGE_TYPE\",\n \"OBS_WINDOW_BEGIN\",\"OBS_WINDOW_END\",\"POINT_STAT_GRID\",\n \"POINT_STAT_POLY\"])\n self.logger.debug(\"\")",
"def set_env_vars(self):\n env_file = f\"{self.ipppssoot}_cal_env.txt\"\n if os.path.isfile(env_file):\n self.divider(f\"processing env file {env_file}\")\n with open(env_file, \"r\") as f:\n for line in f.readlines():\n try:\n key, value = line.split(\"=\")\n except ValueError:\n log.info(f\"{line} is not a valid key=value pair\")\n continue\n os.environ[key.strip()] = value.strip()\n log.info(f\"setting {key}={value} in processing env\")\n return",
"def set_env_vars(self):\n # For each server/host entry, write the appropriate config file\n write_data = {utils.MAYA_ENV: '%s:%s' % (self.maya_host, self.maya_port),\n utils.ZBRUSH_ENV: '%s:%s' % (self.zbrush_host, self.zbrush_port),\n utils.SHARED_DIR_ENV: self.shared_dir}\n for env_var_key in write_data:\n os.environ[env_var_key] = write_data[env_var_key]",
"def test_prepare_environment(self):\n pass",
"def set_env_vars(self):\n # pylint:disable=protected-access\n # sys._getframe is a legitimate way to access the current\n # filename and method.\n # Used for logging information\n cur_filename = sys._getframe().f_code.co_filename\n cur_function = sys._getframe().f_code.co_name\n\n self.logger.debug(': Setting environment variables that will be' +\n ' used by MET...')\n\n # For all cases below, we need to do some pre-processing so that\n # Python will use \" and not ' because currently MET doesn't\n # support single-quotes.\n\n # INIT_BEG, INIT_END\n # pull out YYYYMMDD from INIT_BEG/END\n tmp_init_beg = self.tcp_dict['INIT_BEG'][0:8]\n tmp_init_end = self.tcp_dict['INIT_END'][0:8]\n\n if not tmp_init_beg:\n self.add_env_var(b'INIT_BEG', \"\")\n else:\n init_beg = str(tmp_init_beg).replace(\"\\'\", \"\\\"\")\n init_beg_str = ''.join(init_beg.split())\n self.add_env_var(b'INIT_BEG', str(init_beg_str))\n\n if not tmp_init_end:\n self.add_env_var(b'INIT_END', \"\")\n else:\n init_end = str(tmp_init_end).replace(\"\\'\", \"\\\"\")\n init_end_str = ''.join(init_end.split())\n self.add_env_var(b'INIT_END', str(init_end_str))\n\n # INIT_INCLUDE and INIT_EXCLUDE\n # Used to set init_inc in \"TC_PAIRS_CONFIG_FILE\"\n tmp_init_inc = self.tcp_dict['INIT_INCLUDE']\n if not tmp_init_inc:\n self.add_env_var('INIT_INCLUDE', \"[]\")\n else:\n # Not empty, set the environment variable to the\n # value specified in the MET+ config file after removing whitespace\n # and replacing ' with \".\n init_inc = str(tmp_init_inc).replace(\"\\'\", \"\\\"\")\n init_inc_str = ''.join(init_inc.split())\n self.add_env_var('INIT_INCLUDE', str(init_inc_str))\n\n tmp_init_exc = self.tcp_dict['INIT_EXCLUDE']\n if not tmp_init_exc:\n # Empty, MET is expecting [] to indicate all models are\n # to be included\n self.add_env_var('INIT_EXCLUDE', \"[]\")\n else:\n # Replace ' with \" and remove whitespace\n init_exc = str(tmp_init_exc).replace(\"\\'\", \"\\\"\")\n init_exc_str = ''.join(init_exc.split())\n 
self.add_env_var('INIT_EXCLUDE', str(init_exc_str))\n\n # MODEL\n tmp_model = self.tcp_dict['MODEL']\n if not tmp_model:\n # Empty, MET is expecting [] to indicate all models are to be\n # included\n self.add_env_var('MODEL', \"[]\")\n else:\n # Replace ' with \" and remove whitespace\n model = str(tmp_model).replace(\"\\'\", \"\\\"\")\n model_str = ''.join(model.split())\n self.add_env_var(b'MODEL', str(model_str))\n\n # STORM_ID\n tmp_storm_id = self.tcp_dict['STORM_ID']\n if not tmp_storm_id:\n # Empty, use all storm_ids, indicate this to MET via '[]'\n self.add_env_var('STORM_ID', \"[]\")\n else:\n # Replace ' with \" and remove whitespace\n storm_id = str(tmp_storm_id).replace(\"\\'\", \"\\\"\")\n storm_id_str = ''.join(storm_id.split())\n self.add_env_var(b'STORM_ID', str(storm_id_str))\n\n # BASIN\n tmp_basin = self.tcp_dict['BASIN']\n if not tmp_basin:\n # Empty, we want all basins. Send MET '[]' to indicate that\n # we want all the basins.\n self.add_env_var('BASIN', \"[]\")\n else:\n # Replace any ' with \" and remove whitespace.\n basin = str(tmp_basin).replace(\"\\'\", \"\\\"\")\n basin_str = ''.join(basin.split())\n self.add_env_var(b'BASIN', str(basin_str))\n\n # CYCLONE\n tmp_cyclone = self.tcp_dict['CYCLONE']\n if not tmp_cyclone:\n # Empty, use all cyclones, send '[]' to MET.\n self.add_env_var('CYCLONE', \"[]\")\n else:\n # Replace ' with \" and get rid of any whitespace\n cyclone = str(tmp_cyclone).replace(\"\\'\", \"\\\"\")\n cyclone_str = ''.join(cyclone.split())\n self.add_env_var(b'CYCLONE', str(cyclone_str))\n\n # STORM_NAME\n tmp_storm_name = self.tcp_dict['STORM_NAME']\n if not tmp_storm_name:\n # Empty, equivalent to 'STORM_NAME = \"[]\"; in MET config file,\n # use all storm names.\n self.add_env_var('STORM_NAME', \"[]\")\n else:\n storm_name = str(tmp_storm_name).replace(\"\\'\", \"\\\"\")\n storm_name_str = ''.join(storm_name.split())\n self.add_env_var(b'STORM_NAME', str(storm_name_str))\n\n # Valid time window variables\n 
tmp_valid_beg = self.tcp_dict['VALID_BEG']\n tmp_valid_end = self.tcp_dict['VALID_END']\n\n if not tmp_valid_beg:\n self.add_env_var(b'VALID_BEG', \"\")\n else:\n valid_beg = str(tmp_valid_beg).replace(\"\\'\", \"\\\"\")\n valid_beg_str = ''.join(valid_beg.split())\n self.add_env_var(b'VALID_BEG', str(valid_beg_str))\n\n if not tmp_valid_end:\n self.add_env_var(b'VALID_END', \"\")\n else:\n valid_end = str(tmp_valid_end).replace(\"\\'\", \"\\\"\")\n valid_end_str = ''.join(valid_end.split())\n self.add_env_var(b'VALID_END', str(valid_end_str))\n\n # DLAND_FILE\n tmp_dland_file = self.tcp_dict['DLAND_FILE']\n self.add_env_var(b'DLAND_FILE', str(tmp_dland_file))",
"def setup_env(app_dir, app_id, version, module_id, remote_api=False):\n # GCS library behaves differently when running under remote_api. It uses\n # SERVER_SOFTWARE to figure this out. See cloudstorage/common.py, local_run().\n if remote_api:\n os.environ['SERVER_SOFTWARE'] = 'remote_api'\n else:\n os.environ['SERVER_SOFTWARE'] = 'Development yo dawg/1.0'\n if app_dir:\n app_id = app_id or Application(app_dir).app_id\n version = version or 'default-version'\n if app_id:\n os.environ['APPLICATION_ID'] = app_id\n if version:\n os.environ['CURRENT_VERSION_ID'] = '%s.%d' % (\n version, int(time.time()) << 28)\n if module_id:\n os.environ['CURRENT_MODULE_ID'] = module_id",
"async def test_process_env() -> None:\n env = os.environ.copy()\n env.update({\"foo\": \"bar\"})\n cmd = [sys.executable, \"-c\", \"import os; print(os.environ['foo'])\"]\n result = await run_process(cmd, env=env)\n assert result.stdout.decode().strip() == env[\"foo\"]",
"def test_environment(self):\n pass",
"def extend_env(extra_env):\n env = os.environ.copy()\n env.update(extra_env)\n return env",
"def test_environ(self):\n return create_environ('/test', None)",
"def load_environment_variables():\n config_json = json.load(open('settings/config.json'))\n\n for key in config_json.keys():\n if key not in os.environ:\n os.environ[key] = config_json[key]",
"def environInject(shellName):",
"def patch_env(self, **kwargs):\n new_shell = self._shell.clone()\n for key, value in kwargs.items():\n new_shell.setenv(key, value)\n return attr.evolve(self, shell=new_shell)",
"def set_env():\n from kolibri import dist as kolibri_dist # noqa\n\n monkey_patch_collections()\n\n monkey_patch_translation()\n\n sys.path = [os.path.realpath(os.path.dirname(kolibri_dist.__file__))] + sys.path\n\n # Add path for c extensions to sys.path\n prepend_cext_path(os.path.realpath(os.path.dirname(kolibri_dist.__file__)))\n\n # This was added in\n # https://github.com/learningequality/kolibri/pull/580\n # ...we need to (re)move it /benjaoming\n # Force python2 to interpret every string as unicode.\n if sys.version[0] == \"2\":\n reload(sys) # noqa\n sys.setdefaultencoding(\"utf8\")\n\n # Dynamically add the path of `py2only` to PYTHONPATH in Python 2 so that\n # we only import the `future` and `futures` packages from system packages when\n # running with Python 3. Please see `build_tools/py2only.py` for details.\n sys.path = sys.path + [\n os.path.join(\n os.path.realpath(os.path.dirname(kolibri_dist.__file__)), \"py2only\"\n )\n ]\n\n # Set default env\n for key, value in ENVIRONMENT_VARIABLES.items():\n if \"default\" in value:\n os.environ.setdefault(key, value[\"default\"]())",
"def test_noenv():\n if not os.path.exists(\"/proc/self/environ\"):\n pytest.skip(\"'/proc/self/environ' not available\")\n\n env = os.environ.copy()\n env[\"SPT_TESTENV\"] = \"testenv\"\n rv = run_script(\n \"\"\"\nimport os\nos.environ['SPT_NOENV'] = \"1\"\n\ncmdline_len = len(open('/proc/self/cmdline').read())\nprint(cmdline_len)\nprint('SPT_TESTENV=testenv' in open('/proc/self/environ').read())\n\nimport setproctitle\nsetproctitle.setproctitle('X' * cmdline_len * 10)\n\ntitle = open('/proc/self/cmdline').read().rstrip()\nprint(title)\nprint(len(title))\n\nprint('SPT_TESTENV=testenv' in open('/proc/self/environ').read())\n \"\"\",\n env=env,\n )\n lines = rv.splitlines()\n cmdline_len = int(lines[0])\n assert lines[1] == \"True\", \"can't verify testenv\"\n title = lines[2]\n assert \"XXX\" in _clean_up_title(title), \"title not set as expected\"\n title_len = int(lines[3])\n assert lines[4] == \"True\", \"env has been clobbered\"\n assert (\n title_len <= cmdline_len\n ), \"title (len {title_len}) not limited to argv (len {cmdline_len})\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the additional arguments of the app under test's process.
|
def SetAppUnderTestArgs(self, args):
    """Sets additional command line arguments for the app under test's process.

    Args:
        args: list of str. Launch arguments to write into the xctestrun
            file. A falsy value is a no-op.
    """
    if not args:
        return
    # XCUITest targets use a dedicated key for the UI-target app; other test
    # types share the generic key.
    field = ('UITargetAppCommandLineArguments'
             if self._test_type == ios_constants.TestType.XCUITEST
             else 'CommandLineArguments')
    self.SetXctestrunField(field, args)
|
[
"def set_script_args(self, args):\n global _env_args_str\n argstr = pickle.dumps(args)\n os.environ[_env_args_str] = argstr",
"def addArgs(self):\n \n self.createArgument('--fork', self.fork, 1, 'Fork to background', action='store_true')\n self.createArgument('--run', self.run, 1, 'Execute run on remote server (to be used with --client argument)', action='store_true')\n self.createArgument('--stop', self.stop, 1, 'Stop previous job', action='store_true')\n self.createArgument('--debug', self.debug, 1, 'Debugging mode', action='store_true')\n self.createArgument('--socket', self.setSocket, '', 'use TCP or UDP connection over ethernet/wireless, default TCP, available TCP, UDP, RFC (bluetooth)')\n self.createArgument('--client', self.client, 1, 'Connect to comma separated client addresses')\n self.createArgument('--server', self.bindMode, 1, 'turn into a server mode that handles instructions', action='store_true')\n self.createArgument('--target', self.selectTarget, '', 'target adress (bluetooth mac or ip adress over ethernet/wireless)')\n self.createArgument('--port', self.selectPort, 80, 'destination port')\n self.createArgument('--bytes', self.packetSize, 80, 'number of bytes to send in one packet')",
"def test_platform_args(self):\n self.app = self.make_app(argv = ['production', 'run', 'J.Doe_00_04', '--debug', '--force', '--amplicon', '--restart', '--sample', SAMPLES[1], '--drmaa'], extensions=['scilifelab.pm.ext.ext_distributed'])\n handler.register(ProductionController)\n self._run_app()\n os.chdir(filedir)",
"def set_arguments(self):\r\n pass",
"def test_add_shared_args(self):\n parser = argparse.ArgumentParser()\n add_shared_args(parser)\n args = parser.parse_args(['--version', '-v'])\n self.assertTrue(args.version)\n self.assertTrue(args.verbose)",
"def _add_arguments(self):\n #the base arguments\n self.parser.add_argument(\"-d\", \"--debug\",\n help=\"Display debugging messages.\",\n action=\"store_true\",\n default=False, dest=\"debug\")\n \n self.parser.add_argument(\"--pudb\",\n help=\"Enable pudb interactive debugging.\",\n action=\"store_true\",\n default=False, dest='pudb')\n\n self.parser.add_argument(\"--pdb\",\n help=\"Enable python's debugger\",\n action=\"store_true\",\n default=False, dest='pdb')\n \n\n self.parser.add_argument(\"-s\", \"--silent\",\n help=\"Turn off screen output.\",\n action=\"store_true\", default=False,\n dest='silent')\n return",
"def _subprocess_popen_arguments(self, argv, **kwargs):\n del argv\n del kwargs\n\n raise NotImplementedError()",
"def setup_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-d\",\n \"--debug\", \n action=\"store_true\", \n help=\"Sets debug mode\"\n ) \n parser.add_argument(\n \"-f\",\n \"--filename\",\n action=\"store\",\n type=str,\n default=LOG_LOC,\n help=\"Location where the log file will be located\"\n )\n return parser.parse_args()",
"def __init__(self, app, options=None):\n self.options = options or {}\n self.application = app\n super(StandaloneApplication, self).__init__()",
"def setInitialArguments(self):\r\n self.argparse.add_argument(\r\n '-s', '--start-dir', default=None,\r\n help=\"Directory to start discovery ('.' default)\")\r\n self.argparse.add_argument(\r\n '-t', '--top-level-directory', '--project-directory',\r\n help='Top level directory of project (defaults to start dir)')\r\n self.argparse.add_argument(\r\n '--config', '-c', nargs='?', action='append',\r\n default=['unittest.cfg', 'nose2.cfg'],\r\n help=\"Config files to load, if they exist. ('unittest.cfg' \"\r\n \"and 'nose2.cfg' in start directory default)\")\r\n self.argparse.add_argument(\r\n '--no-user-config', action='store_const',\r\n dest='user_config', const=False, default=True,\r\n help=\"Do not load user config files\")\r\n self.argparse.add_argument(\r\n '--no-plugins', action='store_const',\r\n dest='load_plugins', const=False, default=True,\r\n help=\"Do not load any plugins. Warning: nose2 does not \"\r\n \"do anything if no plugins are loaded\")\r\n self.argparse.add_argument(\r\n '--plugin', action='append',\r\n dest='plugins', default=[],\r\n help=\"Load this plugin module.\")\r\n self.argparse.add_argument(\r\n '--exclude-plugin', action='append',\r\n dest='exclude_plugins', default=[],\r\n help=\"Do not load this plugin module\")\r\n self.argparse.add_argument(\r\n '--verbose', '-v', action='count', default=0, help=\"print test case names and statuses\")\r\n self.argparse.add_argument('--quiet', action='store_const',\r\n dest='verbose', const=0)\r\n self.argparse.add_argument(\r\n '--log-level', default=logging.WARN,\r\n help='Set logging level for message logged to console.')",
"def add_argument_cmd(self, *args, **kwargs):\n pass",
"def test_runner_args_only_when_set(runner):\n\n conf = Config(test_runner=runner)\n assert \"runner_args\" not in conf._metadata\n\n conf2 = Config(test_runner=runner, runner_args=[\"-vv\", \"--pdb\"])\n assert conf2._metadata.get(\"runner_args\") == [\"-vv\", \"--pdb\"]",
"def setup_args():\n parser = ParlaiParser(False, False)\n parser_grp = parser.add_argument_group('Browser Chat')\n parser_grp.add_argument(\n '--port', default=35496, type=int, help='Port used by the web socket (run.py)'\n )\n parser_grp.add_argument(\n '--host',\n default='0.0.0.0',\n type=str,\n help='Host from which allow requests, use 0.0.0.0 to allow all IPs',\n )\n parser_grp.add_argument(\n '--serving_port',\n default=8080,\n type=int,\n help='Port used to configure the server',\n )\n\n return parser.parse_args()",
"def sys_argv_app_key_restore(tst_app_key): # needed for tests using sys.argv/get_opt() of ConsoleApp\n old_argv = sys.argv\n sys.argv = [tst_app_key, ]\n\n yield tst_app_key\n\n sys.argv = old_argv",
"def addCommonArguments(self):\n pass",
"def extra_start_args(self):\n\n\t\treturn self.tool_config.get('extra_start_args', default = '')",
"def test_argv(self):\n script_name = 'print_argv'\n script_path = join_pwb_tests_path(script_name + '.py')\n script_opts = ['-help']\n command = [script_path] + script_opts\n without_global_args = execute_pwb(command)\n with_no_global_args = execute_pwb(['-maxlag:5'] + command)\n self.assertEqual(without_global_args['stdout'],\n with_no_global_args['stdout'])\n self.assertEqual(without_global_args['stdout'].rstrip(),\n str([script_name] + script_opts))",
"def test_app_cmd(runpath):\n app = App(name=\"App\", binary=\"binary\", runpath=runpath)\n assert app.cmd == [\"binary\"]\n app = App(\n name=\"App\", pre_args=[\"a\", \"b\"], binary=\"binary\", runpath=runpath\n )\n assert app.cmd == [\"a\", \"b\", \"binary\"]\n app = App(name=\"App\", args=[\"c\", \"d\"], binary=\"binary\", runpath=runpath)\n assert app.cmd == [\"binary\", \"c\", \"d\"]\n app = App(\n name=\"App\",\n pre_args=[\"a\", \"b\"],\n args=[\"c\", \"d\"],\n binary=\"binary\",\n runpath=runpath,\n )\n assert app.cmd == [\"a\", \"b\", \"binary\", \"c\", \"d\"]",
"def _update_kwargs(self, kwargs):\n if 'env' in kwargs:\n env = dict(kwargs['env'])\n env.update(self.env)\n else:\n env = dict(self.env)\n kwargs['env'] = env\n\n if self.preexec_fn is not None:\n kwargs['preexec_fn'] = self.preexec_fn",
"def set_application(app):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the specific test methods/test classes to skip in the xctestrun file.
|
def SetSkipTests(self, skip_tests):
    """Sets the test methods/classes to skip in the xctestrun file.

    Args:
        skip_tests: list of str. Each item is a test identifier such as
            'SomeTestClass' or 'SomeTestClass/testSomeMethod'. A falsy
            value is a no-op.
    """
    if skip_tests:
        self.SetXctestrunField('SkipTestIdentifiers', skip_tests)
|
[
"def test_skip(self):\n LOG.info('About to skip...')\n self.skipTest('No reason.')",
"def pytest_ignore(cls):\n cls.__test__ = False\n return cls",
"def test_classes_and_functions_excluded(self, flake8dir):\n\n # Setup\n flake8dir.make_setup_cfg(\"\"\"\n[flake8]\npytest_mark1 = name=test,\n exclude_classes=true,\n exclude_methods=false,\n exclude_functions=true\n\n\"\"\")\n flake8dir.make_example_py(self.example_tests)\n\n # Test\n result = flake8dir.run_flake8(extra_args)\n assert ['./example.py:2:1: M501 test definition not marked with test'] == result.out_lines",
"def skipTest(self, prevTest, test, nextTest):\n return\n rec = test.addStepRecord(\"run\")\n rec.setResult(Core.SKIPPED)\n test.startNewRun()",
"def test_pytest_all_tests_skipped_propagates(self):\n py_file = self.testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.skip(reason=\"Because\")\n def test_not_ok_but_skipped():\n assert 0\n\n @pytest.mark.skip(reason=\"Because\")\n def test_also_not_ok_but_skipped():\n assert 0\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n rec = self.inline_run(\"--ddtrace\", file_name)\n rec.assertoutcome(skipped=2)\n spans = self.pop_spans()\n for span in spans:\n assert span.get_tag(\"test.status\") == \"skip\"",
"def test_methods_and_functions_excluded(self, flake8dir):\n\n # Setup\n flake8dir.make_setup_cfg(\"\"\"\n[flake8]\npytest_mark1 = name=test,\n exclude_classes=false,\n exclude_methods=true,\n exclude_functions=true\n\n\"\"\")\n flake8dir.make_example_py(self.example_tests)\n\n # Test\n result = flake8dir.run_flake8(extra_args)\n assert ['./example.py:1:1: M501 test definition not marked with test'] == result.out_lines",
"def test_enable_exclude_classes_configuration(self, flake8dir):\n\n # Setup\n flake8dir.make_setup_cfg(self.config)\n flake8dir.make_example_py(\"\"\"\nclass TestDisabledConfiguration(object):\n pass\n\"\"\")\n\n # Test\n result = flake8dir.run_flake8(extra_args)\n assert [] == result.out_lines",
"def test_skip_module_with_xfail_cases(self):\n py_file = self.testdir.makepyfile(\n \"\"\"\n import pytest\n\n pytestmark = pytest.mark.skip(reason=\"reason\")\n\n @pytest.mark.xfail(reason=\"XFail Case\")\n def test_xfail():\n pass\n\n @pytest.mark.xfail(condition=False, reason=\"XFail Case\")\n def test_xfail_conditional():\n pass\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n rec = self.inline_run(\"--ddtrace\", file_name)\n rec.assertoutcome(skipped=2)\n spans = self.pop_spans()\n\n assert len(spans) == 5\n test_spans = [span for span in spans if span.get_tag(\"type\") == \"test\"]\n assert test_spans[0].get_tag(test.STATUS) == test.Status.SKIP.value\n assert test_spans[0].get_tag(test.SKIP_REASON) == \"reason\"\n assert test_spans[1].get_tag(test.STATUS) == test.Status.SKIP.value\n assert test_spans[1].get_tag(test.SKIP_REASON) == \"reason\"\n assert test_spans[0].get_tag(\"component\") == \"pytest\"\n assert test_spans[1].get_tag(\"component\") == \"pytest\"",
"def test_skipif_module(self):\n py_file = self.testdir.makepyfile(\n \"\"\"\n import pytest\n\n pytestmark = pytest.mark.skipif(True, reason=\"reason\")\n\n @pytest.mark.xfail(reason=\"XFail\")\n def test_xfail():\n pass\n\n @pytest.mark.xfail(condition=False, reason=\"XFail Case\")\n def test_xfail_conditional():\n pass\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n rec = self.inline_run(\"--ddtrace\", file_name)\n rec.assertoutcome(skipped=2)\n spans = self.pop_spans()\n\n assert len(spans) == 5\n test_spans = [span for span in spans if span.get_tag(\"type\") == \"test\"]\n assert test_spans[0].get_tag(test.STATUS) == test.Status.SKIP.value\n assert test_spans[0].get_tag(test.SKIP_REASON) == \"reason\"\n assert test_spans[1].get_tag(test.STATUS) == test.Status.SKIP.value\n assert test_spans[1].get_tag(test.SKIP_REASON) == \"reason\"\n assert test_spans[0].get_tag(\"component\") == \"pytest\"\n assert test_spans[1].get_tag(\"component\") == \"pytest\"",
"def ExcludeTestFiles(self):\n files_to_delete = []\n for path in self._coverage:\n if any(path.endswith(postfix) for postfix in TEST_FILES_POSTFIXES):\n files_to_delete.append(path)\n\n for path in files_to_delete:\n del self._coverage[path]",
"def test_pytest_not_all_tests_skipped_does_not_propagate(self):\n py_file = self.testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.skip(reason=\"Because\")\n def test_not_ok_but_skipped():\n assert 0\n\n def test_ok():\n assert True\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n rec = self.inline_run(\"--ddtrace\", file_name)\n rec.assertoutcome(skipped=1, passed=1)\n spans = self.pop_spans()\n test_span_skipped = spans[0]\n test_span_ok = spans[1]\n test_suite_span = spans[4]\n test_session_span = spans[2]\n test_module_span = spans[3]\n assert test_suite_span.get_tag(\"type\") == \"test_suite_end\"\n assert test_module_span.get_tag(\"type\") == \"test_module_end\"\n assert test_session_span.get_tag(\"type\") == \"test_session_end\"\n assert test_span_skipped.get_tag(\"test.status\") == \"skip\"\n assert test_span_ok.get_tag(\"test.status\") == \"pass\"\n assert test_suite_span.get_tag(\"test.status\") == \"pass\"\n assert test_module_span.get_tag(\"test.status\") == \"pass\"\n assert test_session_span.get_tag(\"test.status\") == \"pass\"",
"def test_classes(self):\n pass",
"def testSkipNonExec(self):\n for phase in (constants.HOOKS_PHASE_PRE, constants.HOOKS_PHASE_POST):\n fname = \"%s/test\" % self.ph_dirs[phase]\n f = open(fname, \"w\")\n f.close()\n self.torm.append((fname, False))\n self.assertEqual(self.hr.RunHooks(self.hpath, phase, {}),\n [(self._rname(fname), HKR_SKIP, \"\")])",
"def add_disabled_tests(self, *disabled_tests):\n # Disallow setting both enabled_tests and disabled_tests.\n assert not self._enabled_tests\n self._disabled_tests += list(disabled_tests)",
"def skip_this_extension_module():\n if not run_end_to_end:\n raise unittest.SkipTest('this module is skipped because it is an extension module')",
"def test_classes_and_methods_excluded_functions_take_args(self, flake8dir):\n\n # Setup\n flake8dir.make_setup_cfg(\"\"\"\n[flake8]\npytest_mark1 = name=test,\n exclude_classes=true,\n exclude_methods=true,\n exclude_functions=false\n\n\"\"\")\n flake8dir.make_example_py(\"\"\"\nclass TestClass(object):\n def test_method(self):\n pass\n\ndef test_function_with_args(fixture_one, fixture_two):\n pass\n\ndef test_function_without_args():\n pass\n\"\"\")\n\n # Test\n result = flake8dir.run_flake8(extra_args)\n assert ['./example.py:5:1: M501 test definition not marked with test',\n './example.py:8:1: M501 test definition not marked with test'] == result.out_lines",
"def _test_disabled(self, tc, context):\n self.msg.print('{}: {}DISABLED{}\\t({}/{})'\n .format(tc, futils.Color.YELLOW, futils.Color.END,\n tc.test_type, context))",
"def _test_classes(self):",
"def make_exclude():\n # Simple utility to make IPython paths more readably, we need a lot of\n # these below\n ipjoin = lambda *paths: pjoin('IPython', *paths)\n\n exclusions = [ipjoin('external'),\n ipjoin('quarantine'),\n ipjoin('deathrow'),\n # This guy is probably attic material\n ipjoin('testing', 'mkdoctests'),\n # Testing inputhook will need a lot of thought, to figure out\n # how to have tests that don't lock up with the gui event\n # loops in the picture\n ipjoin('lib', 'inputhook'),\n # Config files aren't really importable stand-alone\n ipjoin('config', 'profile'),\n # The notebook 'static' directory contains JS, css and other\n # files for web serving. Occasionally projects may put a .py\n # file in there (MathJax ships a conf.py), so we might as\n # well play it safe and skip the whole thing.\n ipjoin('html', 'static'),\n ipjoin('html', 'fabfile'),\n ]\n if not have['sqlite3']:\n exclusions.append(ipjoin('core', 'tests', 'test_history'))\n exclusions.append(ipjoin('core', 'history'))\n if not have['wx']:\n exclusions.append(ipjoin('lib', 'inputhookwx'))\n \n if 'IPython.kernel.inprocess' not in sys.argv:\n exclusions.append(ipjoin('kernel', 'inprocess'))\n \n # FIXME: temporarily disable autoreload tests, as they can produce\n # spurious failures in subsequent tests (cythonmagic).\n exclusions.append(ipjoin('extensions', 'autoreload'))\n exclusions.append(ipjoin('extensions', 'tests', 'test_autoreload'))\n\n # We do this unconditionally, so that the test suite doesn't import\n # gtk, changing the default encoding and masking some unicode bugs.\n exclusions.append(ipjoin('lib', 'inputhookgtk'))\n exclusions.append(ipjoin('kernel', 'zmq', 'gui', 'gtkembed'))\n\n #Also done unconditionally, exclude nbconvert directories containing\n #config files used to test. 
Executing the config files with iptest would\n #cause an exception.\n exclusions.append(ipjoin('nbconvert', 'tests', 'files'))\n exclusions.append(ipjoin('nbconvert', 'exporters', 'tests', 'files'))\n\n # These have to be skipped on win32 because the use echo, rm, cd, etc.\n # See ticket https://github.com/ipython/ipython/issues/87\n if sys.platform == 'win32':\n exclusions.append(ipjoin('testing', 'plugin', 'test_exampleip'))\n exclusions.append(ipjoin('testing', 'plugin', 'dtexample'))\n\n if not have['pexpect']:\n exclusions.extend([ipjoin('lib', 'irunner'),\n ipjoin('lib', 'tests', 'test_irunner'),\n ipjoin('terminal', 'console'),\n ])\n\n if not have['zmq']:\n exclusions.append(ipjoin('lib', 'kernel'))\n exclusions.append(ipjoin('kernel'))\n exclusions.append(ipjoin('qt'))\n exclusions.append(ipjoin('html'))\n exclusions.append(ipjoin('consoleapp.py'))\n exclusions.append(ipjoin('terminal', 'console'))\n exclusions.append(ipjoin('parallel'))\n elif not have['qt'] or not have['pygments']:\n exclusions.append(ipjoin('qt'))\n\n if not have['pymongo']:\n exclusions.append(ipjoin('parallel', 'controller', 'mongodb'))\n exclusions.append(ipjoin('parallel', 'tests', 'test_mongodb'))\n\n if not have['matplotlib']:\n exclusions.extend([ipjoin('core', 'pylabtools'),\n ipjoin('core', 'tests', 'test_pylabtools'),\n ipjoin('kernel', 'zmq', 'pylab'),\n ])\n\n if not have['cython']:\n exclusions.extend([ipjoin('extensions', 'cythonmagic')])\n exclusions.extend([ipjoin('extensions', 'tests', 'test_cythonmagic')])\n\n if not have['oct2py']:\n exclusions.extend([ipjoin('extensions', 'octavemagic')])\n exclusions.extend([ipjoin('extensions', 'tests', 'test_octavemagic')])\n\n if not have['tornado']:\n exclusions.append(ipjoin('html'))\n\n if not have['jinja2']:\n exclusions.append(ipjoin('html', 'notebookapp'))\n\n if not have['rpy2'] or not have['numpy']:\n exclusions.append(ipjoin('extensions', 'rmagic'))\n exclusions.append(ipjoin('extensions', 'tests', 'test_rmagic'))\n\n if 
not have['azure']:\n exclusions.append(ipjoin('html', 'services', 'notebooks', 'azurenbmanager'))\n\n if not all((have['pygments'], have['jinja2'], have['sphinx'])):\n exclusions.append(ipjoin('nbconvert'))\n\n # This is needed for the reg-exp to match on win32 in the ipdoctest plugin.\n if sys.platform == 'win32':\n exclusions = [s.replace('\\\\','\\\\\\\\') for s in exclusions]\n \n # check for any exclusions that don't seem to exist:\n parent, _ = os.path.split(get_ipython_package_dir())\n for exclusion in exclusions:\n if exclusion.endswith(('deathrow', 'quarantine')):\n # ignore deathrow/quarantine, which exist in dev, but not install\n continue\n fullpath = pjoin(parent, exclusion)\n if not os.path.exists(fullpath) and not glob.glob(fullpath + '.*'):\n warn(\"Excluding nonexistent file: %r\" % exclusion)\n\n return exclusions",
"def skip_if_windows(obj):\n return unittest.skipIf(sys.platform == \"win32\", \"Skipping tests on Windows\")(obj)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the specific field in the xctestrun file.
|
def GetXctestrunField(self, field):
try:
return self._xctestrun_file_plist_obj.GetPlistField(
'%s:%s' % (self._root_key, field))
except ios_errors.PlistError:
return None
|
[
"def get_field(self, x, y):\n return self.fields[x][y]",
"def get_field(self, field_name):\n\n field_names = field_name.split('.')\n return _find_field(self.__msg, field_names)",
"def get_field(self, field):\n value = self._fields.get(field)\n if value is None:\n msg = message_factory.get_message(\n \"vapi.data.structure.getfield.unknown\",\n field)\n logger.debug(msg)\n raise CoreException(msg)\n return value",
"def readField(self, *args):\r\n return _osgDB.FieldReader_readField(self, *args)",
"def __getField(self, record, field):\n\t\t(offset, length) = (self.allFields[field].ffOffset, self.allFields[field].maxlength)\n\t\treturn record[offset:offset+length].strip()",
"def Get(self, f = None):\n return self.__fields[f]",
"def get_field(self, name):\n return self._fields[name]",
"def getZTFfield(fid, fields):\n found=[o for o in fields if o.id==fid]\n if len(found)!=1:\n print \"big problems.....\"\n return\n return found[0]",
"def field(self, tag):\n return self[self.index(tag)]",
"def _get_field(self, line):\n field_name, _ = line.split(\",\", 1)\n field_name = field_name.strip()\n return field_name",
"def get(self, field, load_reference=True):",
"def get_wrf_field(file_name, file_format, data_field):\n data_file = netCDF4.Dataset(file_name, mode='r', format=file_format)\n\n try:\n\n data = np.squeeze(data_file.variables[data_field][:])\n data_file.close()\n\n return data\n\n except KeyError:\n\n data_file.close()\n return False",
"def getField(self, fieldName):\n self.isLoggedIn()\n bugUri = self.uri + '/field/bug/'+ fieldName + '?' + self.getTokenParam()\n data = self.getRequest(bugUri)\n fieldInfo = json.loads(data.text)\n\n return fieldInfo",
"def get(self, field):\n return self.track[field]",
"def get_field(self, _id):\n for field in self.field:\n if field.id == _id:\n return field\n else:\n raise RuntimeError('Field not found for id: ' + _id)",
"def get_field_data(self, field):\n return self._get_field_type_data(field)[1]",
"def get_field_info(self):\n return self.world.field_info",
"def __extractField(self, raw: dict, name: str):\n if not 'fields' in raw:\n return None\n fields = raw['fields']\n if not name in fields:\n return None\n return fields[name]",
"def get_field(response, xpath):\n if xpath:\n field = response.xpath(xpath).get()\n if field:\n return field.strip() # TODO: clean data even further\n\n return None",
"def _get_field(self, field_name):\n if field_name.startswith(\"extra__\"):\n raise ValueError(\n f\"Got prefixed name {field_name}; please remove the 'extra__kubernetes__' prefix \"\n f\"when using this method.\"\n )\n if field_name in self.conn_extras:\n return self.conn_extras[field_name] or None\n prefixed_name = f\"extra__kubernetes__{field_name}\"\n return self.conn_extras.get(prefixed_name) or None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks if the specific field is in the xctestrun file.
|
def HasXctestrunField(self, field):
try:
self._xctestrun_file_plist_obj.GetPlistField(
'%s:%s' % (self._root_key, field))
return True
except ios_errors.PlistError:
return False
|
[
"def hasContent(field):\n file = field.file\n pos = file.tell()\n file.seek(0)\n ch = file.read(1)\n file.seek(pos)\n return ch != ''",
"def check_field_exists(curdir, fieldname):\n # TODO replace all those os.path.exists() calls by\n # check_field_exists() to get easier code\n if os.path.exists(os.path.join(curdir,fieldname)):\n return True\n else:\n return False",
"def _is_exist(self, field):\n return bool([item for item in field if item])",
"def field_exist(dataset, field):\n try:\n list_field = arcpy.ListFields(dataset, field)\n except:\n print(\"error with list fields.\")\n sys.exit(543)\n\n # If the length of the list equals 1 then the field is in the list.\n if len(list_field) == 1:\n return True\n else:\n return False",
"def has_field(block, field_name):\n return field_name in block.fields",
"def checkFruFieldInJson(self, field: str) -> bool:\n if \"Custom Data\" in field:\n return True\n if field in self.fru_data_in_json.keys() and not self.fru_data_in_json[field]:\n return True\n if (\n field in self.fru_data_in_json.keys()\n and self.fru_data_in_json[field] == \"N/A\"\n ):\n return True\n return False",
"def contains_forbidden_fields(self) -> bool:\n error_msg = \"\"\n is_valid = True\n fields_to_exclude = [\n \"system\",\n \"isCommon\",\n \"shared\",\n \"owner\",\n \"sortValues\",\n \"vcShouldIgnore\",\n \"commitMessage\",\n \"shouldCommit\",\n ]\n\n widgets = self.get_widgets_from_dashboard(self.current_file)\n\n for field in fields_to_exclude:\n if self.current_file.get(field) is not None:\n error_message, error_code = Errors.remove_field_from_dashboard(field)\n formatted_message = self.handle_error(\n error_message,\n error_code,\n file_path=self.file_path,\n )\n if formatted_message:\n is_valid = False\n error_msg += formatted_message\n # iterate over the widgets if exist\n if widgets:\n for widget in widgets:\n if widget.get(field):\n error_message, error_code = Errors.remove_field_from_widget(\n field, widget\n )\n formatted_message = self.handle_error(\n error_message,\n error_code,\n file_path=self.file_path,\n )\n if formatted_message:\n is_valid = False\n error_msg += formatted_message\n if error_msg:\n logger.info(f\"[red]{error_msg}[/red]\")\n return is_valid",
"def field_has_raw(cls, field):\n\n try:\n mapping = cls.get_field_mapping(field)\n return 'raw' in \\\n mapping[mapping.keys()[-1]]['mappings'][cls._doc_type.name][\n field]['mapping'][field]['fields']\n except KeyError:\n return False",
"def check_hgf_field(fieldname):\n\tif fieldname == \"hgf_release\": return False, \"\" #do not check for hgf_release\n\tif len(fieldname) < len(\"hgf_xxx\"): return False, \"\" # controlfields or more\n\tif fieldname == \"hgf_master\": return True, \"master\" #record linking\n\tif re.search('_[A-Za-z0-9]{3}[_A-z0-9]', fieldname):\n\t\tif len(fieldname) == 9: return True, \"json\"\n\t\tif len(fieldname) > 9: return True, \"asci\"\n\t\telse: return False, \"\" \n\telse: return False, \"\"",
"def validate(self, field):",
"def check_field_type_known(field_type_in):\n for type_label, type_desc in FEDS_NOTIONAL_FIELD_TYPES:\n if type_label == field_type_in:\n return True\n return False",
"def is_empty_file(file_field):\n return file_field is None or not file_field.name",
"def has_field(self, field_name: str) -> bool:\n return bool(self.try_get_field(field_name))",
"def name_not_contain_the_type(self):\n\n name = self.current_file.get(\"name\", \"\")\n if \"playbook\" in name.lower():\n error_message, error_code = Errors.field_contain_forbidden_word(\n field_names=[\"name\"], word=\"playbook\"\n )\n if self.handle_error(error_message, error_code, file_path=self.file_path):\n self.is_valid = False\n return False\n return True",
"def matches_field(self, field):\n if self.name != field.name:\n return False\n\n if self.ftype != etau.get_class_name(field):\n return False\n\n if self.subfield and self.subfield != etau.get_class_name(field.field):\n return False\n\n if (\n self.embedded_doc_type\n and self.embedded_doc_type\n != etau.get_class_name(field.document_type)\n ):\n return False\n\n if self.db_field != field.db_field:\n return False\n\n return True",
"def _contains_required_fields(self):\n return LinkFile.FIELD_URL in self._data",
"def check_dimension(self, filename, dimension):\n self.dimension = dimension\n self.filename = filename\n if self.dimension in self.filename:\n return True\n else:\n return False",
"def check_file_validation(self):\r\n if self.snap is None:\r\n # file existence\r\n print('file for stock %s at date %s is not existed' % (self.code, self.date))\r\n return False\r\n elif self.snap.iloc[-1]['iTurnover'] == 0:\r\n # stock is traded or not\r\n print('stock %s has no trade record at date %s' % (self.code, self.date))\r\n return False\r\n else:\r\n return True",
"def __validateFile(self):\n\n xml_schema_doc = etree.parse(TemplateReader.TemplateXSD)\n xmlSchema = etree.XMLSchema(xml_schema_doc)\n \n return xmlSchema.validate(self.template_xml)",
"def requestFillsField( self, field, request ):\n if request is None: return False\n allpossible = self.getAllOurReqFieldsFor( field )\n sys.stderr.write( \"RT.requestFillsField: checking for %s in %s\\n\" % \\\n ( field, str( allpossible ) )\n )\n for f in allpossible:\n if f in request.keys():\n return True\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the field with provided value in xctestrun file.
|
def SetXctestrunField(self, field, value):
self._xctestrun_file_plist_obj.SetPlistField(
'%s:%s' % (self._root_key, field), value)
|
[
"def set(self, field, value):\n raise NotImplementedError",
"def set_value(self, config_field, include_doc=False):\n raise NotImplementedError",
"def set_field(self, field):\n return self.set_param('field', field)",
"def track_set(field_name, val, msg=track):\r\n success = utils.pb_set(msg, field_name, val)\r\n\r\n if not success:\r\n log.info(\"could not pb_set track.%s = %r for '%s'\", field_name, val, filepath)\r\n\r\n return success",
"def setval(filepath, key, value):\n if key.upper().startswith(\"META_\"):\n key = key.replace(\"META_\", \"META.\")\n file_obj = file_factory(filepath)\n file_obj.setval(key, value)",
"def set(self, tag: str, value: str) -> None:",
"def add_field(self, field):\n config = configparser.ConfigParser()\n config.read(self.path)\n config.set(self.section, field, '')\n with open(self.path, 'w') as config_file:\n config.write(config_file)",
"def set(value,force=False):",
"def cmd_field_disc( self, match_name, cmd, value):\n self.disc.set_field( match_name, value)",
"def at_set(self, new_value):\r\n pass",
"def set_metadata(self, node: Node, path: str, key: str, value: Any):\n # Unsupported fields cannot be selected\n if (\n key == \"selected\"\n and value == True\n and node.get(\"inclusion\") == \"unsupported\"\n ):\n return\n\n node[key] = value\n logging.debug(\"Setting '%s.%s' to '%s'\", path, key, value) # noqa: WPS323",
"def edit_field(self, field_key: str, field: 'MetadataField') -> None:\n self.add_operation({\n 'op': 'editField',\n 'fieldKey': field_key,\n 'data': field.json(),\n })",
"def setExtendedAttribute(self,name,value):\n if value is None:\n ref = _C.c_void_p(0)\n else:\n data = _dat.Data(value)\n if data.__hasBadTreeReferences__(self.tree):\n data = data.__fixTreeReferences__(self.tree)\n ref = _dat.Data.byref(data)\n _exc.checkStatus(\n _TreeShr._TreeSetXNci(self.ctx,\n self.nid,\n _C.c_char_p(_ver.tobytes(name)),\n ref))",
"def do_set(self, line):\n\t\t# print self.IPHASdata.__dict__\n\t\tparams = line.split()\n\t\tif len(params)< 2:\n\t\t\tprint \"Usage 'set [property] [value]'\"\n\t\t\treturn\n\t\tpropertyToSet = params[0]\n\t\tvalue = params[1]\n\t\tself.IPHASdata.setProperty(propertyToSet, value)\n\t\treturn",
"def add_value_for_field(self, val, doc_id, field):\n guid = self.get_guid(doc_id)\n\n if guid not in self.m_file[indexfields.FIELDS]:\n self.m_file[indexfields.FIELDS][guid] = {}\n\n self.m_file[indexfields.FIELDS][guid][field] = val",
"def set_file(self, fileinstance):\n self.file = fileinstance\n return self",
"def set(self, row, column, value):",
"def set_vtu_ref(flml_file, new_vtu):\n tree = ET.parse(flml_file+'.flml')\n root = tree.getroot()\n \n root.find('material_phase').find('scalar_field').find('prognostic').find('initial_condition').find('from_file').set('file_name', new_vtu)\n \n tree.write(flml_file+'.flml')\n \n return",
"def __setattr__(self, name, val):\n if name in self._meta.fields:\n f = self._meta.fields[name]\n val = f.to_search_value(val)\n super(DocumentModel, self).__setattr__(name, val)",
"def pb_set(msg, field_name, val):\r\n\r\n #Find the proper type.\r\n field_desc = msg.DESCRIPTOR.fields_by_name[field_name]\r\n proper_type = cpp_type_to_python[field_desc.cpp_type]\r\n\r\n #Try with the given type first.\r\n #Their set hooks will automatically coerce.\r\n try_types = (type(val), proper_type)\r\n\r\n for t in try_types:\r\n log.debug(\"attempt %s.%s = %s(%r)\", msg.__class__.__name__, field_name, t, val)\r\n try:\r\n setattr(msg, field_name, t(val))\r\n log.debug(\"! success\")\r\n break\r\n except (TypeError, ValueError):\r\n log.debug(\"X failure\")\r\n else:\r\n return False # no assignments stuck\r\n\r\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Deletes the field with provided value in xctestrun file.
|
def DeleteXctestrunField(self, field):
self._xctestrun_file_plist_obj.DeletePlistField(
'%s:%s' % (self._root_key, field))
|
[
"def delete_field(self):\n self.exec_command(b'DeleteField')",
"def delete_field(self, name):\n if 'idb_fields' in self.data:\n self.data['idb_fields'].remove(name)\n if name in self.data:\n del self.data[name]",
"def removeField(field):",
"def field_delete(self, core, field_name, verbose=False):\n\n if field_name not in self.schema_field_names(core, names_of='fields'):\n print('Solr field_delete: \"%s\" fieldname does not exist!' %\n field_name)\n return\n\n post_header = {\n 'Content-type': 'application/json',\n 'charset': 'utf-8'\n }\n\n binary_data = {\n 'delete-field': {'name': field_name}\n }\n\n if verbose:\n print('Solr field_delete:')\n\n self._post_core(core, 'schema', post_header, binary_data, verbose)",
"def deleteValue(self, value: str = None) -> None:\r\n data = self.read()\r\n for x in range(len(data)):\r\n try:\r\n data.remove(value)\r\n except ValueError:\r\n break\r\n self.writelines(data=data, mode=\"w\")",
"def delete_field(self, name: str) -> None:\n self._post_field(\"delete-field\", name=name)",
"def remove(ctx, key, field):\n data = ctx.obj[\"data\"]\n entry = query.get_by_key(data, key)\n\n if not field:\n data.remove(entry)\n elif \"fields\" in entry:\n for f in field:\n if f in entry[\"fields\"]:\n del entry[\"fields\"][f]\n else:\n click.echo('\"{}\" has no field \"{}\"'.format(key, f))\n else:\n click.echo('\"{}\" has no fields'.format(key))\n\n pybibs.write_file(data, ctx.obj[\"database\"])",
"def PBH_HASH_FIELD_delete(db, hash_field_name):\n\n ctx = click.get_current_context()\n\n hash_field_name_validator(ctx, db.cfgdb_pipe, hash_field_name)\n\n table = str(PBH_HASH_FIELD_CDB)\n key = str(hash_field_name)\n\n try:\n del_entry(db.cfgdb_pipe, table, key)\n except Exception as err:\n exit_with_error(\"Error: {}\".format(err), fg=\"red\")",
"def remove_field(self, field_key: str) -> None:\n self.add_operation({\n 'op': 'removeField',\n 'fieldKey': field_key,\n })",
"def SoftDeleteFieldDef(self, cnxn, project_id, field_id):\n self.fielddef_tbl.Update(cnxn, {'is_deleted': True}, id=field_id)\n self.config_2lc.InvalidateKeys(cnxn, [project_id])\n self.InvalidateMemcacheForEntireProject(project_id)",
"def delete_record():",
"def del_file(self, path: str) -> None:\n cmd = b''.join([\n ev3.DELETE_FILE,\n str.encode(path) + b'\\x00' # NAME\n ])\n self.send_system_cmd(cmd)",
"def package_delete(sender, instance, **kwargs):\n\tinstance.file.delete(False) # False so FileField doesn't save the model",
"def clean_fields(curdir, fieldname):\n\n # TODO either we should call delete_fields here or use this function\n # for delete_fields. At least both sound very similar.\n\n liste = os.listdir(curdir)\n to_remove = []\n for file in liste:\n if not fieldname in file:\n continue\n if fieldname == file: \n continue # technical field should not be deleted\n to_remove.append(file)\n\n for i in to_remove:\n file_to_delete = os.path.join(curdir,i)\n os.remove(file_to_delete)",
"def remove_field(self, index: int) -> None:\n try:\n del self._fields[index]\n except IndexError:\n pass",
"def _clear_field(self, index):\n self.__log.call(index)\n\n key = (\n self._fields[index][0].get(), # Vorbis comment\n self._fields[index][1].get() # ID3v2 tag\n )\n value = self._fields[index][2].get() # value\n self._cleared.add((key, value))\n\n super()._clear_field(index)",
"def remove(self, dbx_path, **kwargs):\n # try to move file (response will be metadata, probably)\n res = self.dbx.files_delete_v2(dbx_path, **kwargs)\n md = res.metadata\n\n logger.debug(f\"Item '{dbx_path}' removed from Dropbox\")\n\n return md",
"def removeDataField(self, fieldName):\n index = entity.fieldNameList.index(fieldName)\n entity.fieldNameList.pop(index)\n entity.data.pop(fieldName)",
"def delete(self):\n self.gridfs.delete(self.file_id)",
"def deleteValue (\n\n self,\n attribute = None,\n attributes = None,\n values = None\n ) :\n\n return self.deleteAttribute( attribute, attributes, values )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initializes the XctestRun object. If arg work_dir is provided, the original app under test file and test bundle file will be moved to work_dir/TEST_ROOT.
|
def __init__(self, app_under_test_dir, test_bundle_dir,
sdk=ios_constants.SDK.IPHONESIMULATOR,
test_type=ios_constants.TestType.XCUITEST,
signing_options=None, work_dir=None):
self._app_under_test_dir = app_under_test_dir
self._test_bundle_dir = test_bundle_dir
self._test_name = os.path.splitext(os.path.basename(test_bundle_dir))[0]
self._sdk = sdk
self._test_type = test_type
if self._sdk == ios_constants.SDK.IPHONEOS:
self._signing_options = signing_options
else:
if not signing_options:
logging.info(
'The signing options only works on sdk iphoneos, but current sdk '
'is %s', self._sdk)
self._signing_options = {}
self._work_dir = work_dir
self._test_root_dir = None
self._xctestrun_file_path = None
self._xctestrun_obj = None
self._delete_work_dir = False
self._ValidateArguments()
|
[
"def __init__(self,kim_code,*args,**kwargs):\n super(TestDriver,self).__init__(kim_code,*args,**kwargs)\n self.executable = os.path.join(self.path, cf.TEST_EXECUTABLE)",
"def testWorkingDir(self):\n\n os.environ.pop(\"TUNE_ORIG_WORKING_DIR\", None)\n working_dir = os.getcwd()\n\n def f(config):\n assert os.environ.get(\"TUNE_ORIG_WORKING_DIR\") == working_dir\n\n ray.init(num_cpus=1)\n tune.run(f)\n ray.shutdown()",
"def setUp(self):\r\n self._init_paths()\r\n os.makedirs(self._execPath, exist_ok=True)\r\n self._remove_all_tempfiles()\r\n copyfile(self._refYwFile, self._testYwFile)",
"def setUp(self):\n self.setUpPyfakefs()\n self.fs.add_real_directory(os.path.dirname(__file__),\n target_path=\"/orig\")\n self.fs.create_dir(\"/test\")\n self.ren = Renamer(\"/test\")",
"def _GenerateXctestrunFileForLogicTest(self):\n self._xctestrun_file_path = os.path.join(\n self._test_root_dir, 'xctestrun.plist')\n test_bundle_name = os.path.basename(self._test_bundle_dir).split('.')[0]\n plist_util.Plist(self._xctestrun_file_path).SetPlistField(\n test_bundle_name, {})\n self._xctestrun_obj = XctestRun(\n self._xctestrun_file_path, test_type=self._test_type)\n self._xctestrun_obj.SetXctestrunField(\n 'TestBundlePath', self._test_bundle_dir)\n self._xctestrun_obj.SetXctestrunField(\n 'TestHostPath', xcode_info_util.GetXctestToolPath(self._sdk))\n dyld_framework_path = os.path.join(\n xcode_info_util.GetSdkPlatformPath(self._sdk),\n 'Developer/Library/Frameworks')\n self._xctestrun_obj.SetXctestrunField(\n 'TestingEnvironmentVariables',\n {'DYLD_FRAMEWORK_PATH': dyld_framework_path,\n 'DYLD_LIBRARY_PATH': dyld_framework_path})",
"def _GenerateXctestrunFileForXctest(self):\n dummyproject_derived_data_dir = os.path.join(self._work_dir,\n 'dummyproject_derived_data')\n with dummy_project.DummyProject(\n self._app_under_test_dir, self._test_bundle_dir, self._sdk,\n self._test_type, self._work_dir) as dummy_project_instance:\n # Use TEST_ROOT as dummy project's build products dir.\n dummy_project_instance.BuildForTesting(\n self._test_root_dir, dummyproject_derived_data_dir)\n\n app_under_test_plugins_dir = os.path.join(\n self._app_under_test_dir, 'PlugIns')\n if not os.path.exists(app_under_test_plugins_dir):\n os.mkdir(app_under_test_plugins_dir)\n new_test_bundle_path = os.path.join(\n app_under_test_plugins_dir, os.path.basename(self._test_bundle_dir))\n # The test bundle under PlugIns can not be symlink since it will cause\n # app installation error.\n if os.path.islink(self._test_bundle_dir):\n shutil.copytree(self._test_bundle_dir, new_test_bundle_path)\n self._test_bundle_dir = new_test_bundle_path\n elif new_test_bundle_path != self._test_bundle_dir:\n self._test_bundle_dir = _MoveAndReplaceFile(\n self._test_bundle_dir, app_under_test_plugins_dir)\n\n # The xctestrun file are under the build products directory of dummy\n # project's derived data dir.\n # DerivedData\n # |\n # +--Build\n # |\n # +--Products\n # |\n # +--***.xctestrun\n derived_data_build_products_dir = os.path.join(\n dummyproject_derived_data_dir, 'Build', 'Products')\n generated_xctestrun_file_paths = glob.glob('%s/*.xctestrun' %\n derived_data_build_products_dir)\n if not generated_xctestrun_file_paths:\n raise ios_errors.XctestrunError(\n \"No generated xctestrun file was found in the dummy project's build \"\n \"products dir.\")\n self._xctestrun_file_path = os.path.join(self._test_root_dir,\n 'xctestrun.plist')\n shutil.move(generated_xctestrun_file_paths[0],\n self._xctestrun_file_path)\n self._xctestrun_obj = XctestRun(\n self._xctestrun_file_path, test_type=self._test_type)\n 
self._xctestrun_obj.SetXctestrunField(\n 'TestBundlePath', self._test_bundle_dir)",
"def set_workdir(self, workdir=None):\n self.workdir = workdir\n\n # define temporal directory\n if type(self.workdir) is not str:\n self.workdir = tempfile.gettempdir()\n\n # check if workdir exist\n if not os.path.exists(self.workdir):\n os.makedirs(self.workdir) # create dir\n\n # create log and error files\n self.stdlog = os.path.join(self.workdir, \"processing.log\")\n self.errlog = os.path.join(self.workdir, \"processing.error.log\")",
"def _GenerateXctestrunFileForXcuitest(self):\n dummyproject_derived_data_dir = os.path.join(self._work_dir,\n 'dummyproject_derived_data')\n with dummy_project.DummyProject(\n self._app_under_test_dir, self._test_bundle_dir, self._sdk,\n self._test_type, self._work_dir) as dummy_project_instance:\n if (self._signing_options and\n self._signing_options.get('xctrunner_app_provisioning_profile')):\n dummy_project_instance.SetTestBundleProvisioningProfile(\n self._signing_options.get('xctrunner_app_provisioning_profile'))\n # Use TEST_ROOT as dummy project's build products dir.\n dummy_project_instance.BuildForTesting(\n self._test_root_dir, dummyproject_derived_data_dir)\n\n # The basic xctestrun file and XCTRunner app are under the build products\n # directory of dummy project's derived data dir.\n # DerivedData\n # |\n # +--Build\n # |\n # +--Products\n # |\n # +--Debug-***\n # |\n # +--***-Runner.app\n # +--***.xctestrun\n derived_data_build_products_dir = os.path.join(\n dummyproject_derived_data_dir, 'Build', 'Products')\n\n generated_xctrunner_app_dirs = glob.glob('%s/Debug-*/*-Runner.app' %\n derived_data_build_products_dir)\n if not generated_xctrunner_app_dirs:\n raise ios_errors.XctestrunError(\"No generated XCTRunner app was found in \"\n \"the dummy project's build products dir.\")\n if len(generated_xctrunner_app_dirs) > 1:\n raise ios_errors.XctestrunError(\"More than one XCTRunner app were found \"\n \"in the dummy project's build products \"\n \"dir.\")\n\n xctrunner_app_dir = os.path.join(\n self._test_root_dir, os.path.basename(generated_xctrunner_app_dirs[0]))\n shutil.move(generated_xctrunner_app_dirs[0], xctrunner_app_dir)\n if (self._signing_options and\n self._signing_options.get('xctrunner_app_enable_ui_file_sharing')):\n try:\n bundle_util.EnableUIFileSharing(xctrunner_app_dir)\n except ios_errors.BundleError as e:\n logging.warning(e.output)\n # The test bundle under XCTRunner.app/PlugIns is not actual test bundle. 
It\n # only contains Info.plist and _CodeSignature. So copy the real test bundle\n # under XCTRunner.app/PlugIns to replace it.\n xctrunner_plugins_dir = os.path.join(xctrunner_app_dir, 'PlugIns')\n if os.path.exists(xctrunner_plugins_dir):\n shutil.rmtree(xctrunner_plugins_dir)\n os.mkdir(xctrunner_plugins_dir)\n # The test bundle should not exist under the new generated XCTRunner.app.\n if os.path.islink(self._test_bundle_dir):\n # The test bundle under PlugIns can not be symlink since it will cause\n # app installation error.\n new_test_bundle_path = os.path.join(\n xctrunner_plugins_dir, os.path.basename(self._test_bundle_dir))\n shutil.copytree(self._test_bundle_dir, new_test_bundle_path)\n self._test_bundle_dir = new_test_bundle_path\n else:\n self._test_bundle_dir = _MoveAndReplaceFile(\n self._test_bundle_dir, xctrunner_plugins_dir)\n\n generated_xctestrun_file_paths = glob.glob('%s/*.xctestrun' %\n derived_data_build_products_dir)\n if not generated_xctestrun_file_paths:\n raise ios_errors.XctestrunError(\n \"No generated xctestrun file was found in the dummy project's build \"\n \"products dir.\")\n self._xctestrun_file_path = os.path.join(self._test_root_dir,\n 'xctestrun.plist')\n shutil.move(generated_xctestrun_file_paths[0],\n self._xctestrun_file_path)\n\n self._xctestrun_obj = XctestRun(\n self._xctestrun_file_path, self._test_type)\n self._xctestrun_obj.SetXctestrunField('TestHostPath', xctrunner_app_dir)\n self._xctestrun_obj.SetXctestrunField(\n 'UITargetAppPath', self._app_under_test_dir)\n self._xctestrun_obj.SetXctestrunField(\n 'TestBundlePath', self._test_bundle_dir)\n # When running on iphoneos, it is necessary to remove this field.\n # For iphonesimulator, this field won't effect the test functionality. To\n # be consistent, remove this field.\n self._xctestrun_obj.DeleteXctestrunField(\n 'TestingEnvironmentVariables:IDEiPhoneInternalTestBundleName')",
"def set_workdir(self, workdir=None):\n self.Script.set_work_directory(workdir)\n return",
"def setUp(self):\n super(ExampleProcessTestCase, self).setUp()\n self.files_path = TEST_FILES_DIR",
"def test_work_dir(self):\n self.__assert_empty_builder()\n self.__builder.work_dir('/test/test')\n self.assertEqual('path - -dir /test/test ', str(self.__builder))",
"def fill_xctest_run(self, out_dir):\n folder = os.path.abspath(os.path.join(out_dir, os.pardir))\n if not os.path.exists(folder):\n os.makedirs(folder)\n xctestrun = os.path.join(folder, 'run_%d.xctestrun' % int(time.time()))\n if not os.path.exists(xctestrun):\n with open(xctestrun, 'w'):\n pass\n # Creates a dict with data about egtests to run - fill all required fields:\n # egtests_module, egtest_app_path, egtests_xctest_path and\n # filtered tests if filter is specified.\n # Write data in temp xctest run file.\n plistlib.writePlist(self.fill_xctestrun_node(), xctestrun)\n return xctestrun",
"def _init_working_dir(self):\n self._working_dir_name = ProctorConfig.get_proctor_working_dir()",
"def __init__(self,kim_code,*args,**kwargs):\n super(TestDriver,self).__init__(kim_code,*args,**kwargs)\n self.executable = os.path.join(self.path, self.kim_code)",
"def init_local_test_(self, testing_framework, inst_type):\n dirname = os.path.join(self.local_res, get_prefix(testing_framework, inst_type))\n os.mkdir(dirname)\n self.curr_local_dir = dirname\n self.proj.save_proj_json(self.curr_local_dir)\n self.device.save_device_specs(os.path.join(self.curr_local_dir, \"device.json\"))\n self.device.save_device_info(os.path.join(self.curr_local_dir, \"deviceState.json\"))",
"def fill_xctestrun_node(self):\n xctestrun_data = {\n 'TestTargetName': {\n 'IsAppHostedTestBundle': True,\n 'TestBundlePath': '__TESTHOST__%s' % self._xctest_path(),\n 'TestHostBundleIdentifier': get_bundle_id(self.test_app_path),\n 'TestHostPath': '%s' % self.test_app_path,\n 'TestingEnvironmentVariables': {\n 'DYLD_INSERT_LIBRARIES':\n '__TESTHOST__/Frameworks/libXCTestBundleInject.dylib',\n 'DYLD_LIBRARY_PATH':\n '__PLATFORMS__/iPhoneOS.platform/Developer/Library',\n 'DYLD_FRAMEWORK_PATH':\n '__PLATFORMS__/iPhoneOS.platform/Developer/'\n 'Library/Frameworks',\n 'XCInjectBundleInto':\n '__TESTHOST__/%s' % self.module_name\n }\n }\n }\n\n if self.env_vars:\n xctestrun_data['TestTargetName'].update(\n {'EnvironmentVariables': self.env_vars})\n\n if self.included_tests or self.excluded_tests:\n gtest_filter = get_gtest_filter(self.included_tests, self.excluded_tests)\n # Removed previous gtest-filter if exists.\n self.test_args = [\n el for el in self.test_args if not el.startswith('--gtest_filter=')\n ]\n self.test_args.append('--gtest_filter=%s' % gtest_filter)\n\n if self.repeat_count > 1:\n self.test_args.append('--gtest_repeat=%s' % self.repeat_count)\n\n self.test_args.append('--gmock_verbose=error')\n self.test_args.append(GENERATE_COMPILED_GTESTS_FILE_TEST_ARG)\n\n xctestrun_data['TestTargetName'].update(\n {'CommandLineArguments': self.test_args})\n\n return xctestrun_data",
"def setup_testrun_dir():\n test_run = \"testrun_{}\".format(int(time.time()))\n os.mkdir(test_run)\n this_files_dir = os.path.dirname(os.path.realpath(__file__))\n config_templates = os.path.join(this_files_dir, \"integration\", \"config\")\n os.mkdir(os.path.join(test_run, \"runfolders\"))\n shutil.copy2(os.path.join(config_templates, \"app.config\"), test_run)\n shutil.copy2(os.path.join(config_templates, \"logger.config\"), test_run)\n return os.path.realpath(test_run)",
"def fill_xctestrun_node(self):\n xctestrun_data = {\n 'TestTargetName': {\n 'IsAppHostedTestBundle': True,\n 'TestBundlePath': '__TESTHOST__%s' % self._xctest_path(),\n 'TestHostBundleIdentifier': get_bundle_id(self.test_app_path),\n 'TestHostPath': '%s' % self.test_app_path,\n 'TestingEnvironmentVariables': {\n 'DYLD_INSERT_LIBRARIES':\n '__PLATFORMS__/iPhoneSimulator.platform/Developer/usr/lib/'\n 'libXCTestBundleInject.dylib',\n 'DYLD_LIBRARY_PATH':\n '__PLATFORMS__/iPhoneSimulator.platform/Developer/Library',\n 'DYLD_FRAMEWORK_PATH':\n '__PLATFORMS__/iPhoneSimulator.platform/Developer/'\n 'Library/Frameworks',\n 'XCInjectBundleInto':\n '__TESTHOST__/%s' % self.module_name\n }\n }\n }\n\n if self.env_vars:\n xctestrun_data['TestTargetName'].update(\n {'EnvironmentVariables': self.env_vars})\n\n if self.included_tests or self.excluded_tests:\n gtest_filter = get_gtest_filter(self.included_tests, self.excluded_tests)\n # Removed previous gtest-filter if exists.\n self.test_args = [\n el for el in self.test_args if not el.startswith('--gtest_filter=')\n ]\n self.test_args.append('--gtest_filter=%s' % gtest_filter)\n\n if self.repeat_count > 1:\n self.test_args.append('--gtest_repeat=%s' % self.repeat_count)\n\n self.test_args.append('--gmock_verbose=error')\n self.test_args.append(GENERATE_COMPILED_GTESTS_FILE_TEST_ARG)\n\n xctestrun_data['TestTargetName'].update(\n {'CommandLineArguments': self.test_args})\n\n return xctestrun_data",
"def __init__(self, *args, **kwargs):\n super(ExecUnitTest, self).__init__(*args, **kwargs)\n #find command\n cmd = \"../{0}/{1}\".format(self.name, self.progname)\n path = sys.modules[self.__module__].__file__\n if path:\n cmd = os.path.dirname(os.path.realpath(path)) + '/' + cmd\n self.cmd = cmd"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generates the xctestrun file for XCUITest. The approach is creating a dummy project. Run 'buildfortesting' with the dummy project. Then the xctestrun file and XCTRunner app template will be under the build products directory of dummy project's derived data dir.
|
def _GenerateXctestrunFileForXcuitest(self):
  """Generates the xctestrun file for an XCUITest run.

  Approach: create a dummy Xcode project, then run `build-for-testing` on
  it. The basic xctestrun file and the XCTRunner app template will then be
  under the build products directory of the dummy project's derived data
  dir.

  Side effects: moves the generated XCTRunner app and xctestrun file into
  ``self._test_root_dir``, replaces the placeholder test bundle under the
  runner's PlugIns dir with the real one, and sets
  ``self._test_bundle_dir``, ``self._xctestrun_file_path`` and
  ``self._xctestrun_obj``.

  Raises:
    ios_errors.XctestrunError: if zero or more than one XCTRunner app, or
      no xctestrun file, is found in the dummy project's build products
      dir.
  """
  dummyproject_derived_data_dir = os.path.join(self._work_dir,
                                               'dummyproject_derived_data')
  with dummy_project.DummyProject(
      self._app_under_test_dir, self._test_bundle_dir, self._sdk,
      self._test_type, self._work_dir) as dummy_project_instance:
    # If a provisioning profile was supplied for the XCTRunner app, apply
    # it to the dummy project's test bundle before building.
    if (self._signing_options and
        self._signing_options.get('xctrunner_app_provisioning_profile')):
      dummy_project_instance.SetTestBundleProvisioningProfile(
          self._signing_options.get('xctrunner_app_provisioning_profile'))
    # Use TEST_ROOT as dummy project's build products dir.
    dummy_project_instance.BuildForTesting(
        self._test_root_dir, dummyproject_derived_data_dir)

  # The basic xctestrun file and XCTRunner app are under the build products
  # directory of dummy project's derived data dir.
  # DerivedData
  # |
  # +--Build
  #    |
  #    +--Products
  #       |
  #       +--Debug-***
  #          |
  #          +--***-Runner.app
  #          +--***.xctestrun
  derived_data_build_products_dir = os.path.join(
      dummyproject_derived_data_dir, 'Build', 'Products')

  generated_xctrunner_app_dirs = glob.glob('%s/Debug-*/*-Runner.app' %
                                           derived_data_build_products_dir)
  if not generated_xctrunner_app_dirs:
    raise ios_errors.XctestrunError("No generated XCTRunner app was found in "
                                    "the dummy project's build products dir.")
  if len(generated_xctrunner_app_dirs) > 1:
    raise ios_errors.XctestrunError("More than one XCTRunner app were found "
                                    "in the dummy project's build products "
                                    "dir.")

  # Relocate the runner app under the test root so later steps (signing,
  # PlugIns replacement) operate on the final location.
  xctrunner_app_dir = os.path.join(
      self._test_root_dir, os.path.basename(generated_xctrunner_app_dirs[0]))
  shutil.move(generated_xctrunner_app_dirs[0], xctrunner_app_dir)
  if (self._signing_options and
      self._signing_options.get('xctrunner_app_enable_ui_file_sharing')):
    try:
      bundle_util.EnableUIFileSharing(xctrunner_app_dir)
    except ios_errors.BundleError as e:
      # Best-effort: UI file sharing failure should not abort the run.
      logging.warning(e.output)

  # The test bundle under XCTRunner.app/PlugIns is not actual test bundle. It
  # only contains Info.plist and _CodeSignature. So copy the real test bundle
  # under XCTRunner.app/PlugIns to replace it.
  xctrunner_plugins_dir = os.path.join(xctrunner_app_dir, 'PlugIns')
  if os.path.exists(xctrunner_plugins_dir):
    shutil.rmtree(xctrunner_plugins_dir)
  os.mkdir(xctrunner_plugins_dir)
  # The test bundle should not exist under the new generated XCTRunner.app.
  if os.path.islink(self._test_bundle_dir):
    # The test bundle under PlugIns can not be symlink since it will cause
    # app installation error.
    new_test_bundle_path = os.path.join(
        xctrunner_plugins_dir, os.path.basename(self._test_bundle_dir))
    shutil.copytree(self._test_bundle_dir, new_test_bundle_path)
    self._test_bundle_dir = new_test_bundle_path
  else:
    self._test_bundle_dir = _MoveAndReplaceFile(
        self._test_bundle_dir, xctrunner_plugins_dir)

  generated_xctestrun_file_paths = glob.glob('%s/*.xctestrun' %
                                             derived_data_build_products_dir)
  if not generated_xctestrun_file_paths:
    raise ios_errors.XctestrunError(
        "No generated xctestrun file was found in the dummy project's build "
        "products dir.")
  self._xctestrun_file_path = os.path.join(self._test_root_dir,
                                           'xctestrun.plist')
  shutil.move(generated_xctestrun_file_paths[0],
              self._xctestrun_file_path)

  # Point the generated xctestrun file at the relocated runner app, the app
  # under test, and the real test bundle.
  self._xctestrun_obj = XctestRun(
      self._xctestrun_file_path, self._test_type)
  self._xctestrun_obj.SetXctestrunField('TestHostPath', xctrunner_app_dir)
  self._xctestrun_obj.SetXctestrunField(
      'UITargetAppPath', self._app_under_test_dir)
  self._xctestrun_obj.SetXctestrunField(
      'TestBundlePath', self._test_bundle_dir)
  # When running on iphoneos, it is necessary to remove this field.
  # For iphonesimulator, this field won't effect the test functionality. To
  # be consistent, remove this field.
  self._xctestrun_obj.DeleteXctestrunField(
      'TestingEnvironmentVariables:IDEiPhoneInternalTestBundleName')
|
[
"def _GenerateXctestrunFileForXctest(self):\n dummyproject_derived_data_dir = os.path.join(self._work_dir,\n 'dummyproject_derived_data')\n with dummy_project.DummyProject(\n self._app_under_test_dir, self._test_bundle_dir, self._sdk,\n self._test_type, self._work_dir) as dummy_project_instance:\n # Use TEST_ROOT as dummy project's build products dir.\n dummy_project_instance.BuildForTesting(\n self._test_root_dir, dummyproject_derived_data_dir)\n\n app_under_test_plugins_dir = os.path.join(\n self._app_under_test_dir, 'PlugIns')\n if not os.path.exists(app_under_test_plugins_dir):\n os.mkdir(app_under_test_plugins_dir)\n new_test_bundle_path = os.path.join(\n app_under_test_plugins_dir, os.path.basename(self._test_bundle_dir))\n # The test bundle under PlugIns can not be symlink since it will cause\n # app installation error.\n if os.path.islink(self._test_bundle_dir):\n shutil.copytree(self._test_bundle_dir, new_test_bundle_path)\n self._test_bundle_dir = new_test_bundle_path\n elif new_test_bundle_path != self._test_bundle_dir:\n self._test_bundle_dir = _MoveAndReplaceFile(\n self._test_bundle_dir, app_under_test_plugins_dir)\n\n # The xctestrun file are under the build products directory of dummy\n # project's derived data dir.\n # DerivedData\n # |\n # +--Build\n # |\n # +--Products\n # |\n # +--***.xctestrun\n derived_data_build_products_dir = os.path.join(\n dummyproject_derived_data_dir, 'Build', 'Products')\n generated_xctestrun_file_paths = glob.glob('%s/*.xctestrun' %\n derived_data_build_products_dir)\n if not generated_xctestrun_file_paths:\n raise ios_errors.XctestrunError(\n \"No generated xctestrun file was found in the dummy project's build \"\n \"products dir.\")\n self._xctestrun_file_path = os.path.join(self._test_root_dir,\n 'xctestrun.plist')\n shutil.move(generated_xctestrun_file_paths[0],\n self._xctestrun_file_path)\n self._xctestrun_obj = XctestRun(\n self._xctestrun_file_path, test_type=self._test_type)\n 
self._xctestrun_obj.SetXctestrunField(\n 'TestBundlePath', self._test_bundle_dir)",
"def _GenerateXctestrunFileForLogicTest(self):\n self._xctestrun_file_path = os.path.join(\n self._test_root_dir, 'xctestrun.plist')\n test_bundle_name = os.path.basename(self._test_bundle_dir).split('.')[0]\n plist_util.Plist(self._xctestrun_file_path).SetPlistField(\n test_bundle_name, {})\n self._xctestrun_obj = XctestRun(\n self._xctestrun_file_path, test_type=self._test_type)\n self._xctestrun_obj.SetXctestrunField(\n 'TestBundlePath', self._test_bundle_dir)\n self._xctestrun_obj.SetXctestrunField(\n 'TestHostPath', xcode_info_util.GetXctestToolPath(self._sdk))\n dyld_framework_path = os.path.join(\n xcode_info_util.GetSdkPlatformPath(self._sdk),\n 'Developer/Library/Frameworks')\n self._xctestrun_obj.SetXctestrunField(\n 'TestingEnvironmentVariables',\n {'DYLD_FRAMEWORK_PATH': dyld_framework_path,\n 'DYLD_LIBRARY_PATH': dyld_framework_path})",
"def fill_xctestrun_node(self):\n xctestrun_data = {\n 'TestTargetName': {\n 'IsAppHostedTestBundle': True,\n 'TestBundlePath': '__TESTHOST__%s' % self._xctest_path(),\n 'TestHostBundleIdentifier': get_bundle_id(self.test_app_path),\n 'TestHostPath': '%s' % self.test_app_path,\n 'TestingEnvironmentVariables': {\n 'DYLD_INSERT_LIBRARIES':\n '__TESTHOST__/Frameworks/libXCTestBundleInject.dylib',\n 'DYLD_LIBRARY_PATH':\n '__PLATFORMS__/iPhoneOS.platform/Developer/Library',\n 'DYLD_FRAMEWORK_PATH':\n '__PLATFORMS__/iPhoneOS.platform/Developer/'\n 'Library/Frameworks',\n 'XCInjectBundleInto':\n '__TESTHOST__/%s' % self.module_name\n }\n }\n }\n\n if self.env_vars:\n xctestrun_data['TestTargetName'].update(\n {'EnvironmentVariables': self.env_vars})\n\n if self.included_tests or self.excluded_tests:\n gtest_filter = get_gtest_filter(self.included_tests, self.excluded_tests)\n # Removed previous gtest-filter if exists.\n self.test_args = [\n el for el in self.test_args if not el.startswith('--gtest_filter=')\n ]\n self.test_args.append('--gtest_filter=%s' % gtest_filter)\n\n if self.repeat_count > 1:\n self.test_args.append('--gtest_repeat=%s' % self.repeat_count)\n\n self.test_args.append('--gmock_verbose=error')\n self.test_args.append(GENERATE_COMPILED_GTESTS_FILE_TEST_ARG)\n\n xctestrun_data['TestTargetName'].update(\n {'CommandLineArguments': self.test_args})\n\n return xctestrun_data",
"def fill_xctestrun_node(self):\n xctestrun_data = {\n 'TestTargetName': {\n 'IsAppHostedTestBundle': True,\n 'TestBundlePath': '__TESTHOST__%s' % self._xctest_path(),\n 'TestHostBundleIdentifier': get_bundle_id(self.test_app_path),\n 'TestHostPath': '%s' % self.test_app_path,\n 'TestingEnvironmentVariables': {\n 'DYLD_INSERT_LIBRARIES':\n '__PLATFORMS__/iPhoneSimulator.platform/Developer/usr/lib/'\n 'libXCTestBundleInject.dylib',\n 'DYLD_LIBRARY_PATH':\n '__PLATFORMS__/iPhoneSimulator.platform/Developer/Library',\n 'DYLD_FRAMEWORK_PATH':\n '__PLATFORMS__/iPhoneSimulator.platform/Developer/'\n 'Library/Frameworks',\n 'XCInjectBundleInto':\n '__TESTHOST__/%s' % self.module_name\n }\n }\n }\n\n if self.env_vars:\n xctestrun_data['TestTargetName'].update(\n {'EnvironmentVariables': self.env_vars})\n\n if self.included_tests or self.excluded_tests:\n gtest_filter = get_gtest_filter(self.included_tests, self.excluded_tests)\n # Removed previous gtest-filter if exists.\n self.test_args = [\n el for el in self.test_args if not el.startswith('--gtest_filter=')\n ]\n self.test_args.append('--gtest_filter=%s' % gtest_filter)\n\n if self.repeat_count > 1:\n self.test_args.append('--gtest_repeat=%s' % self.repeat_count)\n\n self.test_args.append('--gmock_verbose=error')\n self.test_args.append(GENERATE_COMPILED_GTESTS_FILE_TEST_ARG)\n\n xctestrun_data['TestTargetName'].update(\n {'CommandLineArguments': self.test_args})\n\n return xctestrun_data",
"def fill_xctest_run(self, out_dir):\n folder = os.path.abspath(os.path.join(out_dir, os.pardir))\n if not os.path.exists(folder):\n os.makedirs(folder)\n xctestrun = os.path.join(folder, 'run_%d.xctestrun' % int(time.time()))\n if not os.path.exists(xctestrun):\n with open(xctestrun, 'w'):\n pass\n # Creates a dict with data about egtests to run - fill all required fields:\n # egtests_module, egtest_app_path, egtests_xctest_path and\n # filtered tests if filter is specified.\n # Write data in temp xctest run file.\n plistlib.writePlist(self.fill_xctestrun_node(), xctestrun)\n return xctestrun",
"def gen_pytest_xmls(args):\n if args.testcases and args.testsuites:\n return\n\n if not args.testrun_id:\n raise TestcasesException('The testrun id was not specified')\n gen_xmls.run_pytest(args.testrun_id)",
"def build_tests():\r\n run(dir(\"Macaroni\", \"Next\", \"Tests\"), \"cavatappi -d -i\")",
"def test_generate_buildfile(self):\n mydir = os.path.dirname(__file__)\n path = os.path.join(mydir, 'data')\n buildfilepath = os.path.join(path, 'build.yml')\n assert not os.path.exists(buildfilepath), \"%s already exists\" % buildfilepath\n build.generate_build_file(path)\n assert os.path.exists(buildfilepath)\n build.build_package(None, 'test_generated', 'generated', [], buildfilepath)\n os.remove(buildfilepath)\n from quilt.data.test_generated.generated import bad, foo, nuts, README # pylint:disable=W0612",
"def generate_go_test_code(self, root_codegen_release, import_prefix, paths):\n go_test_code_template = self.template_engine.get_template('mdt_telemetry_test.go.jinja2')\n go_test_code = go_test_code_template.render(package=self.package, import_prefix=import_prefix, paths=paths)\n # Append _test. yielding filename_test.go\n main_components = self.main.split('.')\n main_components.insert(1, '_test.')\n main_test_filename = os.path.join(\n root_codegen_release,\n ''.join(main_components)\n )\n logging.info('Writing Go test to %s', main_test_filename)\n with open(main_test_filename, 'w') as main_test_fd:\n main_test_fd.write(go_test_code)",
"def BuildTemplates(self):\n if args.config:\n build_args = [\n \"--verbose\", \"--config\", args.config, \"build\", \"--output\",\n args.output_dir\n ]\n else:\n build_args = [\"--verbose\", \"build\", \"--output\", args.output_dir]\n\n wix_tools_path = self._WixToolsPath()\n build_args += [\n \"-p\",\n \"ClientBuilder.wix_tools_path=%{\" + wix_tools_path + \"}\",\n \"-p\",\n \"ClientBuilder.build_msi=True\",\n \"-p\",\n \"ClientBuilder.fleetspeak_bundled=True\",\n ]\n _VerboseCheckCall([self.grr_client_build64] + build_args)",
"def test_addon_template(self):\n self.maxDiff = None\n result = self.create_template()\n self.assertItemsEqual(\n result.files_created.keys(),\n [\n self.project + '/.travis.yml',\n self.project + '/bootstrap.py',\n self.project + '/buildout.cfg',\n self.project + '/CHANGES.rst',\n self.project + '/CONTRIBUTORS.rst',\n self.project + '/docs',\n self.project + '/docs/LICENSE.GPL',\n self.project + '/docs/LICENSE.txt',\n self.project + '/Makefile',\n self.project + '/MANIFEST.in',\n self.project + '/README.rst',\n self.project + '/setup.py',\n self.project + '/src',\n self.project + '/src/customer',\n self.project + '/src/customer/__init__.py',\n self.project + '/src/customer/site',\n self.project + '/src/customer/site/__init__.py',\n self.project + '/src/customer/site/contenttype',\n self.project + '/src/customer/site/contenttype/__init__.py',\n self.project + '/src/customer/site/contenttype/browser',\n self.project + '/src/customer/site/contenttype/browser/__init__.py',\n self.project + '/src/customer/site/contenttype/browser/configure.zcml',\n self.project + '/src/customer/site/contenttype/browser/templates',\n self.project + '/src/customer/site/contenttype/browser/templates/helloworld.pt',\n self.project + '/src/customer/site/contenttype/browser/view.py',\n self.project + '/src/customer/site/contenttype/config.py',\n self.project + '/src/customer/site/contenttype/configure.zcml',\n self.project + '/src/customer/site/contenttype/content',\n self.project + '/src/customer/site/contenttype/content/__init__.py',\n self.project + '/src/customer/site/contenttype/content/example.py',\n self.project + '/src/customer/site/contenttype/interfaces.py',\n self.project + '/src/customer/site/contenttype/profiles',\n self.project + '/src/customer/site/contenttype/profiles.zcml',\n self.project + '/src/customer/site/contenttype/profiles/default',\n self.project + '/src/customer/site/contenttype/profiles/default/browserlayer.xml',\n self.project + 
'/src/customer/site/contenttype/profiles/default/metadata.xml',\n self.project + '/src/customer/site/contenttype/profiles/default/rolemap.xml',\n self.project + '/src/customer/site/contenttype/profiles/default/types',\n self.project + '/src/customer/site/contenttype/profiles/default/types.xml',\n self.project + '/src/customer/site/contenttype/profiles/default/types/Example.xml',\n self.project + '/src/customer/site/contenttype/profiles/uninstall',\n self.project + '/src/customer/site/contenttype/profiles/uninstall/customer.site.contenttype.txt',\n self.project + '/src/customer/site/contenttype/static',\n self.project + '/src/customer/site/contenttype/static/document_icon.png',\n self.project + '/src/customer/site/contenttype/testing.py',\n self.project + '/src/customer/site/contenttype/tests',\n self.project + '/src/customer/site/contenttype/tests/__init__.py',\n self.project + '/src/customer/site/contenttype/tests/test_content.py',\n self.project + '/src/customer/site/contenttype/tests/test_example.robot',\n self.project + '/src/customer/site/contenttype/tests/test_robot.py',\n self.project + '/src/customer/site/contenttype/tests/test_setup.py',\n self.project + '/src/customer/site/contenttype/upgrades',\n self.project + '/src/customer/site/contenttype/upgrades/__init__.py',\n self.project + '/src/customer/site/contenttype/upgrades/configure.zcml',\n self.project + '/src/customer/site/contenttype/upgrades/v1010',\n self.project + '/src/customer/site/contenttype/upgrades/v1010/__init__.py',\n self.project + '/src/customer/site/contenttype/upgrades/v1010/configure.zcml',\n self.project + '/src/customer/site/contenttype/upgrades/v1010/handler.py',\n self.project + '/src/customer/site/contenttype/upgrades/v1010/profile',\n self.project + '/src/customer/site/contenttype/upgrades/v1010/profile/metadata.xml',\n self.project,\n ]\n )",
"def test_addon_template(self):\n self.maxDiff = None\n result = self.create_template()\n self.assertItemsEqual(\n result.files_created.keys(),\n [\n self.project + '/.travis.yml',\n self.project + '/bootstrap.py',\n self.project + '/buildout.cfg',\n self.project + '/CHANGES.rst',\n self.project + '/CONTRIBUTORS.rst',\n self.project + '/docs',\n self.project + '/docs/LICENSE.GPL',\n self.project + '/docs/LICENSE.txt',\n self.project + '/Makefile',\n self.project + '/MANIFEST.in',\n self.project + '/README.rst',\n self.project + '/setup.py',\n self.project + '/src',\n self.project + '/src/sc',\n self.project + '/src/sc/__init__.py',\n self.project + '/src/sc/addon',\n self.project + '/src/sc/addon/__init__.py',\n self.project + '/src/sc/addon/config.py',\n self.project + '/src/sc/addon/configure.zcml',\n self.project + '/src/sc/addon/controlpanel.py',\n self.project + '/src/sc/addon/Extensions',\n self.project + '/src/sc/addon/Extensions/__init__.py',\n self.project + '/src/sc/addon/Extensions/Install.py',\n self.project + '/src/sc/addon/interfaces.py',\n self.project + '/src/sc/addon/profiles',\n self.project + '/src/sc/addon/profiles.zcml',\n self.project + '/src/sc/addon/profiles/default',\n self.project + '/src/sc/addon/profiles/default/browserlayer.xml',\n self.project + '/src/sc/addon/profiles/default/controlpanel.xml',\n self.project + '/src/sc/addon/profiles/default/metadata.xml',\n self.project + '/src/sc/addon/profiles/default/registry.xml',\n self.project + '/src/sc/addon/profiles/uninstall',\n self.project + '/src/sc/addon/profiles/uninstall/registry.xml',\n self.project + '/src/sc/addon/setuphandlers.py',\n self.project + '/src/sc/addon/static',\n self.project + '/src/sc/addon/static/addon-icon.png',\n self.project + '/src/sc/addon/testing.py',\n self.project + '/src/sc/addon/tests',\n self.project + '/src/sc/addon/tests/__init__.py',\n self.project + '/src/sc/addon/tests/test_controlpanel.py',\n self.project + 
'/src/sc/addon/tests/test_example.robot',\n self.project + '/src/sc/addon/tests/test_robot.py',\n self.project + '/src/sc/addon/tests/test_setup.py',\n self.project + '/src/sc/addon/upgrades',\n self.project + '/src/sc/addon/upgrades/__init__.py',\n self.project + '/src/sc/addon/upgrades/configure.zcml',\n self.project + '/src/sc/addon/upgrades/v2',\n self.project + '/src/sc/addon/upgrades/v2/__init__.py',\n self.project + '/src/sc/addon/upgrades/v2/configure.zcml',\n self.project + '/src/sc/addon/upgrades/v2/profile',\n self.project + '/src/sc/addon/upgrades/v2/profile/metadata.xml',\n self.project,\n ]\n )",
"def test_building(self):\n OPTS = ['--debug', '--noupx',\n '--specpath', self._specdir,\n '--distpath', self._distdir,\n '--workpath', self._builddir]\n\n if self.verbose:\n OPTS.extend(['--debug', '--log-level=INFO'])\n else:\n OPTS.append('--log-level=ERROR')\n\n # Build executable in onefile mode.\n if self.test_file.startswith('test_onefile'):\n OPTS.append('--onefile')\n else:\n OPTS.append('--onedir')\n\n if self.with_crypto or '_crypto' in self.test_file:\n print('NOTE: Bytecode encryption is enabled for this test.', end=\"\")\n OPTS.append('--key=test_key')\n\n self._msg(\"BUILDING TEST \" + self.test_name)\n\n # Use pyinstaller.py for building test_name.\n testfile_spec = self.test_file + '.spec'\n if not os.path.exists(self.test_file + '.spec'):\n # .spec file does not exist and it has to be generated\n # for main script.\n testfile_spec = self.test_file + '.py'\n\n #pyinst_script = os.path.join(HOMEPATH, 'pyinstaller.py')\n\n # TODO Fix redirecting stdout/stderr\n # In report mode is stdout and sys.stderr redirected.\n #if self.report:\n ## Write output from subprocess to stdout/err.\n #retcode, out, err = compat.exec_python_all(pyinst_script,\n #testfile_spec, *OPTS)\n #sys.stdout.write(out)\n #sys.stdout.write(err)\n #else:\n #retcode = compat.exec_python_rc(pyinst_script,\n #testfile_spec, *OPTS)\n # abspath is required due to makespec.make_path_spec_relative()\n testfile_spec = os.path.abspath(testfile_spec)\n pyi_args = [testfile_spec] + OPTS\n # TODO fix return code in running PyInstaller programatically\n pyi_main.run(pyi_args, PYI_CONFIG)\n retcode = 0\n\n return retcode == 0",
"def build_templates():\n common.execute('python polymer_bundler.py', cwd='local')",
"def otx_build_task_testing(root, task):\n # Build otx-workspace per tasks check - Default Model Template only\n command_line = [\n \"otx\",\n \"build\",\n \"--task\",\n task,\n \"--workspace\",\n os.path.join(root, f\"otx-workspace-{task}\"),\n ]\n check_run(command_line)",
"def generate_template(self, project_name):\n startproject(project_name)",
"def generate(self):\n jinja_ctx = self.create_jinja_ctx()\n\n if not exists(join(self.export_dir, 'nbproject')):\n makedirs(join(self.export_dir, 'nbproject'))\n\n self.gen_file('nb/configurations.tmpl', jinja_ctx, 'nbproject/configurations.xml')\n self.gen_file('nb/project.tmpl', jinja_ctx, 'nbproject/project.xml')\n self.gen_file_nonoverwrite('nb/mbedignore.tmpl', jinja_ctx,\n '.mbedignore')\n self.gen_file('nb/Makefile.tmpl', jinja_ctx, 'Makefile')\n\n print('Done. Import the \\'{0}\\' project in Netbeans.'.format(self.project_name))",
"def setup_testrun_dir():\n test_run = \"testrun_{}\".format(int(time.time()))\n os.mkdir(test_run)\n this_files_dir = os.path.dirname(os.path.realpath(__file__))\n config_templates = os.path.join(this_files_dir, \"integration\", \"config\")\n os.mkdir(os.path.join(test_run, \"runfolders\"))\n shutil.copy2(os.path.join(config_templates, \"app.config\"), test_run)\n shutil.copy2(os.path.join(config_templates, \"logger.config\"), test_run)\n return os.path.realpath(test_run)",
"def setUp(self):\n self._file_upto = 0 # How many files we have created\n self.tmpdir = tempfile.mkdtemp()\n self.output = cros_output.Output()\n self.tools = Tools(self.output)\n self.tools.PrepareOutputDir(None)\n self.bundle = Bundle(self.tools, self.output)\n self.uboot_fname = self.MakeRandomFile(500 * 1024)\n self.bmpblk_fname = os.path.abspath('bin/bmpblk.bin')\n self.bct_fname = os.path.abspath('bin/board.bct')\n self.bundle.SetDirs('##/usr/share/vboot/devkeys')\n self.bundle.SetOptions(False, None)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generates the xctestrun file for XCTest. The approach is creating a dummy project, then running `build-for-testing` with the dummy project. The xctestrun file will then be under the build products directory of the dummy project's derived data dir.
|
def _GenerateXctestrunFileForXctest(self):
  """Generates the xctestrun file for an XCTest run.

  Approach: create a dummy Xcode project, then run `build-for-testing` on
  it. The xctestrun file will then be under the build products directory
  of the dummy project's derived data dir.

  Side effects: places the real test bundle under the app-under-test's
  PlugIns dir, moves the generated xctestrun file into
  ``self._test_root_dir``, and sets ``self._test_bundle_dir``,
  ``self._xctestrun_file_path`` and ``self._xctestrun_obj``.

  Raises:
    ios_errors.XctestrunError: if no generated xctestrun file is found in
      the dummy project's build products dir.
  """
  dummyproject_derived_data_dir = os.path.join(self._work_dir,
                                               'dummyproject_derived_data')
  with dummy_project.DummyProject(
      self._app_under_test_dir, self._test_bundle_dir, self._sdk,
      self._test_type, self._work_dir) as dummy_project_instance:
    # Use TEST_ROOT as dummy project's build products dir.
    dummy_project_instance.BuildForTesting(
        self._test_root_dir, dummyproject_derived_data_dir)

  app_under_test_plugins_dir = os.path.join(
      self._app_under_test_dir, 'PlugIns')
  if not os.path.exists(app_under_test_plugins_dir):
    os.mkdir(app_under_test_plugins_dir)
  new_test_bundle_path = os.path.join(
      app_under_test_plugins_dir, os.path.basename(self._test_bundle_dir))
  # The test bundle under PlugIns can not be symlink since it will cause
  # app installation error.
  if os.path.islink(self._test_bundle_dir):
    shutil.copytree(self._test_bundle_dir, new_test_bundle_path)
    self._test_bundle_dir = new_test_bundle_path
  elif new_test_bundle_path != self._test_bundle_dir:
    # Already a real directory: move it under PlugIns (no-op if it is
    # already there).
    self._test_bundle_dir = _MoveAndReplaceFile(
        self._test_bundle_dir, app_under_test_plugins_dir)

  # The xctestrun file is under the build products directory of dummy
  # project's derived data dir.
  # DerivedData
  # |
  # +--Build
  #    |
  #    +--Products
  #       |
  #       +--***.xctestrun
  derived_data_build_products_dir = os.path.join(
      dummyproject_derived_data_dir, 'Build', 'Products')
  generated_xctestrun_file_paths = glob.glob('%s/*.xctestrun' %
                                             derived_data_build_products_dir)
  if not generated_xctestrun_file_paths:
    raise ios_errors.XctestrunError(
        "No generated xctestrun file was found in the dummy project's build "
        "products dir.")
  self._xctestrun_file_path = os.path.join(self._test_root_dir,
                                           'xctestrun.plist')
  shutil.move(generated_xctestrun_file_paths[0],
              self._xctestrun_file_path)

  # Point the generated xctestrun file at the real test bundle.
  self._xctestrun_obj = XctestRun(
      self._xctestrun_file_path, test_type=self._test_type)
  self._xctestrun_obj.SetXctestrunField(
      'TestBundlePath', self._test_bundle_dir)
|
[
"def _GenerateXctestrunFileForXcuitest(self):\n dummyproject_derived_data_dir = os.path.join(self._work_dir,\n 'dummyproject_derived_data')\n with dummy_project.DummyProject(\n self._app_under_test_dir, self._test_bundle_dir, self._sdk,\n self._test_type, self._work_dir) as dummy_project_instance:\n if (self._signing_options and\n self._signing_options.get('xctrunner_app_provisioning_profile')):\n dummy_project_instance.SetTestBundleProvisioningProfile(\n self._signing_options.get('xctrunner_app_provisioning_profile'))\n # Use TEST_ROOT as dummy project's build products dir.\n dummy_project_instance.BuildForTesting(\n self._test_root_dir, dummyproject_derived_data_dir)\n\n # The basic xctestrun file and XCTRunner app are under the build products\n # directory of dummy project's derived data dir.\n # DerivedData\n # |\n # +--Build\n # |\n # +--Products\n # |\n # +--Debug-***\n # |\n # +--***-Runner.app\n # +--***.xctestrun\n derived_data_build_products_dir = os.path.join(\n dummyproject_derived_data_dir, 'Build', 'Products')\n\n generated_xctrunner_app_dirs = glob.glob('%s/Debug-*/*-Runner.app' %\n derived_data_build_products_dir)\n if not generated_xctrunner_app_dirs:\n raise ios_errors.XctestrunError(\"No generated XCTRunner app was found in \"\n \"the dummy project's build products dir.\")\n if len(generated_xctrunner_app_dirs) > 1:\n raise ios_errors.XctestrunError(\"More than one XCTRunner app were found \"\n \"in the dummy project's build products \"\n \"dir.\")\n\n xctrunner_app_dir = os.path.join(\n self._test_root_dir, os.path.basename(generated_xctrunner_app_dirs[0]))\n shutil.move(generated_xctrunner_app_dirs[0], xctrunner_app_dir)\n if (self._signing_options and\n self._signing_options.get('xctrunner_app_enable_ui_file_sharing')):\n try:\n bundle_util.EnableUIFileSharing(xctrunner_app_dir)\n except ios_errors.BundleError as e:\n logging.warning(e.output)\n # The test bundle under XCTRunner.app/PlugIns is not actual test bundle. 
It\n # only contains Info.plist and _CodeSignature. So copy the real test bundle\n # under XCTRunner.app/PlugIns to replace it.\n xctrunner_plugins_dir = os.path.join(xctrunner_app_dir, 'PlugIns')\n if os.path.exists(xctrunner_plugins_dir):\n shutil.rmtree(xctrunner_plugins_dir)\n os.mkdir(xctrunner_plugins_dir)\n # The test bundle should not exist under the new generated XCTRunner.app.\n if os.path.islink(self._test_bundle_dir):\n # The test bundle under PlugIns can not be symlink since it will cause\n # app installation error.\n new_test_bundle_path = os.path.join(\n xctrunner_plugins_dir, os.path.basename(self._test_bundle_dir))\n shutil.copytree(self._test_bundle_dir, new_test_bundle_path)\n self._test_bundle_dir = new_test_bundle_path\n else:\n self._test_bundle_dir = _MoveAndReplaceFile(\n self._test_bundle_dir, xctrunner_plugins_dir)\n\n generated_xctestrun_file_paths = glob.glob('%s/*.xctestrun' %\n derived_data_build_products_dir)\n if not generated_xctestrun_file_paths:\n raise ios_errors.XctestrunError(\n \"No generated xctestrun file was found in the dummy project's build \"\n \"products dir.\")\n self._xctestrun_file_path = os.path.join(self._test_root_dir,\n 'xctestrun.plist')\n shutil.move(generated_xctestrun_file_paths[0],\n self._xctestrun_file_path)\n\n self._xctestrun_obj = XctestRun(\n self._xctestrun_file_path, self._test_type)\n self._xctestrun_obj.SetXctestrunField('TestHostPath', xctrunner_app_dir)\n self._xctestrun_obj.SetXctestrunField(\n 'UITargetAppPath', self._app_under_test_dir)\n self._xctestrun_obj.SetXctestrunField(\n 'TestBundlePath', self._test_bundle_dir)\n # When running on iphoneos, it is necessary to remove this field.\n # For iphonesimulator, this field won't effect the test functionality. To\n # be consistent, remove this field.\n self._xctestrun_obj.DeleteXctestrunField(\n 'TestingEnvironmentVariables:IDEiPhoneInternalTestBundleName')",
"def _GenerateXctestrunFileForLogicTest(self):\n self._xctestrun_file_path = os.path.join(\n self._test_root_dir, 'xctestrun.plist')\n test_bundle_name = os.path.basename(self._test_bundle_dir).split('.')[0]\n plist_util.Plist(self._xctestrun_file_path).SetPlistField(\n test_bundle_name, {})\n self._xctestrun_obj = XctestRun(\n self._xctestrun_file_path, test_type=self._test_type)\n self._xctestrun_obj.SetXctestrunField(\n 'TestBundlePath', self._test_bundle_dir)\n self._xctestrun_obj.SetXctestrunField(\n 'TestHostPath', xcode_info_util.GetXctestToolPath(self._sdk))\n dyld_framework_path = os.path.join(\n xcode_info_util.GetSdkPlatformPath(self._sdk),\n 'Developer/Library/Frameworks')\n self._xctestrun_obj.SetXctestrunField(\n 'TestingEnvironmentVariables',\n {'DYLD_FRAMEWORK_PATH': dyld_framework_path,\n 'DYLD_LIBRARY_PATH': dyld_framework_path})",
"def fill_xctestrun_node(self):\n xctestrun_data = {\n 'TestTargetName': {\n 'IsAppHostedTestBundle': True,\n 'TestBundlePath': '__TESTHOST__%s' % self._xctest_path(),\n 'TestHostBundleIdentifier': get_bundle_id(self.test_app_path),\n 'TestHostPath': '%s' % self.test_app_path,\n 'TestingEnvironmentVariables': {\n 'DYLD_INSERT_LIBRARIES':\n '__TESTHOST__/Frameworks/libXCTestBundleInject.dylib',\n 'DYLD_LIBRARY_PATH':\n '__PLATFORMS__/iPhoneOS.platform/Developer/Library',\n 'DYLD_FRAMEWORK_PATH':\n '__PLATFORMS__/iPhoneOS.platform/Developer/'\n 'Library/Frameworks',\n 'XCInjectBundleInto':\n '__TESTHOST__/%s' % self.module_name\n }\n }\n }\n\n if self.env_vars:\n xctestrun_data['TestTargetName'].update(\n {'EnvironmentVariables': self.env_vars})\n\n if self.included_tests or self.excluded_tests:\n gtest_filter = get_gtest_filter(self.included_tests, self.excluded_tests)\n # Removed previous gtest-filter if exists.\n self.test_args = [\n el for el in self.test_args if not el.startswith('--gtest_filter=')\n ]\n self.test_args.append('--gtest_filter=%s' % gtest_filter)\n\n if self.repeat_count > 1:\n self.test_args.append('--gtest_repeat=%s' % self.repeat_count)\n\n self.test_args.append('--gmock_verbose=error')\n self.test_args.append(GENERATE_COMPILED_GTESTS_FILE_TEST_ARG)\n\n xctestrun_data['TestTargetName'].update(\n {'CommandLineArguments': self.test_args})\n\n return xctestrun_data",
"def fill_xctestrun_node(self):\n xctestrun_data = {\n 'TestTargetName': {\n 'IsAppHostedTestBundle': True,\n 'TestBundlePath': '__TESTHOST__%s' % self._xctest_path(),\n 'TestHostBundleIdentifier': get_bundle_id(self.test_app_path),\n 'TestHostPath': '%s' % self.test_app_path,\n 'TestingEnvironmentVariables': {\n 'DYLD_INSERT_LIBRARIES':\n '__PLATFORMS__/iPhoneSimulator.platform/Developer/usr/lib/'\n 'libXCTestBundleInject.dylib',\n 'DYLD_LIBRARY_PATH':\n '__PLATFORMS__/iPhoneSimulator.platform/Developer/Library',\n 'DYLD_FRAMEWORK_PATH':\n '__PLATFORMS__/iPhoneSimulator.platform/Developer/'\n 'Library/Frameworks',\n 'XCInjectBundleInto':\n '__TESTHOST__/%s' % self.module_name\n }\n }\n }\n\n if self.env_vars:\n xctestrun_data['TestTargetName'].update(\n {'EnvironmentVariables': self.env_vars})\n\n if self.included_tests or self.excluded_tests:\n gtest_filter = get_gtest_filter(self.included_tests, self.excluded_tests)\n # Removed previous gtest-filter if exists.\n self.test_args = [\n el for el in self.test_args if not el.startswith('--gtest_filter=')\n ]\n self.test_args.append('--gtest_filter=%s' % gtest_filter)\n\n if self.repeat_count > 1:\n self.test_args.append('--gtest_repeat=%s' % self.repeat_count)\n\n self.test_args.append('--gmock_verbose=error')\n self.test_args.append(GENERATE_COMPILED_GTESTS_FILE_TEST_ARG)\n\n xctestrun_data['TestTargetName'].update(\n {'CommandLineArguments': self.test_args})\n\n return xctestrun_data",
"def fill_xctest_run(self, out_dir):\n folder = os.path.abspath(os.path.join(out_dir, os.pardir))\n if not os.path.exists(folder):\n os.makedirs(folder)\n xctestrun = os.path.join(folder, 'run_%d.xctestrun' % int(time.time()))\n if not os.path.exists(xctestrun):\n with open(xctestrun, 'w'):\n pass\n # Creates a dict with data about egtests to run - fill all required fields:\n # egtests_module, egtest_app_path, egtests_xctest_path and\n # filtered tests if filter is specified.\n # Write data in temp xctest run file.\n plistlib.writePlist(self.fill_xctestrun_node(), xctestrun)\n return xctestrun",
"def test_generate_buildfile(self):\n mydir = os.path.dirname(__file__)\n path = os.path.join(mydir, 'data')\n buildfilepath = os.path.join(path, 'build.yml')\n assert not os.path.exists(buildfilepath), \"%s already exists\" % buildfilepath\n build.generate_build_file(path)\n assert os.path.exists(buildfilepath)\n build.build_package(None, 'test_generated', 'generated', [], buildfilepath)\n os.remove(buildfilepath)\n from quilt.data.test_generated.generated import bad, foo, nuts, README # pylint:disable=W0612",
"def gen_pytest_xmls(args):\n if args.testcases and args.testsuites:\n return\n\n if not args.testrun_id:\n raise TestcasesException('The testrun id was not specified')\n gen_xmls.run_pytest(args.testrun_id)",
"def build_tests():\r\n run(dir(\"Macaroni\", \"Next\", \"Tests\"), \"cavatappi -d -i\")",
"def generate_go_test_code(self, root_codegen_release, import_prefix, paths):\n go_test_code_template = self.template_engine.get_template('mdt_telemetry_test.go.jinja2')\n go_test_code = go_test_code_template.render(package=self.package, import_prefix=import_prefix, paths=paths)\n # Append _test. yielding filename_test.go\n main_components = self.main.split('.')\n main_components.insert(1, '_test.')\n main_test_filename = os.path.join(\n root_codegen_release,\n ''.join(main_components)\n )\n logging.info('Writing Go test to %s', main_test_filename)\n with open(main_test_filename, 'w') as main_test_fd:\n main_test_fd.write(go_test_code)",
"def test_addon_template(self):\n self.maxDiff = None\n result = self.create_template()\n self.assertItemsEqual(\n result.files_created.keys(),\n [\n self.project + '/.travis.yml',\n self.project + '/bootstrap.py',\n self.project + '/buildout.cfg',\n self.project + '/CHANGES.rst',\n self.project + '/CONTRIBUTORS.rst',\n self.project + '/docs',\n self.project + '/docs/LICENSE.GPL',\n self.project + '/docs/LICENSE.txt',\n self.project + '/Makefile',\n self.project + '/MANIFEST.in',\n self.project + '/README.rst',\n self.project + '/setup.py',\n self.project + '/src',\n self.project + '/src/customer',\n self.project + '/src/customer/__init__.py',\n self.project + '/src/customer/site',\n self.project + '/src/customer/site/__init__.py',\n self.project + '/src/customer/site/contenttype',\n self.project + '/src/customer/site/contenttype/__init__.py',\n self.project + '/src/customer/site/contenttype/browser',\n self.project + '/src/customer/site/contenttype/browser/__init__.py',\n self.project + '/src/customer/site/contenttype/browser/configure.zcml',\n self.project + '/src/customer/site/contenttype/browser/templates',\n self.project + '/src/customer/site/contenttype/browser/templates/helloworld.pt',\n self.project + '/src/customer/site/contenttype/browser/view.py',\n self.project + '/src/customer/site/contenttype/config.py',\n self.project + '/src/customer/site/contenttype/configure.zcml',\n self.project + '/src/customer/site/contenttype/content',\n self.project + '/src/customer/site/contenttype/content/__init__.py',\n self.project + '/src/customer/site/contenttype/content/example.py',\n self.project + '/src/customer/site/contenttype/interfaces.py',\n self.project + '/src/customer/site/contenttype/profiles',\n self.project + '/src/customer/site/contenttype/profiles.zcml',\n self.project + '/src/customer/site/contenttype/profiles/default',\n self.project + '/src/customer/site/contenttype/profiles/default/browserlayer.xml',\n self.project + 
'/src/customer/site/contenttype/profiles/default/metadata.xml',\n self.project + '/src/customer/site/contenttype/profiles/default/rolemap.xml',\n self.project + '/src/customer/site/contenttype/profiles/default/types',\n self.project + '/src/customer/site/contenttype/profiles/default/types.xml',\n self.project + '/src/customer/site/contenttype/profiles/default/types/Example.xml',\n self.project + '/src/customer/site/contenttype/profiles/uninstall',\n self.project + '/src/customer/site/contenttype/profiles/uninstall/customer.site.contenttype.txt',\n self.project + '/src/customer/site/contenttype/static',\n self.project + '/src/customer/site/contenttype/static/document_icon.png',\n self.project + '/src/customer/site/contenttype/testing.py',\n self.project + '/src/customer/site/contenttype/tests',\n self.project + '/src/customer/site/contenttype/tests/__init__.py',\n self.project + '/src/customer/site/contenttype/tests/test_content.py',\n self.project + '/src/customer/site/contenttype/tests/test_example.robot',\n self.project + '/src/customer/site/contenttype/tests/test_robot.py',\n self.project + '/src/customer/site/contenttype/tests/test_setup.py',\n self.project + '/src/customer/site/contenttype/upgrades',\n self.project + '/src/customer/site/contenttype/upgrades/__init__.py',\n self.project + '/src/customer/site/contenttype/upgrades/configure.zcml',\n self.project + '/src/customer/site/contenttype/upgrades/v1010',\n self.project + '/src/customer/site/contenttype/upgrades/v1010/__init__.py',\n self.project + '/src/customer/site/contenttype/upgrades/v1010/configure.zcml',\n self.project + '/src/customer/site/contenttype/upgrades/v1010/handler.py',\n self.project + '/src/customer/site/contenttype/upgrades/v1010/profile',\n self.project + '/src/customer/site/contenttype/upgrades/v1010/profile/metadata.xml',\n self.project,\n ]\n )",
"def otx_build_task_testing(root, task):\n # Build otx-workspace per tasks check - Default Model Template only\n command_line = [\n \"otx\",\n \"build\",\n \"--task\",\n task,\n \"--workspace\",\n os.path.join(root, f\"otx-workspace-{task}\"),\n ]\n check_run(command_line)",
"def test_addon_template(self):\n self.maxDiff = None\n result = self.create_template()\n self.assertItemsEqual(\n result.files_created.keys(),\n [\n self.project + '/.travis.yml',\n self.project + '/bootstrap.py',\n self.project + '/buildout.cfg',\n self.project + '/CHANGES.rst',\n self.project + '/CONTRIBUTORS.rst',\n self.project + '/docs',\n self.project + '/docs/LICENSE.GPL',\n self.project + '/docs/LICENSE.txt',\n self.project + '/Makefile',\n self.project + '/MANIFEST.in',\n self.project + '/README.rst',\n self.project + '/setup.py',\n self.project + '/src',\n self.project + '/src/sc',\n self.project + '/src/sc/__init__.py',\n self.project + '/src/sc/addon',\n self.project + '/src/sc/addon/__init__.py',\n self.project + '/src/sc/addon/config.py',\n self.project + '/src/sc/addon/configure.zcml',\n self.project + '/src/sc/addon/controlpanel.py',\n self.project + '/src/sc/addon/Extensions',\n self.project + '/src/sc/addon/Extensions/__init__.py',\n self.project + '/src/sc/addon/Extensions/Install.py',\n self.project + '/src/sc/addon/interfaces.py',\n self.project + '/src/sc/addon/profiles',\n self.project + '/src/sc/addon/profiles.zcml',\n self.project + '/src/sc/addon/profiles/default',\n self.project + '/src/sc/addon/profiles/default/browserlayer.xml',\n self.project + '/src/sc/addon/profiles/default/controlpanel.xml',\n self.project + '/src/sc/addon/profiles/default/metadata.xml',\n self.project + '/src/sc/addon/profiles/default/registry.xml',\n self.project + '/src/sc/addon/profiles/uninstall',\n self.project + '/src/sc/addon/profiles/uninstall/registry.xml',\n self.project + '/src/sc/addon/setuphandlers.py',\n self.project + '/src/sc/addon/static',\n self.project + '/src/sc/addon/static/addon-icon.png',\n self.project + '/src/sc/addon/testing.py',\n self.project + '/src/sc/addon/tests',\n self.project + '/src/sc/addon/tests/__init__.py',\n self.project + '/src/sc/addon/tests/test_controlpanel.py',\n self.project + 
'/src/sc/addon/tests/test_example.robot',\n self.project + '/src/sc/addon/tests/test_robot.py',\n self.project + '/src/sc/addon/tests/test_setup.py',\n self.project + '/src/sc/addon/upgrades',\n self.project + '/src/sc/addon/upgrades/__init__.py',\n self.project + '/src/sc/addon/upgrades/configure.zcml',\n self.project + '/src/sc/addon/upgrades/v2',\n self.project + '/src/sc/addon/upgrades/v2/__init__.py',\n self.project + '/src/sc/addon/upgrades/v2/configure.zcml',\n self.project + '/src/sc/addon/upgrades/v2/profile',\n self.project + '/src/sc/addon/upgrades/v2/profile/metadata.xml',\n self.project,\n ]\n )",
"def BuildTemplates(self):\n if args.config:\n build_args = [\n \"--verbose\", \"--config\", args.config, \"build\", \"--output\",\n args.output_dir\n ]\n else:\n build_args = [\"--verbose\", \"build\", \"--output\", args.output_dir]\n\n wix_tools_path = self._WixToolsPath()\n build_args += [\n \"-p\",\n \"ClientBuilder.wix_tools_path=%{\" + wix_tools_path + \"}\",\n \"-p\",\n \"ClientBuilder.build_msi=True\",\n \"-p\",\n \"ClientBuilder.fleetspeak_bundled=True\",\n ]\n _VerboseCheckCall([self.grr_client_build64] + build_args)",
"def test_building(self):\n OPTS = ['--debug', '--noupx',\n '--specpath', self._specdir,\n '--distpath', self._distdir,\n '--workpath', self._builddir]\n\n if self.verbose:\n OPTS.extend(['--debug', '--log-level=INFO'])\n else:\n OPTS.append('--log-level=ERROR')\n\n # Build executable in onefile mode.\n if self.test_file.startswith('test_onefile'):\n OPTS.append('--onefile')\n else:\n OPTS.append('--onedir')\n\n if self.with_crypto or '_crypto' in self.test_file:\n print('NOTE: Bytecode encryption is enabled for this test.', end=\"\")\n OPTS.append('--key=test_key')\n\n self._msg(\"BUILDING TEST \" + self.test_name)\n\n # Use pyinstaller.py for building test_name.\n testfile_spec = self.test_file + '.spec'\n if not os.path.exists(self.test_file + '.spec'):\n # .spec file does not exist and it has to be generated\n # for main script.\n testfile_spec = self.test_file + '.py'\n\n #pyinst_script = os.path.join(HOMEPATH, 'pyinstaller.py')\n\n # TODO Fix redirecting stdout/stderr\n # In report mode is stdout and sys.stderr redirected.\n #if self.report:\n ## Write output from subprocess to stdout/err.\n #retcode, out, err = compat.exec_python_all(pyinst_script,\n #testfile_spec, *OPTS)\n #sys.stdout.write(out)\n #sys.stdout.write(err)\n #else:\n #retcode = compat.exec_python_rc(pyinst_script,\n #testfile_spec, *OPTS)\n # abspath is required due to makespec.make_path_spec_relative()\n testfile_spec = os.path.abspath(testfile_spec)\n pyi_args = [testfile_spec] + OPTS\n # TODO fix return code in running PyInstaller programatically\n pyi_main.run(pyi_args, PYI_CONFIG)\n retcode = 0\n\n return retcode == 0",
"def setup_testrun_dir():\n test_run = \"testrun_{}\".format(int(time.time()))\n os.mkdir(test_run)\n this_files_dir = os.path.dirname(os.path.realpath(__file__))\n config_templates = os.path.join(this_files_dir, \"integration\", \"config\")\n os.mkdir(os.path.join(test_run, \"runfolders\"))\n shutil.copy2(os.path.join(config_templates, \"app.config\"), test_run)\n shutil.copy2(os.path.join(config_templates, \"logger.config\"), test_run)\n return os.path.realpath(test_run)",
"def _build_wxs(ctx, env, outdir):\n wixsetup = f'{outdir}\\\\WixSetup.exe'\n if not os.path.exists(wixsetup):\n raise Exit(f\"WXS builder not found: {wixsetup}\")\n\n # Run the builder to produce the WXS\n # Set an env var to tell WixSetup.exe where to put the output\n env['AGENT_MSI_OUTDIR'] = outdir\n succeeded = ctx.run(\n f'cd {BUILD_SOURCE_DIR}\\\\WixSetup && {wixsetup}',\n warn=True,\n env=env,\n )\n if not succeeded:\n raise Exit(\"Failed to build the MSI WXS.\", code=1)\n\n # sign the MakeSfxCA output files\n _fix_makesfxca_dll(os.path.join(outdir, 'CustomActions.CA.dll'))\n sign_file(ctx, os.path.join(outdir, 'CustomActions.CA.dll'))",
"def setUp(self):\n self._file_upto = 0 # How many files we have created\n self.tmpdir = tempfile.mkdtemp()\n self.output = cros_output.Output()\n self.tools = Tools(self.output)\n self.tools.PrepareOutputDir(None)\n self.bundle = Bundle(self.tools, self.output)\n self.uboot_fname = self.MakeRandomFile(500 * 1024)\n self.bmpblk_fname = os.path.abspath('bin/bmpblk.bin')\n self.bct_fname = os.path.abspath('bin/board.bct')\n self.bundle.SetDirs('##/usr/share/vboot/devkeys')\n self.bundle.SetOptions(False, None)",
"def generate_project_file(self):\n self._create_project_content()\n common_util.file_generate(self.project_file, self.project_content)",
"def _GenerateTestBits(self, tempdir):\n build_root = cros_build_lib.GetSysroot(board=self.board)\n cwd = os.path.join(build_root, BOARD_BUILD_DIR)\n tarball_funcs = [commands.BuildAutotestControlFilesTarball,\n commands.BuildAutotestPackagesTarball,\n commands.BuildAutotestTestSuitesTarball,\n commands.BuildAutotestServerPackageTarball]\n for tarball_func in tarball_funcs:\n tarball_func(build_root, cwd, tempdir)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generates the xctestrun file for Logic Test. The approach is setting on xctestrun.plist directly and using `xctest` tool as the test host of the logic test bundle.
|
def _GenerateXctestrunFileForLogicTest(self):
self._xctestrun_file_path = os.path.join(
self._test_root_dir, 'xctestrun.plist')
test_bundle_name = os.path.basename(self._test_bundle_dir).split('.')[0]
plist_util.Plist(self._xctestrun_file_path).SetPlistField(
test_bundle_name, {})
self._xctestrun_obj = XctestRun(
self._xctestrun_file_path, test_type=self._test_type)
self._xctestrun_obj.SetXctestrunField(
'TestBundlePath', self._test_bundle_dir)
self._xctestrun_obj.SetXctestrunField(
'TestHostPath', xcode_info_util.GetXctestToolPath(self._sdk))
dyld_framework_path = os.path.join(
xcode_info_util.GetSdkPlatformPath(self._sdk),
'Developer/Library/Frameworks')
self._xctestrun_obj.SetXctestrunField(
'TestingEnvironmentVariables',
{'DYLD_FRAMEWORK_PATH': dyld_framework_path,
'DYLD_LIBRARY_PATH': dyld_framework_path})
|
[
"def _GenerateXctestrunFileForXctest(self):\n dummyproject_derived_data_dir = os.path.join(self._work_dir,\n 'dummyproject_derived_data')\n with dummy_project.DummyProject(\n self._app_under_test_dir, self._test_bundle_dir, self._sdk,\n self._test_type, self._work_dir) as dummy_project_instance:\n # Use TEST_ROOT as dummy project's build products dir.\n dummy_project_instance.BuildForTesting(\n self._test_root_dir, dummyproject_derived_data_dir)\n\n app_under_test_plugins_dir = os.path.join(\n self._app_under_test_dir, 'PlugIns')\n if not os.path.exists(app_under_test_plugins_dir):\n os.mkdir(app_under_test_plugins_dir)\n new_test_bundle_path = os.path.join(\n app_under_test_plugins_dir, os.path.basename(self._test_bundle_dir))\n # The test bundle under PlugIns can not be symlink since it will cause\n # app installation error.\n if os.path.islink(self._test_bundle_dir):\n shutil.copytree(self._test_bundle_dir, new_test_bundle_path)\n self._test_bundle_dir = new_test_bundle_path\n elif new_test_bundle_path != self._test_bundle_dir:\n self._test_bundle_dir = _MoveAndReplaceFile(\n self._test_bundle_dir, app_under_test_plugins_dir)\n\n # The xctestrun file are under the build products directory of dummy\n # project's derived data dir.\n # DerivedData\n # |\n # +--Build\n # |\n # +--Products\n # |\n # +--***.xctestrun\n derived_data_build_products_dir = os.path.join(\n dummyproject_derived_data_dir, 'Build', 'Products')\n generated_xctestrun_file_paths = glob.glob('%s/*.xctestrun' %\n derived_data_build_products_dir)\n if not generated_xctestrun_file_paths:\n raise ios_errors.XctestrunError(\n \"No generated xctestrun file was found in the dummy project's build \"\n \"products dir.\")\n self._xctestrun_file_path = os.path.join(self._test_root_dir,\n 'xctestrun.plist')\n shutil.move(generated_xctestrun_file_paths[0],\n self._xctestrun_file_path)\n self._xctestrun_obj = XctestRun(\n self._xctestrun_file_path, test_type=self._test_type)\n 
self._xctestrun_obj.SetXctestrunField(\n 'TestBundlePath', self._test_bundle_dir)",
"def _GenerateXctestrunFileForXcuitest(self):\n dummyproject_derived_data_dir = os.path.join(self._work_dir,\n 'dummyproject_derived_data')\n with dummy_project.DummyProject(\n self._app_under_test_dir, self._test_bundle_dir, self._sdk,\n self._test_type, self._work_dir) as dummy_project_instance:\n if (self._signing_options and\n self._signing_options.get('xctrunner_app_provisioning_profile')):\n dummy_project_instance.SetTestBundleProvisioningProfile(\n self._signing_options.get('xctrunner_app_provisioning_profile'))\n # Use TEST_ROOT as dummy project's build products dir.\n dummy_project_instance.BuildForTesting(\n self._test_root_dir, dummyproject_derived_data_dir)\n\n # The basic xctestrun file and XCTRunner app are under the build products\n # directory of dummy project's derived data dir.\n # DerivedData\n # |\n # +--Build\n # |\n # +--Products\n # |\n # +--Debug-***\n # |\n # +--***-Runner.app\n # +--***.xctestrun\n derived_data_build_products_dir = os.path.join(\n dummyproject_derived_data_dir, 'Build', 'Products')\n\n generated_xctrunner_app_dirs = glob.glob('%s/Debug-*/*-Runner.app' %\n derived_data_build_products_dir)\n if not generated_xctrunner_app_dirs:\n raise ios_errors.XctestrunError(\"No generated XCTRunner app was found in \"\n \"the dummy project's build products dir.\")\n if len(generated_xctrunner_app_dirs) > 1:\n raise ios_errors.XctestrunError(\"More than one XCTRunner app were found \"\n \"in the dummy project's build products \"\n \"dir.\")\n\n xctrunner_app_dir = os.path.join(\n self._test_root_dir, os.path.basename(generated_xctrunner_app_dirs[0]))\n shutil.move(generated_xctrunner_app_dirs[0], xctrunner_app_dir)\n if (self._signing_options and\n self._signing_options.get('xctrunner_app_enable_ui_file_sharing')):\n try:\n bundle_util.EnableUIFileSharing(xctrunner_app_dir)\n except ios_errors.BundleError as e:\n logging.warning(e.output)\n # The test bundle under XCTRunner.app/PlugIns is not actual test bundle. 
It\n # only contains Info.plist and _CodeSignature. So copy the real test bundle\n # under XCTRunner.app/PlugIns to replace it.\n xctrunner_plugins_dir = os.path.join(xctrunner_app_dir, 'PlugIns')\n if os.path.exists(xctrunner_plugins_dir):\n shutil.rmtree(xctrunner_plugins_dir)\n os.mkdir(xctrunner_plugins_dir)\n # The test bundle should not exist under the new generated XCTRunner.app.\n if os.path.islink(self._test_bundle_dir):\n # The test bundle under PlugIns can not be symlink since it will cause\n # app installation error.\n new_test_bundle_path = os.path.join(\n xctrunner_plugins_dir, os.path.basename(self._test_bundle_dir))\n shutil.copytree(self._test_bundle_dir, new_test_bundle_path)\n self._test_bundle_dir = new_test_bundle_path\n else:\n self._test_bundle_dir = _MoveAndReplaceFile(\n self._test_bundle_dir, xctrunner_plugins_dir)\n\n generated_xctestrun_file_paths = glob.glob('%s/*.xctestrun' %\n derived_data_build_products_dir)\n if not generated_xctestrun_file_paths:\n raise ios_errors.XctestrunError(\n \"No generated xctestrun file was found in the dummy project's build \"\n \"products dir.\")\n self._xctestrun_file_path = os.path.join(self._test_root_dir,\n 'xctestrun.plist')\n shutil.move(generated_xctestrun_file_paths[0],\n self._xctestrun_file_path)\n\n self._xctestrun_obj = XctestRun(\n self._xctestrun_file_path, self._test_type)\n self._xctestrun_obj.SetXctestrunField('TestHostPath', xctrunner_app_dir)\n self._xctestrun_obj.SetXctestrunField(\n 'UITargetAppPath', self._app_under_test_dir)\n self._xctestrun_obj.SetXctestrunField(\n 'TestBundlePath', self._test_bundle_dir)\n # When running on iphoneos, it is necessary to remove this field.\n # For iphonesimulator, this field won't effect the test functionality. To\n # be consistent, remove this field.\n self._xctestrun_obj.DeleteXctestrunField(\n 'TestingEnvironmentVariables:IDEiPhoneInternalTestBundleName')",
"def fill_xctestrun_node(self):\n xctestrun_data = {\n 'TestTargetName': {\n 'IsAppHostedTestBundle': True,\n 'TestBundlePath': '__TESTHOST__%s' % self._xctest_path(),\n 'TestHostBundleIdentifier': get_bundle_id(self.test_app_path),\n 'TestHostPath': '%s' % self.test_app_path,\n 'TestingEnvironmentVariables': {\n 'DYLD_INSERT_LIBRARIES':\n '__PLATFORMS__/iPhoneSimulator.platform/Developer/usr/lib/'\n 'libXCTestBundleInject.dylib',\n 'DYLD_LIBRARY_PATH':\n '__PLATFORMS__/iPhoneSimulator.platform/Developer/Library',\n 'DYLD_FRAMEWORK_PATH':\n '__PLATFORMS__/iPhoneSimulator.platform/Developer/'\n 'Library/Frameworks',\n 'XCInjectBundleInto':\n '__TESTHOST__/%s' % self.module_name\n }\n }\n }\n\n if self.env_vars:\n xctestrun_data['TestTargetName'].update(\n {'EnvironmentVariables': self.env_vars})\n\n if self.included_tests or self.excluded_tests:\n gtest_filter = get_gtest_filter(self.included_tests, self.excluded_tests)\n # Removed previous gtest-filter if exists.\n self.test_args = [\n el for el in self.test_args if not el.startswith('--gtest_filter=')\n ]\n self.test_args.append('--gtest_filter=%s' % gtest_filter)\n\n if self.repeat_count > 1:\n self.test_args.append('--gtest_repeat=%s' % self.repeat_count)\n\n self.test_args.append('--gmock_verbose=error')\n self.test_args.append(GENERATE_COMPILED_GTESTS_FILE_TEST_ARG)\n\n xctestrun_data['TestTargetName'].update(\n {'CommandLineArguments': self.test_args})\n\n return xctestrun_data",
"def fill_xctestrun_node(self):\n xctestrun_data = {\n 'TestTargetName': {\n 'IsAppHostedTestBundle': True,\n 'TestBundlePath': '__TESTHOST__%s' % self._xctest_path(),\n 'TestHostBundleIdentifier': get_bundle_id(self.test_app_path),\n 'TestHostPath': '%s' % self.test_app_path,\n 'TestingEnvironmentVariables': {\n 'DYLD_INSERT_LIBRARIES':\n '__TESTHOST__/Frameworks/libXCTestBundleInject.dylib',\n 'DYLD_LIBRARY_PATH':\n '__PLATFORMS__/iPhoneOS.platform/Developer/Library',\n 'DYLD_FRAMEWORK_PATH':\n '__PLATFORMS__/iPhoneOS.platform/Developer/'\n 'Library/Frameworks',\n 'XCInjectBundleInto':\n '__TESTHOST__/%s' % self.module_name\n }\n }\n }\n\n if self.env_vars:\n xctestrun_data['TestTargetName'].update(\n {'EnvironmentVariables': self.env_vars})\n\n if self.included_tests or self.excluded_tests:\n gtest_filter = get_gtest_filter(self.included_tests, self.excluded_tests)\n # Removed previous gtest-filter if exists.\n self.test_args = [\n el for el in self.test_args if not el.startswith('--gtest_filter=')\n ]\n self.test_args.append('--gtest_filter=%s' % gtest_filter)\n\n if self.repeat_count > 1:\n self.test_args.append('--gtest_repeat=%s' % self.repeat_count)\n\n self.test_args.append('--gmock_verbose=error')\n self.test_args.append(GENERATE_COMPILED_GTESTS_FILE_TEST_ARG)\n\n xctestrun_data['TestTargetName'].update(\n {'CommandLineArguments': self.test_args})\n\n return xctestrun_data",
"def gen_pytest_xmls(args):\n if args.testcases and args.testsuites:\n return\n\n if not args.testrun_id:\n raise TestcasesException('The testrun id was not specified')\n gen_xmls.run_pytest(args.testrun_id)",
"def fill_xctest_run(self, out_dir):\n folder = os.path.abspath(os.path.join(out_dir, os.pardir))\n if not os.path.exists(folder):\n os.makedirs(folder)\n xctestrun = os.path.join(folder, 'run_%d.xctestrun' % int(time.time()))\n if not os.path.exists(xctestrun):\n with open(xctestrun, 'w'):\n pass\n # Creates a dict with data about egtests to run - fill all required fields:\n # egtests_module, egtest_app_path, egtests_xctest_path and\n # filtered tests if filter is specified.\n # Write data in temp xctest run file.\n plistlib.writePlist(self.fill_xctestrun_node(), xctestrun)\n return xctestrun",
"def test_addon_template(self):\n self.maxDiff = None\n result = self.create_template()\n self.assertItemsEqual(\n result.files_created.keys(),\n [\n self.project + '/.travis.yml',\n self.project + '/bootstrap.py',\n self.project + '/buildout.cfg',\n self.project + '/CHANGES.rst',\n self.project + '/CONTRIBUTORS.rst',\n self.project + '/docs',\n self.project + '/docs/LICENSE.GPL',\n self.project + '/docs/LICENSE.txt',\n self.project + '/Makefile',\n self.project + '/MANIFEST.in',\n self.project + '/README.rst',\n self.project + '/setup.py',\n self.project + '/src',\n self.project + '/src/customer',\n self.project + '/src/customer/__init__.py',\n self.project + '/src/customer/site',\n self.project + '/src/customer/site/__init__.py',\n self.project + '/src/customer/site/contenttype',\n self.project + '/src/customer/site/contenttype/__init__.py',\n self.project + '/src/customer/site/contenttype/browser',\n self.project + '/src/customer/site/contenttype/browser/__init__.py',\n self.project + '/src/customer/site/contenttype/browser/configure.zcml',\n self.project + '/src/customer/site/contenttype/browser/templates',\n self.project + '/src/customer/site/contenttype/browser/templates/helloworld.pt',\n self.project + '/src/customer/site/contenttype/browser/view.py',\n self.project + '/src/customer/site/contenttype/config.py',\n self.project + '/src/customer/site/contenttype/configure.zcml',\n self.project + '/src/customer/site/contenttype/content',\n self.project + '/src/customer/site/contenttype/content/__init__.py',\n self.project + '/src/customer/site/contenttype/content/example.py',\n self.project + '/src/customer/site/contenttype/interfaces.py',\n self.project + '/src/customer/site/contenttype/profiles',\n self.project + '/src/customer/site/contenttype/profiles.zcml',\n self.project + '/src/customer/site/contenttype/profiles/default',\n self.project + '/src/customer/site/contenttype/profiles/default/browserlayer.xml',\n self.project + 
'/src/customer/site/contenttype/profiles/default/metadata.xml',\n self.project + '/src/customer/site/contenttype/profiles/default/rolemap.xml',\n self.project + '/src/customer/site/contenttype/profiles/default/types',\n self.project + '/src/customer/site/contenttype/profiles/default/types.xml',\n self.project + '/src/customer/site/contenttype/profiles/default/types/Example.xml',\n self.project + '/src/customer/site/contenttype/profiles/uninstall',\n self.project + '/src/customer/site/contenttype/profiles/uninstall/customer.site.contenttype.txt',\n self.project + '/src/customer/site/contenttype/static',\n self.project + '/src/customer/site/contenttype/static/document_icon.png',\n self.project + '/src/customer/site/contenttype/testing.py',\n self.project + '/src/customer/site/contenttype/tests',\n self.project + '/src/customer/site/contenttype/tests/__init__.py',\n self.project + '/src/customer/site/contenttype/tests/test_content.py',\n self.project + '/src/customer/site/contenttype/tests/test_example.robot',\n self.project + '/src/customer/site/contenttype/tests/test_robot.py',\n self.project + '/src/customer/site/contenttype/tests/test_setup.py',\n self.project + '/src/customer/site/contenttype/upgrades',\n self.project + '/src/customer/site/contenttype/upgrades/__init__.py',\n self.project + '/src/customer/site/contenttype/upgrades/configure.zcml',\n self.project + '/src/customer/site/contenttype/upgrades/v1010',\n self.project + '/src/customer/site/contenttype/upgrades/v1010/__init__.py',\n self.project + '/src/customer/site/contenttype/upgrades/v1010/configure.zcml',\n self.project + '/src/customer/site/contenttype/upgrades/v1010/handler.py',\n self.project + '/src/customer/site/contenttype/upgrades/v1010/profile',\n self.project + '/src/customer/site/contenttype/upgrades/v1010/profile/metadata.xml',\n self.project,\n ]\n )",
"def test_addon_template(self):\n self.maxDiff = None\n result = self.create_template()\n self.assertItemsEqual(\n result.files_created.keys(),\n [\n self.project + '/.travis.yml',\n self.project + '/bootstrap.py',\n self.project + '/buildout.cfg',\n self.project + '/CHANGES.rst',\n self.project + '/CONTRIBUTORS.rst',\n self.project + '/docs',\n self.project + '/docs/LICENSE.GPL',\n self.project + '/docs/LICENSE.txt',\n self.project + '/Makefile',\n self.project + '/MANIFEST.in',\n self.project + '/README.rst',\n self.project + '/setup.py',\n self.project + '/src',\n self.project + '/src/sc',\n self.project + '/src/sc/__init__.py',\n self.project + '/src/sc/addon',\n self.project + '/src/sc/addon/__init__.py',\n self.project + '/src/sc/addon/config.py',\n self.project + '/src/sc/addon/configure.zcml',\n self.project + '/src/sc/addon/controlpanel.py',\n self.project + '/src/sc/addon/Extensions',\n self.project + '/src/sc/addon/Extensions/__init__.py',\n self.project + '/src/sc/addon/Extensions/Install.py',\n self.project + '/src/sc/addon/interfaces.py',\n self.project + '/src/sc/addon/profiles',\n self.project + '/src/sc/addon/profiles.zcml',\n self.project + '/src/sc/addon/profiles/default',\n self.project + '/src/sc/addon/profiles/default/browserlayer.xml',\n self.project + '/src/sc/addon/profiles/default/controlpanel.xml',\n self.project + '/src/sc/addon/profiles/default/metadata.xml',\n self.project + '/src/sc/addon/profiles/default/registry.xml',\n self.project + '/src/sc/addon/profiles/uninstall',\n self.project + '/src/sc/addon/profiles/uninstall/registry.xml',\n self.project + '/src/sc/addon/setuphandlers.py',\n self.project + '/src/sc/addon/static',\n self.project + '/src/sc/addon/static/addon-icon.png',\n self.project + '/src/sc/addon/testing.py',\n self.project + '/src/sc/addon/tests',\n self.project + '/src/sc/addon/tests/__init__.py',\n self.project + '/src/sc/addon/tests/test_controlpanel.py',\n self.project + 
'/src/sc/addon/tests/test_example.robot',\n self.project + '/src/sc/addon/tests/test_robot.py',\n self.project + '/src/sc/addon/tests/test_setup.py',\n self.project + '/src/sc/addon/upgrades',\n self.project + '/src/sc/addon/upgrades/__init__.py',\n self.project + '/src/sc/addon/upgrades/configure.zcml',\n self.project + '/src/sc/addon/upgrades/v2',\n self.project + '/src/sc/addon/upgrades/v2/__init__.py',\n self.project + '/src/sc/addon/upgrades/v2/configure.zcml',\n self.project + '/src/sc/addon/upgrades/v2/profile',\n self.project + '/src/sc/addon/upgrades/v2/profile/metadata.xml',\n self.project,\n ]\n )",
"def otx_build_task_testing(root, task):\n # Build otx-workspace per tasks check - Default Model Template only\n command_line = [\n \"otx\",\n \"build\",\n \"--task\",\n task,\n \"--workspace\",\n os.path.join(root, f\"otx-workspace-{task}\"),\n ]\n check_run(command_line)",
"def make_xspec_script(outfile, data):\n data[\"prog_name\"] = os.path.basename(sys.argv[0])\n data[\"date\"] = datetime.now().isoformat()\n\n xspec_script = \"\"\"\\\n# Calculate the cooling function table w.r.t the temperature range.\n#\n# Generated by: %(prog_name)s\n# Date: %(date)s\n\n# debug (off)\nchatter 0\n\nset xs_return_results 1\nset xs_echo_script 0\n# set tcl_precision 12\n\nquery yes\nabund %(abund_table)s\ndummyrsp 0.01 100.0 4096 linear\nmodel wabs*apec & %(nh)s & 1.0 & %(abundance)s & %(redshift)s & %(norm)s & /*\n\n# flux unit and value index\nset unit \"%(unit)s\"\nif {$unit eq \"erg\"} {\n set fidx 0\n} else {\n set fidx 3\n}\n\n# output cooling function table\nset cf_fn \"%(outfile)s\"\nset cf_fd [open $cf_fn w]\n\nset elow %(elow)s\nset ehigh %(ehigh)s\nset tmin %(tmin)s\nset tmax %(tmax)s\nset tstep %(tstep)s\n\nputs $cf_fd \"# temperature(keV) flux($elow-$ehigh)($unit/cm^2/s)\"\n\n# temperature sampling points\nfor {set t $tmin} {$t <= $tmax} {set t [expr {$t + $tstep}]} {\n newpar 2 $t\n flux $elow $ehigh\n set cf [lindex [tcloutr flux 1] $fidx]\n puts $cf_fd \"$t $cf\"\n}\n\nclose $cf_fd\ntclexit\n\"\"\" % data\n with open(outfile, \"w\") as f:\n f.write(xspec_script)",
"def generate_go_test_code(self, root_codegen_release, import_prefix, paths):\n go_test_code_template = self.template_engine.get_template('mdt_telemetry_test.go.jinja2')\n go_test_code = go_test_code_template.render(package=self.package, import_prefix=import_prefix, paths=paths)\n # Append _test. yielding filename_test.go\n main_components = self.main.split('.')\n main_components.insert(1, '_test.')\n main_test_filename = os.path.join(\n root_codegen_release,\n ''.join(main_components)\n )\n logging.info('Writing Go test to %s', main_test_filename)\n with open(main_test_filename, 'w') as main_test_fd:\n main_test_fd.write(go_test_code)",
"def BuildTemplates(self):\n if args.config:\n build_args = [\n \"--verbose\", \"--config\", args.config, \"build\", \"--output\",\n args.output_dir\n ]\n else:\n build_args = [\"--verbose\", \"build\", \"--output\", args.output_dir]\n\n wix_tools_path = self._WixToolsPath()\n build_args += [\n \"-p\",\n \"ClientBuilder.wix_tools_path=%{\" + wix_tools_path + \"}\",\n \"-p\",\n \"ClientBuilder.build_msi=True\",\n \"-p\",\n \"ClientBuilder.fleetspeak_bundled=True\",\n ]\n _VerboseCheckCall([self.grr_client_build64] + build_args)",
"def generate_tests(work_dir, linker_dir, modules, config_dict, test_list,\n modules_dir):\n uarch_dir = os.path.dirname(uatg.__file__)\n\n if work_dir:\n pass\n else:\n work_dir = os.path.abspath((os.path.join(uarch_dir, '../work/')))\n\n os.makedirs(work_dir, exist_ok=True)\n\n logger.info(f'uatg dir is {uarch_dir}')\n logger.info(f'work_dir is {work_dir}')\n isa = 'RV64I'\n try:\n isa = config_dict['isa_dict']['hart0']['ISA']\n except Exception as e:\n logger.error(e)\n logger.error('Exiting UATG. ISA cannot be found/understood')\n exit(0)\n\n if modules == ['all']:\n logger.debug(f'Checking {modules_dir} for modules')\n modules = list_of_modules(modules_dir)\n logger.debug(f'The modules are {modules}')\n\n test_list_dict = {}\n logger.info('****** Generating Tests ******')\n for module in modules:\n module_dir = os.path.join(modules_dir, module)\n work_tests_dir = os.path.join(work_dir, module)\n try:\n module_params = config_dict['core_config'][module]\n except KeyError:\n # logger.critical(\"The {0} module is not in the dut config_file\",\n # format(module))\n module_params = {}\n module_params['isa'] = isa\n logger.debug(f'Directory for {module} is {module_dir}')\n logger.info(f'Starting plugin Creation for {module}')\n create_plugins(plugins_path=module_dir)\n logger.info(f'Created plugins for {module}')\n username = getuser()\n time = ((str(datetime.now())).split(\".\"))[0]\n license_str = f'# Licensing information can be found at ' \\\n f'LICENSE.incore\\n# Test generated by user - {username}' \\\n f' at {time}\\n\\n'\n includes = f'#include \\\"model_test.h\\\" \\n#include \\\"arch_test.h\\\"\\n'\n test_entry = f'RVTEST_ISA(\\\"{isa}\\\")\\n\\n.section .text.init\\n.globl' \\\n f' rvtest_entry_point\\nrvtest_entry_point:'\n\n rvcode_begin = '\\nRVMODEL_BOOT\\nRVTEST_CODE_BEGIN\\n'\n rvcode_end = '\\nRVTEST_CODE_END\\nRVMODEL_HALT\\n\\n'\n rvtest_data_begin = '\\nRVTEST_DATA_BEGIN\\n'\n rvtest_data_end = '\\nRVTEST_DATA_END\\n\\n'\n rvmodel_data_begin = 
'\\nRVMODEL_DATA_BEGIN\\n'\n rvmodel_data_end = '\\nRVMODEL_DATA_END\\n\\n'\n\n manager = PluginManager()\n manager.setPluginPlaces([module_dir])\n # plugins are stored in module_dir\n manager.collectPlugins()\n\n # check if prior test files are present and remove them. create new dir.\n if (os.path.isdir(work_tests_dir)) and \\\n os.path.exists(work_tests_dir):\n rmtree(work_tests_dir)\n\n os.mkdir(work_tests_dir)\n\n logger.debug(f'Generating assembly tests for {module}')\n\n # Loop around and find the plugins and writes the contents from the\n # plugins into an asm file\n for plugin in manager.getAllPlugins():\n check = plugin.plugin_object.execute(module_params)\n name = (str(plugin.plugin_object).split(\".\", 1))\n test_name = ((name[1].split(\" \", 1))[0])\n if check:\n asm_body = plugin.plugin_object.generate_asm()\n # Adding License, includes and macros\n asm = license_str + includes + test_entry\n # Appending Coding Macros & Instructions\n asm += rvcode_begin + asm_body + rvcode_end\n # Appending RVTEST_DATA macros and data values\n asm += rvtest_data_begin + rvtest_data(bit_width=32,\n num_vals=1,\n random=True,\n signed=False,\n align=4)\n asm += rvtest_data_end\n # Appending RVMODEL macros\n asm += rvmodel_data_begin + rvmodel_data_end\n os.mkdir(os.path.join(work_tests_dir, test_name))\n with open(\n os.path.join(work_tests_dir, test_name,\n test_name + '.S'), 'w') as f:\n f.write(asm)\n logger.debug(f'Generating test for {test_name}')\n else:\n logger.critical(f'Skipped {test_name}')\n logger.debug(f'Finished Generating Assembly Tests for {module}')\n if test_list:\n logger.info(f'Creating test_list for the {module}')\n test_list_dict.update(\n generate_test_list(work_tests_dir, uarch_dir, test_list_dict))\n\n logger.info('****** Finished Generating Tests ******')\n\n if linker_dir and os.path.isfile(os.path.join(linker_dir, 'link.ld')):\n logger.debug('Using user specified linker')\n else:\n create_linker(target_dir=work_dir)\n 
logger.debug(f'Creating a linker file at {work_dir}')\n\n if linker_dir and os.path.isfile(os.path.join(linker_dir, 'model_test.h')):\n logger.debug('Using user specified model_test file')\n else:\n create_model_test_h(target_dir=work_dir)\n logger.debug(f'Creating Model_test.h file at {work_dir}')\n if test_list:\n logger.info('Test List was generated by UATG. You can find it in '\n 'the work dir ')\n else:\n logger.info('Test list will not be generated by uatg')\n if test_list.lower() == 'true':\n with open(os.path.join(work_dir, 'test_list.yaml'), 'w') as outfile:\n yaml.dump(test_list_dict, outfile)",
"def setTouchExampleFile(self,configName, qtouchComponent):\n touchExampleFile = qtouchComponent.createFileSymbol(\"TOUCH_EXAMPLE_SOURCE\", None)\n touchExampleFile.setSourcePath(\"/templates/touch_example.c.ftl\")\n touchExampleFile.setOutputName(\"touch_example.c\")\n touchExampleFile.setDestPath(\"/touch/\")\n touchExampleFile.setProjectPath(\"config/\" + configName +\"/touch/\")\n touchExampleFile.setType(\"SOURCE\")\n touchExampleFile.setMarkup(True)\n return touchExampleFile",
"def test_create_entitlement_template(self):\n pass",
"def generate_test_cases(ukernel, init_fn, mr, nr, k_block, is_pipelined, isa):\n _, test_name = ukernel.split(\"_\", 1)\n _, datatype, ukernel_type, _ = ukernel.split(\"_\", 3)\n test_args = [ukernel, init_fn]\n return xngen.preprocess(TEST_TEMPLATE, {\n \"TEST_NAME\": test_name.upper().replace(\"UKERNEL_\", \"\"),\n \"TEST_ARGS\": test_args,\n \"UKERNEL_TYPE\": ukernel_type.upper(),\n \"DATATYPE\": datatype,\n \"MR\": mr,\n \"NR\": nr,\n \"KBLOCK\": k_block,\n \"ADJKBLOCK\": 2 * k_block if is_pipelined else k_block,\n \"IS_PIPELINED\": is_pipelined,\n \"ISA_CHECK\": xnncommon.generate_isa_check_macro(isa),\n \"next_prime\": next_prime,\n })",
"def setup_testrun_dir():\n test_run = \"testrun_{}\".format(int(time.time()))\n os.mkdir(test_run)\n this_files_dir = os.path.dirname(os.path.realpath(__file__))\n config_templates = os.path.join(this_files_dir, \"integration\", \"config\")\n os.mkdir(os.path.join(test_run, \"runfolders\"))\n shutil.copy2(os.path.join(config_templates, \"app.config\"), test_run)\n shutil.copy2(os.path.join(config_templates, \"logger.config\"), test_run)\n return os.path.realpath(test_run)",
"def test_add_zos_template(self):\n pass",
"def _GenerateTestBits(self, tempdir):\n build_root = cros_build_lib.GetSysroot(board=self.board)\n cwd = os.path.join(build_root, BOARD_BUILD_DIR)\n tarball_funcs = [commands.BuildAutotestControlFilesTarball,\n commands.BuildAutotestPackagesTarball,\n commands.BuildAutotestTestSuitesTarball,\n commands.BuildAutotestServerPackageTarball]\n for tarball_func in tarball_funcs:\n tarball_func(build_root, cwd, tempdir)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Moves the file under target directory and replace it if it exists.
|
def _MoveAndReplaceFile(src_file, target_parent_dir):
new_file_path = os.path.join(
target_parent_dir, os.path.basename(src_file))
if os.path.exists(new_file_path):
shutil.rmtree(new_file_path)
shutil.move(src_file, new_file_path)
return new_file_path
|
[
"def move_file(source, target):\n log.echo_info('move [%s] to [%s]' % (source, target))\n check_file_exists(source)\n shutil.move(source, target)\n log.echo_info('moving...')\n check_file_exists(target)",
"def _movefile(src, dest, **kwargs):\n\tif movefile(src, dest, **kwargs) is None:\n\t\traise portage.exception.PortageException(\n\t\t\t\"mv '%s' '%s'\" % (src, dest))",
"def move_current_file(self, src, dest):\n curr_file = self.get_current_file()\n fullsrc = os.path.join(src, curr_file)\n fulldest = os.path.join(dest, curr_file)\n if not os.path.exists(os.path.dirname(fulldest)):\n try:\n os.makedirs(os.path.dirname(fulldest))\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n shutil.move(fullsrc, fulldest)\n\n self.logger.debug(\"File %s was moved to directory %s\" % (curr_file, dest))\n self.source_files.pop(0)",
"def move_to_matching_folder(self, filename):",
"def move_file(self, path: str, filename: str, new_path: str, new_filename: str = None):\n self.copy_file(path, filename, new_path, new_filename)\n self.delete_file(path, filename)",
"def _move_mo_file(source, target):\n import os\n\n sourceFile = get_modelica_file_name(source)\n targetFile = get_modelica_file_name(target)\n\n _git_move(sourceFile, targetFile)\n # The targetFile may have `within Buildings.Fluid;`\n # Update this if needed.\n\n for fi in [sourceFile, targetFile]:\n di = os.path.dirname(fi)\n write_package_order(directory=di, recursive=False)\n\n if not os.listdir(os.path.dirname(sourceFile)):\n os.rmdir(os.path.dirname(sourceFile))\n\n def sd(s): return \"within \" + s[:s.rfind('.')] + \";\"\n replace_text_in_file(targetFile, sd(source), sd(target))\n # Update the class name\n replace_text_in_file(targetFile,\n \" \" + source[source.rfind('.') + 1:],\n \" \" + target[target.rfind('.') + 1:])",
"def move_file (\n source_path,\n target_path,\n allow_undo=True,\n no_confirm=False,\n rename_on_collision=True,\n silent=False,\n hWnd=None\n):\n return _file_operation (\n shellcon.FO_MOVE,\n source_path,\n target_path,\n allow_undo,\n no_confirm,\n rename_on_collision,\n silent,\n hWnd\n )",
"def rename_and_overwrite_file(source_filename, destination_filename):\n os.replace(source_filename, destination_filename)",
"def moveFile(fromFile, toDir, newFilename = None):\n fromFile.parentDir.moveFile(fromFile, toDir, newFilename)",
"def move_file(source: str, destination: str):\n if os.path.isdir(destination):\n shutil.move(source, destination)\n else:\n os.mkdir(destination)\n shutil.move(source, destination)",
"def _git_move(source, target):\n # Due to the recursive calls, this could be invoked to git mv an empty directory.\n # The directory would exist, but has no files in it.\n # In this case, simply delete the empty directory and return\n if os.path.isdir(source) and len(os.listdir(source)) == 0:\n os.rmdir(source)\n return\n\n # Throw an error if source is not a file that exist.\n if not (os.path.isfile(source) or os.path.isdir(source)):\n raise ValueError(\"Failed to move file '%s' as it does not exist.\" %\n os.path.abspath(os.path.join(os.path.curdir, source)))\n\n # Throw an error if target is an existing file, except if it is the package.mo file\n if os.path.isfile(target):\n if target.endswith(\"package.mo\"):\n print(\"*** Warning: Did not move {}.\".format(target))\n return\n else:\n raise ValueError(\"Failed to move '{}' to target '{}' as target already exists.\".format(\n os.path.abspath(os.path.join(os.path.curdir, source)),\n os.path.abspath(os.path.join(os.path.curdir, target))))\n\n # If the destination directory does not exist, create it.\n targetDir = os.path.dirname(target)\n ext = os.path.splitext(target)[1]\n if not os.path.exists(targetDir):\n # Directory does not exist.\n if ext == \".mo\":\n # It is a Modelica package.\n # Recursively create and populate it.\n create_modelica_package(targetDir)\n else:\n # Directory does not exist.\n os.makedirs(targetDir)\n\n _sh(cmd=['git', 'mv', source, target], directory=os.path.curdir)",
"def move_to(self, source_folder, target_folder):\n assert(self.is_dated())\n logger.info('Moving file \"%s\" from \"%s\" to \"%s\"' % (self.filename, source_folder, target_folder))\n os.rename(os.path.join(source_folder, self.filename), os.path.join(target_folder, self.filename))",
"def move(source, destination):\n\tshutil.move(_uri_to_path(source), _uri_to_path(destination)) #Use shutil because it overwrites old files on Windows too.",
"def vacate_target_if_exist_and_remove_backup_if_exist(file_path) -> None:\n if file_path.target.is_file():\n os.replace(str(file_path.target), str(file_path.backup_for_test))\n if file_path.backup.exists():\n os.unlink(str(file_path.backup))",
"def move(self, dest):\n shutil.move(self.path, dest)",
"def moveFile(self, source, destin):\n pylabs.q.logger.log('Move file from %s to %s'% (source, destin),6)\n if ((source is None) or (destin is None)):\n raise TypeError(\"Not enough parameters given to system.fs.moveFile: move from %s, to %s\" % (source, destin))\n try:\n if(pylabs.q.system.fs.isFile(source)):\n pylabs.q.system.fs.move(source, destin)\n else:\n raise RuntimeError(\"The specified source path in system.fs.moveFile does not exist: %s\" % source)\n except:\n raise RuntimeError(\"File could not be moved...in system.fs.moveFile: from %s to %s \" % (source, destin))",
"def _move_reference_result(source, target):\n # Reference result file for sourceFile.\n sourceRefFile = source[:source.find(\".\")] + \\\n os.path.sep + \\\n os.path.join(\"Resources\", \"ReferenceResults\", \"Dymola\") + \\\n os.path.sep + \\\n source.replace(\".\", \"_\") + \".txt\"\n\n if os.path.isfile(sourceRefFile):\n _git_move(sourceRefFile,\n sourceRefFile.replace(source.replace(\".\", \"_\"),\n target.replace(\".\", \"_\")))",
"def update_default(self, new_tag):\n if not self.root:\n raise ValueError(\"No root specified: %s\" % self.root)\n\n new_dir = os.path.join(os.path.dirname(self.root), new_tag)\n print(\"New dir: \", new_dir)\n if os.path.exists(new_dir):\n raise ValueError(\"Destination exists: %s (from %s)\" % (new_dir, self.tag))\n\n original_file = self.make_path()\n #original_dir = os.path.join(self.root, self.tag)\n\n new_name = \"%s.json\" % new_tag\n new_file = os.path.join(self.root, new_name)\n print(\"renaming: %s, %s\" % (original_file, new_file))\n #rename the file first, to make paths easier\n os.rename(original_file, new_file)\n\n #now move the directory\n #os.rename(original_dir, new_dir)\n os.rename(self.root, new_dir)\n\n self.root = new_dir\n self.tag = new_tag\n #now save it to our new destination (should over-write old data)\n self.save()",
"def move_directory_contents(source_dir, target_dir):\n directory_contents = glob.glob(os.path.join(source_dir, \"*\"))\n for fpath in directory_contents:\n target_path = os.path.join(target_dir, os.path.basename(fpath))\n if os.path.exists(target_path):\n logging.info(\n \"{} already exists. Run with force_overwrite=True to download from scratch\".format(\n target_path\n )\n )\n continue\n shutil.move(fpath, target_dir)\n\n shutil.rmtree(source_dir)",
"def cp_file_to_target(self, source_file, target_dir):\r\n if not os.path.exists(target_dir):\r\n os.mkdir(target_dir)\r\n target_file = target_dir + '/' + os.path.basename(source_file)\r\n if not os.path.exists(target_file):\r\n shutil.copy2(source_file, target_dir)\r\n result = 1\r\n self.debug_out('{} => {}'.format(source_file, target_dir))\r\n else:\r\n if os.path.getsize(target_file) == os.path.getsize(source_file):\r\n self.debug_out('target existed')\r\n self.debug_out('{} <====> {}'.format(source_file, target_file))\r\n result = 0\r\n else:\r\n # 新命名规则: 旧文件名_[1,2,3...]\r\n base, ext = os.path.splitext(target_file)\r\n i = 1\r\n while True:\r\n new_target_file = base + '_{:d}'.format(i) + ext\r\n if not os.path.exists(new_target_file):\r\n shutil.copy2(source_file, new_target_file)\r\n result = 2\r\n break\r\n else:\r\n i += 1\r\n return result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Build a dictionary of a particular binding. The keys are camel cased binding field names defined in `init_params` list and
|
def get_dict_repr(self) -> Dict:
params = list(dict.fromkeys(getattr(self, 'init_params', [])))
for p in params:
if p not in Binding.EXCLUDED_INIT_PARAMS:
self._dict[to_camel_case(p)] = getattr(self, p, None)
return self._dict
|
[
"def bind_params(self) -> Dict[str, Any]:\n return {}",
"def _make_port_dict(self, port, fields=None):\n\n if not fields:\n port.update(self.base_binding_dict)\n else:\n for key in self.base_binding_dict:\n if key in fields:\n port.update(self.base_binding_dict[key])\n return port",
"def binding_properties(self):\n binding_properties = {}\n for field_name, field in six.iteritems(self.fields):\n if field.binding and self[field_name] is not None:\n binding_properties[field_name] = self[field_name]\n return binding_properties",
"def bind_params(self, binding):\n for k, v in binding.items():\n temp = self.free_params.get(k) # it's a name\n if temp:\n temp.val = v\n elif k in self.free_params.values(): # it's a parameter\n k.val = v\n else:\n raise ParameterError(\"Unknown free parameter '{}'\".format(k))",
"def _pack_init_args(obj: object, *args, **kwargs) -> dict[str, Any]:\n init_signature = signature(obj.__init__) # type: ignore\n bound_signature = init_signature.bind(*args, **kwargs)\n bound_signature.apply_defaults()\n bound_args = bound_signature.arguments # Note: type `OrderedDict`\n return dict(bound_args)",
"def _build_param_dict(self):\n\n # TODO: leave this regex here for now - we've only tested against a simulator\n # the real instrument might give us floats, then we'll need this\n # FLOAT_REGEX = r'((?:[+-]?[0-9]|[1-9][0-9])+\\.[0-9]+)'\n\n int_regex = r'([+-]?[0-9]+)'\n\n # Add parameter handlers to parameter dict.\n self._param_dict.add(Parameter.ENDPOINT,\n r'NOT USED',\n None,\n str,\n type=ParameterDictType.STRING,\n display_name=\"Endpoint\",\n description='IP address of the system running the UltraGrid receiver process.',\n startup_param=False,\n direct_access=False,\n default_value=DEFAULT_ENDPOINT,\n visibility=ParameterDictVisibility.READ_ONLY)\n\n self._param_dict.add(Parameter.PAN_POSITION,\n r'\"pan\": ' + int_regex,\n lambda match: float(match.group(1)),\n str,\n type=ParameterDictType.FLOAT,\n display_name=\"Pan\",\n range=(45, 315),\n description='Camera pan position: (45 - 315)',\n startup_param=False,\n direct_access=False,\n default_value=180.0,\n units=Units.DEGREE_PLANE_ANGLE,\n visibility=ParameterDictVisibility.READ_WRITE)\n\n self._param_dict.add(Parameter.TILT_POSITION,\n r'\"tilt\": ' + int_regex,\n lambda match: float(match.group(1)),\n str,\n type=ParameterDictType.FLOAT,\n display_name=\"Tilt\",\n description='Camera tilt position: (50 - 140)',\n range=(50, 140),\n startup_param=False,\n direct_access=False,\n default_value=90.0,\n units=Units.DEGREE_PLANE_ANGLE,\n visibility=ParameterDictVisibility.READ_WRITE)\n\n self._param_dict.add(Parameter.PAN_TILT_SPEED,\n r'NOT USED',\n None,\n str,\n type=ParameterDictType.FLOAT,\n display_name=\"Speed\",\n range=(0.5, 40),\n description='Pan-Tilt speed, in 0.5 deg/s increments: (0.5 - 40)',\n startup_param=False,\n direct_access=False,\n default_value=10.0,\n units=Units.DEGREE_PLANE_ANGLE_PER_SECOND,\n visibility=ParameterDictVisibility.READ_WRITE)\n\n self._param_dict.add(Parameter.HEADING,\n r'\"heading\": ' + int_regex,\n lambda match: float(match.group(1)),\n str,\n 
type=ParameterDictType.FLOAT,\n display_name=\"Heading\",\n range=(0, 360),\n description='Heading relative to magnetic North: (0 - 360)',\n startup_param=False,\n direct_access=False,\n default_value=0.0,\n units=Units.DEGREE_PLANE_ANGLE,\n visibility=ParameterDictVisibility.READ_ONLY)\n\n self._param_dict.add(Parameter.PITCH,\n r'\"pitch\": ' + int_regex,\n lambda match: float(match.group(1)),\n str,\n type=ParameterDictType.FLOAT,\n display_name=\"Pitch\",\n range=(-90, 90),\n description='Gravity referenced pitch angle. Negative values are up, '\n 'positive values are down: (-90 - 90)',\n startup_param=False,\n direct_access=False,\n default_value=0.0,\n units=Units.DEGREE_PLANE_ANGLE,\n visibility=ParameterDictVisibility.READ_ONLY)\n\n self._param_dict.add(Parameter.LIGHT_1_LEVEL,\n r'\"intensity\": \\[([\\d]+), ([\\d]+)\\]',\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Light 1 Level\",\n range=(0, 100),\n description='Relative intensity of light 1: (0 - 100)',\n startup_param=False,\n direct_access=False,\n default_value=50,\n units=Units.PERCENT,\n visibility=ParameterDictVisibility.READ_WRITE)\n\n self._param_dict.add(Parameter.LIGHT_2_LEVEL,\n r'\"intensity\": \\[([\\d]+), ([\\d]+)\\]',\n lambda match: int(match.group(2)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Light 2 Level\",\n range=(0, 100),\n description='Relative intensity of light 2: (0 - 100)',\n startup_param=False,\n direct_access=False,\n default_value=50,\n units=Units.PERCENT,\n visibility=ParameterDictVisibility.READ_WRITE)\n\n self._param_dict.add(Parameter.ZOOM_LEVEL,\n r'\"zoom\": ' + int_regex,\n lambda match: int(match.group(1)),\n str,\n type=ParameterDictType.INT,\n display_name=\"Zoom Level\",\n range=(0, 7),\n description='Zoom level in steps relative to the current setting: (+/- integer value)',\n startup_param=False,\n direct_access=False,\n default_value=0,\n visibility=ParameterDictVisibility.READ_WRITE)\n\n 
self._param_dict.add(Parameter.LASERS_STATE,\n r'\"laser\": \"(on|off)\"',\n lambda match: match.group(1),\n str,\n type=ParameterDictType.STRING,\n display_name=\"Lasers State\",\n range={'On': 'on', 'Off': 'off'},\n description='Lasers state: (on | off)',\n startup_param=False,\n direct_access=False,\n default_value='off',\n visibility=ParameterDictVisibility.READ_WRITE)\n\n self._param_dict.add(Parameter.STATUS_INTERVAL,\n r'NOT USED',\n None,\n str,\n type=ParameterDictType.STRING,\n display_name=\"Acquire Status Interval\",\n description='Driver parameter used for acquire status schedule.',\n startup_param=False,\n direct_access=False,\n default_value='00:00:00',\n units=ParameterUnit.TIME_INTERVAL,\n visibility=ParameterDictVisibility.READ_WRITE)\n\n self._param_dict.add(Parameter.ELEMENTAL_IP_ADDRESS,\n r'NOT USED',\n None,\n str,\n type=ParameterDictType.STRING,\n display_name=\"Elemental IP Address\",\n description='IP Address of the elemental live server running the video archive process.',\n startup_param=False,\n direct_access=False,\n default_value='209.124.182.238',\n visibility=ParameterDictVisibility.READ_WRITE)\n\n self._param_dict.add(Parameter.OUTPUT_GROUP_ID,\n r'NOT USED',\n None,\n int,\n type=ParameterDictType.INT,\n display_name=\"Output Group ID\",\n description='Output group ID for the archive video output streams being recorded '\n 'by elemental.',\n startup_param=False,\n direct_access=False,\n default_value=27,\n range=(1, 65536),\n visibility=ParameterDictVisibility.READ_WRITE)\n\n self._param_dict.set_default(Parameter.STATUS_INTERVAL)\n self._param_dict.set_default(Parameter.ENDPOINT)\n self._param_dict.set_default(Parameter.PAN_TILT_SPEED)\n self._param_dict.set_default(Parameter.ELEMENTAL_IP_ADDRESS)\n self._param_dict.set_default(Parameter.OUTPUT_GROUP_ID)",
"def kwargs_to_initdict(kwargs: KwargsDict) -> InitDict:\n return {ARGS_LABEL: [], KWARGS_LABEL: kwargs}",
"def _populate_dict(self, params):\n \n output_dict = {}\n \n for prop in params:\n if getattr(self, prop) is not None:\n output_dict[prop] = getattr(self, prop)\n \n return output_dict",
"def __initialize_parameters(self):\n parameters = dict()\n parameters['weights_recognition'] = {\n 'l1': numpy.random.normal(loc=0.,\n scale=0.01,\n size=(self.__nb_visible, self.__nb_hidden)),\n 'mean': numpy.random.normal(loc=0.,\n scale=0.01,\n size=(self.__nb_hidden, self.__nb_z)),\n 'log_std_squared': numpy.random.normal(loc=0.,\n scale=0.01,\n size=(self.__nb_hidden, self.__nb_z))\n }\n parameters['biases_recognition'] = {\n 'l1': numpy.zeros((1, self.__nb_hidden)),\n 'mean': numpy.zeros((1, self.__nb_z)),\n 'log_std_squared': numpy.zeros((1, self.__nb_z))\n }\n parameters['weights_generation'] = {\n 'l1': numpy.random.normal(loc=0.,\n scale=0.01,\n size=(self.__nb_z, self.__nb_hidden)),\n 'mean': numpy.random.normal(loc=0.,\n scale=0.01,\n size=(self.__nb_hidden, self.__nb_visible))\n }\n parameters['biases_generation'] = {\n 'l1': numpy.zeros((1, self.__nb_hidden)),\n 'mean': numpy.zeros((1, self.__nb_visible))\n }\n return parameters",
"def create_dict(**kwargs):\r\n return kwargs",
"def parameters_map(self):\n return dict([(p.name, p) for p in self.component.parameters])",
"def initialization_params(self):\n return {\n 'kinda_params': self._kinda_params.copy(),\n 'multistrand_params': self._multistrand_params.copy(),\n 'nupack_params': self._nupack_params.copy(),\n 'peppercorn_params': self._peppercorn_params.copy()\n }",
"def make_instance_to_initdict(attributes: List[str]) -> InstanceToDictFnType:\n\n def custom_instance_to_initdict(x: Instance) -> InitDict:\n kwargs = {}\n for a in attributes:\n kwargs[a] = getattr(x, a)\n return kwargs_to_initdict(kwargs)\n\n return custom_instance_to_initdict",
"def to_dict(self) -> ParamChangeDict:\n return {\n 'name': self.name,\n 'generator_id': self.generator.id,\n 'customization_args': self.customization_args\n }",
"def _params_dict(cls, params):\n keys = [\"amp\", \"x0\", \"y0\", \"sigma_x\", \"sigma_y\", \"rho\", \"offset\"]\n\n num_params = len(params)\n\n # adjust the dictionary size\n if num_params < 7:\n keys.remove(\"rho\")\n\n if num_params < 6:\n keys.remove(\"sigma_y\")\n\n return {k: p for k, p in zip(keys, params)}",
"def create_params(base_params, args):\n for key in args.keys():\n if key in base_params:\n base_params[key] = args[key]\n\n return base_params",
"def _fixed_dict(self):\n\n _fixed_dict = {}\n for key, val in self.defdict.items():\n if val[0] is False:\n # treat parameters that are intended to be constants\n # if value is provided as a scalar, insert it in the definition\n if isinstance(val[1], str) and val[1] == 'auxiliary':\n _fixed_dict[key] = 'auxiliary'\n else:\n _fixed_dict[key] = val[1]\n\n return _fixed_dict",
"def obj_with_no_args_to_init_dict(obj: Any) -> InitDict:\n\n return {ARGS_LABEL: [], KWARGS_LABEL: {}}",
"def to_dict(self) -> ParamSpecDict:\n return {\n 'obj_type': self.obj_type,\n }"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the value of a particular setting attribute.
|
def get_settings_value(self, settings_attribute_key: str) -> Optional[str]:
return self.get_dict_repr().get(settings_attribute_key)
|
[
"def get_attribute_value(self, attribute_name):\n return self.attributes[attribute_name]",
"def get(self, setting):\n return self.settings.get(setting, \"\")",
"def get_setting(\n self,\n setting_name: webenginecore.webenginesettings.WebAttributeStr,\n ) -> bool:\n return self.get_settings()[setting_name]",
"def _getSetting(self, settingName):\r\n setting = Setting.objects.get(name__name=settingName).value\r\n return setting",
"def setting(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"setting\")",
"def get_attribute(self, attr):\n return self.eval_script('node.getAttribute(\"{0}\")'.format(attr))",
"def get_value(self, key):\n if self.settings.has_key(key):\n return self.settings[key]\n else:\n return None",
"def __get_value_from_datastore(name):\n # type: (str) -> str\n setting = GaeEnvSettings.query(\n GaeEnvSettings.name == str(name)).get() # type: GaeEnvSettings\n if not setting:\n return None\n return setting.value # type: str",
"def attribute_get(self, attr):\n attributes_struct = self.single_query_get('Attributes')\n attribute_struct = [x for x in attributes_struct\n if x['Name'] == attr]\n if len(attribute_struct) > 1:\n raise tdapi.TDException(\"Too many attributes with name {}\".format(attr))\n elif len(attribute_struct) == 0:\n return\n else:\n return attribute_struct[0]['Value']",
"def getattribute(self, k):\n return self.attributes[k] if k in self.attributes else None",
"def get_attribute(self,attr):\n\t\tif (attr is None):\n\t\t\traise ValueError(\"You must specify an attribute\")\n\t\tif (attr not in self._Attributes):\n\t\t\traise ValueError(\"Attribute \" + attr + \" unrecognized\")\n\t\treturn self._Attributes[attr]",
"def get_attribute(self, att):\r\n if att in self.attributes:\r\n return self.attributes[att]\r\n else:\r\n return None",
"def get_setting(cls, settings, key):\n part1, _, part2 = key.partition('/')\n if part2:\n value = settings[part1][part2]\n else:\n value = settings[part1]\n return value",
"def getBasicAttribute(self, name):\n return getattr(self, \"_\" + name + \"_value_\").getValue()",
"def get_attribute(self, name):\n return self._fields[name]",
"def get_attribute(self, atributo):\r\n return self.__atributos[atributo]",
"def extractValue(self, model, item):\n return getattr(item, self.attribute.attrname)",
"def get(self, botconf, cat=None):\n setting = botconf.get(self.name)\n return setting if (setting is not None) else self.default",
"def read_attribute_value(stream, compiler):\n ch = stream.text[stream.ptr]\n\n if ch in STRING_LITERALS:\n value = read_quoted_string(stream)\n\n if compiler.options.escape_attrs:\n # TODO handle escape_attrs=once\n value = html_escape(value)\n\n elif ch.isdigit():\n value = read_number(stream)\n else:\n raw_value = read_word(stream)\n\n if raw_value.lower() in ATTRIBUTE_VALUE_KEYWORDS:\n value = ATTRIBUTE_VALUE_KEYWORDS[raw_value.lower()]\n else:\n value = \"{{ %s }}\" % raw_value\n\n return value"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a matrix indicating the master element for a given node
|
def generate_node_master_elem(self):
self.node_master_elem = np.zeros((self.num_node, 2), dtype=ct.c_int, order='F') - 1
for i_elem in range(self.num_elem):
for i_node_local in range(self.elements[i_elem].n_nodes):
if self.master[i_elem, i_node_local, 0] == -1:
if self.node_master_elem[self.connectivities[i_elem, i_node_local], 0] < 0:
self.node_master_elem[self.connectivities[i_elem, i_node_local], 0] = i_elem
self.node_master_elem[self.connectivities[i_elem, i_node_local], 1] = i_node_local
else:
master_elem = self.master[i_elem, i_node_local, 0]
master_node = self.master[i_elem, i_node_local, 1]
if self.node_master_elem[self.connectivities[i_elem, i_node_local], 0] < 0:
self.node_master_elem[self.connectivities[i_elem, i_node_local], 0] = master_elem
self.node_master_elem[self.connectivities[i_elem, i_node_local], 1] = master_node
|
[
"def get_inner_matrix(self):\n return self.matrix",
"def get_membership_matrix(self):\n import numpy as np\n matrix = []\n for i in self.clusters:\n matrix.append(self.clusters[i]['indicator'])\n matrix = np.array(matrix)\n return matrix",
"def get_adjacency_matrix(self):\n return []",
"def get_adjacency_matrix(self):\n return nx.to_numpy_matrix(self.graph)",
"def node_to_arr(node_matrix):\n sol = []\n\n for y in range(9):\n\n row = []\n for x in range(9):\n row.append(node_matrix[y][x].value)\n\n sol.append(row)\n\n return sol",
"def get_root(self):\r\n if len(self.nodes) == 0:\r\n root = self.empty_sparse[self.levels]\r\n return root\r\n else:\r\n root = self.nodes['']\r\n return root",
"def SoBumpMapMatrixElement_makeIdentity(state: 'SoState', node: 'SoNode') -> \"void\":\n return _coin.SoBumpMapMatrixElement_makeIdentity(state, node)",
"def SoModelMatrixElement_makeIdentity(state: 'SoState', node: 'SoNode') -> \"void\":\n return _coin.SoModelMatrixElement_makeIdentity(state, node)",
"def SoGLViewingMatrixElement_getNodeId(state: 'SoState') -> \"SbUniqueId\":\n return _coin.SoGLViewingMatrixElement_getNodeId(state)",
"def get_adjacency_matrix(self):\n \n #initialize an empty 2D list\n length = len(self.nodes)\n matrix = [x[:] for x in [[0]*length]*length]\n for edge in self.edges:\n fromIndex = self.nodes.index(edge.node_from)\n toIndex = self.nodes.index(edge.node_to)\n matrix[fromIndex][toIndex] = edge.value\n return matrix",
"def getMatrix(self, action: 'SoGetMatrixAction') -> \"void\":\n return _coin.SoNodeKitListPart_getMatrix(self, action)",
"def _matrix_(self):\n return self.to_matrix()",
"def get_mapping_matrix(self):\r\n return self.matrix",
"def makeIdentity(state: 'SoState', node: 'SoNode') -> \"void\":\n return _coin.SoBumpMapMatrixElement_makeIdentity(state, node)",
"def SoModelMatrixElement_set(state: 'SoState', node: 'SoNode') -> \"SbMatrix &\":\n return _coin.SoModelMatrixElement_set(state, node)",
"def db_master(self):\n for node in self.nodes:\n if node.is_role('db_master'):\n return node\n return None",
"def SoBumpMapMatrixElement_set(state: 'SoState', node: 'SoNode') -> \"SbMatrix &\":\n return _coin.SoBumpMapMatrixElement_set(state, node)",
"def sub_to_master(self, index):\n for mapping in self.atom_mapping:\n if index == mapping[0]:\n return mapping[1]",
"def makeIdentity(state: 'SoState', node: 'SoNode') -> \"void\":\n return _coin.SoModelMatrixElement_makeIdentity(state, node)",
"def parent_matrix_template(sequences):\n parent_matrix = create_empty_matrix(sequences, False)\n parent_matrix[0][0] = (0,0)\n for i in range(1, len(parent_matrix[0])):\n parent_matrix[0][i] = (0,i -1)\n for j in range(1, len(parent_matrix)):\n parent_matrix[j][0] = (j - 1, 0)\n\n return parent_matrix"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
bits is a hex string returns a table mapping bitlengths to codes
|
def readACTable(bits):
table = {}
length = bits[:4]
tc = bits[4]
th = bits[5]
bits = bits[6:]
codeCounts = [0]*16
for i in range(16):
codeCounts[i] = int(bits[:2], 16)
bits = bits[2:]
for i in range(16):
numCodes = codeCounts[i]
table[i+1] = []
for _ in range(numCodes):
table[i+1].append(int(bits[:2], 16))
bits = bits[2:]
return table
|
[
"def parseBits(self, hexcode, width):\n bitarray = []\n for byte in hexcode[::-1]:\n bits = int(byte, 16)\n for x in range(4):\n bitarray.append(bool((2 ** x) & bits))\n bitarray = bitarray[::-1]\n return enumerate(bitarray[:width])",
"def string_to_bitlist(data: ByteString) -> List[int]:\n l = len(data) * 8\n result = [0] * l\n pos = 0\n for ch in data:\n i = 7\n while i >= 0:\n if ch & (1 << i) != 0:\n result[pos] = 1\n else:\n result[pos] = 0\n pos += 1\n i -= 1\n\n return result",
"def value_bits(bitstring):\n\n return int(bitstring.translate({ord('x'): '0'}), 2)",
"def bits2length(bits):\n chars = 89 # based on our friend, random-string.py\n return round(bits / math.log(chars, 2))",
"def hash_code(s):\n mask = (1 << 32) - 1 # limit to 32-bit integers\n h = 0\n for character in s:\n h = (h << 5 & mask) | (h >> 27) # 5-bit cyclic shift of running sum\n h += ord(character) # add in value of next character\n return h",
"def convert_str_to_int(str_bits):\n result_bit_key = 0\n while str_bits:\n result_bit_key = (result_bit_key << 1) + int(str_bits[0])\n str_bits = str_bits[1:]\n return result_bit_key",
"def fromhex(s: str) -> bitlist:\n return bitlist(bytes.fromhex(s))",
"def parse_bitfield(bitfield, word_bits):\n groups = bitfield.split(' ')\n group_count = len(groups)\n result_bits = []\n\n for group_index in xrange(group_count):\n group_val = int(groups[group_count - 1 - group_index], 16)\n\n for group_bit in xrange(word_bits):\n if group_val & (1 << group_bit):\n result_bits.append(group_index * word_bits + group_bit)\n\n return result_bits",
"def _mk_bits(self,data):\n if isinstance(data, bytes):\n return data[data.index(b\"\\xfc\") :]\n # handles int and unquoted hex\n if isinstance(data, int):\n length = data.bit_length() >> 3\n bites = int.to_bytes(data, length, byteorder=\"big\")\n return bites\n try:\n # Handles hex byte strings\n i = int(data, 16)\n i_len = i.bit_length() >> 3\n bites = int.to_bytes(i, i_len, byteorder=\"big\")\n return bites\n except (LookupError, TypeError, ValueError):\n if data[:2].lower() == \"0x\":\n data = data[2:]\n if data[:2].lower() == \"fc\":\n return bytes.fromhex(data)\n try:\n return b64decode(self.fix_bad_b64(data))\n except (LookupError, TypeError, ValueError):\n return data",
"def bit_string_to_integer(bit_string):\n result = 0;\n bit_string = bit_string[::-1]\n for i in range(0, bit_length):\n \tresult += (int(bit_string[i]) * (2**i))\n return result",
"def convert_bit_index(x):\n if x == 666666666:#if x is a non data value\n return 255\n x_string = str(x)\n sum = 0\n for i in range(1,6):\n if str(i) in x_string:\n sum += 2**i\n return sum",
"def countBits(x):\n # return bin(n).count(\"1\")\n n, res = \"\", 0\n while x > 0:\n y = str(x % 2)\n res += 1 if y == '1' else 0\n n = y + n\n x = int(x / 2)\n return res",
"def str_to_bits(text: str) -> np.ndarray:\n msg_bytes = text.encode('utf-8')\n bits = []\n for byte in msg_bytes:\n bits.extend([(byte >> i) & 3 for i in range(6, -1, -2)])\n bits.extend([3, 3, 3, 3])\n return np.array(bits)",
"def decodeBits(packets):\n raise SnmplibNotImplemented, \"SNMP BITS data type not implemented yet.\"",
"def __get_bin_list(string):\n return [1 if str(c).isupper() else 0 for c in string]",
"def convert_bytes_to_bit_field(input_bytes):\n byte_list = list(input_bytes)\n byte_list.reverse()\n result = []\n for byte in byte_list:\n bin_string = bin(ord(byte))[2:].rjust(8, '0')\n result.extend([int(x) for x in list(bin_string)])\n log.trace(\"Returning a bitfield of %s for input string: [%s]\", result, input_bytes)\n return result",
"def byte_to_bits(byte):\n return \"\".join([str(get_bit(byte, bit_num)) for bit_num in range(7, -1, -1)])",
"def decode(encoded: str) -> List[int]:\n\n parts = encoded.split('11')\n\n if not parts[-1]:\n parts = parts[:-1]\n\n ints = map(lambda x: '1' if not x else x + '1', parts)\n res = [sum([ensure_sequences(x + 2)[x + 2] for x, value in enumerate(integer) if value == '1']) for integer in ints]\n return integers_decode(res)",
"def to_braille(binary: str) -> list:\n output = []\n for count, value in enumerate(binary):\n if value == '1':\n output.append(count+1)\n return output"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
table is a table of bit lengths and corresponding code words root is the root of the resulting huffman tree huffman is a dictionary mapping codes to bits
|
def createHuffmanTree(table):
root = HuffmanNode()
leaves = Queue.Queue()
root.left = HuffmanNode(root, 0)
root.right = HuffmanNode(root, 1)
leaves.put(root.left)
leaves.put(root.right)
huffman = {}
for key in range(1, len(table.keys())+1):
for code in table[key]:
leaf = leaves.get()
leaf.code = code
huffman[code] = leaf.bits
nextLeaves = Queue.Queue()
while not leaves.empty():
node = leaves.get()
node.left = HuffmanNode(node, 0)
node.right = HuffmanNode(node, 1)
nextLeaves.put(node.left)
nextLeaves.put(node.right)
leaves = nextLeaves
return root, huffman
|
[
"def huffman_code(node, code = ''):\n \n if node.get_word() != None:\n # Leaf reached\n return {node.get_word() : code}\n child1, child2 = node.get_children()\n tree = {}\n tree.update(huffman_code(child1, code + '1'))\n tree.update(huffman_code(child2, code + '0'))\n return tree",
"def __build_binary_tree(self):\r\n\r\n # Create starting leaves\r\n for i in range(256):\r\n self.huffman_tree.append({\r\n 'frq': self.huffman_freqs[i],\r\n 'asc': i,\r\n })\r\n\r\n # Pair leaves and branches based on frequency until there is a\r\n # single root\r\n for i in range(255):\r\n lowest_key1 = -1\r\n lowest_key2 = -1\r\n lowest_frq1 = 1e30\r\n lowest_frq2 = 1e30\r\n\r\n # Find two lowest frequencies\r\n for j in range(256):\r\n if not self.huffman_tree[j]:\r\n continue\r\n if self.huffman_tree[j]['frq'] < lowest_frq1:\r\n lowest_key2 = lowest_key1\r\n lowest_frq2 = lowest_frq1\r\n lowest_key1 = j\r\n lowest_frq1 = self.huffman_tree[j]['frq']\r\n elif self.huffman_tree[j]['frq'] < lowest_frq2:\r\n lowest_key2 = j\r\n lowest_frq2 = self.huffman_tree[j]['frq']\r\n\r\n # Join the two together under a new branch\r\n self.huffman_tree[lowest_key1] = {\r\n 'frq': lowest_frq1 + lowest_frq2,\r\n '0': self.huffman_tree[lowest_key2],\r\n '1': self.huffman_tree[lowest_key1],\r\n }\r\n self.huffman_tree[lowest_key2] = None\r\n\r\n # Make the root the list\r\n self.huffman_tree = self.huffman_tree[lowest_key1]",
"def generate_coding(freqs):\n if len(freqs) == 0:\n return None, None\n if len(freqs) == 1:\n k, v = freqs.items()[0]\n coding = {k:'0'}\n left = TreeNode(freq=v, symbol = k)\n root = TreeNode(left=left)\n return coding, root\n freqs = dict(freqs)\n import heapq\n h = []\n rank = 0\n items = sorted(freqs.items(), key=lambda x:x[0])\n #print items\n for x, v in items:\n node = TreeNode(freq = v, symbol = x, rank = rank)\n rank = rank + 1\n h.append(node)\n heapq.heapify(h)\n while len(h) != 1:\n right = heapq.heappop(h)\n left = heapq.heappop(h)\n newnode = TreeNode(freq = left.freq+right.freq,\n left = left, right = right, rank = rank)\n rank = rank + 1\n heapq.heappush(h, newnode)\n # h[0] is the root of Huffman tree\n coding = {}\n dfs_for_coding(h[0], coding, \"\")\n return coding, h[0]",
"def encoded_huffman_tree(tree):\n\n\tbinary_string = '' #huffman tree in binary form stored as string\n\tno_keys = 0 #count number of item in huffman tree, needed for decompression\n\tfor item in tree:\n\t\tkey = [bin(ord(x))[2:].zfill(16) for x in item][0] #convert each key into 16 bit ascii\n\t\tno_bits = \"{:08b}\".format(len(tree[item])) #convert the number of bits used for each huffman code to binary\n\t\tcode = tree[item] #get huffman code\n\t\tno_keys +=1\n\t\tbinary_string += key+no_bits+code #item in tree is stored as | key | length of code | code | \n\n\tno_keys = \"{:08b}\".format(no_keys) #number of items in huffman tree in binary form\n\n\tbinary_string = no_keys+binary_string \n\n\treturn binary_string",
"def canonical_huffman(__freq_map):\n if not isinstance(__freq_map, dict):\n raise TypeError(\"dict expected, got '%s'\" % type(__freq_map).__name__)\n\n if len(__freq_map) < 2:\n if len(__freq_map) == 0:\n raise ValueError(\"cannot create Huffman code with no symbols\")\n # Only one symbol: see note above in huffman_code()\n sym = list(__freq_map)[0]\n return {sym: bitarray('0', 'big')}, [0, 1], [sym]\n\n code_length = {} # map symbols to their code length\n\n def traverse(nd, length=0):\n # traverse the Huffman tree, but (unlike in huffman_code() above) we\n # now just simply record the length for reaching each symbol\n try: # leaf\n code_length[nd.symbol] = length\n except AttributeError: # parent, so traverse each of the children\n traverse(nd.child[0], length + 1)\n traverse(nd.child[1], length + 1)\n\n traverse(_huffman_tree(__freq_map))\n\n # we now have a mapping of symbols to their code length,\n # which is all we need\n\n table = sorted(code_length.items(), key=lambda item: (item[1], item[0]))\n\n maxbits = max(item[1] for item in table)\n codedict = {}\n count = (maxbits + 1) * [0]\n\n code = 0\n for i, (sym, length) in enumerate(table):\n codedict[sym] = int2ba(code, length, 'big')\n count[length] += 1\n if i + 1 < len(table):\n code += 1\n code <<= table[i + 1][1] - length\n\n return codedict, count, [item[0] for item in table]",
"def extract_huff_map(inp_bytes: bytes,\n verbose: bool = False) -> Tuple[HuffCode, int]:\n if verbose:\n print(\"Extracting Huffman Tree\")\n rev_str = list(inp_bytes)\n rev_str.reverse()\n rev_bytes = bytearray(rev_str)\n huff_len_bytes = []\n for r in rev_bytes:\n if r == ord('}'):\n break\n huff_len_bytes.append(r)\n huff_len_bytes.reverse()\n huff_len = to_dec(list(map(lambda x: chr(x), huff_len_bytes)), 36)\n len_of_len = len(huff_len_bytes)\n huff_dic_str = inp_bytes[-(huff_len + len_of_len): -len_of_len]\n huff_map = {int(k): v\n for k, v in json.loads(bytearray(huff_dic_str)).items()}\n\n # convert huff_map values from radix-36 to binary (base-2)\n # and remove leading \"1\", which was added to handle encoded\n # sequences starting with 0. e.g. 4 --> 100 --> 00\n huff_map = {k: basen(v, 36, 2, True)[1:] for k, v in huff_map.items()}\n\n return HuffCode(data=huff_map), len_of_len + len(huff_dic_str)",
"def makeCode(root,string,dic = {}):\r\n #Base case\r\n # If the left and the right of the root are none\r\n # Then it is a leaf node so we just print its value\r\n if root.left == None and root.right == None:\r\n # Make the string its Huffman Code for future use\r\n dic[root.data] = string\r\n return dic\r\n\r\n # if we go to left then add \"0\" to the code.\r\n # if we go to the right add \"1\" to the code.\r\n \r\n makeCode(root.left, string+\"0\",dic)\r\n makeCode(root.right, string+\"1\",dic)",
"def process_huffman(self, path: str) -> None:\n filename, _file_extension = os.path.splitext(path)\n output_path = filename + \".bin\"\n code_book_path = filename + \".json\"\n statistic_path = filename + \".csv\"\n\n with open(path, 'r+') as file,\\\n open(output_path, 'wb') as output,\\\n open(code_book_path, 'w') as code_book_file,\\\n open(statistic_path, 'w', newline='') as csvfile:\n\n text = file.read()\n encoded_text = self.compress(text)\n code = self.code\n code_book_file.write(json.dumps(code))\n\n padded_encoded_text = self.prep_encoded_text(encoded_text)\n byte_array = self.build_byte_array(padded_encoded_text)\n output.write(bytes(byte_array))\n\n sorted_leafs = self.leafs\n stats = csv.writer(csvfile, delimiter=',', quotechar='\"')\n\n stats.writerow(self.header)\n for leaf in reversed(sorted_leafs):\n symbol = leaf.value\n if symbol == \"\\n\":\n symbol = \"\\\\n\"\n elif symbol == \" \":\n symbol = \"space\"\n stats.writerow([symbol,\n leaf.weight*len(text),\n leaf.weight,\n -math.log2(leaf.weight),\n '= \"' + str(code[leaf.value]) + '\"',\n ])\n\n bytes_bef = os.path.getsize(path)\n bytes_aft = os.path.getsize(output_path)\n bytes_code = os.path.getsize(code_book_path)\n print(\"Bytes before:\", bytes_bef, \"bytes\")\n print(\"Bytes after:\", bytes_aft, \"bytes\")\n print(\"Bytes codefile:\", bytes_code, \"bytes\")\n print(\"Compression ratio bin/txt:\", bytes_aft/bytes_bef*100, \"%\")\n print(\"Compression ratio (bin+json)/txt:\",\n (bytes_aft+bytes_code)/bytes_bef*100, \"%\")\n print(\"Compressed!\")",
"def reduce_tree(tree, code):\n huff_map = {}\n if not tree:\n return huff_map\n if tree.is_leaf():\n huff_map[tree.char] = code\n huff_map.update(reduce_tree(tree.left, code + '0'))\n huff_map.update(reduce_tree(tree.right, code + '1'))\n return huff_map",
"def huffman_decoding(data, tree):\n decoded = ''\n current_node = tree\n for num in data:\n if num == '0':\n if current_node.left_child.char is None:\n current_node = current_node.left_child\n else:\n decoded += current_node.left_child.char\n current_node = tree\n else:\n if current_node.right_child.char is None:\n current_node = current_node.right_child\n else:\n decoded += current_node.right_child.char\n current_node = tree\n\n return decoded",
"def encode_huffman(text:str,node_list):\n heapify(node_list) #convert into heap the node list\n encoding=[[] for _ in range(127-36)] \n \n while len(node_list)>1: #ensure two pops are done\n node1=heappop(node_list)\n node2=heappop(node_list)\n new_node=Node(node1.char+node2.char,node1.freq+node2.freq)\n\n for c in node1.char: #append to the encoding of the char for each char in the node\n node1ord=ord(c)-36\n encoding[node1ord].append(\"0\")\n for c in node2.char:\n node2ord=ord(c)-36\n encoding[node2ord].append(\"1\")\n heappush(node_list,new_node)\n \n encoding=reverse_huffman(encoding) #reverse the encoding before return\n return encoding",
"def encode_huffman(file):\r\n f = open(file,'r')\r\n s = \"\"\r\n for line in f.readlines():\r\n s = s + line\r\n f.close()\r\n # Convert the file text into a giant string\r\n # And make a Huffman Tree out of it\r\n root = makeHuffmanTree(s)\r\n # Get a dictionary that stores the codes\r\n dic = {}\r\n makeCode(root,\"\",dic)\r\n\r\n ####################################################################\r\n # For debugging one can print the codes to see if they are correct\r\n # print(dic)\r\n ###################################################################\r\n \r\n # Make a new encoded file\r\n code_file = open(\"Huffman \"+file,'w')\r\n # Make an encoded string\r\n enc_s = \"\"\r\n # Replace each character by it Huffman Code.\r\n for char in s:\r\n enc_s = enc_s + str(dic[char])\r\n code_file.write(enc_s)\r\n code_file.close()",
"def create_huff_tree(char_freq):\n fleb = []\n for i in range(len(char_freq)):\n if char_freq[i] != 0:\n fleb.append(HuffmanNode(i, char_freq[i]))\n\n if len(fleb) == 1:\n return fleb[0]\n if len(fleb) == 0:\n return None\n\n for j in range(len(fleb)):\n minval = j\n for k in range(j, len(fleb)):\n if comes_before(fleb[k], fleb[j]):\n minval = k\n temp = fleb[k]\n fleb[k] = fleb[j]\n fleb[j] = temp\n\n while len(fleb) != 2:\n gud = combine(fleb[0], fleb[1])\n fleb.pop(0)\n fleb.pop(0)\n for i in range(len(fleb)):\n if i == len(fleb) - 1 and comes_before(fleb[i], gud):\n fleb.append(gud)\n elif comes_before(gud, fleb[i]):\n fleb.insert(i, gud)\n break\n gudlast = combine(fleb[0], fleb[1])\n fleb.pop(0)\n fleb.pop(0)\n fleb.append(gudlast)\n return fleb[0]",
"def decode_tree(self,encoded_tree):\n def _read_bits(): \n \"\"\"\n helper generator function that lazily iterate over the encoded\n huffman tree bitstream to return a single encoded leaf or non leaf node \n \"\"\"\n \n yield encoded_tree[:schema.HEADER_BITS]\n\n current_idx = schema.HEADER_BITS\n end = len(encoded_tree[current_idx:])\n\n while current_idx < end:\n yield encoded_tree[current_idx]\n current_idx += 1\n if encoded_tree[current_idx-1] == str(schema.LEAF_NODE):\n yield encoded_tree[current_idx:current_idx+schema.BITS_PER_VALUE]\n current_idx += schema.BITS_PER_VALUE\n \n\n def _makeTree(bitstream):\n \"\"\"\n recursively construct a huffman tree from the encoding bit stream\n based on the schema specified in schema.py\n\n Time Complexity: O(m) where m is the no of individual unique chars\n used in the data. Note that indivdual number of chars are typically much\n smaller than total no of chars encoded or decoded. For example, the\n value of m will be 127 to decode any size of ascii data.\n \"\"\"\n try:\n b = next(bitstream)\n if b == str(schema.LEAF_NODE):\n n = next(bitstream)\n return tree.Node(int(n,2))\n\n nodeleft = _makeTree(bitstream)\n noderight = _makeTree(bitstream)\n if noderight == None:\n return nodeleft\n\n node = tree.Node('*')\n node.add_left_child(nodeleft)\n node.add_right_child(noderight)\n return node\n except StopIteration:\n return None\n\n\n root = _makeTree(_read_bits())\n self.tree = tree.Tree(root)\n\n return self.tree",
"def huffman_code(__freq_map, endian=None):\n if not isinstance(__freq_map, dict):\n raise TypeError(\"dict expected, got '%s'\" % type(__freq_map).__name__)\n\n b0 = bitarray('0', endian)\n b1 = bitarray('1', endian)\n\n if len(__freq_map) < 2:\n if len(__freq_map) == 0:\n raise ValueError(\"cannot create Huffman code with no symbols\")\n # Only one symbol: Normally if only one symbol is given, the code\n # could be represented with zero bits. However here, the code should\n # be at least one bit for the .encode() and .decode() methods to work.\n # So we represent the symbol by a single code of length one, in\n # particular one 0 bit. This is an incomplete code, since if a 1 bit\n # is received, it has no meaning and will result in an error.\n return {list(__freq_map)[0]: b0}\n\n result = {}\n\n def traverse(nd, prefix=bitarray(0, endian)):\n try: # leaf\n result[nd.symbol] = prefix\n except AttributeError: # parent, so traverse each of the children\n traverse(nd.child[0], prefix + b0)\n traverse(nd.child[1], prefix + b1)\n\n traverse(_huffman_tree(__freq_map))\n return result",
"def _huffman_tree(__freq_map):\n from heapq import heappush, heappop\n\n class Node(object):\n \"\"\"\n A Node instance will either have a 'symbol' (leaf node) or\n a 'child' (a tuple with both children) attribute.\n The 'freq' attribute will always be present.\n \"\"\"\n def __lt__(self, other):\n # heapq needs to be able to compare the nodes\n return self.freq < other.freq\n\n minheap = []\n # create all leaf nodes and push them onto the queue\n for sym, f in __freq_map.items():\n leaf = Node()\n leaf.symbol = sym\n leaf.freq = f\n heappush(minheap, leaf)\n\n # repeat the process until only one node remains\n while len(minheap) > 1:\n # take the two nodes with lowest frequencies from the queue\n # to construct a new node and push it onto the queue\n parent = Node()\n parent.child = heappop(minheap), heappop(minheap)\n parent.freq = parent.child[0].freq + parent.child[1].freq\n heappush(minheap, parent)\n\n # the single remaining node is the root of the Huffman tree\n return minheap[0]",
"def huffman(counts, verbose=0):\n c = [] ## array of nodes\n m = 0\n for value in counts:\n m += 1;\n c.append(node(value, m))\n root = iterate(c) # make huffman code\n if (verbose):\n reportcode(c)\n return (c, root)",
"def readACTable(bits):\n table = {}\n length = bits[:4]\n tc = bits[4]\n th = bits[5]\n bits = bits[6:]\n codeCounts = [0]*16\n\n for i in range(16):\n codeCounts[i] = int(bits[:2], 16)\n bits = bits[2:]\n\n\n for i in range(16):\n numCodes = codeCounts[i]\n table[i+1] = []\n for _ in range(numCodes):\n table[i+1].append(int(bits[:2], 16))\n bits = bits[2:]\n return table",
"def form_huffman_tree(h_elements):\n root = HuffmanNode(None, None)\n recursive_build(root, h_elements)\n return root"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Remove duplicate data that ``sort_col BETWEEN lower AND upper``.
|
def remove_duplicate(self, lower, upper, _raise_error=False):
self.drop_temp_table()
self.create_temp_table()
with self.engine.begin() as connection:
connection.execute(self.sql_insert_dupe_ids(lower, upper))
connection.execute(self.sql_insert_distinct_copy(lower, upper))
connection.execute(self.sql_remove_dupe_rows(lower, upper))
if _raise_error:
raise InterruptedError("Manually raise error to test atomic")
connection.execute(self.sql_insert_back())
self.drop_temp_table()
|
[
"def _tidy(self) -> None:\n if self.no_overlap:\n self.remove_overlap(self.no_contiguous) # will sort\n else:\n self._sort()",
"def select_sort2(data):\n if len(data) <= 1:\n return data\n\n for i in range(len(data) - 1):\n max_idx = len(data) - 1 - i\n last_idx = max_idx\n exchange = False\n for j in range(0, last_idx):\n if data[j] > data[max_idx]:\n max_idx = j\n exchange = True\n if exchange:\n data[last_idx], data[max_idx] = data[max_idx], data[last_idx]\n return data",
"def _sort_ranges(self):\r\n for linkedlist in self._rangesets:\r\n linkedlist.gnomesort()",
"def clear_sort(column1,column2,column3): \n column1.entry.config(state=NORMAL)\n column1.entry.delete(0,tix.END)\n column1.entry.config(state=DISABLED)\n \n column2.entry.config(state=NORMAL)\n column2.entry.delete(0,tix.END)\n column2.entry.config(state=DISABLED)\n \n column3.entry.config(state=NORMAL)\n column3.entry.delete(0,tix.END)\n column3.entry.config(state=DISABLED)\n refresh_display(screen_def)",
"def remove_duplicates_and_sort_tags(self):\n self.tags = list(set(self.tags))\n self.sort_tags()",
"def __mergeSortRange(self, low, high):\n if low < high:\n middle = (low + high)/2\n self.__mergeSortRange(low,middle)\n self.__mergeSortRange(middle+1,high)\n self.__merge(low,middle,high)",
"def refactor_and_sort_data(color_data):\n return sorted(color_data)",
"def sort(self):",
"def find_unique(self):\n self.unique_data = self.data.drop_duplicates(subset=\"find_unique\")",
"def shell_sort(data):\n if len(data) <= 1:\n return data\n\n gap = len(data) // 2\n while gap > 0:\n for i in range(gap, len(data)):\n last = data[i]\n j = i\n while j >= gap and data[j - gap] > last:\n data[j] = data[j - gap]\n j -= gap\n data[j] = last\n gap = gap // 2\n\n return data",
"def sortCaseInsensitive():\n pass",
"def quicksort_range_helper(values, left, right, start, end, style):\n # ---start student section---\n pass\n # ===end student section===",
"def cleaning(dataset, feature, upper):\n\n #Copying original dataset and dropping all values above the upper limit\n dataset_original = dataset\n dataset = dataset.drop(dataset[dataset['{}'.format(feature)] > upper].index)\n\n return dataset",
"def reorder_by_length(self):\r\n if sort_condition:\r\n self.database.sort_values('Length', ascending=False, inplace=True)\r\n else:\r\n self.database.sort_values('Length', ascending=True, inplace=True)",
"def _sort(self) -> None:\n self.intervals.sort()",
"def insert_sort2(data):\n if len(data) <= 1:\n return data\n\n for i in range(1, len(data)):\n last = data[i]\n j = i\n while j >= 1 and data[j - 1] > last:\n data[j] = data[j - 1]\n j -= 1\n data[j] = last\n\n return data",
"def trim_sort(v):\n v = np.delete(v, [np.argmin(np.abs(v)), np.argmax(np.abs(v))])\n v = sorted(v, key=lambda x: (abs(x), x.imag))\n return v",
"def filter_pvalue (self, pvalue_cut_low, pvalue_cut_up=None ):\n peaks = self.peaks\n new_peaks = {}\n chrs = peaks.keys()\n chrs.sort()\n if pvalue_cut_up:\n for chrom in chrs:\n new_peaks[chrom]=[p for p in peaks[chrom] if p[6] >= pvalue_cut_low and p[6]<pvalue_cut_up]\n if not new_peaks[chrom]: del new_peaks[chrom]\n else:\n for chrom in chrs:\n new_peaks[chrom]=[p for p in peaks[chrom] if p[6] >= pvalue_cut_low]\n if not new_peaks[chrom]: del new_peaks[chrom]\n self.peaks = new_peaks",
"def clean_overlap ( self ):\n regions = self.regions\n new_regions = {}\n chrs = regions.keys()\n chrs.sort()\n for chrom in chrs:\n new_regions[chrom]=[]\n n_append = new_regions[chrom].append\n prev_region = None\n regions_chr = regions[chrom]\n for i in xrange(len(regions_chr)):\n if not prev_region:\n prev_region = regions_chr[i]\n continue\n else:\n if regions_chr[i][0] <= prev_region[1]:\n s_new_region = prev_region[0]\n e_new_region = max(regions_chr[i][1],prev_region[1])\n l_new_region = e_new_region-s_new_region\n prev_region = (s_new_region,e_new_region)\n else:\n n_append(prev_region)\n prev_region = regions_chr[i]\n if prev_region:\n n_append(prev_region)\n del regions\n self.regions = new_regions\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return min / max value in sort key.
|
def sort_key_min_max(self):
sql = select([func.min(self.sort_col), func.max(self.sort_col)])
results = self.engine.execute(sql).fetchall()
if len(results) == 1:
min_value, max_value = results[0]
return min_value, max_value
else:
raise ValueError
|
[
"def min_max_keys(d):\n\n # thinking to change the key list to an array, sort it, then take 0 element\n # for key_min and -1 for key_max\n\n if (len(d) > 0):\n key_list = [key for key in d.keys()]\n key_list.sort()\n return (key_list[0], key_list[-1])\n else:\n return ()",
"def keywithminval (self, d):\n v=list(d.values())\n print(v)\n k=list(d.keys())\n return k[v.index(min(v))]",
"def _get_minmax_and_indices(self, min=None, max=None):\n self._get_sort_index()\n s=self['sort_index']\n\n dowhere=False\n if min is not None:\n xmin = min\n dowhere=True\n else:\n xmin = self.x[s[0]]\n\n\n if max is not None:\n xmax = max\n dowhere=True\n else:\n xmax = self.x[s[-1]]\n \n self.dmin = xmin\n self.dmax = xmax\n\n self[self.xpref+'min'] = xmin\n self[self.xpref+'max'] = xmax\n\n if dowhere:\n # where function will preserve order, so subscript with s\n w,=numpy.where( (self.x[s] >= xmin) & (self.x[s] <= xmax) )\n if w.size == 0:\n raise ValueError(\"No data in specified min/max range: [%s,%s]\" % (xmin,xmax))\n self['wsort'] = s[w]\n else:\n self['wsort'] = s",
"def getMinKey(self):\n if not self.minDict or self.minVal == float('inf'): return ''\n for key in self.minDict[self.minVal]: return key",
"def __swap_min_max(self):\n return \"MAX\" if (self.level == \"MIN\") else \"MIN\"",
"def _get_max_min(self, that):\n maxstart = max(self.start, that.start)\n minstop = min(self.stop, that.stop)\n return (maxstart, minstop)",
"def keywithmaxval(self, d): \n v=list(d.values())\n k=list(d.keys())\n return k[v.index(max(v))]",
"def keywithmaxval(d):\n v=list(d.values())\n k=list(d.keys())\n return k[v.index(max(v))]",
"def getMinKey(self):\n try:\n return list(self.valdictionary[self.minvalue])[0]\n except IndexError:\n return \"\"\n\n\n # Your AllOne object will be instantiated and called as such:",
"def best_in_series(metric):\n if greater_is_better(metric):\n return \"max\"\n else:\n return \"min\"",
"def max_min_score (self):\n peaks = self.peaks\n chrs = peaks.keys()\n chrs.sort()\n x = 0\n y = 100000\n for chrom in chrs:\n if peaks[chrom]:\n m = max([i[4] for i in peaks[chrom]])\n if m>x:\n x=m\n m = min([i[4] for i in peaks[chrom]])\n if m<y:\n y=m\n return (x,y)",
"def get_min_max_values(self):\n\n min_val = min(self.array_like)\n max_val = max(self.array_like)\n\n self.min_max_val = (min_val, max_val)",
"def compValues(self, key1, key2):\n e1 = self[key1]\n e2 = self[key2]\n if e1 < e2:\n return -1\n elif e1 > e2:\n return 1\n return 0",
"def minmax(values):\n mn, mx = None, None\n for v in values:\n if v is not None:\n if mn is None or mn > v:\n mn = v\n if mx is None or mx < v:\n mx = v\n return mn, mx",
"def min_max(arr: StaticArray) -> ():\n if arr.size() == 1:\n output = (arr[0], arr[0])\n return output\n\n max_val = arr[0]\n min_val = arr[0]\n\n for index in range(arr.size()):\n if arr[index] > max_val:\n max_val = arr[index]\n if arr[index] < min_val:\n min_val = arr[index]\n\n output = (min_val, max_val)\n return output",
"def find_max_less_than(self, x: T) -> Union[T, None]:\n for bucket in reversed(self._buckets):\n if bucket[0] < x:\n return bucket[bisect_left(bucket, x) - 1]\n return None",
"def find_min_greater_than_or_equal_to(self, x: T) -> Union[T, None]:\n for bucket in self._buckets:\n if bucket[-1] >= x:\n return bucket[bisect_left(bucket, x)]",
"def get_min(a, b):\n return b if a > b else a",
"def max_and_min(list):\n\n # return tuple containig max and min of list\n return (max(list), min(list))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get all request names that haven't been tested by a uat job.
|
def get_untested(cls) -> List:
return db.session.query(RequestName). \
filter(
RequestName.uat_job_id == None # pylint: disable=singleton-comparison # noqa: E711;
).all()
|
[
"def get_unverified(cls) -> List:\n return db.session.query(RequestName). \\\n filter(\n RequestName.auto_analyse_result != RequestName.Results.ERROR.value,\n RequestName.uat_result == None # pylint: disable=singleton-comparison # noqa: E711;\n ).all()",
"def get_unexposed_user_log_names(self):\n raise SkipTest(\"No unexposed user log names defined.\")",
"def user_not_tracked_conditions():\n\n unused_conditions = []\n\n all_conditions = Condition.query.all()\n user_conditions = user_tracked_conditions_name()\n\n for condition in all_conditions:\n if condition not in user_conditions:\n unused_conditions.append(condition)\n\n return unused_conditions",
"def getUnknownNames(self):\n return list(self.unknowns.keys())",
"def _GetFailedTestNames(self):\n return set(r.test_name for r in self._test_results if r.failed)",
"def test_missing_request():\n _user_logging(\n {'X-CorrelationID': '298ebf9d-be1d-11e7-88ff-2c44fd152860'},\n {},\n {'correlation_id': v_str('-')},\n False\n )",
"def get_all_job_names() -> List[str]:\n return base_jobs.JobMetaclass.get_all_job_names()",
"def reserved_names(self):\n\t\treturn set_(['DoNotRender','index','index-Names','index-Tags',\n\t\t\t\t\t'index-Timeline'])",
"def assertRequestNotInHeader(self, name):\n headers = self.requests_mock.last_request.headers\n self.assertNotIn(name, headers)",
"def get_not_always_used(self):\n results_list = []\n\n # initial list is made of fixtures that are in the children\n initial_list = self.gather_all_required(include_parents=False)\n\n for c in self.get_leaves():\n j = 0\n for i in range(len(initial_list)):\n fixture_name = initial_list[j]\n if fixture_name not in c.gather_all_required():\n del initial_list[j]\n results_list.append(fixture_name)\n else:\n j += 1\n\n return results_list",
"def test_unsRegisteredNames() -> json:\r\n\r\n # Action\r\n status, result = u.unsRegisteredNames()\r\n\r\n # Assertion\r\n AssertNotEmptyOrError(status, result)",
"def __generateUndefinedNamesList(self):\n return [\n self.unList.item(row).text()\n for row in range(self.unList.count())\n ]",
"def get_fixture_requests(self, assigned=None):\n return self.requestdb.items(assigned=assigned)",
"def get_all_names(self):\n all_names = set()\n \n return all_names\n\n # Might be helpful... I know nothing about nginx lens",
"def get_all_requests():",
"def test_excluded_default_handlers(self):\n self.assertNotIn('request-timeout-hint', self.server._request_handlers)\n self.assertNotIn('version-list', self.server._request_handlers)",
"def get_required_scenario_names():",
"def _get_claim_names(self) -> Set[str]:\n\n return set(self._get_claim_set(with_empty_claims=True).keys())",
"def without(self, *names):\n only_vars = {}\n\n for name in self.request_variables:\n if name not in names:\n only_vars[name] = self.request_variables.get(name)\n\n return only_vars"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get all request names that haven't been tested by a uat job.
|
def get_unverified(cls) -> List:
return db.session.query(RequestName). \
filter(
RequestName.auto_analyse_result != RequestName.Results.ERROR.value,
RequestName.uat_result == None # pylint: disable=singleton-comparison # noqa: E711;
).all()
|
[
"def get_untested(cls) -> List:\n return db.session.query(RequestName). \\\n filter(\n RequestName.uat_job_id == None # pylint: disable=singleton-comparison # noqa: E711;\n ).all()",
"def get_unexposed_user_log_names(self):\n raise SkipTest(\"No unexposed user log names defined.\")",
"def user_not_tracked_conditions():\n\n unused_conditions = []\n\n all_conditions = Condition.query.all()\n user_conditions = user_tracked_conditions_name()\n\n for condition in all_conditions:\n if condition not in user_conditions:\n unused_conditions.append(condition)\n\n return unused_conditions",
"def getUnknownNames(self):\n return list(self.unknowns.keys())",
"def _GetFailedTestNames(self):\n return set(r.test_name for r in self._test_results if r.failed)",
"def test_missing_request():\n _user_logging(\n {'X-CorrelationID': '298ebf9d-be1d-11e7-88ff-2c44fd152860'},\n {},\n {'correlation_id': v_str('-')},\n False\n )",
"def get_all_job_names() -> List[str]:\n return base_jobs.JobMetaclass.get_all_job_names()",
"def reserved_names(self):\n\t\treturn set_(['DoNotRender','index','index-Names','index-Tags',\n\t\t\t\t\t'index-Timeline'])",
"def assertRequestNotInHeader(self, name):\n headers = self.requests_mock.last_request.headers\n self.assertNotIn(name, headers)",
"def get_not_always_used(self):\n results_list = []\n\n # initial list is made of fixtures that are in the children\n initial_list = self.gather_all_required(include_parents=False)\n\n for c in self.get_leaves():\n j = 0\n for i in range(len(initial_list)):\n fixture_name = initial_list[j]\n if fixture_name not in c.gather_all_required():\n del initial_list[j]\n results_list.append(fixture_name)\n else:\n j += 1\n\n return results_list",
"def test_unsRegisteredNames() -> json:\r\n\r\n # Action\r\n status, result = u.unsRegisteredNames()\r\n\r\n # Assertion\r\n AssertNotEmptyOrError(status, result)",
"def __generateUndefinedNamesList(self):\n return [\n self.unList.item(row).text()\n for row in range(self.unList.count())\n ]",
"def get_fixture_requests(self, assigned=None):\n return self.requestdb.items(assigned=assigned)",
"def get_all_names(self):\n all_names = set()\n \n return all_names\n\n # Might be helpful... I know nothing about nginx lens",
"def get_all_requests():",
"def test_excluded_default_handlers(self):\n self.assertNotIn('request-timeout-hint', self.server._request_handlers)\n self.assertNotIn('version-list', self.server._request_handlers)",
"def get_required_scenario_names():",
"def _get_claim_names(self) -> Set[str]:\n\n return set(self._get_claim_set(with_empty_claims=True).keys())",
"def without(self, *names):\n only_vars = {}\n\n for name in self.request_variables:\n if name not in names:\n only_vars[name] = self.request_variables.get(name)\n\n return only_vars"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
run select.select, with a timeout specified as a datetime object
|
def select_timeout(timeout, rlist=[], wlist=[], xlist=[]):
delta = timeout - datetime.datetime.now()
if delta.days >= 0:
assert(delta.days == 0) # unimplemented, and insane!
secs = delta.seconds + delta.microseconds / 1000000.0
assert(secs > 0)
return select.select(rlist, wlist, xlist, secs)
else: # already timed out
return ([], [], [])
|
[
"def doSelect(nfds: 'int', readfds: 'void *', writefds: 'void *', exceptfds: 'void *', usertimeout: 'timeval *') -> \"int\":\n return _coin.SoDB_doSelect(nfds, readfds, writefds, exceptfds, usertimeout)",
"def SoDB_doSelect(nfds: 'int', readfds: 'void *', writefds: 'void *', exceptfds: 'void *', usertimeout: 'timeval *') -> \"int\":\n return _coin.SoDB_doSelect(nfds, readfds, writefds, exceptfds, usertimeout)",
"def cli_loop_select(self, timeout):\n parsed_some = True # requires thight loop, as it may be sending messages core<->cmd\n while parsed_some:\n parsed_some = False\n self.debug(\"Checking if data ready: %s // to %s\"%(repr(self.filenos()), timeout) )\n for n, clients_ready in enumerate(select.select(self.filenos(),[],[], timeout)):\n # self.debug(\"Clients ready[%s]: \"%n, clients_ready)\n for c in clients_ready:\n # self.debug(\"Data ready at %s\"%repr(c))\n parsed_some |= c.recv_and_parse()\n # self.debug(\"parsed_more\", parsed_some)\n timeout=0.1\n # self.debug(\"User input\", parsed_some)",
"def set_timeout(cls, timeout):\n ...",
"def select(read_streams, timeout=0):\n\n write_streams = []\n exception_streams = []\n\n try:\n return builtin_select.select(\n read_streams,\n write_streams,\n exception_streams,\n timeout,\n )[0]\n except builtin_select.error as e:\n # POSIX signals interrupt select()\n if e[0] == errno.EINTR:\n return []\n else:\n raise e",
"def liveSelection(time, stop):",
"def _select(self):\n readable = []\n for sock in self.serverTransport:\n readable.append(sock.handle.fileno())\n writable = []\n print(\"33339999\")\n try:\n res = select.select(readable, writable, readable)\n except Exception as e:\n res = None\n print(\"333399991%s\" % [res, e])\n else:\n print(\"333399992%s\" % [res])\n return res",
"def alarm(iterator, timeout, errorType=TimeoutError):",
"def wait_packet(pypcap_pc,timeout,count=None):\n packetlist=[]\n fd=pypcap_pc.fileno()\n starttime=time.time()\n while True:\n if time.time()-starttime>=timeout:\n break\n if count and len(packetlist)>=count:\n break\n readlist,writelist,errorlist=select.select([fd,],[],[],(starttime+timeout-time.time()))\n if readlist:\n ptime,pdata=pypcap_pc.next()\n packetlist.append((ptime,pdata))\n else:\n break\n return packetlist",
"def sethttpstimeout(timeout):\n if _under_26():\n opener = urllib2.build_opener(TimeoutHTTPSHandler(timeout))\n urllib2.install_opener(opener)\n else:\n raise Error(\"This python version has timeout builtin\")",
"def set_timeout(self, timeout):\r\n self.timeout = float(timeout)/1000.",
"def doSelect(self, nfds: 'int', readfds: 'void *', writefds: 'void *', exceptfds: 'void *', userTimeOut: 'timeval *') -> \"int\":\n return _coin.SoSensorManager_doSelect(self, nfds, readfds, writefds, exceptfds, userTimeOut)",
"def verify_selected(self, timeout=TIMEOUT):\n\t\treturn self.extend().selected().verify(timeout)",
"def msselect(self, *args, **kwargs):\n return _ms.ms_msselect(self, *args, **kwargs)",
"def _settimeout(self, timeout):\n if timeout is None:\n timeout = socket.getdefaulttimeout()\n _socket_settimeout(self, timeout)",
"def wait_precisely(timeout, start_time, event):\n wait(timeout - (time.time() - start_time), event)",
"def timeout(self):\n raise NotImplementedError",
"def _wait_timeout(self, conn, time):\n\n state = conn.poll()\n if state == psycopg2.extensions.POLL_OK:\n return self.ASYNC_OK\n elif state == psycopg2.extensions.POLL_WRITE:\n # Wait for the given time and then check the return status\n # If three empty lists are returned then the time-out is reached.\n timeout_status = select.select([], [conn.fileno()], [], time)\n if timeout_status == ([], [], []):\n return self.ASYNC_WRITE_TIMEOUT\n\n # poll again to check the state if it is still POLL_WRITE\n # then return ASYNC_WRITE_TIMEOUT else return ASYNC_OK.\n state = conn.poll()\n if state == psycopg2.extensions.POLL_WRITE:\n return self.ASYNC_WRITE_TIMEOUT\n return self.ASYNC_OK\n elif state == psycopg2.extensions.POLL_READ:\n # Wait for the given time and then check the return status\n # If three empty lists are returned then the time-out is reached.\n timeout_status = select.select([conn.fileno()], [], [], time)\n if timeout_status == ([], [], []):\n return self.ASYNC_READ_TIMEOUT\n\n # poll again to check the state if it is still POLL_READ\n # then return ASYNC_READ_TIMEOUT else return ASYNC_OK.\n state = conn.poll()\n if state == psycopg2.extensions.POLL_READ:\n return self.ASYNC_READ_TIMEOUT\n return self.ASYNC_OK\n else:\n raise psycopg2.OperationalError(\n \"poll() returned %s from _wait_timeout function\" % state\n )",
"def timeout_ipdb(locals_, timeout: float = 3):\n cv = threading.Condition()\n thread = threading.Thread(target=_timeout_enter_ipdb_thread, args=(locals_, cv, timeout))\n thread.start()\n yield\n with cv:\n cv.notify_all()",
"def set_timeout(self, timeout):\n self.m_timeout = timeout"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parse mssql banner information
|
def parse_banner_mssql(self, task: IscoutTask, level: int,
portinfo: PortInfo, resultfi: str):
try:
if not os.path.isfile(resultfi):
self._logger.error(
f"Resultfi not exists:\ntaskid:{task.taskid}\nbatchid:{task.batchid}\nresultfi:{resultfi}"
)
return
# its' one json object per line
linenum = 1
with open(resultfi, mode='r') as fs:
while True:
try:
line = fs.readline()
if line is None or line == '':
break
sj = json.loads(line)
if sj is None:
continue
# 这个暂时没有使用
res = self._parse_mssql(sj, task, level, portinfo)
# 如果成功了则证明已经将mssql的信息解析出来了就不用再继续解析了
if res:
break
except Exception:
self._logger.error(
"Parse one mssql banner json line error:\ntaskid:{}\nbatchid:{}\nresultfi:{}\nlinenum:{}"
.format(task.taskid, task.batchid, resultfi,
linenum))
finally:
linenum += 1
except Exception:
self._logger.error(
"Parse mssql banner error:\ntaskid:{}\nbatchid:{}\nresultfi:{}"
.format(task.taskid, task.batchid, resultfi))
|
[
"def metadata(soup):\n header = soup.find('div', {\"class\":\"stationTextHeader\"}).text.strip()\n return header.split('\\n')[:-1]",
"def _convert_tvdb_tvshow_metadata(tvdb_show, imdb_id, banners=True, language=\"en\"):\n info = {}\n if tvdb_show is None:\n return info\n info['tvdb_id'] = str(tvdb_show['id'])\n info['name'] = tvdb_show['seriesname']\n info['title'] = tvdb_show['seriesname']\n info['tvshowtitle'] = tvdb_show['seriesname']\n info['originaltitle'] = tvdb_show['seriesname']\n # info['genre'] =\n info['plot'] = tvdb_show.get('overview', '')\n info['fanart'] = tvdb_show.get('fanart', '')\n info['rating'] = tvdb_show.get('rating')\n info['votes'] = tvdb_show.get('ratingcount')\n info['year'] = tvdb_show.get('year', 0)\n info['studio'] = tvdb_show.get('network', '')\n info['imdb_id'] = tvdb_show.get('imdb_id', '')\n info['genre'] = u\" / \".join(tvdb_show.get('genre', '').split(\"|\")[1:-1])\n info[\"imdb_id\"] = imdb_id\n if banners:\n info['poster'] = tvdb_show.get_poster(language=language)\n return info",
"def _parseWaveDesc(self, raw_metadata):\n m = raw_metadata.split('\\n')\n metadata = {}\n m.pop() # the first line is always an echo of the command used. removing.\n m.pop() # the second line is equally unhelpful\n for i in m:\n x = i.split(':')\n if not len(x) == 2:\n continue\n x[0] = x[0].strip().lower()\n try:\n x[1] = float(x[1].strip())\n except:\n x[1] = x[1].strip()\n metadata[x[0]] = x[1]\n return metadata",
"def _convert_tvdb_season_metadata(show_metadata,\n season,\n banners=True,\n language=\"en\"):\n info = copy.deepcopy(show_metadata)\n del info['title']\n info['season'] = season.num\n if banners:\n info['poster'] = season.get_poster(language=language)\n return info",
"def parse_description(self):\n string = self.description\n name, host_genus = \\\n basic.parse_names_from_record_field(string)\n self._description_name = name\n self._description_host_genus = host_genus",
"def get_narrowcasting_banners() -> List[Dict]:\n result = []\n banners = TelevisionBanner.objects.filter(start_date__lte=timezone.now(), end_date__gte=timezone.now(), active=True)\n\n for banner in banners:\n result.append({\n \"name\": banner.name,\n \"image\": \"%s%s\" % (settings.MEDIA_URL, str(banner.picture)),\n \"id\": banner.id\n })\n\n return result",
"def get_verb_infos(self, tag):\n try:\n i = tag.text.index(' (')\n release_date = re.findall(r'\\(([^()]+)\\)', tag.text)[0]\n except ValueError:\n i = -1\n release_date = 'Unknown'\n album_title = tag.text[:i]\n\n return album_title, release_date",
"def get_bridge_desciption(self, ip, port):\n br_info = {}\n\n protocol = 'http'\n if str(port) == '443':\n protocol = 'https'\n\n requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)\n r = requests.get(protocol + '://' + ip + ':' + str(port) + '/description.xml', verify=False)\n if r.status_code == 200:\n xmldict = xmltodict.parse(r.text)\n br_info['ip'] = ip\n br_info['port'] = str(port)\n br_info['friendlyName'] = str(xmldict['root']['device']['friendlyName'])\n br_info['manufacturer'] = str(xmldict['root']['device']['manufacturer'])\n br_info['manufacturerURL'] = str(xmldict['root']['device']['manufacturerURL'])\n br_info['modelDescription'] = str(xmldict['root']['device']['modelDescription'])\n br_info['modelName'] = str(xmldict['root']['device']['modelName'])\n br_info['modelURL'] = str(xmldict['root']['device']['modelURL'])\n br_info['modelNumber'] = str(xmldict['root']['device']['modelNumber'])\n br_info['serialNumber'] = str(xmldict['root']['device']['serialNumber'])\n br_info['UDN'] = str(xmldict['root']['device']['UDN'])\n br_info['gatewayName'] = str(xmldict['root']['device'].get('gatewayName', ''))\n\n br_info['URLBase'] = str(xmldict['root']['URLBase'])\n if br_info['modelName'] == 'Philips hue bridge 2012':\n br_info['version'] = 'v1'\n elif br_info['modelName'] == 'Philips hue bridge 2015':\n br_info['version'] = 'v2'\n else:\n br_info['version'] = 'unknown'\n\n # get API information\n api_config = self.get_api_config_of_bridge(br_info['URLBase'])\n br_info['datastoreversion'] = api_config.get('datastoreversion', '')\n br_info['apiversion'] = api_config.get('apiversion', '')\n br_info['swversion'] = api_config.get('swversion', '')\n\n return br_info",
"def _parse_header(header):\n names, lengths = [], []\n for line in header:\n if line.startswith(\"@SQ\"):\n for field in line.split(\"\\t\"):\n if field.startswith(\"SN:\"):\n names.append(field[3:])\n elif field.startswith(\"LN:\"):\n lengths.append(int(field[3:]))\n return names, lengths",
"def parse_tracking_data(offer_markup):\n html_parser = BeautifulSoup(offer_markup, \"html.parser\")\n scripts = html_parser.head.find_all(\"script\")\n metadata_script = None\n for script in scripts:\n if \"ad_id\" in script.text:\n metadata_script = script.text\n break\n if not metadata_script:\n return None, None, None\n data_dict = json.loads(re.split(\"pageView|;\", metadata_script)[3].replace('\":{', \"{\").replace(\"}}'\", \"}\"))\n return int(data_dict.get(\"ad_price\", 0)) or None, data_dict.get(\"price_currency\"), data_dict[\"ad_id\"]",
"def parse_head(curl):\n param_list = curl.split(\"' -H '\")\n return param_list",
"def _extract_conn_tags(conn_kwargs):\n try:\n return {\n net.TARGET_HOST: conn_kwargs['host'],\n net.TARGET_PORT: conn_kwargs['port'],\n redisx.DB: conn_kwargs['db'] or 0,\n }\n except Exception:\n return {}",
"def parse_vminfo_output(raw_stdout):\n lines = [re.split(\"=\", line) for line in re.split(r\"\\r?\\n\", raw_stdout)]\n try:\n res = dict()\n for line in lines:\n if len(line) > 1:\n res[line[0]] = line[1].strip(\"\\\"\\'\")\n return res\n except Exception as e:\n print(e)\n raise e",
"def parse_sparkDatasourceInfo_tag(spec):\n def parse_datasource(spec):\n toks = spec.split(\",\")\n dct = {}\n for tok in toks:\n k,v = tok.split(\"=\")\n dct[k] = v\n return dct\n toks = spec.split(\"\\n\")\n return [ parse_datasource(tok) for tok in toks ]",
"def decode_SnoopFileHeader(B_datastring):\n header = {}\n header['id_pattern'] = B_datastring[0:8]\n header['version_number'] = B_datastring[8:12]\n header['datalink_type'] = B_datastring[12:16]\n return header",
"def parsed_metadata(self, msg):\n ips = msg.get_header_ips()\n result = []\n for ipaddr in ips:\n country = self.get_country(ipaddr)\n result.append(str(country))\n if result:\n result = \" \".join(result)\n msg.headers[\"X-Relay-Countries\"].append(result)\n self.ctxt.log.debug(\"X-Relay-Countries: '%s'\", result)\n msg.plugin_tags[\"RELAYCOUNTRY\"] = result",
"def _parse_guild_info(cls, builder, info_container):\n if m := founded_regex.search(info_container.text):\n description = m.group(\"desc\").strip()\n builder.description(description or None)\n builder.world(m.group(\"world\"))\n builder.founded(parse_tibia_date(m.group(\"date\").replace(\"\\xa0\", \" \")))\n builder.active(\"currently active\" in m.group(\"status\"))",
"def get_visit_info(instcat):\n with open(instcat) as fd:\n for line in fd:\n if line.startswith('filter'):\n band = 'ugrizy'[int(line.strip().split()[1])]\n elif line.startswith('obshistid'):\n visit = int(line.strip().split()[1])\n if line.startswith('object'):\n break\n return visit, band",
"def parse_image_meta(meta):\n image_id = meta[:, 0]\n image_shape = meta[:, 1:5]\n window = meta[:, 5:11]\n active_class_ids = meta[:, 11:]\n return image_id, image_shape, window, active_class_ids"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates the warehouse_unittest database, builds the schema and returns an SQLALchemy Connection to the database.
|
def database(request):
if os.getenv('WAREHOUSE_DATABASE_URL'):
# Assume that the database was externally created
url = os.getenv('WAREHOUSE_DATABASE_URL')
else:
# (Drop and) create the warehouse_unittest database with UTF-8 encoding
# (in case the default encoding was changed from UTF-8)
subprocess.call(['dropdb', 'warehouse_unittest'])
subprocess.check_call(['createdb', '-E', 'UTF8', 'warehouse_unittest'])
url = 'postgresql:///warehouse_unittest'
engine = create_engine(url, poolclass=AssertionPool)
request.addfinalizer(engine.dispose)
if not os.getenv('WAREHOUSE_DATABASE_URL'):
request.addfinalizer(
lambda: subprocess.call(['dropdb', 'warehouse_unittest'])
)
# Connect to the database and create the necessary extensions
engine.execute('CREATE EXTENSION IF NOT EXISTS "citext"')
engine.execute('CREATE EXTENSION IF NOT EXISTS "uuid-ossp"')
# Have Alembic create the schema
alembic_cfg = alembic.config.Config()
alembic_cfg.set_main_option(
"script_location",
"warehouse:migrations",
)
alembic_cfg.set_main_option("url", url)
alembic.command.upgrade(alembic_cfg, "head")
return engine
|
[
"def create_test_db(self):\n self.engine = sqlalchemy.create_engine(\"sqlite:///:memory:\")\n self.slave = self.engine\n self.metadata = Metadata()\n self.create_db()\n self.reset_db()",
"def test_create_database(self):\n\n # Setup the tables\n CreateDatabase.run(app=self.app)\n engine = create_engine(TestManagePy.postgresql_url)\n connection = engine.connect()\n\n for model in [User, Library, Permissions]:\n exists = engine.dialect.has_table(connection, model.__tablename__)\n self.assertTrue(exists)\n\n # Clean up the tables\n Base.metadata.drop_all(bind=self.app.db.engine)",
"def setup_database(config=None):\n if config is None:\n # TODO: How could we support orion.core.config.storage.database as well?\n config = orion.core.config.database.to_dict()\n\n db_opts = config\n dbtype = db_opts.pop(\"type\")\n\n log.debug(\"Creating %s database client with args: %s\", dbtype, db_opts)\n\n return database_factory.create(dbtype, **db_opts)",
"def setup_db(request):\n def fin():\n os.remove('test.sql')\n request.addfinalizer(fin)\n\n cfg = load_test_yaml()\n sql = db.Adapter(cfg['engine'])\n\n for process, directives in cfg['process'].items():\n if not 'pk' in directives:\n directives['pk'] = '_id'\n if directives['action'] == 'store':\n sql.declare(directives['tablename'], directives['pk'], directives['schema'])\n return sql",
"def create_db_and_tables():\r\n engine = create_connection_db()\r\n delete_db(engine)\r\n create_db(engine)\r\n create_tables_db(engine)",
"def create_database(self):\n self._create_tables()\n self._create_functions()\n self._create_triggers()",
"def create_testdata_db(self):\n\n try:\n dsn = CommandlineTool.get_input_option('yoda-db-testdata-dsn')\n force = CommandlineTool.get_input_option('force')\n if (not dsn):\n dsn = self._mh.ext_cfg['Yoda']['db_testdata_dsn'].format(var_dir=syscfg.HTK_VAR_DIR)\n db = DBO(dsn)._dbo_driver\n db._parse_dsn(dsn)\n\n result = True\n if (not db.database_exists() or force):\n if (force):\n dmsg(self._mh._trn.msg('yoda_remove_testdata_db', dsn))\n db.remove_database()\n\n print(self._mh._trn.msg('yoda_create_testdata_db', dsn))\n db.connect()\n dbdir = os.path.join(self._mh.ext_cfg['Yoda']['test_repo_root'].format(var_dir=syscfg.HTK_VAR_DIR), 'db_testdata')\n script = file_get_contents(\n os.path.join(dbdir, 'db_struct.sql'))\n db._cursor.executescript(script)\n script = file_get_contents(os.path.join(dbdir, 'db_data.sql'))\n db._cursor.executescript(script)\n print(self._mh._trn.msg('yoda_testdata_db_created'))\n else:\n print(self._mh._trn.msg('yoda_testdata_db_exists', dsn))\n result = False\n\n return result\n except Error as ex:\n print(self._mh._trn.msg('yoda_testdata_db_error', ex))\n return False",
"def extraSetUp(self):\n if self.good_sql is None:\n raise unittest.SkipTest('no good sql for reconnect test')\n self.startDB()\n self.dbpool = self.makePool(cp_max=1, cp_reconnect=True,\n cp_good_sql=self.good_sql)\n self.dbpool.start()\n return self.dbpool.runOperation(simple_table_schema)",
"def connect_to_database(engine_connection_string, debug):\n engine = create_engine(engine_connection_string, echo=debug)\n sql_model.metadata.create_all(engine)\n connection = engine.connect()\n return connection",
"def test_database_setup():\n\n TEST_DB_NAME = 'test_db.sqlite3'\n with contextlib.suppress(FileNotFoundError):\n TIMEOUT_SECONDS = 60\n cutoff_time = datetime.now() + timedelta(seconds=TIMEOUT_SECONDS)\n db_deleted = False\n while not db_deleted and datetime.now() < cutoff_time:\n try:\n os.remove(TEST_DB_NAME)\n db_deleted = True\n except PermissionError:\n # DB file lock is probably still held by last Django server instance.\n # Let's give it a moment to release it.\n pass\n\n if not db_deleted:\n raise TimeoutError(f\"Could not delete {TEST_DB_NAME}\")\n\n # Just doing:\n # `subprocess.call(f'sqlite3 db.sqlite3 .schema | sqlite3 {self.TEST_DB_NAME}', shell=True)`\n # would be nicer, but unfortunately sqlite creates a default table (sqlite_sequence) that we need to\n # remove from the schema before passing it back in again\n schema_byte_string = subprocess.check_output('sqlite3 db.sqlite3 .schema', shell=True)\n schema_string = str(schema_byte_string, 'utf-8')\n schema_one_line = schema_string.replace('\\r','').replace('\\n','')\n schema_without_sqlite_sequence = schema_one_line.replace('CREATE TABLE sqlite_sequence(name,seq);','')\n subprocess.call(f'echo {schema_without_sqlite_sequence} | sqlite3 {TEST_DB_NAME}', shell=True)\n\n # populate new database as is needed for testing\n with open('logs/test_setup_log.txt', 'a') as log:\n subprocess.call(\n ['py', 'manage.py', 'test_setup', '--settings=charity_configuration.test_settings'],\n stdout=log,\n )",
"def _init_inner_db():\n db.create_all(bind=\"octopus_db\")",
"def testdatabase_factory(request):\n # Inline imports so this plugin can work if you don't have sqlalchemy and don't need this fixture\n from sqlalchemy import create_engine, event\n from sqlalchemy.engine.url import make_url\n from sqlalchemy.orm import sessionmaker, scoped_session\n\n def _testdatabase_factory(base, database_url, setup_global_test_fixtures=None):\n \"\"\"\n Params:\n - base: Python module that contains Session, Base\n - database_url: The main DATABASE_URL\n - setup_global_test_fixtures: Callback to set up global fixtures\n \"\"\"\n\n def testdatabase():\n \"\"\"\n Set up session-wide test database\n Returns a dictionary, db_params, with engine and connection\n \"\"\"\n reset_db = request.config.getoption(\"--reset-db\")\n Base = base.Base\n\n db_params = {}\n engine = base.engine\n\n # Using the original DATABASE_URL, make a new TEST_DATABASE_URL for the test database\n # (same server but just different db name)\n url_parsed = make_url(database_url)\n\n # Just get the \"base\" Postgres URL without a database name\n BASE_DB_SERVER_URL = 'postgres://{}:{}@{}:{}'.format(\n url_parsed.username, url_parsed.password_original, url_parsed.host, url_parsed.port or 5432\n )\n # Assume there is a basic database called Postgres (we always have to connect to some database)\n DB_SERVER_URL = '{}/postgres'.format(BASE_DB_SERVER_URL)\n\n # Construct a separate URL for the test db (same server but separate test database name)\n test_db_name = '{}_test'.format(url_parsed.database)\n TEST_DB_URL = '{}/{}'.format(BASE_DB_SERVER_URL, test_db_name)\n\n temp_engine = create_engine(DB_SERVER_URL)\n temp_conn = temp_engine.connect() # todo add code to wait for Postgres to be running\n temp_conn.execute('commit') # end the already open transaction\n\n check_existing_db_query = (\n \"SELECT datname FROM pg_catalog.pg_database WHERE lower(datname) = lower('{}')\".format(\n test_db_name\n )\n )\n res = temp_conn.execute(check_existing_db_query)\n existing_database_found = 
False\n tables_exist = None\n\n if len(list(res)) > 0:\n existing_database_found = True\n print('Test database already exists')\n else:\n temp_conn.execute('create database {}'.format(test_db_name))\n print('Creating test database:', test_db_name)\n tables_exist = False\n\n temp_conn.close()\n\n # Clear tables to reset schema, if requested\n if existing_database_found and reset_db:\n # Do dropdb instead of Base.metadata.drop_all_engine() because sometimes cascade-deletes fails\n # with FKs, if we are removing a table / changing the schema a lot\n print('Dropping/creating test database')\n subprocess.run('dropdb {}'.format(test_db_name), shell=True, check=True)\n subprocess.run('createdb {}'.format(test_db_name), shell=True, check=True)\n tables_exist = False\n\n base.initialize_database(TEST_DB_URL)\n engine = base.engine\n connection = engine.connect()\n\n if tables_exist is None:\n # Find out if tables exist\n num_tables = list(\n connection.execute(\"select count(*) from information_schema.tables where table_schema='public'\")\n )[0][0]\n tables_exist = num_tables > 0\n\n # Recreate sessions to bind to this new connection\n session_factory = sessionmaker(bind=connection)\n Session = scoped_session(session_factory)\n base.Session = Session\n\n Base.metadata.create_all(engine)\n print('Created all tables')\n\n if tables_exist is False and setup_global_test_fixtures is not None:\n # Set up global fixture if it's first time making DB (easier doing this vs trying to set up\n # another nested transaction\n setup_global_test_fixtures()\n\n session = Session\n\n db_params['engine'] = engine\n db_params['connection'] = connection\n db_params['session'] = session\n\n # Allow for nested transactions inside tests\n @event.listens_for(session, \"after_transaction_end\")\n def restart_savepoint(session, transaction):\n if transaction.nested and not transaction._parent.nested:\n session.expire_all()\n session.begin_nested()\n\n @event.listens_for(session, 'before_commit')\n 
def check_before_commit(session):\n if not current_test_settings['db_allowed']:\n raise Exception(\"Test tried to access the database without declaring 'db' fixture\")\n\n yield db_params\n\n connection.close()\n\n return testdatabase()\n\n return _testdatabase_factory",
"def setup_db():\n create_service_db()",
"def setUp(self):\n db.create_all()\n self.db = db",
"def create(self):\n # Set up some infrastructure that is independent of the db.\n nscfg = self.getNetServerConfig()\n util.mkdirChain(nscfg.tmpDir)\n for contDir in self.contentsDirs:\n util.mkdirChain(contDir)\n\n # Now do the driver-specfic bits and initialize the schema.\n self._create()\n db = self.getReposDB()\n conary_schema.loadSchema(db)",
"def _create_engine_test(*args, **kwargs):\n module = f\"tests.components.recorder.db_schema_{str(start_version)}\"\n importlib.import_module(module)\n old_models = sys.modules[module]\n engine = create_engine(*args, **kwargs)\n old_models.Base.metadata.create_all(engine)\n if start_version > 0:\n with Session(engine) as session:\n session.add(\n recorder.db_schema.SchemaChanges(schema_version=start_version)\n )\n session.commit()\n return engine",
"def initialize_database(self):\n self.database = self.loader.request_library(\"common_libs\", \"database\")\n self.database.create_connection(\"production\")\n self.database.load_mappings()\n\n self.migrator = self.loader.request_library(\"database_tools\", \"migrator\")\n self.migrator.migrate()",
"def create_test_db(self, verbosity, autoclobber):\n \n if verbosity >= 1:\n print \"Creating test database '%s'...\" % self.connection.alias\n\n # Replace the NAME field in the database settings with the test keyspace name\n settings_dict = self.connection.settings_dict\n if settings_dict.get('TEST_NAME'):\n test_keyspace_name = settings_dict['TEST_NAME']\n else:\n test_keyspace_name = TEST_DATABASE_PREFIX + settings_dict['NAME']\n\n settings_dict['NAME'] = test_keyspace_name\n \n # First make sure we've destroyed an existing test keyspace\n # FIXME: Should probably do something with autoclobber here, but why\n # would you ever not want to autoclobber when running the tests?\n self.drop_keyspace(test_keyspace_name, verbosity)\n \n # Call syncdb to create the necessary tables/column families\n call_command('syncdb', verbosity=False, interactive=False, database=self.connection.alias)\n \n return test_keyspace_name",
"def create_database(self):\n import MySQLdb\n try:\n conn = self.root_connection()\n except MySQLdb.OperationalError, e:\n code = e.args[0]\n if code == self.unknown_database:\n pass\n else:\n self.logger.fatal(\"Error connecting as root: %s\" % e)\n raise\n else:\n # Database exists fine\n self.logger.debug('Database exists')\n conn.close()\n return\n conn = MySQLdb.connect(\n host=self.db_host,\n user='root',\n **self.passkw(self._root_password_override or self.db_root_password))\n ## FIXME: ideally the character set would be checked even if the database existed,\n ## and updated with ALTER DATABASE <dbname> CHARACTER SET <db_charset>\n plan = 'CREATE DATABASE %s CHARACTER SET %%s' % self.db_name\n charset = self.db_charset\n self.logger.info('Executing %s' % (plan % repr(charset)))\n if not self.maker.simulate:\n conn.cursor().execute(plan, (charset,))\n conn.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A helper function that takes a query and returns a function that will query the database and return a scalar.
|
def scalar(query, default=None):
def inner(model, *args, **kwargs):
val = model.engine.execute(query, *args, **kwargs).scalar()
if default is not None and val is None:
return default
else:
return val
return inner
|
[
"def first(query, default=None):\r\n def inner(model, *args, **kwargs):\r\n val = model.engine.execute(query, *args, **kwargs).first()\r\n\r\n if default is not None and val is None:\r\n return default\r\n else:\r\n return val\r\n\r\n return inner",
"def sql_query(dbname, query):\n ...",
"def get_one(cur, query):\n\tnummatches = cur.execute(query)\n\treturn cur.fetchone()",
"async def typed_retrieve_one_query(\n database: str, data_type: Type[T], query: str, values: Optional[Tuple[Any, ...]] = None,\n) -> T:\n\n rows = await typed_retrieve_query(database, data_type, query, values)\n\n try:\n return rows[0]\n except KeyError as e:\n bot_logger.error(f'Retrieve One Query (\"{query}\"). {e}.')\n raise aiosqlite.Error(f'Failed to fetch any rows')",
"def executeQuery(conn, query):\n cur = conn.cursor()\n cur.execute(query)\n return cur",
"def run_query(query):\n db_connection = connect_to_db()\n cursor = db_connection.cursor()\n cursor.execute(query)\n results = cursor.fetchall()\n cursor.close()\n db_connection.close()\n return results",
"async def queryone(self, stmt, *args):\n result = await self.query(stmt, *args)\n if len(result) == 0:\n raise NoResultError()\n elif len(result) > 1:\n raise ValueError(\"Expectecd 1 result, got %d\" % len(result))\n return result[0]",
"def query(self, sql):",
"def retrieve_record(tinydb, **kwargs):\n q = _construct_query(**kwargs)\n #print(q)\n with get_tinydb(tinydb) as db:\n result = eval(q)\n #print(result)\n return result",
"def execute_query(query):\n conn, cursor = db_connect()\n cursor.execute(query)\n results = cursor.fetchall()\n conn.close()\n return results",
"def find_one(collection, query):\n return DB.DATABASE[collection].find_one(query)",
"def fqlQuery(self, query, callback):\n j = Json().put(u\"query\", query)\n self.callMethod(u\"fql.query\", j.getJavaScriptObject(), callback)",
"def _execute_query(sql_raw, params, qry_type):\n conn = psycopg2.connect(config.DATABASE_STRING)\n cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n cur.execute(sql_raw, params)\n\n if qry_type == 'sel_single':\n results = cur.fetchone()\n elif qry_type == 'sel_multi':\n results = cur.fetchall()\n elif qry_type == 'insert':\n results = cur.fetchone()\n conn.commit()\n elif qry_type == 'update':\n results = cur.fetchone()\n conn.commit()\n else:\n raise Exception('Invalid query type defined.')\n\n conn.close()\n return results",
"def generate_function_query(function: str, args: tuple) -> str:\n return f\"SELECT {function}({', '.join(args)})\"",
"def fetch_query(query):\n\n connection, cursor = connect()\n cursor.execute(query)\n results = cursor.fetchall()\n connection.close()\n return results",
"def user_input_query(conn):\r\n\tquery = input(\"Enter a query for the database: \")\r\n\tprint(\"\")\r\n\tcur = conn.cursor() #this is the cursor for the db\r\n\tcur.execute(query) #after we select cursor from earlier line, \r\n\t#this will input the query that is defined at the start of the function and execute the command\r\n\t\r\n\trows = cur.fetchall() #this gets all the data returned from the db and stores it in variable\r\n\tfor row in rows:\r\n\t\tprint(row)",
"def influx_query_(self, q):\n if self.influx_cli is None:\n self.err(\n self.influx_query_,\n \"No database connected. Please initialize a connection\")\n return\n try:\n return self.influx_cli.query(q)\n except Exception as e:\n self.err(e, self.influx_query_,\n \"Can not query database\")",
"def query_item(self, *args):\n table = args[0]\n column = args[1]\n value = args[2]\n query_item = \"\"\"\n SELECT * FROM {} WHERE {} = '{}';\n \"\"\".format(table, column, value)\n cursor.execute(query_item)\n item = cursor.fetchone()\n return item",
"def executeQueryFetch(self, query = None):\n\t\tif query == None:\n\t\t\tquery = self.query\n\t\tself.cursor.execute(query)\n\t\tself.result = self.cursor.fetchall()\n\t\treturn self.result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A helper function that takes a query and returns a function that will query the database and return the first row
|
def first(query, default=None):
def inner(model, *args, **kwargs):
val = model.engine.execute(query, *args, **kwargs).first()
if default is not None and val is None:
return default
else:
return val
return inner
|
[
"def query_and_return_the_first_row_where(statement):\n\n db = current.db\n s3db = current.s3db\n\n cmd = \"db(%s).select(\\\n limitby=(0,1) ).first()\" % statement\n logger.info(\"Executing query %s\" % cmd)\n\n output = eval(cmd)\n return output",
"def first(self):\n try:\n row = self.cursor_strategy.fetchone()\n except BaseException as e:\n self.connection._handle_dbapi_exception(\n e, None, None, self.cursor, self.context\n )\n\n try:\n if row is not None:\n return self.process_rows([row])[0]\n else:\n return None\n finally:\n self.close()",
"def get_one(cur, query):\n\tnummatches = cur.execute(query)\n\treturn cur.fetchone()",
"async def typed_retrieve_one_query(\n database: str, data_type: Type[T], query: str, values: Optional[Tuple[Any, ...]] = None,\n) -> T:\n\n rows = await typed_retrieve_query(database, data_type, query, values)\n\n try:\n return rows[0]\n except KeyError as e:\n bot_logger.error(f'Retrieve One Query (\"{query}\"). {e}.')\n raise aiosqlite.Error(f'Failed to fetch any rows')",
"def select_single(self, table, rownum):\n\n with self.connection:\n return self.cursor.execute(f'SELECT * FROM {table} WHERE id = ?',\n (rownum,)).fetchall()[0]",
"async def queryone(self, stmt, *args):\n result = await self.query(stmt, *args)\n if len(result) == 0:\n raise NoResultError()\n elif len(result) > 1:\n raise ValueError(\"Expectecd 1 result, got %d\" % len(result))\n return result[0]",
"def first(database):\r\n Model = eval(database)\r\n e = db.session.query(Model).order_by(Model.id.asc()).first()\r\n print (e)",
"def find_one(collection, query):\n return DB.DATABASE[collection].find_one(query)",
"def fetchone(self):\n try:\n row = self.cursor_strategy.fetchone()\n if row is not None:\n return self.process_rows([row])[0]\n else:\n self._soft_close()\n return None\n except BaseException as e:\n self.connection._handle_dbapi_exception(\n e, None, None, self.cursor, self.context\n )",
"def execute_fetchone(self, sql, sql_args=None):\n with self._sql_executor(sql, sql_args) as pgcursor:\n result = pgcursor.fetchone()\n\n return result",
"def first(cls, **kwargs):\n\n return cls.query.filter_by(**kwargs).first()",
"def select_one(self, q, args=None, columns=None):\n rows = self.select(q, args=args, columns=columns)\n if rows is None or len(rows) == 0:\n return {}\n return rows[0]",
"def query_one(self, conditions):\n rows = self.query_all(conditions, limit=1)\n try:\n return next(rows)\n except StopIteration:\n return None, None",
"def findfirst(fn, collection, default=None):\n return next(iter(filter(fn, collection)), default)",
"def test_first_column_1(self):\n querying.first_column(self.mock_engine, self.mock_executable)\n\n self.mock_engine.execute.assert_called()\n self.mock_proxy.fetchall.assert_called()",
"def fetchOne(self):\n\t\tself.result = self.cursor.fetchone()\n\t\treturn self.result",
"def query_item(self, *args):\n table = args[0]\n column = args[1]\n value = args[2]\n query_item = \"\"\"\n SELECT * FROM {} WHERE {} = '{}';\n \"\"\".format(table, column, value)\n cursor.execute(query_item)\n item = cursor.fetchone()\n return item",
"def find_one(self, **kwargs):\n q = self.compile_query(**kwargs)\n for f in six.itervalues(self.facts):\n if q(f):\n return f\n return None",
"def get_first(session, criteria, target_id=None):\n query = transaction_gen_query(session, criteria, target_id)\n return get_transaction(query.first())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A helper function that takes a query and returns a function that will query the database and return a list of rows with the row_func applied to each.
|
def rows(query, row_func=dict):
def inner(model, *args, **kwargs):
return [row_func(r) for r in
model.engine.execute(query, *args, **kwargs)]
return inner
|
[
"def run_query(conn, query):\n with conn.cursor(as_dict=True) as cursor:\n cursor.execute(query)\n for row in cursor:\n yield row",
"def get_results(query):\n with psycopg2.connect('dbname=news') as conn:\n cur = conn.cursor()\n cur.execute(query)\n return cur.fetchall()",
"def rows_from_table(mycursor, table, db):\n\n mycursor.execute(f\"SELECT * FROM {table};\")\n records = mycursor.fetchall()\n db.commit()\n return records",
"def get_query_results(query):\n db, cursor = connect()\n cursor.execute(query)\n result = cursor.fetchall()\n db.close()\n return result",
"def get_db_query_results(db, query):\r\n if db == 'cmi':\r\n connection = pymysql.connect(config.cmi_host, config.cmi_user, config.cmi_pw, config.cmi_db)\r\n elif db == 'powercampus':\r\n connection = pypyodbc.connect(config.powercampus_connection_string)\r\n else:\r\n return None\r\n cursor = connection.cursor()\r\n cursor.execute(query)\r\n column_names = [column[0] for column in cursor.description]\r\n rows = [list(row) for row in cursor.fetchall()]\r\n cursor.close()\r\n connection.close()\r\n # return [dict(zip(column_names, list(row))) for row in rows]\r\n return rows, column_names",
"def run_query(query):\n db_connection = connect_to_db()\n cursor = db_connection.cursor()\n cursor.execute(query)\n results = cursor.fetchall()\n cursor.close()\n db_connection.close()\n return results",
"def query_and_fetchall(self, query):\n with vertica_python.connect(**conn_info) as conn:\n cur = conn.cursor()\n cur.execute(query)\n\n return cur.fetchall()",
"def get_records(self, query: str) -> Tuple[List, Any]:\n raise NotImplementedError()",
"def get_all(self, table, **query):\n if query:\n name, query = query.popitem()\n self._cursor.execute(f\"\"\"\nSELECT * FROM {table} WHERE {name}=={query!r}\n\"\"\")\n else:\n self._cursor.execute(f\"\"\"\nSELECT * FROM {table}\n\"\"\")\n return self._cursor.fetchall()",
"def return_rows_as_list(self, conn_cursor):\n raise NotImplementedError",
"def _one_statement_register__list(self, sql):\n def _wrapped_method(self, list_kwargs):\n \"\"\"\n Exec the statement and return a list with the rows\n\n @param list_kwargs: the keyword arguments of the query\n @type list_kwargs: dict\n\n @return: the queried rows\n @rtype: list of rows (tuples) or None\n \"\"\"\n result = []\n\n with self.tx_manager as conn:\n cursor = conn.cursor()\n\n for kwargs in list_kwargs:\n cursor.execute(sql, kwargs)\n\n result.append(cursor.fetchone())\n\n return result\n\n return _wrapped_method",
"def fetch_query(query):\n\n connection, cursor = connect()\n cursor.execute(query)\n results = cursor.fetchall()\n connection.close()\n return results",
"def _one_statement_table__list(self, sql):\n def _wrapped_method(self, list_kwargs):\n \"\"\"\n Exec the statement and return a list with the queries\n\n @param list_kwargs: the keyword arguments of the query\n @type list_kwargs: dict\n\n @return: the queried tables\n @rtype: list of queries (tables, list of tuples...) or None\n \"\"\"\n result = []\n\n with self.tx_manager as conn:\n cursor = conn.cursor()\n\n for kwargs in list_kwargs:\n cursor.execute(sql, kwargs)\n\n result.append(cursor.fetchall())\n\n return result\n\n return _wrapped_method",
"def read(self, table: str, sql_filter: str) -> list:\n t = sqlalchemy.text('SELECT * FROM {} WHERE {}'.format(table, sql_filter))\n rs = self.conn.execute(t)\n list_of_rows = list()\n for row in rs:\n row_as_dict = dict(row)\n list_of_rows.append(row_as_dict)\n\n return list_of_rows",
"def _all(self, **kwargs: Any) -> list[RowType]:\n self.validate_kwargs(kwargs)\n\n where = (\n f'WHERE {fields_to_search_str(kwargs.keys())}'\n if len(kwargs) > 0\n else ''\n )\n\n with self.connect() as db:\n rows = db.execute(\n f'SELECT * FROM {self.name} {where}',\n kwargs,\n ).fetchall()\n return [self._row_type(*row) for row in rows]",
"def read_rows(\n self,\n ) -> Callable[[bigtable.ReadRowsRequest], Awaitable[bigtable.ReadRowsResponse]]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"read_rows\" not in self._stubs:\n self._stubs[\"read_rows\"] = self.grpc_channel.unary_stream(\n \"/google.bigtable.v2.Bigtable/ReadRows\",\n request_serializer=bigtable.ReadRowsRequest.serialize,\n response_deserializer=bigtable.ReadRowsResponse.deserialize,\n )\n return self._stubs[\"read_rows\"]",
"def fetch_query(query):\n try:\n db, c = connect()\n c.execute(query)\n results = c.fetchall()\n except psycopg2.ProgrammingError:\n print(\"ERROR: unable to execute query\")\n db.close()\n raise\n except psycopg2.Error:\n raise\n else:\n db.close()\n return results",
"def execute_query(query):\n conn, cursor = db_connect()\n cursor.execute(query)\n results = cursor.fetchall()\n conn.close()\n return results",
"def produce_rows_lst():\n\n soup = open_custom_html('')\n rows = soup.findChildren(\"tr\")[1:]\n return rows"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A helper function that takes a query, a key_func, and a value_func and will
|
def mapping(query, key_func=lambda r: r[0], value_func=lambda r: r[1]):
def inner(model, *args, **kwargs):
return {
key_func(r): value_func(r)
for r in model.engine.execute(query, *args, **kwargs)
}
return inner
|
[
"def compose_keys(f, g):\n return lambda v: f(g(v))",
"def makeKeyGetter( k ):\n def myFunc( v ):\n return k( v[1] )\n print('making key getter for k=', k)\n return myFunc",
"def _create_getter(dct, lst, func):\n def _wrapper():\n return dct, lst, func\n return _wrapper",
"def filter(func):\n def expand_kv(kv):\n return func(*kv)\n\n def filter_values(value):\n cls = type(value)\n if isinstance(value, dict):\n return cls(_filter(expand_kv, value.items()))\n else:\n return cls(_filter(func, value))\n\n return transform(filter_values)",
"def any(cls, result_key, func):\n def scanner(self, obj):\n current_value = getattr(self, result_key, None)\n setattr(self, result_key, current_value or func(obj))\n\n cls._scan(result_key, scanner)",
"def _query(self, *args, **kwargs: Any) -> dict:\n query = dict()\n key: str\n val: Any\n for key, val in kwargs.keys(): # type: ignore\n if val is not None:\n query[key] = val\n return query",
"def modifiyItems(dic, keyFunction, valueFunction):\n return {keyFunction(key, value): valueFunction(key, value) for key, value in dic.items()}",
"def cmp_to_key(function):\n class Key():\n \"\"\"\n Key class\n \"\"\"\n def __init__(self, obj):\n self.obj = obj\n\n def __lt__(self, other):\n return function(self.obj, other.obj) < 0\n\n def __gt__(self, other):\n return function(self.obj, other.obj) > 0\n\n def __eq__(self, other):\n return function(self.obj, other.obj) == 0\n\n def __le__(self, other):\n return function(self.obj, other.obj) <= 0\n\n def __ge__(self, other):\n return function(self.obj, other.obj) >= 0\n\n def __ne__(self, other):\n return function(self.obj, other.obj) != 0\n return Key",
"def _get_query_dict(**kwargs):\n def __get_quey_dict(**kwargs):\n for k, v in six.iteritems(kwargs):\n if v is not None:\n yield k, v\n return dict(__get_quey_dict(**kwargs))",
"def map(func):\n # text is an alias for basestring on Python 2, which cannot be\n # instantiated and therefore can't be used to transform the value,\n # so we force to unicode instead.\n if is_py2 and text == func:\n func = unicode\n\n def expand_kv(kv):\n return func(*kv)\n\n def map_values(value):\n cls = type(value)\n if isinstance(value, dict):\n return cls(_map(expand_kv, value.items()))\n else:\n return cls(_map(func, value))\n\n return transform(map_values)",
"def rows(query, row_func=dict):\r\n def inner(model, *args, **kwargs):\r\n return [row_func(r) for r in\r\n model.engine.execute(query, *args, **kwargs)]\r\n\r\n return inner",
"def get_helper(func, data, key):\n\n data = json.loads(data)\n\n # create an array from the values of key in data\n arr = np.empty(len(data))\n for i, d in enumerate(data):\n try:\n arr[i] = d[key]\n except KeyError:\n arr[i] = d[\"data\"][key]\n\n # get result of func on arr, get argresult if is exists\n # argresult does not exist for some functions like np.mean, np.std, etc\n result = func(arr)\n try:\n index = np.where(arr == result)[0][0]\n argresult = data[index]\n except IndexError:\n argresult = None\n\n return result, argresult",
"def map_field(field, func, dict_sequence):\n for item in dict_sequence:\n try:\n item[field] = func(item.get(field, None))\n yield item\n except ValueError:\n pass",
"def eval(self,opseq,valueDict):\n for (dstName,funName,inputNames) in opseq:\n inputValues = map(lambda a:valueDict[a], inputNames)\n fun = EVAL_FUNS[funName] \n result = fun(*inputValues)\n valueDict[dstName] = result\n return valueDict",
"def reduceByKey(self, func):\n buckets = col.defaultdict(list)\n for d in self.data:\n buckets[d[0]].append(d[1])\n\n reduced_buckets = dict()\n for key in buckets:\n reduced_buckets[key] = ft.reduce(func, buckets[key])\n\n return ParallelData(reduced_buckets.items())",
"def iapply(self, f):\n for key, val in self.iteritems():\n self[key] = f(key, val)\n\n return self",
"def _make_key_func(self, key_func, input_dataset):\n\n @function.Defun(*nest.flatten(input_dataset.output_types))\n def tf_key_func(*args):\n \"\"\"A wrapper for Defun that facilitates shape inference.\"\"\"\n # Pass in shape information from the input_dataset.\n for arg, shape in zip(args, nest.flatten(input_dataset.output_shapes)):\n arg.set_shape(shape)\n nested_args = nest.pack_sequence_as(input_dataset.output_types, args)\n # pylint: disable=protected-access\n if dataset_ops._should_unpack_args(nested_args):\n ret = key_func(*nested_args)\n # pylint: enable=protected-access\n else:\n ret = key_func(nested_args)\n ret = ops.convert_to_tensor(ret, dtype=dtypes.int64)\n if ret.dtype != dtypes.int64:\n raise ValueError(\"`key_func` must return a single tf.int64 tensor.\")\n return ret\n\n self._key_func = tf_key_func\n self._key_func.add_to_graph(ops.get_default_graph())",
"def iteratee(func):\n if callable(func):\n cbk = func\n else:\n if isinstance(func, int):\n func = str(func)\n\n if isinstance(func, str):\n cbk = property_(func)\n elif isinstance(func, list) and len(func) == 1:\n cbk = property_(func)\n elif isinstance(func, list) and len(func) > 1:\n cbk = matches_property(*func[:2])\n elif isinstance(func, tuple):\n cbk = properties(*func)\n elif isinstance(func, dict):\n cbk = matches(func)\n else:\n cbk = identity\n\n # Optimize iteratee by specifying the exact number of arguments the iteratee takes so that\n # arg inspection (costly process) can be skipped in helpers.callit().\n cbk._argcount = 1\n\n return cbk",
"def get_update_function(\n func: UpdateFunction, columns: Columns\n) -> EvalFunction:\n if not isinstance(func, EvalFunction):\n if isinstance(func, dict):\n return Lookup(columns=columns, mapping=func)\n elif isinstance(func, ValueFunction) or callable(func):\n return Eval(columns=columns, func=func)\n else:\n return Const(func)\n return func"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Validate that the keys of the argument_dict passed match columns in table that are not in the blacklist list. return TypeError if there is a key where this condition is not met.
|
def validate_argument_column_mapping(argument_dict, table,
blacklist=None):
if blacklist is None:
blacklist = []
columns = set((c.key for c in table.columns if c.key not in blacklist))
for argument_name in argument_dict:
if argument_name not in columns:
raise TypeError("Key {0} does not match a column in {1}".format(
argument_name, table.name
))
|
[
"def _check_field_mappings(\n column_names: List[str],\n feature_table_name: str,\n feature_table_timestamp_column: str,\n feature_table_field_mappings: Dict[str, str],\n) -> None:\n\n if feature_table_timestamp_column not in column_names:\n raise ValueError(\n f\"Provided data source does not contain timestamp column {feature_table_timestamp_column} in columns {column_names}\"\n )\n\n specified_field_mappings = list()\n for k, v in feature_table_field_mappings.items():\n specified_field_mappings.append(v)\n\n is_valid = all(col_name in column_names for col_name in specified_field_mappings)\n\n if not is_valid:\n raise Exception(\n f\"Provided data source does not contain all field mappings previously \"\n f\"defined for FeatureTable, {feature_table_name}.\"\n )",
"def _ValidateInsertion(self, **kwargs):\n column_map = {c.name: c for c in self._columns}\n\n # Verify that no unexpected columns are passed in.\n unexpected_columns = set(kwargs.keys()) - {c.name for c in self._columns}\n if unexpected_columns:\n raise UnexpectedColumnError(sorted(list(unexpected_columns)))\n\n # Verify that all REQUIRED columns are present.\n required_columns = {\n c.name for c in self._columns if c.mode == MODE.REQUIRED}\n missing_columns = required_columns - set(kwargs.keys())\n if missing_columns:\n raise MissingColumnError(sorted(list(missing_columns)))\n\n for k, v in kwargs.iteritems():\n\n column = column_map[k]\n expected_types = FIELD_TYPE_MAP[column.field_type]\n\n # Verify that all non-NULLABLE columns have values.\n if column.mode != MODE.NULLABLE and v is None:\n raise UnexpectedNullError('Column \"%s\" is None' % k)\n\n # Verify that all REPEATED columns are lists.\n if column.mode == MODE.REPEATED and not isinstance(v, list):\n raise InvalidRepeatedError('Column \"%s\" is not a list' % k)\n\n # Verify that all REPEATED lists contain the correct type.\n if column.mode == MODE.REPEATED:\n for item in v:\n actual_type = type(item)\n if actual_type not in expected_types:\n raise InvalidTypeError(\n 'Column \"%s\" contains a value of type %s, must be one of %s' % (\n k, actual_type.__name__, sorted(list(expected_types))))\n\n # Verify that all REQUIRED fields contain the correct type.\n if column.mode == MODE.REQUIRED:\n actual_type = type(v)\n if actual_type not in expected_types:\n raise InvalidTypeError(\n 'Column \"%s\" is of type %s, must be one of %s' % (\n k, actual_type.__name__, sorted(list(expected_types))))\n\n # Verify that all values are allowed if the column specifies choices.\n if column.choices:\n v = v if column.mode == MODE.REPEATED else [v]\n for item in v:\n if item not in column.choices:\n raise InvalidValueError(\n 'Column \"%s\" contains an invalid value: %s' % (k, item))",
"def _validate_inputs(self,col_list):\n if not set(col_list).difference(self.raw_data.columns):\n print 'Columns is ok,Begin to Run....'\n else:\n raise ValueError('''The columns not in data's columns ''')",
"def checkColumns(data: Union[dict, collections.OrderedDict]):\n for expert in data.keys():\n if ('from' not in [x.lower() for x in data[expert].columns]) | \\\n ('to' not in [x.lower() for x in data[expert].columns]):\n raise ValueError('Columns From --> To were not found. Check the data!')",
"def _validate_constraint_columns(self, table_data):\n if any(col not in table_data.columns for col in self._constraint_columns):\n raise MissingConstraintColumnError()",
"def _assert_cols_in_df(cls, columns_provided, columns_df):\n col_not_valids = (\n set([column for column in columns_provided]).difference(set([column for column in columns_df])))\n assert (col_not_valids == set()), 'Error: The following columns do not exits in dataFrame: %s' % col_not_valids",
"def validate_map(self, sorted_tables):\n# tables = {}\n# for table in sorted_tables:\n# tables[table.name] = table\n tables = {}\n for table in sorted_tables:\n tables[table.lower()] = sorted_tables[table]\n for key in self.tbdict.keys():\n if not tables.has_key(self.tbdict[key]):\n raise Error(\"table %s doesn't exist\" % self.tbdict[key])\n else:\n table = tables[self.tbdict[key]]\n if not table.columns.has_key(self.coldict[key].lower()) \\\n and not table.columns.has_key(self.coldict[key]):\n raise Error(\"column %s doesn't exist in table %s \"% \\\n ( self.coldict[key], self.tbdict[key]))\n return True",
"def check_mask_params(mask_params):\n if not isinstance(mask_params, dict):\n raise ValueError(\n \"\"\"\n mask_params must be a dict \n \"\"\"\n )\n else:\n conform_arguments = [\"features_to_hide\", \"threshold\", \"positive\", \"max_contrib\"]\n mask_arguments_not_conform = [argument for argument in mask_params.keys()\n if argument not in conform_arguments]\n if len(mask_arguments_not_conform) != 0:\n raise ValueError(\n \"\"\"\n mask_params must only have the following key arguments:\n -feature_to_hide\n -threshold\n -positive\n -max_contrib \n \"\"\"\n )",
"def check_valid_column(observation):\n \n valid_columns = {\n \"observation_id\",\n \"Type\",\n \"Date\",\n \"Part of a policing operation\",\n \"Latitude\",\n \"Longitude\",\n \"Gender\",\n \"Age range\",\n \"Officer-defined ethnicity\",\n \"Legislation\",\n \"Object of search\",\n \"station\"\n }\n \n keys = set(observation.keys())\n \n if len(valid_columns - keys) > 0: \n missing = valid_columns - keys\n error = \"Missing columns: {}\".format(missing)\n return False, error\n \n if len(keys - valid_columns) > 0: \n extra = keys - valid_columns\n error = \"Unrecognized columns provided: {}\".format(extra)\n return False, error \n\n return True, \"\"",
"def validate_request(user_columns, data_columns):\n\n # Isolate our user- and data-columns into sets.\n data_columns_set = set(data_columns)\n user_columns_set = set(user_columns)\n\n # If the user denotes :all keyword, analyze all columns.\n if ':all' in user_columns_set:\n return data_columns\n\n # Valid columns are in the intersection between the two,\n # invalid columns are in the difference from user to data columns.\n valid, invalid = (\n user_columns_set.intersection(data_columns_set),\n user_columns_set.difference(data_columns_set)\n )\n\n # For all invalid columns, inform the user of their invalidity.\n for column in invalid:\n print(\"`{}` is not a valid column --- skipping.\".format(column))\n\n # Proceed with the analysis using only valid columns.\n return valid",
"def validate_input_dict(inputs: dict, key: str):\n if not all([k in inputs.keys() for k in LAYER_KWARGS[key]]):\n raise KeyError(\n f\"Not all required arguments for {key:s} layer were specified!\"\n )",
"def checkColumns(self, row, columns, log):\n rescols = set(row.keys())\n cols = set(columns.values())\n if not rescols >= cols:\n log.error(\n \"result missing columns: '%s'\",\n \",\".join(cols.difference(rescols)),\n )\n return False\n return True",
"def validate_kwargs(self, kwargs: dict[str, Any]) -> None:\n for field, value in kwargs.items():\n if field not in self._fields:\n raise ValueError(\n f'Field {field} is not a member of {self._row_name}.',\n )\n if not isinstance(value, self.fields[field].python_type):\n raise ValueError(\n f'Type of {field} is {type(value)} but expected '\n f'{self.fields[field].python_type}.',\n )",
"def validate_input_csv(fieldnames: Optional[Sequence[str]]) -> None:\n if not fieldnames:\n raise ValueError(\"Column names are missing\")\n\n if ACTIVITIES_KEY not in fieldnames:\n raise ValueError(f\"{ACTIVITIES_KEY} column missing - found: {fieldnames}\")",
"def __validateDictionary(dictionary: Dict[str, Any], *, keyType: type = str, dictionaryName: str = \"argument\") -> None:\n if dictionary is None:\n return\n if not type(dictionary) is dict:\n raise TypeError(f\"Provided {dictionaryName} '{dictionary}' is of type {type(dictionary).__name__}, it needs to be of type dict\")\n for key in dictionary.keys():\n if not type(key) is keyType:\n raise TypeError(f\"Key '{key}' in dictionary '{dictionaryName}' is of type {type(key).__name__}, it needs to be of type {keyType.__name__}\")",
"def _validate_params(dataframe, name, index, time_index, logical_types,\n table_metadata, column_metadata, semantic_tags,\n make_index, column_descriptions):\n _check_unique_column_names(dataframe)\n if name and not isinstance(name, str):\n raise TypeError('DataTable name must be a string')\n if index is not None or make_index:\n _check_index(dataframe, index, make_index)\n if logical_types:\n _check_logical_types(dataframe, logical_types)\n if table_metadata:\n _check_table_metadata(table_metadata)\n if column_metadata:\n _check_column_metadata(dataframe, column_metadata)\n if time_index is not None:\n datetime_format = None\n logical_type = None\n if logical_types is not None and time_index in logical_types:\n logical_type = logical_types[time_index]\n if _get_ltype_class(logical_types[time_index]) == Datetime:\n datetime_format = logical_types[time_index].datetime_format\n\n _check_time_index(dataframe, time_index, datetime_format=datetime_format, logical_type=logical_type)\n\n if semantic_tags:\n _check_semantic_tags(dataframe, semantic_tags)\n\n if column_descriptions:\n _check_column_descriptions(dataframe, column_descriptions)",
"def __guard_against_non_existent_columns__(df: pd.DataFrame, subset: Union[List[str], str] = None):\n if subset is None:\n return\n if isinstance(subset, str):\n subset = [subset]\n\n if not all(column in df.columns for column in subset):\n not_in = np.setdiff1d(subset, df.columns, assume_unique=True)\n raise ValueError('[{}] does not exist in this DataFrame'.format(', '.join(not_in)))",
"def check_ingress_required_columns(self, col_names):\n if not set(col_names).issuperset(INGRESS_REQUIRED_COLUMNS):\n if not set(col_names).issuperset(INGRESS_ALT_COLUMNS):\n missing_columns = [x for x in INGRESS_REQUIRED_COLUMNS if x not in col_names]\n return missing_columns\n return None",
"def __validate_colnames(self, names_table1, names_table2, description):\n\n if len(names_table1) != len(names_table2):\n raise Exception(\"Number of \" + description + \" columns does not match\")\n\n ## Check that all strings in names_table1 are contained in\n ## names_table2.\n for nname in names_table1:\n if nname not in names_table2:\n raise Exception(\"Missing column in \" + description + \":'\" + nname + \"'\")\n\n ## Check that all strings in names_table2 are contained in\n ## names_table1.\n for nname in names_table1:\n if nname not in names_table2:\n raise Exception(\"Missing column in \" + description + \":'\" + nname + \"'\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The CacheControl generalheader field is used to specify directives that MUST be obeyed by all caching mechanisms along the request/response chain.
|
def surrogate_control(self):
def on_update(surrogate_control):
if not surrogate_control and "surrogate-control" in self.headers:
del self.headers["surrogate-control"]
elif surrogate_control: # pragma: no cover
self.headers["Surrogate-Control"] = \
surrogate_control.to_header()
return parse_cache_control_header(
self.headers.get("surrogate-control"),
on_update,
ResponseCacheControl,
)
|
[
"def cachecontrolheaders():\n t = get_template(\"templates/cachecontrol.html\")\n htmlcontent = t.render(Context({}))\n return(htmlcontent)",
"def _patch_header(response: HttpResponse, status: Status) -> None:\n # Patch cache-control with no-cache if it is not already set.\n if status == Status.SKIP and not response.get(\"Cache-Control\", None):\n response[\"Cache-Control\"] = CacheControl.NOCACHE.value\n # Add our custom header.\n if wagtailcache_settings.WAGTAIL_CACHE_HEADER:\n response[wagtailcache_settings.WAGTAIL_CACHE_HEADER] = status.value",
"def patch_cache_control(response, **kwargs):\n\n def dictitem(s):\n t = s.split(\"=\", 1)\n if len(t) > 1:\n return (t[0].lower(), t[1])\n else:\n return (t[0].lower(), True)\n\n def dictvalue(*t):\n if t[1] is True:\n return t[0]\n else:\n return \"%s=%s\" % (t[0], t[1])\n\n cc = defaultdict(set)\n if response.get(\"Cache-Control\"):\n for field in cc_delim_re.split(response.headers[\"Cache-Control\"]):\n directive, value = dictitem(field)\n if directive == \"no-cache\":\n # no-cache supports multiple field names.\n cc[directive].add(value)\n else:\n cc[directive] = value\n\n # If there's already a max-age header but we're being asked to set a new\n # max-age, use the minimum of the two ages. In practice this happens when\n # a decorator and a piece of middleware both operate on a given view.\n if \"max-age\" in cc and \"max_age\" in kwargs:\n kwargs[\"max_age\"] = min(int(cc[\"max-age\"]), kwargs[\"max_age\"])\n\n # Allow overriding private caching and vice versa\n if \"private\" in cc and \"public\" in kwargs:\n del cc[\"private\"]\n elif \"public\" in cc and \"private\" in kwargs:\n del cc[\"public\"]\n\n for k, v in kwargs.items():\n directive = k.replace(\"_\", \"-\")\n if directive == \"no-cache\":\n # no-cache supports multiple field names.\n cc[directive].add(v)\n else:\n cc[directive] = v\n\n directives = []\n for directive, values in cc.items():\n if isinstance(values, set):\n if True in values:\n # True takes precedence.\n values = {True}\n directives.extend([dictvalue(directive, value) for value in values])\n else:\n directives.append(dictvalue(directive, values))\n cc = \", \".join(directives)\n response.headers[\"Cache-Control\"] = cc",
"def add_header(r):\n\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n # To fix the issue of making a new team on the dashboard, and using the\n #browser's nav button to go back\n return r",
"def private_response(response, force=True):\n if force or CDN_CACHE_CONTROL_HEADER not in response.headers:\n response.headers[CDN_CACHE_CONTROL_HEADER] = \"private\"",
"def _cache_control__get(self):\n env = self.environ\n value = env.get('HTTP_CACHE_CONTROL', '')\n cache_header, cache_obj = env.get('webob._cache_control', (None, None))\n if cache_obj is not None and cache_header == value:\n return cache_obj\n cache_obj = CacheControl.parse(value,\n updates_to=self._update_cache_control,\n type='request')\n env['webob._cache_control'] = (value, cache_obj)\n return cache_obj",
"def add_never_cache_headers(response):\n patch_response_headers(response, cache_timeout=-1)\n patch_cache_control(\n response, no_cache=True, no_store=True, must_revalidate=True, private=True\n )",
"def testCachingHeaders(self):\n self.handler.get(\"status\")\n self.assertEquals(\"public; max-age=300\",\n self.handler.response.headers[\"Cache-Control\"])",
"def cache_response(response, cache_tags=None, force=True):\n if cache_tags:\n add_cache_tags(response, cache_tags)\n if force or CDN_CACHE_CONTROL_HEADER not in response.headers:\n response.headers[CDN_CACHE_CONTROL_HEADER] = \"public\"",
"def secure_headers():\n headers = cherrypy.response.headers\n headers['Cache-Control'] = 'no-cache, no-store, private, mustrevalidate'\n headers['Pragma'] = 'no-cache'\n headers['X-XSS-Protection'] = '1; mode=block'\n headers['Content-Security-Policy'] = \"default-src='self'\"",
"def fudge_headers(response, stats):\n if not stats:\n add_never_cache_headers(response)\n else:\n patch_cache_control(response, max_age=seven_days)",
"def static_cache(res):\n if request.endpoint == 'static':\n expires = datetime.now() + timedelta(days=365)\n res.headers['Expires'] = expires.isoformat()\n return res",
"def test_vary_header_simple():\n # test that the vary header is sent\n http = httplib2.Http(cache=tests.get_cache_path())\n response = tests.http_response_bytes(\n headers={'vary': 'Accept', 'cache-control': 'max-age=300'},\n add_date=True,\n )\n with tests.server_const_bytes(response, request_count=3) as uri:\n response, content = http.request(uri, 'GET', headers={'accept': 'text/plain'})\n assert response.status == 200\n assert 'vary' in response\n\n # get the resource again, from the cache since accept header in this\n # request is the same as the request\n response, content = http.request(uri, 'GET', headers={'Accept': 'text/plain'})\n assert response.status == 200\n assert response.fromcache, \"Should be from cache\"\n\n # get the resource again, not from cache since Accept headers does not match\n response, content = http.request(uri, 'GET', headers={'Accept': 'text/html'})\n assert response.status == 200\n assert not response.fromcache, \"Should not be from cache\"\n\n # get the resource again, without any Accept header, so again no match\n response, content = http.request(uri, 'GET')\n assert response.status == 200\n assert not response.fromcache, \"Should not be from cache\"",
"def test_headers(rmaker: RequestMaker, pri_data: PrimaryData):\n response = rmaker.make_request(path='/api/v1/config')\n\n required_headers = ('Cache-Control', 'Expires', 'Strict-Transport-Security', 'Content-Security-Policy')\n for h in required_headers:\n assert h in response.headers.keys()",
"def _extract_headers(self, req):\n last_modified = req.headers['last-modified']\n # cached = req.headers['X-Apublish-Id']\n self.last_modified = last_modified",
"def _default_headers_hook():\n bottle.response.headers['Cache-Control'] = 'no-store'\n bottle.response.headers['Server'] = 'Hibiki/2016'\n bottle.response.headers['X-Username'] = get_current_username() or ''",
"def InitializeHeader (self):\n self.Nonce = ''.join(random.choice(string.digits) for _ in range (9))\n self.AuthDateTime = datetime.datetime.now().strftime('%m%d%H%M%S')\n \n \n \n self.Headers = {\n \n 'Accept-Language': 'en-US', \n 'nonce': self.Nonce, \n 'Accept': '*/*', \n 'authdatetime': self.AuthDateTime, \n 'Keep-Alive': 'timeout=1, max=1', \n 'user': self.UserID, \n 'Accept-Encoding': 'gzip, deflate',\n 'User-Agent': 'VCS/3.0.3.524 (iOS 9.3.5)'\n }",
"def check_cached_headers(self, headers):\n self.assertIn(\"x-powered-by\", headers.keys(), \"Unexpected headers (not from WordPress?)\")\n return \"age\" in headers",
"def _cache_ratelimit_headers(\n self, headers: CaseInsensitiveDict,\n resource: str=CORE_RESOURCE) -> dict:\n if not self._ratelimit_cache:\n self._ratelimit_cache = {}\n if self._has_ratelimit_headers(headers):\n self._ratelimit_cache[resource] = {\n 'limit': headers.get(self.RATELIMIT_LIMIT_HEADER),\n 'remaining': headers.get(self.RATELIMIT_REMAINING_HEADER),\n 'reset': headers.get(self.RATELIMIT_RESET_HEADER)\n }"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Dump the last N days' updates as an RSS feed.
|
def rss(app, request):
releases = app.db.packaging.get_recently_updated(num=40)
for release in releases:
# TODO update _force_external to _external when Flask-ification is done
url = url_for(request, 'warehouse.packaging.views.project_detail',
project_name=release['name'], version=release['version'],
_force_external=True)
release.update(dict(url=url))
response = render_response(
app, request, "legacy/rss.xml",
description='package updates',
releases=releases,
site=app.config.site,
)
response.mimetype = 'text/xml; charset=utf-8'
# TODO: throw in a last-modified header too?
return response
|
[
"def rss(app, request):\n releases = app.db.packaging.get_recently_updated(num=40)\n for release in releases:\n # TODO update _force_external to _external when Flask-ification is done\n url = url_for(request, 'warehouse.packaging.views.project_detail',\n project_name=release['name'], version=release['version'],\n _force_external=True)\n release.update(dict(url=url))\n\n response = render_response(\n app, request, \"legacy/rss.xml\",\n description='package updates',\n releases=releases,\n site=app.config.site,\n )\n response.mimetype = 'text/xml; charset=utf-8'\n # TODO: throw in a last-modified header too?\n return response",
"def generate_feed(entries):\n entries = list(entries)\n if len(entries) == 0:\n return None\n\n # Get the update date. Assumption: most recent entry is first.\n updated = entries[0]['created']\n\n with contextlib.closing(stringio.StringIO()) as out:\n builder = utils.XMLBuilder(out)\n build_feed(\n builder,\n web.config.app.site_name,\n updated,\n web.config.app.tag_uri,\n entries)\n return out.getvalue()",
"def create_rss(self):\n\n sorted_items = sorted(self.items, key=lambda date: date.pubDate, reverse=True)\n if self.total_items != -1:\n sorted_items = sorted_items[:self.total_items] # Restrict number of items\n rss_obj = PyRSS2Gen.RSS2(\n title=self.channel.title,\n link=self.channel.link,\n description=self.channel.description,\n lastBuildDate=datetime.datetime.utcnow(),\n items=sorted_items)\n rss_obj = xml.dom.minidom.parseString(rss_obj.to_xml()) # Prettify\n return rss_obj.toprettyxml()",
"def get_rss(self, size):\n articles = Diary.objects.order_by('-publish_time')[:size]\n items = []\n for article in articles:\n content = article.html\n\n url = Config.SITE_URL + '/diary/' + str(article.pk) + '/' + \\\n article.title\n items.append(PyRSS2Gen.RSSItem(\n title=article.title,\n link=url,\n description=content,\n guid=PyRSS2Gen.Guid(url),\n pubDate=article.publish_time,\n ))\n rss = PyRSS2Gen.RSS2(\n title=Config.MAIN_TITLE,\n link=Config.SITE_URL,\n description=Config.DESCRIPTION,\n lastBuildDate=datetime.datetime.now(),\n items=items\n ).to_xml('utf-8')\n return rss",
"def packages_rss(app, request):\r\n releases = app.db.packaging.get_recent_projects(num=40)\r\n for release in releases:\r\n # TODO update _force_external to _external when Flask-ification is done\r\n url = url_for(request, 'warehouse.packaging.views.project_detail',\r\n project_name=release['name'], _force_external=True)\r\n release.update(dict(url=url))\r\n\r\n response = render_response(\r\n app, request, \"legacy/rss.xml\",\r\n description='new projects',\r\n releases=releases,\r\n site=app.config.site,\r\n )\r\n response.mimetype = 'text/xml; charset=utf-8'\r\n # TODO: throw in a last-modified header too?\r\n return response",
"def packages_rss(app, request):\n releases = app.db.packaging.get_recent_projects(num=40)\n for release in releases:\n # TODO update _force_external to _external when Flask-ification is done\n url = url_for(request, 'warehouse.packaging.views.project_detail',\n project_name=release['name'], _force_external=True)\n release.update(dict(url=url))\n\n response = render_response(\n app, request, \"legacy/rss.xml\",\n description='new projects',\n releases=releases,\n site=app.config.site,\n )\n response.mimetype = 'text/xml; charset=utf-8'\n # TODO: throw in a last-modified header too?\n return response",
"def _update_feed(self):\n # Update the last update field\n feed = feedparser.parse(self.url)\n self.last_update = datetime.date.today()\n if feed.feed.has_key(\"link\"):\n self.link = feed.feed.link\n else:\n self.link = \"\"\n self.save()\n for item in feed.entries[:10]:\n # The RSS spec doesn't require the guid field so fall back on link\n if item.has_key(\"id\"):\n guid = item.id\n else:\n guid = item.link\n\n # Search for an existing item\n try:\n FeedItem.objects.get(guid=guid)\n except FeedItem.DoesNotExist:\n # Create it.\n if item.has_key(\"published_parsed\"):\n pub_date = datetime.datetime.fromtimestamp(mktime(item.published_parsed))\n elif item.has_key(\"updated_parsed\"):\n pub_date = datetime.datetime.fromtimestamp(mktime(item.updated_parsed))\n else:\n pub_date = datetime.datetime.now()\n\n feed_item = FeedItem(title=item.title, link=item.link, content=item.description,\n guid=guid, pub_date=pub_date, feed=self)\n feed_item.save()",
"def nixiefeed(self, irc, msg, args):\n print args\n feed = feedparser.parse(self.nixie_rss)\n items = feed['entries'][:5]\n [ irc.reply(item['published'] + \": \" + item['title'] + \" ::: \" + item['id'], private=True) for item in items]",
"def blog_rss(request):\n ents = Entry.objects.all().order_by('-when')[:10]\n c = RequestContext(request)\n c['entries'] = ents\n return render_to_response('rss.xml', c)",
"def update_feeds():\n feedurls = get_feedurls()\n for url in feedurls:\n query_args = { 'q': url, 'v':'1.0', 'num': '30' }\n qs = urllib.urlencode(query_args)\n loader = 'http://ajax.googleapis.com/ajax/services/feed/load'\n loadurl = '%s?%s' % (loader, qs)\n request = urllib2.Request(loadurl)\n request.add_header('Referer', 'http://www.planetzope.org/aggregator.txt')\n response = urllib2.urlopen(request)\n feed = response.read()\n urlsha1 = get_sha1(url)\n feedlookup.append((urlsha1, url)) \n feeddata = open(os.path.join(data, urlsha1),'wb')\n feeddata.write(feed)\n feeddata.close()\n print urlsha1, url\n time.sleep(1)\n fld = open(feedlookupdata, 'w')\n for a, b in feedlookup:\n fld.write('%s\\t%s\\n' % (a, b))\n fld.close()\n print 'feeds done'",
"def cmd_show_dates(self):\n # TODO: Use only --view.\n d = dict(flags=self.args.flags, number=self.args.number,\n sortkey=self.args.sortkey)\n names = None\n for feed in self.generate_feeds():\n try:\n deltas, stats, names = feed.get_daystats(include_now=True, **d)\n except ValueError:\n log.warning('Daystats not available for feed \"%s\"', feed)\n else:\n values = sorted(deltas) if self.args.verbose else stats\n lst = ['f{x: >7.1f}' for x in values] + [feed]\n messager.msg(*lst)\n # log.info('%.2f %s', feed.wait_to_refresh(), feed)\n if not self.args.verbose and names is not None:\n messager.msg(*names)",
"def refresh_rss_feeds(self):\n ## > IMPORTS ##\n import dryxPython.webcrawlers as wc\n import dryxPython.mysql as m\n import dryxPython.commonutils as cu\n\n ## >SETTINGS ##\n\n ## LOGGING HEADER ##\n log.info('<m> STARTING TO REFRESH THE FEEDS FOUND IN ' +\n self.subscriptionTable + '<m>')\n\n ###########################################################\n # >ACTION(S) #\n ###########################################################\n # CREATE DOWNLOADS DIRECTORY\n cu.dryx_mkdir(self._downloadDirectory)\n\n # READ THE FEED NAMES AND URLS FROM SUBSCRIPTION TABLE\n sqlQuery = 'SELECT rssFeedName, feedURL, rssFeedSource, dateLastRead, uniqueKeyCols from ' + \\\n self.subscriptionTable\n try:\n log.debug(\"attempting to reading feed data from the subscription table : %s\" % (\n self.subscriptionTable,))\n feeds = m.execute_mysql_read_query(sqlQuery, dbConn, log)\n except Exception, e:\n log.error(\"could not reading feed data from the subscription table : %s - failed with this error %s: \" %\n (self.subscriptionTable, str(e),))\n return -1\n\n # DOWNLOAD THE FEED CHANNEL XML FILES AND SWITCH TO LOCAL URL\n remoteURLList = []\n for feed in feeds:\n remoteURLList += [feed['feedURL']]\n try:\n log.debug(\"attempting to downloading the feed channel xml files\")\n localUrls = wc.multiWebDocumentDownloader(\n remoteURLList, self._downloadDirectory, 1)\n except Exception, e:\n log.error(\n \"could not downloading the feed channel xml files - failed with this error %s: \" % (str(e),))\n return -1\n\n ifc = 0\n for feed in feeds:\n feed['remoteFeedUrl'] = feed['feedURL']\n feed['feedURL'] = localUrls[ifc]\n ifc += 1\n\n # INSTANTIATE THE XML FILE OBJECT\n xf = xml_file()\n xf.feedUrl = feed['feedURL']\n xf.rssFeedName = feed['rssFeedName']\n\n # DETERMINE UNQUIE KEY\n ukCols = str.split(feed['uniqueKeyCols'])\n\n # CHANNEL ITEMS = BASE LEVEL XML FEED METADATA - THE NEWS/CONTENT\n # GRAB THE LIST OF XML ITEM DICTIONARIES\n xml_channel_items = xf.get_channel_items()\n # ADD EXTRA 
COLUMNS TO THE DICTIONARY\n now = str(cu.get_now_sql_datetime())\n for item in xml_channel_items:\n item['dateCreated'] = now\n item['dateLastModified'] = now\n item['awaitingAction'] = 1\n item['rssFeedUrl'] = feed['remoteFeedUrl']\n item['rssFeedName'] = feed['rssFeedName']\n item['rssFeedSource'] = feed['rssFeedSource']\n\n feedTableName = self._feedTablePrefix + feed['rssFeedName']\n feedTableName = cu.make_lowercase_nospace(feedTableName)\n\n # APPEND THE DATA TO THE TABLE\n try:\n log.debug(\"attempting to 'adding data to the %s table\" %\n (feedTableName,))\n for i in range(len(xml_channel_items)):\n log.debug('here is the element dictionary: %s' %\n (str(xml_channel_items[i].keys()),))\n m.convert_dictionary_to_mysql_table(\n dbConn, xml_channel_items[i], feedTableName, ukCols)\n except Exception, e:\n log.error(\"could not 'adding data to the %s table - failed with this error %s: \" %\n (feedTableName, str(e),))\n return -1\n\n ## LOGGING FOOTER ##\n log.info('<m> SUCCESSFULLY ATTEMPTED TO REFRESH THE FEEDS FOUND IN ' +\n self.subscriptionTable + '<m>')\n\n return None",
"def generate_rss(packages, herd):\n if not packages.count():\n return \"\"\"<?xml version=\"1.0\" encoding=\"iso-8859-1\"?><rss version=\"2.0\"><channel><title>Meatoo - Gentoo vs. Freshmeat Releases</title><link>http://meatoo.gentooexperimental.org/</link><description>The latest Freshmeat releases with matching Gentoo versions.</description><lastBuildDate>%s</lastBuildDate><generator>PyRSS2Gen-0.1.1</generator><docs>http://blogs.law.harvard.edu/tech/rss</docs><item><title>Herd %s has no entries.</title><link>http://meatoo.gentooexperimental.org/</link><description>There are no entries for %s</description><pubDate>%s</pubDate></item></channel></rss>\"\"\" % (datetime.datetime.utcnow(), herd, herd, datetime.datetime.utcnow())\n items = []\n for pkg in packages:\n items.append(PyRSS2Gen.RSSItem(\n title = \"%s/%s-%s [%s]\" % \\\n (pkg.portageCategory, pkg.packageName, pkg.portageVersion, \\\n pkg.latestReleaseVersion),\n description = \"Freshmeat Release Date: %s<br><br><b>Portage desc:</b><br> %s<br><br><b>Freshmeat desc:</b><br> %s<br>http://freshmeat.net/projects/%s/\" % (pkg.latestReleaseDate, pkg.portageDesc, pkg.descShort, pkg.packageName),\n link = \"http://meatoo.gentooexperimental.org/\",\n pubDate = datetime.datetime.utcnow()\n ))\n\n rss = PyRSS2Gen.RSS2(\n title = \"Meatoo - Gentoo vs. Freshmeat Releases\",\n link = \"http://meatoo.gentooexperimental.org/\",\n description = \"The latest Freshmeat releases with matching Gentoo versions.\",\n lastBuildDate = datetime.datetime.utcnow(),\n items = items)\n return rss.to_xml()",
"def update_download_list(args):\n with PodcastDatabase(args.database) as _database:\n podcasts = _database.get_podcast_urls()\n total_added = 0\n for _tuple in podcasts:\n name, added = pyres.rss.add_episodes_from_feed(_database,\n _tuple[0],\n args.base_dir,\n int(_tuple[1]),\n _tuple[2])\n if added:\n total_added += added\n print(\"%-50s: %3d episodes since %s\" %\n (name, added, utils.date_as_string(_tuple[2])))\n print()\n print(\"There are a total of %d episodes to be updated.\" %\n (total_added))\n\n # go ahead and get those podcasts while we're here.\n process_rss_feeds(args)",
"def cmd_refresh(self):\n n_skipped = 0\n n_new = 0\n for feed in self.generate_feeds():\n r = feed.refresh(gracetime=self.args.gracetime,\n force=self.args.force)\n if r is None:\n n_skipped += 1\n else:\n n_new += r\n if self.args.verbose:\n s = 'Found {} new entries in {} feeds, skipped {} feeds'\n messager.msg(s.format(n_new, len(self.view.directory) - n_skipped,\n n_skipped))",
"def getNewFeedEntries(feeds, feed_db, entry_db):\n entries = []\n for uri in feeds:\n print \"Polling %s\" % uri\n try:\n # Get the notes rememebered for this feed.\n feed_data = feed_db.get(uri, {})\n last_poll = feed_data.get('last_poll', None)\n etag = feed_data.get('etag', None)\n modified = feed_data.get('modified', None)\n \n # Check to see whether it's time to poll this feed yet.\n if last_poll and (time.time() - last_poll) < 3600:\n print \"\\tFeed already polled within the last hour.\"\n \n else:\n # Fetch the feed using the ETag and Last-Modified notes.\n feed_data = feedparser.parse(uri,etag=etag,modified=modified)\n \n # If the feed HTTP status is 304, there was no change.\n if feed_data.status == 304:\n print \"\\tFeed unchanged.\"\n \n else:\n new_entries = 0\n \n for entry_data in feed_data.entries:\n \n # Wrap the entry data and get a hash for the entry.\n entry = EntryWrapper(feed_data, entry_data)\n hash = entry.hash()\n \n # If the hash for this entry is found in the DB, \n # it's not new.\n if entry_db.has_key(hash): continue\n\n # Flag entry as seen with the hash key, append to \n # list of new entries.\n entry_db[hash] = time.time()\n entries.append(entry)\n new_entries += 1\n \n print \"\\tFound %s new entries\" % new_entries\n\n # Finally, update the notes remembered for this feed.\n if feed_data.has_key('feed') and feed_data['feed'].has_key('title'):\n feed_title = feed_data['feed']['title']\n else:\n feed_title = 'Untitled'\n\n feed_db[uri] = {\n 'last_poll' : time.time(),\n 'etag' : feed_data.get('etag', None),\n 'modified' : feed_data.get('modified', None),\n 'title' : feed_title\n }\n \n except KeyboardInterrupt:\n raise\n except Exception, e:\n print \"Problem polling %s: %s\" % (uri, e)\n \n entries.sort()\n return entries",
"def atomstream(request):\n\n feed = Rss201rev2Feed(\n title=settings.SITE_NAME,\n link=settings.SITE_URL,\n description=_(u\"Search feed for %(username)s\") % request.jwt.user.username\n )\n objects = SubscriptionHit.objects.filter(\n notified_users=request.jwt.user\n )[:10]\n for item in objects:\n feed.add_item(\n title=item.subject,\n link=item.link,\n description=_(u\"No description provided\")\n )\n\n response = HttpResponse(content_type=\"application/rss+xml\")\n feed.write(response)\n return response",
"def rss_feed(request):\n site = get_current_site(request)\n articles = Article.objects.filter(sites__id=site.id).order_by('-published_date')[20:]\n return load_template(request, site, 'rss_articles.html', {'articles': articles})",
"def get_latest_feeds():\n\tall_feeds = Feed.objects.all()\n\n\tlatest_feeds = []\n\tfor feed in all_feeds:\n\t\tcur_feed = FeedSummary()\n\t\tcur_feed.title = feed.title\n\t\tcur_feed.url = feed.public_url\n\t\titems = FeedItem.objects.filter(feed__title=feed.title).order_by('-date_modified')\n\t\tcur_feed.items = items[:3]\n\n\t\tlatest_feeds.append(cur_feed)\n\n\treturn latest_feeds"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Dump the last N days' new projects as an RSS feed.
|
def packages_rss(app, request):
releases = app.db.packaging.get_recent_projects(num=40)
for release in releases:
# TODO update _force_external to _external when Flask-ification is done
url = url_for(request, 'warehouse.packaging.views.project_detail',
project_name=release['name'], _force_external=True)
release.update(dict(url=url))
response = render_response(
app, request, "legacy/rss.xml",
description='new projects',
releases=releases,
site=app.config.site,
)
response.mimetype = 'text/xml; charset=utf-8'
# TODO: throw in a last-modified header too?
return response
|
[
"def packages_rss(app, request):\n releases = app.db.packaging.get_recent_projects(num=40)\n for release in releases:\n # TODO update _force_external to _external when Flask-ification is done\n url = url_for(request, 'warehouse.packaging.views.project_detail',\n project_name=release['name'], _force_external=True)\n release.update(dict(url=url))\n\n response = render_response(\n app, request, \"legacy/rss.xml\",\n description='new projects',\n releases=releases,\n site=app.config.site,\n )\n response.mimetype = 'text/xml; charset=utf-8'\n # TODO: throw in a last-modified header too?\n return response",
"def rss(app, request):\r\n releases = app.db.packaging.get_recently_updated(num=40)\r\n for release in releases:\r\n # TODO update _force_external to _external when Flask-ification is done\r\n url = url_for(request, 'warehouse.packaging.views.project_detail',\r\n project_name=release['name'], version=release['version'],\r\n _force_external=True)\r\n release.update(dict(url=url))\r\n\r\n response = render_response(\r\n app, request, \"legacy/rss.xml\",\r\n description='package updates',\r\n releases=releases,\r\n site=app.config.site,\r\n )\r\n response.mimetype = 'text/xml; charset=utf-8'\r\n # TODO: throw in a last-modified header too?\r\n return response",
"def rss(app, request):\n releases = app.db.packaging.get_recently_updated(num=40)\n for release in releases:\n # TODO update _force_external to _external when Flask-ification is done\n url = url_for(request, 'warehouse.packaging.views.project_detail',\n project_name=release['name'], version=release['version'],\n _force_external=True)\n release.update(dict(url=url))\n\n response = render_response(\n app, request, \"legacy/rss.xml\",\n description='package updates',\n releases=releases,\n site=app.config.site,\n )\n response.mimetype = 'text/xml; charset=utf-8'\n # TODO: throw in a last-modified header too?\n return response",
"def create_rss(self):\n\n sorted_items = sorted(self.items, key=lambda date: date.pubDate, reverse=True)\n if self.total_items != -1:\n sorted_items = sorted_items[:self.total_items] # Restrict number of items\n rss_obj = PyRSS2Gen.RSS2(\n title=self.channel.title,\n link=self.channel.link,\n description=self.channel.description,\n lastBuildDate=datetime.datetime.utcnow(),\n items=sorted_items)\n rss_obj = xml.dom.minidom.parseString(rss_obj.to_xml()) # Prettify\n return rss_obj.toprettyxml()",
"def get_rss(self, size):\n articles = Diary.objects.order_by('-publish_time')[:size]\n items = []\n for article in articles:\n content = article.html\n\n url = Config.SITE_URL + '/diary/' + str(article.pk) + '/' + \\\n article.title\n items.append(PyRSS2Gen.RSSItem(\n title=article.title,\n link=url,\n description=content,\n guid=PyRSS2Gen.Guid(url),\n pubDate=article.publish_time,\n ))\n rss = PyRSS2Gen.RSS2(\n title=Config.MAIN_TITLE,\n link=Config.SITE_URL,\n description=Config.DESCRIPTION,\n lastBuildDate=datetime.datetime.now(),\n items=items\n ).to_xml('utf-8')\n return rss",
"def get_latest_feeds():\n\tall_feeds = Feed.objects.all()\n\n\tlatest_feeds = []\n\tfor feed in all_feeds:\n\t\tcur_feed = FeedSummary()\n\t\tcur_feed.title = feed.title\n\t\tcur_feed.url = feed.public_url\n\t\titems = FeedItem.objects.filter(feed__title=feed.title).order_by('-date_modified')\n\t\tcur_feed.items = items[:3]\n\n\t\tlatest_feeds.append(cur_feed)\n\n\treturn latest_feeds",
"def feeds():\n latest = get_latest_articles(1, ARTICLES_PER_FEED)\n feed = AtomFeed('Code Speculations', feed_url=request.url,\n url=request.url_root)\n\n for article in latest:\n summary = extract_preview(article.html)\n content = article.html.replace(DELIMITER, '')\n feed.add(article['title'],\n summary=summary,\n content=content,\n content_type=\"html\",\n author=ME,\n url=make_external(article['url']),\n updated=article['published'],\n )\n return feed.get_response()",
"def _create_atom_feed(self):\n posts = sorted(self.posts, key=lambda post: post.date,\n reverse=True)[:10]\n content = self.templates['atom'].render(posts=posts, site=self.site)\n path = os.path.join(BASE_DIR, self.paths['output'], 'atom.xml')\n write_to_path(content, path)",
"def feed(self):\n if self.__last_feed and (time.time() - self.__last_feed < 10):\n return\n else:\n self.__last_feed = time.time()\n\n items = []\n\n try:\n repositories = self.db_conn.repositories\n results = repositories.find(\n {\n 'error_count': {\n '$lt': self.MAX_RETRIES\n },\n '$or': [\n {\n 'indexed_on': {\n '$lt': datetime.today()\n }\n },\n {\n 'indexed_on': None\n }\n ],\n 'state': 0\n },\n limit=self.FEED_SIZE\n ).sort([('activity', -1)])\n\n for repo in results:\n items.append((repo['_id'], repo['url']))\n\n logger.info('Scheduling {} messages'.format(len(items)))\n\n # update selected repo status' to 'processing=1'\n if len(items):\n ids = [x[0] for x in items]\n self.__set_flag_processing(ids)\n\n # push to the MQ\n for _id, url in items:\n self.__add_to_queue(_id, url)\n else:\n logger.info('\\033[1;37mFeeding exhausted.\\033[0m')\n self.__stop_feeding = True\n\n return len(items)\n\n except Error as err:\n # TODO: implement error handling\n print err",
"def generate_feed(entries):\n entries = list(entries)\n if len(entries) == 0:\n return None\n\n # Get the update date. Assumption: most recent entry is first.\n updated = entries[0]['created']\n\n with contextlib.closing(stringio.StringIO()) as out:\n builder = utils.XMLBuilder(out)\n build_feed(\n builder,\n web.config.app.site_name,\n updated,\n web.config.app.tag_uri,\n entries)\n return out.getvalue()",
"def blog_rss(request):\n ents = Entry.objects.all().order_by('-when')[:10]\n c = RequestContext(request)\n c['entries'] = ents\n return render_to_response('rss.xml', c)",
"def cmd_show_dates(self):\n # TODO: Use only --view.\n d = dict(flags=self.args.flags, number=self.args.number,\n sortkey=self.args.sortkey)\n names = None\n for feed in self.generate_feeds():\n try:\n deltas, stats, names = feed.get_daystats(include_now=True, **d)\n except ValueError:\n log.warning('Daystats not available for feed \"%s\"', feed)\n else:\n values = sorted(deltas) if self.args.verbose else stats\n lst = ['f{x: >7.1f}' for x in values] + [feed]\n messager.msg(*lst)\n # log.info('%.2f %s', feed.wait_to_refresh(), feed)\n if not self.args.verbose and names is not None:\n messager.msg(*names)",
"def rss_feed(request):\n site = get_current_site(request)\n articles = Article.objects.filter(sites__id=site.id).order_by('-published_date')[20:]\n return load_template(request, site, 'rss_articles.html', {'articles': articles})",
"def build(feeds_urls, output_dir, max_old=None):\n\n # Convert max_old if needed.\n if max_old == None:\n max_old = timedelta.max\n\n # Give the feeds URLs to Feedparser to have nicely usable feed objects.\n feeds = [feedparser.parse(feed_url) for feed_url in feeds_urls]\n # Parse the feeds and grave useful information to build a structure\n # which will be passed to the templates.\n data = []\n\n ## Initialize some counters for the TOC IDs.\n ## We start counting at 2 because 1 is the TOC itself.\n feed_number = 1\n play_order = 1\n\n for feed in feeds:\n feed_number += 1\n play_order += 1\n local = {\n 'number': feed_number,\n 'play_order': play_order,\n 'entries': [],\n 'title': feed.feed.title,\n }\n entry_number = 0\n for entry in feed.entries:\n # We don't want old posts, just fresh news.\n if date.today() - date(*entry.published_parsed[0:3]) > max_old:\n continue\n\n\n play_order += 1\n entry_number += 1\n\n try:\n \tlocal_entry = {\n \t'number': entry_number,\n \t\t'play_order': play_order,\n \t'title': entry.title,\n \t'description': entry.description,\n \t'content': entry.content[0].value,\n \t}\n\t except AttributeError:\n\t\tlocal_entry = {\n 'number': entry_number,\n 'play_order': play_order,\n 'title': entry.title,\n 'description': entry.description,\n }\n local['entries'].append(local_entry)\n\n data.append(local)\n # Wrap data and today's date in a dict to use the magic of **.\n wrap = {\n 'date': date.today().isoformat(),\n 'feeds': data,\n }\n\n # Render and output templates\n\n ## TOC (NCX)\n render_and_write('toc.xml', wrap, 'toc.ncx', output_dir)\n ## TOC (HTML)\n render_and_write('toc.html', wrap, 'toc.html', output_dir)\n ## OPF\n render_and_write('opf.xml', wrap, 'daily.opf', output_dir)\n ## Content\n for feed in data:\n render_and_write('feed.html', feed, '%s.html' % feed['number'], output_dir)\n\n # Copy the assets\n for name in listdir(path.join(ROOT, 'assets')):\n copy(path.join(ROOT, 'assets', name), path.join(output_dir, name))\n # 
copytree(path.join(ROOT, 'assets'), output_dir)",
"def retProject(self):\n # remove last element of list\n self.projects.pop()\n # Next date is one month ago\n self.nextDate = self.nextDate.replace(months=-1, day=1)\n #print(\"DEBUG: month is now \"+self.nextDate.format('YYYY, MMMM', locale='fr_FR'))",
"def project_xml(self):\n\n self.xml = '<?xml version=\"1.0\" standalone=\"yes\"?>\\n' #encoding=\"UTF-8\"?>\\n'\n self.xml += '<Project '\n self.xml += 'xmlns=\"urn:QDA-XML:project:1.0\" '\n guid = self.create_guid()\n self.xml += 'creatingUserGUID=\"' + guid + '\" ' # there is no creating user in QualCoder\n cur = self.settings['conn'].cursor()\n cur.execute(\"select date,memo from project\")\n result = cur.fetchone()\n dtime = result[0].replace(\" \", \"T\")\n self.xml += 'creationDateTime=\"' + dtime + '\" '\n #self.xml += 'basePath=\"' + self.settings['directory'] + '\" '\n self.xml += 'name=\"' + self.settings['projectName'] + '\" '\n self.xml += 'xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" '\n self.xml += 'origin=\"Qualcoder-1.3\" '\n self.xml += 'xsi:schemaLocation=\"urn:QDA-XML:project:1:0 http://schema.qdasoftware.org/versions/Project/v1.0/Project.xsd\"'\n self.xml += '>\\n'\n # add users\n self.xml += \"<Users>\\n\"\n for row in self.users:\n self.xml += '<User guid=\"' + row['guid'] + '\" name=\"' + row['name'] + '\"/>\\n'\n self.xml += \"</Users>\\n\"\n self.xml += self.codebook_xml()\n self.xml += self.variables_xml()\n self.xml += self.cases_xml()\n self.xml += self.sources_xml()\n self.xml += self.notes_xml()\n #self.sets_xml()\n\n self.xml += '</Project>'",
"def generate_articles_list(number) -> str:\n conn = sqlite3.connect(gv.DB_PATH)\n cur = conn.cursor()\n cur.execute(\"\"\"SELECT title, datetime FROM articles \n WHERE datetime < date('now')\n ORDER BY datetime DESC\n LIMIT (?)\"\"\",(number,))\n t = \"\"\n for elt in cur:\n t += f\"=> article?datetime={elt[1]}&title={elt[0]} {elt[0]}\\n\"\n return t",
"def generate_rss(packages, herd):\n if not packages.count():\n return \"\"\"<?xml version=\"1.0\" encoding=\"iso-8859-1\"?><rss version=\"2.0\"><channel><title>Meatoo - Gentoo vs. Freshmeat Releases</title><link>http://meatoo.gentooexperimental.org/</link><description>The latest Freshmeat releases with matching Gentoo versions.</description><lastBuildDate>%s</lastBuildDate><generator>PyRSS2Gen-0.1.1</generator><docs>http://blogs.law.harvard.edu/tech/rss</docs><item><title>Herd %s has no entries.</title><link>http://meatoo.gentooexperimental.org/</link><description>There are no entries for %s</description><pubDate>%s</pubDate></item></channel></rss>\"\"\" % (datetime.datetime.utcnow(), herd, herd, datetime.datetime.utcnow())\n items = []\n for pkg in packages:\n items.append(PyRSS2Gen.RSSItem(\n title = \"%s/%s-%s [%s]\" % \\\n (pkg.portageCategory, pkg.packageName, pkg.portageVersion, \\\n pkg.latestReleaseVersion),\n description = \"Freshmeat Release Date: %s<br><br><b>Portage desc:</b><br> %s<br><br><b>Freshmeat desc:</b><br> %s<br>http://freshmeat.net/projects/%s/\" % (pkg.latestReleaseDate, pkg.portageDesc, pkg.descShort, pkg.packageName),\n link = \"http://meatoo.gentooexperimental.org/\",\n pubDate = datetime.datetime.utcnow()\n ))\n\n rss = PyRSS2Gen.RSS2(\n title = \"Meatoo - Gentoo vs. Freshmeat Releases\",\n link = \"http://meatoo.gentooexperimental.org/\",\n description = \"The latest Freshmeat releases with matching Gentoo versions.\",\n lastBuildDate = datetime.datetime.utcnow(),\n items = items)\n return rss.to_xml()",
"def tasksToday(self, projects, day, added=[]):\n entries = [entry for entry in self.entries if entry.getDay() == day]\n if entries == []:\n return []\n head, *tail = entries \n tasks = [(p, sp, i, task) for p in projects \n for sp in p.getSubprojects() \n for i, task in enumerate(sp.getAllTasks()) \n if not task.isDone() and not (p,sp,i,task) in added\n ]\n tasks = sorted(tasks, key=lambda x: x[3].getDueDate())\n l = [(p, sp, i, t) for p,sp,i,t in tasks \n if head.getProject().getName() == p.getName() and head.getSubproject().getName() == sp.getName()\n ]\n if l == []:\n return Schedule.fromDict(dict(\n self.toDict(), entries=[e.toDict() for e in tail]\n )).tasksToday(projects, day, added)\n first, *rest = l\n fp, fsp, _, ft = first\n if head.getDuration() > ft.getExpectedTime():\n return [(fp, fsp, ft)] + Schedule.fromDict(dict(\n self.toDict(), \n entries=[dict(head.toDict(), duration=head.getDuration() - ft.getExpectedTime())] + [\n e.toDict() for e in tail\n ]\n )).tasksToday(projects, day, [first] + added)\n elif head.getDuration() == ft.getExpectedTime():\n return [(fp, fsp, ft)] + Schedule.fromDict(dict(\n self.toDict(), \n entries=[e.toDict() for e in tail]\n )).tasksToday(projects, day, [first] + added)\n else:\n return [(fp, fsp, ft)] + Schedule.fromDict(dict(\n self.toDict(), entries=[e.toDict() for e in tail]\n )).tasksToday(projects, day, [first] + added)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output.
|
def run_migrations_offline():
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
|
[
"def run_migrations_offline():\r\n url = config.get_main_option(\"sqlalchemy.url\")\r\n context.configure(compare_type=True, url=url)\r\n\r\n with context.begin_transaction():\r\n context.run_migrations()",
"def run_migrations_offline():\n context.configure(\n url=url,\n target_metadata=target_metadata,\n literal_binds=True,\n include_schemas=True,\n include_object=use_schema,\n version_table=f\"alembic_version_{Constants.DATA_PIPELINE_EXECUTION_SCHEMA_NAME}\",\n )\n\n with context.begin_transaction():\n context.run_migrations()",
"def run_migrations_online():\r\n options = config.get_section(config.config_ini_section)\r\n url = options.pop(\"url\")\r\n engine = create_engine(url, poolclass=pool.NullPool)\r\n\r\n connection = engine.connect()\r\n context.configure(\r\n connection=connection,\r\n target_metadata=target_metadata\r\n )\r\n\r\n try:\r\n with context.begin_transaction():\r\n context.run_migrations()\r\n finally:\r\n connection.close()",
"def __run_migrations_online() -> None:\n connectable: Engine = engine_from_config(\n config.get_section(config.config_ini_section), prefix=\"sqlalchemy.\", poolclass=pool.NullPool,\n )\n\n with connectable.connect() as connection: # type: Connection\n context.configure(connection=connection, target_metadata=target_metadata)\n\n with context.begin_transaction():\n context.run_migrations()",
"async def run(self) -> None:\n main_database_configuration: dict = await self.__get_main_database_configuration()\n\n MigrationManager(await self.__get_migration_configuration(\n url=main_database_configuration.get(\"url\"),\n path=main_database_configuration.get(\"path\"),\n )).run()",
"def on_offline(self, func):\n self._on_offline = func",
"def migrate() -> None:\n run_migration()",
"def test_offline_upgrade(self):\n\n # Perform pre_upgrade operations on cluster\n before_tasks = self.async_run_operations(buckets=self.buckets,\n phase=\"before\")\n self._run_tasks([before_tasks])\n prepare_statements = self._create_prepare_statement()\n index_node = self.get_nodes_from_services_map(service_type=\"index\", get_all_nodes=True)[0]\n index_rest = RestConnection(index_node)\n pre_upgrade_index_stats = index_rest.get_all_index_stats()\n for server in self.servers:\n remote = RemoteMachineShellConnection(server)\n remote.stop_server()\n remote.disconnect()\n self.upgrade_servers.append(server)\n upgrade_threads = self._async_update(self.upgrade_to, self.servers)\n for upgrade_thread in upgrade_threads:\n upgrade_thread.join()\n self.add_built_in_server_user()\n ops_map = self.generate_operation_map(\"before\")\n if \"create_index\" in ops_map and not self.build_index_after_create:\n index_name_list = []\n for query_definition in self.query_definitions:\n index_name_list.append(query_definition.index_name)\n build_index_tasks = []\n for bucket in self.buckets:\n build_index_tasks.append(self.async_build_index(\n bucket, index_name_list))\n self._run_tasks([build_index_tasks])\n self.sleep(20)\n kv_ops = self.kv_mutations()\n for kv_op in kv_ops:\n kv_op.result()\n nodes = self.get_nodes_from_services_map(service_type=\"index\",\n get_all_nodes=True)\n for node in nodes:\n self._verify_indexer_storage_mode(node)\n self.multi_query_using_index(buckets=self.buckets, query_definitions=self.load_query_definitions)\n try:\n self._execute_prepare_statement(prepare_statements)\n except Exception as ex:\n msg = \"No such prepared statement\"\n self.assertIn(msg, str(ex), str(ex))\n self._verify_index_partitioning()\n post_upgrade_index_stats = index_rest.get_all_index_stats()\n\n # self.log.info(f\"PRE:{pre_upgrade_index_stats}\")\n # self.log.info(f\"POST:{post_upgrade_index_stats}\")\n self._post_upgrade_task(task='stats_comparison', 
stats_before_upgrade=pre_upgrade_index_stats,\n stats_after_upgrade=post_upgrade_index_stats)\n self._post_upgrade_task(task='create_collection')\n self._post_upgrade_task(task='create_indexes')\n self._post_upgrade_task(task='run_query')\n self._post_upgrade_task(task='request_plus_scans')\n if self.enable_dgm:\n self.assertTrue(self._is_dgm_reached())\n self._post_upgrade_task(task='rebalance_in', node=self.servers[4])\n self._post_upgrade_task(task='rebalance_out', node=self.servers[4])\n self._post_upgrade_task(task='drop_all_indexes')\n # creating indexes again to check plasma sharding\n self._post_upgrade_task(task='create_indexes')",
"def gen_migration(db_url):\n with temp_alembic_ini(ALEMBIC_DIR_LOCATION, db_url) as alembic_ini:\n subprocess.check_call(\n ['alembic', '-c', alembic_ini, 'revision', '--autogenerate']\n )",
"def migration():\n production()\n env.code_branch = 'staging'\n env.restart_server = False",
"def offline_deploy(id=None, silent=False):\n require('hosts')\n require('code_dir')\n\n # Show log of changes, return if nothing to do\n revset = show_log(id)\n if not revset:\n return\n\n migrations = migrate_diff(revset=revset, silent=True)\n if migrations:\n print colors.yellow(\"Will apply %d migrations:\" % len(migrations))\n print indent(migrations)\n\n if not silent:\n request_confirm(\"offline_deploy\")\n\n stop_server(silent=True)\n hg_update(id)\n migrate(silent=True)\n collectstatic()\n start_server(silent=True)",
"def run_migrations(self, migrations_path):\n self._before_migration()\n # Yoyo URIs are so broken.\n uri = self._yoyo_uri_re.sub(r'\\1:////', self._uri.geturl())\n migrate(uri, migrations_path)\n self._after_migration()",
"def fixture_run_globaldb_migrations() -> bool:\n return True",
"def on_offline(self):\n return self._on_offline",
"def alembic(context, args):\n return context.local['env'][\n 'PYTHONPATH=' + str(ROOT_PATH / 'src'),\n 'alembic',\n '-c', ROOT_PATH / 'src' / 'triage' / 'component' / 'results_schema' / 'alembic.ini',\n '-x', 'db_config_file=database.yaml',\n args.remainder,\n ]",
"def update_db(ctx):\r\n with ctx.lcd(settings.SRC_DIR):\r\n ctx.local('python2.6 ./vendor/src/schematic/schematic migrations')",
"def migrate():\n print(\"database\")",
"def _alembic(args):\n from nbexchange.app import NbExchange\n\n hub = NbExchange()\n hub.load_config_file(hub.config_file)\n db_url = hub.db_url\n with _temp_alembic_ini(db_url) as alembic_ini:\n check_call([\"alembic\", \"-c\", alembic_ini] + args)",
"def process_offline(self, data, **kwargs):\n raise NotImplementedError('Must be implemented by subclass.')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context.
|
def run_migrations_online():
options = config.get_section(config.config_ini_section)
url = options.pop("url")
engine = create_engine(url, poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
|
[
"def __run_migrations_online() -> None:\n connectable: Engine = engine_from_config(\n config.get_section(config.config_ini_section), prefix=\"sqlalchemy.\", poolclass=pool.NullPool,\n )\n\n with connectable.connect() as connection: # type: Connection\n context.configure(connection=connection, target_metadata=target_metadata)\n\n with context.begin_transaction():\n context.run_migrations()",
"def run_migrations_offline():\r\n url = config.get_main_option(\"sqlalchemy.url\")\r\n context.configure(compare_type=True, url=url)\r\n\r\n with context.begin_transaction():\r\n context.run_migrations()",
"def run_migrations_offline():\r\n url = config.get_main_option(\"sqlalchemy.url\")\r\n context.configure(url=url)\r\n\r\n with context.begin_transaction():\r\n context.run_migrations()",
"def run_migrations_offline():\n context.configure(\n url=url,\n target_metadata=target_metadata,\n literal_binds=True,\n include_schemas=True,\n include_object=use_schema,\n version_table=f\"alembic_version_{Constants.DATA_PIPELINE_EXECUTION_SCHEMA_NAME}\",\n )\n\n with context.begin_transaction():\n context.run_migrations()",
"def migrate() -> None:\n run_migration()",
"def migration():\n production()\n env.code_branch = 'staging'\n env.restart_server = False",
"async def run(self) -> None:\n main_database_configuration: dict = await self.__get_main_database_configuration()\n\n MigrationManager(await self.__get_migration_configuration(\n url=main_database_configuration.get(\"url\"),\n path=main_database_configuration.get(\"path\"),\n )).run()",
"def migrate_all(self):\n # Closing the connection prior to running any migrations to prevent the\n # current connection from locking the database\n self.connection.close()\n\n self.prepare_next_migration()\n while not self.current_version == -1:\n self.migrate()\n self.version = self.migration.version\n self.prepare_next_migration()\n self.connection = sqlite3.connect(self.db)",
"def migrate():\n print(\"database\")",
"def run_migrations(self):\n\n while self.version < self.SCHEMA_VERSION:\n self.version += 1\n self.migrations.get(self.version, lambda _: None)(self)",
"async def on_startup():\n app.state.ENGINE_READER = create_engine(\n settings.reader_connection_string, echo=settings.debug\n )\n app.state.ENGINE_WRITER = create_engine(\n settings.writer_connection_string, echo=settings.debug\n )\n app.state.DB_READER = sessionmaker(\n autocommit=False, autoflush=False, bind=app.state.ENGINE_READER\n )\n app.state.DB_WRITER = sessionmaker(\n autocommit=False, autoflush=False, bind=app.state.ENGINE_WRITER\n )",
"def fixture_run_globaldb_migrations() -> bool:\n return True",
"def ensure_migrations():",
"def live_migrate_instance(self, ctxt, instance, block_migration,\n disk_over_commit, host_name):\n self.msg_runner.live_migrate_instance(ctxt, instance,\n block_migration,\n disk_over_commit,\n host_name)",
"def _before_migration(self):\n pass",
"def migrate_to_start(self):\n call_command('migrate', self.django_application, self.start_migration,\n verbosity=0)",
"def apply_migrations():\n applied_migrations = False\n retries = 0\n\n with app.app_context():\n # The migrations repo resides in the virtual env.\n # Specifically, Pipenv installs the mci-database repo in the `src` directory,\n # since the Pipfile marks it as \"editable.\"\n path_to_virtual_env = os.environ['VIRTUAL_ENV']\n migrations_dir = os.path.join(\n path_to_virtual_env, 'src', 'mci-database', 'mci_database', 'db', 'migrations')\n\n while retries < MAX_RETRIES and applied_migrations is False:\n print('Attempting to apply migrations ({} of {})...'.format(\n retries + 1, MAX_RETRIES))\n try:\n # apply the migrations\n upgrade(directory=migrations_dir)\n applied_migrations = True\n except Exception:\n retries += 1\n sleep(SLEEP)",
"def setup():\n\n subprocess.check_call([exe.replace(\"python3\", \"djangocms\"), \"testsite\", \"--verbose\", \"--no-sync\"])\n\n with open(\"testsite/testsite/settings.py\", \"a\") as f:\n f.write(\"\"\"\nfrom django.db.backends.signals import connection_created\ndef set_no_sychronous(sender, connection, **kwargs):\n if connection.vendor == 'sqlite':\n cursor = connection.cursor()\n cursor.execute('PRAGMA synchronous = OFF;')\n\nconnection_created.connect(set_no_sychronous)\n\"\"\")\n start = time.time()\n subprocess.check_call([exe, \"manage.py\", \"migrate\"], cwd=\"testsite\")\n elapsed = time.time() - start\n print(\"%.2fs to initialize db\" % (elapsed,))",
"def check_migrations(self):\n from django.db.migrations.executor import MigrationExecutor\n\n try:\n executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])\n except ImproperlyConfigured:\n # No databases are configured (or the dummy one)\n return\n\n plan = executor.migration_plan(executor.loader.graph.leaf_nodes())\n if plan:\n apps_waiting_migration = sorted(\n {migration.app_label for migration, backwards in plan}\n )\n self.stdout.write(\n self.style.NOTICE(\n \"\\nYou have %(unapplied_migration_count)s unapplied migration(s). \"\n \"Your project may not work properly until you apply the \"\n \"migrations for app(s): %(apps_waiting_migration)s.\"\n % {\n \"unapplied_migration_count\": len(plan),\n \"apps_waiting_migration\": \", \".join(apps_waiting_migration),\n }\n )\n )\n self.stdout.write(\n self.style.NOTICE(\"Run 'python manage.py migrate' to apply them.\")\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A simple helper that takes an app, request, template, and some context and constructs a TemplateResponse that will lazily render the template with the given context when the Response is evaluated.
|
def render_response(app, request, template, **context):
template = app.templates.get_template(template)
default_context = {
"config": app.config,
"csrf_token": functools.partial(helpers.csrf_token, request),
"gravatar_url": helpers.gravatar_url,
"static_url": functools.partial(helpers.static_url, app),
"url_for": functools.partial(helpers.url_for, request),
}
return TemplateResponse(
TemplateRenderer(template, context, default_context=default_context),
mimetype="text/html",
)
|
[
"def render(self, template, qcontext=None, lazy=True, **kw):\n response = Response(template=template, qcontext=qcontext, **kw)\n if not lazy:\n return response.render()\n return response",
"def on_template_response(self, context, **kwargs):\n request = kwargs.setdefault(\"request\", self.req())\n\n res = TemplateResponse(request, \"some/template.html\", context)\n\n return self.on_response(res, **kwargs)",
"def render(\n request, template_name, context=None, content_type=None, status=None, using=None\n):\n content = loader.render_to_string(template_name, context, request, using=using)\n return HttpResponse(content, content_type, status)",
"def _render_template(*args, **kwargs):\n rendered_template = render_template(*args, **kwargs, environment=current_app.config['ENVIRONMENT'], base_url=app.config['SERVER_BASE_URL'], alert_message=current_app.config['ALERT_MESSAGE'], disable_full_ads_link=current_app.config['DISABLE_FULL_ADS_LINK'])\n return rendered_template",
"def render_to_response(self, context, **response_kwargs):\n return SimpleTemplateResponse(\n template=self.get_template(),\n context=context,\n **response_kwargs\n )",
"def templated(template=None):\n #pylint: disable-msg=C0111\n def decorator(view_fn):\n @wraps(view_fn)\n def decorated_function(*args, **kwargs):\n template_name = template\n if template_name is None:\n template_name = request.endpoint.replace('.', '/') + '.html'\n ctx = view_fn(*args, **kwargs)\n if ctx is None:\n ctx = {}\n elif not isinstance(ctx, dict):\n return ctx\n return render_template(template_name, **ctx)\n return decorated_function\n return decorator",
"def stream_template(template_name, **context):\n app.update_template_context(context)\n template = app.jinja_env.get_template(template_name)\n stream = template.generate(context)\n return Response(stream_with_context(stream))",
"def render_template(context=None, template=\"default.jinja2\", cls=True):\n if not context:\n context = {}\n screen_cleaner(cls)\n template = env.get_template(template)\n print(template.render(**context))",
"def paranoid_render_to_response(template_name, dictionary=None,\n context_instance=None, *args, **kwargs):\n if dictionary is None:\n dictionary = {}\n with paranoid_context_manager(context_instance, dictionary.keys()) as c:\n return render_to_response(template_name, dictionary, c, *args, **kwargs)",
"def render_template(name, context=None, type='html'):\n return template.render(get_template_path('%s.%s'% (name, type)), context)",
"def jinja_render(context, template):\n jinja_environment = Environment(undefined=StrictUndefined,\n lstrip_blocks=True,\n trim_blocks=True,\n extensions=['jinja2.ext.loopcontrols',\n 'jinja2.ext.with_',\n 'jinja2.ext.autoescape'])\n jinja_template = jinja_environment.from_string(template)\n return jinja_template.render(context)",
"def render(self, template, **kw):\n t = jinja_env.get_template(template) \n self.response.out.write(t.render(kw))",
"def render_template(self, template_name, output_name, context):\n raise NotImplementedError()",
"def render_template(text, context=None):\n template = engines[\"django\"].from_string(text)\n if not context:\n context = {}\n return template.render(context)",
"def build_response(app, request, template, data):\n response = app.make_response(render_template(template, **data))\n value = secrets.token_hex()\n if 'WTSK' not in request.cookies:\n response.set_cookie('WTSK', value=value)\n return response, request.cookies.get('WTSK', value)",
"def _render_response(request, *args, **kwargs):\n httpresponse_kwargs = {'mimetype': kwargs.pop('mimetype', None)}\n status = kwargs.pop('status', 200)\n if 'context_instance' not in kwargs:\n kwargs['context_instance'] = RequestContext(request)\n return HttpResponse(loader.render_to_string(*args, **kwargs),\n status=status, **httpresponse_kwargs)",
"def get_template(self, template_name, app_label=None, model_name=None):\n template, origin = self.find_template(template_name,\n app_label=app_label,\n model_name=model_name)\n if not hasattr(template, 'render'):\n # template needs to be compiled\n template = Template(template, origin, template_name, engine=self)\n return template",
"def jinja2_factory(app):\n config = {\n 'globals': {'reverse': reverse},\n 'filters': {}\n }\n return jinja2.Jinja2(app, config)",
"def render_to_string(template_name, context):\n return Engine(app_dirs=True).render_to_string(template_name, context=context)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a response object (a WSGI application) that, if called, redirects the client to the target location. Supported codes are 301, 302, 303, 305, and 307. 300 is not supported because it's not a real redirect and 304 because it's the answer for a request with a request with defined IfModifiedSince headers.
|
def redirect(location, code=302):
display_location = escape(location)
if isinstance(location, str):
location = iri_to_uri(location)
response = Response(
'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
'<title>Redirecting...</title>\n'
'<h1>Redirecting...</h1>\n'
'<p>You should be redirected automatically to target URL: '
'<a href="%s">%s</a>. If not click the link.' %
(escape(location), display_location), code, mimetype="text/html")
response.headers["Location"] = location
return response
|
[
"def redirect(self, urls, code=None):\n return redirect(self.request, self.response, urls, code=code)",
"def process_response(self, request, response):\n if not self.redirected and response.status_code == 302: # This is a redirect\n referer = request.META.get('HTTP_REFERER')\n destination_url = response['LOCATION']\n destination = urlparse(destination_url).path\n\n new_destination = self.get_redirected_url(request.user, referer, destination)\n\n if new_destination != destination:\n new_url = destination_url.replace(destination, new_destination)\n response['LOCATION'] = new_url\n\n return response",
"def redirect_request(self, req, fp, code, msg, headers, newurl):\n return None",
"def follow_redirect(self, response):\n new_response = response\n while new_response.status_code in (301, 302, 303, 307):\n scheme, netloc, path, query, fragment = urlparse.urlsplit(new_response['location'])\n new_response = self.client.get(path, QueryDict(query))\n return new_response",
"def redirect_response(self, redirect):\n assert isinstance(redirect, core.Redirect)\n web.header('Location', redirect.url)\n web.ctx.status = '303 See Other'\n content_type = 'text/uri-list'\n web.header('Content-Type', content_type)\n web.ctx.hatrac_content_type = content_type\n body = redirect.url + '\\n'\n nbytes = len(body)\n web.header('Content-Length', nbytes)\n web.ctx.hatrac_request_content_range = '*/%d' % nbytes\n return body",
"def http_error_302(self, req, fp, code, msg, headers):\n self.location = headers.get('Location', '')\n uprint(\"headers['Location']=\" + self.location)\n def squote(s):\n return urllib.parse.quote(s, ';/?:&=+,$[]%^')\n try:\n self.location.encode('ascii')\n except UnicodeEncodeError:\n scheme, netloc, path, params, query, fragment = \\\n urllib.parse.urlparse(self.location)\n self.location = urllib.parse.urlunparse((\n scheme, netloc, urllib.parse.quote(path), squote(params), squote(query),\n fragment))\n headers.replace_header('Location', self.location)\n uprint(\"pquoted headers['Location']=\" + self.location)\n return urllib.request.HTTPRedirectHandler.http_error_302(\n self, req, fp, code, msg, headers)",
"def see_other(absolute_url):\n response = HTTPResponse()\n response.redirect(absolute_url, 303)\n return response",
"def redirect(self, absolute_url, status_code=302):\n self.status_code = status_code\n self.headers.append(('Location', absolute_url))\n self.skip_body = True",
"def permanent_redirect(absolute_url):\n response = HTTPResponse()\n response.redirect(absolute_url, 301)\n return response",
"def temporary_redirect(absolute_url):\n response = HTTPResponse()\n response.redirect(absolute_url, 307)\n return response",
"def json_redirect(location):\n result = {'__status__': 302, 'location': location}\n return jsonify(result)",
"def status_code(code):\n\n redirect = dict(headers=dict(location=REDIRECT_LOCATION))\n\n code_map = {\n 301: redirect,\n 302: redirect,\n 303: redirect,\n 304: dict(data=''),\n 305: redirect,\n 307: redirect,\n 401: dict(headers={'WWW-Authenticate': 'Basic realm=\"Fake Realm\"'}),\n 402: dict(\n data='Fuck you, pay me!',\n headers={\n 'x-more-info': 'http://vimeo.com/22053820'\n }\n ),\n 406: dict(data=json.dumps({\n 'message': 'Client did not request a supported media type.',\n 'accept': ACCEPTED_MEDIA_TYPES\n }),\n headers={\n 'Content-Type': 'application/json'\n }),\n 407: dict(headers={'Proxy-Authenticate': 'Basic realm=\"Fake Realm\"'}),\n 418: dict( # I'm a teapot!\n data=ASCII_ART,\n headers={\n 'x-more-info': 'http://tools.ietf.org/html/rfc2324'\n }\n ),\n\n }\n\n r = make_response()\n r.status_code = code\n\n if code in code_map:\n\n m = code_map[code]\n\n if 'data' in m:\n r.data = m['data']\n if 'headers' in m:\n r.headers = m['headers']\n\n return r",
"def test_redirect_view_redirects_to_original_url(self):\n get_response = self.client.get(\n reverse('redirect', kwargs={\"short_code\": self.post_response.data['short_code']})\n )\n self.assertEqual(get_response.status_code, status.HTTP_302_FOUND)\n self.assertRedirects(get_response, self.original_url, fetch_redirect_response=False)",
"def _redirect_from_postback(self, request, identity):\n if request.path == self.postback_path:\n came_from = request.params.get(self.came_from_field)\n if came_from is None:\n came_from = \"/\"\n response = Response()\n response.status = 302\n response.location = came_from\n request.environ[\"repoze.who.application\"] = response",
"def root_index_redirect():\n return redirect('/LightUpPi/', code=302)",
"def process_response(self, request, response):\n\n if not response.cookies.keys():\n return response\n\n # If setting cookie on a 301/2,\n # return 200 and replace the content with a javascript redirector\n if response.status_code != 200 and response.has_header('Location'):\n location = response.get('Location')\n response.content = REDIRECT_HTML.replace('REDIRECT_ME', location)\n response.status_code = 200\n\n pack = {}\n for key in response.cookies.keys():\n pack[key] = response.cookies[key].value\n del(response.cookies[key])\n\n pack_s = json.dumps(pack)\n encoded = base58.b58encode(pack_s)\n\n response.set_cookie('zappa', encoded)\n\n return response",
"def test_good_redirect(self):\n connector = MockConnector()\n factory = RequestFactory()\n ad_rep = AD_REP_FACTORY.create_ad_rep(url='joeshmoe')\n redirect_string = 'coupons/derma-laser-center-inc/3460/joeshmoe'\n request = factory.get(redirect_string)\n # WSGIRequest does not have a session.\n request.session = self.client.session\n request.session['ad_rep_id'] = ad_rep.id\n request.META['site_id'] = 2\n response = redirect_for_ad_rep(request, redirect_string, connector)\n self.assertEqual(response.status_code, 302)\n LOG.debug('response: %s' % response.__dict__)\n self.assertEqual(response['location'],\n '/hudson-valley/coupons/derma-laser-center-inc/3460/')\n self.assertEqual(request.session['ad_rep_id'], ad_rep.id)",
"def page_redirect(self, url):\n response = self.client.get(url)\n self.assertEqual(response.status_code, 302)\n return response",
"def before_request():\n scheme = request.headers.get('X-Forwarded-Proto')\n if scheme and scheme == 'http' and request.url.startswith('http://'):\n url = request.url.replace('http://', 'https://', 1)\n code = 301\n return redirect(url, code=code)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Normalizes a package name as per PEP426
|
def normalize_project_name(name):
    """Normalize a distribution name as per PEP 426.

    Underscores are mapped to hyphens and the result is lower-cased,
    then validated against the PEP 426 character and boundary rules.

    Raises:
        ValueError: if the normalized name contains characters outside
            the permitted set, or does not both start and end with an
            ASCII letter or digit.
    """
    normalized = name.replace("_", "-").lower()
    if not PACKAGE_REGEX["permitted_characters"].match(normalized):
        raise ValueError("name contains illegal characters! (See PEP-426)")
    starts_ok = PACKAGE_REGEX["start_with_alphanumeric"].match(normalized)
    ends_ok = PACKAGE_REGEX["end_with_alphanumeric"].match(normalized)
    if not (starts_ok and ends_ok):
        raise ValueError(
            "Distribution names MUST start with and end with " +
            "an ASCII letter or digit (See PEP-426)"
        )
    return normalized
|
[
"def package_name(self, name: str) -> str:\n\n if name in self.package_aliases:\n return self.package_aliases[name]\n\n if not name:\n return name\n\n return \".\".join(\n self.package_aliases.get(part) or self._package_name(part)\n for part in name.split(\".\")\n )",
"def _replace_root_package(self, old_package, root_name):\n\n parts = old_package.split('.')\n parts.pop(0)\n parts.insert(0, root_name)\n\n return '.'.join(parts)",
"def format_name(self):\n return pkg_format_name(self.package.name)",
"def _sanitize_module(name):\n return _sanitize_identifier(name).lower()",
"def pkgname_filter(pkgname):\n if re.search('^py\\d{2}-', pkgname):\n # Strip Python version from pkgname, as it's present in the binary package name,\n # but is not present in the pkgsrc package name.\n return 'py-' + pkgname[5:]\n return pkgname",
"def sensible_pname(impl, egg_name):\n egg_name = safe_name(egg_name).replace('_', '-')\n if egg_name.startswith('python-'):\n egg_name = egg_name[7:]\n return '{}-{}'.format(PKG_PREFIX_MAP[impl], egg_name.lower())",
"def parse_package_string(path):\n parts = path.split('.')\n\n # Is the last entry in the path capitalized?\n if parts[-1][0].isupper():\n return \".\".join(parts[:-1]), parts[-1]\n\n return path, \"\"",
"def sanitize_module_name(module_name):\n module_name = module_name.replace(\"-\", \"_\").replace(\".\", \"_\")\n if module_name[0] not in string.ascii_letters:\n module_name = \"a\" + module_name\n return module_name",
"def normalize_module_name(self, name: str) -> str:\n wdir = osp.realpath(self.config.wdir)\n if wdir != self.rootdir:\n abspath = osp.join(self.rootdir, name)\n try:\n name = osp.relpath(abspath, start=wdir)\n except ValueError:\n # Happens on Windows if paths are on different drives\n pass\n\n if name.endswith('.py'):\n name = name[:-3]\n return name.replace(osp.sep, '.')",
"def humanize(module_name):\n module_name = module_name.lower()\n module_name = module_name.replace('_', ' ').replace('-', ' ')\n module_name = module_name.title()\n return module_name",
"def _normalize_namespace(key: str) -> str:\n if not key:\n return \"\"\n key = _normalize_spaces(key)\n return key[0].upper() + key[1:].lower()",
"def _sanitize_package(orig):\n line = orig\n # python3.3 packages (pygit2, P4Python)\n # Do these BEFORE general python3.3, since python3.3 prefix would match site-packages prefix\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/site-packages/p4python-2015.2.1205721-py3.3-macosx-10.6-intel.egg/P4.py:569(run)\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/site-packages/p4python-2015.2.1205721-py3.3-macosx-10.6-intel.egg/P4.py:749(__flatten)\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/site-packages/p4python-2015.2.1205721-py3.3-macosx-10.6-intel.egg/P4.py:877(insert)\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/site-packages/pygit2/repository.py:58(__init__)\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/site-packages/pygit2/repository.py:71(_common_init)\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/site-packages/pytz/__init__.py:245(__str__)\n python_packages_re = re.compile(r'.*/site-packages/(.*)')\n m = python_packages_re.match(orig)\n if m:\n line = m.group(1)\n package_module_re = re.compile(r'([^/]+)/(.*)')\n m = package_module_re.match(line)\n if m:\n package = m.group(1)\n module = m.group(2)\n for p in ['p4python', 'pygit2']:\n if p in package:\n package = p\n line = package + \"/\" + module\n return line\n\n # python3.3 library:\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/multiprocessing/synchronize.py:296(is_set)\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/os.py:671(__getitem__)\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/os.py:694(__iter__)\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/re.py:158(search)\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/re.py:212(compile)\n # /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/tempfile.py:386(__del__)\n python33_re = re.compile(r'.*/python3.3/(.*)')\n m 
= python33_re.match(line)\n if m:\n line = m.group(1)\n return line\n\n # Git Fusion\n # /Users/zig/Dropbox/git-fusion-main/bin/p4gf_atomic_lock.py:177(update_all_gf_reviews)\n # /Users/zig/Dropbox/git-fusion-main/bin/p4gf_atomic_lock.py:202(update_repo_reviews)\n # /Users/zig/Dropbox/git-fusion-main/bin/p4gf_util_p4run_logged.py:49(_log_p4_request)\n # /Users/zig/Dropbox/git-fusion-main/bin/p4gf_util_p4run_logged.py:55(_log_p4_results)\n git_fusion_re = re.compile(r'.*/(p4gf_[^/]+)')\n m = git_fusion_re.match(line)\n if m:\n line = m.group(1)\n return line\n\n # Built-in (leave unchanged)\n # {built-in method chdir}\n # {built-in method discover_repository}\n # {built-in method getcwd}\n # {built-in method getfilesystemencoding}\n # {built-in method hasattr}\n # {built-in method isinstance}\n # {built-in method len}\n # {built-in method max}\n # {built-in method poll}\n # {built-in method proxy}\n # {built-in method sorted}\n # {built-in method time}\n # {method 'acquire' of '_multiprocessing.SemLock' objects}\n # {method 'add' of 'set' objects}\n # {method 'append' of 'collections.deque' objects}\n # {method 'append' of 'list' objects}\n # {method 'as_array' of 'P4API.P4Map' objects}\n # {method 'decode' of 'bytes' objects}\n\n return line",
"def nonColonizedName_to_moduleName(name):\r\n return re.sub('\\.', '_', name)",
"def pkgname(nevra):\n return nevra.rsplit('-', 2)[0]",
"def _get_package_name(self, path, root_path):\n\n return path_utils.get_package_name(path, root_path)",
"def standardize_groupname(label: str) -> str:\n new_label = label.replace(\"/\", \"-\")\n return new_label",
"def standardize_name(name):\n if name.startswith('H'):\n return name\n return 'H%s%s' % (name[0].capitalize(), name[1:])",
"def shorten_version(name, version):\n if version.lower().startswith(name.lower()):\n version = version[len(name):].lstrip()\n # For MoGo's stupidly long version string\n a, b, c = version.partition(\". Please read http:\")\n if b:\n version = a\n return version[:32].rstrip()",
"def display_name(\n path: pathlib.Path,\n package_name: Optional[str] = \"\",\n dir_display_name_converter: Optional[Callable] = None,\n) -> str:\n name_path = path / \"display_name.txt\"\n if name_path.exists():\n with open(name_path, \"r\") as name_fo:\n return name_fo.readline().rstrip(\"\\r\\n\")\n\n raw_name = package_name.split(\".\")[-1] if package_name else path.name\n\n if dir_display_name_converter:\n return dir_display_name_converter(raw_name)\n\n return string.capwords(raw_name.replace(\"_\", \" \"))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to a different host and uses a safe scheme). Always returns ``False`` on an empty url.
|
def is_safe_url(url, host):
    """Return ``True`` if *url* is a safe redirection target for *host*.

    A URL is considered safe when it does not point at a different host
    and uses no scheme other than http/https.  An empty URL is never safe.
    """
    if not url:
        return False
    parts = urllib.parse.urlparse(url)
    same_host = not parts.netloc or parts.netloc == host
    safe_scheme = not parts.scheme or parts.scheme in ("http", "https")
    return same_host and safe_scheme
|
[
"def is_safe_url(target):\r\n ref_url = urlparse(request.host_url)\r\n test_url = urlparse(urljoin(request.host_url, target))\r\n\r\n return test_url.scheme in ('http', 'https') and \\\r\n ref_url.netloc == test_url.netloc",
"def is_safe_url(target):\r\n ref_url = urlparse(request.host_url)\r\n test_url = urlparse(urljoin(request.host_url, target))\r\n\r\n return test_url.scheme in ('http', 'https') and \\\r\n ref_url.netloc == test_url.netloc",
"def is_url(self):\n urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|\\\n (?:%[0-9a-fA-F][0-9a-fA-F]))+', self.token)\n if len(urls) == 0:\n return False\n else:\n return True",
"def es_url_valida(url_):\n url_parseado = urlparse.urlparse(url_)\n return all([url_parseado.scheme, url_parseado.netloc])",
"def is_url(self, path):\n # FIXME: cover more URL types ...\n return path[:7] == \"http://\"",
"def is_legacy_signed_url_valid(user, url):\n parsed = urlsplit(url)\n params = MultiDict(parse_qs(parsed.query))\n try:\n signature = params.pop('token')\n except KeyError:\n return False\n\n url = urlunsplit((\n '',\n '',\n parsed.path,\n urlencode(list(params.lists()), doseq=True),\n parsed.fragment\n ))\n signer = Signer(user.signing_secret, salt='url-signing')\n return signer.verify_signature(url.encode(), signature)",
"def is_secure(self):\r\n return self.url.startswith(\"https\")",
"def is_url(path):\n try:\n parse_result = urlparse(path)\n return all((parse_result.scheme, parse_result.netloc, parse_result.path))\n except ValueError:\n return False",
"def is_url(path):\n return path.startswith('http') or \\\n path.startswith('https') or \\\n path.startswith('ftp') or \\\n path.startswith('ftps')",
"def is_valid_url_scheme(url):\n if url is None or len(url.strip()) == 0:\n return False\n parsed_url = urlparse(url)\n scheme = parsed_url.scheme\n return scheme in (\"http\", \"https\")",
"def url_allowed(self, url):\n return get_netloc(url) in self.root_hosts",
"def _ManifestUrlHasSecureScheme(self):\n secure_schemes = (\n \"file\",\n \"https\",\n \"ssh\",\n \"persistent-https\",\n \"sso\",\n \"rpc\",\n )\n parse_results = urllib.parse.urlparse(self._manifest_url)\n return parse_results.scheme in secure_schemes",
"def valid_url(x: str) -> bool:\n if isinstance(x, str) and re.match(URL_PATTERN, x):\n return True\n else:\n return False",
"def is_absolute(self, url):\n return bool(urlparse(url).netloc)",
"def is_url_valid(current_page_url, href_text):\n\treturn \":\" not in href_text \\\n\t\tand \"http://en.wikipedia.org/wiki/\" in urlparse.urljoin(current_page_url, href_text)\\\n\t\tand \"http://en.wikipedia.org/wiki/Main_Page\" not in urlparse.urljoin(current_page_url, href_text)",
"def default_validation(url):\n return bool(urlparse(url).scheme)",
"def is_url(obj: Any) -> bool:\n if not isinstance(obj, str) or not \"://\" in obj:\n return False\n try:\n res = requests.compat.urlparse(obj)\n if not res.scheme or not res.netloc or not \".\" in res.netloc:\n return False\n res = requests.compat.urlparse(requests.compat.urljoin(obj, \"/\"))\n if not res.scheme or not res.netloc or not \".\" in res.netloc:\n return False\n except:\n return False\n return True",
"def is_absolute(self, url):\n return bool(urllib.parse.urlparse(url).netloc)",
"def se_puede_acceder_a_url(url_):\n try:\n respuesta = urllib2.urlopen(url_)\n return respuesta.code == 200\n except (urllib2.HTTPError, urllib2.URLError) as error:\n LOGGER.warning('Error al acceder a la URL %s. Error: %s', url_, error)\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Pop key, value pair from front of dictionary.
|
def pop_front(self):
    """Remove and return the (key, value) pair at the front of the dictionary.

    The front entry is the oldest (first-inserted) one; an OrderedDict's
    ``popitem`` raises KeyError when the dictionary is empty.
    """
    front = self.dict.popitem(last=False)
    return front
|
[
"def popitem(self):\n heap = self._heap\n position = self._position\n\n try:\n end = heap.pop(-1)\n except IndexError:\n raise KeyError('pqdict is empty')\n\n if heap:\n node = heap[0]\n heap[0] = end\n position[end.key] = 0\n self._sink(0)\n else:\n node = end\n del position[node.key]\n return node.key, node.value",
"def popitem(self):\r\n try:\r\n return self.maps[0].popitem()\r\n except KeyError:\r\n raise KeyError('No keys found in the first mapping.')",
"def pop(self, item=[], default=None):\r\n if item != []:\r\n return self._thedict.pop(item, default)\r\n else:\r\n try:\r\n return self._thedict.pop(self.keys()[0])\r\n except IndexError:\r\n raise KeyError(': \\'pop(): dictionary is empty\\'')",
"def pop(self):\n # Get our iter first to avoid catching and accidentally\n # ignoring POSKeyError\n it = iter(self)\n try:\n value = next(it)\n except StopIteration:\n raise KeyError\n self.discard(value)\n return value",
"def pop(self, key, *args):\r\n try:\r\n return self.maps[0].pop(key, *args)\r\n except KeyError:\r\n raise KeyError('Key not found in the first mapping: {!r}'.format(key))",
"def pop(self, key, default=None):\n return OrderedDict.pop(self, key.lower(), default)",
"def __pop(target_dict, location):\n removed = target_dict[location][0]\n target_dict[location] = target_dict[location][1:]\n if len(target_dict[location]) == 0:\n del target_dict[location]\n return removed",
"def Map_popitem(aMap):\n for bucket in aMap:\n if bucket != []:\n k, v = bucket[0]\n Map_delete(aMap, k)\n return (k, v)\n \n return None",
"def _cache_pop(self, k):\n self._cache.move_to_end(k)\n _, v = self._cache.popitem()\n return v",
"def pop(self, key, default=None):\n # type: (Any, Any) -> Any\n try:\n value = self._del(key)\n return value\n except KeyError:\n return default",
"def PopValueOfType(a_dict, key, value_type, value_description):\n ret = GetValueOfType(a_dict, key, value_type, value_description)\n # We were able to get that value, so the key must exist.\n a_dict.pop(key)\n return ret",
"def pop(self, item, default=_sentinel):\r\n try:\r\n return self.popitem(item)[3]\r\n except KeyError:\r\n if default != RangeDict._sentinel:\r\n return default\r\n raise",
"def remove(self, key):\n \n value = self._linear_search(key)\n if value != -1:\n value = self._values.pop(value)\n else:\n value = None\n \n \n return value",
"def stash_pop(self):",
"def pop(self, key: str, default: Any = DEFAULT):\n if default is self.DEFAULT:\n try:\n value = self.args.pop(key)\n except KeyError:\n raise ConfigurationError(\"key \\\"{}\\\" is required at location \\\"{}\\\"\".format(key, self.history))\n else:\n value = self.args.pop(key, default)\n if not isinstance(value, dict):\n logger.info(self.history + key + \" = \" + str(value)) # type: ignore\n return self._check_is_dict(key, value)",
"def _pop_key(self, doc, key):\n path = key.split('.')\n cur = doc\n for step in path[:-1]:\n cur = cur[step]\n cur.pop(path[-1], None)",
"def remove(self, key):\n hashkey = self.hash(key)\n if self.hashmap[hashkey]:\n self.hashmap[hashkey][self.pos(key)] = None",
"def pop_key(self):\n return _ldns.ldns_key_list_pop_key(self)\n #parameters: ldns_key_list *,\n #retvals: ldns_key *",
"def pop(self, key): \n logger.debug('FrameBuffer.pop called [%s]' % key)\n\n assert key is not None, 'FrameBuffer.pop key cannot be None'\n assert key in self, 'FrameBuffer.pop key must be a property of FrameBuffer object'\n\n val = self.__dict__[key][0]\n del self.__dict__[key][0]\n return val"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Insert key-value pair into end of dictionary. If the key already exists, the key-value pair will be moved to the end of the dictionary and the value will be updated.
|
def insert_end(self, key, value):
    """Insert a key-value pair at the end of the dictionary.

    An existing key is moved to the end with its value updated.  When
    the dictionary grows beyond ``maxsize``, the oldest (front) entry
    is evicted.
    """
    if key in self.dict:
        # Drop the stale entry so reinsertion lands at the end.
        del self.dict[key]
    self.dict[key] = value
    if len(self.dict) > self.maxsize:
        # Evict the least-recently-inserted entry.
        self.dict.popitem(last=False)
|
[
"def insert(self, key, value):\n # hash the key and map that hash to a bucket\n hash_key = self.hash_function(key) % len(self.buckets)\n\n bucket = self.buckets[hash_key]\n\n for i, val in enumerate(bucket):\n # check if exists, and override if so\n if val[0] == key:\n bucket[i] = (key, value)\n return\n # insert new\n bucket.append((key, value))",
"def insert(self, key: str, value: Any) -> None:\r\n i = 0\r\n index = self.horner_hash(key)\r\n factor = i ** 2\r\n if self.hash_table[index] is None:\r\n self.num_items += 1\r\n self.hash_table[index + factor] = (key, [value])\r\n else:\r\n m = 0\r\n while key != self.hash_table[index + (i ** 2) - m][0]:\r\n i += 1\r\n if self.table_size <= index + (i ** 2) - m:\r\n m = m + self.table_size\r\n if self.hash_table[index + (i ** 2) - m] is not None:\r\n continue\r\n self.num_items += 1\r\n self.hash_table[index + (i ** 2) - m] = (key, [value])\r\n break\r\n if self.hash_table[index + (i ** 2) - m][0] == key and value not in self.hash_table[index + (i ** 2) - m][1]:\r\n self.hash_table[index + (i ** 2) - m][1].append(value)\r\n if 0.5 < self.get_load_factor():\r\n self.rehash_helper()",
"def add(self, key, value):\r\n index = self.hash(key)\r\n\r\n if self.array[index] is not None:\r\n # This index contains some values.\r\n # We need to check if the key we're adding already exists, this\r\n # way, we can update it with the new value, this way, we can update\r\n # it with the new value\r\n\r\n # kvp = key/value pair\r\n for kvp in self.array[index]:\r\n # If the key is found, then update the current value to the new\r\n # value.\r\n\r\n if kvp[0] == key:\r\n kvp[1] = value\r\n break\r\n\r\n # Remember for/else, the else executes after the loop completetes\r\n # normally. Meaning, if no breaks happen, it will execute this else\r\n # statement.\r\n else:\r\n # If no breaks happened, it means that no existing key was\r\n # found. Therefore, we can simply append it to the end of the\r\n # list at this index.\r\n self.array[index].append([key, value])\r\n\r\n else:\r\n # This index is empty. We will create an empty list and append the\r\n # key value pair.\r\n self.array[index] = []\r\n self.array[index].append([key, value])",
"def _insert(self, key, value):\n location = self.hash(key, True)\n\n if location is not None: # there is an open spot in table\n # location is None or previously deleted\n if self.table[location] is None or self.table[location].deleted:\n new_node = HashNode(key, value)\n self.table[location] = new_node\n self.size += 1\n\n load_factor = self.size/self.capacity\n \n if load_factor >= 0.5:\n self._grow()\n else: # key already exists\n self.table[location].value = value",
"def safe_insert(key, value, my_dict):\r\n return",
"def add(self, key, value):\n self._data.append(self._Item(key, value))\n self._upheap(len(self._data)-1) # upheap newly added position",
"def add(self, key, value): # 3\r\n self._data.append(self._Item(key, value))\r\n self._upheap(len(self._data) - 1) # upheap newly added position\r",
"def append(self, key, value):\n if key not in self._fields.keys():\n raise KeyError(key)\n self._values[key].append(value)",
"def update_dict(dic, key, value):\n if key in dic:\n oldvalue = dic[key]\n oldvalue.append(value)\n dic[key] = oldvalue\n return dic\n else:\n dic[key] = [value]\n return dic",
"def insert(self, key, value):\r\n entry = Entry(key, value)\r\n # self.check_load()\r\n index = self.horner_hash(entry.key)\r\n j = 0\r\n for i in range(0, self.table_size):\r\n j = (index + i**2) % self.table_size #quad probing in case of collision\r\n\r\n if not self.hash_table[j]: #insert if there is nothing there\r\n self.hash_table[j] = entry\r\n self.num_items += 1\r\n self.check_load()\r\n return\r\n\r\n elif self.hash_table[j].key == entry.key: #insert if it's the same word\r\n self.hash_table[j] = entry\r\n return",
"def put(self, key, value):\n index = self.hash_index(key)\n current = self.storage[index].head\n while current:\n if current.key == key:\n current.value = value\n current = current.next\n\n node = HashTableEntry(key, value)\n self.storage[index].insert_at_head(node)\n self.count += 1\n\n # init linked list at index position\n # if collision -> reassign to either head or tail",
"def insert(self, val):\n if self.dict.get(val) is not None:\n return False\n else:\n self.set_list.append(val)\n self.dict[val] = len(self.set_list) - 1\n return True",
"def append(self, key, value):\r\n return self.execute_command(\"APPEND\", key, value)",
"def insert(self, key, value):\n h = hashlib.sha256(key)\n index = int(h.hexdigest(), 16) % 10000\n self.hash_table[index].append([key, value])",
"def add(self, key, content=None):\n\n if key in self._map and self._map.get(key) is content:\n return # do nothing as this key already exists with the same value\n elif len(self._map) >= self._max_len:\n self.delete_least_used()\n\n self._map[key] = content\n self._popularity[key] = 0",
"def _make_or_add(dictionary, key, item):\n if not dictionary.get(key):\n dictionary[key] = [item]\n return\n\n dictionary[key].append(item)",
"def __setitem__(self, key, value):\n k = self._lowerOrReturn(key)\n self.data[k] = (key, value)",
"def append_or_add_set_to_dict(k, val, dic):\n if k in dic.keys():\n dic[k].add(val)\n else:\n dic[k] = {val}",
"def forceput(self, key, val):\n self._put(key, val, overwrite_key=True, overwrite_val=True)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Delete an entry from the dictionary
|
def delete(self, key):
    """Remove *key* (and its value) from the dictionary.

    Raises:
        KeyError: if *key* is not present.
    """
    self.dict.pop(key)
|
[
"def _safe_delete(self, my_dictionary: Dict[Any, Any], key: Any) -> None:\n if key in my_dictionary:\n del my_dictionary[key]",
"def remove_item(self, key, value):\n ...",
"def delete(self,key):\n\t\tdel self.form_dict[key]",
"def safe_delete(mydict, key):\n if key in mydict:\n del(mydict[key])\n\n return mydict",
"def Map_delete(aMap, key):\n bucket = Map_get_bucket(aMap, key)\n \n for i in xrange(len(bucket)):\n k, v = bucket[i]\n if key == k:\n del bucket[i]\n break",
"def __delitem__(self, key):\n query = self.store.delete().where(self.store.c.key == key)\n result = self.conn.execute(query)\n if result.rowcount == 0:\n raise KeyError",
"def __delitem__(self,k):\n for j in range(len(self._table)):\n if k == self._table[j]._key: # Found a match\n self._table.pop(j) # remove item\n return # and quit\n raise KeyError(\"Key Error: \"+ repr(k))",
"def remove(self, key: int) -> None:\n if self.my_map.get(key):\n self.my_map.pop(key)",
"def __delitem__(self, key):\n hash_val = self._hash(key)\n if self.table[hash_val] != self.defVal and (isinstance(self.table[hash_val], tuple) and\n self.table[hash_val][0] == key and\n self.table[hash_val][2] == True):\n self.table[hash_val] = (self.table[hash_val][0], self.table[hash_val][1], False)\n else:\n key_found = False\n iter_count = 0\n while not key_found:\n if hash_val >= self.capacity:\n hash_val = 0\n if self.table[hash_val] == self.defVal:\n \traise KeyError\n if self.table[hash_val] != self.defVal:\n if self.table[hash_val][0] == key:\n if self.table[hash_val][2] == True:\n self.table[hash_val] = (self.table[hash_val][0], \n self.table[hash_val][1], False)\n key_found = True\n break\n hash_val += 1\n iter_count += 1",
"def remove(self, key):\n hashkey = self.hash(key)\n if self.hashmap[hashkey]:\n self.hashmap[hashkey][self.pos(key)] = None",
"def remove(key: str, value: object, catname: str=''):",
"def remove(self, key):\r\n hash_idx = hash_string(key, self.slots)\r\n num = 1\r\n while self.table[hash_idx] and self.table[hash_idx].key != key:\r\n hash_idx = (hash_idx + num*num) % self.slots\r\n num += 1\r\n if self.table[hash_idx] is None:\r\n raise KeyError\r\n return_val = self.table[hash_idx]\r\n self.table[hash_idx] = self.deleted\r\n self.num_items -= 1\r\n return return_val",
"def remove(self, key):\n self.react_dict.remove_reaction(key)\n self.react_dict.save_dict_to_file()",
"def delete_feed(key): \n SH = shelve.open('feeds.db', writeback=True)\n feed_dict = SH['feeds']\n # print(\"FEED DICT IN DEL\", key)\n print(\"FEED DICT KEYY\", feed_dict[key])\n del feed_dict[key]\n SH.close()\n choice = input(\"Delete another feed? Y/n\")\n if choice == \"y\".lower():\n unsubscribe_menu()\n else:\n main_menu_logic()\n pass",
"def delete(self, key, item): # noqa\n return self.execute_command(CF_DEL, key, item)",
"def remove(self, e):\n \n del self.vals[e]",
"def delete(entry, commit=True):\n DB.session.delete(entry)\n if commit:\n _commit()",
"def delete(self, key: str, path: Optional[str] = Path.root_path()) -> int:\n return self.execute_command(\"JSON.DEL\", key, str(path))",
"def delete_key_cloud_map_entry(self, keyname):\n\n keycloudmap = self.db.find(KEYCLOUDMAP,\n output=\"object\",\n key_name=keyname)\n # print(keycloudmap.values())\n for key in keycloudmap:\n # print(\"Deleting: {:}\".format(key))\n self.db.delete(key)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Starts the training asynchronously using the flask executor. It runs the training based on the DSI_EXECUTE_ON environment variable and, at the end, removes the future from the executor.
|
def start_training():
    """Run one training execution, dispatching on DSI_EXECUTE_ON.

    Runs the training either locally (optionally against a DVC remote)
    or on a remote host over SSH.  Intended to run asynchronously inside
    the flask executor, so the full traceback of any failure is logged
    before re-raising.

    Raises:
        ValueError: wrapping whatever exception the training raised
            (including an unknown DSI_EXECUTE_ON value); the original
            exception is preserved as the cause.
    """
    logger = logging.getLogger(__name__)
    logger.info("Training execution started...")
    # noinspection PyBroadException
    try:
        environment = execution_environment()
        if environment == DSI_EXECUTE_ON_LOCAL:
            # Read the config once instead of calling dvc_remote() twice.
            remote = dvc_remote()
            if remote:
                train(dvc_data_repo=remote, dvc_ssh_user=ssh_username(),
                      dvc_ssh_password=ssh_password())
            else:
                train()
        elif environment == DSI_EXECUTE_ON_SSH:
            connection = SSHRemoteExecutor(host=ssh_host(),
                                           username=ssh_username(),
                                           password=ssh_password(),
                                           debug_mode=debug_mode() or flask_args.debug,
                                           port=ssh_port(),
                                           dvc_remote=dvc_remote())
            connection.setup_prerequisites()
            connection.run_training()
            connection.save_model_locally()
        else:
            raise Exception("{0} has a unknown value '{1}'".format(DSI_EXECUTE_ON, environment))
        logger.info("Training execution ended!!!")
    except Exception as training_exc:
        # Broad catch on purpose: we cannot foresee every exception the DS
        # training code may raise, and this runs in a separate executor
        # thread where an uncaught exception would vanish silently.
        logger.info("Training execution raised an exception...")
        # format_exc() replaces the original StringIO/print_exc/seek/read
        # round-trip with a single call producing the same text.
        logger.error(traceback.format_exc())
        # Chain the original exception so its type and context survive.
        raise ValueError(training_exc) from training_exc
|
[
"def do_training():\n train_cls = Train()\n train_cls.run()",
"async def start_background_tasks(app):\n await license_init(app)\n await matlab_starter(app)",
"def main():\n\n config = SimCLRConfig.parse_arguments()\n os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([str(gpu) for gpu in config.gpus])\n num_gpus_per_node = len(config.gpus)\n world_size = config.num_nodes * num_gpus_per_node\n distributed = world_size > 1\n setattr(config, 'num_gpus_per_node', num_gpus_per_node)\n setattr(config, 'world_size', world_size)\n setattr(config, 'distributed', distributed)\n \n rich.print(config.__dict__)\n config.save()\n\n if config.distributed:\n rich.print(f\"Distributed training on {world_size} GPUs.\")\n mp.spawn(\n main_worker,\n nprocs=config.num_gpus_per_node,\n args=(config, )\n )\n else:\n rich.print(f\"Single GPU training.\")\n main_worker(0, config=config) # single machine, single gpu",
"def run(self):\n ssc = StreamingContext(sc, 5)\n from OneTestScenario import SparkApp\n SparkApp(model=model, Context=sc, streamingContext=ssc)\n print \"\\nSpark thread ended.\\n\"",
"def train_and_eval(self, train_input_fn, eval_input_fn):\n\n self._save_config()\n output_dir = os.path.join(self._runtime_config.model_dir, 'eval')\n tf.io.gfile.makedirs(output_dir)\n\n train_run_config = self.build_strategy_configuration('train')\n train_params = self.build_model_parameters('train')\n # Prevent 'save_summary_steps' race condition in multicard scenario.\n # Value is still used by training hooks.\n save_summary_steps = train_params['save_summary_steps']\n # Remove value for workers other than 0.\n if MPI_is_distributed() and MPI_rank() != 0:\n train_params['save_summary_steps'] = None\n train_run_config = train_run_config.replace(save_summary_steps=None)\n train_estimator = self.build_mask_rcnn_estimator(train_params, train_run_config, 'train')\n\n eval_estimator = None\n eval_results = None\n\n num_cycles = math.ceil(self._runtime_config.total_steps / self._runtime_config.num_steps_per_eval)\n\n training_hooks = get_training_hooks(\n mode=\"train\",\n model_dir=self._runtime_config.model_dir,\n checkpoint_path=self._runtime_config.checkpoint,\n skip_checkpoint_variables=self._runtime_config.skip_checkpoint_variables,\n batch_size=train_params['batch_size'],\n save_summary_steps=save_summary_steps,\n profile_steps=train_params['profile']\n )\n\n for cycle in range(1, num_cycles + 1):\n\n if not MPI_is_distributed() or MPI_rank() == 0:\n\n print() # Visual Spacing\n logging.info(\"=================================\")\n logging.info(' Start training cycle %02d' % cycle)\n logging.info(\"=================================\\n\")\n\n max_cycle_step = min(int(cycle * self._runtime_config.num_steps_per_eval), self._runtime_config.total_steps)\n\n PROFILER_ENABLED = False\n\n if (not MPI_is_distributed() or MPI_rank() == 0) and PROFILER_ENABLED:\n profiler_context_manager = tf.contrib.tfprof.ProfileContext\n\n else:\n from contextlib import suppress\n profiler_context_manager = lambda *args, **kwargs: suppress() # No-Op context manager\n\n with 
profiler_context_manager(\n '/workspace/profiling/',\n trace_steps=range(100, 200, 3),\n dump_steps=[200]\n ) as pctx:\n\n if (not MPI_is_distributed() or MPI_rank() == 0) and PROFILER_ENABLED:\n opts = tf.compat.v1.profiler.ProfileOptionBuilder.time_and_memory()\n pctx.add_auto_profiling('op', opts, [150, 200])\n\n train_estimator.train(\n input_fn=train_input_fn,\n max_steps=max_cycle_step,\n hooks=training_hooks,\n )\n\n if MPI_is_distributed():\n from mpi4py import MPI\n MPI.COMM_WORLD.Barrier() # Needed to have correct eval step measurements\n\n if not MPI_is_distributed() or MPI_rank() == 0:\n\n print() # Visual Spacing\n\n if self._runtime_config.skip_cycles < cycle:\n logging.info(\"=================================\")\n logging.info(' Start evaluation cycle %02d' % cycle)\n logging.info(\"=================================\\n\")\n else:\n logging.info(\"=================================\")\n logging.info(' Skip evaluation cycle %02d' % cycle)\n logging.info(\"=================================\\n\")\n\n if self._runtime_config.skip_cycles < cycle:\n if eval_estimator is None:\n eval_run_config = self.build_strategy_configuration('eval')\n eval_params = self.build_model_parameters('eval')\n eval_estimator = self.build_mask_rcnn_estimator(eval_params, eval_run_config, 'eval')\n\n last_ckpt = self.get_last_checkpoint_path()\n logging.info(\"Restoring parameters from %s\\n\" % last_ckpt)\n\n eval_results, predictions = evaluation.evaluate(\n eval_estimator,\n eval_input_fn,\n self._runtime_config.eval_samples,\n self._runtime_config.eval_batch_size,\n self._runtime_config.include_mask,\n self._runtime_config.val_json_file,\n report_frequency=self._runtime_config.report_frequency,\n checkpoint_path=last_ckpt,\n hooks=[TimeToTrainEstimatorHook(train_or_eval='eval',\n output_dir=self._runtime_config.model_dir)]\n )\n self._write_summary(output_dir, eval_results, predictions, max_cycle_step)\n\n return eval_results",
"def start(self) -> None:\n self.celery_executor.start()\n self.kubernetes_executor.start()",
"def execute(project_root_path: str):\n # ensures pre_processing in done\n raw_processing.execute(project_root_path)\n # start timer\n start_nlp = datetime.datetime.now().timestamp()\n print(\"\\n* NLP - Process started\")\n # 2 Threads to compute annotations from spaCy lib\n pool = ThreadPool(processes=2)\n # start the threads and wait for them to finish\n train_ann = pool.apply_async(coarse_ann_computations, args=[\"training\", project_root_path])\n # wait for 1 second to avoid creation of same directory by different threads\n time.sleep(1)\n test_ann = pool.apply_async(coarse_ann_computations, args=[\"test\", project_root_path])\n # wait for 1 second to avoid creation of same directory by different threads\n time.sleep(1)\n test_ann_status = test_ann.get()\n train_ann_status = train_ann.get()\n if not train_ann_status:\n print(\"- Error: In computing annotations for training data\")\n if not test_ann_status:\n print(\"- Error: In computing annotations for test data\")\n # timer for end time\n end_nlp = datetime.datetime.now().timestamp()\n total_nlp = datetime.datetime.utcfromtimestamp(end_nlp - start_nlp)\n print(\"- NLP : Done in {0}h {1}m {2}s\".format(total_nlp.hour, total_nlp.minute, total_nlp.second))\n # separate the computed Natural language properties for each coarse class (main categories)\n print(\"\\n* Separating NLP properties for each of the Coarse classes (categories)\")\n # use the same thread created before\n start_sep = datetime.datetime.now().timestamp()\n train_sep_ann = pool.apply_async(fine_prop_separation, args=[\"training\", project_root_path, \"doc\"])\n # wait for 1 second to avoid creation of same directory by different threads\n time.sleep(1)\n test_sep_ann = pool.apply_async(fine_prop_separation, args=[\"test\", project_root_path, \"doc\"])\n # wait for 1 second to avoid creation of same directory by different threads\n time.sleep(1)\n train_sep_ann_status = train_sep_ann.get()\n test_sep_ann_status = test_sep_ann.get()\n if not 
train_sep_ann_status:\n print(\"- Error: In separating annotations for training data\")\n if not test_sep_ann_status:\n print(\"- Error: In separating annotations for test data\")\n if train_sep_ann_status and test_sep_ann_status:\n # timer for end time\n end_sep = datetime.datetime.now().timestamp()\n total_sep = datetime.datetime.utcfromtimestamp(end_sep - start_sep)\n print(\"- NLP properties separation : Done in {0}h {1}m {2}s\"\n .format(total_sep.hour, total_sep.minute, total_sep.second))",
"def run():\n with tf.Graph().as_default():\n with tf.device('/cpu:0'):\n image_bytes_feed = tf.placeholder(dtype=tf.string)\n\n logits, resized_image, endpoints = _get_joint_position_inference_graph(\n image_bytes_feed)\n\n session = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir=RESTORE_PATH)\n assert latest_checkpoint is not None\n\n variables_to_restore = {\n var.op.name.replace('resnet_v1_50_pyramid', 'pyramid'): var for var in tf.global_variables()\n }\n restorer = tf.train.Saver(var_list=variables_to_restore)\n restorer.restore(sess=session, save_path=latest_checkpoint)\n\n request_handler = TFHttpRequestHandlerFactory(session,\n image_bytes_feed,\n logits,\n resized_image,\n endpoints)\n server_address = ('localhost', 8765)\n httpd = http.server.HTTPServer(server_address, request_handler)\n httpd.socket = ssl.wrap_socket(httpd.socket,\n keyfile='./domain.key',\n certfile='./signed.crt',\n server_side=True)\n\n print('Serving!')\n httpd.serve_forever()",
"def start(self):\n logger.info(\"Launching opa runner\")\n self._run_task = asyncio.create_task(self._run())",
"def train_online(args, cfg):\n data_path = args.data_path\n seq_name = args.seq_name\n seq_name_list = {\n 'blackswan': 1e-4,\n 'goat': 1e-4,\n 'car-shadow': 5e-6,\n 'cows': 5e-5,\n 'car-roundabout': 1e-5,\n 'paragliding-launch': 1e-4,\n 'horsejump-high': 1e-4,\n 'dance-twirl': 7e-6,\n 'drift-straight': 5e-9,\n 'motocross-jump': 7e-7,\n 'parkour': 1e-5,\n 'soapbox': 5e-6,\n 'camel': 7e-5,\n 'kite-surf': 1e-5,\n 'dog': 5e-7,\n 'libby': 1e-5,\n 'bmx-trees': 7e-5,\n 'breakdance': 5e-5,\n 'drift-chicane': 5e-7,\n 'scooter-black': 5e-8,\n }\n print(\"Start of Online Training, sequence: \" + seq_name)\n\n context.set_context(device_id=args.device_id)\n lr = seq_name_list[seq_name]\n epoch_size = cfg.to_epoch_size\n batch_size = cfg.to_batch_size\n\n print(f'lr:{lr}')\n\n save_dir = cfg.dirResult + '/online/' + args.seq_name\n if not os.path.exists(save_dir):\n os.makedirs(save_dir, exist_ok=True)\n\n\n dataset_train = create_dataset(mode=\"Train\",\n data_path=data_path,\n batch_size=batch_size,\n seq_name=seq_name,\n num_of_workers=4,\n num_of_epoch=1)\n\n batch_num = dataset_train.get_dataset_size()\n print(f'batch_num:{batch_num}')\n\n net = OSVOS()\n param_dict = load_checkpoint(args.parent_ckpt_path)\n load_param_into_net(net, param_dict)\n net.set_train()\n\n learning_rate = []\n warm_up = [lr/ math.floor(epoch_size / 5) * (i + 1) for _ in range(batch_num) for i in\n range(math.floor(epoch_size / 5))]\n shrink = [lr / (16 * (i + 1)) for _ in range(batch_num)\n for i in range(math.floor(epoch_size * 2 / 5))]\n normal_run = [lr for _ in range(batch_num) for i in\n range(epoch_size - math.floor(epoch_size / 5) - math.floor(epoch_size * 2 / 5))]\n learning_rate = learning_rate + warm_up + normal_run + shrink\n opt = nn.Adam(net.trainable_params(),\n learning_rate=learning_rate, use_nesterov=True, weight_decay=1e-5)\n\n net_loss = ClassBalancedCrossEntropyLoss(online=True)\n loss_scale_manager = FixedLossScaleManager(1024, drop_overflow_update=False)\n model = 
Model(net, loss_fn=net_loss, optimizer=opt, loss_scale_manager=loss_scale_manager)\n time_cb = TimeMonitor(data_size=batch_num)\n loss_cb = LossMonitor(per_print_times=batch_num)\n cb = [time_cb, loss_cb]\n\n config_ck = CheckpointConfig(keep_checkpoint_max=10, saved_network=net)\n ckpoint_cb = ModelCheckpoint(prefix='checkpoint_online', directory=save_dir, config=config_ck)\n cb.append(ckpoint_cb)\n\n print(\"start train...\")\n start = time.time()\n model.train(epoch_size, dataset_train, callbacks=cb)\n end = time.time()\n print(f\"train success, use time {(end-start)/60} minutes\")",
"def shell_train(global_profile, profiles):\n run_shell(global_profile, profiles, 'pyspark_train')",
"def run_train():\n parser = argparse.ArgumentParser(description=\"GPT training\")\n parser.add_argument('--device_id', type=int, default=0, help=\"Device id, default is 0.\")\n parser.add_argument(\"--device_num\", type=int, default=1, help=\"Use device nums, default is 1.\")\n parser.add_argument(\"--distribute\", type=str, default=\"false\", choices=[\"true\", \"false\"],\n help=\"Run distribute, default is false.\")\n parser.add_argument(\"--optimizer\", type=str, default=\"adam\", choices=[\"adam\", \"lamb\"],\n help=\"select which optimizer to be used, default adam\")\n parser.add_argument(\"--epoch_size\", type=int, default=10, help=\"Epoch size, default is 10.\")\n parser.add_argument(\"--warmup_step\", type=int, default=10000, help=\"Warmup step, default is 10000.\")\n parser.add_argument(\"--data_path\", type=str, default=\"\", help=\"Data path of your MindRecord files.\")\n parser.add_argument(\"--start_lr\", type=float, default=\"5e-5\", help=\"Start learning rate, default is 5e-5.\")\n parser.add_argument(\"--end_lr\", type=float, default=\"1e-10\", help=\"End learning rate, default is 1e-10.\")\n parser.add_argument(\"--sink_size\", type=int, default=100, help=\"Sink size for every iteration, default is 100\")\n parser.add_argument(\"--model_parallel_num\", type=int, default=8, help=\"Num of model parallel, default is 8\")\n\n\n args_opt = parser.parse_args()\n device_id = int(os.getenv(\"DEVICE_ID\", '0'))\n context.set_context(mode=context.GRAPH_MODE, device_target=\"Ascend\", device_id=device_id)\n if args_opt.distribute == \"true\":\n D.init()\n device_num = args_opt.device_num\n rank = device_id % device_num\n print(\"device_id is {}, rank_id is {}\".format(device_id, rank))\n\n context.reset_auto_parallel_context()\n context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,\n device_num=device_num)\n\n else:\n rank = 0\n device_num = 1\n\n config = GPTConfig(batch_size=4,\n seq_length=1024,\n 
vocab_size=50257,\n embedding_size=1024,\n num_layers=24,\n num_heads=16,\n expand_ratio=4,\n post_layernorm_residual=False,\n dropout_rate=0.1,\n compute_dtype=mstype.float16,\n use_past=False)\n gpt = GPT(config)\n model_parallel_num = args_opt.model_parallel_num\n data_parallel_num = int(device_num / model_parallel_num)\n parallel_config = TransformerOpParallelConfig(data_parallel=data_parallel_num,\n model_parallel=model_parallel_num)\n loss = CrossEntropyLoss(parallel_config.dp_mp_config)\n gpt_with_loss = GPTWithLoss(gpt, loss)\n\n ds = create_dataset(config.batch_size, data_path=args_opt.data_path, device_num=device_num, rank=rank)\n\n\n epoch_num = args_opt.epoch_size\n step_per_epoch = ds.get_dataset_size()\n\n lr = LearningRate(learning_rate=args_opt.start_lr,\n end_learning_rate=args_opt.end_lr,\n warmup_steps=args_opt.warmup_step,\n decay_steps=epoch_num*step_per_epoch)\n\n decay_filter = lambda x: 'layernorm' not in x.name.lower() and \"bias\" not in x.name.lower()\n params = gpt.trainable_params()\n decay_params = list(filter(decay_filter, params))\n other_params = list(filter(lambda x: not decay_filter(x), params))\n group_params = [{'params': decay_params, 'weight_decay': 1e-2},\n {'params': other_params, 'weight_decay': 0.0},\n {'order_params': params}]\n\n if args_opt.optimizer == \"lamb\":\n optimizer = nn.Lamb(group_params, learning_rate=lr)\n else:\n optimizer = nn.AdamWeightDecay(group_params, learning_rate=lr)\n\n callback_size = args_opt.sink_size\n actual_epoch_num = int(epoch_num * step_per_epoch/callback_size)\n callback = [TimeMonitor(callback_size), LossMonitor(callback_size)]\n\n config_ck = CheckpointConfig(save_checkpoint_steps=step_per_epoch, keep_checkpoint_max=1)\n ckpoint_cb = ModelCheckpoint(prefix=\"GPT2\", config=config_ck)\n callback.append(ckpoint_cb)\n\n\n update_cell = DynamicLossScaleUpdateCell(loss_scale_value=1024,\n scale_factor=2,\n scale_window=1000)\n\n gpt_with_grads = 
GPTTrainOneStepWithLossScaleCell(gpt_with_loss, optimizer=optimizer,\n scale_update_cell=update_cell)\n\n\n model = Model(gpt_with_grads)\n model.train(actual_epoch_num, ds, callbacks=callback, dataset_sink_mode=True, sink_size=callback_size)",
"def launch_inference(self):\n\n self.logger.info('Beginning to submit inference tasks')\n # Make a folder for the models\n model_folder = self.output_dir.joinpath('models')\n model_folder.mkdir(exist_ok=True)\n \n # Submit the chunks to the workflow engine\n for mid in range(len(self.mpnns)):\n # Get a model that is ready for inference\n model = self.ready_models.get()\n \n # Convert it to a pickle-able message\n model_msg = MPNNMessage(model)\n \n # Proxy it once, to be used by all inference tasks\n model_msg_proxy = ps.store.get_store(self.ps_names['infer']).proxy(model_msg, key=f'model-{mid}-{self.inference_batch}')\n \n # Run inference with all segements available\n for cid, (chunk, chunk_msg) in enumerate(zip(self.inference_chunks, self.inference_proxies)):\n self.queues.send_inputs([model_msg_proxy], chunk_msg,\n topic='infer', method='evaluate_mpnn',\n keep_inputs=False,\n task_info={'chunk_id': cid, 'chunk_size': len(chunk), 'model_id': mid})\n self.logger.info('Finished submitting molecules for inference')",
"async def run_runtime(self) -> None:\n self._state.set(RuntimeStates.starting)\n await asyncio.gather(\n self._start_multiplexer(), self._start_agent_loop(), self._start_storage()\n )",
"def main_process_start():\n global MAIN_PROCESS, socketio\n\n if not MAIN_PROCESS:\n if request.method == \"POST\":\n MAIN_PROCESS = eventlet.spawn(main_run, experiment_description=request.json)\n else:\n MAIN_PROCESS = eventlet.spawn(main_run)\n logger.info(request.method)\n time.sleep(0.1)\n\n return main_process_status()",
"def train(self):\n print(\"Spawning and initializing communication...\")\n # Spawn processes\n self._spawn()\n\n # Initialize communication\n for proc in self.processes:\n proc.init_communication.remote()\n\n # Run main training loop\n print(\"Running main training loop...\")\n run_procs = [proc.run.remote() for proc in self.processes]\n futures = ray.get(run_procs)\n\n # Retreive workers' data and write to wandb\n # NOTE: Logger logs the mean scores of each episode per update step\n if self.args.log:\n worker_logs = [f for f in futures if f is not None]\n self.logger.write_worker_log.remote(\n worker_logs, self.hyper_params.worker_update_interval\n )\n print(\"Exiting training...\")",
"def run_local():\n run_all_tasks()",
"def launch(self):\n # Make it easy to run TensorBoard inside other programs, e.g. Colab.\n server = self._make_server()\n thread = threading.Thread(target=server.serve_forever, name='GRTensorBoard')\n thread.daemon = True\n thread.start()\n return server.get_url()",
"def setup_and_run():\r\n tf.config.threading.set_inter_op_parallelism_threads(num_threads=1)\r\n tf.config.threading.set_intra_op_parallelism_threads(num_threads=1)\r\n\r\n current_time = int(time.time() * 1000)\r\n setup_id = hashlib.md5(str(current_time).encode())\r\n\r\n # --------------------------\r\n # definition of the different parameters for which PINNs shall be trained\r\n setup_table_names = ['N_datapoints',\r\n 'n_runge_kutta_stages',\r\n 'seed_tensorflow',\r\n 'seed_numpy']\r\n\r\n # elements to be usable in \"itertools.product(*parameters)\"\r\n N_datapoints = [50, 100, 200, 1000]\r\n n_runge_kutta_stages = [4, 8, 16, 32]\r\n seed_tensorflow = np.random.randint(0, 1000000, 7).tolist()\r\n seed_numpy = np.random.randint(0, 1000000, 7).tolist()\r\n\r\n parameters = [N_datapoints,\r\n n_runge_kutta_stages,\r\n seed_tensorflow,\r\n seed_numpy]\r\n\r\n # create the 'setup_table' that contains a combination of all the parameters\r\n setup_table = pd.DataFrame(itertools.product(*parameters), columns=setup_table_names)\r\n\r\n # add setupID and a quasi unique simulation_id\r\n setup_table.insert(0, \"setupID\", setup_id.hexdigest())\r\n\r\n simulation_ids_unhashed = current_time + 1 + setup_table.index.values\r\n simulation_ids = []\r\n for simulation_id in simulation_ids_unhashed:\r\n simulation_ids_hashed = hashlib.md5(str(simulation_id).encode())\r\n simulation_ids.append(simulation_ids_hashed.hexdigest())\r\n\r\n setup_table.insert(1, \"simulation_id\", simulation_ids)\r\n\r\n # save the setup_table\r\n with open(global_PATHs.PATH_setup_tables / f'setupID_{setup_id.hexdigest()}.pickle', \"wb\") as f:\r\n pickle.dump(setup_table, f)\r\n\r\n print('Created setup table with %i entries' % setup_table.shape[0])\r\n\r\n # map the relevant variables for the parallelisation\r\n starmap_variables = [(simulation_id, N_datapoints, n_runge_kutta_stages, seed_tensorflow, seed_numpy) for\r\n (simulation_id, N_datapoints, n_runge_kutta_stages, seed_tensorflow, 
seed_numpy) in\r\n zip(setup_table['simulation_id'], setup_table['N_datapoints'],\r\n setup_table['n_runge_kutta_stages'],\r\n setup_table['seed_tensorflow'], setup_table['seed_numpy'])]\r\n\r\n # train the models in parallel\r\n with mp.Pool(4) as pool:\r\n pool.starmap(train_model, starmap_variables)\r\n\r\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Download the trained model if any or reports 404 when the model is not available
|
def get_model():
if _executor.futures.running(TRAINING_KEY):
return jsonify({'error': "Model is not ready"}), 404
model_path = "{0}".format(GIT_COMMIT)
if os.path.exists(model_path):
file = open(model_path, 'rb')
return send_file(filename_or_fp=file,
mimetype="octet-stream",
attachment_filename=model_path,
as_attachment=True), 200
else:
return jsonify({'error': "Model could not be found"}), 404
|
[
"def check_model():\n\n if not os.path.exists(MODEL_PICKLE_PATH):\n print('[*] Beginning model download from', MODEL_PICKLE_REMOTE_URL)\n download_file(MODEL_PICKLE_REMOTE_URL, MODEL_PICKLE_PATH)",
"def maybe_download():\r\n\r\n print(\"Downloading Inception 5h Model ...\")\r\n download.maybe_download_and_extract(url=data_url, download_dir=data_dir)",
"def download_model():\n # path = '/home/tomas/code/tomasaltilio/Food_Detective/ResNET_acc32'\n path = 'gs://food-models-le-wagon/ResNET_acc32/'\n model = models.load_model(path)\n return model",
"def check_cached_model():\n\n if not os.path.exists(CACHED_MODEL):\n print('[*] Beginning download of cached model from', CACHED_MODEL_REMOTE_URL)\n download_file(CACHED_MODEL_REMOTE_URL, CACHED_MODEL)",
"def _download_model(self) -> None:\n if not self.interactive:\n if not self.silent:\n print(\n f\"CLTK message: Going to download required Stanza models to ``{self.model_path}`` ...\"\n ) # pragma: no cover\n stanza.download(lang=self.stanza_code, package=self.treebank)\n else:\n print( # pragma: no cover\n \"CLTK message: This part of the CLTK depends upon the Stanza NLP library.\"\n ) # pragma: no cover\n dl_is_allowed = query_yes_no(\n f\"CLTK message: Allow download of Stanza models to ``{self.model_path}``?\"\n ) # type: bool\n if dl_is_allowed:\n stanza.download(lang=self.stanza_code, package=self.treebank)\n else:\n raise CLTKException(\n f\"Download of necessary Stanza model declined for '{self.language}'. Unable to continue with Stanza's processing.\"\n )\n # if file model still not available after attempted DL, then raise error\n if not file_exists(self.model_path):\n raise FileNotFoundError(\n \"Missing required models for ``stanza`` at ``{0}``.\".format(\n self.model_path\n )\n )",
"def download_and_extract_pretrained_model(args):\n if tf.gfile.Exists(os.path.join(args.checkpoints_dir, args.model_name + '.ckpt')):\n return\n if not tf.gfile.Exists(args.checkpoints_dir):\n tf.gfile.MakeDirs(args.checkpoints_dir)\n\n url_base = \"http://download.tensorflow.org/models/\"\n model_tarballs = {\n \"inception_v1\" : \"inception_v1_2016_08_28.tar.gz\",\n \"inception_v2\" : \"inception_v2_2016_08_28.tar.gz\",\n \"inception_v3\" : \"inception_v3_2016_08_28.tar.gz\",\n \"inception_v4\" : \"inception_v4_2016_09_09.tar.gz\",\n }\n url = url_base + model_tarballs[args.model_name]\n dataset_utils.download_and_uncompress_tarball(url, args.checkpoints_dir)",
"def cache_pretrained_model(self):\n if utils.get_cache_entry(self.pretrained_model_cache_entry) is None:\n logger.info(\n \"downloading and caching pretrained model from tensorflow website\"\n )\n logger.info(\"url: %s\", self.pretrained_model_url)\n utils.download_and_extract_url_tarball_to_cache_dir(\n self.pretrained_model_url, self.pretrained_model_cache_entry\n )",
"def download_pretrained_model(tag, download_dir='.'):\n assert tag in PRETRAINED_MODEL_DICT, f\"{tag} model does not exists.\"\n id_ = PRETRAINED_MODEL_DICT[tag]\n output_path = f\"{download_dir}/{tag}.tar.gz\"\n os.makedirs(f\"{download_dir}\", exist_ok=True)\n if not os.path.exists(output_path):\n gdown.download(f\"https://drive.google.com/uc?id={id_}\", output_path, quiet=False)\n\n with tarfile.open(output_path, 'r:*') as tar:\n model_folder = tar.getnames()[0]\n tar.extractall(download_dir)\n model_path = os.path.join(download_dir, model_folder)\n assert os.path.exists(os.path.join(model_path, 'args.pth')), 'args.pth file nor found. please use default checkpointing process'\n assert os.path.isdir(model_path), 'Tar file contains more than main folder, please use default checkpointing process'\n\n return model_path",
"def _download_model(self, lang_code, version):\n \n model_name = '{}-{}'.format(lang_code, version)\n model_path_cloud = os.path.join(self.cloud_path, '{}.zip'.format(model_name))\n model_path_local = os.path.join(self.disk_path, '{}.zip'.format(model_name))\n \n # Download and extract models for provided language. \n self._download_and_extract_lang_model(model_path_cloud, model_path_local) \n self.metadata.read(os.path.join(self.disk_path,lang_code+\"-\"+str(version),\"metadata.json\"))\n \n # Download Facebook embeddings based on the metadata read from the model\n self._download_embeddings(self.metadata.embeddings_remote_link, self.metadata.embeddings_file_name)\n sys.stdout.write(\"\\n\")",
"def download_model(model_date, model_name):\n\n model_file = model_name + '.tar.gz'\n url = os.path.join('http://download.tensorflow.org/models/object_detection/tf2',\n model_date,\n model_file)\n\n # Download model\n urllib.request.urlretrieve(url, model_file)\n\n # Untar and clean\n tar = tarfile.open(model_file)\n tar.extractall()\n tar.close()\n os.remove(model_file)",
"def download_pretrained_model(tag, download_dir=None):\r\n assert tag in PRETRAINED_MODEL_LIST, f\"{tag} does not exists.\"\r\n id_ = PRETRAINED_MODEL_LIST[tag]\r\n if download_dir is None:\r\n download_dir = os.path.expanduser(\"~/.cache/parallel_wavegan\")\r\n output_path = f\"{download_dir}/{tag}.tar.gz\"\r\n os.makedirs(f\"{download_dir}\", exist_ok=True)\r\n if not os.path.exists(output_path):\r\n # lazy load for compatibility\r\n import gdown\r\n\r\n gdown.download(f\"https://drive.google.com/uc?id={id_}\", output_path, quiet=False)\r\n with tarfile.open(output_path, 'r:*') as tar:\r\n for member in tar.getmembers():\r\n if member.isreg():\r\n member.name = os.path.basename(member.name)\r\n tar.extract(member, f\"{download_dir}/{tag}\")\r\n checkpoint_path = find_files(f\"{download_dir}/{tag}\", \"checkpoint*.pkl\")\r\n\r\n return checkpoint_path[0]",
"def download(self, path=None, mode=\"pretrained\", verbose=False,\n url=OPENDR_SERVER_URL + \"perception/pose_estimation/lightweight_open_pose/\"):\n valid_modes = [\"weights\", \"pretrained\", \"test_data\"]\n if mode not in valid_modes:\n raise UserWarning(\"mode parameter not valid:\", mode, \", file should be one of:\", valid_modes)\n\n if path is None:\n path = self.temp_path\n\n if not os.path.exists(path):\n os.makedirs(path)\n\n if mode == \"pretrained\":\n # Create model's folder\n path = os.path.join(path, \"openpose_default\")\n if not os.path.exists(path):\n os.makedirs(path)\n\n if verbose:\n print(\"Downloading pretrained model...\")\n\n # Download the model's files\n if self.backbone == \"mobilenet\":\n if not os.path.exists(os.path.join(path, \"openpose_default.json\")):\n file_url = os.path.join(url, \"openpose_default/openpose_default.json\")\n urlretrieve(file_url, os.path.join(path, \"openpose_default.json\"))\n if verbose:\n print(\"Downloaded metadata json.\")\n else:\n if verbose:\n print(\"Metadata json file already exists.\")\n if not os.path.exists(os.path.join(path, \"openpose_default.pth\")):\n file_url = os.path.join(url, \"openpose_default/openpose_default.pth\")\n urlretrieve(file_url, os.path.join(path, \"openpose_default.pth\"))\n else:\n if verbose:\n print(\"Trained model .pth file already exists.\")\n elif self.backbone == \"mobilenetv2\":\n raise UserWarning(\"mobilenetv2 does not support pretrained model.\")\n elif self.backbone == \"shufflenet\":\n raise UserWarning(\"shufflenet does not support pretrained model.\")\n if verbose:\n print(\"Pretrained model download complete.\")\n\n elif mode == \"weights\":\n if verbose:\n print(\"Downloading weights file...\")\n if self.backbone == \"mobilenet\":\n if not os.path.exists(os.path.join(self.temp_path, \"mobilenet_sgd_68.848.pth.tar\")):\n file_url = os.path.join(url, \"mobilenet_sgd_68.848.pth.tar\")\n urlretrieve(file_url, os.path.join(self.temp_path, 
\"mobilenet_sgd_68.848.pth.tar\"))\n if verbose:\n print(\"Downloaded mobilenet weights.\")\n else:\n if verbose:\n print(\"Weights file already exists.\")\n elif self.backbone == \"mobilenetv2\":\n if not os.path.exists(os.path.join(self.temp_path, \"mobilenetv2_1.0-f2a8633.pth.tar\")):\n file_url = os.path.join(url, \"mobilenetv2_1.0-f2a8633.pth.tar\")\n urlretrieve(file_url, os.path.join(self.temp_path, \"mobilenetv2_1.0-f2a8633.pth.tar\"))\n if verbose:\n print(\"Downloaded mobilenetv2 weights.\")\n else:\n if verbose:\n print(\"Weights file already exists.\")\n elif self.backbone == \"shufflenet\":\n if not os.path.exists(os.path.join(self.temp_path, \"shufflenet.pth.tar\")):\n file_url = os.path.join(url, \"shufflenet.pth.tar\")\n urlretrieve(file_url, os.path.join(self.temp_path, \"shufflenet.pth.tar\"))\n if verbose:\n print(\"Downloaded shufflenet weights.\")\n else:\n if verbose:\n print(\"Weights file already exists.\")\n if verbose:\n print(\"Weights file download complete.\")\n\n elif mode == \"test_data\":\n if verbose:\n print(\"Downloading test data...\")\n if not os.path.exists(os.path.join(self.temp_path, \"dataset\")):\n os.makedirs(os.path.join(self.temp_path, \"dataset\"))\n if not os.path.exists(os.path.join(self.temp_path, \"dataset\", \"image\")):\n os.makedirs(os.path.join(self.temp_path, \"dataset\", \"image\"))\n # Download annotation file\n file_url = os.path.join(url, \"dataset\", \"annotation.json\")\n urlretrieve(file_url, os.path.join(self.temp_path, \"dataset\", \"annotation.json\"))\n # Download test image\n file_url = os.path.join(url, \"dataset\", \"image\", \"000000000785.jpg\")\n urlretrieve(file_url, os.path.join(self.temp_path, \"dataset\", \"image\", \"000000000785.jpg\"))\n\n if verbose:\n print(\"Test data download complete.\")",
"def download_pretrained_models(\n models_root_dir='./models',\n pretrained_models_url=PRETRAINED_MODELS_URL):\n tf.gfile.MakeDirs(models_root_dir)\n zip_path = os.path.join(\n models_root_dir, os.path.basename(pretrained_models_url))\n if os.path.isfile(zip_path):\n tf.logging.info('%s already exists, using cached copy', zip_path)\n else:\n tf.logging.info('Downloading pretrained models from %s...',\n pretrained_models_url)\n urllib.urlretrieve(pretrained_models_url, zip_path)\n tf.logging.info('Download complete.')\n tf.logging.info('Unzipping %s...', zip_path)\n with zipfile.ZipFile(zip_path) as models_zip:\n models_zip.extractall(models_root_dir)\n tf.logging.info('Unzipping complete.')",
"def download():\n ResNet50(weights='imagenet', include_top=False)",
"def download(lang: str) -> None:\n _check_language(lang)\n try:\n _check_models_dir(lang)\n except Exception:\n os.makedirs(MODELS_DIR)\n if LANGUAGES[lang] in os.listdir(MODELS_DIR):\n print(f\"Already downloaded a model for the '{lang}' language\")\n return\n url = f\"{BASE_URL}/{LANGUAGES[lang]}\"\n filename = os.path.join(MODELS_DIR, LANGUAGES[lang])\n urllib.request.urlretrieve(url=url, filename=filename)\n print(f\"Downloaded pre-trained UDPipe model for '{lang}' language\")",
"def _load_inception_from_url(inception_url: str) -> nn.Module:\n inception_url = inception_url if inception_url else TERO_INCEPTION_URL\n print_log(f'Try to download Inception Model from {inception_url}...',\n 'current')\n try:\n path = download_from_url(inception_url, dest_dir=MMAGIC_CACHE_DIR)\n print_log('Download Finished.', 'current')\n return _load_inception_from_path(path)\n except Exception as e:\n print_log(f'Download Failed. {e} occurs.', 'current')\n return None",
"def download_model(model_id, file_format=\"json\", save=True, path=\".\"):\n\n if save:\n response = requests.get(\"http://bigg.ucsd.edu/static/models/%s.%s\" % (model_id, file_format), stream=True)\n response.raise_for_status()\n with open(os.path.join(path, \"%s.%s\" % (model_id, file_format)), \"wb\") as model_file:\n for block in response.iter_content(1024):\n model_file.write(block)\n else:\n response = requests.get(\"http://bigg.ucsd.edu/static/models/%s.json\" % model_id, stream=True)\n response.raise_for_status()\n return model_from_dict(response.json())",
"def load_model(self):\n\n # getting model name given checkpoint\n if(self.checkpoint<0):\n model_name = \"model_trained\"\n else:\n model_name = f\"model_epoch_{self.checkpoint}\"\n path_to_model = os.path.join(self.models_path, model_name)\n\n # making sure the model exists\n if(not os.path.exists(path_to_model)):\n print(\"ERROR!\")\n print(f\"Model: {model_name} was not found in path {self.models_path}\")\n exit()\n\n # creating model architecture\n # setting up the device\n torch.backends.cudnn.fastest = True\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n # initializing the model and loading the state dicitionary\n model = model_setup.setup_model(exp_data=self.exp_data, exp_path=self.exp_path)\n model.load_state_dict(torch.load(path_to_model))\n self.model = model.to(self.device)\n\n # setting up model hyper-parameters\n self.optimizer, self.loss_function, self.scheduler = model_setup.hyperparameter_setup(self.exp_data, self.model)\n return",
"def test_download_dest_default(self):\n\n model_path = download_and_checksum_mlmodel(TestModel.TEST_VALID)\n target_path = os.path.join(LIBRARY_DIR, 'valid.model')\n self.assertEqual(model_path, target_path)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Reorders x_in based on prev hyp ids.
|
def ReOrderHyps(x_in):
if isinstance(x_in, tf.Tensor) and x_in.shape.ndims > 0:
# For rank > 1 tensors we make use of an efficient matmul based gather
# on tpu that takes in account the range of the values. For R1, we
# rely on the tf.gather and xla to optimize it efficiently for R1
# layout.
if x_in.shape.ndims > 1:
if p.batch_major_state:
num_hyps = tf.shape(old_hyp_ids)[0]
x_out = beam_search_tpu_ops.fast_gather(
x_in,
old_hyp_ids,
num_hyps,
max_value=None,
batch_major_state=p.batch_major_state)
else:
# Use corrected indices only here for batch major compute as
# key/value caches are the states being affected.
correct_old_hyp_ids = (
old_hyp_ids_in_cache_order
if p.batch_major_compute else old_hyp_ids)
def _GatherStep(x_in, t):
"""Gather for one time step.
Args:
x_in: in the shape of [T, B, ...] we first get slice(t) from the
tensors, then gather old_hyp_ids from the slice and write the
interpolated slice inplace to update the original x_in.
t: current time step
Returns:
Updated x_in and time step
"""
x = tf.gather(tf.gather(x_in, t), correct_old_hyp_ids)
return inplace_ops.alias_inplace_update(x_in, t, x), t + 1
x_out, _ = tf.while_loop(lambda _, t: t <= cur_step, _GatherStep,
(x_in, tf.zeros([], tf.int32)))
else:
x_out = tf.gather(x_in, old_hyp_ids)
x_out.set_shape(x_in.get_shape())
return x_out
else:
return x_in
|
[
"def reorder_series_by_ids(self, neworder, *, inplace=False):\n if inplace:\n out = self\n else:\n out = self.copy()\n\n neworder = [self.series_ids.index(x) for x in neworder]\n\n oldorder = list(range(len(neworder)))\n for oi, ni in enumerate(neworder):\n frm = oldorder.index(ni)\n to = oi\n utils.swap_rows(out._data, frm, to)\n out._series_ids[frm], out._series_ids[to] = out._series_ids[to], out._series_ids[frm]\n # TODO: re-build series tags (tag system not yet implemented)\n oldorder[frm], oldorder[to] = oldorder[to], oldorder[frm]\n\n out.__renew__()\n return out",
"def _reorder_series_by_idx(self, neworder, inplace=False):\n\n if inplace:\n out = self\n else:\n out = self.copy()\n\n oldorder = list(range(len(neworder)))\n for oi, ni in enumerate(neworder):\n frm = oldorder.index(ni)\n to = oi\n utils.swap_rows(out._data, frm, to)\n out._series_ids[frm], out._series_ids[to] = out._series_ids[to], out._series_ids[frm]\n # TODO: re-build series tags (tag system not yet implemented)\n oldorder[frm], oldorder[to] = oldorder[to], oldorder[frm]\n out.__renew__()\n\n return out",
"def _data_reorder(self):\n temp_log = self._get_temper_switches()\n runner, trj_frame_incr = self._get_swap_rates()\n c = 0\n d = self.data.copy()\n for i in range(0, temp_log.shape[0], runner):\n if runner != 1:\n cr = slice(c, c+1, 1)\n c += 1\n else:\n cr = slice(trj_frame_incr*(i), trj_frame_incr*(i+1), 1)\n self.data[temp_log[i, 1:], cr, :] = d[:, cr, :]\n return self.data.copy()",
"def orderPlots(self, ids):\n #-------------------------\n order = []\n plots = []\n for id in ids:\n n = self._plots.get(str(id), -1)\n if n >= 0:\n order.append(n)\n plots.append(self._plotlist[n])\n for i, n in enumerate(sorted(order)):\n self._plotlist[n] = plots[i]\n self._plots[plots[i][0]] = n\n self.update()",
"def reorder_incremental_state(\n self,\n incremental_state: Dict[str, Dict[str, Optional[Tensor]]],\n new_order: Tensor,\n ):\n self.self_attn.reorder_incremental_state(incremental_state, new_order)\n\n if self.encoder_attn is not None:\n self.encoder_attn.reorder_incremental_state(incremental_state, new_order)\n\n if self.num_cross_attentions > 0:\n [attn.reorder_incremental_state(incremental_state, new_order) for attn in self.cross_attentions]\n #for i in range(len(self.cross_attentions)):\n # self.cross_attentions[i].reorder_incremental_state(incremental_state, new_order)",
"def _sort(self):\n self.objects = self.objects[np.lexsort((self.dx, self.dy))[::-1]]",
"def addOrder(intfTable, baselineTable):\n order = []\n\n for i in range(len(intfTable)):\n # Get indicies of scenes in intf\n mi = baselineTable[baselineTable['Date'] == intfTable['Master'][i]].index\n ri = baselineTable[baselineTable['Date'] == intfTable['Repeat'][i]].index\n order.append((ri - mi)[0])\n\n # Append column to imnput intfTable\n newIntfTable = intfTable\n newIntfTable['Order'] = order\n\n return newIntfTable",
"def _sort_prep(self):\n self._sort_outdated = True\n self._last_node_id = self.nodes[self.clineno]",
"def sorted_points_and_ids(xin, yin, zin, xperiod, yperiod, zperiod, \n approx_xcell_size, approx_ycell_size, approx_zcell_size):\n npts = len(xin)\n num_xdivs, xcell_size = determine_cell_size(xperiod, approx_xcell_size)\n num_ydivs, ycell_size = determine_cell_size(yperiod, approx_ycell_size)\n num_zdivs, zcell_size = determine_cell_size(zperiod, approx_zcell_size)\n ncells = num_xdivs*num_ydivs*num_zdivs\n\n ix = digitized_position(xin, xcell_size, num_xdivs)\n iy = digitized_position(yin, ycell_size, num_ydivs)\n iz = digitized_position(zin, zcell_size, num_zdivs)\n\n cell_ids = cell_id_from_cell_tuple(ix, iy, iz, num_ydivs, num_zdivs)\n cell_id_sorting_indices = np.argsort(cell_ids)\n\n cell_id_indices = np.searchsorted(cell_ids, np.arange(ncells), \n sorter = cell_id_sorting_indices)\n cell_id_indices = np.append(cell_id_indices, npts)\n\n xout = np.ascontiguousarray(xin[cell_id_sorting_indices], dtype=np.float64)\n yout = np.ascontiguousarray(yin[cell_id_sorting_indices], dtype=np.float64)\n zout = np.ascontiguousarray(zin[cell_id_sorting_indices], dtype=np.float64)\n\n cell_id_indices = np.ascontiguousarray(cell_id_indices, dtype=np.int64)\n\n return xout, yout, zout, cell_id_indices",
"def sort_by_id(self, increasing = True):\r\n self.neighbors.sort(reverse = not increasing)",
"def pre_arranged(self, pre_arranged):\n\n self._pre_arranged = pre_arranged",
"def reorderEvents(self):\n self.events.sort(key=lambda x: x.startTime, reverse=False)",
"def sortrows(data):\n\n ## FIX: this method assumes the data to be continuous! we now make sure of that explicitely\n data = sp.ascontiguousarray(data)\n ## XIF\n return sp.sort(\n data.view([('', data.dtype)] * data.shape[1]), axis=0\n ).view(data.dtype)",
"def updateorders(self):\n self.dataset = self.dataset.sort_values('InvoiceDate')\n self.dataset['Ordersep'] = self.dataset[['CustomerID', 'InvoiceDate']].groupby(['CustomerID']).InvoiceDate.apply(lambda x: x.diff()).fillna(0)\n self.dataset['Ordersep'] = self.dataset['Ordersep'].apply(lambda x: x.days)",
"def preorder(self,parent,child):\n pass",
"def reorder(objects, relative=int, back=bool, front=bool):\n pass",
"def get_ordered_page_items(self):\n items = self.queryset.all()\n if self.direction == 'prev':\n items.reverse()\n return items",
"def _permutation_to_jaxpr_order(jaxpr, submodules_in_call_order):\n permutation = []\n submodule_execution_index_by_name = {submodule.name: index for index, submodule in\n enumerate(submodules_in_call_order)}\n\n for eqn in jaxpr.eqns:\n execution_index = submodule_execution_index_by_name.pop(eqn.primitive.name, None)\n if execution_index is not None:\n permutation.append(execution_index)\n\n assert len(submodule_execution_index_by_name) == 0\n assert len(permutation) == len(submodules_in_call_order)\n\n return parametrized.inverse_permutation(permutation)",
"def front_x(t):\n initial = []\n for x in t:\n if x.startswith(\"x\"):\n initial.append(x)\n t.remove(x)\n initial.sort()\n t.sort()\n new = initial + t\n return new"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Use short_seq optimization when cur_step is smaller than limit.
|
def LoopContinueShort(cur_step, all_done, unused_step_ids,
unused_core_bs_states, unused_other_states_list):
return tf.math.logical_and(cur_step < p.short_seq_limit,
tf.math.logical_not(all_done))
|
[
"def create_short_sequence():\n\n return final_sequences('short')",
"def first_500(seq):\n\n return seq[:500]",
"def shortest_seq(seq,threshold,dbg=False):\n assert None != seq\n assert None != threshold\n assert len(seq) > 0\n s_cur,s_min = 0,0 # current sum and minimum sum found.\n ix,ix_min = 0,0\n ln,ln_min = len(seq),len(seq)\n cnt_neg = 0 # count of the negative number within the sequence.\n found = False # whether a solu has been found.\n for i in range(len(seq)):\n if s_cur > 0:\n s_cur += seq[i]\n if seq[i] < 0:\n cnt_neg += 1\n else:\n s_cur = seq[i]\n ix = i\n cnt_neg = 1 if seq[i] < 0 else 0\n\n if dbg: print(seq[ix:i+1],s_cur)\n\n if s_cur >= threshold: # found one solu.\n found = True\n # try to find a shorter one ending with seq[i] only when seq[i] > 0,\n # and there no shorter one if seq[i] <= 0.\n if seq[i] > 0:\n # no negative num within seq[ix:i+1], trim from left-size.\n if 0 == cnt_neg:\n if dbg: print('trim left size')\n while s_cur - seq[ix] >= threshold:\n s_cur -= seq[ix]\n ix += 1\n # negative exists, try find a shorter sequence ending\n # with seq[i] by constructing it backward starting from seq[i].\n else: # cnt_neg > 0\n if dbg: print('construct from right size')\n cnt_neg = 0\n s_tmp = 0\n for j in range(i,ix-1,-1):\n s_tmp += seq[j]\n if seq[j] < 0:\n cnt_neg += 1\n if s_tmp >= threshold: break\n # no need to continue the search when its length >= ln_min\n if i - j + 1 >= ln_min: break\n if ix < j and s_tmp >= threshold:\n ix = j\n s_cur = s_tmp\n if dbg: print('found shorter:',seq[ix:i+1],\"sum:%d, ln:%d\"%(s_cur,i-j+1))\n ln = i - ix + 1\n if ln < ln_min:\n if dbg: print('replace')\n ln_min = ln\n ix_min = ix\n s_min = s_cur\n if 1 == ln_min: # cannot be shorter.\n break\n if found:\n return (ix_min,ln_min,s_min)\n else:\n return None",
"def smoothstep(min, max, x):\n\n pass",
"def compute_short(self):\r\n #Check whether antenna/short start and ends have already been computed\r\n if self.start_short is None:\r\n self.find_data_chunks(switch_type='short')\r\n if self.end_antenna is None:\r\n self.find_data_chunks(switch_type='antenna')\r\n\r\n # TODO: automate freq dim detection in case the future changes\r\n # iterate over every short chunk\r\n short0 = np.zeros((len(self.end_short), 4096))\r\n short1 = np.zeros((len(self.end_short), 4096))\r\n for i in range(0, len(self.start_short)):\r\n start_i = self.start_short[i]\r\n end_i = self.end_short[i] + 1\r\n # select short measurments for this chunk of data and compute average\r\n short0[i, :] = np.average(self.data_dictionary[self.antenna]['pol0.scio'][start_i:end_i, :], axis=0)\r\n short1[i, :] = np.average(self.data_dictionary[self.antenna]['pol1.scio'][start_i:end_i, :], axis=0)\r\n\r\n # TODO: understand this bit. Deals with figuring out whether there are short measurements missing\r\n # IF missing interpolate\r\n # I am guessing this deals with some sort of fluke in the short measurements\r\n # if they are too long use the prizmatoid function to create a replacement\r\n # The function seems to go through all data even the attempt here is to only replace ONE averaged short measurement\r\n # And only pick out the solution that is relevant.\r\n # Should the loop be thrown away entirely and just be replaced with the pzt\r\n dif = np.diff(self.end_short)\r\n base = np.average(dif)\r\n for i in range(0, len(dif)):\r\n if dif[i] > base + base / 2:\r\n short0.insert(i + 1, interpolate_short(self.data_dictionary, antenna=self.antenna,\r\n polarization='pol0.scio')[i])\r\n short1.insert(i + 1, interpolate_short(self.data_dictionary, antenna=self.antenna,\r\n polarization='pol1.scio')[i])\r\n\r\n # Same here if the dataset doesn't end with a short measurement recompute similar to above\r\n # TODO optimise this in terms of using pzt interpolation and writing to the array\r\n # Give a subset of 
the complete data array??\r\n # Gives a different interpolation as compared to Kelly's code because it uses the trimmed flags!\r\n if self.end_short[-1] < self.end_antenna[-1]:\r\n old_pol0 = short0.copy()\r\n old_pol1 = short1.copy()\r\n\r\n short0 = np.zeros((old_pol0.shape[0] + 1, old_pol0.shape[1]))\r\n short1 = np.zeros((old_pol0.shape[0] + 1, old_pol0.shape[1]))\r\n\r\n short0[:-1, :] = old_pol0\r\n short1[:-1, :] = old_pol1\r\n short0[-1, :] = interpolate_short(self.data_dictionary, antenna=self.antenna, polarization='pol0.scio',\r\n trim=(0, 0))[-1]\r\n short1[-1, :] = interpolate_short(self.data_dictionary, antenna=self.antenna, polarization='pol1.scio',\r\n trim=(0, 0))[-1]\r\n self.short_pol0 = short0\r\n self.short_pol1 = short1\r\n return short0, short1",
"def create_long_sequence():\n\n return final_sequences('long')",
"def set_sequence(step=1):\r\n seq = [\r\n [1, 0, 0, 1],\r\n [1, 0, 0, 0],\r\n [1, 1, 0, 0],\r\n [0, 1, 0, 0],\r\n [0, 1, 1, 0],\r\n [0, 0, 1, 0],\r\n [0, 0, 1, 1],\r\n [0, 0, 0, 1]\r\n ]\r\n if step == 1:\r\n return seq\r\n elif step == 2:\r\n i = 1\r\n full_seq = []\r\n while i < len(seq):\r\n full_seq.append(seq[i])\r\n i += 2\r\n return full_seq",
"def _limit_helper(stream, limit):\n for value in stream:\n yield value\n if limit == 1:\n return\n else:\n limit = limit - 1 # FIXME",
"def _adapt_mini_batch_length(self,\n base_expected,\n mini_batch_length: int):\n if mini_batch_length == 1:\n return base_expected\n\n # The transformation is mostly about finding the corresponding\n # counterpart (indices) in the base case for each of the new positions\n # when mini_batch_length is greater than 1. We need to compute\n #\n # base_index: base_index[b, t] = k means the new value on batch b's t-th\n # step should be filled with the k-th of the counterpart in base\n # case expected data.\n #\n # We EXPLICITLY compute them instead duplicating the batch-computation\n # logic since this is unit test and this helps verify the\n # batch-computation logic from the main algorithm is correct.\n step_types = self._step_types()\n base_index = []\n for env_id in range(2):\n for start_pos in range(14):\n if start_pos + mini_batch_length > 14:\n break\n base_index.append([])\n for pos in range(start_pos, start_pos + mini_batch_length):\n base_index[-1].append(env_id * 14 + pos)\n base_index = torch.tensor(base_index, dtype=torch.int64)\n\n def _transform(path, x):\n return x.squeeze(dim=1)[base_index]\n\n return alf.nest.py_map_structure_with_path(_transform, base_expected)",
"def _run_short_rollout(self, base_state):\r\n # We change agent's mode to short rollout so that transitions will be stored\r\n # and agent's train op will be run\r\n self._agent.main_trajectory = False\r\n\r\n step_number = 0\r\n total_reward = 0\r\n # We have to switch epsilon settings to rollout\r\n self._agent.switch_epsilon_settings(main=False)\r\n\r\n env_cpy, obs, length = base_state\r\n self._environment.environment.unwrapped.restore_full_state(env_cpy)\r\n action = self._agent.begin_episode(obs)\r\n\r\n while True:\r\n observation, reward, is_terminal = self._run_one_step(action)\r\n total_reward += reward\r\n step_number += 1\r\n reward = np.clip(reward, -1, 1)\r\n\r\n if(is_terminal or step_number >= length):\r\n # We cut rollout without setting terminal flag\r\n # and storing final transition\r\n break\r\n\r\n action = self._agent.step(reward, observation)\r\n\r\n # We switch agent's settings back to main trajectory mode:\r\n self._agent.main_trajectory = True\r\n\r\n return step_number, total_reward",
"def prune_short(self, min_seqlen_perc=0.75):\n if sum(self.orig_seqlen) != 0:\n avg_seqlen = sum(self.orig_seqlen) / len(self.orig_seqlen)\n seq_len_cutoff = avg_seqlen * min_seqlen_perc\n else:\n for tax, seq in self.aln.items():\n seqlen = len(self.aln[tax].symbols_as_string())\n break\n seq_len_cutoff = seqlen * min_seqlen_perc\n prune = []\n aln_ids = set()\n for tax, seq in self.aln.items():\n aln_ids.add(tax.label)\n if len(seq.symbols_as_string().translate(None, \"-?\")) <= seq_len_cutoff:\n prune.append(tax)\n treed_taxa = set()\n for leaf in self.tre.leaf_nodes():\n treed_taxa.add(leaf.taxon.label)\n if prune:\n fi = open(\"{}/pruned_taxa\".format(self.workdir), 'a')\n fi.write(\"Taxa pruned from tree and alignment in prune short \"\n \"step due to sequence shorter than {}\\n\".format(seq_len_cutoff))\n for tax in prune:\n self.remove_taxa_aln_tre(tax.label)\n fi.write(\"{}, {}\\n\".format(tax.label, self.otu_dict[tax.label].get('^ot:originalLabel')))\n fi.close()\n for tax in prune:\n self.otu_dict[tax.label][\"^physcraper:status\"] = \"deleted in prune short\"\n # out-comented next line, as this does not run if we prune aln before placing new seq in tre\n # assert self.aln.taxon_namespace == self.tre.taxon_namespace\n assert treed_taxa.issubset(aln_ids)\n self.orig_seqlen = [len(self.aln[tax].symbols_as_string().replace(\"-\", \"\").replace(\"N\", \"\")) for tax in self.aln]\n self.trim()\n self._reconciled = 1",
"def drop_short_segments(self, min_length):\n min_length = parse_depth(min_length, check_positive=True, var_name=\"min_length\")\n wells = self.iter_level(-2)\n for well in wells:\n well.segments = [segment for segment in well if segment.length >= min_length]\n return self.prune()",
"def shorten_tail():\n\n global TAIL\n\n TAIL = TAIL.ahead\n TAIL.behind = None",
"def seq_generator(start=1):\n while True:\n yield start\n start += 1",
"def step_sequence(self, seq=[]):\n self.__stepSequence = seq",
"def ContinuousSplite(self,node,arg):\n subindex=node.index\n nt=subindex.size\n subset=self.dataset.loc[subindex,arg]\n subset.sort_values(inplace=True)\n subtarget=self.target.loc[subset.index]\n bestcut=subset.iloc[[0,1]].mean()\n i=partpoint=1\n maxeffect=(subtarget.iloc[0]**2)/nt+((subtarget.iloc[1:].sum())**2)/(nt*(nt-1))\n sl,sr=subtarget.iloc[0],subtarget.iloc[1:].sum()\n while i<=nt-2:\n sl+=subtarget.iloc[i]\n sr-=subtarget.iloc[i]\n if subset.iloc[i+1]>subset.iloc[i]:\n effect=((sl**2)/(i+1)+(sr**2)/(nt-i-1))/nt\n if effect>maxeffect:\n bestcut=subset.iloc[[i,i+1]].mean()\n maxeffect=effect\n partpoint=i\n i+=1\n Lindex=subset.index[:partpoint+1]\n Rindex=subset.index[partpoint+1:]\n return maxeffect,bestcut,Lindex,Rindex",
"def _n_step_lstm(self):",
"def min_output_buffer(self, i):\n return _raw_util.raw_divide_ff_sptr_min_output_buffer(self, i)",
"def expand_stage_range(nc, start, end):\n stages = [start, end]\n statements = [\n 'MATCH p=shortestPath((s:FBDV {short_form:\"%s\"})<-[:immediately_preceded_by*]-(e:FBDV {short_form:\"%s\"})) RETURN extract(x IN nodes(p) | x.short_form)' % (start, end)]\n r = nc.commit_list(statements)\n stages.append(r[0]['data'][0]['row'][0])\n return stages"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Util for getting nested strucutre of shapes from structure of tensors.
|
def _GetShapes(tensors, none_shapes=False):
shapes = []
for t in tf.nest.flatten(tensors):
shape = t.get_shape() if isinstance(t, tf.Tensor) else None
if none_shapes:
if shape:
shapes.append(tf.TensorShape([None] * len(shape)))
else:
shapes.append(tf.TensorShape(None))
else:
shapes.append(tf.TensorShape(shape))
return type(tensors)(tf.nest.pack_sequence_as(tensors, shapes))
|
[
"def nestshape(data):\n import ubelt as ub\n\n def _recurse(d):\n try:\n import torch\n except ImportError:\n torch = None\n if isinstance(d, dict):\n return ub.odict(sorted([(k, _recurse(v)) for k, v in d.items()]))\n\n clsname = type(d).__name__\n if 'Container' in clsname:\n meta = ub.odict(sorted([\n ('stack', d.stack),\n # ('padding_value', d.padding_value),\n # ('pad_dims', d.pad_dims),\n # ('datatype', d.datatype),\n ('cpu_only', d.cpu_only),\n ]))\n meta = ub.repr2(meta, nl=0)\n return {type(d).__name__ + meta: _recurse(d.data)}\n elif isinstance(d, list):\n return [_recurse(v) for v in d]\n elif isinstance(d, tuple):\n return tuple([_recurse(v) for v in d])\n elif torch is not None and isinstance(d, torch.Tensor):\n return d.shape\n elif isinstance(d, np.ndarray):\n return d.shape\n elif isinstance(d, (str, bytes)):\n return d\n elif isinstance(d, (int, float)):\n return d\n elif isinstance(d, slice):\n return d\n elif 'PolygonMasks' == clsname:\n # hack for mmdet\n return repr(d)\n elif 'BitmapMasks' == clsname:\n # hack for mmdet\n return repr(d)\n elif hasattr(d, 'shape'):\n return d.shape\n elif hasattr(d, 'items'):\n # hack for dict-like objects\n return ub.odict(sorted([(k, _recurse(v)) for k, v in d.items()]))\n elif d is None:\n return None\n else:\n raise TypeError(type(d))\n\n # globals()['_recurse'] = _recurse\n d = _recurse(data)\n return d",
"def _get_shape_str(shape):\n return '{}_{}'.format(*shape[:2])",
"def _shapes(x):\n def shape(x):\n try:\n return tuple([int(i) for i in x.shape])\n except Exception: # pylint: disable=broad-except\n return ()\n return tuple(nested_map(shape, x))",
"def describe_data_shape(data):\n def helper(data):\n if not isinstance(data, (list, tuple)):\n return 0, type(data).__name__\n else:\n result = type(data).__name__\n result += \" [{}]\".format(len(data))\n if len(data) > 0:\n child = data[0]\n child_nesting, child_result = helper(child)\n result += \" of \" + child_result\n else:\n child_nesting = 0\n return (child_nesting + 1), result\n\n nesting, result = helper(data)\n return \"Level {}: {}\".format(nesting, result)",
"def extract_shape(temp):\n tmp=[]\n for i in range(0, len(temp)):\n if type(temp[i]) == np.ndarray:\n a = np.shape(temp[i])\n if len(a) == 1: tmp.append(str(a[0]) + \"x1\")\n else: tmp.append(str(a[0]) + \"x\" + str(a[1]))\n else:\n tmp.append(temp[i])\n return tmp",
"def shape(self) -> S:",
"def layer_shapes(image_shape, model):\n shape = {model.layers[0].name: (None,) + image_shape,}\n\n for layer in model.layers[1:]:\n nodes = layer._inbound_nodes\n for node in nodes:\n inputs = [shape[lr.name] for lr in node.inbound_layers]\n if not inputs:\n continue\n shape[layer.name] = layer.compute_output_shape(inputs[0] if len(inputs) == 1 else inputs)\n\n return shape",
"def shape_from_tagged(node: TaggedDict) -> list[int]:\n if \"shape\" in node: # this should not be reached but lets make sure\n return node[\"shape\"]\n return [1] # scalar",
"def get_node_io_shapes(node, key):\n out_shape = []\n for shape in node.attr[key].list.shape:\n out_shape.append([dim.size for dim in shape.dim])\n return out_shape",
"def map_shape(x: Dict[Text, tf.Tensor]) -> Dict[Text, Sequence[int]]:\n return tf.nest.map_structure(lambda t: list(tf.shape(t).numpy()), x)",
"def shape_to_string(shape):\n s = str(shape).replace(' ', '')\n m = re.match(r'^\\((\\d+),\\)', s)\n if m:\n return m.group(1)\n return s",
"def _onnx_dims_to_tb_shape(onnx_tensor, tb_tensor):\n tb_tensor_shape = tensor_shape_pb2.TensorShapeProto()\n for dim in onnx_tensor.dims:\n tb_dim = tensor_shape_pb2.TensorShapeProto.Dim()\n tb_dim.size = dim\n tb_tensor_shape.dim.extend([tb_dim])\n tb_tensor.tensor_shape.CopyFrom(tb_tensor_shape)",
"def _internal_weight_shapes(self):\n coeff = 4 if self._use_lstm else 1\n shapes = []\n\n # Initial fully-connected layers.\n prev_dim = self._n_in\n for n_fc in self._fc_layers_pre:\n shapes.append([n_fc, prev_dim])\n if self._use_bias:\n shapes.append([n_fc])\n\n prev_dim = n_fc\n\n # Recurrent layers.\n for n_rec in self._rnn_layers:\n # Input-to-hidden\n shapes.append([n_rec*coeff, prev_dim])\n if self._use_bias:\n shapes.append([n_rec*coeff])\n\n # Hidden-to-hidden\n shapes.append([n_rec*coeff, n_rec])\n if self._use_bias:\n shapes.append([n_rec*coeff])\n\n if not self._use_lstm:\n # Hidden-to-output\n shapes.append([n_rec, n_rec])\n if self._use_bias:\n shapes.append([n_rec])\n\n prev_dim = n_rec\n\n # Fully-connected layers.\n for n_fc in self._fc_layers:\n shapes.append([n_fc, prev_dim])\n if self._use_bias:\n shapes.append([n_fc])\n\n prev_dim = n_fc\n\n return shapes",
"def get_shape_per_tensor(tensor_list):\n try:\n shape_per_tensor = torch.tensor([t.shape[:-1] for t in tensor_list], dtype=torch.long)\n except ValueError as err:\n ndim = tensor_list[0].ndim\n for i, t in enumerate(tensor_list):\n if t.ndim != ndim:\n raise ValueError(f\"Expected all tensors to have {ndim} dimensions \"\n f\"but got {t.ndim} at index {i}\")\n raise err # Unknown error\n return shape_per_tensor",
"def shape(x):\n\treturn tf.shape(x)",
"def disassemble_tree(obj: TensorLikeType) -> Tuple[TensorLikeType, List[Tensor]]:\n if obj is None:\n return MISSING_TENSOR, []\n elif isinstance(obj, Tensor):\n return None, [obj]\n elif isinstance(obj, (tuple, list)):\n keys = []\n values = []\n for item in obj:\n key, value = disassemble_tree(item)\n keys.append(key)\n values.extend(value)\n return (tuple(keys) if isinstance(obj, tuple) else keys), values\n elif isinstance(obj, dict):\n keys = {}\n values = []\n for name, item in obj.items():\n key, value = disassemble_tree(item)\n keys[name] = key\n values.extend(value)\n return keys, values\n elif isinstance(obj, TensorLike):\n attributes = variable_attributes(obj)\n keys = {}\n values = []\n for attr in attributes:\n key, value = disassemble_tree(getattr(obj, attr))\n keys[attr] = key\n values.extend(value)\n return copy_with(obj, **keys), values\n else:\n backend = choose_backend(obj)\n sizes = backend.staticshape(obj)\n shape = Shape(sizes, [f\"dim{i}\" for i in range(len(sizes))], [None] * len(sizes))\n shape.is_native_shape = True\n if backend.ndims(obj) != 0:\n warnings.warn(f\"Only scalar native tensors should be used in function inputs/outputs but got tensor with shape {backend.staticshape(obj)}. Consider using phi.math.Tensor instances instead. Using shape {shape}.\")\n return None, [NativeTensor(obj, shape)]",
"def get_tensor_shape(tensor):\n if not isinstance(tensor, tf.Tensor):\n raise ValueError('The input is not an instance of tf.Tensor.')\n\n shape_static = tensor.get_shape().as_list()\n shape_dynamic = tf.shape(tensor)\n\n shape = []\n for i, v in enumerate(shape_static):\n if v is None:\n v = shape_dynamic[i]\n shape.append(v)\n return shape",
"def _infer_raw_shape(tt_cores):\n num_dims = len(tt_cores)\n num_tensor_shapes = len(tt_cores[0].shape) - 2\n raw_shape = [[] for _ in range(num_tensor_shapes)]\n for dim in range(num_dims):\n curr_core_shape = tt_cores[dim].shape \n for i in range(num_tensor_shapes):\n raw_shape[i].append(curr_core_shape[i+1])\n for i in range(num_tensor_shapes):\n raw_shape[i] = list(raw_shape[i])\n\n return tuple(raw_shape)",
"def _flatten_nested_observations(self, observations, is_batched):\n\n def np_flatten(x):\n # Check if observations are batch, and if so keep the batch dimension and\n # flatten the all other dimensions into one.\n if is_batched:\n return np.reshape(x, [x.shape[0], -1])\n else:\n return np.reshape(x, [-1])\n\n # Flatten the individual observations if they are multi-dimensional and then\n # flatten the nested structure.\n flat_observations = [np_flatten(x) for x in tf.nest.flatten(observations)]\n axis = 1 if is_batched else 0\n return np.concatenate(flat_observations, axis=axis)",
"def get_shape(shape_x, shape_bias, axis_, num_axes, bias_from_blob):\n\n length_x = len(shape_x)\n length_bias = len(shape_bias)\n if bias_from_blob:\n if num_axes == -1:\n shape_left = [1] * axis_\n shape = shape_left + list(shape_bias)\n elif num_axes == 0:\n shape = [1] * length_x\n else:\n left_length = length_x - num_axes - axis_\n shape_left = [1] * axis_\n shape_right = [1] * left_length\n shape = shape_left + list(shape_bias) + shape_right\n else:\n if length_bias == 1 and shape_bias[0] == 1:\n shape = [1] * length_x\n else:\n left_length = length_x - length_bias - axis_\n shape_left = [1] * axis_\n shape_right = [1] * left_length\n shape = shape_left + list(shape_bias) + shape_right\n\n return shape"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns ranks in [0, len(x))
|
def compute_ranks(x):
assert x.ndim == 1
ranks = np.empty(len(x), dtype=int)
ranks[x.argsort()] = np.arange(len(x))
return ranks
|
[
"def compute_ranks(self, x):\n assert x.ndim == 1\n ranks = np.empty(len(x), dtype=int)\n ranks[x.argsort()] = np.arange(len(x))\n return ranks",
"def _columnRanks(u):\r\n\r\n out = np.zeros(u.shape)\r\n for j in np.arange(u.shape[1]):\r\n out[:, j] = _argrank(u[:, j])\r\n return out.astype(int)",
"def rank(x, axis = -1):\n\n temp = np.argsort(np.argsort(x, axis = axis), axis = axis)\n return temp + 1",
"def get_num_ranks(self):\n return constants.NUM_RANK",
"def getRank(self):\n \n rank = list(self.data)\n rank.sort( lambda x, y: -1 if x[VOTES] < y[VOTES]\n else (0 if x[VOTES] == y[VOTES] else 1),\n reverse = True\n )\n return rank",
"def get_ranking(self):\n d = self.get_ranks()\n return ['+'.join(p for p in d if d[p] == k) for k in range(1, self.get_number_of_players() + 1)]",
"def _argrank(vec):\r\n sorti = np.argsort(vec)\r\n ranks = np.empty(len(vec), int)\r\n try:\r\n ranks[sorti] = np.arange(len(vec))\r\n except IndexError:\r\n ranks[sorti.values] = np.arange(len(vec))\r\n return ranks",
"def borda_count(x, axis = -1): \n \n return rank(np.sum(x, axis = axis), axis = axis)",
"def mine_rank(nodes_list):\r\n return [node.get_rank() for node in nodes_list]",
"def count_points(ranks):\n\n points = 0\n for rank in ranks:\n if rank not in ['A', 'J', 'Q', 'K']:\n points += int(rank)\n elif rank in ['A']:\n points += 1\n else:\n points += 10\n\n return points",
"def generate_ranking(self, scores):\n ranking = sorted(\n range(self.nplayers),\n key=lambda i: numpy.median(scores[i]))\n return ranking",
"def extract_ranks(hand):\n substitutions = {\n 'T': '10',\n 'J': '11',\n 'Q': '12',\n 'K': '13',\n 'A': '14',\n }\n ranks = [card[0] for card in hand]\n for idx, card_rank in enumerate(ranks):\n if card_rank in substitutions.keys():\n ranks[idx] = substitutions[card_rank]\n ranks = sorted(map(int, ranks), reverse=True)\n if ranks == [14, 5, 4, 3, 2]: #check for ace low straight\n return [5, 4, 3, 2, 1]\n return ranks",
"def rank_order():\r\n \r\n numbers = [str(i) for i in range(2, 10)] + [\"0\"]\r\n royals = [\"J\", \"Q\", \"K\", \"A\"]\r\n rank_orders = numbers + royals\r\n \r\n return rank_orders",
"def _infer_tt_ranks(tt_cores):\n tt_ranks = []\n for i in range(len(tt_cores)):\n tt_ranks.append(tt_cores[i].shape[0])\n tt_ranks.append(tt_cores[-1].shape[-1])\n\n return list(tt_ranks)",
"def rank(self):\n return self.group_generators().cardinality()",
"def get_ranks(self):\n if not self.is_fit_:\n raise ValueError(\"Decomposition has not yet been computed\")\n\n joint_rank = self.common_.rank\n indiv_ranks = {bn: self.blocks_[bn].individual.rank for bn in\n self.block_names}\n return joint_rank, indiv_ranks",
"def score(self):\n if len(self.minions) == 0:\n return 0\n return sum([minion.rank for minion in self.minions]) + self.rank",
"def _get_counts_per_rank(cards: List[Card]) -> ValuesView:\n return Counter(card.rank for card in cards).values()",
"def get_spartan_ranks(self):\n return self._metadata_request(\"spartan-ranks\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Reads a CSV file containing results. Produces a mapping of benchmark names to numerical results.
|
def read_results(file_name):
results = {}
with open(file_name, 'r') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in islice(spamreader, 1, None):
results[row[0]] = float(row[1])
return results
|
[
"def get_results(self):\n # read the csv report\n with open(self.aggr_all) as f:\n reader = csv.DictReader(f)\n results = dict()\n # built the benchmark\n for row in reader:\n # get needed information from row\n # TODO: Warum manchmal 6 trusted judgments?\n answers = row['are_these_two_sounds_similar']\n score = answers.count('first_option') # first_option = similar\n query = int(row['query_id'])\n result = row['result_id']\n result = int(result.split('-')[1])\n # append to benchmark\n if not results.get(query):\n results[query] = []\n results[query].append((result, score))\n # sort results by appearence in table\n for query in results:\n result_list = results[query]\n results[query] = sorted(result_list,\n key=lambda x: all_sounds.index(x[0]))\n return results",
"def get_raw_scores(results_dir_path, name, measure):\n filepath = results_dir_path + name + \".csv\"\n if os.path.exists(filepath):\n df = pd.read_csv(filepath)\n measures = df[df['measure'] == measure]['score']\n return measures.tolist()\n else:\n print(\"Results file doesn't exist for {}\".format(name))",
"def get_statistics_dict() -> Dict[str, Tuple[int, int]]:\n\n scoreboard = defaultdict(lambda: (0, 0))\n\n with open(CSV_FILE, 'r') as college_quotes:\n\n for quote in csv.reader(college_quotes):\n for index in range(1, len(quote), 2):\n for author in quote[index].split(' & '):\n quotes, memes = scoreboard[author]\n scoreboard[author] = quotes + 1, memes\n\n for author in Path(MEMES_PATH).iterdir():\n quotes, memes = scoreboard[author.stem.title()]\n scoreboard[author.stem.title()] = quotes, len(list(author.iterdir()))\n\n return scoreboard",
"def load_results(path):\n with open(path) as results_file:\n data = json.load(results_file)\n\n return BenchmarkResults(**data)",
"def map_values(threadName, q):\n while not mapperExitFlag:\n if not q.empty():\n mapLock.acquire()\n file = q.get()\n mapLock.release()\n\n result = []\n\n with open(file, 'r') as csv_file:\n csvreader = csv.reader(csv_file, delimiter=',', quotechar='\"')\n for line in csvreader:\n key = line[stringToPos[mapFrom]]\n\n try:\n # value = int(float(line[stringToPos[mapTo]]))\n tup = (key, 1)\n result.append(tup)\n except ValueError:\n pass # print('Could not convert \"%s\"' % line[stringToPos[mapTo]])\n\n reduceLock.acquire()\n reducingTasks.put(result)\n reduceLock.release()",
"def load_unit_info(filename):\n \n hf.verify_directory(filename+ '.csv')\n unitinfo = []\n \n # open the csv\n with open(filename+'.csv') as csvfile:\n csvreader = csv.reader(csvfile)\n for row in csvreader:\n unitinfo.append(map(int, row)) # unit info is list of [tetrode, unit] pairs\n \n return unitinfo",
"def get_mapping():\n result = {}\n with open('cust_version_mapping.csv', 'r') as fh:\n result = {rows[0]:rows[1] for rows in csv.reader(fh)}\n return result",
"def _process_csv(filename):\n import csv\n\n node_dict, neighbor_dict = {}, {}\n\n with open(filename, \"r\") as csv_file:\n for row in csv.DictReader(csv_file):\n node = EuclideanNode(\n node_type=row['NodeType'],\n name=row['Name'],\n floor=row['Floor'],\n coord=eval(row['Coordinates'])\n )\n node_dict[row['Name']] = node\n neighbor_dict[row['Name']] = eval(row['Neighbors'])\n return node_dict, neighbor_dict",
"def readcsv():\n\n with open(INPUT_CSV, newline = '') as csvfile:\n # Skips the first row\n next(csvfile)\n csvdata = csv.reader(csvfile)\n\n # Itterates over each row, adding the rating and year to the dictionary\n for row in csvdata:\n rating = row[1]\n year = row[2]\n\n data_dict[year].append(rating)",
"def load_benchmark_result_from_logs_dir(logs_dir):\n check_path_exists(logs_dir)\n\n log_file_path = lambda log_file: os.path.join(logs_dir, log_file)\n result_lambda = lambda log_file: (\n log_file,\n parse_log_file(log_file_path(log_file)),\n )\n\n return dict(map(result_lambda, os.listdir(logs_dir)))",
"def test_run(filename='net_returns.csv'):\n net_returns = pd.Series.from_csv(filename, header=0, sep=',')\n t, p = analyze_returns(net_returns)\n print(\"t-statistic: {:.3f}\\np-value: {:.6f}\".format(t, p))",
"def csv_to_factor(filename):\r\n f = {'dom':None, 'table':odict([])}\r\n with open(filename, \"r\") as file:\r\n for i, line in enumerate(file):\r\n line = line.strip()\r\n L = line.split(',')\r\n L = [v for v in L if v]\r\n if i == 0: #header\r\n f['dom'] = tuple(L)\r\n else: #data\r\n L = [float(v) for v in L]\r\n f['table'][tuple(L[:-1])] = L[-1]\r\n return f",
"def read_map(filename):\n result = []\n try:\n with open(filename,'r') as f:\n reader = csv.reader(f, delimiter=' ')\n try:\n for row in reader:\n result.append(row)\n except csv.Error as e:\n print (\"Exit Status: 4\")\n sys.exit('file %s, line %d: %s' % (filename, reader.line_num, e))\n return result\n except IOError:\n print(\"Exit Status: 3\")\n sys.exit (3)",
"def load_feature_durations(filepath):\n feature_durations = {}\n\n with open(filepath, newline=\"\") as file:\n reader = csv.reader(file, delimiter=\",\")\n next(reader, None)\n\n for row in reader:\n feature_durations[row[0]] = float(row[1])\n\n return feature_durations",
"def read_csv(filename, port):\n with open(filename) as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n key = str(row[0])+str(row[3])\n val = str(row)\n index = getHash(key) % NODES\n call_rpc(port,'find_successor',index)\n successor = call_rpc(port,'find_successor',index)\n call_rpc(successor, 'update_keys_values',key,val)",
"def uploadResults(fileName):\n results = []\n seqLen = []\n \n with open(fileName, 'rb') as csvfile:\n lines = csv.reader(csvfile)\n \n for line in lines:\n if not line[0] == 'Number':\n results.append(int(line[0]))\n seqLen.append(len(line[1]))\n\n return results, seqLen",
"def parse_file(filename):\n user_ratings = coll.defaultdict(dict)\n movie_ratings = coll.defaultdict(dict)\n f = open(filename, 'rb')\n f = csv.reader(f)\n for row in f:\n user_ratings[int(row[1])][int(row[0])] = float(row[2])\n movie_ratings[int(row[0])][int(row[1])] = float(row[2])\n return user_ratings, movie_ratings",
"def load_runs(fn):\n runs = defaultdict(dict)\n with open(fn, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n qid, _, docid, _, score, _ = line.strip().split()\n runs[qid][docid] = float(score)\n return runs",
"def read_lut(csvfile, header, delimiter, quotechar):\n # Read in file\n with open(csvfile, 'rb') as f:\n csvreader = csv.reader(f, delimiter=delimiter, quotechar=quotechar)\n if header:\n csvreader.next()\n lut = { int(row[0]) : int(row[1]) for row in csvreader }\n\n if DEBUG:\n print 'Read in LUT:\\n {0}'.format(lut)\n\n return lut"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Aggregates results. Takes a baseline and a number of other measurements, divides all measurements by the baseline on a perbenchmark basis.
|
def aggregate_results(baseline, *others):
def aggregate_benchmark(key, results):
if key in results and results[key] != 0.0:
return results[key] / baseline[key]
else:
return float('nan')
results = []
for key in sorted(baseline.keys()):
results.append((key, 1.0) + tuple(aggregate_benchmark(key, xs)
for xs in others))
return results
|
[
"def _calculate_baseline(self):\n if self.data_counter % self.window_step == 0 and len(self.data_queue) == self.window_length:\n measurement = util.to_list(self.measurement_func(list(self.data_queue)))\n if self.baseline is None:\n self.baseline = [[feature] for feature in measurement]\n else:\n for baseline_feature, feature in zip(self.baseline, measurement):\n baseline_feature.append(feature)\n if self.data_counter >= self.baseline_length:\n self.baseline = [abs(sum(feature)) / len(feature) for feature in self.baseline]\n self._handle_datapoint = self._calculate_measurement",
"def aggregate_category(baseline_file, *other_files):\n baseline = read_results(baseline_file)\n others = [read_results(name) for name in other_files]\n return aggregate_results(baseline, *others)",
"def process_benchmarks(benchmarks):\n print(\"Benchmarking started...\")\n for bm in benchmarks:\n print(\"Benchmarking: \" + bm)\n f = open(bm, \"r\")\n data = json.load(f)\n f.close()\n bms = OrderedDict(sorted(data[\"bms\"].items()))\n flds = [\"profile_id\",\n \"suburi_keys\",\n \"mediatype_keys\",\n \"time_keys\",\n \"language_keys\",\n \"profile_size\",\n \"profile_size_compressed\",\n \"cdx_processing_time\",\n \"stats_calculation_time\",\n \"profiling_time\",\n \"collection\",\n \"cdx_size\",\n \"extract_size\",\n \"urim_count\",\n \"urir_count\"]\n opstr = \", \".join(flds)\n for k, v in bms.iteritems():\n v[\"profile_id\"] = k\n opstr += \"\\n\" + \", \".join([str(v[i]) for i in flds])\n print(opstr)\n bmdir = os.path.dirname(os.path.abspath(bm))\n summary = os.path.join(bmdir, \"summary-{0}.csv\".format(data[\"about\"][\"id\"]))\n f = open(summary, \"w\")\n f.write(opstr)\n f.close()",
"def extract_metrics(result, variety, backend):\n\n timings = result['timings']\n batch_size = result['batchSize']\n warmup_time = result['warmupTime']\n total_time = result['totalTime']\n training_time = total_time - warmup_time\n batch_count = len(timings)\n\n timings_s = np.array(timings) / 1000\n wall_time = total_time / 1000.0\n\n # Average examples per second across the entire benchmark run,\n # including warmup period. Assumes two warmup batches.\n # TODO: Lower to one batch when we have better-shaped zero tangent vectors.\n total_time_s = total_time / 1000.0\n total_num_examples = batch_size * (batch_count + 2)\n average_examples_per_second = total_num_examples / total_time_s\n\n # Examples per second, calculated after warmup period\n # of the measurements.\n warm_time_s = training_time / 1000.0\n warm_num_examples = batch_size * batch_count\n examples_per_second = warm_num_examples / warm_time_s\n\n metrics = [{\n 'name': 'exp_per_second',\n 'value': examples_per_second\n }, {\n 'name': 'avg_exp_per_second',\n 'value': average_examples_per_second\n }, {\n 'name': 'startup_time',\n 'value': warmup_time / 1000.0\n }, {\n 'name': 'step_time_median',\n 'value': np.median(timings_s)\n }, {\n 'name': 'step_time_min',\n 'value': np.min(timings_s)\n }, {\n 'name': 'step_time_max',\n 'value': np.max(timings_s)\n }]\n\n return (wall_time, metrics)",
"def baseline(self, *args, **kwargs):\n return _measures.measures_baseline(self, *args, **kwargs)",
"def run(self):\n logging.info(\"Running benchmark suite...\")\n for benchmark in self._benchmarks:\n result = self.run_method(benchmark)\n print(result)\n if self._table is None:\n self._table = Table([result])\n else:\n self._table.update([result])\n self.write_results()\n self.host_results()",
"def create_baseline(interval, running_time, moving_average_time_period):\n moving_average_time_period = int(moving_average_time_period)\n running_time = int(running_time)\n\n if moving_average_time_period >= running_time:\n print(\"Running time must be more than rolling average period\")\n exit()\n # get running time in minutes, div by interval plus 1 sec for network baseline\n num_of_polls = int((running_time * 60) / (interval + 1))\n moving_average_num_of_polls = int((moving_average_time_period * 60) / (interval + 1))\n cpu_utilisation = list()\n network_usage = list()\n memory_usage = list()\n cpu_utilisation_max = list()\n network_usage_max = list()\n memory_usage_max = list()\n i = 0\n max_cpu_average = 0\n max_network_average = 0\n max_memory_average = 0\n\n while i < num_of_polls:\n # create a list containing the CPU utilisation, memory usage and network usage over the entire period the\n # baseline is ran for.\n cpu = sysmon.get_cpu_utilisation()\n network = sysmon.get_network_interface_traffic(NETWORK_ADAPTOR)\n memory = sysmon.get_memory_usage()['memory_in_use']\n cpu_utilisation.append(cpu)\n network_usage.append(network)\n memory_usage.append(memory)\n\n # get the maximum values for the maximum cpu utilisation, network usage and memory usage. These values\n # are calculated using a move average over a specified period of time\n # first the minimum number of values are collected\n if i <= moving_average_num_of_polls:\n cpu_utilisation_max.append(cpu)\n network_usage_max.append(network)\n memory_usage_max.append(memory)\n # once the minimum number of values have been collected to calculate the average over. 
The size of the\n # list is maintained by dropping the first value and adding one to the end\n else:\n del cpu_utilisation_max[0]\n cpu_utilisation_max.append(cpu)\n cpu_average = get_mean(cpu_utilisation_max)\n if cpu_average > max_cpu_average:\n max_cpu_average = cpu_average\n del network_usage_max[0]\n network_usage_max.append(network)\n network_average = get_mean(network_usage_max)\n if network_average > max_network_average:\n max_network_average = network_average\n del memory_usage_max[0]\n memory_usage_max.append(memory)\n memory_average = get_mean(memory_usage_max)\n if memory_average > max_memory_average:\n max_memory_average = memory_average\n\n time.sleep(interval)\n i += 1\n\n # calculate the mean average from all the values in the list\n average_cpu_utilisation = get_mean(cpu_utilisation)\n average_network_usage = get_mean(network_usage)\n average_memory_usage = get_mean(memory_usage)\n\n # print the results to the console and write them to server_stats.txt\n resource_stats = \"CPU Average: %0.2f %%\\nCPU Max %0.2f %%\\nNetwork Average: %0.2f bytes per second\\n\" \\\n \"Network Max: %0.2f bytes per second\\nMemory Average: %0.2f MB\\nMemory Max: %0.2f MB\" % \\\n (average_cpu_utilisation*100, max_cpu_average*100, average_network_usage,\n max_network_average, average_memory_usage/1000, max_memory_average/1000)\n directory = 'resources'\n if not os.path.exists(directory):\n os.makedirs(directory)\n f = open('resources/server_stats.txt', 'w')\n f.write(resource_stats)\n f.close()\n print_results()",
"def benchmarkAggregateCombineManualActuation(self):\n\n # Run InputsToExtracts manually.\n records = []\n for x in self._dataset.read_raw_dataset(\n deserialize=False, limit=self._max_num_examples()):\n records.append({tfma.constants.INPUT_KEY: x})\n\n fn = tfma.extractors.legacy_predict_extractor._TFMAPredictionDoFn( # pylint: disable=protected-access\n eval_shared_models={\"\": tfma.default_eval_shared_model(\n eval_saved_model_path=self._dataset.tfma_saved_model_path())},\n eval_config=None)\n fn.setup()\n\n # Predict\n predict_batch_size = 1000\n predict_result = []\n for batch in benchmark_utils.batched_iterator(records, predict_batch_size):\n predict_result.extend(fn.process(batch))\n\n # AggregateCombineFn\n #\n # We simulate accumulating records into multiple different accumulators,\n # each with inputs_per_accumulator records, and then merging the resulting\n # accumulators together at one go.\n\n # Number of elements to feed into a single accumulator.\n # (This means we will have len(records) / inputs_per_accumulator\n # accumulators to merge).\n inputs_per_accumulator = 1000\n\n combiner = tfma.evaluators.legacy_aggregate._AggregateCombineFn( # pylint: disable=protected-access\n eval_shared_model=tfma.default_eval_shared_model(\n eval_saved_model_path=self._dataset.tfma_saved_model_path()))\n combiner.setup()\n accumulators = []\n\n start = time.time()\n for batch in benchmark_utils.batched_iterator(predict_result,\n inputs_per_accumulator):\n accumulator = combiner.create_accumulator()\n for elem in batch:\n combiner.add_input(accumulator, elem)\n accumulators.append(accumulator)\n final_accumulator = combiner.merge_accumulators(accumulators)\n final_output = combiner.extract_output(final_accumulator)\n end = time.time()\n delta = end - start\n\n # Extract output to sanity check example count. 
This is not timed.\n extract_fn = tfma.evaluators.legacy_aggregate._ExtractOutputDoFn( # pylint: disable=protected-access\n eval_shared_model=tfma.default_eval_shared_model(\n eval_saved_model_path=self._dataset.tfma_saved_model_path()))\n extract_fn.setup()\n interpreted_output = list(extract_fn.process(((), final_output)))\n if len(interpreted_output) != 1:\n raise ValueError(\"expecting exactly 1 interpreted output, got %d\" %\n (len(interpreted_output)))\n got_example_count = interpreted_output[0][1].get(\n \"post_export_metrics/example_count\")\n if got_example_count != self._dataset.num_examples(\n limit=self._max_num_examples()):\n raise ValueError(\n \"example count mismatch: expecting %d got %d\" %\n (self._dataset.num_examples(limit=self._max_num_examples()),\n got_example_count))\n\n self.report_benchmark(\n iters=1,\n wall_time=delta,\n extras={\n \"inputs_per_accumulator\":\n inputs_per_accumulator,\n \"num_examples\":\n self._dataset.num_examples(limit=self._max_num_examples())\n })",
"def get_stats(baseline, proposed):\n global total_segment_durationsb, total_segment_durationsp, \\\n max_segment_durationb, max_segment_durationp, \\\n min_segment_durationb, min_segment_durationp\n for entry in baseline:\n for segment in entry[1]:\n this_seg = segment[1] - segment[0]\n if this_seg > max_segment_durationb:\n max_segment_durationb = this_seg\n if this_seg < min_segment_durationb or min_segment_durationb == 0.0:\n min_segment_durationb = this_seg\n total_segment_durationsb += this_seg\n for entry in proposed:\n for segment in entry[1]:\n this_seg = segment[1] - segment[0]\n if this_seg > max_segment_durationp:\n max_segment_durationp = this_seg\n if this_seg < min_segment_durationp or min_segment_durationp == 0.0:\n min_segment_durationp = this_seg\n total_segment_durationsp += this_seg\n return (total_segment_durationsb, max_segment_durationb, min_segment_durationb), \\\n (total_segment_durationsp, max_segment_durationp, min_segment_durationp)",
"def aggregate_cache_perfdata(perfdata):\n cache = perfdata.get(\"cache\")\n if cache:\n total = sum(v for k, v in cache.items() if k != \"overall\")\n cache[\"overall\"] = total",
"def compute_average(self):\n self.report_dict = OrderedDict(sorted(self.report_dict.items(), key=lambda x: x[0], reverse=True))\n date_list = list(self.report_dict.keys())[::-1]\n\n print('Calculating the running total Initiated')\n running_total_dict = dict()\n for date in date_list:\n for border_measure, attributes in self.report_dict[date].items():\n if border_measure not in running_total_dict:\n running_total_dict[border_measure] = [attributes['sum'], 1]\n self.report_dict[date][border_measure]['running_total'] = 0\n else:\n calcul = running_total_dict[border_measure][0] / running_total_dict[border_measure][1]\n self.report_dict[date][border_measure]['running_total'] = math.ceil(calcul) if (float(\n calcul) % 1) >= 0.5 else round(calcul)\n running_total_dict[border_measure][0] += attributes['sum']\n running_total_dict[border_measure][1] += 1",
"def handle_benchmark(self, benchmark, results, used_argsets):\n print colored.cyan(\"Benchmarking `%s`...\" % benchmark.__name__)\n print \"=\" * 80\n # for each set of arguments\n #TODO: don't reference benchmark.arguments ever; it could be any\n # iterator that yields some values, for all we know.\n for argset in used_argsets:\n print \"With the argument set \" + str(argset)\n print \"-\" * 80\n # these are all the Result objects for this argument set\n arg_results = [r for r in results if r.args == argset]\n stats = self.statistics(arg_results)\n # a function that scales a float into an int that will fit in\n # the console width (assume 80 for now).\n # 76 because 80 - 2 (for the brackets) - 2 (for the indent)\n #TODO: don't depend on stats having a `max` field here.\n scaled = lambda x: int(76 * (x/stats.max.max))\n # for each \n for r in arg_results:\n print \"%s:\" % colored.cyan(r.method.__name__)\n with indent(2):\n # for each of the statistic functions\n for f in self.stat_functions:\n time = f(r.results)\n # get the statistics for this stat function\n these_stats = getattr(stats, f.__name__)\n # the color is green if it's the fastest, red if\n # it's the slowest, and cyan otherwise\n if time == getattr(these_stats, \"min\", 0.):\n color = colored.green\n elif time == getattr(these_stats, \"max\", None):\n color = colored.red\n else:\n color = colored.cyan\n # print a line for the number of seconds\n puts(\"%s: %s\" % (f.__name__, color(\"%f\" % time))),\n # print a bar to show what proportion of time this is\n puts(\"[%s]\" % color(\"=\" * scaled(time)))\n print \"-\" * 80\n #TODO: scale for terminal width but still look pretty",
"def multi_results(benchmark):\n # Read in results\n tensat_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n taso_root = os.path.join(os.path.dirname(tensat_root), \"TASO\")\n\n taso_benchmark_name = benchmark\n if benchmark == 'nasneta':\n taso_benchmark_name = 'nasnet_a'\n elif benchmark == 'vgg':\n taso_benchmark_name = 'vgg19-7'\n taso_runtime_file = os.path.join(taso_root, \"examples/{}_time.txt\".format(taso_benchmark_name))\n\n with open(taso_runtime_file, 'r') as f:\n content = f.readlines()\n\n orig_runtimes = []\n for line in content[-5:]:\n times = line.split('\\t')\n orig_runtimes.append(float(times[0]))\n orig_mean = np.mean(orig_runtimes)\n\n\n # iter=0\n mean_iter_0, mean_sat_iter_0, mean_ext_iter_0, mean_nodes_iter_0 = get_iter_stats(benchmark, tensat_root, iter=0)\n\n # iter=1\n mean_iter_1, mean_sat_iter_1, mean_ext_iter_1, mean_nodes_iter_1 = get_iter_stats(benchmark, tensat_root, iter=1)\n\n # iter=2\n mean_iter_2, mean_sat_iter_2, mean_ext_iter_2, mean_nodes_iter_2 = get_iter_stats(benchmark, tensat_root, iter=2)\n\n # iter=3\n mean_iter_3, mean_sat_iter_3, mean_ext_iter_3, mean_nodes_iter_3 = get_iter_stats(benchmark, tensat_root, iter=3)\n\n # Plot runtime & optimizer time v.s. iter\n speedup = [orig_mean/mean_iter_0, orig_mean/mean_iter_1, orig_mean/mean_iter_2]\n optimizer_time = [mean_sat_iter_0+mean_ext_iter_0, mean_sat_iter_1+mean_ext_iter_1, mean_sat_iter_2+mean_ext_iter_2]\n if mean_iter_3 > 0:\n speedup.append(orig_mean/mean_iter_3)\n optimizer_time.append(mean_sat_iter_3+mean_ext_iter_3)\n\n speedup = [(i-1)*100 for i in speedup]\n\n nodes = [mean_nodes_iter_0, mean_nodes_iter_1, mean_nodes_iter_2, mean_nodes_iter_3]\n\n result = {}\n result['speedup'] = speedup\n result['optimizer'] = optimizer_time\n result['nodes'] = nodes\n\n return result",
"def __merge_aggregate_data(self):\n \n logging.info(\"Merging the output data with baseline aggregate data.\")\n \n # Merging the output data with baseline aggregate data.\n self.output_data = pd.merge(self.output_data\n ,self.__aggregate_data\n ,how=\"inner\", on=[\"Location\", \"Month\"])\n \n logging.info(\"Completed merging the output data with baseline aggregate data.\")",
"def _aggregate_stats(self) -> None:\n self._stats = pd.DataFrame(columns=self._stats_columns)\n if len(self) == 0:\n return\n with tqdm(\n range(len(self)),\n desc=\"Aggregating stats\",\n unit=\"record\",\n dynamic_ncols=True,\n mininterval=1.0,\n disable=(self.verbose < 1),\n ) as pbar:\n for idx in pbar:\n rec_ann = self.load_ann(idx)\n beat_type_num = {\n k: v\n for k, v in Counter(\n [item.symbol for item in rec_ann[\"beat\"]]\n ).most_common()\n }\n beat_num = sum(beat_type_num.values())\n rhythm_len = {\n k: sum([itv[1] - itv[0] for itv in v])\n for k, v in rec_ann[\"rhythm\"].items()\n }\n self._stats = pd.concat(\n [\n self._stats,\n pd.DataFrame(\n [\n [\n self._all_records[idx],\n beat_num,\n beat_type_num,\n rhythm_len,\n ]\n ],\n columns=self._stats_columns,\n ),\n ],\n ignore_index=True,\n )",
"def testScaleBenchmarks(self):\n # First test that method returns expected dictionaries.\n for i in ('design', 'stretch'):\n benchmark = opsimUtils.scaleBenchmarks(10.0, i)\n self.assertIsInstance(benchmark, dict)\n expectedkeys = ('Area', 'nvisitsTotal', 'nvisits', 'seeing', 'skybrightness',\n 'singleVisitDepth')\n expectedfilters = ('u', 'g', 'r', 'i', 'z', 'y')\n for k in expectedkeys:\n self.assertIn(k, benchmark)\n expecteddictkeys = ('nvisits', 'seeing', 'skybrightness', 'singleVisitDepth')\n for k in expecteddictkeys:\n for f in expectedfilters:\n self.assertIn(f, benchmark[k])",
"def combine_results(results_list):\n results_overall = {}\n results_overall['overall'] = {}\n results_overall['predicate'] = {}\n\n # find metrics for overall\n for metric, value in results_list['overall'].items():\n if metric in ('map', 'roc_auc'):\n results_overall['overall'][metric] = np.mean(np.array(results_list['overall'][metric]))\n results_overall['overall']['{}_std'.format(metric)] = np.std(np.array(results_list['overall'][metric]))\n elif metric == 'cm':\n TN = np.array([v[0][0] for v in value])\n FP = np.array([v[0][1] for v in value])\n FN = np.array([v[1][0] for v in value])\n TP = np.array([v[1][1] for v in value])\n\n precision = TP / (TP + FP)\n recall = TP / (TP + FN)\n accuracy = (TP + TN) / (TN + FP + FN + TP)\n known_true = TP + FN\n known_neg = TN + FP\n predicted_true = TP + FP\n predicted_neg = FN + TN\n specificity = TN / (FP + TN)\n npv = TN / (TN + FN)\n fdr = FP / (FP + TP)\n\n results_overall['overall']['cm'] = np.mean(np.array(value), axis=0)\n results_overall['overall']['cm_std'] = np.std(np.array(value), axis=0)\n results_overall['overall']['precision'] = np.mean(precision)\n results_overall['overall']['precision_std'] = np.std(precision)\n results_overall['overall']['recall'] = np.mean(recall)\n results_overall['overall']['recall_std'] = np.std(recall)\n results_overall['overall']['accuracy'] = np.mean(accuracy)\n results_overall['overall']['accuracy_std'] = np.std(accuracy)\n results_overall['overall']['known_true'] = np.mean(known_true)\n results_overall['overall']['known_true_std'] = np.std(known_true)\n results_overall['overall']['known_neg'] = np.mean(known_neg)\n results_overall['overall']['known_neg_std'] = np.std(known_neg)\n results_overall['overall']['predicted_true'] = np.mean(predicted_true)\n results_overall['overall']['predicted_true_std'] = np.std(predicted_true)\n results_overall['overall']['predicted_neg'] = np.mean(predicted_neg)\n results_overall['overall']['predicted_neg_std'] = np.std(predicted_neg)\n 
results_overall['overall']['specificity'] = np.mean(specificity)\n results_overall['overall']['specificity_std'] = np.std(specificity)\n results_overall['overall']['npv'] = np.mean(npv)\n results_overall['overall']['npv_std'] = np.std(npv)\n results_overall['overall']['fdr'] = np.mean(fdr)\n results_overall['overall']['fdr_std'] = np.std(fdr)\n\n f1 = 2 * (precision * recall) / (precision + recall)\n results_overall['overall']['f1'] = np.mean(f1)\n results_overall['overall']['f1_std'] = np.std(f1)\n\n # find metrics for each predicate\n for predicate, metrics in results_list['predicate'].items():\n results_overall['predicate'][predicate] = {}\n\n for metric, value in metrics.items():\n if metric in ('map', 'roc_auc'):\n results_overall['predicate'][predicate][metric] = np.mean(np.array(results_list['overall'][metric]))\n results_overall['predicate'][predicate]['{}_std'.format(metric)] = np.std(np.array(results_list['overall'][metric]))\n elif metric == 'cm':\n TN = np.array([v[0][0] for v in value])\n FP = np.array([v[0][1] for v in value])\n FN = np.array([v[1][0] for v in value])\n TP = np.array([v[1][1] for v in value])\n\n precision = TP / (TP + FP)\n recall = TP / (TP +FN)\n accuracy = (TP + TN) / (TN + FP + FN + TP)\n known_true = TP + FN\n known_neg = TN + FP\n predicted_true = TP + FP\n predicted_neg = FN + TN\n specificity = TN / (FP + TN)\n npv = TN / (TN + FN)\n fdr = FP / (FP + TP)\n\n results_overall['predicate'][predicate]['cm'] = np.mean(np.array(value), axis=0)\n results_overall['predicate'][predicate]['cm_std'] = np.std(np.array(value), axis=0)\n results_overall['predicate'][predicate]['precision'] = np.mean(precision)\n results_overall['predicate'][predicate]['precision_std'] = np.std(precision)\n results_overall['predicate'][predicate]['recall'] = np.mean(recall)\n results_overall['predicate'][predicate]['recall_std'] = np.std(recall)\n results_overall['predicate'][predicate]['accuracy'] = np.mean(accuracy)\n 
results_overall['predicate'][predicate]['accuracy_std'] = np.std(accuracy)\n results_overall['predicate'][predicate]['known_true'] = np.mean(known_true)\n results_overall['predicate'][predicate]['known_true_std'] = np.std(known_true)\n results_overall['predicate'][predicate]['known_neg'] = np.mean(known_neg)\n results_overall['predicate'][predicate]['known_neg_std'] = np.std(known_neg)\n results_overall['predicate'][predicate]['predicted_true'] = np.mean(predicted_true)\n results_overall['predicate'][predicate]['predicted_true_std'] = np.std(predicted_true)\n results_overall['predicate'][predicate]['predicted_neg'] = np.mean(predicted_neg)\n results_overall['predicate'][predicate]['predicted_neg_std'] = np.std(predicted_neg)\n results_overall['predicate'][predicate]['specificity'] = np.mean(specificity)\n results_overall['predicate'][predicate]['specificity_std'] = np.std(specificity)\n results_overall['predicate'][predicate]['npv'] = np.mean(npv)\n results_overall['predicate'][predicate]['npv_std'] = np.std(npv)\n results_overall['predicate'][predicate]['fdr'] = np.mean(fdr)\n results_overall['predicate'][predicate]['fdr_std'] = np.std(fdr)\n\n f1 = 2 * (precision * recall) / (precision + recall)\n results_overall['predicate'][predicate]['f1'] = np.mean(f1)\n results_overall['predicate'][predicate]['f1_std'] = np.std(f1)\n\n return results_overall",
"def calculate_overall_statistics(yearly_stats):\n nested_dict = lambda: collections.defaultdict(nested_dict)\n overall_stats = nested_dict()\n\n for dataset in ('train', 'test'):\n for metric in METRICS:\n results = []\n\n for year in yearly_stats:\n results.append(yearly_stats[year][dataset][metric])\n\n overall_stats[dataset]['median'][metric] = \\\n statistics.median(results)\n overall_stats[dataset]['mean'][metric] = \\\n statistics.mean(results)\n\n return overall_stats",
"def run_baselines(self, X, y, pass_n='0', targets=None, label_col='majority_type'):\n models = self.baseline_models[int(pass_n)]\n b_acc = []\n for m in models:\n pred = m.predict(X)\n pred = pd.Series(pred).apply(lambda x: step[x] if type(x) is not bool else x).values\n score = m.score(pred, y, scoring=['accuracy', 'recall', 'precision'])\n b_acc.append(score)\n print(\"Baseline scores on pass {}: {}\".format(pass_n+1, b_acc))\n return b_acc"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Aggregates result files for a particular category of benchmarks.
|
def aggregate_category(baseline_file, *other_files):
baseline = read_results(baseline_file)
others = [read_results(name) for name in other_files]
return aggregate_results(baseline, *others)
|
[
"def cat(ctx, files):\n\n gb = GoogleBenchmark()\n for file in files:\n gb += GoogleBenchmark(stream=file)\n click.echo(gb.json())",
"def summariseFiles(origfiles, aggfiles, outputfile):\n \n summaries = []\n for (of, af) in zip(origfiles, aggfiles): \n summer = Summariser(of)\n summer.summariseOutput(af)\n summaries.append(summer)\n \n writeToCSV(summaries, outputfile)",
"def summarize_results(res_dir):\n\n print(\"Gathering Results...\")\n\n with open(\"simulations.csv\", 'a') as f:\n f.write('Datetime $ N $ M $ d $ parts $ ttype $ method $ instance $ time_c $ time_i $ n_iter $ '\n 'inter_s $ inter_e $ intra_s $ intra_e $ total $ true_inter $ clustering\\n')\n\n results = {}\n for k in CLUSTER_SIZE:\n results[k] = {}\n for l in DATAFOLDER.values():\n results[k][l] = {}\n for m in ['G', 'R', 'S', 'BCS', 'BCG', 'BCR', 'RSR', 'RSG', 'RSS', 'NN']:\n results[k][l][m] = {'N': [], 'avg_cl': [], 'max_cl': [], 'min_cl': [], 'sd_cl': [],\n 'avg_ii': [], 'max_ii': [], 'min_ii': [], 'sd_ii': [], 'avg_tot': [],\n 'max_tot': [], 'min_tot': [], 'sd_tot': [], 'avg_iter': [], 'min_iter': [],\n 'max_iter': [], 'sd_iter': [], 'avg_optgap': [], 'min_optgap': [], 'max_optgap': [],\n 'sd_optgap': [], 'avg_optper': [], 'min_optper': [], 'max_optper': [],\n 'sd_optper': [],\n 'avg_impper': [], 'min_impper': [], 'max_impper': [], 'sd_impper': []}\n\n log_dir = os.path.join(os.getcwd(), \"..\", \"Logs\", res_dir, \"*.log\")\n logs = glob.glob(log_dir)\n\n for f in logs:\n ttype, method, N, M, d, parts = get_info(f)\n print(get_info(f))\n info = get_results(f)\n\n res = results[d][DATAFOLDER[int(ttype)]][method]\n res['N'].append(N if int(N) >= 64 else str(N) + '_' + str(M))\n res['avg_cl'].append(info[0])\n res['max_cl'].append(info[1])\n res['min_cl'].append(info[2])\n res['sd_cl'].append(info[3])\n res['avg_ii'].append(info[4])\n res['max_ii'].append(info[5])\n res['min_ii'].append(info[6])\n res['sd_ii'].append(info[7])\n res['avg_tot'].append(info[8])\n res['max_tot'].append(info[9])\n res['min_tot'].append(info[10])\n res['sd_tot'].append(info[11])\n res['avg_iter'].append(info[12])\n res['min_iter'].append(info[13])\n res['max_iter'].append(info[14])\n res['sd_iter'].append(info[15])\n res['avg_optgap'].append(info[16])\n res['min_optgap'].append(info[17])\n res['max_optgap'].append(info[18])\n res['sd_optgap'].append(info[19])\n 
res['avg_optper'].append(info[20])\n res['min_optper'].append(info[21])\n res['max_optper'].append(info[22])\n res['sd_optper'].append(info[23])\n res['avg_impper'].append(info[24])\n res['min_impper'].append(info[25])\n res['max_impper'].append(info[26])\n res['sd_impper'].append(info[27])\n\n res_filename = os.path.join(os.getcwd(), RES_PATH, res_dir + \"__\" + str(date.today()))\n fjson = res_filename + \".json\"\n fcsv = res_filename + \".csv\"\n print(\"Saving Results in : \" + res_filename)\n\n with open(fjson, 'w') as fp:\n dump(results, fp, indent=4)\n\n if os.path.exists(\"simulations.csv\"):\n os.rename(\"simulations.csv\", fcsv)",
"def CombineFiles():\r\n \r\n # current list of files\r\n current_logs = os.listdir('logs')\r\n\r\n # split files into groups\r\n expecetimaxGroup = [file for file in current_logs if 'Star' in file]\r\n mctsGroup = [file for file in current_logs if 'MCTS' in file or 'M_RAVE' in file]\r\n statsGroup = [file for file in current_logs if 'Match_Stats' in file]\r\n\r\n groups = [expecetimaxGroup, mctsGroup, statsGroup]\r\n fileNames = ['ExpectimaxStats', 'MCTSStats', 'PlayerStats']\r\n\r\n i = 0\r\n\r\n for group in groups:\r\n if group == []:\r\n i += 1\r\n continue\r\n first = True\r\n \r\n for file in group:\r\n if first:\r\n df = pd.read_csv('logs/' + file)\r\n first = False\r\n else:\r\n dfNew = pd.read_csv('logs/' + file)\r\n df = df.append(dfNew)\r\n \r\n df.to_csv('logs/' + fileNames[i] + '.csv', index=False)\r\n i += 1",
"def process_benchmarks(benchmarks):\n print(\"Benchmarking started...\")\n for bm in benchmarks:\n print(\"Benchmarking: \" + bm)\n f = open(bm, \"r\")\n data = json.load(f)\n f.close()\n bms = OrderedDict(sorted(data[\"bms\"].items()))\n flds = [\"profile_id\",\n \"suburi_keys\",\n \"mediatype_keys\",\n \"time_keys\",\n \"language_keys\",\n \"profile_size\",\n \"profile_size_compressed\",\n \"cdx_processing_time\",\n \"stats_calculation_time\",\n \"profiling_time\",\n \"collection\",\n \"cdx_size\",\n \"extract_size\",\n \"urim_count\",\n \"urir_count\"]\n opstr = \", \".join(flds)\n for k, v in bms.iteritems():\n v[\"profile_id\"] = k\n opstr += \"\\n\" + \", \".join([str(v[i]) for i in flds])\n print(opstr)\n bmdir = os.path.dirname(os.path.abspath(bm))\n summary = os.path.join(bmdir, \"summary-{0}.csv\".format(data[\"about\"][\"id\"]))\n f = open(summary, \"w\")\n f.write(opstr)\n f.close()",
"def read_results(self):\n for analyse_type, input_files in PerfTests.INPUT_FILES.items():\n self._results[analyse_type] = self.read_analysis_results(input_files)",
"def send_results(config, artifact_processor):\n ret_code = 0\n for bench_name in sorted(config.benchmarks):\n filename = \"{}.json\".format(bench_name)\n gbench_run_results = GBenchRunResult.from_benchmark_file(filename)\n\n for key in sorted(gbench_run_results.benchmarks.keys()):\n result = gbench_run_results.benchmarks.get(key)\n LOG.debug(\"%s Result:\\n%s\", bench_name, result)\n\n comparison = artifact_processor.get_comparison_for_publish_result(bench_name, result, config.lax_tolerance)\n try:\n report_microbenchmark_result(\n config.publish_results_env, result.timestamp, config, comparison)\n except Exception as err:\n LOG.error(\"Error reporting results to performance storage service\")\n LOG.error(err)\n ret_code = 1\n\n return ret_code",
"def get_all_profiler_metrics_data(folder):\n calls_dfs = []\n with os.scandir(folder) as entries:\n for entry in entries:\n if entry.is_file() and entry.name.endswith('.xml'):\n full_path = os.path.join(folder, entry.name)\n calls_dfs.append(get_profiler_metrics_data(full_path))\n df = pd.concat(calls_dfs)\n df = df.groupby('Method')['Calls'].sum().reset_index()\n return df",
"def combine_metrics(out_dir: str):\n metric_files = tf.io.gfile.glob(f'{out_dir}/metrics/**/*.stat.csv')\n metrics_combined = f'{out_dir}/metrics.stat.csv'\n df_set = [pd.read_csv(tf.io.gfile.GFile(f)) for f in metric_files]\n df = pd.concat(df_set).sort_values(['metric', 'group', 'group_val'])\n with tf.io.gfile.GFile(metrics_combined, 'w') as f:\n df.to_csv(f, index=False)",
"def _handle_perf_results(\n benchmark_enabled_map, benchmark_directory_map, configuration_name,\n build_properties, extra_links, output_results_dir):\n begin_time = time.time()\n # Upload all eligible benchmarks to the perf dashboard\n results_dict = {}\n\n invocations = []\n for benchmark_name, directories in benchmark_directory_map.items():\n if not benchmark_enabled_map.get(benchmark_name, False):\n continue\n # Create a place to write the perf results that you will write out to\n # logdog.\n output_json_file = os.path.join(\n output_results_dir, (str(uuid.uuid4()) + benchmark_name))\n results_dict[benchmark_name] = output_json_file\n #TODO(crbug.com/1072729): pass final arguments instead of build properties\n # and configuration_name\n invocations.append((\n benchmark_name, directories, configuration_name,\n build_properties, output_json_file))\n\n # Kick off the uploads in multiple processes\n # crbug.com/1035930: We are hitting HTTP Response 429. Limit ourselves\n # to 2 processes to avoid this error. 
Uncomment the following code once\n # the problem is fixed on the dashboard side.\n # pool = multiprocessing.Pool(_GetCpuCount())\n pool = multiprocessing.Pool(2)\n upload_result_timeout = False\n try:\n async_result = pool.map_async(\n _upload_individual_benchmark, invocations)\n # TODO(crbug.com/947035): What timeout is reasonable?\n results = async_result.get(timeout=4000)\n except multiprocessing.TimeoutError:\n upload_result_timeout = True\n logging.error('Timeout uploading benchmarks to perf dashboard in parallel')\n results = []\n for benchmark_name in benchmark_directory_map:\n results.append((benchmark_name, False))\n finally:\n pool.terminate()\n\n # Keep a mapping of benchmarks to their upload results\n benchmark_upload_result_map = {}\n for r in results:\n benchmark_upload_result_map[r[0]] = r[1]\n\n logdog_dict = {}\n upload_failures_counter = 0\n logdog_stream = None\n logdog_label = 'Results Dashboard'\n for benchmark_name, output_file in results_dict.items():\n upload_succeed = benchmark_upload_result_map[benchmark_name]\n if not upload_succeed:\n upload_failures_counter += 1\n is_reference = '.reference' in benchmark_name\n _write_perf_data_to_logfile(\n benchmark_name, output_file,\n configuration_name, build_properties, logdog_dict,\n is_reference, upload_failure=not upload_succeed)\n\n logdog_file_name = _generate_unique_logdog_filename('Results_Dashboard_')\n logdog_stream = logdog_helper.text(logdog_file_name,\n json.dumps(dict(logdog_dict), sort_keys=True,\n indent=4, separators=(',', ': ')),\n content_type=JSON_CONTENT_TYPE)\n if upload_failures_counter > 0:\n logdog_label += (' %s merge script perf data upload failures' %\n upload_failures_counter)\n extra_links[logdog_label] = logdog_stream\n end_time = time.time()\n print_duration('Uploading results to perf dashboard', begin_time, end_time)\n if upload_result_timeout or upload_failures_counter > 0:\n return 1, benchmark_upload_result_map\n return 0, benchmark_upload_result_map",
"def get_benchmarks_by_name(benchmark_folder, match_name=None):\n to_run = []\n\n files = [\n os.path.join(benchmark_folder, f) for f in os.listdir(benchmark_folder)\n if os.path.isfile(os.path.join(benchmark_folder, f)) and not f.startswith('.')]\n\n if match_name:\n for file in files:\n name = os.path.splitext(os.path.basename(file))[0]\n if '_' in match_name:\n if name == match_name:\n to_run.append(file)\n else:\n if '_' in name:\n if name[:name.index('_')] == match_name:\n to_run.append(file)\n else:\n if name == match_name:\n to_run.append(file)\n else:\n to_run.extend(files)\n\n to_run.sort()\n return to_run",
"def merge_statistics_files(vartype, config_data):\n options_dict = config_data['options_dict']\n col_rename_dict = {'regression_equation': 'Equation', 'r2': 'R Squared', 'mean_error': 'Mean Error', 'nmean_error': 'NMean Error', 'nmse': 'NMSE', \n 'nrmse': 'NRMSE', 'nash_sutcliffe': 'NSE', 'percent_bias': 'PBIAS', 'rsr': 'RSR', 'rmse': 'RMSE',\n 'mnly_regression_equation': 'Mnly Equation', 'mnly_r2': 'Mnly R Squared', 'mnly_mean_err': 'Mnly Mean Err', 'mnly_mean_error': 'Mnly Mean Err', \n 'mnly_nmean_error': 'Mnly NMean Err', 'mnly_nmse': 'Mnly NMSE', \n 'mnly_nrmse': 'Mnly NRMSE', 'mnly_nash_sutcliffe': 'Mnly NSE', 'mnly_percent_bias': 'Mnly PBIAS', 'mnly_rsr': 'Mnly RSR', \n 'mnly_rmse': 'Mnly RMSE', 'Study': 'Study', 'Amp Avg %Err': 'Amp Avg %Err', 'Avg Phase Err': 'Avg Phase Err'}\n\n import glob, os\n print('merging statistics files')\n filename_prefix_list=['summary_statistics_masked_time_period_', 'summary_statistics_unmasked_']\n for fp in filename_prefix_list:\n output_dir = options_dict['output_folder']\n os.makedirs(output_dir, exist_ok=True)\n files = glob.glob(output_dir + '0_'+fp+'*'+vartype.name+'*.csv')\n # files = glob.glob(output_dir + '0_summary_statistics_*'+vartype.name+'*.csv')\n frames = []\n for f in files:\n frames.append(pd.read_csv(f))\n if len(frames)>0:\n result_df = pd.concat(frames)\n\n result_df.rename(columns=col_rename_dict, inplace=True)\n\n result_df.sort_values(by=['Location', 'DSM2 Run'], inplace=True, ascending=True)\n # result_df.to_csv(output_dir + '1_summary_statistics_all_'+vartype.name+'.csv', index=False)\n result_df.to_csv(output_dir + '1_' + fp + 'all_'+vartype.name+'.csv', index=False)\n for f in files:\n os.remove(f)",
"def CollectResultFile(vm, interval_op_rate_list, interval_key_rate_list,\n latency_median_list, latency_95th_list,\n latency_99_9th_list,\n total_operation_time_list):\n result_path = _ResultFilePath(vm)\n vm.PullFile(vm_util.GetTempDir(), result_path)\n resp, _ = vm.RemoteCommand('tail ' + result_path)\n match = re.findall(r'[\\w\\t ]: +([\\d\\.:]+)', resp)\n if len(match) < 6:\n raise ValueError('Result not found in \"%s\"' % resp)\n interval_op_rate_list.append(int(match[0]))\n interval_key_rate_list.append(int(match[1]))\n latency_median_list.append(float(match[2]))\n latency_95th_list.append(float(match[3]))\n latency_99_9th_list.append(float(match[4]))\n raw_time_data = match[5].split(':')\n total_operation_time_list.append(\n int(raw_time_data[0]) * 3600 + int(raw_time_data[1]) * 60 + int(\n raw_time_data[2]))",
"def load_benchmark_result_from_logs_dir(logs_dir):\n check_path_exists(logs_dir)\n\n log_file_path = lambda log_file: os.path.join(logs_dir, log_file)\n result_lambda = lambda log_file: (\n log_file,\n parse_log_file(log_file_path(log_file)),\n )\n\n return dict(map(result_lambda, os.listdir(logs_dir)))",
"def collect(self):\n matches = []\n for root, dirnames, filenames in os.walk(self.CPUACCT_PATH):\n for filename in filenames:\n if filename == 'cpuacct.stat':\n # matches will contain a tuple contain path to cpuacct.stat\n # and the parent of the stat\n parent = root.replace(self.CPUACCT_PATH, \"\").replace(\"/\", \".\")\n if parent == '':\n parent = 'system'\n matches.append((parent, os.path.join(root, filename)))\n\n # Read utime and stime from cpuacct files\n results = {}\n for match in matches:\n results[match[0]] = {}\n with open(match[1]) as file:\n elements = [ line.split() for line in file ]\n for el in elements:\n results[match[0]][el[0]] = el[1]\n\n # create metrics from collected utimes and stimes for cgroups\n metrics = {}\n for parent, cpuacct in results.iteritems():\n for key, value in cpuacct.iteritems():\n # Get Metric Name\n metric_name = '.'.join([parent, key])\n # Get actual data\n metrics[metric_name] = self.derivative(metric_name, long(value),\n self.MAX_VALUES[key])\n\n ## Publish Metric Derivative\n for metric_name in metrics.keys():\n self.publish(metric_name, metrics[metric_name])\n return True",
"def __mergeResultFiles():\n\t# Get path of txt resutls\n\tresults_path = NEST.GetKernelStatus()['data_path']\n\t# Create structure - the dict of a lists. Main file (string) : child files (list)\n\tfiles_map = defaultdict(list)\n\t# Build tree of rough (threaded) files\n\tfiles_list = [file for file in os.listdir(results_path) if os.path.isfile(\"{}/{}\".format(results_path, file))]\n\n\tfor threaded_file in files_list:\n\t\tmain_file_name = \"{}.{}\".format(threaded_file.split('-')[0], # Get body name of the file without thread number\n\t\t threaded_file.split('.')[-1]) # Get file format\n\t\t# Add child file to the main_file's list in dictionary\n\t\tfiles_map[main_file_name].append(threaded_file)\n\t# For every main_file in dict an his childs list\n\tfor main_file, child_files in files_map.items():\n\t\t# Write to the main file\n\t\twith open(\"{}/{}\".format(results_path, main_file), 'w') as f_main:\n\t\t\t# Get data from every child files and write to the main file\n\t\t\tfor threaded_file in child_files:\n\t\t\t\twith open(\"{}/{}\".format(results_path, threaded_file), 'r') as f_child:\n\t\t\t\t\tfor line in f_child:\n\t\t\t\t\t\tf_main.write(line)\n\t\t\t\t# Delete finished needless child file\n\t\t\t\tos.remove(\"{}/{}\".format(results_path, threaded_file))",
"def get_results(self):\n # read the csv report\n with open(self.aggr_all) as f:\n reader = csv.DictReader(f)\n results = dict()\n # built the benchmark\n for row in reader:\n # get needed information from row\n # TODO: Warum manchmal 6 trusted judgments?\n answers = row['are_these_two_sounds_similar']\n score = answers.count('first_option') # first_option = similar\n query = int(row['query_id'])\n result = row['result_id']\n result = int(result.split('-')[1])\n # append to benchmark\n if not results.get(query):\n results[query] = []\n results[query].append((result, score))\n # sort results by appearence in table\n for query in results:\n result_list = results[query]\n results[query] = sorted(result_list,\n key=lambda x: all_sounds.index(x[0]))\n return results",
"def create_summary_csv(output_data_dir, output_dir):\n fieldnames = ['dataset', 'num_epochs', 'batch_size', 'lr',\n 'patience', 'target_fs', 'audio_window_size',\n 'k_smoothing', 'model_type']\n for subset_name in SUBSET_NAMES:\n for metric_name in BEAT_METRIC_NAMES:\n for stat_name in STAT_NAMES:\n field_name = \"{}_{}_{}\".format(subset_name, metric_name, stat_name)\n fieldnames.append(field_name)\n for subset_name in SUBSET_NAMES:\n for variant_name in TEMPO_VARIANTS:\n fieldnames.append('{}_{}_accuracy1'.format(subset_name, variant_name))\n fieldnames.append('{}_{}_accuracy2'.format(subset_name, variant_name))\n\n with open(os.path.join(output_dir, 'summary.csv'), 'w') as f:\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n writer.writeheader()\n\n for model_type_dir in next(os.walk(output_data_dir))[1]:\n for dataset_dir in next(os.walk(output_data_dir+'/'+model_type_dir+'/'))[1]:\n for file_dir in next(os.walk(os.path.join(output_data_dir,model_type_dir,dataset_dir,'model')))[1]:\n load_and_write(os.path.join(output_data_dir,model_type_dir,dataset_dir,'model',file_dir), writer)",
"def main(directory, output, processors):\n result = {}\n\n queue = multiprocessing.Queue()\n files_to_process = []\n\n for file in os.scandir(directory):\n if not is_log_file(file.name):\n continue\n files_to_process.append(file)\n if len(files_to_process) < processors:\n continue\n for data in process_files(files_to_process, queue):\n result = update_result(result, data)\n files_to_process = []\n if files_to_process:\n for data in process_files(files_to_process, queue):\n result = update_result(result, data)\n\n with open(output, 'w') as f:\n f.write(json.dumps(result, indent=4, sort_keys=True))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compares this node's attribute with the entry's attribute
|
def compareAttributes(self, entry):
if self.attributeType == 'str':
# Must be a categorical attribute, so compare this node's category with that of the entry
return self.comparisonValue == entry.attributes[self.attribute]
else:
# Must be a numeric attribute
return self.comparisonValue > entry.attributes[self.attribute]
|
[
"def compare(self, other: Attribute) -> float:\n return int(self._equal(self.value == other.value))",
"def match(self, compared_attribute):\n for k, v in self.attribute.items():\n if not (k in compared_attribute and compared_attribute[k] == v):\n return False\n return True",
"def __cmp__(self, element):\n if not isinstance(element, Element):\n raise TypeError\n # print \"Comparing: <%s>,\n # <%s>\"%(str(self.xmlname),str(element.xmlname))\n result = cmp(self.xmlname, element.xmlname)\n if result:\n return result\n # sort and compare all attributes\n selfAttrs = self.GetAttributes()\n selfAttrNames = selfAttrs.keys()\n self.SortNames(selfAttrNames)\n elementAttrs = element.GetAttributes()\n elementAttrNames = elementAttrs.keys()\n element.SortNames(elementAttrNames)\n # print \"Comparing attributes:\n # \\n%s\\n...\\n%s\"%(str(selfAttrNames),str(elementAttrNames))\n for i in xrange(len(selfAttrNames)):\n if i >= len(elementAttrNames):\n # We're bigger by virtue of having more attributes!\n return 1\n selfAName = selfAttrNames[i]\n elementAName = elementAttrNames[i]\n result = cmp(selfAName, elementAName)\n if result:\n return result\n result = cmp(selfAttrs[selfAName], elementAttrs[selfAName])\n if result:\n return result\n if len(elementAttrNames) > len(selfAttrNames):\n # They're bigger by virtue of having more attributes!\n return -1\n selfChildren = list(self.GetCanonicalChildren())\n elementChildren = list(element.GetCanonicalChildren())\n for i in xrange(len(selfChildren)):\n if i >= len(elementChildren):\n # We're bigger by virtue of having more children\n return 1\n if isinstance(selfChildren[i], Element):\n if isinstance(elementChildren[i], Element):\n result = cmp(selfChildren[i], elementChildren[i])\n else:\n # elements sort before data\n result = -1\n elif isinstance(elementChildren[i], Element):\n result = 1\n else:\n # Data sorts by string comparison\n result = cmp(selfChildren[i], elementChildren[i])\n if result:\n return result\n if len(elementChildren) > len(selfChildren):\n return -1\n # Name, all attributes and child elements match!!\n return 0",
"def test_eq(self):\n attr1 = Attribute(\"device\", \"read\")\n attr2 = Attribute(\"device\", \"read\")\n assert attr1 == attr2",
"def attrs_equal(self, col):\n if not isinstance(col, BaseColumn):\n raise ValueError('Comparison `col` must be a Column or MaskedColumn object')\n\n attrs = ('name', 'units', 'dtype', 'format', 'description', 'meta')\n equal = all(getattr(self, x) == getattr(col, x) for x in attrs)\n\n return equal",
"def _set_attr_main_(self, attr):\n if attr.lower() in self.attributes:\n self.attr_main = self.attributes.index(attr.lower())\n\n # if attribute is changed, check if main and sub attributes are the same\n if self.attr_main == self.attr_sub:\n self.is_same_attr = True\n else:\n self.is_same_attr = False",
"def _set_attr_sub_(self, attr):\n if attr.lower() in self.attributes:\n self.attr_sub = self.attributes.index(attr.lower())\n\n # if attribute is changed, check if main and sub attributes are the same\n if self.attr_main == self.attr_sub:\n self.is_same_attr = True\n else:\n self.is_same_attr = False",
"def __eq__(self, other):\n if isinstance(other, AttributesHolder):\n return self._attributes == other._attributes\n else:\n return False",
"def test_attribute_noteq(self):\n attr1 = Attribute(\"device\", \"read\")\n attr2 = Attribute(\"device\", \"write\")\n assert attr1 != attr2",
"def _attrs_equal(lhs, rhs):\n if isinstance(lhs, str) and isinstance(rhs, str):\n return lhs == rhs\n return lhs is rhs",
"def md_entry_same(entry_name, s1, s2):\r\n\r\n s1_val = s1[entry_name]\r\n s2_val = s2[entry_name]\r\n\r\n return (s1_val == s2_val, \"(\" + entry_name + \") \" + repr(s1_val) + \", \" + repr(s2_val))",
"def areAttributesEqual(varA, varB):\n if varA.attributes.keys() != varB.attributes.keys():\n return False\n \n for k in varA.attributes.keys():\n if varA.attributes[k] != varB.attributes[k]:\n return False\n \n return True",
"def _compare_command_table_entry(\n entry1: dict,\n entry2: dict,\n ):\n\n entry1_copy = deepcopy(entry1)\n entry2_copy = deepcopy(entry2)\n\n entry1_copy[\"index\"] = 0\n entry2_copy[\"index\"] = 0\n\n return entry1_copy == entry2_copy",
"def compare(self, other_file):\n if self.size == other_file.size:\n # Hashing is expensive and hashes don't matter unless filesizes match, so don't even bother unless we're already matched on size.\n # Even so, if hashing is already done, localutils.hash256 only hashes on None hashes unless force=True\n # We really only want to do this once, and only if necessary.\n self.sha256 = hash256(self, force=False)\n other_file.sha256 = hash256(other_file, force=False)\n if self.sha256 == other_file.sha256:\n if self.node_name == other_file.node_name:\n return \"match\"\n else:\n return \"content\"\n return False",
"def __eq__(self, other):\n if hasattr(other, \"element_info\"):\n return self.element_info == other.element_info\n else:\n return self.element_info == other",
"def test_compareName(self):\n nodes = self.TreeNode\n self.assertEqual(nodes['a'].compareName(nodes['a']), 0)\n self.assertEqual(nodes['a'].compareName(nodes['b']), -1)\n self.assertEqual(nodes['b'].compareName(nodes['a']), 1)",
"def _check_rule_has_attribute_equal(self, data_sources, conditions):\n return data_sources['attribute_value'] == conditions['attribute_value']",
"def test_cmp(self):\n nodes = self.TreeNode\n self.assertEqual(cmp(nodes['a'], nodes['a']), 0)\n self.assertNotEqual(cmp(nodes['b'], nodes['a']), 0)\n self.assertNotEqual(cmp(nodes['a'], nodes['b']), 0)",
"def isEqualToNode(self, other):\n is_lower = self.nodeName.lower() == other.nodeName.lower()\n same_name = self.namespace == other.namespace\n same_attrs = self.attributes == other.attributes\n is_equal = Node.isEqualToNode(self, other)\n return all([is_lower, same_name, same_attrs, is_equal])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.