Dataset columns:
    query      string    (length 9 to 9.05k)
    document   string    (length 10 to 222k)
    negatives  sequence  (length 19 to 20)
    metadata   dict
Return the color temperature of this light.
def color_temp(self):
    return kelvin_to_mired(self._color_temp)
[ "def current_color_temperature(self) -> int | float | None:\n return self.color_temperature.value", "def temperature(self):\n temp_status = self._driver.query_temp_status(self._handle)\n return temp_status['imaging_ccd_temperature']", "def temperature(self):\n return self._temperature", "def temperature(self):\n return float(self._current_observation['temp_c'])", "def get_temperature(self):\n pass", "def get_temperature_factor(self):\n return self.temp_factor", "def temperature(self):\n return _cantera.reactor_temperature(self.__reactor_id)", "def getTemperatureColor(): \n temperature = int(request.args.get('temperature'))\n return getHexForColor(temperature, '/temp.png')", "def get_temperature(self):\n self.temperature = self.temperature_sensors.get_temperature(\n self.channel)\n return self.temperature", "def get_temperature(self):\n return (self.call_sense_hat_function('get_temperature'), 'temp', 'c')", "def sky_temperature(self) -> float:\n\n return 0.0552 * (self.ambient_temperature**1.5)", "def get_temperature_color_preview(lamp_props):\n temperature = lamp_props.color_temperature\n\n mm = 1000.0 / temperature\n mm2 = mm ** 2\n mm3 = mm2 * mm\n x, y = 0, 0\n\n if temperature < 4000:\n x = -0.2661239 * mm3 - 0.2343580 * mm2 + 0.8776956 * mm + 0.179910\n else:\n x = -3.0258469 * mm3 + 2.1070379 * mm2 + 0.2226347 * mm + 0.240390\n\n x2 = x**2\n x3 = x2 * x\n if temperature < 2222:\n y = -1.1063814 * x3 - 1.34811020 * x2 + 2.18555832 * x - 0.20219683\n elif temperature < 4000:\n y = -0.9549476 * x3 - 1.37418593 * x2 + 2.09137015 * x - 0.16748867\n else:\n y = 3.0817580 * x3 - 5.87338670 * x2 + 3.75112997 * x - 0.37001483\n\n # xyY to XYZ, assuming Y=1.\n xyz = mathutils.Vector((x / y, 1, (1 - x - y) / y))\n return xyz_to_rgb * xyz", "def get_ccd_temperature(self):\n return self.client.getCCDTemperature()", "def current_temperature(self) -> float:\n return self._thermostat.current_temperatue", "def temperature(self):\n names = ['anc_air_temperature']\n return self.sensor.get_with_fallback('temperature', names)", "def get_temperature(self):\n temp_cpu = get_cpu_temp()\n # Calculates the real temperature compensating CPU heating.\n temp_avg = (self.get_temperature_from_humidity() + self.get_temperature_from_humidity()) / 2\n calibrated = temp_avg - ((temp_cpu - temp_avg) / 1.2)\n calibrated = get_smooth(calibrated)\n return calibrated", "def get_atsat_bright_temp(self):\n\n pass", "def get_color(self):\n return self.color", "def color(self):\n return self.uniform_buffer.data[\"color\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function returns the number of elements in the numbers list that are divisible by divide.
def listDivide(numbers, divide = 2):
    divisible_count = 0

    for i in numbers:
        if i % divide == 0:
            divisible_count += 1
    return divisible_count
[ "def listDivide(numbers,divide=2):\n div = 0\n for num in numbers:\n if num % divide == 0:\n div += 1\n return div", "def listDivide(numbers=[], divide=2):\n a = [x % divide for x in numbers]\n count = a.count(0)\n return count", "def answer(l):\n num_divisors = [0] * len(l)\n triple_count = 0\n for large in range(1, len(l)):\n for small in range (0, large):\n if l[large] % l[small] == 0:\n num_divisors[large] += 1\n triple_count += num_divisors[small]\n return triple_count", "def count_multiples( start, stop, divisor ):\n count = 0\n num = start\n while num < stop + 1:\n if num % divisor == 0:\n count += 1\n num += 1\n return count", "def count_divisor(number):\n factors = prime_factors(number)\n checked = []\n divisors = 1\n for i in factors:\n if i not in checked:\n divisors = divisors * (factors.count(i) + 1)\n checked.append(i)\n return divisors", "def count_divisor(num):\n ans = 2 # considering 1 and number itself by default\n for i in range(2, int(math.sqrt(num)) + 1):\n if num % i == 0:\n # for equal divisor (incase of perfect square)\n if (num / i) == i:\n ans += 1\n else:\n ans += 2\n return ans", "def get_total_divisors(number):\n if number <= 2:\n return number\n try:\n d = saved_divisors[number]\n return d\n except:\n a = math.sqrt(number)\n array = list(range(1,int(a)))\n div = filter(lambda x: number%x == 0, array)\n total_divisors = len(div) * 2\n saved_divisors[number]=total_divisors\n return total_divisors", "def get_divisores(num):\n divisores = [] #uso una lista para guardar los divisores\n for i in range(1, num):\n if num%i == 0:\n divisores.append(i)\n return divisores", "def number_of_divisors(input_number: int):\n x = 1\n list_of_divisors = []\n while x <= input_number:\n if input_number % x == 0:\n list_of_divisors.append(x)\n x += 1\n print(f'Below divisors list of {input_number} number:\\n{list_of_divisors}')", "def numDivisors(self) -> int:\n if self.primeFactorization.get(1, 0) == 1:\n return 1\n numDiv = 1\n for _, cnt in self.primeFactorization.items():\n numDiv *= cnt+1\n return numDiv", "def count_divisions(num, n):\n count = 0\n while pe_005.is_divisible(num, n):\n num = num // n\n count += 1\n return count, num", "def div_numbers(a: int, b: int) -> int:\n return a / b", "def div_by(n, list_of_num):\n for num in list_of_num:\n if not n % num:\n return True\n return False", "def verifica_element_divide_lista(numar, lista_divizori):\n for i in lista_divizori:\n if i == 0:\n return False\n if numar % i != 0:\n return False\n return True", "def div_sum(data: list) -> int:\n\n def even_quotient(nums: list) -> int:\n \"\"\"Finds the quotient of the only two numbers in the list that evennly divide.\"\"\"\n for i in range(len(nums[:-1])):\n for j in range(i + 1, len(nums)):\n if nums[i] % nums[j] == 0:\n return nums[i] // nums[j]\n elif nums[j] % nums[i] == 0:\n return nums[j] // nums[i]\n\n total = 0\n for row in data:\n total += even_quotient(row)\n return total", "def num_of_divisors_of(number, primes):\n result = 1\n for value in primes.factors_of(number).viewvalues():\n result *= (value + 1)\n return result", "def test_count_divisible_digits():\n print('Testing count_divisible_digits')\n\n # Cases given to test this problem\n assert_equals(4, hw1.count_divisible_digits(650899, 3))\n assert_equals(1, hw1.count_divisible_digits(-204, 5))\n assert_equals(0, hw1.count_divisible_digits(24, 5))\n assert_equals(0, hw1.count_divisible_digits(1, 0))\n\n # Additional cases to check the 0 check\n assert_equals(0, hw1.count_divisible_digits(0, 0))\n assert_equals(2, 
hw1.count_divisible_digits(-579300, 2))", "def divisions(self,domain,divisions):\n size = domain.height/divisions\n counter = []\n for i in range(divisions):\n count = ((self.z >= i*size) & (self.z < (i+1)*size)).sum()\n counter.append(count)\n return counter", "def testListDivide():\n assert listDivide([1,2,3,4,5]) == 2\n assert listDivide([2,4,6,8,10]) == 5\n assert listDivide([30, 54, 63,98, 100], divide = 10) == 2\n assert listDivide([]) == 0\n assert listDivide([1,2,3,4,5], 1) == 5" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function tests the listDivide function.
def testListDivide():
    assert listDivide([1,2,3,4,5]) == 2
    assert listDivide([2,4,6,8,10]) == 5
    assert listDivide([30, 54, 63,98, 100], divide = 10) == 2
    assert listDivide([]) == 0
    assert listDivide([1,2,3,4,5], 1) == 5
[ "def testListDivide():\n listDivide([1, 2, 3, 4, 5])\n listDivide([2, 4, 6, 8, 10])\n listDivide([30, 54, 63, 98, 100], divide=10)\n listDivide([])\n listDivide([1, 2, 3, 4, 5], 1)", "def testListDivide():\n\n result = listDivide([1, 2, 3, 4, 5])\n if result != 2:\n raise ListDivideException(\"Test Failed\")\n result = listDivide([2, 4, 6, 8, 10])\n if result != 5:\n raise ListDivideException(\"Test Failed\")\n result = listDivide([30, 54, 63, 98, 100], divide=10)\n if result != 2:\n raise ListDivideException(\"Test Failed\")\n result = listDivide([])\n if result != 0:\n raise ListDivideException(\"Test Failed\")\n result = listDivide([1, 2, 3, 4, 5], 1)\n if result != 5:\n raise ListDivideException(\"Test Failed\")", "def testListDivide():\n\n result = listDivide([1, 2, 3, 4, 5])\n if result != 2:\n raise ListDivideException(\"Caught!\")\n result = listDivide([2, 4, 6, 8, 10])\n if result != 5:\n raise ListDivideException(\"Caught!\")\n result = listDivide([30, 54, 63, 98, 100], divide=10)\n if result != 2:\n raise ListDivideException(\"Caught!\")\n result = listDivide([])\n if result != 0:\n raise ListDivideException(\"Caught!\")\n result = listDivide([1, 2, 3, 4, 5], 1)\n if result != 5:\n raise ListDivideException(\"Caught!\")", "def test_divide(self):\n self.assertEqual(2, divide(6, 3))\n self.assertEqual(2.5, divide(5, 2))", "def listDivide(numbers,divide=2):\n div = 0\n for num in numbers:\n if num % divide == 0:\n div += 1\n return div", "def divide_list(ld, division):\n buckets = []\n current = []\n for obj in ld:\n if len(current) < division:\n current.append(obj)\n else:\n buckets.append(current)\n current = [obj]\n if len(current) > 0:\n buckets.append(current)\n return buckets", "def divideList(L):\n for x in range(len(L)):\n L[x] = L[x]/100.0\n return L", "def test_dividing(self):\n divider = Divider()\n\n for i in range(-10, 10):\n for j in range(-10, 10):\n if j != 0:\n self.assertEqual(i/j, divider.calc(j, i))", "def mergelistdiv(lst1,lst2):\r\n try:\r\n return [lst1[i]/lst2[i] for i in range(len(lst1))]\r\n except:\r\n print('incompatible lists')", "def divide(list1, list2):\n assert len(list1) == len(list2)\n return [e1 / e2 for (e1, e2) in zip(list1, list2)]", "def list_division(num_list1, num_list2):\n results = []\n for num in num_list1:\n results.append(num / num_list2[num])\n return results", "def seperate_list(list, division_part):\n avg = len(list) / float(division_part)\n out = []\n last = 0.0\n\n while last < len(list):\n out.append(list[int(last):int(last + avg)])\n last += avg\n return out", "def test_divide_success(self):\n with self.assertNoLogs():\n divide_by(10, 2)", "def listDivide(numbers, divide = 2):\n divisible_count = 0\n\n for i in numbers:\n if i % divide == 0:\n divisible_count += 1\n return divisible_count", "def listDivide(numbers=[], divide=2):\n a = [x % divide for x in numbers]\n count = a.count(0)\n return count", "def test_split(range_size, partition_size):\n dump = Mock()\n\n iterable = list(range(range_size))\n\n list(_split(partition_size=partition_size, dump=dump, iterable=iterable))\n expected_call_count = (range_size // partition_size) + int(bool(range_size % partition_size))\n\n assert dump.call_count == expected_call_count", "def list_division(my_list_1, my_list_2, list_length):\n\n res_1 = []\n res = 0\n for n in range(0, list_length):\n try:\n res = my_list_1[n] / my_list_2[n]\n res_1.append(res)\n except ZeroDivisionError:\n print(\"division by 0\")\n res_1.append(0)\n except TypeError:\n print(\"wrong type\")\n 
res_1.append(0)\n except IndexError:\n print(\"out of range\")\n res_1.append(0)\n finally:\n pass\n return res_1", "def verifica_element_divide_lista(numar, lista_divizori):\n for i in lista_divizori:\n if i == 0:\n return False\n if numar % i != 0:\n return False\n return True", "def test_list_lists(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Drops an Operation, identified by its Operation Id, and its children recursively. Drop deletes the Operations from the Database.
def drop_operation(cls,operation_id):
    db = cls._core.get_db()

    stmnt = "SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS IN (0, 2) ;"
    cur = db.query(cls._core,stmnt,(operation_id,))
    for row in cur.fetchallmap():
        cls.drop_operation(row["OPE_ID"])

    stmnt = "DELETE FROM OPERATIONS WHERE OPE_ID = ? AND OPE_STATUS IN (0, 2) ;"
    db.query(cls._core,stmnt,(operation_id,),commit=True)
[ "def cancel_operation(cls,operation_id):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS = 0 ;\"\n cur = db.query(cls._core,stmnt,(operation_id,))\n for row in cur.fetchallmap():\n cls.cancel_operation(row[\"OPE_ID\"])\n\n stmnt = \"DELETE FROM OPERATIONS WHERE OPE_ID = ? AND OPE_STATUS = 0 ;\"\n db.query(cls._core,stmnt,(operation_id,),commit=True)", "def process_children(cls, operation):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID, OPE_TYPE FROM OPERATIONS WHERE OPE_OPE_PARENT = ? ORDER BY OPE_INVOKED ;\"\n stmnt_lock = \"UPDATE OPERATIONS SET OPE_STATUS = 1 WHERE OPE_ID = ? ;\"\n cur = db.query(cls._core,stmnt,(operation.get_id(),))\n for row in cur.fetchallmap():\n child_operation = cls.restore_operation(row)\n db.query(cls._core,stmnt_lock,(child_operation.get_id(),),commit=True)\n try:\n cls.process_children(child_operation)\n child_operation.do_workload()\n except Exception,e:\n stmnt_err = \"UPDATE OPERATIONS SET OPE_STATUS = 2 WHERE OPE_ID = ? ;\"\n db.query(cls._core,stmnt_err,(int(row[\"OPE_ID\"]),),commit=True)\n #TODO GENERATE ERROR IN LOG\n raise e\n stmnt_delete = \"DELETE FROM OPERATIONS WHERE OPE_ID = ?;\"\n db.query(cls._core,stmnt_delete,(child_operation.get_id(),),commit=True)", "def delete_op(self, t: TS) -> None:\n self.tree.remove(t)", "def remove_operation(self, name):\n\n del self.operations[name]", "def test_deletion(self):\n\n self.logger.info(\"Testing deletion tree from {}\".format(self))\n\n self.error_for_state()\n self.error_for_file_state()\n self.error_for_dependents()", "def test_drop(self):\n self._run_tests(\"drop\")", "def delete_import_operation(dbsession, operation: ImageImportOperation):\n logger.info(\"garbage collecting import operation: %s\", operation.uuid)\n\n obj_mgr = object_store.get_manager()\n failed = False\n uuid = operation.uuid\n\n for content in operation.contents:\n try:\n logger.debug(\n \"deleting import content digest %s of type %s for operation %s\",\n content.digest,\n content.content_type,\n operation.uuid,\n )\n obj_mgr.delete_document(\n userId=operation.account,\n bucket=content.content_storage_bucket,\n archiveid=content.content_storage_key,\n )\n dbsession.delete(content)\n logger.debug(\n \"deleted import content digest %s of type %s for operation %s successfully\",\n content.digest,\n content.content_type,\n operation.uuid,\n )\n except:\n logger.debug_exception(\n \"could not delete import content of type %s for operation %s with digest %s\",\n content.content_type,\n operation.uuid,\n content.digest,\n )\n failed = True\n\n if not failed:\n dbsession.delete(operation)\n else:\n return operation\n\n logger.info(\"garbage collection of import operation %s complete\", uuid)\n return None", "def drop(self, curs, objs, log = None):\r\n # make sure the creating & dropping happen in reverse order\r\n olist = self.object_list[:]\r\n olist.reverse()\r\n for o in olist:\r\n if o.type & objs:\r\n sql = o.get_drop_sql(curs)\r\n if not sql:\r\n continue\r\n if log:\r\n log.info('Dropping %s' % o.name)\r\n log.debug(sql)\r\n curs.execute(sql)", "async def delete_operation(self, ctx: context, op_number: str) -> None:\n config = self.config.get(str(ctx.guild.id), {})\n if not await Validators.validate_sign_up_channel(ctx.channel.id, config):\n return\n op = self.ops.get(str(ctx.guild.id), {}).get(str(op_number))\n if not op:\n message = await ctx.send(\"There is no Operation with that number.\")\n await message.delete(delay=10)\n return\n\n if not await 
self.is_owner_or_admin(ctx, op):\n await ctx.send(\"You are not authorised to use this command. Only an Admin or the person who created \"\n \"this operation may update it.\")\n return\n\n try:\n message = await ctx.fetch_message(op[\"Post_id\"])\n await message.unpin()\n except Exception as e:\n pass\n\n self.ops[str(ctx.guild.id)].pop(op_number)\n with open('./Ops.json', 'w') as f:\n dump(self.ops, f)\n\n await ctx.message.add_reaction('\\U0001f44d')", "def drop():\n drop_collection()", "def drop(self):\n\t\tdrop_model(self.name, self.cursor, print_info = False)", "def test_delete_root(self):\n pass", "def op_delete(self, args):\n stack_level = 0\n if args != None:\n stack_level = int(args[0])\n self.require_stack(stack_level+1)\n if stack_level == None:\n self.stack.pop()\n else:\n self.stack.pop(-stack_level-1)", "def decompose_operation(self, operation: 'cirq.Operation'\n ) -> 'cirq.OP_TREE':\n return operation", "def _do_dropRD(opts, rdId, selectedIds=()):\n\ttry:\n\t\trd = api.getReferencedElement(rdId, forceType=api.RD)\n\texcept api.NotFoundError:\n\t\tif opts.force:\n\t\t\trd = None\n\t\telse:\n\t\t\traise\n\n\twith base.AdhocQuerier(base.getWritableAdminConn) as querier:\n\t\tif rd is not None:\n\t\t\tif opts.dropAll:\n\t\t\t\tdds = rd.dds\n\t\t\telse:\n\t\t\t\tdds = common.getPertainingDDs(rd, selectedIds)\n\n\t\t\tparseOptions = api.getParseOptions(systemImport=opts.systemImport)\n\n\t\t\tfor dd in dds:\n\t\t\t\tapi.Data.drop(dd, connection=querier.connection, \n\t\t\t\t\tparseOptions=parseOptions)\n\n\t\t\tif not selectedIds or opts.dropAll:\n\t\t\t\tfrom gavo.registry import servicelist\n\t\t\t\tservicelist.cleanServiceTablesFor(rd, querier.connection)\n\t\t\t\ttap.unpublishFromTAP(rd, querier.connection)\n\n\t\t\ttry:\n\t\t\t\twith querier.connection.savepoint():\n\t\t\t\t\tquerier.query(\"drop schema %s\"%rd.schema)\n\t\t\texcept Exception, msg:\n\t\t\t\tapi.ui.notifyWarning(\"Cannot drop RD %s's schema %s because:\"\n\t\t\t\t\t\" %s.\"%(rd.sourceId, rd.schema, utils.safe_str(msg)))\n\n\t\telse:\n\t\t\t# If the RD doesn't exist any more, just manually purge it\n\t\t\t# from wherever it could have been mentioned.\n\t\t\tfor tableName in [\"dc.tablemeta\", \"tap_schema.tables\", \n\t\t\t\t\t\"tap_schema.columns\", \"tap_schema.keys\", \"tap_schema.key_columns\",\n\t\t\t\t\t\"dc.resources\", \"dc.interfaces\", \"dc.sets\", \"dc.subjects\",\n\t\t\t\t\t\"dc.authors\", \"dc.res_dependencies\"]:\n\t\t\t\tif querier.tableExists(tableName):\n\t\t\t\t\tquerier.query(\n\t\t\t\t\t\t\"delete from %s where sourceRd=%%(sourceRD)s\"%tableName,\n\t\t\t\t\t\t{\"sourceRD\": rdId})\n\n\t\trestoreObscore(querier.connection)", "def remove_aggregation(self):\n\n # delete associated metadata and map xml document\n istorage = self.resource.get_irods_storage()\n if istorage.exists(self.metadata_file_path):\n istorage.delete(self.metadata_file_path)\n if istorage.exists(self.map_file_path):\n istorage.delete(self.map_file_path)\n\n # find if there is a parent aggregation - files in this (self) aggregation\n # need to be added to parent if exists\n parent_aggr = self.get_parent()\n\n res_files = []\n res_files.extend(self.files.all())\n\n # first need to set the aggregation for each of the associated resource files to None\n # so that deleting the aggregation (logical file) does not cascade to deleting of\n # resource files associated with the aggregation\n for res_file in self.files.all():\n res_file.logical_file_content_object = None\n res_file.save()\n\n # delete logical file (aggregation) first 
then delete the associated metadata file object\n # deleting the logical file object will not automatically delete the associated\n # metadata file object\n metadata = self.metadata if self.has_metadata else None\n self.delete()\n\n if metadata is not None:\n # this should also delete on all metadata elements that have generic relations with\n # the metadata object\n metadata.delete()\n\n # make all the resource files of this (self) aggregation part of the parent aggregation\n if parent_aggr is not None:\n for res_file in res_files:\n parent_aggr.add_resource_file(res_file)\n\n # need to regenerate the xml files for the parent so that the references to this deleted aggregation\n # can be removed from the parent xml files - so need to set the parent aggregation metadata to dirty\n parent_aggr.set_metadata_dirty()\n\n post_remove_file_aggregation.send(\n sender=self.__class__,\n resource=self.resource,\n res_files=self.files.all()\n )\n\n self.resource.setAVU(\"bag_modified\", True)\n self.resource.setAVU('metadata_dirty', 'true')", "def delete_operation(\n self,\n ) -> Callable[[operations_pb2.DeleteOperationRequest], None]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"delete_operation\" not in self._stubs:\n self._stubs[\"delete_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/DeleteOperation\",\n request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,\n response_deserializer=None,\n )\n return self._stubs[\"delete_operation\"]", "def drop(self, request, pk=None):\n try:\n mission = models.Mission.objects.get(pk=pk)\n except models.Mission.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n objective_player_relations = models.ObjectivePlayer.objects.filter(objective__mission_id=mission.id)\n\n for relation in objective_player_relations:\n relation.delete()\n\n return Response(status=status.HTTP_200_OK)", "def delete(self, tree_path):\n\t\traise NotImplementedError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resets the state of an operation and its children recursively to 0 (PENDING). The operation is identified by a given operationId.
def retry_operation(cls,operation_id):
    db = cls._core.get_db()

    stmnt = "SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS = 2 ;"
    cur = db.query(cls._core,stmnt,(operation_id,))
    for row in cur.fetchallmap():
        cls.retry_operation(row["OPE_ID"])

    stmnt = "UPDATE OPERATIONS SET OPE_STATUS = 0 WHERE OPE_ID = ? AND OPE_STATUS = 2 ;"
    db.query(cls._core,stmnt,(operation_id,),commit=True)
[ "def cancel_operation(cls,operation_id):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS = 0 ;\"\n cur = db.query(cls._core,stmnt,(operation_id,))\n for row in cur.fetchallmap():\n cls.cancel_operation(row[\"OPE_ID\"])\n\n stmnt = \"DELETE FROM OPERATIONS WHERE OPE_ID = ? AND OPE_STATUS = 0 ;\"\n db.query(cls._core,stmnt,(operation_id,),commit=True)", "def reset_mcts(self, root_state: np.ndarray) -> None:\n self.mcts.root_node = None\n self.mcts.root_state = root_state", "def resetOperationCount():\n global _operationCount\n _countLock.acquire()\n try:\n _operationCount = 0\n finally:\n _countLock.release()", "def drop_operation(cls,operation_id):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS IN (0, 2) ;\"\n cur = db.query(cls._core,stmnt,(operation_id,))\n for row in cur.fetchallmap():\n cls.drop_operation(row[\"OPE_ID\"])\n\n stmnt = \"DELETE FROM OPERATIONS WHERE OPE_ID = ? AND OPE_STATUS IN (0, 2) ;\"\n db.query(cls._core,stmnt,(operation_id,),commit=True)", "def reset_tree() -> None:\n global task_tree\n task_tree = TaskTreeNode(NoOperation())\n task_tree.start_time = datetime.datetime.now()\n task_tree.status = TaskStatus.RUNNING", "def reset_tree(self):\n self.root = None\n self.action = None\n self.dist_probability = None", "def process_children(cls, operation):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID, OPE_TYPE FROM OPERATIONS WHERE OPE_OPE_PARENT = ? ORDER BY OPE_INVOKED ;\"\n stmnt_lock = \"UPDATE OPERATIONS SET OPE_STATUS = 1 WHERE OPE_ID = ? ;\"\n cur = db.query(cls._core,stmnt,(operation.get_id(),))\n for row in cur.fetchallmap():\n child_operation = cls.restore_operation(row)\n db.query(cls._core,stmnt_lock,(child_operation.get_id(),),commit=True)\n try:\n cls.process_children(child_operation)\n child_operation.do_workload()\n except Exception,e:\n stmnt_err = \"UPDATE OPERATIONS SET OPE_STATUS = 2 WHERE OPE_ID = ? ;\"\n db.query(cls._core,stmnt_err,(int(row[\"OPE_ID\"]),),commit=True)\n #TODO GENERATE ERROR IN LOG\n raise e\n stmnt_delete = \"DELETE FROM OPERATIONS WHERE OPE_ID = ?;\"\n db.query(cls._core,stmnt_delete,(child_operation.get_id(),),commit=True)", "def reset(self):\n self.current_branch = self.root", "def setOperationId(self, opid) :\n self.operation_id = opid", "def operation_id(self, operation_id):\n\n self._operation_id = operation_id", "def _ExecuteAll(self, operation_id=None):\r\n self._requery = False\r\n\r\n results = yield gen.Task(Lock.TryAcquire,\r\n self._client,\r\n LockResourceType.Operation,\r\n str(self._user_id),\r\n resource_data=operation_id,\r\n detect_abandonment=True)\r\n self._lock, status = results.args\r\n\r\n if status == Lock.FAILED_TO_ACQUIRE_LOCK:\r\n # Another server has the lock, so can't wait synchronously for the operations to complete.\r\n # TODO(Andy): We could poll the operations table if we want to support this.\r\n for operation_id in self._sync_cb_map.keys():\r\n self._InvokeSyncCallbacks(operation_id, CannotWaitError,\r\n 'Cannot wait for the operation to complete, because another server '\r\n 'owns the operation lock.')\r\n return\r\n\r\n try:\r\n next_ops = None\r\n if status == Lock.ACQUIRED_ABANDONED_LOCK and self._lock.resource_data is not None:\r\n # Execute the operation stored in lock.resource_data if it still exists. 
It is important\r\n # to continue with whatever operation was currently running when the abandon occurred.\r\n # This is because that operation may have only been partly complete.\r\n op = yield gen.Task(Operation.Query,\r\n self._client,\r\n self._user_id,\r\n self._lock.resource_data,\r\n col_names=None,\r\n must_exist=False,\r\n consistent_read=True)\r\n next_ops = [op]\r\n\r\n last_op_id = None\r\n while True:\r\n if next_ops is None:\r\n # Get 10 ops at a time, looking for one that is not in quarantine.\r\n # Use consistent reads, in order to avoid reading already deleted operations. We've\r\n # seen cases where an op runs, then deletes itself, but then an inconsistent read\r\n # gets an old version that hasn't yet been deleted and re-runs it.\r\n next_ops = yield gen.Task(Operation.RangeQuery,\r\n self._client,\r\n self._user_id,\r\n range_desc=None,\r\n limit=10,\r\n col_names=None,\r\n excl_start_key=last_op_id,\r\n consistent_read=True)\r\n if len(next_ops) == 0:\r\n # No more operations to process.\r\n break\r\n\r\n for op in next_ops:\r\n # Run the op if it is not in quarantine or if it's no longer in backoff.\r\n if not op.quarantine or not op.IsBackedOff():\r\n yield self._ExecuteOp(op)\r\n\r\n # Look for next op to run; always run earliest op possible.\r\n last_op_id = None\r\n break\r\n else:\r\n # Skip quarantined operation.\r\n logging.info('queried quarantined operation \"%s\", user %d backed off for %.2fs; skipping...' %\r\n (op.operation_id, op.user_id, op.backoff - time.time()))\r\n last_op_id = op.operation_id\r\n\r\n next_ops = None\r\n finally:\r\n # Release the operation lock.\r\n yield gen.Task(self._lock.Release, self._client)\r\n\r\n if self._lock.acquire_failures is not None:\r\n # Another caller tried to acquire the lock, so there may be more operations available.\r\n logging.info('other servers tried to acquire lock \"%s\"; there may be more operations pending' % self._lock)\r\n self._requery = True", "def update_tree(self, action):\n child_actions = [child.action for child in self.root.children]\n if action in child_actions:\n self.root = self.root.children[child_actions.index(action)]\n self.root.parent = None\n else:\n self.root = MCTSNode(None, None)", "def _reset_traversal_state(self):\n for n in self.nodes.values():\n n.reset_traversal_state()", "def reset(self):\n # print \"Node \" + self.name_ + \" resetting.\"\n self.reset_self()\n for C in self.children_:\n C.reset()", "def operation_state(self, operation_state):\n\n self._operation_state = operation_state", "def reset(self):\n self.state = EvaluationState.ready\n\n for child in self.children:\n if hasattr(child, \"reset\"):\n child.reset()", "def reset_tree(self):\n for i in self.tree.get_children():\n self.tree.delete(i)", "def __reset_aggregation_state(self):\n with self.__state_lock:\n with self.__resulting_model_lock:\n with self.__participants_that_dont_need_model_lock:\n self.__previous_aggregation_ids.add(self.__active_aggregation_id)\n self.__participants_that_dont_need_model = set()\n self.__num_nodes = None\n self.__resulting_model = None\n self.__active_aggregation_id = None\n self.__partial_model_queue_waiting_list = []\n print(f'\\t[{self.address}]: Active aggregation id set to {self.__active_aggregation_id}')\n self.__state = ClientState.NO_JOB\n with self.__aggregation_model_queues_lock:\n self.__aggregation_model_queues = None", "def reset(self, id: Optional[Union[int, List[int]]] = None):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cancels an Operation, identified by its Operation Id, and its children recursively. Cancel deletes the Operation from the Database.
def cancel_operation(cls,operation_id):
    db = cls._core.get_db()

    stmnt = "SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS = 0 ;"
    cur = db.query(cls._core,stmnt,(operation_id,))
    for row in cur.fetchallmap():
        cls.cancel_operation(row["OPE_ID"])

    stmnt = "DELETE FROM OPERATIONS WHERE OPE_ID = ? AND OPE_STATUS = 0 ;"
    db.query(cls._core,stmnt,(operation_id,),commit=True)
[ "def drop_operation(cls,operation_id):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS IN (0, 2) ;\"\n cur = db.query(cls._core,stmnt,(operation_id,))\n for row in cur.fetchallmap():\n cls.drop_operation(row[\"OPE_ID\"])\n\n stmnt = \"DELETE FROM OPERATIONS WHERE OPE_ID = ? AND OPE_STATUS IN (0, 2) ;\"\n db.query(cls._core,stmnt,(operation_id,),commit=True)", "def cancel_operation(\n self,\n ) -> Callable[[operations_pb2.CancelOperationRequest], None]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"cancel_operation\" not in self._stubs:\n self._stubs[\"cancel_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/CancelOperation\",\n request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,\n response_deserializer=None,\n )\n return self._stubs[\"cancel_operation\"]", "def cancel(self, order_id: int):", "def do_cancel(order):\r\n self.gox.cancel(order.oid)", "def _do_cancel(self):\r\n\r\n def do_cancel(order):\r\n \"\"\"cancel a single order\"\"\"\r\n self.gox.cancel(order.oid)\r\n\r\n if not len(self.items):\r\n return\r\n if not len(self.selected):\r\n order = self.items[self.item_sel]\r\n do_cancel(order)\r\n else:\r\n for order in self.selected:\r\n do_cancel(order)", "def cancel(self):\n self.session.rollback()", "def cancelarOperacion(self):\n\n ok=QtGui.QMessageBox.warning(self,\"Aviso\",\"¿Desea cancelar la operación?\",\\\n QtGui.QMessageBox.Cancel | QtGui.QMessageBox.Ok)\n if ok == QtGui.QMessageBox.Ok:\n if self.factura != None:\n self.factura.anular()\n for detalle in self.lotesVentas:\n for loteVenta in self.lotesVentas[detalle]:\n loteVenta[0].aumentarCantidad(loteVenta[1])\n loteVenta[0].modificar(self.sesion)\n detalle.eliminarLotesAsociados(self.sesion)\n detalle.borrar(self.sesion)\n self.objectModified.emit()\n self.limpiarVentana()", "def retry_operation(cls,operation_id):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS = 2 ;\"\n cur = db.query(cls._core,stmnt,(operation_id,))\n for row in cur.fetchallmap():\n cls.retry_operation(row[\"OPE_ID\"])\n\n stmnt = \"UPDATE OPERATIONS SET OPE_STATUS = 0 WHERE OPE_ID = ? AND OPE_STATUS = 2 ;\"\n db.query(cls._core,stmnt,(operation_id,),commit=True)", "def manager_cancel(execution_id, force, logger, client, tenant_name):\n if tenant_name:\n logger.info('Explicitly using tenant `{0}`'.format(tenant_name))\n logger.info('{0}Cancelling execution {1}'.format(\n 'Force-' if force else '', execution_id))\n client.executions.cancel(execution_id, force)\n logger.info(\n \"A cancel request for execution {0} has been sent. 
\"\n \"To track the execution's status, use:\\n\"\n \"cfy executions get {0}\".format(execution_id))", "def cancel(self):\n\n self.update()\n\n if self.status != WAIT_OPEN:\n return -1\n\n if MODE == TEST:\n\n if self.status == WAIT_OPEN and self._orders['open'] and self._orders['close'] is None:\n self._expose()\n self.status = CANCELED\n return 0\n\n return -1\n\n if self.status == WAIT_OPEN and self._orders['open'] and self._orders['close'] is None:\n\n try:\n if self._expose() != 0:\n self.status = ERROR\n return -1\n\n binance.cancel_order(id=self._orders['open']['id'], symbol=self.symbol)\n self.status = CANCELED\n\n except Exception as e:\n print(\"Error while trying to cancel the opening order\")\n print(e)\n return -1\n\n return 0", "def cancel(self):\n # We can only be cancelled if: \n # (1) There are no foreign threads blocking for us (flagged via self.uncancellable) AND\n # (2) our parent request (if any) is already cancelled AND\n # (3) all requests that are pending for this one are already cancelled\n with self._lock:\n cancelled = not self.uncancellable\n cancelled &= (self.parent_request is None or self.parent_request.cancelled)\n for r in self.pending_requests:\n cancelled &= r.cancelled\n\n self.cancelled = cancelled\n if cancelled:\n # Any children added after this point will receive our same cancelled status\n child_requests = self.child_requests\n self.child_requests = set()\n\n if self.cancelled:\n # Cancel all requests that were spawned from this one.\n for child in child_requests:\n child.cancel()", "def cancel(self):\n log.debug('({}) Cancel job'.format(self.name))\n os.system('condor_rm {}'.format(\" \".join(self._job_id.keys())))", "def cancel(self, orderid=None, transid=None):\n ##############\n # ASSERTIONS #\n ##############\n assert type(orderid) == str\n\n #################\n # BEGIN ROUTINE #\n #################\n\n c = self.credientials\n username = c.get(\"username\")\n password = c.get(\"password\")\n clientid = c.get(\"company\")\n\n document = XMLBuilder()\n elements = document.createElementsWithTextNodes(Name=username, Password=password, ClientId=clientid,\n Mode=\"P\", OrderId=orderid, Type=\"Void\", Currency=\"949\")\n document.appendListOfElementsToElement(document.root(), elements)\n if transid:\n element = document.createElementWithTextNode(\"TransId\", transid)\n document.root().appendChild(element)\n\n service = self.__connect()\n method = \"POST\"\n url = c.get(\"cancelOrderURL\")\n headers = {'Content-type': 'application/x-www-form-urlencoded'}\n xml = document.toxml()\n\n # If debug mode is enabled, dump the HTTP request!\n if self.debug:\n print \"HTTP Request\"\n print \"HOST : %s:%d%s\" % (service.host, service.port, url)\n print \"Header : %s\" % str(headers)\n print \"Request : %s\" % xml\n # end of debugging request\n\n body = urllib.urlencode({\"DATA\": xml})\n service.request(method=method, url=url, body=body, headers=headers)\n httpresponse = service.getresponse()\n response = httpresponse.read()\n service.close()\n self.raw_response = response\n\n # If debug mode is enabled, dump the HTTP response!\n if self.debug:\n print\n print \"HTTP Response\"\n print \"Header : %s\" % str(httpresponse.getheaders())\n print \"Status : %d\" % httpresponse.status\n print \"Response : %s\" % response\n # end of debugging response\n\n\n ###################\n # PARSING PROCESS #\n ###################\n try:\n response_utf = minidom.parseString(response).toxml(\"utf-8\")\n orderid = XMLBuilder.get_data(response_utf, \"OrderId\")\n groupid = 
XMLBuilder.get_data(response_utf, \"GroupId\")\n transid = XMLBuilder.get_data(response_utf, \"TransId\")\n resp = XMLBuilder.get_data(response_utf, \"Response\")\n return_code = XMLBuilder.get_data(response_utf, \"ProcReturnCode\")\n err_msg = XMLBuilder.get_data(response_utf, \"ErrMsg\")\n host_msg = XMLBuilder.get_data(response_utf, \"HOSTMSG\")\n host_ref_num = XMLBuilder.get_data(response_utf, \"HostRefNum\")\n auth_code = XMLBuilder.get_data(response_utf, \"AuthCode\")\n trx_date = XMLBuilder.get_data(response_utf, \"TRXDATE\")\n\n try:\n result = not (True and int(return_code)) or False\n except:\n result = False\n\n try:\n trx_date = datetime.datetime.strptime(trx_date.split(\".\")[0], \"%Y-%m-%d %H:%M:%S\")\n except:\n pass\n\n _dict = {\"orderid\": orderid, \"transid\": transid, \"groupid\": groupid, \"response\": resp, \"error_msg\": err_msg,\n \"host_msg\": host_msg, \"host_ref_num\": host_ref_num, \"return_code\": return_code,\n \"auth_code\": auth_code,\n \"transaction_time\": trx_date}\n\n return result, _dict\n except Exception, detail:\n return False, {\"exception\": str(detail)}\n ###############\n # END ROUTINE #\n ###############", "def cancel_operation(self):\n # <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>\n self.proceed = False\n self.entry_view.destroy()", "def futures_cancel_order(self, **params):\n return self._request_futures_api('delete', 'order', True, data=params)", "def cancelActions(self, context):\n status = True\n\n if context.actionGroupDepth == 0:\n return status\n\n interface = self.currentManager()._getInterface()\n status = interface.cancelTransaction(context.managerInterfaceState)\n\n context.actionGroupDepth = 0\n\n return status", "def delete_operation(\n self,\n ) -> Callable[[operations_pb2.DeleteOperationRequest], None]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"delete_operation\" not in self._stubs:\n self._stubs[\"delete_operation\"] = self.grpc_channel.unary_unary(\n \"/google.longrunning.Operations/DeleteOperation\",\n request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,\n response_deserializer=None,\n )\n return self._stubs[\"delete_operation\"]", "def process_children(cls, operation):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID, OPE_TYPE FROM OPERATIONS WHERE OPE_OPE_PARENT = ? ORDER BY OPE_INVOKED ;\"\n stmnt_lock = \"UPDATE OPERATIONS SET OPE_STATUS = 1 WHERE OPE_ID = ? ;\"\n cur = db.query(cls._core,stmnt,(operation.get_id(),))\n for row in cur.fetchallmap():\n child_operation = cls.restore_operation(row)\n db.query(cls._core,stmnt_lock,(child_operation.get_id(),),commit=True)\n try:\n cls.process_children(child_operation)\n child_operation.do_workload()\n except Exception,e:\n stmnt_err = \"UPDATE OPERATIONS SET OPE_STATUS = 2 WHERE OPE_ID = ? ;\"\n db.query(cls._core,stmnt_err,(int(row[\"OPE_ID\"]),),commit=True)\n #TODO GENERATE ERROR IN LOG\n raise e\n stmnt_delete = \"DELETE FROM OPERATIONS WHERE OPE_ID = ?;\"\n db.query(cls._core,stmnt_delete,(child_operation.get_id(),),commit=True)", "def cancel(self):\n if not self.parent_node.is_job:\n return\n\n # First perform clean operation\n self.clean()\n\n self.winstance.send_event('Cancelling job..')\n result = self.winstance.execute_operation('hpc.interfaces.'\n 'lifecycle.cancel',\n kwargs={\"name\": self.name})\n self.winstance.send_event('.. 
job canceled')\n result.task.wait_for_terminated()\n\n self._status = 'CANCELLED'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Restores an Operation object stored in the database by a Dataset consisting of
def restore_operation(cls, operation_record):
    classname = operation_record["OPE_TYPE"]
    module = "" #TODO Implement modulename from database if Operation belongs to Module
    is_operation_of_module = False
    exec """
try:
    type(%(class)s)
except NameError,e:
    is_operation_of_module = True"""%{'class':classname}
    if is_operation_of_module:
        exec """
from %(module)s import %(class)s
operation = %(class)s(cls._core)"""%{'class':classname,'module':module}
    else:
        exec """
operation = %(class)s(cls._core)"""%{'class':classname}
    operation.set_id(operation_record['OPE_ID'])
    db = cls._core.get_db()
    stmnt = "SELECT OPD_KEY, OPD_VALUE, OPD_TYPE FROM OPERATIONDATA WHERE OPD_OPE_ID = ? ;"
    cur = db.query(cls._core,stmnt,(operation_record["OPE_ID"],))
    for row in cur.fetchallmap():
        val = row["OPD_VALUE"]
        exec """val = %s(val)"""%row["OPD_TYPE"]
        operation.set_value(row["OPD_KEY"], val)
    return operation
[ "def restore(self, checkpoint):\n raise NotImplementedError", "def mos_object(self):\n return self._restore_fn(*self._restore_args)", "def _restore_state(self, data, *, copy=True):\n if self._state_metadata['dims'] == self.rhs.dims[1]:\n state = Qobj(unstack_columns(data),\n **self._state_metadata, copy=False)\n else:\n state = Qobj(data, **self._state_metadata, copy=copy)\n\n return state", "def _restore_target(self):\n cur = self.target['connection'].cursor()\n delete_sql = 'delete from {0} where {1} = %s'.format(\n self.database['table'], self.database['column']\n )\n cur.execute(delete_sql, (self.database['filter'], ))\n if self.target['connection'].affected_rows(cur) != 1:\n cur.close()\n self.target['connection'].rollback()\n self._error('restore_target: expected to delete only one row')\n if self.target['new_insert']:\n logging.warning('deleting (not restoring) as target row was inserted from scratch')\n # there was no row here when we started the script, so we just need to\n # delete what was inserted (done above)\n cur.close()\n self.target['connection'].commit()\n return\n ret = self.target['connection'].load(self.target['backup'], self.database['table'])\n if ret != 1:\n cur.close()\n self.target['connection'].rollback()\n self._error('restore_target: expected to load exactly one row')\n cur.close()\n self.target['connection'].commit()", "def _restore(self, recovery):\n\t\tfor record in recovery:\n\t\t\trecord[0].restore(record)", "def restore_data(self):\n self.R = self._Ro\n del self._Ro", "def restore(self, restore):\n self._restore = restore", "def restore(self, dataset: SomeResourceIds, **kwargs):\n data = kwargs\n if not isinstance(dataset, list):\n dataset = [dataset]\n\n data['datasets'] = dataset\n self._provider.post('restore-datasets', data=data, as_json=False)", "def persist(cls, dataset):\n return dataset", "def _restore_object(self, path: str, desired_storage_class: str, min_exp_days: int) -> None:", "def restore(self):\n\n self.brain.restore_checkpoint()", "def orchestration_restore(self, context, cancellation_context, saved_details):\r\n '''\r\n # The saved_details JSON will be defined according to the JSON Schema and is the same object returned via the\r\n # orchestration save function.\r\n # Example input:\r\n # {\r\n # \"saved_artifact\": {\r\n # \"artifact_type\": \"REPLACE_WITH_ARTIFACT_TYPE\",\r\n # \"identifier\": \"16_08_09 11_21_35_657000\"\r\n # },\r\n # \"resource_name\": \"some_resource\",\r\n # \"restore_rules\": {\r\n # \"requires_same_resource\": true\r\n # },\r\n # \"created_date\": \"2016-08-09T11:21:35.657000\"\r\n # }\r\n\r\n # The example code below just parses and prints the saved artifact identifier\r\n saved_details_object = json.loads(saved_details)\r\n return saved_details_object[u'saved_artifact'][u'identifier']\r\n '''\r\n pass", "def restore(self, filename,dbname=None):\n #self.dropDb(self.dbname)\n #self.createDb(self.dbname)\n self.adapter.restore(filename,dbname=dbname)", "def _rollback(self, name: str, orig_value: Union[Metadata, Dict], operation: str, exception: Exception):\n self.log.debug(f\"Rolling back metadata operation '{operation}' for instance '{name}' due to: {exception}\")\n if operation == \"create\": # remove the instance, orig_value is the newly-created instance.\n if isinstance(orig_value, Metadata):\n orig_value = orig_value.to_dict()\n self.metadata_store.delete_instance(orig_value)\n elif operation == \"update\": # restore original as an update\n if isinstance(orig_value, dict):\n orig_value = 
Metadata.from_dict(self.schemaspace, orig_value)\n self.metadata_store.store_instance(name, orig_value.prepare_write(), for_update=True)\n elif operation == \"delete\": # restore original as a create\n if isinstance(orig_value, dict):\n orig_value = Metadata.from_dict(self.schemaspace, orig_value)\n self.metadata_store.store_instance(name, orig_value.prepare_write(), for_update=False)\n self.log.warning(\n f\"Rolled back metadata operation '{operation}' for instance '{name}' due to \"\n f\"failure in post-processing method: {exception}\"\n )", "def __restoreBackup(self):\n pass #FIXME!!!", "def restore_from_save_data(self, data):\n try:\n parsed = json.loads(data)\n if parsed.get('version') != 1:\n raise InvalidSaveDataException('Unsupported save version.')\n #print('\"%s\" vs \"%s\"' % (parsed.get('checksum'), self._get_save_checksum()))\n if parsed.get('checksum') != self._get_save_checksum():\n raise InvalidSaveDataException('Unsupported save checksum.')\n \n # Preserve value of flags 2 (6.1.2)\n flags_2 = self.story.raw_data[Header.FLAGS_2]\n\n # Reset the story to initial values\n self.reset(restoring=True)\n\n # Restore state\n self.state = parsed['state']\n\n # Setup routines\n self.routines = []\n for routine in parsed['routines']:\n self.routines.append(Routine(self.story.raw_data, \n self.story.header.global_variables_address,\n 0,\n 0,\n 0,\n 0,\n 0,\n data=routine))\n # Restore memory\n mem = parsed['dynamic_memory']\n for idx in range(0,len(mem)):\n self.story.raw_data._raw_data[idx] = mem[idx] \n\n # Set the flags to the saved state\n self.story.raw_data[Header.FLAGS_2] = flags_2\n\n self.pc = parsed['pc']\n self._parse_buffer_addr = int(parsed.get('parse_buffer_addr','0'))\n self._text_buffer_addr = int(parsed.get('text_buffer_addr','0'))\n except IndexError as e:\n raise InvalidSaveDataException('Save data missing parameter: %s' % e)\n except ValueError:\n raise InvalidSaveDataException('File does not contain valid json')", "def restore(self, state: T):", "def restore(self, *args):\n if self._cluster:\n return self.execute(u'RESTORE', *args, shard_key=args[0])\n return self.execute(u'RESTORE', *args)", "def restore_database_snapshot(*args):\n return _ida_kernwin.restore_database_snapshot(*args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recursively executes the workloads of an Operation's child operations. It catches exceptions in the workloads, sets the OPE_STATUS to 2 (FAILED) if an exception is caught, then passes the exception on to the higher layer. If an Operation succeeds, its entry in the DB gets deleted.
def process_children(cls, operation):
    db = cls._core.get_db()

    stmnt = "SELECT OPE_ID, OPE_TYPE FROM OPERATIONS WHERE OPE_OPE_PARENT = ? ORDER BY OPE_INVOKED ;"
    stmnt_lock = "UPDATE OPERATIONS SET OPE_STATUS = 1 WHERE OPE_ID = ? ;"
    cur = db.query(cls._core,stmnt,(operation.get_id(),))
    for row in cur.fetchallmap():
        child_operation = cls.restore_operation(row)
        db.query(cls._core,stmnt_lock,(child_operation.get_id(),),commit=True)
        try:
            cls.process_children(child_operation)
            child_operation.do_workload()
        except Exception,e:
            stmnt_err = "UPDATE OPERATIONS SET OPE_STATUS = 2 WHERE OPE_ID = ? ;"
            db.query(cls._core,stmnt_err,(int(row["OPE_ID"]),),commit=True)
            #TODO GENERATE ERROR IN LOG
            raise e
        stmnt_delete = "DELETE FROM OPERATIONS WHERE OPE_ID = ?;"
        db.query(cls._core,stmnt_delete,(child_operation.get_id(),),commit=True)
[ "def retry_operation(cls,operation_id):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS = 2 ;\"\n cur = db.query(cls._core,stmnt,(operation_id,))\n for row in cur.fetchallmap():\n cls.retry_operation(row[\"OPE_ID\"])\n\n stmnt = \"UPDATE OPERATIONS SET OPE_STATUS = 0 WHERE OPE_ID = ? AND OPE_STATUS = 2 ;\"\n db.query(cls._core,stmnt,(operation_id,),commit=True)", "def cancel_operation(cls,operation_id):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS = 0 ;\"\n cur = db.query(cls._core,stmnt,(operation_id,))\n for row in cur.fetchallmap():\n cls.cancel_operation(row[\"OPE_ID\"])\n\n stmnt = \"DELETE FROM OPERATIONS WHERE OPE_ID = ? AND OPE_STATUS = 0 ;\"\n db.query(cls._core,stmnt,(operation_id,),commit=True)", "def process_next(cls):\n db = cls._core.get_db()\n configuration = cls._core.get_configuration()\n if os.path.exists(configuration.get_entry(\"core.webpath\")+\"/scv_operating.lck\"):\n return False\n lockfile = open(configuration.get_entry(\"core.webpath\")+\"/scv_operating.lck\",\"w\")\n lockfile.close()\n stmnt_lock = \"UPDATE OPERATIONS SET OPE_STATUS = 1 \\\n WHERE OPE_ID IN ( \\\n SELECT OPE_ID FROM OPERATIONS \\\n WHERE OPE_OPE_PARENT IS NULL AND OPE_STATUS = 0 \\\n AND OPE_INVOKED = ( \\\n SELECT MIN(OPE_INVOKED) FROM OPERATIONS \\\n WHERE OPE_OPE_PARENT IS NULL AND OPE_STATUS = 0) \\\n ) ;\"\n stmnt = \"SELECT OPE_ID, OPE_TYPE FROM OPERATIONS WHERE OPE_OPE_PARENT IS NULL AND OPE_STATUS = 1 ;\"\n db.query(cls._core,stmnt_lock,commit=True)\n cur = db.query(cls._core,stmnt)\n res = cur.fetchallmap()\n if len(res) > 0:\n operation = cls.restore_operation(res[0])\n try:\n cls.process_children(operation)\n operation.do_workload()\n except Exception, e:\n stmnt_err = \"UPDATE OPERATIONS SET OPE_STATUS = 2 WHERE OPE_ID = ? ;\"\n db.query(cls._core,stmnt_err,(operation.get_id(),),commit=True)\n error = StringIO()\n print_exc(None,error)\n cls._core.log(error.getvalue())\n ret = True\n else:\n ret = False\n stmnt_delete = \"DELETE FROM OPERATIONS WHERE OPE_STATUS = 1 ;\"\n db.query(cls._core,stmnt_delete,commit=True)\n db.commit()\n try:\n os.unlink(configuration.get_entry(\"core.webpath\")+\"/scv_operating.lck\")\n except OSError,e :\n raise OperationException(OperationException.get_msg(0))\n return ret", "def drop_operation(cls,operation_id):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS IN (0, 2) ;\"\n cur = db.query(cls._core,stmnt,(operation_id,))\n for row in cur.fetchallmap():\n cls.drop_operation(row[\"OPE_ID\"])\n\n stmnt = \"DELETE FROM OPERATIONS WHERE OPE_ID = ? AND OPE_STATUS IN (0, 2) ;\"\n db.query(cls._core,stmnt,(operation_id,),commit=True)", "def testFailedAbortableOp(self):\r\n def _OnQueryOrigOp(orig_op):\r\n # 3 base operations expected and no retries because the failed operation aborted.\r\n self._CheckCounters(3, 0)\r\n self.stop()\r\n\r\n def _OnSecondUploadOp(orig_op, op):\r\n \"\"\"Wait for the second operation and on completion, query the\r\n original operation which is still failing. 
It should have a retry.\r\n \"\"\"\r\n Operation.WaitForOp(self._client, op.user_id, op.operation_id,\r\n partial(_OnQueryOrigOp, orig_op))\r\n\r\n def _OnFirstUpload(orig_op):\r\n self._UploadPhotoOperation(orig_op.user_id, orig_op.device_id, 2,\r\n partial(_OnSecondUploadOp, orig_op))\r\n\r\n def _OnFirstUploadOp(orig_op, op):\r\n Operation.WaitForOp(self._client, op.user_id, op.operation_id,\r\n partial(_OnFirstUpload, orig_op))\r\n\r\n def _OnCreateOp(orig_op):\r\n self._UploadPhotoOperation(orig_op.user_id, orig_op.device_id, 1,\r\n partial(_OnFirstUploadOp, orig_op))\r\n\r\n self._CreateBadOperation(self._user.user_id, self._mobile_dev.device_id, _OnCreateOp)", "def testFailedRetriableOp(self):\r\n def _OnQueryOrigOp(orig_op):\r\n retry_count = orig_op.attempts - 1\r\n # 3 base operations expected plus 1 photo upload operation as part of _CreateBlockedOperation().\r\n self._CheckCounters(3 + 1 + retry_count, retry_count)\r\n self.stop()\r\n\r\n def _OnSecondUploadOp(orig_op, op):\r\n \"\"\"Wait for the second operation and on completion, query the\r\n original operation which is still failing. It should have a retry.\r\n \"\"\"\r\n Operation.WaitForOp(self._client, op.user_id, op.operation_id,\r\n partial(Operation.Query, self._client, orig_op.user_id,\r\n orig_op.operation_id, None, _OnQueryOrigOp))\r\n\r\n def _OnFirstUpload(orig_op):\r\n self._UploadPhotoOperation(orig_op.user_id, orig_op.device_id, 2,\r\n partial(_OnSecondUploadOp, orig_op))\r\n\r\n def _OnFirstUploadOp(orig_op, op):\r\n Operation.WaitForOp(self._client, op.user_id, op.operation_id,\r\n partial(_OnFirstUpload, orig_op))\r\n\r\n def _OnCreateOp(orig_op):\r\n \"\"\"Set the operation's quarantine boolean to true and update.\"\"\"\r\n orig_op.quarantine = 1\r\n orig_op.Update(self._client, partial(self._UploadPhotoOperation,\r\n orig_op.user_id, orig_op.device_id, 1,\r\n partial(_OnFirstUploadOp, orig_op)))\r\n\r\n self._CreateBlockedOperation(self._user.user_id, self._mobile_dev.device_id, _OnCreateOp)", "def testNestedOpError(self):\r\n @gen.coroutine\r\n def _InnerMethod(client):\r\n self._method_count += 1\r\n if self._method_count < 8:\r\n raise Exception('permanent error')\r\n\r\n @gen.coroutine\r\n def _OuterMethod(client):\r\n self._method_count += 1\r\n if self._method_count < 8:\r\n yield Operation.CreateNested(client, '_InnerMethod', {})\r\n self.assertEqual(Operation.GetCurrent().quarantine, 1)\r\n\r\n # Create custom OpManager and make it the current instance for duration of test.\r\n op_mgr = self._CreateOpManager(handlers=[_OuterMethod, _InnerMethod])\r\n OpManager.SetInstance(op_mgr)\r\n\r\n outer_op = self._CreateTestOp(user_id=1, handler=_OuterMethod)\r\n op_mgr.MaybeExecuteOp(self._client, 1, None)\r\n\r\n # Now run failed ops (they should eventually succeed due to method_count < 8 checks) and ensure\r\n # that ops complete.\r\n while len(self._RunAsync(Operation.RangeQuery, self._client, 1, None, None, None)) != 0:\r\n pass\r\n\r\n self.assertEqual(self._method_count, 9)", "def test_deletion(self):\n\n self.logger.info(\"Testing deletion tree from {}\".format(self))\n\n self.error_for_state()\n self.error_for_file_state()\n self.error_for_dependents()", "def test_doesnt_delete_parent_cascade(self):\n try:\n raise ValueError(\"This is a test\")\n except ValueError as e:\n test_exception = e\n\n location = (\"iamrole\", \"testing\", \"us-west-2\", \"testrole\")\n store_exception(\"tests\", location, test_exception)\n\n exc = ExceptionLogs.query.all()\n db.session.delete(exc[0])\n\n 
db.session.commit()\n\n assert len(Item.query.filter(Item.name == \"testrole\").all()) == 1\n assert len(Technology.query.filter(Technology.name == \"iamrole\").all()) == 1\n assert len(Account.query.filter(Account.name == \"testing\").all()) == 1", "def test_safe_child_deletion_cascade(self):\n try:\n raise ValueError(\"This is a test\")\n except ValueError as e:\n test_exception = e\n\n location = (\"iamrole\", \"testing\", \"us-west-2\", \"testrole\")\n store_exception(\"tests\", location, test_exception)\n\n db.session.delete(self.item)\n db.session.commit()\n\n exc = ExceptionLogs.query.all()\n assert len(exc) == 0\n\n assert len(Item.query.filter(Item.name == \"testrole\").all()) == 0\n assert len(Technology.query.filter(Technology.name == \"iamrole\").all()) == 1\n assert len(Account.query.filter(Account.name == \"testing\").all()) == 1", "def pass_down(self,operation):\n\n # Call if callable, otherwise do an exec.\n if callable(operation):\n operation()\n else:\n exec(operation)\n\n for child in self.children:\n child.pass_down(operation)", "def _ExecuteAll(self, operation_id=None):\r\n self._requery = False\r\n\r\n results = yield gen.Task(Lock.TryAcquire,\r\n self._client,\r\n LockResourceType.Operation,\r\n str(self._user_id),\r\n resource_data=operation_id,\r\n detect_abandonment=True)\r\n self._lock, status = results.args\r\n\r\n if status == Lock.FAILED_TO_ACQUIRE_LOCK:\r\n # Another server has the lock, so can't wait synchronously for the operations to complete.\r\n # TODO(Andy): We could poll the operations table if we want to support this.\r\n for operation_id in self._sync_cb_map.keys():\r\n self._InvokeSyncCallbacks(operation_id, CannotWaitError,\r\n 'Cannot wait for the operation to complete, because another server '\r\n 'owns the operation lock.')\r\n return\r\n\r\n try:\r\n next_ops = None\r\n if status == Lock.ACQUIRED_ABANDONED_LOCK and self._lock.resource_data is not None:\r\n # Execute the operation stored in lock.resource_data if it still exists. It is important\r\n # to continue with whatever operation was currently running when the abandon occurred.\r\n # This is because that operation may have only been partly complete.\r\n op = yield gen.Task(Operation.Query,\r\n self._client,\r\n self._user_id,\r\n self._lock.resource_data,\r\n col_names=None,\r\n must_exist=False,\r\n consistent_read=True)\r\n next_ops = [op]\r\n\r\n last_op_id = None\r\n while True:\r\n if next_ops is None:\r\n # Get 10 ops at a time, looking for one that is not in quarantine.\r\n # Use consistent reads, in order to avoid reading already deleted operations. We've\r\n # seen cases where an op runs, then deletes itself, but then an inconsistent read\r\n # gets an old version that hasn't yet been deleted and re-runs it.\r\n next_ops = yield gen.Task(Operation.RangeQuery,\r\n self._client,\r\n self._user_id,\r\n range_desc=None,\r\n limit=10,\r\n col_names=None,\r\n excl_start_key=last_op_id,\r\n consistent_read=True)\r\n if len(next_ops) == 0:\r\n # No more operations to process.\r\n break\r\n\r\n for op in next_ops:\r\n # Run the op if it is not in quarantine or if it's no longer in backoff.\r\n if not op.quarantine or not op.IsBackedOff():\r\n yield self._ExecuteOp(op)\r\n\r\n # Look for next op to run; always run earliest op possible.\r\n last_op_id = None\r\n break\r\n else:\r\n # Skip quarantined operation.\r\n logging.info('queried quarantined operation \"%s\", user %d backed off for %.2fs; skipping...' 
%\r\n (op.operation_id, op.user_id, op.backoff - time.time()))\r\n last_op_id = op.operation_id\r\n\r\n next_ops = None\r\n finally:\r\n # Release the operation lock.\r\n yield gen.Task(self._lock.Release, self._client)\r\n\r\n if self._lock.acquire_failures is not None:\r\n # Another caller tried to acquire the lock, so there may be more operations available.\r\n logging.info('other servers tried to acquire lock \"%s\"; there may be more operations pending' % self._lock)\r\n self._requery = True", "def test_child_deletion_cascade_check(self):\n try:\n raise ValueError(\"This is a test\")\n except ValueError as e:\n test_exception = e\n\n location = (\"iamrole\", \"testing\", \"us-west-2\", \"testrole\")\n store_exception(\"tests\", location, test_exception)\n\n assert len(self.item.exceptions) == 1\n assert len(self.account.exceptions) == 1\n assert len(self.technology.exceptions) == 1\n\n db.session.delete(self.item.exceptions[0])\n db.session.commit()\n\n exc = ExceptionLogs.query.all()\n assert len(exc) == 0\n\n assert len(Item.query.filter(Item.name == \"testrole\").all()) == 1\n assert len(Technology.query.filter(Technology.name == \"iamrole\").all()) == 1\n assert len(Account.query.filter(Account.name == \"testing\").all()) == 1\n\n assert len(self.item.exceptions) == 0\n assert len(self.account.exceptions) == 0\n assert len(self.technology.exceptions) == 0", "def _ExecuteOp(self, op):\r\n # If necessary, wait until back-off has expired before execution begins.\r\n if op.backoff is not None:\r\n yield gen.Task(IOLoop.current().add_timeout, op.backoff)\r\n\r\n # Enter execution scope for this operation, so that it can be accessed in OpContext, and so that op-specific\r\n # logging will be started.\r\n with OpContext.current().Enter(op):\r\n op_entry = self._op_map[op.method]\r\n op_args = json.loads(op.json)\r\n\r\n # If not already done, update the lock to remember the id of the op that is being run. In\r\n # case of server failure, the server that takes over this lock will know where to start.\r\n if self._lock.resource_data != op.operation_id:\r\n self._lock.resource_data = op.operation_id\r\n yield gen.Task(self._lock.Update, self._client)\r\n\r\n # Migrate the arguments to the current server message version, as the format in the operations\r\n # table may be out-dated. 
Remove the headers object from the message, since it's not an\r\n # expected argument to the method.\r\n op_message = message.Message(op_args)\r\n yield gen.Task(op_message.Migrate,\r\n self._client,\r\n migrate_version=message.MAX_MESSAGE_VERSION,\r\n migrators=op_entry.migrators)\r\n\r\n try:\r\n del op_args['headers']\r\n\r\n # Scrub the op args for logging in order to minimize personal information in the logs.\r\n scrubbed_op_args = op_args\r\n if op_entry.scrubber is not None:\r\n scrubbed_op_args = deepcopy(op_args)\r\n op_entry.scrubber(scrubbed_op_args)\r\n args_str = pprint.pformat(scrubbed_op_args)\r\n\r\n logging.info('EXECUTE: user: %d, device: %d, op: %s, method: %s:%s%s' %\r\n (op.user_id, op.device_id, op.operation_id, op.method,\r\n ('\\n' if args_str.find('\\n') != -1 else ' '), args_str))\r\n\r\n _ops_per_min.increment()\r\n if op.attempts > 0:\r\n _retries_per_min.increment()\r\n\r\n # Starting operation from beginning, so reset modified db state in the\r\n # OpMgrDBClient wrapper so we'll know if any modifications happened before an abort.\r\n self._client.ResetDBModified()\r\n\r\n # Actually execute the operation by invoking its handler method.\r\n results = yield gen.Task(op_entry.handler, self._client, **op_args)\r\n\r\n # Invokes synchronous callback if applicable.\r\n elapsed_secs = time.time() - op.timestamp\r\n logging.info('SUCCESS: user: %d, device: %d, op: %s, method: %s in %.3fs%s' %\r\n (op.user_id, op.device_id, op.operation_id, op.method, elapsed_secs,\r\n (': %s' % pprint.pformat(results) if results else '')))\r\n _avg_op_time.add(elapsed_secs)\r\n\r\n # Notify any waiting for op to finish that it's now complete.\r\n self._InvokeSyncCallbacks(op.operation_id)\r\n\r\n # Delete the op, now that it's been successfully executed.\r\n yield self._DeleteOp(op)\r\n except StopOperationError:\r\n # Stop the current operation in order to run a nested operation.\r\n pass\r\n except FailpointError:\r\n # Retry immediately if the operation is retried due to a failpoint.\r\n type, value, tb = sys.exc_info()\r\n logging.warning('restarting op due to failpoint: %s (%d)', value.filename, value.lineno)\r\n except Exception:\r\n type, value, tb = sys.exc_info()\r\n\r\n # Notify any waiting for op to finish that it failed (don't even wait for retries).\r\n self._InvokeSyncCallbacks(op.operation_id, type, value, tb)\r\n\r\n # Check for abortable exceptions, but only on 1st attempt.\r\n if op.attempts == 0 and issubclass(type, _ABORTABLE_EXCEPTIONS):\r\n yield self._AbortOp(op, type, value, tb)\r\n else:\r\n initial_backoff = UserOpManager._INITIAL_BACKOFF_SECS\r\n if issubclass(type, _SMALLER_RETRY_EXCEPTIONS):\r\n initial_backoff = UserOpManager._SMALL_INITIAL_BACKOFF_SECS\r\n yield self._FailOp(op, type, value, tb, initial_backoff_secs=initial_backoff)", "def execute(self, ops, exceptions=[], delay=5, maxretries=3):\n retry_errors = [NFS4ERR_DELAY, NFS4ERR_GRACE]\n state_errors = [NFS4ERR_STALE_CLIENTID, NFS4ERR_BADSESSION,\n NFS4ERR_BADSLOT, NFS4ERR_DEADSESSION]\n while True:\n res = self.sess.compound(ops)\n if res.status == NFS4_OK or res.status in exceptions:\n return res\n elif res.status in retry_errors:\n if maxretries > 0:\n maxretries -= 1\n time.sleep(delay)\n else:\n log.error(\"Too many retries with DS %s\" % self.server)\n raise Exception(\"Dataserver communication retry error\")\n elif res.status in state_errors:\n self.disconnect()\n self.connect()\n else:\n log.error(\"Unhandled status %s from DS %s\" %\n (nfsstat4[res.status], self.server))\n raise 
Exception(\"Dataserver communication error\")", "def delete_import_operation(dbsession, operation: ImageImportOperation):\n logger.info(\"garbage collecting import operation: %s\", operation.uuid)\n\n obj_mgr = object_store.get_manager()\n failed = False\n uuid = operation.uuid\n\n for content in operation.contents:\n try:\n logger.debug(\n \"deleting import content digest %s of type %s for operation %s\",\n content.digest,\n content.content_type,\n operation.uuid,\n )\n obj_mgr.delete_document(\n userId=operation.account,\n bucket=content.content_storage_bucket,\n archiveid=content.content_storage_key,\n )\n dbsession.delete(content)\n logger.debug(\n \"deleted import content digest %s of type %s for operation %s successfully\",\n content.digest,\n content.content_type,\n operation.uuid,\n )\n except:\n logger.debug_exception(\n \"could not delete import content of type %s for operation %s with digest %s\",\n content.content_type,\n operation.uuid,\n content.digest,\n )\n failed = True\n\n if not failed:\n dbsession.delete(operation)\n else:\n return operation\n\n logger.info(\"garbage collection of import operation %s complete\", uuid)\n return None", "def _executeOperation(self, request:CSERequest, reqRi:str) -> Result:\n\t\t# Execute the actual operation\n\t\trequest.args.operation == Operation.RETRIEVE and (operationResult := CSE.dispatcher.processRetrieveRequest(request, request.headers.originator)) is not None\n\t\trequest.args.operation == Operation.CREATE and (operationResult := CSE.dispatcher.processCreateRequest(request, request.headers.originator)) is not None\n\t\trequest.args.operation == Operation.UPDATE and (operationResult := CSE.dispatcher.processUpdateRequest(request, request.headers.originator)) is not None\n\t\trequest.args.operation == Operation.DELETE and (operationResult := CSE.dispatcher.processDeleteRequest(request, request.headers.originator)) is not None\n\n\t\t# Retrieve the <request> resource\n\t\tif (res := CSE.dispatcher.retrieveResource(reqRi)).resource is None:\t\n\t\t\treturn Result(status=False) \t\t\t\t\t\t\t\t\t\t\t\t\t\t# No idea what we should do if this fails\n\t\treqres = res.resource\n\n\t\t# Fill the <request>\n\t\treqres['ors'] = {\t# operationResult\n\t\t\t'rsc'\t: operationResult.rsc,\n\t\t\t'rqi'\t: reqres.rid,\n\t\t\t'to'\t: request.id,\n\t\t\t'fr'\t: reqres.org,\n\t\t\t'ot'\t: reqres['mi/ot'],\n\t\t\t'rset'\t: reqres.et\n\t\t}\n\t\tif operationResult.rsc in [ RC.OK, RC.created, RC.updated, RC.deleted ] :\t\t\t# OK, created, updated, deleted -> resource\n\t\t\treqres['rs'] = RequestStatus.COMPLETED\n\t\t\tif operationResult.resource is not None:\n\t\t\t\treqres['ors/pc'] = operationResult.resource.asDict()\n\t\telse:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Error\n\t\t\treqres['rs'] = RequestStatus.FAILED\n\t\t\tif operationResult.dbg is not None:\n\t\t\t\treqres['ors/pc'] = { 'm2m:dbg' : operationResult.dbg }\n\n\t\t# Update in DB\n\t\treqres.dbUpdate()\n\n\t\treturn Result(resource=reqres, status=True)", "def rollback(self, stage, enodes, exception):", "def _run_par_doe(self, root):\n\n for case in self._get_case_w_nones(self._distrib_build_runlist()):\n if case is None: # dummy cases have case == None\n # must take part in collective Allreduce call\n any_proc_is_true(self._full_comm, False)\n metadata = None\n\n else: # case is not a dummy case\n metadata = self._prep_case(case, self.iter_count)\n\n terminate, exc = self._try_case(root, metadata)\n\n if any_proc_is_true(self._full_comm, terminate):\n if exc:\n if PY3:\n raise 
exc[0].with_traceback(exc[1], exc[2])\n else:\n # exec needed here since otherwise python3 will\n # barf with a syntax error :(\n exec('raise exc[0], exc[1], exc[2]')\n else:\n raise RuntimeError(\"an exception was raised by another MPI process.\")\n\n self._save_case(case, metadata)\n self.iter_count += 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the status of the next toplevel operation to 1 (ACTIVE). Fetches the next toplevel operation from the database and applies a FILESYSTEMLOCK! Which is /tmp/scv_operating.lck !!!
def process_next(cls):
    db = cls._core.get_db()
    configuration = cls._core.get_configuration()
    if os.path.exists(configuration.get_entry("core.webpath")+"/scv_operating.lck"):
        return False
    lockfile = open(configuration.get_entry("core.webpath")+"/scv_operating.lck","w")
    lockfile.close()
    stmnt_lock = "UPDATE OPERATIONS SET OPE_STATUS = 1 \
                    WHERE OPE_ID IN ( \
                      SELECT OPE_ID FROM OPERATIONS \
                      WHERE OPE_OPE_PARENT IS NULL AND OPE_STATUS = 0 \
                      AND OPE_INVOKED = ( \
                        SELECT MIN(OPE_INVOKED) FROM OPERATIONS \
                        WHERE OPE_OPE_PARENT IS NULL AND OPE_STATUS = 0) \
                    ) ;"
    stmnt = "SELECT OPE_ID, OPE_TYPE FROM OPERATIONS WHERE OPE_OPE_PARENT IS NULL AND OPE_STATUS = 1 ;"
    db.query(cls._core,stmnt_lock,commit=True)
    cur = db.query(cls._core,stmnt)
    res = cur.fetchallmap()
    if len(res) > 0:
        operation = cls.restore_operation(res[0])
        try:
            cls.process_children(operation)
            operation.do_workload()
        except Exception, e:
            stmnt_err = "UPDATE OPERATIONS SET OPE_STATUS = 2 WHERE OPE_ID = ? ;"
            db.query(cls._core,stmnt_err,(operation.get_id(),),commit=True)
            error = StringIO()
            print_exc(None,error)
            cls._core.log(error.getvalue())
        ret = True
    else:
        ret = False
    stmnt_delete = "DELETE FROM OPERATIONS WHERE OPE_STATUS = 1 ;"
    db.query(cls._core,stmnt_delete,commit=True)
    db.commit()
    try:
        os.unlink(configuration.get_entry("core.webpath")+"/scv_operating.lck")
    except OSError,e :
        raise OperationException(OperationException.get_msg(0))
    return ret
[ "def setPLOn(self):\n self.querier.setMsgHandler(DefaultMsgHandler(\"Set Programming Lock ON\"))\n return self.querier.queryext(0x20, 0x00, [0x00, 0x00, 0x00]);", "def arm_oplock_future(self):\n self.oplock_future = self.tree.session.client.oplock_break_future(self.file_id)", "def oplock_level(self):\n return self._oplock_level", "def lock_status(self):\n self._lock_status = True", "def open_labware_latch(self) -> None:", "def take_possession(self):\n self._claimfile = '%s.%s.%d' % (\n self._lockfile, socket.getfqdn(), os.getpid())\n # Wait until the linkcount is 2, indicating the parent has completed\n # the transfer.\n while self._linkcount != 2 or self._read() != self._claimfile:\n time.sleep(0.25)\n log.debug('took possession of the lock: %s', self._lockfile)", "def state_wait_enter(cfg, app, win):", "def show_swp_stat(self, image, data):\n\t\ttry:\n\t\t\tself.SWPStatLock.acquire(timeout=2)\n\t\texcept AlreadyLocked: pass\t\t\t\n\t\texcept LockFailed: pass\n\t\texcept LockTimeout: pass\n\t\telse:\n\t\t\t#print 'Got CPUStatlock for reading'\n\t\t\timage.set_from_file(self.SWPStatLock.path)\n\t\t\tself.SWPStatLock.release()\n\t\t\ttime.sleep(Quiet)", "def _wait_for_lockstate(self):\n vip = self.vmem_vip.basic\n\n opts1 = [ XGNode('container', 'string', self.container),\n XGNode('port', 'uint8', 1),\n XGNode('dev_id', 'string', self.device_id) ]\n\n opts2 = [ XGNode('container', 'string', self.container),\n XGNode('port', 'uint8', 2),\n XGNode('dev_id', 'string', self.device_id) ]\n\n for i in range(30):\n resp1 = vip.perform_action('/vshare/actions/vlock/lockstate', opts1)\n resp2 = vip.perform_action('/vshare/actions/vlock/lockstate', opts2)\n if resp1['message'][0] == '0' and resp2['message'][0]:\n break\n else:\n time.sleep(1)", "def request_key_lock_status() -> str:\n return \"STX\"", "def attempt_to_acquire_leader(self, permanent=False):", "def open_restaurent(self):\n print(\"\\nThe restaurent is currently open!\")", "def update_active(self):\n self.state = WAITING", "def retry_operation(cls,operation_id):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID FROM OPERATIONS WHERE OPE_OPE_PARENT = ? AND OPE_STATUS = 2 ;\"\n cur = db.query(cls._core,stmnt,(operation_id,))\n for row in cur.fetchallmap():\n cls.retry_operation(row[\"OPE_ID\"])\n\n stmnt = \"UPDATE OPERATIONS SET OPE_STATUS = 0 WHERE OPE_ID = ? AND OPE_STATUS = 2 ;\"\n db.query(cls._core,stmnt,(operation_id,),commit=True)", "def setOpenStatus(self, status):\r\n assert 0, 'Illegal function call: setOpenStatus(%s) for ' \\\r\n 'ART2TestScriptNode.' % status", "def Are_you_open2 (usr, doc): # [ ARE_YOU_OPEN2 ]\n if Semaphore [\"State\"] == S1:\n Semaphore [\"State\"] = S2\n send (OPEN, (usr, doc))\n else:\n send (CLOSED, (usr, doc))", "def start_session(self) -> None:\n self.get_remote_db()\n # print(\"b4 merge\\n\", self.db)\n self.db = self.merge_dbs()\n # print(\"b4 update\\n\", self.db)\n self.update_db_locale()\n self.local_files = self.calc_file_hashes()\n # print(\"after update\\n\", self.db)\n self.push_remote_db()", "def state_processing_enter(cfg, app, win):", "def lock_control(self):\n raise NotImplementedError('PlatformService: Implementation incomplete')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets this operation's values from module metadata
def set_values(self,module):
    if type(module) == dict:
        self.set_value("name",module["name"])
        self.set_value("hrname",module["hrname"])
        self.set_value("version_major",module["version_major"])
        self.set_value("version_minor",module["version_minor"])
        self.set_value("revision",module["revision"])
        if module.has_key("signature"):
            self.set_value("signature",module["signature"])
    elif module.__class__.__name__ == "Module":
        pass #TODO IMPLEMENT / DISCUSS AFTER IMPLEMENTING MODULE-SUBSYSTEM
[ "def set_metadata(self, data):\r\n pass", "def configure_operation(self, operation):\n operation.axis = self.axis\n operation.axisMatrix = self.axisMatrix", "def applyMeta(self):\n self.interface.setMeta('kernel', 'opencl r%s' % self.REVISION)\n self.interface.setMeta('device',\n self.device.name.replace('\\x00','').strip())\n self.interface.setMeta('cores', self.device.max_compute_units)", "def setMetadata(self, metadata):\n pass", "def PopulateModuleMetadata(self, mod, mojom_file):\n mod.name = os.path.basename(mojom_file.file_name)\n mod.path = mojom_file.file_name\n mod.namespace = mojom_file.module_namespace\n if mojom_file.attributes:\n mod.attributes = {attr.key: attr.value for attr in mojom_file.attributes}", "def test_modify_metadata(self):\n pass", "def set_metadata(self, metadata):\n return self.client._perform_json(\n \"PUT\", \"/projects/%s/recipes/%s/metadata\" % (self.project_key, self.recipe_name),\n body=metadata)", "def __init__(self, operations = []):\n self.operations = operations", "def __init__(self, op, op_param_list, op_reg_list):\n self. operation = {\n 'op': op,\n 'op_param_list': op_param_list,\n 'op_reg_list': op_reg_list\n }", "def update_metadata(self):\n self.data[\"keywords\"] = self.repo.topics(self.data.get(\"keywords\", []))\n self.data[\"description\"] = self.data.get(\"description\") or self.repo.description\n self.data[\"codeRepository\"] = (\n self.data.get(\"codeRepository\") or self.repo.html_url\n )\n self.data[\"name\"] = self.data.get(\"name\") or self.repo.name\n self.data[\"issueTracker\"] = (\n self.data.get(\"issueTracker\") or self.repo.issues_url\n )\n self.data[\"license\"] = self.data.get(\"license\") or self.repo.license", "def _set_module_attributes(self, c):\r\n\r\n module = self._get_module_name(c)\r\n if hasattr(self, module):\r\n return\r\n else:\r\n setattr(self, module, ModuleInstance(module) )", "def adjust_custom_op_info(compute_op_info):\n py_module_path = compute_op_info[\"py_module_path\"]\n if os.path.isfile(py_module_path):\n py_module_path, file_name = os.path.split(py_module_path)\n module_name, _ = os.path.splitext(file_name)\n compute_op_info[\"py_module_path\"] = py_module_path\n compute_op_info[\"module_name\"] = module_name", "def __setattr__(self, name, value):\n if isinstance(value, torch.jit.ScriptModule):\n object.__setattr__(self, name, value)\n elif isinstance(value, FrameworkTensor):\n self.role.register_state_tensor(value)\n self.state_attributes[name] = value\n elif isinstance(value, FrameworkLayerModule):\n for param in value.parameters():\n self.role.register_state_tensor(param)\n self.state_attributes[name] = value\n else:\n object.__setattr__(self, name, value)", "def setMeta(self, axis, **args):\n self.meta[axis].update(args)\n self.stimParams.setMeta(axis, self.meta[axis])", "def set_metadata(self, key, val):\n \n self.metadata[key] = val", "def metadata(self, metadata):\n self._metadata = metadata", "def setOperationObject(self, operation):\r\n \r\n operation.execution_time = self.time\r\n operation.n_ras = self.n_ras\r\n operation.n_ra_cbs = self.n_ra_cbs\r\n operation.n_ra_buf_waits = self.n_ra_buf_waits\r\n operation.ra_buf_wait_time = self.ra_buf_wait_time\r\n operation.ra_data_latency = self.ra_data_latency\r\n operation.avg_ra_data_latency = self.avg_ra_data_latency\r\n operation.n_ra_out_of_seq = self.n_ra_out_of_seq", "def set_operation_mode(self, operation_mode):", "def setOpArgument(self, i, value):\n\t\traise Exception(\"Abstract method IOperation.setOpArgument not implemented in: \" + 
str(self))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an Array of ModuleOperationObjects that are currently listed in the queue
def get_currently_processed_modules(cls):
    db = cls._core.get_db()
    stmnt = "SELECT OPE_ID, OPE_OPE_PARENT, OPE_TYPE FROM OPERATIONS \
              WHERE OPE_TYPE = 'ModuleInstallOperation' \
              or OPE_TYPE = 'ModuleUninstallOperation' ;"
    cur = db.query(cls._core,stmnt);
    ret = []
    for row in cur.fetchallmap():
        ret.append(Operation.restore_operation(row).get_meta())
    return ret
[ "def to_array(self):\n\n return list(self.global_queue.queue)", "def operations(self):\r\n return self._operations_list", "def get_queue_list(self):\n return self.manager.get_queue_list()", "def _recent_commissions(self):\n\n ## we don't set up a queue, as there is a permanent one\n comm_queue = self.access_commission_stream()\n\n list_of_comm=list_of_execInformation()\n\n while not comm_queue.empty():\n MAX_WAIT_SECONDS = 5\n try:\n next_comm = comm_queue.get(timeout=MAX_WAIT_SECONDS)\n list_of_comm.append(next_comm)\n except queue.Empty:\n ## corner case where Q emptied since we last checked if empty at top of while loop\n pass\n\n ## note this could include duplicates and is a list\n return list_of_comm", "def listQueue(self, request):\n pass", "def qobjs(self) -> List[Union[QasmQobj, PulseQobj]]:\n return [mjob.qobj() for mjob in self._managed_jobs]", "def get_operations(self):\n raise NotImplementedError(\n 'operation get_operations(...) not yet implemented')", "def get_all_operations(self):\n raise NotImplementedError(\n 'operation get_all_operations(...) not yet implemented')", "def get_registered_jobs(self):\n with self.__lock:\n return list(self.__registered_jobs)", "def hbObjects(self):\r\n return self.__hbObjs", "def get_objects(self):\n\t\treturn self.__objects", "def _pull_batch_from_queue(self):\n rollout = self.explorer.queue.get( timeout = 600.0 )\n while not rollout.terminal:\n try: \n rollout.extend( self.explorer.queue.get_nowait() )\n except queue.Empty:\n break\n print(rollout.size())\n return rollout", "def get(self):\n with self.lock:\n return list(self.jobShapes)", "def operations(self):\n return self.properties.get('operations',\n EntityCollection(self.context, RichLongRunningOperation,\n ResourcePath(\"operations\", self.resource_path)))", "def get_enqueued_object_ids():\n object_ids = set()\n\n registry_types = (StartedJobRegistry, FailedJobRegistry)\n\n for queue_type in OBJECT_QUEUE_TYPES:\n queue = get_queue(queue_type)\n\n # Retrieve started and failed jobs\n for registry_type in registry_types:\n job_registry = registry_type(queue=queue)\n job_ids = job_registry.get_job_ids()\n\n for job_id in job_ids:\n object_id = job_id_to_object_id(job_id)\n if object_id is not None:\n object_ids.add(object_id)\n\n # Retrieve scheduled jobs\n for job_id in queue.get_job_ids():\n object_id = job_id_to_object_id(job_id)\n if object_id is not None:\n object_ids.add(object_id)\n\n return object_ids", "def get_queue():\n\n return multiprocessing.Queue()", "def _get_object_list(self, name):\n return getattr(self, name)", "def operations_per_joinpoint(self):\n ops = []\n current_ops = set()\n\n allocs = self.allocations\n # assumption: the shape of allocs is rectangular (i.e. each client contains the same number of elements)\n for idx in range(0, len(allocs[0])):\n for client in range(0, self.clients):\n task = allocs[client][idx]\n if isinstance(task, track.Task):\n current_ops.add(task.operation)\n elif isinstance(task, JoinPoint) and len(current_ops) > 0:\n ops.append(current_ops)\n current_ops = set()\n\n return ops", "def get_executed_jobs(self):\n with self.__lock:\n return list(self.__executed_jobs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute squarefree decomposition of the monic ``f`` in ``GF(q)[X]``. Notes ===== Uses a modified version of Musser's algorithm for squarefree decomposition of univariate polynomials over finite fields. References ==========
def _gf_sqf_list(self, f):
    domain = self.domain

    n, factors, p = 1, [], int(domain.characteristic)
    m = int(domain.order // p)

    while not f.is_ground:
        df = [f.diff(x) for x in self.gens]

        if any(_ for _ in df):
            g = f
            for q in df:
                g = self.gcd(g, q)

            h, f, i = f // g, g, 1

            while h != 1:
                g = self.gcd(f, h)
                h //= g

                if not h.is_ground:
                    factors.append((h, i*n))

                f //= g
                h = g
                i += 1

        n *= p

        g = self.zero
        for monom, coeff in f.items():
            g[tuple(_ // p for _ in monom)] = coeff**m
        f = g

    return factors
[ "def _squarefree_decomposition_univariate_polynomial(self, f):\n from sage.structure.factorization import Factorization\n if f.degree() == 0:\n return Factorization([], unit=f[0])\n if self.characteristic() != 0:\n raise NotImplementedError(\"square-free decomposition not implemented for this polynomial.\")\n\n factors = []\n cur = f\n f = [f]\n while cur.degree() > 0:\n cur = cur.gcd(cur.derivative())\n f.append(cur)\n\n g = []\n for i in range(len(f) - 1):\n g.append(f[i] // f[i+1])\n\n a = []\n for i in range(len(g) - 1):\n a.append(g[i] // g[i+1])\n a.append(g[-1])\n\n unit = f[-1]\n for i in range(len(a)):\n if a[i].degree() > 0:\n factors.append((a[i], i+1))\n else:\n unit = unit * a[i].constant_coefficient() ** (i + 1)\n\n return Factorization(factors, unit=unit, sort=False)", "def is_squarefree(self, f):\n if f.is_ground:\n return True\n g = f\n for x in self.gens:\n g = self.gcd(g, f.diff(x))\n if g.is_ground:\n return True\n return False", "def sqf_norm(self, f):\n domain = self.domain\n\n if not domain.is_AlgebraicField:\n raise DomainError(f'ground domain must be algebraic, got {domain}')\n\n new_ring = self.to_ground().inject(*domain.symbols, front=True)\n g = domain.mod.set_ring(new_ring)\n s = 0\n\n while True:\n h = f.inject(front=True)\n r = g.resultant(h)\n\n if r.is_squarefree:\n return s, f, r\n f = f.compose({x: x - domain.unit for x in self.gens})\n s += 1", "def roots_quintic(f):\n result = []\n\n coeff_5, coeff_4, p_, q_, r_, s_ = f.all_coeffs()\n\n if not all(coeff.is_Rational for coeff in (coeff_5, coeff_4, p_, q_, r_, s_)):\n return result\n\n if coeff_5 != 1:\n f = Poly(f / coeff_5)\n _, coeff_4, p_, q_, r_, s_ = f.all_coeffs()\n\n # Cancel coeff_4 to form x^5 + px^3 + qx^2 + rx + s\n if coeff_4:\n p = p_ - 2*coeff_4*coeff_4/5\n q = q_ - 3*coeff_4*p_/5 + 4*coeff_4**3/25\n r = r_ - 2*coeff_4*q_/5 + 3*coeff_4**2*p_/25 - 3*coeff_4**4/125\n s = s_ - coeff_4*r_/5 + coeff_4**2*q_/25 - coeff_4**3*p_/125 + 4*coeff_4**5/3125\n x = f.gen\n f = Poly(x**5 + p*x**3 + q*x**2 + r*x + s)\n else:\n p, q, r, s = p_, q_, r_, s_\n\n quintic = PolyQuintic(f)\n\n # Eqn standardized. 
Algo for solving starts here\n if not f.is_irreducible:\n return result\n f20 = quintic.f20\n # Check if f20 has linear factors over domain Z\n if f20.is_irreducible:\n return result\n # Now, we know that f is solvable\n for _factor in f20.factor_list()[1]:\n if _factor[0].is_linear:\n theta = _factor[0].root(0)\n break\n d = discriminant(f)\n delta = sqrt(d)\n # zeta = a fifth root of unity\n zeta1, zeta2, zeta3, zeta4 = quintic.zeta\n T = quintic.T(theta, d)\n tol = S(1e-10)\n alpha = T[1] + T[2]*delta\n alpha_bar = T[1] - T[2]*delta\n beta = T[3] + T[4]*delta\n beta_bar = T[3] - T[4]*delta\n\n disc = alpha**2 - 4*beta\n disc_bar = alpha_bar**2 - 4*beta_bar\n\n l0 = quintic.l0(theta)\n Stwo = S(2)\n l1 = _quintic_simplify((-alpha + sqrt(disc)) / Stwo)\n l4 = _quintic_simplify((-alpha - sqrt(disc)) / Stwo)\n\n l2 = _quintic_simplify((-alpha_bar + sqrt(disc_bar)) / Stwo)\n l3 = _quintic_simplify((-alpha_bar - sqrt(disc_bar)) / Stwo)\n\n order = quintic.order(theta, d)\n test = (order*delta.n()) - ( (l1.n() - l4.n())*(l2.n() - l3.n()) )\n # Comparing floats\n if not comp(test, 0, tol):\n l2, l3 = l3, l2\n\n # Now we have correct order of l's\n R1 = l0 + l1*zeta1 + l2*zeta2 + l3*zeta3 + l4*zeta4\n R2 = l0 + l3*zeta1 + l1*zeta2 + l4*zeta3 + l2*zeta4\n R3 = l0 + l2*zeta1 + l4*zeta2 + l1*zeta3 + l3*zeta4\n R4 = l0 + l4*zeta1 + l3*zeta2 + l2*zeta3 + l1*zeta4\n\n Res = [None, [None]*5, [None]*5, [None]*5, [None]*5]\n Res_n = [None, [None]*5, [None]*5, [None]*5, [None]*5]\n\n # Simplifying improves performance a lot for exact expressions\n R1 = _quintic_simplify(R1)\n R2 = _quintic_simplify(R2)\n R3 = _quintic_simplify(R3)\n R4 = _quintic_simplify(R4)\n\n # hard-coded results for [factor(i) for i in _vsolve(x**5 - a - I*b, x)]\n x0 = z**(S(1)/5)\n x1 = sqrt(2)\n x2 = sqrt(5)\n x3 = sqrt(5 - x2)\n x4 = I*x2\n x5 = x4 + I\n x6 = I*x0/4\n x7 = x1*sqrt(x2 + 5)\n sol = [x0, -x6*(x1*x3 - x5), x6*(x1*x3 + x5), -x6*(x4 + x7 - I), x6*(-x4 + x7 + I)]\n\n R1 = R1.as_real_imag()\n R2 = R2.as_real_imag()\n R3 = R3.as_real_imag()\n R4 = R4.as_real_imag()\n\n for i, s in enumerate(sol):\n Res[1][i] = _quintic_simplify(s.xreplace({z: R1[0] + I*R1[1]}))\n Res[2][i] = _quintic_simplify(s.xreplace({z: R2[0] + I*R2[1]}))\n Res[3][i] = _quintic_simplify(s.xreplace({z: R3[0] + I*R3[1]}))\n Res[4][i] = _quintic_simplify(s.xreplace({z: R4[0] + I*R4[1]}))\n\n for i in range(1, 5):\n for j in range(5):\n Res_n[i][j] = Res[i][j].n()\n Res[i][j] = _quintic_simplify(Res[i][j])\n r1 = Res[1][0]\n r1_n = Res_n[1][0]\n\n for i in range(5):\n if comp(im(r1_n*Res_n[4][i]), 0, tol):\n r4 = Res[4][i]\n break\n\n # Now we have various Res values. Each will be a list of five\n # values. We have to pick one r value from those five for each Res\n u, v = quintic.uv(theta, d)\n testplus = (u + v*delta*sqrt(5)).n()\n testminus = (u - v*delta*sqrt(5)).n()\n\n # Evaluated numbers suffixed with _n\n # We will use evaluated numbers for calculation. 
Much faster.\n r4_n = r4.n()\n r2 = r3 = None\n\n for i in range(5):\n r2temp_n = Res_n[2][i]\n for j in range(5):\n # Again storing away the exact number and using\n # evaluated numbers in computations\n r3temp_n = Res_n[3][j]\n if (comp((r1_n*r2temp_n**2 + r4_n*r3temp_n**2 - testplus).n(), 0, tol) and\n comp((r3temp_n*r1_n**2 + r2temp_n*r4_n**2 - testminus).n(), 0, tol)):\n r2 = Res[2][i]\n r3 = Res[3][j]\n break\n if r2 is not None:\n break\n else:\n return [] # fall back to normal solve\n\n # Now, we have r's so we can get roots\n x1 = (r1 + r2 + r3 + r4)/5\n x2 = (r1*zeta4 + r2*zeta3 + r3*zeta2 + r4*zeta1)/5\n x3 = (r1*zeta3 + r2*zeta1 + r3*zeta4 + r4*zeta2)/5\n x4 = (r1*zeta2 + r2*zeta4 + r3*zeta1 + r4*zeta3)/5\n x5 = (r1*zeta1 + r2*zeta2 + r3*zeta3 + r4*zeta4)/5\n result = [x1, x2, x3, x4, x5]\n\n # Now check if solutions are distinct\n\n saw = set()\n for r in result:\n r = r.n(2)\n if r in saw:\n # Roots were identical. Abort, return []\n # and fall back to usual solve\n return []\n saw.add(r)\n\n # Restore to original equation where coeff_4 is nonzero\n if coeff_4:\n result = [x - coeff_4 / 5 for x in result]\n return result", "def vsfun(Q_slm, theta, phi,f=[]):\n vsf_th=numpy.zeros(theta.shape, dtype='complex')\n vsf_ph=numpy.zeros(theta.shape, dtype='complex')\n for (s,l,m) in Q_slm:\n vsh_th,vsh_ph=K(s, l, m, theta, phi)\n c_slm=Q_slm.getBysnm(s, l, m) if not(f) else Q_slm.getBysnm(s, l, m)(f)\n vsf_th=vsf_th+c_slm*vsh_th\n vsf_ph=vsf_ph+c_slm*vsh_ph\n return vsf_th, vsf_ph", "def groebner_basis(f, verbose=0):\n order = f[0].lp.order\n def select(P):\n # select the pair with minimum LCM\n pr = min(P, key = lambda(i,j): order(lcm_expv(f[i][0],f[j][0])))\n return pr\n\n def normal(g, H):\n \"\"\"\n compute the rest h of the division of g wrt the functions in H;\n if the rest is zero return None\n else if h is not in f add it to f; return its (expv,p)\n \"\"\"\n h = g.mod1([f[i] for i in H])\n # FIXME\n if not h or h == zero:\n return None\n else:\n hk = tuple(h.keys())\n # add h to SP, return (expv,pi)\n if not hk in fd:\n fd[hk] = len(f)\n hexpv = h.leading_expv()\n f.append((hexpv,h/h[hexpv]))\n return hexpv, fd[hk]\n return f[fd[hk]][0], fd[hk]\n\n def update(G,CP,h):\n \"\"\"update G using the set of critical pairs CP and h = (expv,pi)\n see [BW] page 230\n \"\"\"\n hexpv, hp = f[h]\n #print 'DB10',hp\n # filter new pairs (h,g), g in G\n C = G.copy()\n D = set()\n \n while C:\n # select a pair (h,g) by popping an element from C\n g = C.pop()\n gexpv = f[g][0]\n LCMhg = lcm_expv(hexpv, gexpv)\n \n def lcm_divides(p):\n expv = lcm_expv(hexpv, f[p][0])\n # LCM(LM(h), LM(p)) divides LCM(LM(h),LM(g))\n return monomial_div(LCMhg,expv)\n \n # HT(h) and HT(g) disjoint: hexpv + gexpv == LCMhg\n if monomial_mul(hexpv,gexpv) == LCMhg or (\\\n not any( lcm_divides(f) for f in C ) and \\\n not any( lcm_divides(pr[1]) for pr in D )):\n D.add((h,g))\n\n E = set()\n while D:\n # select h,g from D\n h,g = D.pop()\n gexpv = f[g][0]\n LCMhg = lcm_expv(hexpv, gexpv)\n if not monomial_mul(hexpv,gexpv) == LCMhg:\n E.add((h,g))\n \n # filter old pairs\n B_new = set()\n \n while CP:\n # select g1,g2 from CP\n g1,g2 = CP.pop()\n g1expv = f[g1][0]\n g2expv = f[g2][0]\n LCM12 = lcm_expv(g1expv,g2expv)\n # if HT(h) does not divide lcm(HT(g1),HT(g2))\n if not monomial_div(LCM12, hexpv) or \\\n lcm_expv(g1expv,hexpv) == LCM12 or \\\n lcm_expv(g2expv,hexpv) == LCM12:\n B_new.add((g1,g2))\n \n B_new |= E\n \n # filter polynomials\n G_new = set()\n while G:\n g = G.pop()\n if not monomial_div(f[g][0], 
hexpv):\n G_new.add(g)\n G_new.add(h)\n \n return G_new,B_new\n # end of update ################################\n\n if not f:\n return None\n lp = f[0].lp\n zero = Poly(lp)\n \n # lcm_expv(expv1,expv2) computes the expv for the lcm\n # of the monomials with expv1,expv2; the results are cached\n lcm_expv0 = monomial_lcm\n d_lcm_expv = {}\n def lcm_expv(expv1,expv2):\n if not (expv1,expv2) in d_lcm_expv:\n d_lcm_expv[(expv1,expv2)] = lcm_expv0(expv1,expv2)\n return d_lcm_expv[(expv1,expv2)]\n \n # replace f with a list of (p.leading_expv(),p), where p is monic\n # and all polynomials have different sets of monomials.\n # In this way, p is identified by pk = tuple(p.keys())\n # p is not hashable, so that one cannot use a built-in set of (expv,p)\n # To implement a set of polynomials SP use a dictionary fd\n # add p to SP:\n # f.append((expv,p)); fd[pk] = len(f)\n # ip is the number associated to p\n # expv,p = f[ip]\n \n # reduce the list of initial polynomials; see [BW] page 203\n #print 'DB0',f\n f1 = f[:]\n while 1:\n f = f1[:]\n f1 = []\n for i in range(len(f)):\n p = f[i]\n _, r = p.division(f[:i])\n if r != 0:\n f1.append(r)\n # when f does not change anymore, there are not two elements with \n # same LT, so the elements of f are guaranteed to have all\n # different sets of monomials\n if f == f1:\n break\n\n #print 'DB1',f\n # convert f in a list of pairs (expv,p) where expv is the encoded\n # tuple of exponents of the LT of p and p is a monic polynomial\n f1 = []\n for h in f:\n if h:\n expv = h.leading_expv()\n f1.append((expv,h/h[expv]))\n f = f1\n \n # order according to the monomial ordering the initial polynomials\n # f[i] < f[j] if i > j\n order = f[0][1].lp.order\n f.sort(key=lambda t: order(t[0]), reverse=True)\n \n #print 'DB2',[t[1] for t in f]\n \n # f list of pairs (expv,p)\n fd = {} # ip = fd[tuple(p.keys())]; (expv,p) = f[ip]\n F = set() # set of indices of polynomials\n G = set() # set of indices of intermediate would-be Groebner basis\n CP = set() # set of pairs of indices of critical pairs\n for i, h in enumerate(f):\n fd[tuple(h[1].keys())] = i\n F.add(i)\n\n #####################################\n # algorithm GROEBNERNEWS2 in [BW] page 232\n while F:\n # select p with minimum expv\n m = min([f[x] for x in F],key=lambda f: order(f[0]))[1]\n h = fd[tuple(m.keys())]\n F.remove(h)\n #print 'DB3 CP=',CP\n #print 'DB3 G', G\n G,CP = update(G,CP,h)\n \n # count the number of critical pairs which reduce to zero\n reductions_to_zero = 0\n \n while CP:\n g1,g2 = select(CP)\n CP.remove((g1,g2))\n h = S_poly(f[g1],f[g2])\n # normal(h,G) appends h to f if h\n h = normal(h,G)\n if h:\n G, CP = update(G,CP,h[1])\n else:\n reductions_to_zero += 1\n ######################################\n # now G is a Groebner basis; reduce it\n Gr = set()\n for g in G:\n h = normal(f[g][1], G - set([g]))\n if h:\n Gr.add(h[1])\n # replace ip with (expv,p)\n Gr = [f[g] for g in Gr]\n \n # order according to the monomial ordering\n Gr.sort(reverse=True)\n \n # replace (expv,p) with p\n Gr = [ x[1] for x in Gr]\n if verbose:\n print 'reductions_to_zero=',reductions_to_zero\n return Gr", "def assemble(fs, f, quadrature=None):\n\n # Create an appropriate (complete) quadrature rule, unless one has been given to us\n\n Q=None\n\n if quadrature==None:\n Q = gauss_quadrature(fs.element.cell, fs.element.degree+1)\n else:\n Q = quadrature\n\n # Tabulate the basis functions and their gradients at the quadrature points.\n\n phi = fs.element.tabulate(Q.points)\n phi_grad = fs.element.tabulate(Q.points, 
grad=True)\n\n # Create the left hand side matrix and right hand side vector.\n # This creates a sparse matrix because creating a dense one may\n # well run your machine out of memory!\n A = sp.lil_matrix((fs.node_count, fs.node_count))\n # A = sp.coo_matrix(np, (fs.node_count, fs.node_count))\n l = np.zeros(fs.node_count)\n\n # Now loop over all the cells and assemble A and l\n\n #right hand side first\n\n for c in range(fs.mesh.entity_counts[-1]):\n for i in range(phi.shape[0]):\n for q in range(phi.shape[1]):\n innersum = np.sum(np.dot(f.values[fs.cell_nodes[c, :]], phi.T))\n jac = np.abs(np.linalg.det(fs.mesh.jacobian(c)))\n temp1 = phi[i, q]\n temp2 = Q.weights[i]\n l[fs.cell_nodes[c, :]] += temp1*innersum*temp2*jac\n \n #left hand side\n\n #although this is an incredibly nested for loop and is bound to have good performance, I couldn't\n #quite think about the problem of building the left hand side matrix without explicitly looping over\n #all of the variables that are available to us.\n\n for c in range(fs.mesh.entity_counts[-1]):\n curr_jac = fs.mesh.jacobian(c)\n jac_inv_t = np.linalg.inv(curr_jac).T\n jac_det = np.abs(np.linalg.det(curr_jac))\n for i in range(phi.shape[1]):\n for j in range(phi.shape[1]):\n for q in range(phi.shape[0]):\n innersum = 0\n for alpha in range(curr_jac.shape[0]):\n for beta in range(curr_jac.shape[0]):\n for gamma in range(curr_jac.shape[0]):\n temp1 = jac_inv_t[beta, alpha] * phi_grad[q, i, beta]\n temp2 = jac_inv_t[gamma, alpha] *phi_grad[q, j, gamma]\n innersum += temp1*temp2 + phi[q, j]*phi[q, j]\n A[fs.cell_nodes[c][i], fs.cell_nodes[c][j]] += innersum*jac_det*Q.weights[q]\n\n return A, l", "def zzX_sqr(f):\n if poly_univariate_p(f):\n return zzx_sqr(f)\n\n if zzX_zero_p(f):\n return f\n\n df = zzX_degree(f)\n l = poly_level(f)-1\n\n h = []\n\n for i in xrange(0, 2*df+1):\n coeff = zzX_zero(l)\n\n jmin = max(0, i-df)\n jmax = min(i, df)\n\n n = jmax - jmin + 1\n\n jmax = jmin + n // 2 - 1\n\n for j in xrange(jmin, jmax+1):\n coeff = zzX_add(coeff, zzX_mul(f[j], f[i-j]))\n\n coeff = zzX_mul_const(coeff, 2)\n\n if n & 1:\n elem = zzX_sqr(f[jmax+1])\n coeff = zzX_add(coeff, elem)\n\n h.append(coeff)\n\n return h", "def compute_clique_potentials(self,F):\r\n\r\n for i in self.nodes():\r\n self.node[i]['fac'] = factor([],[],[])\r\n \r\n for f in F.factors: # assign each factor to a clique\r\n for j,data in self.nodes_iter(data=True):\r\n if len(scipy.setdiff1d(f.var,data['clique']) ) ==0:\r\n self.node[j]['fac'] *= f\r\n self.nop += scipy.prod(self.node[j]['fac'].card)\r\n break", "def roots_quartic(f):\n _, a, b, c, d = f.monic().all_coeffs()\n\n if not d:\n return [S.Zero] + roots([1, a, b, c], multiple=True)\n elif (c/a)**2 == d:\n x, m = f.gen, c/a\n\n g = Poly(x**2 + a*x + b - 2*m, x)\n\n z1, z2 = roots_quadratic(g)\n\n h1 = Poly(x**2 - z1*x + m, x)\n h2 = Poly(x**2 - z2*x + m, x)\n\n r1 = roots_quadratic(h1)\n r2 = roots_quadratic(h2)\n\n return r1 + r2\n else:\n a2 = a**2\n e = b - 3*a2/8\n f = _mexpand(c + a*(a2/8 - b/2))\n aon4 = a/4\n g = _mexpand(d - aon4*(a*(3*a2/64 - b/4) + c))\n\n if f.is_zero:\n y1, y2 = [sqrt(tmp) for tmp in\n roots([1, e, g], multiple=True)]\n return [tmp - aon4 for tmp in [-y1, -y2, y1, y2]]\n if g.is_zero:\n y = [S.Zero] + roots([1, 0, e, f], multiple=True)\n return [tmp - aon4 for tmp in y]\n else:\n # Descartes-Euler method, see [7]\n sols = _roots_quartic_euler(e, f, g, aon4)\n if sols:\n return sols\n # Ferrari method, see [1, 2]\n p = -e**2/12 - g\n q = -e**3/108 + e*g/3 - f**2/8\n TH = Rational(1, 3)\n\n def 
_ans(y):\n w = sqrt(e + 2*y)\n arg1 = 3*e + 2*y\n arg2 = 2*f/w\n ans = []\n for s in [-1, 1]:\n root = sqrt(-(arg1 + s*arg2))\n for t in [-1, 1]:\n ans.append((s*w - t*root)/2 - aon4)\n return ans\n\n # whether a Piecewise is returned or not\n # depends on knowing p, so try to put\n # in a simple form\n p = _mexpand(p)\n\n\n # p == 0 case\n y1 = e*Rational(-5, 6) - q**TH\n if p.is_zero:\n return _ans(y1)\n\n # if p != 0 then u below is not 0\n root = sqrt(q**2/4 + p**3/27)\n r = -q/2 + root # or -q/2 - root\n u = r**TH # primary root of solve(x**3 - r, x)\n y2 = e*Rational(-5, 6) + u - p/u/3\n if fuzzy_not(p.is_zero):\n return _ans(y2)\n\n # sort it out once they know the values of the coefficients\n return [Piecewise((a1, Eq(p, 0)), (a2, True))\n for a1, a2 in zip(_ans(y1), _ans(y2))]", "def is_sqf(f):\n return dmp_sqf_p(f.rep, f.lev, f.dom)", "def squarefree(n):\n return prod(map(lambda x: x[0], factor(Integer(n))))", "def form_factor_fixed_qsq(self, form='f+', qsq=0.0,\n var='qsq', decay='Bs2Ds', withpole=False, nz=3, gvar=True):\n if form == 'f0':\n PHI = const_b2d.PHI0\n elif form == 'f+':\n PHI = const_b2d.PHI_PLUS\n #if form not in ('f+', 'f0'):\n else:\n print \"Only f+ and f0 form factors are provided.\"\n return\n\n z = self.q_sq2z(qsq)\n formfactor = self.fcn(z, self.params(decay), nz)[form] / self.Pphi(qsq, form)\n formfactor /= PHI\n if var == 'qsq':\n if gvar:\n res = [qsq, formfactor]\n else:\n res = [qsq, gv.mean(formfactor), gv.sdev(formfactor)]\n elif var == 'z':\n if gvar:\n res = [qsq, formfactor]\n else:\n res = [z, gv.mean(formfactor), gv.sdev(formfactor)]\n return res", "def _evaluate_f_s_l_q(self, q):", "def zzX_heu_gcd(f, g, **flags):\n if poly_univariate_p(f):\n return zzx_heu_gcd(f, g, **flags)\n\n def interpolate(h, x):\n f = []\n\n while not zzX_zero_p(h):\n g = zzX_zz_trunc(h, x)\n f.insert(0, g)\n h = zzX_sub(h, g)\n h = zzX_quo_const(h, x)\n\n return f\n\n def finalize(h, cff, cfg, gcd):\n if zzX_zz_LC(h) > 0:\n h = zzX_mul_const(h, gcd)\n else:\n h = zzX_mul_const(h, -gcd)\n cff = zzX_neg(cff)\n cfg = zzX_neg(cfg)\n\n return h, cff, cfg\n\n zero_f = zzX_zero_p(f)\n zero_g = zzX_zero_p(g)\n\n l = poly_level(f)\n z = zzX_zero(l)\n\n if zero_f and zero_g:\n return z, z, z\n elif zero_f:\n return g, z, zzX_const(l, 1)\n elif zero_g:\n return f, zzX_const(l, 1), z\n\n df = zzX_degree(f)\n dg = zzX_degree(g)\n\n cf = zzX_zz_content(f)\n cg = zzX_zz_content(g)\n\n gcd = igcd(cf, cg)\n\n f = zzX_quo_const(f, gcd)\n g = zzX_quo_const(g, gcd)\n\n f_norm = zzX_max_norm(f)\n g_norm = zzX_max_norm(g)\n\n B = 2*min(f_norm, g_norm) + 29\n\n x = max(min(B, 99*INT_TYPE(isqrt(B))),\n 2*min(f_norm // abs(zzX_zz_LC(f)),\n g_norm // abs(zzX_zz_LC(g))) + 2)\n\n for i in xrange(0, 6):\n ff = zzX_eval(f, x)\n gg = zzX_eval(g, x)\n\n if not (zzX_zero_p(ff) or zzX_zero_p(gg)):\n h, cff, cfg = zzX_heu_gcd(ff, gg, **flags)\n\n h = interpolate(h, x)\n h = zzX_zz_primitive(h)[1]\n\n cff_, r = zzX_div(f, h)\n\n if zzX_zero_p(r):\n cfg_, r = zzX_div(g, h)\n\n if zzX_zero_p(r):\n return finalize(h, cff_, cfg_, gcd)\n\n cff = interpolate(cff, x)\n\n h, r = zzX_div(f, cff)\n\n if zzX_zero_p(r):\n cfg_, r = zzX_div(g, h)\n\n if zzX_zero_p(r):\n return finalize(h, cff, cfg_, gcd)\n\n cfg = interpolate(cfg, x)\n\n h, r = zzX_div(g, cfg)\n\n if zzX_zero_p(r):\n cff_, r = zzX_div(f, h)\n\n if zzX_zero_p(r):\n return finalize(h, cff_, cfg, gcd)\n\n x = INT_TYPE(2.7319*x*isqrt(isqrt(x)))\n\n raise HeuristicGCDFailed('no luck')", "def gaussQuad(f,a,b,m):\n c1 = (b + a)/2.0\n c2 = (b - a)/2.0\n x,A = 
gaussNodes(m)\n #print \"qauss nodes x = \", x\n sum = 0.0\n\n for i in range(len(x)):\n sum = sum + A[i]*f(c1 + c2*x[i])\n\n return c2*sum", "def gcd(f, g):\n lev, dom, per, F, G = f.unify(g)\n return per(dmp_gcd(F, G, lev, dom))", "def makefx(q):\n g = primitive_root(q)\n qpred = q - 1\n qd2 = qpred >> 1\n g_mf = [0, g]\n for _ in range(2, qpred):\n g_mf.append((g_mf[-1]*g) % q)\n fx = {}\n for i in range(1, qd2):\n if i in fx:\n continue\n # search j s.t. g**j + g**i = 1 mod q\n j = g_mf.index(q + 1 - g_mf[i])\n fx[i] = j\n fx[j] = i\n fx[qpred - i] = (j - i + qd2) % qpred\n fx[fx[qpred - i]] = qpred - i\n fx[qpred - j] = (i - j + qd2) % qpred\n fx[fx[qpred - j]] = qpred - j\n del g_mf\n return fx", "def F(cst, x):\n [u0, v0, u1, v1, u2, v2, coeffs] = cst\n [u, v, g1, g2, g3] = x\n a = g1*u1 - u0\n b = g2*u2 - u0\n c = g3*u - u0\n l = g1*v1 - v0 \n m = g2*v2 - v0\n n = g3*v - v0\n r = g1 - 1\n s = g2 - 1\n t = g3 - 1\n return np.array([\n coeffs[0]*(a**2-l**2) + 2*coeffs[1]*(a*b-l*m) + coeffs[2]*(b**2-m**2) + 2*coeffs[3]*(a*c-l*n) + 2*coeffs[4]*(b*c-m*n) + c**2 - n**2,\n coeffs[0]*(l**2-r**2) + 2*coeffs[1]*(l*m-r*s) + coeffs[2]*(m**2-s**2) + 2*coeffs[3]*(l*n-r*t) + 2*coeffs[4]*(m*n-s*t) + n**2 - t**2,\n coeffs[0]*a*l + coeffs[1]*(l*b+m*a) + coeffs[2]*m*b + coeffs[3]*(l*c+n*a) + coeffs[4]*(m*c+b*n) + c*n,\n coeffs[0]*a*r + coeffs[1]*(r*b+s*a) + coeffs[2]*s*b + coeffs[3]*(r*c+t*a) + coeffs[4]*(s*c+b*t) + c*t,\n coeffs[0]*r*l + coeffs[1]*(l*s+m*r) + coeffs[2]*m*s + coeffs[3]*(l*t+n*r) + coeffs[4]*(m*t+s*n) + t*n \n ])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return ``True`` if ``f`` is a squarefree polynomial in ``K[X]``. Examples ======== >>> _, x, y = ring('x y', ZZ) >>> ((x + y)**2).is_squarefree False >>> (x**2 + y**2).is_squarefree True
def is_squarefree(self, f):
    if f.is_ground:
        return True

    g = f
    for x in self.gens:
        g = self.gcd(g, f.diff(x))
        if g.is_ground:
            return True

    return False
[ "def is_squarefree_hilbert_number(n):\n return is_hilbert_number(n) and is_hilbert_squarefree_number(n)", "def is_sqf(f):\n return dmp_sqf_p(f.rep, f.lev, f.dom)", "def is_quantifier_free(formula):\n assert type(formula) is Formula\n # Task 11.3.1\n root = formula.root\n if is_quantifier(root):\n return False\n elif is_unary(root):\n return is_quantifier_free(formula.first)\n elif is_binary(root):\n if not is_quantifier_free(formula.first) or not is_quantifier_free(formula.second):\n return False\n\n return True", "def is_quantifier_free(formula: Formula) -> bool:\r\n # Task 11.3.1\r\n\r\n if is_quantifier(formula.root):\r\n return False\r\n\r\n if is_binary(formula.root):\r\n return is_quantifier_free(formula.first) and is_quantifier_free(formula.second)\r\n\r\n if is_unary(formula.root):\r\n return is_quantifier_free(formula.first)\r\n\r\n return True", "def is_quantifier_free(formula):\n assert type(formula) is Formula\n # Task 11.3.1\n if is_constant(formula.root) or is_variable(formula.root) or is_relation(formula.root) or is_equality(formula.root):\n return True\n\n if is_quantifier(formula.root):\n return False\n\n is_first = is_quantifier_free(formula.first)\n if is_binary(formula.root):\n return is_first and is_quantifier_free(formula.second)\n\n return is_first", "def is_SymmetricFunction(x):\n return isinstance(x, SymmetricFunctionAlgebra_generic.Element)", "def isSymmetric(self, root):\n queue = deque()\n queue.append(root) # appending root twice makes further code shorter\n queue.append(root)\n while queue:\n node1 = queue.popleft()\n node2 = queue.popleft()\n if not node1 and not node2: # both subtrees are empty\n continue\n if not node1 or not node2: # one subtree is empty, while another one isn't\n return False\n if node1.val != node2.val:\n return False\n # subtrees' root values are equal, enqueue their subtrees in the right order\n queue.append(node1.left)\n queue.append(node2.right)\n queue.append(node1.right)\n queue.append(node2.left)\n return True # all checks went good, tree is symmetric", "def is_square(q_1: Qs) -> bool:\n\n return math.sqrt(q_1.dim).is_integer()", "def is_square_free(n, fn=prime_factor):\n return n > 0 and all(e == 1 for (_, e) in fn(n))", "def _can_do_sum_of_squares(n, k):\n if k < 1:\n return False\n if n < 0:\n return False\n if n == 0:\n return True\n if k == 1:\n return is_square(n)\n if k == 2:\n if n in (1, 2):\n return True\n if isprime(n):\n if n % 4 == 1:\n return 1 # signal that it was prime\n return False\n else:\n f = factorint(n)\n for p, m in f.items():\n # we can proceed iff no prime factor in the form 4*k + 3\n # has an odd multiplicity\n if (p % 4 == 3) and m % 2:\n return False\n return True\n if k == 3:\n if (n//4**multiplicity(4, n)) % 8 == 7:\n return False\n # every number can be written as a sum of 4 squares; for k > 4 partitions\n # can be 0\n return True", "def is_SymmetricFunctionAlgebra(x):\n return isinstance(x, SymmetricFunctionAlgebra_generic)", "def has_creep(self, pos: Union[Point2, Unit]) -> bool:\n assert isinstance(pos, (Point2, Unit)), \"pos is not of type Point2 or Unit\"\n pos = pos.position.rounded\n return self.state.creep[pos] == 1", "def isSqrt(self):\n return _libsbml.ASTNode_isSqrt(self)", "def is_symbolic(self: Q) -> bool:\n\n symbolic = False\n\n if (\n hasattr(self.t, \"free_symbols\")\n or hasattr(self.x, \"free_symbols\")\n or hasattr(self.y, \"free_symbols\")\n or hasattr(self.z, \"free_symbols\")\n ):\n symbolic = True\n\n return symbolic", "def equals(self, f: Poly, g: Poly):\n return (f % 
self.poly) == (g % self.poly)", "def validate_proof(proof, target_hash, merkle_root, hash_f=sha256):\n\n if not proof:\n # no siblings, single item tree, so the hash should also be the root\n return target_hash == merkle_root\n\n target_hash = get_buffer(target_hash)\n merkle_root = get_buffer(merkle_root)\n\n proof_hash = target_hash\n for x in proof:\n if 'left' in x:\n # then the sibling is a left node\n proof_hash = get_buffer(hash_f(get_buffer(x['left']) + proof_hash))\n elif 'right' in x:\n # then the sibling is a right node\n proof_hash = get_buffer(hash_f(proof_hash + get_buffer(x['right'])))\n else:\n # no left or right designation exists, proof is invalid\n return False\n\n return hexlify(proof_hash) == hexlify(merkle_root)", "def is_fastqc(name,f):\n name_underscore = os.path.basename(strip_ngs_extensions(name))+'_'\n logging.debug(\"name = %s name_underscore = %s\" % (name,name_underscore))\n if f == \"%sfastqc\" % name_underscore:\n return True\n else:\n return False", "def is_primitive_root(g,n):\n\t# SAGE equivalent is mod(g,n).is_primitive_root() in IntegerMod class\n\tif gcd(g,n) != 1: return False # Not in the group of units\n\torder = euler_phi(n)\n\tif carmichael_lambda(n) != order: return False # Group of units isn't cyclic\n\torderfacts = prime_divisors(order)\n\tfor fact in orderfacts:\n\t\tif pow(g,order//fact,n) == 1: return False\n\treturn True", "def has_xfree(self, s: set[Basic]):\n # protect O(1) containment check by requiring:\n if type(s) is not set:\n raise TypeError('expecting set argument')\n return any(a in s for a in iterfreeargs(self))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Squarefree norm of ``f`` in ``K[X]``, useful over algebraic domains. Returns ``s``, ``f``, ``r``, such that ``g(x) = f(x - s*a)`` and ``r(x) = Norm(g(x))`` is a squarefree polynomial over K, where ``a`` is the algebraic extension of ``K``. Examples ======== >>> _, x, y = ring('x y', QQ.algebraic_field(I)) >>> (x*y + y**2).sqf_norm() (1, x*y - I*x + y**2 - 3*I*y - 2, x**2*y**2 + x**2 + 2*x*y**3 + 2*x*y + y**4 + 5*y**2 + 4)
def sqf_norm(self, f):
    domain = self.domain

    if not domain.is_AlgebraicField:
        raise DomainError(f'ground domain must be algebraic, got {domain}')

    new_ring = self.to_ground().inject(*domain.symbols, front=True)
    g = domain.mod.set_ring(new_ring)
    s = 0

    while True:
        h = f.inject(front=True)
        r = g.resultant(h)

        if r.is_squarefree:
            return s, f, r

        f = f.compose({x: x - domain.unit for x in self.gens})
        s += 1
[ "def sqf_norm(f):\n s, g, r = dmp_sqf_norm(f.rep, f.lev, f.dom)\n return s, f.per(g), f.per(r, dom=f.dom.dom)", "def sqrt(S):\r\n from math import sqrt as _sqrt\r\n if isinstance(S, PowerSeries):\r\n return S.squareroot()\r\n return _sqrt(S)", "def rsqrt_(self):\n return math_funcs.rsqrt(self, self)", "def sympy_l2_norm_sq(self, i=sp.Symbol('i', integer=True)):\n class h(sp.Function):\n @classmethod\n def eval(cls, x):\n if x.is_Number:\n return self.l2_norm_sq(x)\n return h(i)", "def normalise_k(self):\n k_s = self.mod_square_k(r=True)\n norm = simps(k_s, self.k)\n self.psi_k = self.psi_k / np.sqrt(norm)\n self.mod_square_k()\n return self.psi_k", "def sympy_L2_norm_sq(self, i=sp.Symbol('i', integer=True)):\n class h(sp.Function):\n @classmethod\n def eval(cls, x):\n if x.is_Number:\n return self.L2_norm_sq(x)\n return h(i)", "def _rsq(self):\n return self._ss_reg / self._ss_tot", "def tree_norm(tree):\n reduce_fn = lambda s, a: s + np.sum(np.square(a))\n sum_sq = jax.tree_util.tree_reduce(reduce_fn, tree, 0)\n return np.sqrt(sum_sq)", "def tree_l2_norm(tree_x, squared=False):\n squared_tree = tree_map(jnp.square, tree_x)\n sqnorm = tree_sum(squared_tree)\n if squared:\n return sqnorm\n else:\n return jnp.sqrt(sqnorm)", "def norm(self):\n return math.sqrt((self.__pow__(2)).quad())", "def rsqrt(data):\n return _make.rsqrt(data)", "def test_sym_sqrtm(self): \n # create random symmetric n x n matrix\n n = 5\n A = 5.0 * 2.0*(torch.rand(n,n) - 0.5)\n A = A + A.T\n\n # reference implementation of scipy\n sqA_scipy = sla.sqrtm(A.numpy())\n isqA_scipy = sla.inv(sla.sqrtm(A.numpy()))\n # my own implementation using pure torch functions\n sqA,isqA = (x.numpy() for x in _sym_sqrtm(A))\n \n self.assertTrue(np.isclose(sqA, sqA_scipy).all())\n self.assertTrue(np.isclose(isqA, isqA_scipy).all())", "def sqrt(self):\n if self._exponent and (self.exponent % 2): #odd\n raise ValueError(\"Does not support sqrt() if exponent is odd: {}.format()self.exponent\")\n return PolyNum(mantPN_sqrt(self.mantissa, self._max_N), self._exponent // 2)", "def scalar_sqrt(self, dst, src):\n return self._scalar_single_func('sqrt', dst, src)", "def gauss_kl_vff(q_mu, q_sqrt, K):\n # KL(N₀ || N₁) = ½ [tr(Σ₁⁻¹ Σ₀) + (μ₁ - μ₀)ᵀ Σ₁⁻¹ (μ₁ - μ₀) - k + ln(det(Σ₁)/det(Σ₀))]\n # N₀ = q; μ₀ = q_mu, Σ₀ = q_sqrt q_sqrtᵀ\n # N₁ = p; μ₁ = 0, Σ₁ = K\n # KL(q || p) =\n # ½ [tr(K⁻¹ q_sqrt q_sqrtᵀA + q_muᵀ K⁻¹ q_mu - k + logdet(K) - logdet(q_sqrt q_sqrtᵀ)]\n # k = number of dimensions, if q_sqrt is m x m this is m²\n Kinv_q_mu = K.solve(q_mu)\n\n mahalanobis_term = tf.squeeze(tf.matmul(q_mu, Kinv_q_mu, transpose_a=True))\n\n # GPflow: q_sqrt is num_latent_gps x N x N\n num_latent_gps = to_default_float(tf.shape(q_mu)[1])\n logdet_prior = num_latent_gps * K.log_abs_determinant()\n\n product_of_dimensions__int = tf.reduce_prod(\n tf.shape(q_sqrt)[:-1]\n ) # dimensions are integers\n constant_term = to_default_float(product_of_dimensions__int)\n\n Lq = tf.linalg.band_part(q_sqrt, -1, 0) # force lower triangle\n logdet_q = tf.reduce_sum(tf.math.log(tf.square(tf.linalg.diag_part(Lq))))\n\n # S = tf.matmul(q_sqrt, q_sqrt, transpose_b=True)\n # trace_term = tf.trace(K.solve(S))\n trace_term = tf.squeeze(\n tf.reduce_sum(Lq * K.solve(Lq), axis=[-1, -2])\n ) # [O(N²) instead of O(N³)\n\n twoKL = (\n trace_term + mahalanobis_term - constant_term + logdet_prior - logdet_q\n )\n return 0.5 * twoKL", "def sqroot(self, **kwargs):\n\t\t#------ the arguments (in a list format) given when the number.sqroot() functiona is called are transferred to the _sqroot 
function via the **kwargs - a generic list.\n\t\tself.sqrt=_sqroot(self.value, **kwargs)\n\t\treturn _sqroot(self.value, **kwargs)", "def norm(self):\n return sqrt(self.dot(self))", "def norm(x):\n return inner_prod(x, x)[0].sqrt_()", "def transform_square_root(self):\n data = self.values[\"X\"]\n self.values[\"X\"] = np.sqrt(data)\n\n return self" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start twisted event loop and the fun should begin... brokerTimeout: how long to wait for a broker. Returns a negative number upon failure. Otherwise, it never returns.
def start(config, brokerTimeout = 60.0):
    manager = multiprocessing.Manager()
    serverUpEvent = manager.Event()
    broker = multiprocessing.Process(target=startSTOMPBroker, args=(config,serverUpEvent))
    broker.daemon = True
    broker.name = 'STOMP-Broker'
    broker.start()

    serverUpEvent.wait(brokerTimeout)
    if not serverUpEvent.is_set():
        logger.fatal("Broker not available after %.1f seconds. Giving up", brokerTimeout)
        return -1

    #host side logic
    host = config.get('Broker', 'host')
    port = int(config.get('Broker', 'port'))
    username = config.get('Broker', 'username')
    password = config.get('Broker', 'password')

    hostEngine = HostStompEngine(config)
    stompProtocolFactory = StompProtocolFactory(hostEngine, username, password)

    HostXMLRPCService(config).makeEngineAccesible(hostEngine)

    reactor.connectTCP(host, port, stompProtocolFactory)
    reactor.run()
[ "def testReactor(self):\n lbrynet.dht.protocol.reactor.listenUDP(0, self.protocol)\n lbrynet.dht.protocol.reactor.callLater(0, lbrynet.dht.protocol.reactor.stop)\n lbrynet.dht.protocol.reactor.run()", "def start(self):\n if not self._connected:\n self._client.connect(self._addr, port=self._port, keepalive=60, bind_address=\"\")\n self._client.loop_start()\n self._connected = True\n logger.info(\"Connection with MQTT Broker at %s:%d estabilished.\", self._addr, self._port)", "def _start(self):\n self.mqtt.loop_forever()", "def test_main():\n\n listener = Qe2ServerListener('', 4000)\n reactor.run()", "def connect(self):\n\t\tself.printed_sub = False\n\t\tself.client.connect(BROKER)\n\t\tself.client.loop_forever()", "def run(self):\n self.client.on_connect = self.on_connect\n self.client.on_message = self.on_message\n self.client.on_subscribe = self.on_subscribe\n self.client.on_publish = self.on_publish\n # self.client.on_log = self.on_log\n # self.client.enable_logger(logger)\n self.client.reconnect_delay_set(min_delay=0.3, max_delay=120)\n try:\n self.client.connect(self.broker_adress, int(self.broker_port), 60)\n except:\n txt = 'Broker with ip-adress {} on port {} not found'.format(self.broker_adress, self.broker_port)\n logger.error(txt)\n raise Exception(txt)\n # sys.exit()\n # self.client.loop_forever()\n self.client.loop_start()", "async def async_start(self) -> None:\n\n self._shutdown = False\n\n # Start up the LifeSOS interface\n self._baseunit.start()\n\n # Connect to the MQTT broker\n self._mqtt_was_connected = False\n if self._config.mqtt.uri.port:\n self._mqtt.connect_async(\n self._config.mqtt.uri.hostname,\n self._config.mqtt.uri.port,\n keepalive=Translator.KEEP_ALIVE)\n else:\n self._mqtt.connect_async(\n self._config.mqtt.uri.hostname,\n keepalive=Translator.KEEP_ALIVE)\n\n # Start processing MQTT messages\n self._mqtt.loop_start()", "def start_call_back_loop(loop: asyncio.AbstractEventLoop) -> None:\n asyncio.set_event_loop(loop)\n loop.run_forever()", "def run_server(self):\n try:\n self.client.connect(host=self.host, port=self.port, keepalive=self.keepalive)\n self.logger.info('MQTT server started')\n self.client.loop_forever()\n except OSError:\n self.logger.error('MQTT broker is not available')", "def start(self) -> None:\n conn_manager = ConnectionManager(broker_host=self.broker_host, queue=self.queue)\n channel = conn_manager.start_channel()\n channel.basic_consume(queue=self.queue, on_message_callback=self.callback)\n\n try:\n print(\"PV Simulator...\")\n channel.start_consuming()\n except KeyboardInterrupt:\n pass", "def test_eventloop_api_reactor(self):\n from twisted.internet import reactor\n _main.no_setup()\n self.assertIdentical(_main._reactor, reactor)", "def runReactor(self):\n def getReadyToStop():\n self.reactor.callLater(self.timeout, self.reactor.stop)\n self.reactor.callWhenRunning(getReadyToStop)\n if self.action is not None:\n self.reactor.callWhenRunning(self.action)\n self.reactor.run(installSignalHandlers=False)", "def start(self) -> str:\r\n try:\r\n self.client.on_connect = self.on_connect\r\n self.client.on_disconnect = self.on_disconnect\r\n self.client.on_message = self.on_message\r\n self.client.on_subscribe = self.on_subscribe\r\n self.client.on_unsubscribe = self.on_unsubscribe\r\n self.client.will_set(\r\n topic=MQTT_WILL_TOPIC,\r\n payload=self.will_payload,\r\n )\r\n self.client.connect(\r\n host=MQTT_SERVER,\r\n port=MQTT_PORT\r\n )\r\n self.client.loop_start()\r\n except RuntimeError:\r\n errmsg = \"Could not start mqtt 
connection\"\r\n return errmsg\r\n else:\r\n return None", "def run_reactor(self):\n self.reactor.run()", "def start(self):\n\n self.init_callback(self)\n\n while 1:\n self.reset_connection()\n try:\n self.consume_messages()\n except (socket.error, AMQPConnectionException, IOError):\n self.logger.error(\"CarrotListener: Connection to broker lost.\"\n + \" Trying to re-establish connection...\")", "async def start(\n self, exit_stack: AsyncExitStack, event_broker: EventBroker\n ) -> None:", "def run(self) -> None:\n\t\tself.messageHandler and self.messageHandler.logging(self.mqttClient, logging.DEBUG, f'MQTT: client name: {self.clientID}')\n\t\tself.mqttClient = mqtt.Client(client_id=self.clientID, clean_session=False if self.clientID else True)\t# clean_session=False is defined by TS-0010\n\n\t\t# Enable SSL\n\t\tif self.useTLS:\n\t\t\tself.mqttClient.tls_set(ca_certs=self.caFile, cert_reqs=ssl.CERT_REQUIRED if self.verifyCertificate else ssl.CERT_NONE)\n\n\t\t# Set username/password\n\t\tif self.username and self.password:\n\t\t\tself.mqttClient.username_pw_set(self.username, self.password)\n\t\t\n\t\tself.mqttClient.on_connect \t\t= self._onConnect\n\t\tself.mqttClient.on_disconnect\t= self._onDisconnect\n\t\tself.mqttClient.on_log\t\t\t= self._onLog\n\t\tself.mqttClient.on_subscribe\t= self._onSubscribe\n\t\tself.mqttClient.on_unsubscribe\t= self._onUnsubscribe\n\t\tself.mqttClient.on_message\t\t= self._onMessage\n\n\t\ttry:\n\t\t\tself.messageHandler and self.messageHandler.logging(self.mqttClient, logging.DEBUG, f'MQTT: connecting to host:{self.address}, port:{self.port}, keepalive: {self.keepalive}, bind: {self.bindIF}')\n\t\t\tself.mqttClient.connect(host=self.address, port=self.port, keepalive=self.keepalive, bind_address=self.bindIF)\n\t\texcept Exception as e:\n\t\t\tif self.messageHandler:\n\t\t\t\tself.messageHandler.logging(self.mqttClient, logging.ERROR, f'MQTT: cannot connect to broker: {e}')\n\t\t\t\tself.messageHandler.onError(self, -1)\n\n\t\t# Actually start the actor to run the MQTT client as a thread\n\t\tself.actor = BackgroundWorkerPool.newActor(self._mqttActor, name='MQTTClient').start()", "def test_client_server():\n global backend_manager\n win = QtWidgets.QMainWindow()\n backend_manager = BackendManager(win)\n with pytest.raises(NotRunning):\n backend_manager.send_request(\n backend.echo_worker, 'some data', on_receive=_on_receive)\n backend_manager.start(os.path.join(os.getcwd(), 'server.py'))\n backend_manager._process.started.connect(_send_request)\n QTest.qWait(1000)\n backend_manager.stop()\n del backend_manager\n del win", "def run(self):\n global _callback_thread\n\n self._ready.set()\n\n while self._operational:\n\n # qLen = self._work_q.qsize()\n\n while True:\n try:\n msg = self._topic_recvr.fetch(timeout=0)\n except Empty:\n break\n # TRACE:\n # log.error(\"!!! Console %s: msg on %s [%s]\" %\n # (self._name, self._topic_recvr.source, msg))\n self._dispatch(msg, _direct=False)\n\n while True:\n try:\n msg = self._direct_recvr.fetch(timeout = 0)\n except Empty:\n break\n # TRACE\n #log.error(\"!!! 
Console %s: msg on %s [%s]\" %\n # (self._name, self._direct_recvr.source, msg))\n self._dispatch(msg, _direct=True)\n\n self._expire_agents() # check for expired agents\n self._expire_mboxes() # check for expired async mailbox requests\n\n #if qLen == 0 and self._work_q.qsize() and self._notifier:\n if self._work_q_put and self._notifier:\n # new stuff on work queue, kick the the application...\n self._work_q_put = False\n _callback_thread = currentThread()\n trace.debug(\"Calling console notifier.indication\")\n self._notifier.indication()\n _callback_thread = None\n\n\n # wait for a message to arrive, or an agent\n # to expire, or a mailbox requrest to time out\n now = datetime.datetime.utcnow()\n next_expire = self._next_agent_expire\n\n self._lock.acquire()\n try:\n # the mailbox expire flag may be cleared by the\n # app thread(s) to force an immedate mailbox scan\n if self._next_mbox_expire is None:\n next_expire = now\n elif self._next_mbox_expire < next_expire:\n next_expire = self._next_mbox_expire\n finally:\n self._lock.release()\n\n timeout = timedelta_to_secs(next_expire - now)\n\n if self._operational and timeout > 0.0:\n try:\n trace.debug(\"waiting for next rcvr (timeout=%s)...\" % timeout)\n self._session.next_receiver(timeout = timeout)\n except Empty:\n pass\n\n trace.debug(\"Shutting down Console thread\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the solar noon (the time when the sun is at its highest point).
def solar_noon(self, date=None, local=True):
    if self.astral is None:
        self.astral = Astral()

    if date is None:
        date = datetime.date.today()

    noon = self.astral.solar_noon_utc(date, self.longitude)

    if local:
        return noon.astimezone(self.tz)
    else:
        return noon
[ "def find_solar_noon(self,dt):\n year = dt.timetuple().tm_year\n #print year\n month = dt.timetuple().tm_mon\n #print month\n \n day = dt.timetuple().tm_mday \n \n sitka = ephem.Observer()\n \n date = str(year)+'/'+str(month)+'/'+str(day)\n #print date\n\n sitka.date = date\n\n sitka.lat = self.lat\n\n sitka.lon = self.lon\n\n m = ephem.Sun()\n\n noon = sitka.next_transit(m)\n \n ntime = str(noon).split(' ')[1]\n \n h = ntime.split(':')[0]\n m = ntime.split(':')[1]\n s = ntime.split(':')[2]\n \n snoon = datetime.datetime(year,month,day,int(h),int(m),int(s))\n \n snoon += datetime.timedelta(hours=self.tdiffUTC)\n \n self.solnoon = snoon\n \n return snoon", "def solar_noon_utc(self, date, longitude):\n \n julianday = self._julianday(date.day, date.month, date.year)\n\n newt = self._jday_to_jcentury(julianday + 0.5 + longitude / 360.0)\n\n eqtime = self._eq_of_time(newt)\n timeUTC = 720.0 + (longitude * 4.0) - eqtime\n\n timeUTC = timeUTC/60.0\n hour = int(timeUTC)\n minute = int((timeUTC - hour) * 60)\n second = int((((timeUTC - hour) * 60) - minute) * 60)\n\n if second > 59:\n second -= 60\n minute += 1\n elif second < 0:\n second += 60\n minute -= 1\n\n if minute > 59:\n minute -= 60\n hour += 1\n elif minute < 0:\n minute += 60\n hour -= 1\n\n if hour > 23:\n hour -= 24\n date += datetime.timedelta(days=1)\n elif hour < 0:\n hour += 24\n date -= datetime.timedelta(days=1)\n\n noon = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)\n\n return noon", "def sun_calc(lon_obs, lat_obs):\n return (lambda day, interval: sun_time(local_noon(day, lon_obs),\n lon_obs, lat_obs, interval['rising'],\n interval['angle'] if 'angle' in interval else 0.833333333))", "def declinacion_solar(dia_juliano):\n return 0.409*math.sin(2*math.pi*dia_juliano/365-1.39)", "def _solar_time(date, lon, lat):\n # IDL is computing time_t as hours and fractional minutes\n time_t = ee.Date(date).get('hour').add(\n ee.Date(date).get('minute').divide(60))\n\n # This will return the hour floating point value\n # time_t = ee.Date(date).get('hour').add(ee.Date(date).getFraction('hour'))\n\n # CGM - DOY and hour could be images in order to make these expressions\n julian = _to_jd(date)\n\n # Sunrise time\n julian_ = time_t.divide(24.0).add(julian)\n j_cen = julian_.add(0.5 - 2451545.0).divide(36525.0)\n # CGM - Does the mod happen before or after the multiply\n lon_sun = j_cen.multiply(0.0003032).add(36000.76983) \\\n .multiply(j_cen).mod(360.0).add(280.46646).subtract(360)\n an_sun = j_cen.multiply(-0.0001537).add(35999.05029) \\\n .multiply(j_cen).add(357.52911)\n ecc = j_cen.multiply(0.0000001267).add(0.000042037) \\\n .multiply(j_cen).multiply(-1).add(0.016708634)\n ob_ecl = j_cen.multiply(-0.001813).add(0.00059) \\\n .multiply(j_cen).add(46.815) \\\n .multiply(j_cen).multiply(-1).add(21.448) \\\n .divide(60.0).add(26).divide(60).add(23)\n ob_corr = j_cen.multiply(-1934.136).add(125.04).multiply(deg2rad).cos() \\\n .multiply(0.00256).add(ob_ecl)\n var_y = ob_corr.divide(2.0).multiply(deg2rad).tan().multiply(\n ob_corr.divide(2.0).multiply(deg2rad).tan())\n eq_t = (\n lon_sun.multiply(2.0).multiply(deg2rad).sin().multiply(var_y)\n .subtract(an_sun.multiply(deg2rad).sin().multiply(ecc).multiply(2.0))\n .add(an_sun.multiply(deg2rad).sin()\n .multiply(lon_sun.multiply(2.0).multiply(deg2rad).cos())\n .multiply(var_y).multiply(ecc).multiply(4.0))\n .subtract(lon_sun.multiply(4.0).multiply(deg2rad).sin()\n .multiply(var_y).multiply(var_y).multiply(0.5))\n 
.subtract(an_sun.multiply(2.0).multiply(deg2rad).sin()\n .multiply(ecc).multiply(ecc).multiply(1.25))\n .multiply(4.0).multiply(rad2deg))\n sun_eq = (\n an_sun.multiply(deg2rad).sin().multiply(\n j_cen.multiply(0.000014).add(0.004817)\n .multiply(j_cen).multiply(-1).add(1.914602))\n .add(an_sun.multiply(2.0).multiply(deg2rad).sin()\n .multiply(j_cen.multiply(-0.000101).add(0.019993)))\n .add(an_sun.multiply(3.0).multiply(deg2rad).sin().multiply(0.000289)))\n sun_true = sun_eq.add(lon_sun)\n sun_app = j_cen.multiply(-1934.136).add(125.04).multiply(deg2rad).sin() \\\n .multiply(-0.00478).subtract(0.00569).add(sun_true)\n\n # CGM - Intentionally not converting back to degrees\n d = ob_corr.multiply(deg2rad).sin() \\\n .multiply(sun_app.multiply(deg2rad).sin()) \\\n .asin()\n\n # CGM - Functions below are lat/lon dependent and can be written as\n # ee.Image expressions\n # CGM - d is still in radians, not converting\n ha_t = lat.expression(\n 'acos((cos(90.833 * pi / 180) / (cos(lat) * cos(d))) - tan(lat) * tan(d))'\n ' * (180 / pi)',\n {'lat': lat, 'd': d, 'pi': math.pi})\n\n # print('\\n{:10s} {:.12f}'.format('julian_', julian_.getInfo()))\n # print('{:10s} {:.12f}'.format('time_t', time_t.getInfo()))\n # print('{:10s} {:.12f}'.format('j_cen', j_cen.getInfo()))\n # print('{:10s} {:.12f}'.format('lon_sun', lon_sun.getInfo()))\n # print('{:10s} {:.12f}'.format('an_sun', an_sun.getInfo()))\n # print('{:10s} {:.12f}'.format('ecc', ecc.getInfo()))\n # print('{:10s} {:.12f}'.format('ob_ecl', ob_ecl.getInfo()))\n # print('{:10s} {:.12f}'.format('ob_corr', ob_corr.getInfo()))\n # print('{:10s} {:.12f}'.format('var_y', var_y.getInfo()))\n # print('{:10s} {:.12f}'.format('eq_t', eq_t.getInfo()))\n # print('{:10s} {:.12f}'.format('sun_eq', sun_eq.getInfo()))\n # print('{:10s} {:.12f}'.format('sun_true', sun_true.getInfo()))\n # print('{:10s} {:.12f}'.format('sun_app', sun_app.getInfo()))\n # print('{:10s} {:.12f}'.format('d', d.getInfo()))\n\n return d, eq_t, ha_t", "def find_solar_risingC(self,dt):\n \n d = ephem.Date(dt.date())\n \n \n sitka = ephem.Observer()\n \n \n sitka.date = d\n\n sitka.lat = self.lat\n\n sitka.lon = self.lon\n\n m = ephem.Sun()\n\n ri = sitka.next_rising(m)\n \n sri = ri.datetime()\n sri += datetime.timedelta(hours=self.tdiffUTC)\n \n \n if dt.date() < sri.date() :\n dt = dt - datetime.timedelta(hours=24)\n \n d = ephem.Date(dt.date())\n #print d\n \n sitka.date = d\n m = ephem.Sun()\n\n ri = sitka.next_rising(m)\n #print ri\n \n sri = ri.datetime()\n sri += datetime.timedelta(hours=self.tdiffUTC)\n #print sri\n elif dt.date() > sri.date() :\n dt = dt + datetime.timedelta(hours=24)\n \n d = ephem.Date(dt.date())\n #print d\n \n sitka.date = d\n m = ephem.Sun()\n\n ri = sitka.next_rising(m)\n #print ri\n \n sri = ri.datetime()\n sri += datetime.timedelta(hours=self.tdiffUTC)\n #print sri\n \n \n self.solrising = sri\n \n return sri", "def radiacion_solar(radiacion):\n return radiacion*0.0864", "def _calculate_sun(self):\n\n daystart = self.config['env']['day_start']\n dayend = self.config['env']['day_end']\n daylen = dayend - daystart\n sun = 0\n if daystart <= self.basetime <= dayend:\n sun = truncate(sin((self.basetime - daystart) * pi / daylen)) *\\\n self.sun_amplitude\n\n self.weather['sun'] = sun", "def solar_time_index(self):\n return self.data.solar_time_index", "def omega_sun_snodgrass90(lat):\n return differential_rotation(lat, 14.71, -2.39, -1.78)", "def get_vsolar(self):\n return self.read_register(4098, 1, 3)", "def inverse_earth_sun_distance(day_angle):\n return 1 
+ 0.03344 * cos(day_angle - 0.048869)", "def daynight_terminator(date, lons):\n import mpl_toolkits.basemap.solar as solar\n import numpy as np\n\n dg2rad = np.pi / 180.\n # compute greenwich hour angle and solar declination\n # from datetime object (assumed UTC).\n tau, dec = solar.epem(date)\n # compute day/night terminator from hour angle, declination.\n longitude = lons + tau\n lats = np.arctan(-np.cos(longitude * dg2rad) /\n np.tan(dec * dg2rad)) / dg2rad\n return lats, tau, dec", "def get_nanosecond(self):\n if self.is_module_queried['GPS_nanosecond'] is False:\n self.get_pvt()\n self.is_module_queried['GPS_nanosecond'] = False\n self.is_module_queried['All'] = False\n\n return self.gps_nanosecond", "def get_moon_phase(now):\n from math import pi\n\n ref = date_to_jd('1899-12-31', '12:00:00')\n T = (now - ref) / 36525\n nu = -9.26009 + 445267.12165*T + 0.00168*(T**2)\n ageDeg = nu % 360\n nuRad = ageDeg * pi / 180\n nuHrs = (nu/15) % 24\n return nuHrs", "def find_sunrise(n=1):\n ts = api.load.timescale()\n ep = api.load('de421.bsp')\n location = api.Topos('41.85 N', '87.65 W') # Chicago, USA\n t0 = ts.now()\n t1 = ts.utc(t0.utc_datetime() + timedelta(days=n))\n t, y = almanac.find_discrete(t0, t1, almanac.sunrise_sunset(ep, location))\n \n times = list(zip(t.utc_iso(), y))\n print(times)", "def solar_constant():\n return 1367.", "def solar_noon_utc(LonDegE):\n _timezone = array([-180, -172.5, -157.5, -142.5, -127.5, -112.5, -97.5, -82.5, -67.5, -52.5, -37.5, -22.5, -7.5, 7.5, 22.5, 37.5, 52.5, 67.5, 82.5, 97.5, 112.5, 127.5, 142.5, 157.5, 172.5, 180]).repeat(2, 0)[1:-1].reshape(-1, 2)\n for i, (low, high) in enumerate(_timezone):\n if LonDegE >= low:\n if LonDegE <= high:\n return 12 -(-12 + i)", "def time_NEURON():\n recorded_time = h.Vector()\n recorded_time.record(h._ref_t)\n return recorded_time" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the solar azimuth angle for a specific date/time.
def solar_azimuth(self, dateandtime=None):
    if self.astral is None:
        self.astral = Astral()

    if dateandtime is None:
        dateandtime = datetime.datetime.now(tz=self.tz)

    return self.astral.solar_azimuth(dateandtime, self.latitude, self.longitude)
[ "def HourAngle(solar_time):\n return solar_time*15 - 180", "def hour_angle(solar_time):\n ha = pi / 12 * (solar_time - 12)\n\n return ha", "def angle_and_azimuth(self, satellite_ecef):\n from numpy import arcsin, arctan2, dot\n from numpy.linalg import norm\n\n r_ss = satellite_ecef - self.position\n r_ss_norm = r_ss / norm(r_ss)\n\n r_sse = dot(r_ss_norm, self._e)\n r_ssn = dot(r_ss_norm, self._n)\n r_ssu = dot(r_ss_norm, self._u)\n\n angle = arcsin(r_ssu)\n azimuth = arctan2(r_sse, r_ssn)\n return angle, azimuth", "def azimuth(vv, v0, v1):\n with np.errstate(divide='ignore', invalid='ignore'):\n n0 = np.cross(v0, v1)\n n0 /= np.dual.norm(n0, axis=-1)[..., np.newaxis]\n nn = np.cross(v0, vv)\n nn /= np.dual.norm(nn, axis=-1)[..., np.newaxis]\n\n azi = np.arccos(np.sum(nn * n0, -1))\n if len(np.shape(azi)) > 0:\n azi[np.dot(vv, n0) < 0] *= -1\n # arbitrary angle where vv is (anti)parallel to v0\n azi[np.isnan(azi)] = 0\n elif np.isnan(azi):\n return 0\n elif np.dot(vv, v0) < 1 and azi > 0:\n azi *= -1\n\n return azi", "def azimuth(self, location, gmst):\n direction = self.local_ray_direction(location, gmst) # guide vector\n\n absolute_direction = 0\n\n # avoid divide-by-zero errors\n if direction.x == 0:\n if direction.y > 0:\n absolute_direction = math.pi/2\n else:\n absolute_direction = -math.pi/2\n else:\n # calculate direction relative to global coordinate system\n absolute_direction = math.atan(direction.y/direction.x) \n\n # arctan() range is limited to -90 to 90 degrees. To detect 90 to 270 degrees, test sign of x component.\n if direction.x < 0:\n absolute_direction += math.pi\n\n # adjust angle relative to direction of North Pole. Towards North Pole should be zero degrees.\n azimuth = gmst.rad() + location.lon.rad() - math.pi/2 - absolute_direction \n\n # normalize azimuth to range of 0 to 360 degrees\n if azimuth < 0:\n azimuth -= int(azimuth/math.pi/2.0 - 1)*math.pi*2.0\n if azimuth >= math.pi*2: \n azimuth -= int(azimuth/math.pi/2.0)*math.pi*2.0\n\n return Angle(azimuth)", "def _azimuth(section, soma):\n vector = morphmath.vector(section[0], soma.center)\n return np.arctan2(vector[COLS.Z], vector[COLS.X])", "def test_azimuth_angle(self):\n p1 = self.frame[\"SPEFitSingle_HV_ipdf\"]\n p2 = self.frame[\"SPEFitSingle_HV_rpdf\"]\n self.assertLess(abs(p1.dir.azimuth - p2.dir.azimuth), 1e-6)", "def solar_angle(day, utc_hour, longitude):\n localtime = (longitude / 180.0) * 12 + utc_hour\n\n lstm = 15 * (localtime - utc_hour)\n\n B = np.deg2rad((360. / 365.) 
* (day - 81))\n\n eot = (9.87 *\n np.sin(2 * B) -\n 7.53 * np.cos(B) -\n 1.5 * np.sin(B))\n\n return 15 * (localtime +\n (4 * (longitude - lstm) + eot) / 60.0 - 12)", "def azimuths(self):\n if not self.pis:\n return np.zeros(0, dtype='float')\n\n elif len(self.pis) == 1:\n return np.zeros(1, dtype='float')\n\n x = self.pi_coordinates()\n dx = x[1:,:2] - x[:-1,:2]\n az = np.arctan2(dx[:,0], dx[:,1])\n az = np.append(az, az[-1])\n\n return np.asarray(az, dtype='float')", "def azimuth(self, other, projected=True):\n x0, y0 = self.x, self.y\n if self.crs != other.crs:\n x1, y1 = other.get_vertex(self.crs)[:2]\n else:\n x1, y1 = other.x, other.y\n\n if (x0, y0) == (x1, y1):\n az = np.nan\n elif projected and not isinstance(self.crs, GeographicalCRS):\n az = 90.0 - math.atan2(y1-y0, x1-x0)*180.0/math.pi\n az = (az+180) % 360 - 180\n else:\n lon0, lat0 = self.crs.project(x0, y0, inverse=True)\n lon1, lat1 = self.crs.project(x1, y1, inverse=True)\n az, _, _ = self.crs.inverse(lon0, lat0, lon1, lat1)\n return az", "def azimuth(poly):\n num = len(poly) - 1\n vec = unit_normal(poly[0], poly[1], poly[num])\n vec_azi = np.array([vec[0], vec[1], 0])\n vec_n = np.array([0, 1, 0])\n # update by Santosh\n # angle2vecs gives the smallest angle between the vectors\n # so for a west wall angle2vecs will give 90\n # the following 'if' statement will make sure 270 is returned\n x_vector = vec_azi[0]\n if x_vector < 0:\n return 360 - angle2vecs(vec_azi, vec_n)\n else:\n return angle2vecs(vec_azi, vec_n)", "def get_azimuth (\r\n xlon: str | ArrayLike, \r\n ylat: str| ArrayLike, \r\n *, \r\n data: DataFrame =None, \r\n utm_zone:str=None, \r\n projection:str='ll', \r\n isdeg:bool=True, \r\n mode:str='soft', \r\n extrapolate:bool =...,\r\n view:bool=..., \r\n ):\r\n from ..site import Location \r\n \r\n mode = str(mode).lower() \r\n projection= str(projection).lower()\r\n extrapolate, view = ellipsis2false (extrapolate, view)\r\n\r\n xlon , ylat = assert_xy_in(xlon , ylat , data = data )\r\n \r\n if ( \r\n xlon.max() > 180. and ylat.max() > 90. \r\n and projection=='ll' \r\n and mode=='soft'\r\n ): \r\n warnings.warn(\"xlon and ylat arguments are greater than 180 degrees.\"\r\n \" we assume the coordinates are UTM. Set explicitly\"\r\n \" projection to ``UTM`` to avoid this warning.\")\r\n projection='utm'\r\n \r\n if projection=='utm':\r\n if utm_zone is None: \r\n raise TypeError (\"utm_zone cannot be None when projection is UTM.\")\r\n \r\n ylat , xlon = Location.to_latlon_in(\r\n xlon, ylat, utm_zone= utm_zone)\r\n \r\n if len(xlon) ==1 or len(ylat)==1: \r\n msg = \"Azimuth computation expects at least two points. Got 1\"\r\n if mode=='soft': \r\n warnings.warn(msg) \r\n return 0. 
\r\n \r\n raise TypeError(msg )\r\n # convert to radian \r\n if isdeg: \r\n xlon = np.deg2rad (xlon ) ; ylat = np.deg2rad ( ylat)\r\n \r\n dx = map (lambda ii: np.cos ( ylat[ii]) * np.sin( ylat [ii+1 ]) - \r\n np.sin(ylat[ii]) * np.cos( ylat[ii+1]) * np.cos (xlon[ii+1]- xlon[ii]), \r\n range (len(xlon)-1)\r\n )\r\n dy = map( lambda ii: np.cos (ylat[ii+1])* np.sin( xlon[ii+1]- xlon[ii]), \r\n range ( len(xlon)-1)\r\n )\r\n # to deg \r\n z = np.around ( np.rad2deg ( np.arctan2(list(dx) , list(dy) ) ), 3) \r\n azim = z.copy() \r\n if extrapolate: \r\n # use mean azimum of the total area zone and \r\n # recompute the position by interpolation \r\n azim = np.hstack ( ( [z.mean(), z ]))\r\n # reset the interpolare value at the first position\r\n with warnings.catch_warnings():\r\n #warnings.filterwarnings(action='ignore', category=OptimizeWarning)\r\n warnings.simplefilter(\"ignore\")\r\n azim [0] = scalePosition(azim )[0][0] \r\n \r\n if view: \r\n x = np.arange ( len(azim )) \r\n fig, ax = plt.subplots (1, 1, figsize = (10, 4))\r\n # add Nan to the first position of z \r\n z = np.hstack (([np.nan], z )) if extrapolate else z \r\n \r\n ax.plot (x, \r\n azim, \r\n c='#0A4CEE',\r\n marker = 'o', \r\n label ='extra-azimuth'\r\n ) \r\n \r\n ax.plot (x, \r\n z, \r\n 'ok-', \r\n label ='raw azimuth'\r\n )\r\n ax.legend ( ) \r\n ax.set_xlabel ('x')\r\n ax.set_ylabel ('y') \r\n\r\n return azim", "def SolarAzimuth(delta, omega, alpha): \n delta_radians = m.radians(delta)\n omega_radians = m.radians(omega)\n alpha_radians = m.radians(alpha)\n gamma_s_radians = m.asin(m.cos(delta_radians)*m.sin(omega_radians)/m.cos(alpha_radians))\n return gamma_s_radians/2/m.pi*360", "def azimuth(self):\n return self.get_azimuth()", "def azimuth(lons1, lats1, lons2, lats2):\n lons1, lats1, lons2, lats2 = _prepare_coords(lons1, lats1, lons2, lats2)\n cos_lat2 = numpy.cos(lats2)\n true_course = numpy.degrees(numpy.arctan2(\n numpy.sin(lons1 - lons2) * cos_lat2,\n numpy.cos(lats1) * numpy.sin(lats2)\n - numpy.sin(lats1) * cos_lat2 * numpy.cos(lons1 - lons2)\n ))\n return (360 - true_course) % 360", "def solar_elevation_angle(lat, decl, ha):\n C31 = np.cos(lat) * np.cos(decl)\n C33 = np.sin(lat) * np.sin(decl)\n\n sin_h0 = C31 * np.cos(ha) + C33\n\n h0_rad = np.arcsin(sin_h0)\n\n h0 = h0_rad * 180 / np.pi\n\n return h0", "def rotation_degrees_from_solar_noon(time):\n # determine pair of solar noon/nadir events\n evts = surrounding_events(time, 1, noon_nadir_event_times)\n\n # convert fractional value to degrees offset [0, 180]\n degrees = position_as_percent(evts, time) * 180\n\n # add 180 degrees is prior event is nadir\n return degrees if evts[0].event.value else degrees + 180", "def set_azimuth(self):\n self.azimuth = self.Calculations.convert_to_azimuth( self.declination, self.right_ascension, self.Latitude, self.LHA)\n if self.azimuth < 0:\n self.azimuth = self.azimuth + 360.0\n return self.azimuth\n else:\n pass\n return self.azimuth\n print('azimuth set to', self.azimuth)", "def horiz_angle(time, data):\n\n # TODO What should 0deg be? Set it to inline w/ target? facing target?\n\n # direction of the sun. measured in degrees counted clockwise from north.\n azimuth = data[time]['azimuth']\n\n h_angle = (azimuth / 2 - 90)\n\n # returns answer between -180 and 180 degrees\n return round(((h_angle + 180) % 360) - 180, 4)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the solar elevation angle for a specific time.
def solar_elevation(self, dateandtime=None):
    if self.astral is None:
        self.astral = Astral()

    if dateandtime is None:
        dateandtime = datetime.datetime.now(tz=self.tz)

    return self.astral.solar_elevation(dateandtime, self.latitude, self.longitude)
[ "def HourAngle(solar_time):\n return solar_time*15 - 180", "def hour_angle(solar_time):\n ha = pi / 12 * (solar_time - 12)\n\n return ha", "def solar_azimuth(self, dateandtime=None):\n\n if self.astral is None:\n self.astral = Astral()\n\n if dateandtime is None:\n dateandtime = datetime.datetime.now(tz=self.tz)\n \n return self.astral.solar_azimuth(dateandtime, self.latitude, self.longitude)", "def solar_angle(day, utc_hour, longitude):\n localtime = (longitude / 180.0) * 12 + utc_hour\n\n lstm = 15 * (localtime - utc_hour)\n\n B = np.deg2rad((360. / 365.) * (day - 81))\n\n eot = (9.87 *\n np.sin(2 * B) -\n 7.53 * np.cos(B) -\n 1.5 * np.sin(B))\n\n return 15 * (localtime +\n (4 * (longitude - lstm) + eot) / 60.0 - 12)", "def solar_elevation_angle(lat, decl, ha):\n C31 = np.cos(lat) * np.cos(decl)\n C33 = np.sin(lat) * np.sin(decl)\n\n sin_h0 = C31 * np.cos(ha) + C33\n\n h0_rad = np.arcsin(sin_h0)\n\n h0 = h0_rad * 180 / np.pi\n\n return h0", "def get_solar_altaz(time, location):\n frame = AltAz(obstime=time, location=location)\n\n sunaltaz = get_sun(time).transform_to(frame)\n\n return sunaltaz", "def hour_angle(self):\n target_apparent_ra = ApparentRightAscension(self.ra, mode=\"rad\")\n return target_apparent_ra.to_hour_angle(self.observer.location.longitude, self.observer.now)", "def illumination_elevation_angle(self) -> float:\n return None", "def era(self):\n # earth rotation angle using Universal Time\n J = self.MJD - 51544.5\n fraction = np.mod(J, self.turn)\n theta = np.mod(0.7790572732640 + 0.00273781191135448*J, self.turn)\n return self.turndeg*np.mod(theta + fraction, self.turn)", "def sunAltAz(jdT = None):\n #Sun position in ra, dec\n if jdT: raS, decS = ugradio.coord.sunpos(jd = jdT)\n else: raS, decS = ugradio.coord.sunpos()\n return ugradio.coord.get_altaz(raS, decS, jd = jdT)", "def rotation_degrees_from_solar_noon(time):\n # determine pair of solar noon/nadir events\n evts = surrounding_events(time, 1, noon_nadir_event_times)\n\n # convert fractional value to degrees offset [0, 180]\n degrees = position_as_percent(evts, time) * 180\n\n # add 180 degrees is prior event is nadir\n return degrees if evts[0].event.value else degrees + 180", "def AngleFromSun(body, time):\n if body == Body.Earth:\n raise EarthNotAllowedError()\n sv = GeoVector(Body.Sun, time, True)\n bv = GeoVector(body, time, True)\n return AngleBetween(sv, bv)", "def sun_topo_ra_decl_hour(latitude, longitude, elevation, jd, delta_t=0):\n\n jde = _sp.julian_ephemeris_day(jd, delta_t)\n jce = _sp.julian_century(jde)\n jme = _sp.julian_millennium(jce)\n\n helio_pos = _sp.heliocentric_position(jme)\n R = helio_pos[-1]\n phi, sigma, E = latitude, longitude, elevation\n # equatorial horizontal parallax of the sun, in radians\n xi = np.deg2rad(8.794 / (3600 * R))\n # rho = distance from center of earth in units of the equatorial radius\n # phi-prime = geocentric latitude\n # NB: These equations look like their based on WGS-84, but are rounded slightly\n # The WGS-84 reference ellipsoid has major axis a = 6378137 m, and flattening factor 1/f = 298.257223563\n # minor axis b = a*(1-f) = 6356752.3142 = 0.996647189335*a\n u = np.arctan(0.99664719 * np.tan(phi))\n x = np.cos(u) + E * np.cos(phi) / 6378140 # rho sin(phi-prime)\n y = 0.99664719 * np.sin(u) + E * np.sin(phi) / 6378140 # rho cos(phi-prime)\n\n delta_psi, epsilon = _sp.nutation_obliquity(jce)\n\n llambda, beta = _sp.sun_longitude(helio_pos, delta_psi)\n\n alpha, delta = _sp.sun_ra_decl(llambda, epsilon, beta)\n\n v = _sp.greenwich_sidereal_time(jd, 
delta_psi, epsilon)\n\n H = v + longitude - alpha\n Hr, dr = map(np.deg2rad, (H, delta))\n\n dar = np.arctan2(\n -x * np.sin(xi) * np.sin(Hr), np.cos(dr) - x * np.sin(xi) * np.cos(Hr)\n )\n delta_alpha = np.rad2deg(dar)\n\n alpha_prime = alpha + delta_alpha\n delta_prime = np.rad2deg(\n np.arctan2(\n (np.sin(dr) - y * np.sin(xi)) * np.cos(dar),\n np.cos(dr) - y * np.sin(xi) * np.cos(Hr),\n )\n )\n H_prime = H - delta_alpha\n\n return alpha_prime, delta_prime, H_prime", "def getOrbitalDistance():\n\n #amount of time since the vernal equinox\n timediff = (s.runTime - s.equinox).total_seconds() % \\\n s.nSecondsPerYear\n\n #angle swept out since equinox\n theta = timediff*360/s.nSecondsPerYear\n\n if theta > 360:\n print(\"Error in orbit.py\")\n print(\"Issue with orbit angle\")\n exit(1)\n\n #Perihelion occurs near Jan 3, so this angle is set\n #as s.longitudeOfPerihelion\n s.orbitAngle = (theta - s.longitudeOfPerihelion)\n if s.orbitAngle < 0:\n s.orbitAngle += 360\n\n s.orbitalDistance = inputs.distancePlanet*(1-inputs.eccentricity**2)/ \\\n (1+inputs.eccentricity*cos(s.orbitAngle*pi/180.))\n\n return 0", "def angle_and_azimuth(self, satellite_ecef):\n from numpy import arcsin, arctan2, dot\n from numpy.linalg import norm\n\n r_ss = satellite_ecef - self.position\n r_ss_norm = r_ss / norm(r_ss)\n\n r_sse = dot(r_ss_norm, self._e)\n r_ssn = dot(r_ss_norm, self._n)\n r_ssu = dot(r_ss_norm, self._u)\n\n angle = arcsin(r_ssu)\n azimuth = arctan2(r_sse, r_ssn)\n return angle, azimuth", "def plotElevation(source, time, loc, unit='deg', dt=1800):\n time = getTime(time)\n t0 = time.iso.split()[0].replace('-', '/') \n sname = source\n lname = loc \n loc = getLoc(loc)\n source = getSrc(source=source, time=time, loc=loc, unit=unit)\n \n # Compute the elevation vs time\n dt = TimeDelta(dt, format='sec')\n nvals = int((TimeDelta(1, format='jd')/dt).value)\n times = np.zeros( nvals )\n elev = np.zeros( nvals )\n for i in np.arange(nvals):\n src = getAltaz(source=source, time=time, loc=loc, unit=unit)\n times[i] = time.mjd\n elev[i] = src.alt.deg\n time += dt\n\n # Interpolate\n evst = interp1d(times, elev, kind='cubic')\n times = np.linspace(times[0], times[-1], 1000) \n elev = evst(times)\n\n # Plot\n fig, ax = plt.subplots()\n xtime = mdates.date2num( Time(times, format='mjd').to_datetime() )\n ax.plot( xtime, elev, label='{} @ {}'.format(sname, lname) )\n ax.axhline(0., linestyle='--', color='black', linewidth=1)\n ymin, ymax = ax.get_ylim()\n ax.fill_between( xtime, ymin, ymax,\n where=(elev <= 0.), facecolor='gray', alpha=0.5, step='post')\n ax.fill_between( xtime, ymin, ymax,\n where=(0. <= elev) & (elev <= 20.), facecolor='C3', alpha=0.5, step='mid')\n ax.fill_between( xtime, ymin, ymax,\n where=(20. <= elev) & (elev <= 40.), facecolor='C1', alpha=0.5, step='mid')\n ax.fill_between( xtime, ymin, ymax,\n where=(40. <= elev), facecolor='C2', alpha=0.5, step='mid')\n ax.xaxis_date()\n date_format = mdates.DateFormatter('%H:%M')\n ax.xaxis.set_major_formatter(date_format)\n ax.xaxis.set_minor_formatter(date_format)\n fig.autofmt_xdate()\n ax.legend()\n ax.set_xlabel('Time ({} UTC)'.format(t0))\n ax.set_ylabel('Elevation')\n plt.show()", "def getElAz(ra, dec, lat, lng, time):\n lst = getLST(time, lng)\n ha = ((lst- np.degrees(ra)) % 360.) 
* np.pi/180.\n el = np.arcsin(np.sin(dec) * np.sin(lat) + \\\n np.cos(dec) * np.cos(lat) * np.cos(ha))\n az = np.arccos( (np.sin(dec) - np.sin(el)*np.sin(lat)) / \\\n (np.cos(el) * np.cos(lat)))\n return (np.degrees(el), np.degrees(az))", "def elevation_inv(minutes):\n return -elevation(minutes)", "def solar_elevation_angle_refracted(h0):\n delta_h0ref = 0.061359 * (0.1594 + 1.123*h0 + 0.065656*h0**2)/(1 + 28.9344*h0 + 277.3971*h0**2)\n h0ref = h0 + delta_h0ref\n\n return h0ref" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialise the city database and set the default depression.
def __init__(self):
    self._citydb = CityDB()
    self._depression = 6  # Set default depression in degrees
[ "def init_db(self):\n self.create_db()\n col_rows = self.check_default_settings()\n if col_rows == 0:\n self.set_default_settings()", "def initialize():\n sql_db = SQLConnection()\n with SQLCursor(sql_db) as cur:\n cur.execute('SELECT position from govt_info')\n row = cur.fetchone()\n for pos in Government.positions:\n if row is None or len(row) != len(Government.positions):\n cur.execute('INSERT OR IGNORE INTO govt_info (position) VALUES (?);', (pos,))", "def initCitys(self):\n self.cities = []\n for vertex in self.metaGraph:\n self.cities.append(vertex)", "def __init__(self):\n self._zipcode = None\n self._city = None", "def init_db():\n db.drop_all()\n db.create_all()\n seed_companies()\n seed_emission_reports()\n seed_reduction_targets()\n seed_milestones()", "def init_db(self) -> None:\n self._db = DB()", "def set_tour(self, city_list=None):\n self.cities = city_list or \\\n random.sample(range(len(self.x_points)), len(self.y_points))\n self.distance = 0\n self.fitness = 0", "def __post_init__(self):\n self.dbase = databases.Database(\n self.dsn,\n min_size=self.min_size,\n max_size=self.max_size\n )\n self.engine, self.meta = self.get_engine_metadata()", "def reset_city_list(self):\n city_list = self.get_city_list()\n self.update_city_to_check(city_list)", "def init_database(database_file):\n\t# pylint: disable=global-statement\n\tglobal _geoip_db\n\tif not os.path.isfile(database_file):\n\t\tdb_path = find.data_file('GeoLite2-City.mmdb')\n\t\tif db_path is None:\n\t\t\traise errors.KingPhisherResourceError('the default geoip database file is unavailable')\n\t\tlogger.info('initializing the default geoip database')\n\t\tshutil.copyfile(db_path, database_file)\n\ttry:\n\t\t_geoip_db = geoip2.database.Reader(database_file)\n\texcept maxminddb.errors.InvalidDatabaseError:\n\t\tlogger.warning('the geoip database file is invalid, downloading a new one')\n\t\tdownload_geolite2_city_db(database_file)\n\t\t_geoip_db = geoip2.database.Reader(database_file)\n\tmetadata = _geoip_db.metadata()\n\tif not metadata.database_type == 'GeoLite2-City':\n\t\traise ValueError('the connected database is not a GeoLite2-City database')\n\tbuild_date = datetime.datetime.fromtimestamp(metadata.build_epoch)\n\tif build_date < datetime.datetime.utcnow() - datetime.timedelta(days=90):\n\t\tlogger.warning('the geoip database is older than 90 days')\n\treturn _geoip_db", "def fill_missing_city(cnx):\n list_events_missing_city = create_db.get_event_id_missing_feature(cnx, table=\"events_im\",\n missing_feature=\"city\")\n\n for event_id in list_events_missing_city:\n city = create_db.deduct_city_from_race_card(cnx, event_id)\n create_db.update_missing_date_from_event_id(cnx, event_id=event_id, table=\"events_im\",\n feature_to_fill=\"city\", value=city)", "def initialiser(self):\n\n # Vider le dictionnaire (pratique si on veut recommencer le jeu).\n self.cases.clear()\n # Parcourir le dictionnaire et mettre des objets de la classe Case.\n # dont l'attribut \"contenu\" serait un espace (\" \").\n for i in range(0, 3):\n for j in range(0, 3):\n self.cases[i, j] = Case(\" \")", "def populate_cities():\n if City.query.filter_by(name=CITIES[0]).first():\n return\n\n for city in CITIES:\n _add_city(city)", "def __init__(self, city):\n # Prepare counters for street, parcel, and lot IDs (opting for speed over readability here)\n self.id_counters = {0: 0, 1: 0, 2: 0, }\n self.city = city\n self.streets = set()\n self.blocks = set()\n self.tracts = set()\n self.lots = set()\n # These get used internally by methods below\n 
self._planned_parcels = []\n self._planned_street_segments = {'ns': {}, 'ew': {}}\n self._house_numberings = {}\n self._north_south_streets = {}\n self._east_west_streets = {}\n self._parcels_listing = {}\n self._lots_listing = {}\n self._street_corners = set()\n self._street_connections = {}\n # Devise a city plan\n self.generate_city_plan()\n self.parcels = set(self._parcels_listing.values())", "def __init__(self, city):\n \n self.cleanedCity = deepcopy(city)\n self.addedNodes_d = {}\n \n self.addIntersections()\n #========== TO DO!!! ==========\n #self.removeRepeatedSegments()", "def init_db(cls):\n if cls._landmarks is None:\n cls._landmarks = lite.connect(config.pathToData+'landmarks.db')\n lmcur = cls._landmarks.cursor()\n cls._landmarks.execute('CREATE TABLE IF NOT EXISTS landmarks (landmark_id INTEGER, name VARCHAR(255), lat DECIMAL(9,6), lon DECIMAL(9,6), east DECIMAL(9,6), north DECIMAL(9,6), querylat DECIMAL(9,6), PRIMARY KEY (landmark_id));')\n cls._landmarks.execute('CREATE TABLE IF NOT EXISTS landmark_queries (querylat DECIMAL(9,6));')\n lmcur.close()\n cls._landmarks.commit()", "def init_db(self):\n raise NotImplementedError()", "def city(self, city):\n self._city = city", "def initialize(self):\n self.population.initialize()\n self.cache.initialize()\n if self.storage:\n self.storage.initialize()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the City instance specified by ``key``.
def __getitem__(self, key):
    city = self._citydb[key]
    city.astral = self
    return city
[ "def retrieve_city(city_id):\n city = storage.get('City', city_id)\n if city:\n return city.to_dict()\n abort(404)", "def get_city(city_id):\n city = storage.get(City, city_id)\n\n if not city:\n abort(404)\n\n return jsonify(city.to_dict()), 200", "def city_by_id(city_id):\n\n fetched_obj = storage.get(\"City\", str(city_id))\n\n if fetched_obj is None:\n abort(404)\n\n return jsonify(fetched_obj.to_json())", "def get_city(city_id):\n city = storage.get('City', city_id)\n if city is None:\n abort(404)\n return jsonify(city.to_dict())", "def set_city(self):\n thisCity = random.choice(list(cities.keys()))\n return thisCity", "def get_case(self, key: str):\n case = self.cases.get(key)\n if not hasattr(case, 'case_id'):\n message = \"get_case(): Case key {} does not have a case_id\"\n logmessage(message.format(key))\n else:\n logmessage(\"get_case(): \" + \"Retrieved case {}\".format(str(case)))\n return case", "def get_city(cls, city_name: str, country_slug: str):\n try:\n city = cls.objects.get(name=city_name, country_slug=country_slug)\n return city\n except ObjectDoesNotExist:\n exception_message = f\"The city with name {city_name} doesn't exists\"\n raise CityDoesNotExistsException(exception_message)", "def get(self, key):\n # Initialize key variables\n result = self.cache.get(key)\n\n # Return\n return result", "def get_contact_item_by_key(key):\n\n item = ContactData.query. \\\n filter_by(key=key). \\\n first_or_404()\n\n return item", "def find_city(city, dbsession):\n\n\t# Since we're creating the FK relation based on ID, and hence the casing has no bearing on \n\t# whether the city record associates with the address, I'm upcasing the city to prevent dupes.\n\tcity = str(city)\n\tcity = city.upper()\n\n\tresult = dbsession.query(db.City).filter_by(city_name=city).first()\n\n\tif result is None:\n\t\t# Create a new instance of city\n\t\tcity_object = db.City(city)\n\t\t# I'm adding the city without committing the transaction since it would also\n\t\t# commit the address insert transaction that's still open in routes.py.\n\t\tdbsession.add(city_object)\n\t\treturn city_object\n\telse:\n\t\t# Assign the existing user object to the variable\n\t\treturn result", "def get_city(camp_id):\n\n place_info = get_place_info(camp_id)\n city = place_info[0]\n\n return city", "def get(key):\n try:\n instance = getattr(SQLBatisContainer.__local__, key)\n if not instance:\n raise ContainerException(\n 'There is no object bounded with the key {}'.format(key))\n return instance\n except Exception:\n raise ContainerException('No {} instance registered'.format(key))", "def get_node(self, key):\n url = self.sharding.get_node(key)\n return self.mapping.get(url)", "def central_get(self, key):\n return self.central_repo.get(key)", "def __getitem__(self, key):\n cursor = self._db.cursor()\n key = str(key) # So lazy retrieval objectes are evaluated\n query = 'SELECT %s FROM %s WHERE %s=?' 
% (self._queryBy,\n DBConstants._DICT_TABLE,\n self._queryBy)\n res = cursor.execute(query, (key,))\n if res.fetchone() is None:\n raise KeyError(\"Key %s not found\" % key)\n return screedRecord._buildRecord(self.fields, self._db,\n key,\n self._queryBy)", "def _get_node(self, key):\n\n index = self._hash_function(key) % self.capacity # Get the index by hashing the key\n node = self._buckets[index].contains(key) # Get the node with the key (if it exists)\n return node", "def build_from_json(json_city):\n return City(json_city['name'], json_city['country'], json_city['climate'], json_city['elevation'])", "def city(self):\n return self._city", "def gettraincity(cityname):\n if cityname.upper() in citytotrainmap.keys():\n return citytotrainmap[cityname.upper()]\n return cityname" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate dawn time in the UTC timezone.
def dawn_utc(self, date, latitude, longitude):
    julianday = self._julianday(date.day, date.month, date.year)

    if latitude > 89.8:
        latitude = 89.8

    if latitude < -89.8:
        latitude = -89.8

    t = self._jday_to_jcentury(julianday)
    eqtime = self._eq_of_time(t)
    solarDec = self._sun_declination(t)

    try:
        hourangle = self._hour_angle_sunrise(latitude, solarDec)
    except:
        raise AstralError('Sun remains below horizon on this day, at this location.')

    delta = longitude - degrees(hourangle)
    timeDiff = 4.0 * delta
    timeUTC = 720.0 + timeDiff - eqtime

    newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)
    eqtime = self._eq_of_time(newt)
    solarDec = self._sun_declination(newt)
    hourangle = self._hour_angle_dawn(latitude, solarDec, self._depression)
    delta = longitude - degrees(hourangle)
    timeDiff = 4 * delta
    timeUTC = 720 + timeDiff - eqtime

    timeUTC = timeUTC/60.0
    hour = int(timeUTC)
    minute = int((timeUTC - hour) * 60)
    second = int((((timeUTC - hour) * 60) - minute) * 60)

    if second > 59:
        second -= 60
        minute += 1
    elif second < 0:
        second += 60
        minute -= 1

    if minute > 59:
        minute -= 60
        hour += 1
    elif minute < 0:
        minute += 60
        hour -= 1

    if hour > 23:
        hour -= 24
        date += datetime.timedelta(days=1)
    elif hour < 0:
        hour += 24
        date -= datetime.timedelta(days=1)

    dawn = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)

    return dawn
[ "def get_unixtime(self):\n if not self.Complete():\n raise DateTimeError(\"get_unixtime requires complete timepoint\")\n zoffset = self.time.GetZoneOffset()\n if zoffset is None:\n raise DateTimeError(\"get_unixtime requires timezone\")\n elif zoffset == 0:\n zt = self\n else:\n zt = self.ShiftZone(zDirection=0)\n days = zt.date.GetAbsoluteDay() - EPOCH.date.GetAbsoluteDay()\n seconds = zt.time.GetTotalSeconds() - EPOCH.time.GetTotalSeconds()\n return 86400 * days + seconds", "def dusk_utc(self, date, latitude, longitude):\n \n julianday = self._julianday(date.day, date.month, date.year)\n\n if latitude > 89.8:\n latitude = 89.8\n \n if latitude < -89.8:\n latitude = -89.8\n \n t = self._jday_to_jcentury(julianday)\n eqtime = self._eq_of_time(t)\n solarDec = self._sun_declination(t)\n\n try:\n hourangle = self._hour_angle_sunset(latitude, solarDec)\n except:\n raise AstralError('Sun remains below horizon on this day, at this location.')\n\n delta = longitude - degrees(hourangle)\n timeDiff = 4.0 * delta\n timeUTC = 720.0 + timeDiff - eqtime\n\n newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)\n eqtime = self._eq_of_time(newt)\n solarDec = self._sun_declination(newt)\n hourangle = self._hour_angle_dusk(latitude, solarDec, self._depression)\n delta = longitude - degrees(hourangle)\n timeDiff = 4 * delta\n timeUTC = 720 + timeDiff - eqtime\n \n timeUTC = timeUTC/60.0\n hour = int(timeUTC)\n minute = int((timeUTC - hour) * 60)\n second = int((((timeUTC - hour) * 60) - minute) * 60)\n\n if second > 59:\n second -= 60\n minute += 1\n elif second < 0:\n second += 60\n minute -= 1\n\n if minute > 59:\n minute -= 60\n hour += 1\n elif minute < 0:\n minute += 60\n hour -= 1\n\n if hour > 23:\n hour -= 24\n date += datetime.timedelta(days=1)\n elif hour < 0:\n hour += 24\n date -= datetime.timedelta(days=1)\n\n dusk = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)\n\n return dusk", "def start_hour_utc(self) -> int:\n return pulumi.get(self, \"start_hour_utc\")", "def utc_local_time_shift():\n utc_tuple = time.gmtime()\n localtime_tuple = time.localtime()\n # To calculate the correct times shift, we need to ignore the\n # DST component in the localtime tuple, i. e. 
set it to 0.\n localtime_tuple = localtime_tuple[:-1] + (0,)\n time_shift_in_seconds = time.mktime(utc_tuple) - \\\n time.mktime(localtime_tuple)\n # To be safe, round the above value to units of 3600 s (1 hour).\n return round(time_shift_in_seconds / 3600.0) * 3600", "def calculate_time(self):\n return self.end_time - self.start_time", "def get_utc_offset():\n timedelta = datetime.datetime.now() - datetime.datetime.utcnow()\n # XXX: `return -time.timezone`?\n return timedelta.total_seconds()", "def get_time_delta():\n # The current UTC time\n current_time = datetime.utcnow().replace(microsecond=0)\n # Take the current date time in utc and then map it correctly to PST.\n current_time_adjusted = current_time - timedelta(hours=7)\n # Calculate the last time (Remember delta is one)\n last_time = current_time_adjusted - timedelta(hours=1)\n # Adjust start and end time to get the api kind of time format\n adjusted_current_time = str(current_time_adjusted.replace(minute=0, second=0)).replace(':', '').replace(' ', '').replace('-', '')\n adjusted_last_time = str(last_time.replace(minute=0, second=0)).replace(':', '').replace(' ', '').replace('-', '')\n return adjusted_current_time, adjusted_last_time", "def latest_synop_time()-> datetime:\n utc = datetime.utcnow()\n\n if utc.hour < 1:\n utc = utc - timedelta(days=1)\n utc = utc.replace(hour=18)\n elif utc.hour < 7:\n utc = utc.replace(hour=0)\n elif utc.hour < 13:\n utc = utc.replace(hour=6)\n elif utc.hour < 19:\n utc = utc.replace(hour=12)\n else:\n utc = utc.replace(hour=18)\n\n utc.replace(minute=0, second=0)\n return utc", "def _clock_day(self):\n return int(self._shifted_time / 86400)", "def py2_earth_hours_left(start_date=BITE_CREATED_DT):\n td = PY2_DEATH_DT - start_date\n return round(td.days * 24 + td.seconds / 60 / 60, 2)", "def run_time(self):\n if self._start_time and self._end_time:\n # If start and end time is set -- calculate run time.\n return get_duration(self._end_time - self._start_time)\n elif self._start_time and not self.status == State.RUNNING:\n # If start time but no end time, calculate current duration.\n return get_duration(datetime.now() - self._start_time)\n else:\n # Otherwise, return an uncalculated marker.\n return \"--:--:--\"", "def difUTC():\n utc = datetime.datetime.utcnow()\n local = datetime.datetime.now()\n dt = local - utc\n h = int(dt.seconds/3600)\n Qdt = QtCore.QDateTime.currentDateTime()\n Udt = QtCore.QDateTime.currentDateTimeUtc()\n dt2 = Qdt.toPyDateTime() - Udt.toPyDateTime()\n h2 = int(dt.seconds/3600)\n if h == h2:\n return h\n else :\n print 'Time Init Error'\n return h2", "def utc_epoch(self) -> int:\n return get_utc_epoch()", "def get_time_in_timezone(timezone):\n utc = datetime.now()\n # utc = utc.replace(tzinfo=utc_tz)\n time_with_offset = utc.astimezone(timezone)\n time_without_offset = time_with_offset.replace(tzinfo=None)\n return time_without_offset", "def calculate_total_time(self):\n for day in self.days:\n self.total_h += day.class_time_h\n self.total_m += day.class_time_m\n while self.total_m >= 60:\n self.total_h += 1\n self.total_m -= 60", "def forecast_made_time(self) -> datetime.time:\n return self.forecast_made_datetime.timetz()", "def getUtcNanoseconde(self) -> int:\n ...", "def timezone():\n \n pass", "def nowUTC():\n return datetime.datetime.now(pytz.utc)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate sunrise time in the UTC timezone.
def sunrise_utc(self, date, latitude, longitude):
    julianday = self._julianday(date.day, date.month, date.year)

    t = self._jday_to_jcentury(julianday)
    eqtime = self._eq_of_time(t)
    solarDec = self._sun_declination(t)

    try:
        hourangle = self._hour_angle_sunrise(latitude, solarDec)
    except:
        raise AstralError('Sun remains below horizon on this day, at this location.')

    delta = longitude - degrees(hourangle)
    timeDiff = 4.0 * delta
    timeUTC = 720.0 + timeDiff - eqtime

    newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)
    eqtime = self._eq_of_time(newt)
    solarDec = self._sun_declination(newt)
    hourangle = self._hour_angle_sunrise(latitude, solarDec)
    delta = longitude - degrees(hourangle)
    timeDiff = 4 * delta
    timeUTC = 720 + timeDiff - eqtime

    timeUTC = timeUTC/60.0
    hour = int(timeUTC)
    minute = int((timeUTC - hour) * 60)
    second = int((((timeUTC - hour) * 60) - minute) * 60)

    if second > 59:
        second -= 60
        minute += 1
    elif second < 0:
        second += 60
        minute -= 1

    if minute > 59:
        minute -= 60
        hour += 1
    elif minute < 0:
        minute += 60
        hour -= 1

    if hour > 23:
        hour -= 24
        date += datetime.timedelta(days=1)
    elif hour < 0:
        hour += 24
        date -= datetime.timedelta(days=1)

    sunrise = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)

    return sunrise
[ "def sunrise_time(datetime=None):\n pass", "def alt_sunrise(cls, date):\n rise = cls.UJJAIN.dawn(date, angle(0, 47, 0))\n return 1/24 * 1/60 * iround(rise * 24 * 60)", "def sunrise(cls, date):\n return (date + Clock.days_from_hours(6) + \n ((cls.UJJAIN.longitude - cls.LOCATION.longitude) / 360) -\n cls.equation_of_time(date) +\n ((1577917828/1582237828 / 360) *\n (cls.ascensional_difference(date, cls.LOCATION) +\n (1/4 * cls.solar_sidereal_difference(date)))))", "def computeSunTime(self, latitude, longitude, startDate, endDate): \n self.sun = sun(lat=latitude, long=longitude)\n dateTime = datetime.datetime.combine(startDate, datetime.time(hour=8))\n while dateTime.date() <= endDate: \n daytimeStart, daytimeEnd = self.computeDaytimeStartEnd(dateTime)\n self.sunriseTimeDict[dateTime.date()] = daytimeStart\n self.sunsetTimeDict[dateTime.date()] = daytimeEnd\n dateTime += self.oneDayDelta", "def sun_set_rise_times(self, date=None):\n rstimes = (self.sunset(date=date),\n self.evening_twilight_12(date=date),\n self.evening_twilight_18(date=date),\n self.morning_twilight_18(date=date),\n self.morning_twilight_12(date=date),\n self.sunrise(date=date))\n return rstimes", "def computeDaytimeStartEnd(self, date):\n dayStartTime = datetime.datetime.combine(date.date(), datetime.time())\n #compute sunrise time for that date\n (h, m, s) = self.sun.sunrise(when=date)\n time_delta = datetime.timedelta(hours=h, minutes=m, seconds=s)\n sunrise_datetime = dayStartTime + time_delta\n #print(sunrise_datetime) \n #compute sunset time for that date \n (h, m, s) = self.sun.sunset(when=date)\n time_delta = datetime.timedelta(hours=h, minutes=m, seconds=s)\n sunset_datetime = dayStartTime + time_delta\n \n return (sunrise_datetime, sunset_datetime)", "def sunset_utc(self, date, latitude, longitude):\n \n julianday = self._julianday(date.day, date.month, date.year)\n\n t = self._jday_to_jcentury(julianday)\n eqtime = self._eq_of_time(t)\n solarDec = self._sun_declination(t)\n\n try:\n hourangle = self._hour_angle_sunset(latitude, solarDec)\n except:\n raise AstralError('Sun remains below horizon on this day, at this location.')\n\n delta = longitude - degrees(hourangle)\n timeDiff = 4.0 * delta\n timeUTC = 720.0 + timeDiff - eqtime\n\n newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)\n eqtime = self._eq_of_time(newt)\n solarDec = self._sun_declination(newt)\n hourangle = self._hour_angle_sunset(latitude, solarDec)\n delta = longitude - degrees(hourangle)\n timeDiff = 4 * delta\n timeUTC = 720 + timeDiff - eqtime\n \n timeUTC = timeUTC/60.0\n hour = int(timeUTC)\n minute = int((timeUTC - hour) * 60)\n second = int((((timeUTC - hour) * 60) - minute) * 60)\n\n if second > 59:\n second -= 60\n minute += 1\n elif second < 0:\n second += 60\n minute -= 1\n\n if minute > 59:\n minute -= 60\n hour += 1\n elif minute < 0:\n minute += 60\n hour -= 1\n\n if hour > 23:\n hour -= 24\n date += datetime.timedelta(days=1)\n elif hour < 0:\n hour += 24\n date -= datetime.timedelta(days=1)\n\n sunset = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)\n\n return sunset", "def _calculate_sun(self):\n\n daystart = self.config['env']['day_start']\n dayend = self.config['env']['day_end']\n daylen = dayend - daystart\n sun = 0\n if daystart <= self.basetime <= dayend:\n sun = truncate(sin((self.basetime - daystart) * pi / daylen)) *\\\n self.sun_amplitude\n\n self.weather['sun'] = sun", "def sunrise(self):\n if self.period[\"sunrise\"] == \"false\":\n return None\n 
else:\n return self.period[\"sunrise\"]", "def estimate_sunrise_sunset(self, date):\n\n if self.source_type != 'solar':\n raise ValueError(\"You can only estimate sunrise and sunset for \"\n \"solar sources.\")\n\n date = pd.Timestamp(date)\n historic_data = self.data\n # The range is 14 days ago to the end of yesterday\n start_date = date - datetime.timedelta(days=14)\n end_date = date - datetime.timedelta(hours=1)\n\n # We grab all hours where actual power is greater than 0\n relevant_data = historic_data[start_date:end_date]\n daylight_data = relevant_data[relevant_data['actuals'] > 0]\n\n # We do this to stop a warning from appearing, we know it's a copy\n daylight_data.is_copy = False\n daylight_data['hours'] = daylight_data.index.hour\n\n # Find the min and max hour for each day where we have positive\n # observed power generation.\n sunrises = daylight_data.groupby(daylight_data.index.date).min()['hours']\n sunsets = daylight_data.groupby(daylight_data.index.date).max()['hours']\n\n # We round in order to have an integer value for sunrise and sunset.\n average_sunrise = int(max(round(sunrises.mean()) - 1, 0))\n average_sunset = int(min(round(sunsets.mean()) + 1, 23))\n\n return average_sunrise, average_sunset", "def sunrise_sunset(self, date, lat, lon):\n obs = Observer(latitude=lat, longitude=lon, elevation=0.0)\n sunrise = sun.sunrise(observer=obs, date=date)\n sunset = sun.sunset(observer=obs, date=date)\n return sunrise, sunset", "def get_sun_rise_set(year, month, day, time_zone, lat, lon):\n # Get the date time for noon.\n hour = 12\n local_date_time = get_datetime(year, month, day, hour, time_zone)\n\n # Get the sunrise and sunset data for the day.\n sunrise, sunset = get_sunrise_sunset(lat, lon, local_date_time)\n\n return sunrise, sunset", "def find_sunrise(n=1):\n ts = api.load.timescale()\n ep = api.load('de421.bsp')\n location = api.Topos('41.85 N', '87.65 W') # Chicago, USA\n t0 = ts.now()\n t1 = ts.utc(t0.utc_datetime() + timedelta(days=n))\n t, y = almanac.find_discrete(t0, t1, almanac.sunrise_sunset(ep, location))\n \n times = list(zip(t.utc_iso(), y))\n print(times)", "def rahukaalam_utc(self, date, latitude, longitude):\n \n if date is None:\n date = datetime.date.today()\n\n try:\n sunrise = self.sunrise_utc(date, latitude, longitude)\n sunset = self.sunset_utc(date, latitude, longitude)\n except:\n raise AstralError('Sun remains below horizon on this day, at this location.')\n \n octant_duration = (sunset - sunrise) / 8\n\n # Mo,Sa,Fr,We,Th,Tu,Su\n octant_index = [1,6,4,5,3,2,7]\n \n weekday = date.weekday()\n octant = octant_index[weekday]\n \n start = sunrise + (octant_duration * octant)\n end = start + octant_duration\n \n return {'start': start, 'end': end}", "def estimate_sunrise_sunset(self, date, verbose=True):\n if self.source_type != 'solar':\n raise ValueError(\"You can only estimate sunrise and sunset for \"\n \"solar sources.\")\n\n date = pd.Timestamp(date)\n\n if self.diurnal_pattern is None:\n if verbose:\n print(\"Warning: Source {} has no diurnal pattern, estimating \"\n \"sunrise and sunset using average of past data.\"\n .format(self.name), file=sys.stderr)\n return Source.estimate_sunrise_sunset(self, date)\n\n if verbose:\n print(\"{} {}: Using Diurnal Pattern to estimate sunrise and sunset\"\n .format(self.name, date.date()))\n\n diurnal_pattern = self.diurnal_pattern\n daily_pattern = diurnal_pattern[date:date+datetime.timedelta(hours=23)]\n\n sunrise, sunset = None, None\n\n # This will walk through finding first sun hour and first night 
hour\n for hour, pattern in enumerate(daily_pattern.values):\n if sunrise is None and pattern > 0:\n sunrise = hour\n\n # If sun has risen, and we have not found night and we reach a 0\n if sunrise is not None and sunset is None and pattern == 0:\n sunset = hour\n\n if sunrise is None and sunset is None:\n raise ValueError(\"No solar power was generated on {}\".format(date))\n\n return sunrise, sunset", "def sun_calc(lon_obs, lat_obs):\n return (lambda day, interval: sun_time(local_noon(day, lon_obs),\n lon_obs, lat_obs, interval['rising'],\n interval['angle'] if 'angle' in interval else 0.833333333))", "def dawn_utc(self, date, latitude, longitude):\n \n julianday = self._julianday(date.day, date.month, date.year)\n\n if latitude > 89.8:\n latitude = 89.8\n \n if latitude < -89.8:\n latitude = -89.8\n \n t = self._jday_to_jcentury(julianday)\n eqtime = self._eq_of_time(t)\n solarDec = self._sun_declination(t)\n \n try:\n hourangle = self._hour_angle_sunrise(latitude, solarDec)\n except:\n raise AstralError('Sun remains below horizon on this day, at this location.')\n\n delta = longitude - degrees(hourangle)\n timeDiff = 4.0 * delta\n timeUTC = 720.0 + timeDiff - eqtime\n\n newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)\n eqtime = self._eq_of_time(newt)\n solarDec = self._sun_declination(newt)\n hourangle = self._hour_angle_dawn(latitude, solarDec, self._depression)\n delta = longitude - degrees(hourangle)\n timeDiff = 4 * delta\n timeUTC = 720 + timeDiff - eqtime\n \n timeUTC = timeUTC/60.0\n hour = int(timeUTC)\n minute = int((timeUTC - hour) * 60)\n second = int((((timeUTC - hour) * 60) - minute) * 60)\n\n if second > 59:\n second -= 60\n minute += 1\n elif second < 0:\n second += 60\n minute -= 1\n\n if minute > 59:\n minute -= 60\n hour += 1\n elif minute < 0:\n minute += 60\n hour -= 1\n\n if hour > 23:\n hour -= 24\n date += datetime.timedelta(days=1)\n elif hour < 0:\n hour += 24\n date -= datetime.timedelta(days=1)\n \n dawn = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)\n\n return dawn", "def start_sunrise_event():\n script = gametime.schedule(at_sunrise, repeat=True, hour=6, min=0, sec=0)\n script.key = \"at sunrise\"", "def sunrise_sunset(city_name, which_day=datetime.now()):\n s = Astral()[city_name].sun(which_day, local=True)\n return [s[\"sunrise\"], s[\"sunset\"]]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate solar noon time in the UTC timezone.
def solar_noon_utc(self, date, longitude): julianday = self._julianday(date.day, date.month, date.year) newt = self._jday_to_jcentury(julianday + 0.5 + longitude / 360.0) eqtime = self._eq_of_time(newt) timeUTC = 720.0 + (longitude * 4.0) - eqtime timeUTC = timeUTC/60.0 hour = int(timeUTC) minute = int((timeUTC - hour) * 60) second = int((((timeUTC - hour) * 60) - minute) * 60) if second > 59: second -= 60 minute += 1 elif second < 0: second += 60 minute -= 1 if minute > 59: minute -= 60 hour += 1 elif minute < 0: minute += 60 hour -= 1 if hour > 23: hour -= 24 date += datetime.timedelta(days=1) elif hour < 0: hour += 24 date -= datetime.timedelta(days=1) noon = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc) return noon
[ "def solar_noon(self, date=None, local=True):\n \n if self.astral is None:\n self.astral = Astral()\n\n if date is None:\n date = datetime.date.today()\n\n noon = self.astral.solar_noon_utc(date, self.longitude)\n\n if local:\n return noon.astimezone(self.tz) \n else:\n return noon", "def solar_noon_utc(LonDegE):\n _timezone = array([-180, -172.5, -157.5, -142.5, -127.5, -112.5, -97.5, -82.5, -67.5, -52.5, -37.5, -22.5, -7.5, 7.5, 22.5, 37.5, 52.5, 67.5, 82.5, 97.5, 112.5, 127.5, 142.5, 157.5, 172.5, 180]).repeat(2, 0)[1:-1].reshape(-1, 2)\n for i, (low, high) in enumerate(_timezone):\n if LonDegE >= low:\n if LonDegE <= high:\n return 12 -(-12 + i)", "def find_solar_noon(self,dt):\n year = dt.timetuple().tm_year\n #print year\n month = dt.timetuple().tm_mon\n #print month\n \n day = dt.timetuple().tm_mday \n \n sitka = ephem.Observer()\n \n date = str(year)+'/'+str(month)+'/'+str(day)\n #print date\n\n sitka.date = date\n\n sitka.lat = self.lat\n\n sitka.lon = self.lon\n\n m = ephem.Sun()\n\n noon = sitka.next_transit(m)\n \n ntime = str(noon).split(' ')[1]\n \n h = ntime.split(':')[0]\n m = ntime.split(':')[1]\n s = ntime.split(':')[2]\n \n snoon = datetime.datetime(year,month,day,int(h),int(m),int(s))\n \n snoon += datetime.timedelta(hours=self.tdiffUTC)\n \n self.solnoon = snoon\n \n return snoon", "def date_time_calculate_local_sidereal_time(longitude,date_time_utc_now):\n pass", "def timezone():\n \n pass", "def latest_synop_time()-> datetime:\n utc = datetime.utcnow()\n\n if utc.hour < 1:\n utc = utc - timedelta(days=1)\n utc = utc.replace(hour=18)\n elif utc.hour < 7:\n utc = utc.replace(hour=0)\n elif utc.hour < 13:\n utc = utc.replace(hour=6)\n elif utc.hour < 19:\n utc = utc.replace(hour=12)\n else:\n utc = utc.replace(hour=18)\n\n utc.replace(minute=0, second=0)\n return utc", "def get_now_utc_notz():\n return get_now_utc().replace(tzinfo=None)", "def get_utc_server_now():\n server_now = get_server_now()\n result = server_now.astimezone(tz.UTC)\n return result", "def get_time_in_timezone(timezone):\n utc = datetime.now()\n # utc = utc.replace(tzinfo=utc_tz)\n time_with_offset = utc.astimezone(timezone)\n time_without_offset = time_with_offset.replace(tzinfo=None)\n return time_without_offset", "def _solar_time(date, lon, lat):\n # IDL is computing time_t as hours and fractional minutes\n time_t = ee.Date(date).get('hour').add(\n ee.Date(date).get('minute').divide(60))\n\n # This will return the hour floating point value\n # time_t = ee.Date(date).get('hour').add(ee.Date(date).getFraction('hour'))\n\n # CGM - DOY and hour could be images in order to make these expressions\n julian = _to_jd(date)\n\n # Sunrise time\n julian_ = time_t.divide(24.0).add(julian)\n j_cen = julian_.add(0.5 - 2451545.0).divide(36525.0)\n # CGM - Does the mod happen before or after the multiply\n lon_sun = j_cen.multiply(0.0003032).add(36000.76983) \\\n .multiply(j_cen).mod(360.0).add(280.46646).subtract(360)\n an_sun = j_cen.multiply(-0.0001537).add(35999.05029) \\\n .multiply(j_cen).add(357.52911)\n ecc = j_cen.multiply(0.0000001267).add(0.000042037) \\\n .multiply(j_cen).multiply(-1).add(0.016708634)\n ob_ecl = j_cen.multiply(-0.001813).add(0.00059) \\\n .multiply(j_cen).add(46.815) \\\n .multiply(j_cen).multiply(-1).add(21.448) \\\n .divide(60.0).add(26).divide(60).add(23)\n ob_corr = j_cen.multiply(-1934.136).add(125.04).multiply(deg2rad).cos() \\\n .multiply(0.00256).add(ob_ecl)\n var_y = ob_corr.divide(2.0).multiply(deg2rad).tan().multiply(\n ob_corr.divide(2.0).multiply(deg2rad).tan())\n eq_t = 
(\n lon_sun.multiply(2.0).multiply(deg2rad).sin().multiply(var_y)\n .subtract(an_sun.multiply(deg2rad).sin().multiply(ecc).multiply(2.0))\n .add(an_sun.multiply(deg2rad).sin()\n .multiply(lon_sun.multiply(2.0).multiply(deg2rad).cos())\n .multiply(var_y).multiply(ecc).multiply(4.0))\n .subtract(lon_sun.multiply(4.0).multiply(deg2rad).sin()\n .multiply(var_y).multiply(var_y).multiply(0.5))\n .subtract(an_sun.multiply(2.0).multiply(deg2rad).sin()\n .multiply(ecc).multiply(ecc).multiply(1.25))\n .multiply(4.0).multiply(rad2deg))\n sun_eq = (\n an_sun.multiply(deg2rad).sin().multiply(\n j_cen.multiply(0.000014).add(0.004817)\n .multiply(j_cen).multiply(-1).add(1.914602))\n .add(an_sun.multiply(2.0).multiply(deg2rad).sin()\n .multiply(j_cen.multiply(-0.000101).add(0.019993)))\n .add(an_sun.multiply(3.0).multiply(deg2rad).sin().multiply(0.000289)))\n sun_true = sun_eq.add(lon_sun)\n sun_app = j_cen.multiply(-1934.136).add(125.04).multiply(deg2rad).sin() \\\n .multiply(-0.00478).subtract(0.00569).add(sun_true)\n\n # CGM - Intentionally not converting back to degrees\n d = ob_corr.multiply(deg2rad).sin() \\\n .multiply(sun_app.multiply(deg2rad).sin()) \\\n .asin()\n\n # CGM - Functions below are lat/lon dependent and can be written as\n # ee.Image expressions\n # CGM - d is still in radians, not converting\n ha_t = lat.expression(\n 'acos((cos(90.833 * pi / 180) / (cos(lat) * cos(d))) - tan(lat) * tan(d))'\n ' * (180 / pi)',\n {'lat': lat, 'd': d, 'pi': math.pi})\n\n # print('\\n{:10s} {:.12f}'.format('julian_', julian_.getInfo()))\n # print('{:10s} {:.12f}'.format('time_t', time_t.getInfo()))\n # print('{:10s} {:.12f}'.format('j_cen', j_cen.getInfo()))\n # print('{:10s} {:.12f}'.format('lon_sun', lon_sun.getInfo()))\n # print('{:10s} {:.12f}'.format('an_sun', an_sun.getInfo()))\n # print('{:10s} {:.12f}'.format('ecc', ecc.getInfo()))\n # print('{:10s} {:.12f}'.format('ob_ecl', ob_ecl.getInfo()))\n # print('{:10s} {:.12f}'.format('ob_corr', ob_corr.getInfo()))\n # print('{:10s} {:.12f}'.format('var_y', var_y.getInfo()))\n # print('{:10s} {:.12f}'.format('eq_t', eq_t.getInfo()))\n # print('{:10s} {:.12f}'.format('sun_eq', sun_eq.getInfo()))\n # print('{:10s} {:.12f}'.format('sun_true', sun_true.getInfo()))\n # print('{:10s} {:.12f}'.format('sun_app', sun_app.getInfo()))\n # print('{:10s} {:.12f}'.format('d', d.getInfo()))\n\n return d, eq_t, ha_t", "def dawn_utc(self, date, latitude, longitude):\n \n julianday = self._julianday(date.day, date.month, date.year)\n\n if latitude > 89.8:\n latitude = 89.8\n \n if latitude < -89.8:\n latitude = -89.8\n \n t = self._jday_to_jcentury(julianday)\n eqtime = self._eq_of_time(t)\n solarDec = self._sun_declination(t)\n \n try:\n hourangle = self._hour_angle_sunrise(latitude, solarDec)\n except:\n raise AstralError('Sun remains below horizon on this day, at this location.')\n\n delta = longitude - degrees(hourangle)\n timeDiff = 4.0 * delta\n timeUTC = 720.0 + timeDiff - eqtime\n\n newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)\n eqtime = self._eq_of_time(newt)\n solarDec = self._sun_declination(newt)\n hourangle = self._hour_angle_dawn(latitude, solarDec, self._depression)\n delta = longitude - degrees(hourangle)\n timeDiff = 4 * delta\n timeUTC = 720 + timeDiff - eqtime\n \n timeUTC = timeUTC/60.0\n hour = int(timeUTC)\n minute = int((timeUTC - hour) * 60)\n second = int((((timeUTC - hour) * 60) - minute) * 60)\n\n if second > 59:\n second -= 60\n minute += 1\n elif second < 0:\n second += 60\n minute -= 1\n\n if minute > 59:\n 
minute -= 60\n hour += 1\n elif minute < 0:\n minute += 60\n hour -= 1\n\n if hour > 23:\n hour -= 24\n date += datetime.timedelta(days=1)\n elif hour < 0:\n hour += 24\n date -= datetime.timedelta(days=1)\n \n dawn = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)\n\n return dawn", "def getUtcNanoseconde(self) -> int:\n ...", "def utc_time(self) -> Union[str, None]:\n response = self.command('AT%UTC')\n if response is None or response[0] == 'ERROR':\n return None\n return response[0].replace('%UTC: ', '').replace(' ', 'T') + 'Z'", "def nowUTC():\n return datetime.datetime.now(pytz.utc)", "def _local_time_offset():\n if time.localtime().tm_isdst and time.daylight:\n return -time.altzone\n else:\n return -time.timezone", "def __get_stock_time(stock_tz: timezone) -> datetime:\n return datetime.now().astimezone(stock_tz)", "def timezone(self) -> tzinfo:\n response = self.tf.timezone_at(\n lng=Longitude(self.longitude).value,\n lat=Latitude(self.latitude).value,\n )\n\n return timezone(str(response))", "def utcnow():\n return pytz.utc.localize(datetime.utcnow())", "def utcnow() -> datetime:\n return datetime.now().astimezone(pytz.utc)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
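Aside, for illustration only (not part of the dataset record above): the same relation — solar noon at 720 minutes UTC, shifted by 4 minutes per degree of longitude and by the equation of time — as a self-contained Python sketch. It assumes an east-positive longitude convention (the record's 720 + 4*longitude implies west-positive) and a generic low-order equation-of-time approximation in place of the _eq_of_time helper.

import datetime
import math

def equation_of_time_minutes(day_of_year):
    # Low-order approximation of the equation of time, in minutes (good to roughly 1 min).
    b = 2.0 * math.pi * (day_of_year - 81) / 364.0
    return 9.87 * math.sin(2 * b) - 7.53 * math.cos(b) - 1.5 * math.sin(b)

def approx_solar_noon_utc(date, longitude_east):
    # 720 min = 12:00 UTC; the Earth turns through 1 degree of longitude every 4 minutes.
    minutes = 720.0 - 4.0 * longitude_east - equation_of_time_minutes(date.timetuple().tm_yday)
    midnight = datetime.datetime.combine(date, datetime.time(0), tzinfo=datetime.timezone.utc)
    return midnight + datetime.timedelta(minutes=minutes)

print(approx_solar_noon_utc(datetime.date(2024, 6, 21), longitude_east=2.35))  # Paris: about 11:52 UTC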
Calculate sunset time in the UTC timezone.
def sunset_utc(self, date, latitude, longitude): julianday = self._julianday(date.day, date.month, date.year) t = self._jday_to_jcentury(julianday) eqtime = self._eq_of_time(t) solarDec = self._sun_declination(t) try: hourangle = self._hour_angle_sunset(latitude, solarDec) except: raise AstralError('Sun remains below horizon on this day, at this location.') delta = longitude - degrees(hourangle) timeDiff = 4.0 * delta timeUTC = 720.0 + timeDiff - eqtime newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0) eqtime = self._eq_of_time(newt) solarDec = self._sun_declination(newt) hourangle = self._hour_angle_sunset(latitude, solarDec) delta = longitude - degrees(hourangle) timeDiff = 4 * delta timeUTC = 720 + timeDiff - eqtime timeUTC = timeUTC/60.0 hour = int(timeUTC) minute = int((timeUTC - hour) * 60) second = int((((timeUTC - hour) * 60) - minute) * 60) if second > 59: second -= 60 minute += 1 elif second < 0: second += 60 minute -= 1 if minute > 59: minute -= 60 hour += 1 elif minute < 0: minute += 60 hour -= 1 if hour > 23: hour -= 24 date += datetime.timedelta(days=1) elif hour < 0: hour += 24 date -= datetime.timedelta(days=1) sunset = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc) return sunset
[ "def sunset(cls, date):\n return (date + Clock.days_from_hours(18) + \n ((cls.UJJAIN.longitude - cls.LOCATION.longitude) / 360) -\n cls.equation_of_time(date) +\n (((1577917828/1582237828) / 360) *\n (- cls.ascensional_difference(date, cls.LOCATION) +\n (3/4 * cls.solar_sidereal_difference(date)))))", "def set_time_zone_to_utc(self):\n self.set_time_zone(TimeZone(hours=0, minutes=0))", "def utc_local_time_shift():\n utc_tuple = time.gmtime()\n localtime_tuple = time.localtime()\n # To calculate the correct times shift, we need to ignore the\n # DST component in the localtime tuple, i. e. set it to 0.\n localtime_tuple = localtime_tuple[:-1] + (0,)\n time_shift_in_seconds = time.mktime(utc_tuple) - \\\n time.mktime(localtime_tuple)\n # To be safe, round the above value to units of 3600 s (1 hour).\n return round(time_shift_in_seconds / 3600.0) * 3600", "def utc_time(self) -> Union[str, None]:\n response = self.command('AT%UTC')\n if response is None or response[0] == 'ERROR':\n return None\n return response[0].replace('%UTC: ', '').replace(' ', 'T') + 'Z'", "def get_local_utcoffset():\n utc = datetime.utcnow()\n local = datetime.now()\n if local < utc:\n return - int(float((utc - local).seconds) / 60 + .5)\n else:\n return int(float((local - utc).seconds) / 60 + .5)", "def latest_synop_time()-> datetime:\n utc = datetime.utcnow()\n\n if utc.hour < 1:\n utc = utc - timedelta(days=1)\n utc = utc.replace(hour=18)\n elif utc.hour < 7:\n utc = utc.replace(hour=0)\n elif utc.hour < 13:\n utc = utc.replace(hour=6)\n elif utc.hour < 19:\n utc = utc.replace(hour=12)\n else:\n utc = utc.replace(hour=18)\n\n utc.replace(minute=0, second=0)\n return utc", "def utcnow(cls):\n t = _time.time()\n return cls.utcfromtimestamp(t)", "def set_utc(date_time):\n utc = datetime.timezone(datetime.timedelta(0))\n date_time = date_time.replace(tzinfo=utc)\n return date_time", "def time_to_utc(utc_offset,time):\n\n return to_24_hour_clock(-utc_offset+time)", "def local_to_utc(date: datetime) -> datetime:\n return date - timedelta(hours=tz)", "def difUTC():\n utc = datetime.datetime.utcnow()\n local = datetime.datetime.now()\n dt = local - utc\n h = int(dt.seconds/3600)\n Qdt = QtCore.QDateTime.currentDateTime()\n Udt = QtCore.QDateTime.currentDateTimeUtc()\n dt2 = Qdt.toPyDateTime() - Udt.toPyDateTime()\n h2 = int(dt.seconds/3600)\n if h == h2:\n return h\n else :\n print 'Time Init Error'\n return h2", "def dusk_utc(self, date, latitude, longitude):\n \n julianday = self._julianday(date.day, date.month, date.year)\n\n if latitude > 89.8:\n latitude = 89.8\n \n if latitude < -89.8:\n latitude = -89.8\n \n t = self._jday_to_jcentury(julianday)\n eqtime = self._eq_of_time(t)\n solarDec = self._sun_declination(t)\n\n try:\n hourangle = self._hour_angle_sunset(latitude, solarDec)\n except:\n raise AstralError('Sun remains below horizon on this day, at this location.')\n\n delta = longitude - degrees(hourangle)\n timeDiff = 4.0 * delta\n timeUTC = 720.0 + timeDiff - eqtime\n\n newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)\n eqtime = self._eq_of_time(newt)\n solarDec = self._sun_declination(newt)\n hourangle = self._hour_angle_dusk(latitude, solarDec, self._depression)\n delta = longitude - degrees(hourangle)\n timeDiff = 4 * delta\n timeUTC = 720 + timeDiff - eqtime\n \n timeUTC = timeUTC/60.0\n hour = int(timeUTC)\n minute = int((timeUTC - hour) * 60)\n second = int((((timeUTC - hour) * 60) - minute) * 60)\n\n if second > 59:\n second -= 60\n minute += 1\n elif second < 0:\n second += 60\n 
minute -= 1\n\n if minute > 59:\n minute -= 60\n hour += 1\n elif minute < 0:\n minute += 60\n hour -= 1\n\n if hour > 23:\n hour -= 24\n date += datetime.timedelta(days=1)\n elif hour < 0:\n hour += 24\n date -= datetime.timedelta(days=1)\n\n dusk = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)\n\n return dusk", "def start_hour_utc(self) -> int:\n return pulumi.get(self, \"start_hour_utc\")", "def tz_utc_offset(self) -> int:\n import pytz\n import datetime\n\n if not hasattr(self, \"_utcoffset\"):\n dt = datetime.datetime.now(tz=pytz.utc)\n self._utcoffset = dt.astimezone(self.timezone).utcoffset()\n return int(self._utcoffset.total_seconds())", "def FromNowUTC(cls):\n t = pytime.time()\n utcTime = pytime.gmtime(t)\n return cls.FromStructTime(utcTime).WithZone(zDirection=0)", "def get_time_in_timezone(timezone):\n utc = datetime.now()\n # utc = utc.replace(tzinfo=utc_tz)\n time_with_offset = utc.astimezone(timezone)\n time_without_offset = time_with_offset.replace(tzinfo=None)\n return time_without_offset", "def __get_utc_date(self):\n\n return datetime.utcnow().strftime(SmiteAPI.DEFAULT_UTC_FORMAT)", "def utc(self):\n return self._utc", "def utcnow_ts():\r\n return calendar.timegm(utcnow().timetuple())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
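Illustrative sketch of the hour-angle step at the core of the sunset computation above (the record calls a _hour_angle_sunset helper that is not shown): the 90.833-degree zenith folds in standard atmospheric refraction plus the apparent solar radius. The latitude and declination on the final line are assumed values, used only as a quick check.

import math

def sunset_hour_angle_deg(lat_deg, decl_deg):
    # cos(h0) = (cos(zenith) - sin(lat)*sin(decl)) / (cos(lat)*cos(decl))
    lat, decl = math.radians(lat_deg), math.radians(decl_deg)
    zenith = math.radians(90.833)
    cos_h0 = (math.cos(zenith) - math.sin(lat) * math.sin(decl)) / (math.cos(lat) * math.cos(decl))
    return math.degrees(math.acos(cos_h0))  # ValueError here means polar day or polar night

# Latitude of Paris, solar declination at the June solstice (~23.44 deg):
print(sunset_hour_angle_deg(48.85, 23.44))  # ~121.3 deg, i.e. about 16.2 h of daylight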
Calculate dusk time in the UTC timezone.
def dusk_utc(self, date, latitude, longitude): julianday = self._julianday(date.day, date.month, date.year) if latitude > 89.8: latitude = 89.8 if latitude < -89.8: latitude = -89.8 t = self._jday_to_jcentury(julianday) eqtime = self._eq_of_time(t) solarDec = self._sun_declination(t) try: hourangle = self._hour_angle_sunset(latitude, solarDec) except: raise AstralError('Sun remains below horizon on this day, at this location.') delta = longitude - degrees(hourangle) timeDiff = 4.0 * delta timeUTC = 720.0 + timeDiff - eqtime newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0) eqtime = self._eq_of_time(newt) solarDec = self._sun_declination(newt) hourangle = self._hour_angle_dusk(latitude, solarDec, self._depression) delta = longitude - degrees(hourangle) timeDiff = 4 * delta timeUTC = 720 + timeDiff - eqtime timeUTC = timeUTC/60.0 hour = int(timeUTC) minute = int((timeUTC - hour) * 60) second = int((((timeUTC - hour) * 60) - minute) * 60) if second > 59: second -= 60 minute += 1 elif second < 0: second += 60 minute -= 1 if minute > 59: minute -= 60 hour += 1 elif minute < 0: minute += 60 hour -= 1 if hour > 23: hour -= 24 date += datetime.timedelta(days=1) elif hour < 0: hour += 24 date -= datetime.timedelta(days=1) dusk = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc) return dusk
[ "def timezone():\n \n pass", "def nowUTC():\n return datetime.datetime.now(pytz.utc)", "def dawn_utc(self, date, latitude, longitude):\n \n julianday = self._julianday(date.day, date.month, date.year)\n\n if latitude > 89.8:\n latitude = 89.8\n \n if latitude < -89.8:\n latitude = -89.8\n \n t = self._jday_to_jcentury(julianday)\n eqtime = self._eq_of_time(t)\n solarDec = self._sun_declination(t)\n \n try:\n hourangle = self._hour_angle_sunrise(latitude, solarDec)\n except:\n raise AstralError('Sun remains below horizon on this day, at this location.')\n\n delta = longitude - degrees(hourangle)\n timeDiff = 4.0 * delta\n timeUTC = 720.0 + timeDiff - eqtime\n\n newt = self._jday_to_jcentury(self._jcentury_to_jday(t) + timeUTC / 1440.0)\n eqtime = self._eq_of_time(newt)\n solarDec = self._sun_declination(newt)\n hourangle = self._hour_angle_dawn(latitude, solarDec, self._depression)\n delta = longitude - degrees(hourangle)\n timeDiff = 4 * delta\n timeUTC = 720 + timeDiff - eqtime\n \n timeUTC = timeUTC/60.0\n hour = int(timeUTC)\n minute = int((timeUTC - hour) * 60)\n second = int((((timeUTC - hour) * 60) - minute) * 60)\n\n if second > 59:\n second -= 60\n minute += 1\n elif second < 0:\n second += 60\n minute -= 1\n\n if minute > 59:\n minute -= 60\n hour += 1\n elif minute < 0:\n minute += 60\n hour -= 1\n\n if hour > 23:\n hour -= 24\n date += datetime.timedelta(days=1)\n elif hour < 0:\n hour += 24\n date -= datetime.timedelta(days=1)\n \n dawn = datetime.datetime(date.year, date.month, date.day, hour, minute, second, tzinfo=pytz.utc)\n\n return dawn", "def get_current_india_time():\n india_offset = datetime.timedelta(hours=5, minutes=30)\n in_time = datetime.datetime.utcnow() + india_offset\n return in_time", "def timezone():\n\n return time.timezone", "def utcnow():\n return pytz.utc.localize(datetime.utcnow())", "def local_time():\n loc_dt = datetime.datetime.now(pytz.timezone('US/Pacific'))\n return loc_dt", "def utcnow() -> datetime:\n return datetime.now().astimezone(pytz.utc)", "def get_utc_server_now():\n server_now = get_server_now()\n result = server_now.astimezone(tz.UTC)\n return result", "def date_time_calculate_local_sidereal_time(longitude,date_time_utc_now):\n pass", "def get_time_in_timezone(timezone):\n utc = datetime.now()\n # utc = utc.replace(tzinfo=utc_tz)\n time_with_offset = utc.astimezone(timezone)\n time_without_offset = time_with_offset.replace(tzinfo=None)\n return time_without_offset", "def difUTC():\n utc = datetime.datetime.utcnow()\n local = datetime.datetime.now()\n dt = local - utc\n h = int(dt.seconds/3600)\n Qdt = QtCore.QDateTime.currentDateTime()\n Udt = QtCore.QDateTime.currentDateTimeUtc()\n dt2 = Qdt.toPyDateTime() - Udt.toPyDateTime()\n h2 = int(dt.seconds/3600)\n if h == h2:\n return h\n else :\n print 'Time Init Error'\n return h2", "def forecast_made_time(self) -> datetime.time:\n return self.forecast_made_datetime.timetz()", "def local_to_utc(date: datetime) -> datetime:\n return date - timedelta(hours=tz)", "def _get_timeZone(self):\n return self.datetime.tzinfo", "def get_local_utcoffset():\n utc = datetime.utcnow()\n local = datetime.now()\n if local < utc:\n return - int(float((utc - local).seconds) / 60 + .5)\n else:\n return int(float((local - utc).seconds) / 60 + .5)", "def utc_time(self) -> Union[str, None]:\n response = self.command('AT%UTC')\n if response is None or response[0] == 'ERROR':\n return None\n return response[0].replace('%UTC: ', '').replace(' ', 'T') + 'Z'", "def get_utc_offset():\n timedelta = 
datetime.datetime.now() - datetime.datetime.utcnow()\n # XXX: `return -time.timezone`?\n return timedelta.total_seconds()", "def _get_local_time(self):\n api_url = None\n try:\n aio_username = self._secrets[\"aio_username\"]\n aio_key = self._secrets[\"aio_key\"]\n except KeyError:\n raise KeyError(\n \"\\n\\nOur time service requires a login/password to rate-limit. Please register for a free adafruit.io account and place the user/key in your secrets file under 'aio_username' and 'aio_key'\"\n )\n location = None\n location = self._secrets.get(\"timezone\", location)\n if location:\n if self._logger:\n self._logger.debug(\"Getting time for timezone.\")\n api_url = (TIME_SERVICE + \"&tz=%s\") % (aio_username, aio_key, location)\n else: # we'll try to figure it out from the IP address\n self._logger.debug(\"Getting time from IP Address..\")\n api_url = TIME_SERVICE % (aio_username, aio_key)\n api_url += TIME_SERVICE_STRFTIME\n try:\n response = self._wifi.get(api_url)\n times = response.text.split(\" \")\n the_date = times[0]\n the_time = times[1]\n year_day = int(times[2])\n week_day = int(times[3])\n is_dst = None # no way to know yet\n except KeyError:\n raise KeyError(\n \"Was unable to lookup the time, try setting secrets['timezone'] according to http://worldtimeapi.org/timezones\"\n ) # pylint: disable=line-too-long\n year, month, mday = [int(x) for x in the_date.split(\"-\")]\n the_time = the_time.split(\".\")[0]\n hours, minutes, seconds = [int(x) for x in the_time.split(\":\")]\n now = time.struct_time(\n (year, month, mday, hours, minutes, seconds, week_day, year_day, is_dst)\n )\n rtc.RTC().datetime = now\n # now clean up\n response.close()\n response = None\n gc.collect()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate rahukaalam times in the UTC timezone.
def rahukaalam_utc(self, date, latitude, longitude): if date is None: date = datetime.date.today() try: sunrise = self.sunrise_utc(date, latitude, longitude) sunset = self.sunset_utc(date, latitude, longitude) except: raise AstralError('Sun remains below horizon on this day, at this location.') octant_duration = (sunset - sunrise) / 8 # Mo,Sa,Fr,We,Th,Tu,Su octant_index = [1,6,4,5,3,2,7] weekday = date.weekday() octant = octant_index[weekday] start = sunrise + (octant_duration * octant) end = start + octant_duration return {'start': start, 'end': end}
[ "def timezone():\n \n pass", "def set_time_by_timezone(df):\n df = set_city_time_by_timezone(df, 1078, 3)\n df = set_city_time_by_timezone(df, 22390, 4)\n df = set_city_time_by_timezone(df, 22430, 4)\n df = set_city_time_by_timezone(df, 22438, 5)\n return df", "def time_from_utc(utc_offset,time):\n return to_24_hour_clock(+utc_offset+time)", "def batch_convert_to_utc_time(times):\n print('Converting times to UTC')\n times_in_seconds_since_pan_epoch = [MonteCarlo.time_since_pan_epoch(t) for t in times]\n astropy_times = [time2astropyTime(t, GPSTime.EPOCH_WN) for t in times_in_seconds_since_pan_epoch]\n utc_times = [t.utc.to_datetime() for t in astropy_times]\n \n return utc_times", "def what_time_lives_pybites(naive_utc_dt):\n\n timesList = []\n utc = pytz.utc\n\n year = naive_utc_dt.year\n month = naive_utc_dt.month\n day = naive_utc_dt.day\n hour = naive_utc_dt.hour\n min = naive_utc_dt.minute\n sec = naive_utc_dt.second\n\n\n dt = datetime(year,month,day,hour,min,sec, tzinfo=pytz.UTC)\n\n\n australianTime = dt.astimezone(AUSTRALIA)\n spanishTime = dt.astimezone(SPAIN)\n timesList.append(australianTime)\n timesList.append(spanishTime)\n return tuple(timesList)\n pass", "def time_to_utc(utc_offset,time):\n\n return to_24_hour_clock(-utc_offset+time)", "def difUTC():\n utc = datetime.datetime.utcnow()\n local = datetime.datetime.now()\n dt = local - utc\n h = int(dt.seconds/3600)\n Qdt = QtCore.QDateTime.currentDateTime()\n Udt = QtCore.QDateTime.currentDateTimeUtc()\n dt2 = Qdt.toPyDateTime() - Udt.toPyDateTime()\n h2 = int(dt.seconds/3600)\n if h == h2:\n return h\n else :\n print 'Time Init Error'\n return h2", "def within_schedule(utc, *timezones):\r\n pass", "async def create_timezone_cache(self):\n for user_id, timezone in await self.ex.sql.s_user.fetch_timezones():\n user = await self.ex.get_user(user_id)\n user.timezone = timezone", "def test_all(self):\n for olson in WINDOWS_TO_OLSON.values():\n pytz.timezone(olson)", "def date_time_calculate_local_sidereal_time(longitude,date_time_utc_now):\n pass", "def _local_timestamps(self) -> npt.NDArray[np.int64]:\n if self.tz is None or timezones.is_utc(self.tz):\n # Avoid the copy that would be made in tzconversion\n return self.asi8\n return tz_convert_from_utc(self.asi8, self.tz, reso=self._creso)", "def utc_local_time_shift():\n utc_tuple = time.gmtime()\n localtime_tuple = time.localtime()\n # To calculate the correct times shift, we need to ignore the\n # DST component in the localtime tuple, i. e. set it to 0.\n localtime_tuple = localtime_tuple[:-1] + (0,)\n time_shift_in_seconds = time.mktime(utc_tuple) - \\\n time.mktime(localtime_tuple)\n # To be safe, round the above value to units of 3600 s (1 hour).\n return round(time_shift_in_seconds / 3600.0) * 3600", "def set_time_zone_to_utc(self):\n self.set_time_zone(TimeZone(hours=0, minutes=0))", "def _get_local_time(self):\n api_url = None\n try:\n aio_username = self._secrets[\"aio_username\"]\n aio_key = self._secrets[\"aio_key\"]\n except KeyError:\n raise KeyError(\n \"\\n\\nOur time service requires a login/password to rate-limit. 
Please register for a free adafruit.io account and place the user/key in your secrets file under 'aio_username' and 'aio_key'\"\n )\n location = None\n location = self._secrets.get(\"timezone\", location)\n if location:\n if self._logger:\n self._logger.debug(\"Getting time for timezone.\")\n api_url = (TIME_SERVICE + \"&tz=%s\") % (aio_username, aio_key, location)\n else: # we'll try to figure it out from the IP address\n self._logger.debug(\"Getting time from IP Address..\")\n api_url = TIME_SERVICE % (aio_username, aio_key)\n api_url += TIME_SERVICE_STRFTIME\n try:\n response = self._wifi.get(api_url)\n times = response.text.split(\" \")\n the_date = times[0]\n the_time = times[1]\n year_day = int(times[2])\n week_day = int(times[3])\n is_dst = None # no way to know yet\n except KeyError:\n raise KeyError(\n \"Was unable to lookup the time, try setting secrets['timezone'] according to http://worldtimeapi.org/timezones\"\n ) # pylint: disable=line-too-long\n year, month, mday = [int(x) for x in the_date.split(\"-\")]\n the_time = the_time.split(\".\")[0]\n hours, minutes, seconds = [int(x) for x in the_time.split(\":\")]\n now = time.struct_time(\n (year, month, mday, hours, minutes, seconds, week_day, year_day, is_dst)\n )\n rtc.RTC().datetime = now\n # now clean up\n response.close()\n response = None\n gc.collect()", "def utc_time(self) -> Union[str, None]:\n response = self.command('AT%UTC')\n if response is None or response[0] == 'ERROR':\n return None\n return response[0].replace('%UTC: ', '').replace(' ', 'T') + 'Z'", "def get_local_utcoffset():\n utc = datetime.utcnow()\n local = datetime.now()\n if local < utc:\n return - int(float((utc - local).seconds) / 60 + .5)\n else:\n return int(float((local - utc).seconds) / 60 + .5)", "def get_time_in_timezone(timezone):\n utc = datetime.now()\n # utc = utc.replace(tzinfo=utc_tz)\n time_with_offset = utc.astimezone(timezone)\n time_without_offset = time_with_offset.replace(tzinfo=None)\n return time_without_offset", "def computeSunTime(self, latitude, longitude, startDate, endDate): \n self.sun = sun(lat=latitude, long=longitude)\n dateTime = datetime.datetime.combine(startDate, datetime.time(hour=8))\n while dateTime.date() <= endDate: \n daytimeStart, daytimeEnd = self.computeDaytimeStartEnd(dateTime)\n self.sunriseTimeDict[dateTime.date()] = daytimeStart\n self.sunsetTimeDict[dateTime.date()] = daytimeEnd\n dateTime += self.oneDayDelta" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
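Illustrative restatement of the octant rule used in the record above: split the sunrise-to-sunset span into eight equal parts and pick one octant per weekday via the table (1, 6, 4, 5, 3, 2, 7), indexed Monday through Sunday. The sunrise and sunset datetimes below are assumed values, chosen only to exercise the rule.

import datetime

OCTANT_INDEX = [1, 6, 4, 5, 3, 2, 7]  # indexed by weekday(): Mon..Sun

def rahukaalam_window(sunrise, sunset, weekday):
    octant = (sunset - sunrise) / 8                  # one eighth of the daylight span
    start = sunrise + octant * OCTANT_INDEX[weekday]
    return start, start + octant

sr = datetime.datetime(2024, 6, 21, 4, 47, tzinfo=datetime.timezone.utc)
ss = datetime.datetime(2024, 6, 21, 19, 58, tzinfo=datetime.timezone.utc)
print(rahukaalam_window(sr, ss, weekday=4))          # 2024-06-21 was a Friday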
Calculates the phase of the moon on the specified date.
def moon_phase(self, date): jd = self._julianday(date.day, date.month, date.year) DT = pow((jd - 2382148), 2) / (41048480*86400) T = (jd + DT - 2451545.0) / 36525 T2 = pow(T,2) T3 = pow(T,3) D = 297.85 + (445267.1115*T) - (0.0016300*T2) + (T3/545868) D = radians(self._proper_angle(D)) M = 357.53 + (35999.0503*T) M = radians(self._proper_angle(M)) M1 = 134.96 + (477198.8676*T) + (0.0089970*T2) + (T3/69699) M1 = radians(self._proper_angle(M1)) elong = degrees(D) + 6.29*sin(M1) elong -= 2.10*sin(M) elong += 1.27*sin(2*D - M1) elong += 0.66*sin(2*D) elong = self._proper_angle(elong) moon = int(floor(((elong + 6.43) / 360) * 28)) if moon == 28: moon = 0 return moon
[ "def test_moon_phase(date_: datetime.date, phase: float):\n assert moon.phase(date_) == pytest.approx(phase, abs=0.001) # type: ignore", "def get_moon_phase(now):\n from math import pi\n\n ref = date_to_jd('1899-12-31', '12:00:00')\n T = (now - ref) / 36525\n nu = -9.26009 + 445267.12165*T + 0.00168*(T**2)\n ageDeg = nu % 360\n nuRad = ageDeg * pi / 180\n nuHrs = (nu/15) % 24\n return nuHrs", "def phase_to_day(phase):\n if phase < 0:\n phase += 2*np.pi\n return phase*(365./(2*np.pi))", "def phase(self, time):\n pars = self.parameters\n time_0 = pars[\"time_0\"].value\n phase_0 = pars[\"phase_0\"].value\n f0 = pars[\"f0\"].value\n f1 = pars[\"f1\"].value\n f2 = pars[\"f2\"].value\n\n t = (time - time_0) * u.day.to(u.second)\n phase = self._evaluate_phase(t, phase_0, f0, f1, f2)\n return np.remainder(phase, 1)", "def calc_phase(self, time):\n dur = self.get_duration()\n phase = time / dur\n\n if self.enable_loop():\n phase -= np.floor(phase)\n else:\n phase = np.clip(phase, 0.0, 1.0)\n\n return phase", "def phase(self, hjd):\n # 2009-09-28 14:07 IJC: Implemented object-oriented version\n return getorbitalphase(self, hjd)", "def moon_phase(\n datetime_index,\n epsilon=1e-6,\n epoch=2444237.905,\n ecliptic_longitude_epoch=278.833540,\n ecliptic_longitude_perigee=282.596403,\n eccentricity=0.016718,\n moon_mean_longitude_epoch=64.975464,\n moon_mean_perigee_epoch=349.383063,\n):\n # set time to Noon if not otherwise given, as midnight is confusingly close to previous day\n if np.sum(datetime_index.hour) == 0:\n datetime_index = datetime_index + pd.Timedelta(hours=12)\n days = datetime_index.to_julian_date() - epoch\n\n # Mean anomaly of the Sun\n a = (360 / 365.2422) * days\n N = a - 360.0 * np.floor(a / 360.0)\n N = N + ecliptic_longitude_epoch - ecliptic_longitude_perigee\n # Convert from perigee coordinates to epoch 1980\n M = a - 360.0 * np.floor(N / 360.0)\n\n m = torad(M)\n e = m.copy()\n while 1:\n delta = e - eccentricity * np.sin(e) - m\n e = e - delta / (1.0 - eccentricity * np.cos(e))\n if abs(delta).max() <= epsilon:\n break\n\n Ec = sqrt((1 + eccentricity) / (1 - eccentricity)) * np.tan(e / 2.0)\n # True anomaly\n Ec = 2 * todeg(np.arctan(Ec))\n # Suns's geometric ecliptic longuitude\n a = Ec + ecliptic_longitude_perigee\n lambda_sun = a - 360.0 * np.floor(a / 360.0)\n\n # Calculation of the Moon's position\n\n # Moon's mean longitude\n a = 13.1763966 * days + moon_mean_longitude_epoch\n moon_longitude = a - 360.0 * np.floor(a / 360.0)\n\n # Moon's mean anomaly\n a = moon_longitude - 0.1114041 * days - moon_mean_perigee_epoch\n MM = a - 360.0 * np.floor(a / 360.0)\n\n # Moon's ascending node mean longitude\n # MN = fixangle(c.node_mean_longitude_epoch - 0.0529539 * day)\n\n evection = 1.2739 * np.sin(torad(2 * (moon_longitude - lambda_sun) - MM))\n\n # Annual equation\n annual_eq = 0.1858 * np.sin(torad(M))\n\n # Correction term\n A3 = 0.37 * np.sin(torad(M))\n\n MmP = MM + evection - annual_eq - A3\n\n # Correction for the equation of the centre\n mEc = 6.2886 * np.sin(torad(MmP))\n\n # Another correction term\n A4 = 0.214 * np.sin(torad(2 * MmP))\n\n # Corrected longitude\n lP = moon_longitude + evection + mEc - annual_eq + A4\n\n # Variation\n variation = 0.6583 * np.sin(torad(2 * (lP - lambda_sun)))\n\n # True longitude\n lPP = lP + variation\n\n # Calculation of the phase of the Moon\n\n # Age of the Moon, in degrees\n moon_age = lPP - lambda_sun\n\n # Phase of the Moon\n moon_phase = (1 - np.cos(torad(moon_age))) / 2.0\n return moon_phase\n # return pd.Series(moon_phase, 
index=datetime_index)", "def phases(self,dataset):\n start = '1984-1-1'\n if dataset == \"ISCCP_raw\":\n stop = '2007-12-31'\n else:\n stop = '2009-12-31'\n X = getattr(self,dataset)(time=(start,stop))\n R,P = sc.fast_annual_cycle(X)\n return MV.masked_where(np.isnan(P),P)", "def get_moon_phase(self) -> str:\n waxing = self.get_moon_state()\n\n if self.illumination <= 0.1:\n phase = 'New Moon'\n elif 0.1 < self.illumination < 49.9:\n phase = 'Waxing Crescent' if waxing else 'Waning Crescent'\n elif 49.9 <= self.illumination <= 50.1:\n phase = 'First Quarter' if waxing else 'Last Quarter'\n elif 50.1 < self.illumination < 99.9:\n phase = 'Waxing Gibbous' if waxing else 'Waning Gibbous'\n else:\n phase = 'Full Moon'\n\n return phase", "def phaseEstimator(phases,omegas,T_s,k):\n length = phases.shape[0]\n pis = np.tile(2*np.pi,length)\n a = phases - T_s*k*omegas\n phaseShifts = np.mod(a,pis)\n b = phases-phaseShifts\n omega_hat = np.mod(b,pis)\n n = omega_hat/omegas\n estimatedTime = np.sum(n)/length\n \n estimatedPhase = phaseShifts + estimatedTime*omegas\n \n return estimatedPhase", "def _phase(self):\n re = self.real\n im = self.imag\n \n return im._atan2(re)", "def phase(self):\r\n\r\n #XXX calcluate this from the standard output, instead of recalculating:\r\n\r\n tseries_length = self.input.data.shape[0]\r\n spectrum_length = self.spectrum.shape[-1]\r\n\r\n phase = np.zeros((tseries_length,\r\n tseries_length,\r\n spectrum_length))\r\n\r\n for i in range(tseries_length):\r\n for j in range(i, tseries_length):\r\n phase[i][j] = np.angle(\r\n self.spectrum[i][j])\r\n\r\n phase[j][i] = np.angle(\r\n self.spectrum[i][j].conjugate())\r\n return phase", "def test_phase_estimation(self):\n numPulses = 9\n amp = .55\n direction = 'X'\n target = np.pi\n\n # Using the same simulated data as matlab\n data, vardata = simulate_phase_estimation(amp, target, numPulses)\n\n # Verify output matches what was previously seen by matlab\n phase, sigma = cal.phase_estimation(data, vardata, verbose=False)\n self.assertAlmostEqual(phase,-1.2012,places=4)\n self.assertAlmostEqual(sigma,0.0245,places=4)", "def phase(t,t0,P):\n return np.array((t - t0)/P - np.floor((t - t0)/P))", "def getPhase(phase):", "def phase(self, iunit, component1, component2):\n utils.trace('in')\n\n def convert(component1, component2):\n \"\"\"Convert from real/imag electrical field to phase (in degrees)\n \"\"\"\n return np.angle(component1 + 1j * component2) * cst.RAD2DEG\n\n def identity(_, component2):\n \"\"\"Directly returns component2 which is the phase.\n \"\"\"\n return component2\n\n # create the processing dictionary\n converter = {0: convert,\n 1: identity}\n\n # convert/extract the magnitude\n phs = converter[iunit](component1, component2)\n\n utils.trace('out')\n return phs", "def date_ym_value(date: dt.datetime) -> int:\n return (100 * date.year) + date.month", "def _make_phase(self, offset):\n while offset > self.period/2:\n offset -= self.period\n phase = (2*pi) * (offset / self.period)\n return phase", "def timestep_from_date(self, this_date):\n this_timestep = this_date.year - self._date_at_timestep0.year\n return this_timestep" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
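Illustrative contrast, not a replacement for the record's perturbation series: a much cruder phase estimate that counts whole days since an assumed reference new moon, modulo the mean synodic month, and maps the age onto the same 0-27 index (0 = new, 14 = full).

import datetime

SYNODIC_MONTH = 29.530588853                     # mean length of a lunation, in days
REFERENCE_NEW_MOON = datetime.date(2000, 1, 6)   # new moon of 2000-01-06 (assumed epoch)

def crude_moon_phase(date):
    age = (date - REFERENCE_NEW_MOON).days % SYNODIC_MONTH   # days into the current lunation
    return int(age / SYNODIC_MONTH * 28) % 28

print(crude_moon_phase(datetime.date(2024, 4, 23)))  # 14 -> full moon (2024-04-23 was a full moon)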
Reorder 'shape' according to the chosen data layout to optimize data distribution.
def _optimizeshape(shape): shape.sort() if ORDER == 'C': shape[:] = shape[::-1]
[ "def sort_shape(self):\r\n DrawingProgram.merge_sort(self.__shape_list)", "def __change_shape(self):\r\n self.shape = self.next_shape", "def restore_backup_shape(self):\n\n self.shape = self.shape_backup", "def backup_shape(self):\n\n self.shape_backup = np.copy(self.shape)", "def _restore_spatial_dimensions(array,orig_shape):\n array = array.reshape(orig_shape)\n return array", "def _restore_spatial_dimensions(array,orig_shape):\n array = array.reshape(orig_shape)\n return array", "def changeInputShape(self,shape):\n self.input_shape = shape", "def _init_nd_shape_and_axes_sorted(x, shape, axes):\n noaxes = axes is None\n shape, axes = _init_nd_shape_and_axes(x, shape, axes)\n\n if not noaxes:\n shape = shape[axes.argsort()]\n axes.sort()\n\n return shape, axes", "def _settle_shape(self, shape):\n if shape:\n for block in shape.blocks:\n self.array[block.row_position][block.column_position] = block\n self.remove_completed_lines()", "def data_shapes(self):", "def expand_to_shape(data, shape, dtype=None, background=None):\n if dtype is None:\n dtype = data.dtype\n if shape==data.shape:\n return data.astype(dtype)\n if background is None:\n background = data.min()\n expanded_data = numpy.zeros(shape, dtype=dtype) + background\n slices = []\n rhs_slices = []\n for s1, s2 in zip (shape, data.shape):\n a, b = (s1-s2+1)//2, (s1+s2+1)//2\n c, d = 0, s2\n while a<0:\n a += 1\n b -= 1\n c += 1\n d -= 1\n slices.append(slice(a, b))\n rhs_slices.append(slice(c, d))\n try:\n expanded_data[tuple(slices)] = data[tuple (rhs_slices)]\n except ValueError:\n print data.shape, shape\n raise\n return expanded_data", "def processed_shape(self, shape):\n return shape", "def reshape(x, shape):\n return Reshape(shape)(x)", "def _dtype_shape(self, dtype, shape):\n if self._col is not None:\n dtype, shape = _predict_idx_shape_col(dtype, shape, self._col)\n shape = _predict_idx_shape_slice(shape, slice(self._start, self._stop, self._step))\n return dtype, shape", "def shape_elements_order(self) -> List[str]:\n return [\"channels\", \"height\", \"width\"]", "def reshape(tensor, newshape):\n raise NotImplementedError", "def _keras_update_shape(self, prep):\n\n # Run preprocessing on the training data\n X_transform = prep.fit_transform(self.X_train)\n\n # If the input shape has not been specified, it is simply the number of features in X_transform\n if 'input_shape' not in self.model.first_layer_kwargs:\n self.model.first_layer_kwargs['input_shape'] = tuple([X_transform.shape[1]])\n # Else update the input shape based on the number of features after preprocessing\n else:\n # Transform to a list to make the input_shape mutable\n self.model.first_layer_kwargs['input_shape'] = list(self.model.first_layer_kwargs['input_shape'])\n # Update the number of features based on X_transform\n if self.model.lags:\n self.model.first_layer_kwargs['input_shape'][-1] = X_transform.shape[1]//(self.model.lags + (1 if self.model.current_sample_as_input else 0))\n else:\n self.model.first_layer_kwargs['input_shape'][-1] = X_transform.shape[1]//np.prod(self.model.first_layer_kwargs['input_shape'][:-1])\n # Transform back to a tuple as required by Keras\n self.model.first_layer_kwargs['input_shape'] = tuple(self.model.first_layer_kwargs['input_shape'])\n \n # Ensure the Architecture has been updated\n self.model.architecture.iloc[0, 2]['input_shape'] = self.model.first_layer_kwargs['input_shape']\n \n # 2D, 3D and 4D data is valid. \n # e.g. 
The input_shape can be a tuple of (subsequences, timesteps, features), with subsequences and timesteps as optional.\n # A 4D shape may be valid for e.g. a ConvLSTM with (timesteps, rows, columns, features) \n if len(self.model.first_layer_kwargs['input_shape']) > 5:\n err = \"Unsupported input_shape: {}\".format(self.model.first_layer_kwargs['input_shape'])\n raise Exception(err)", "def adjust_shape(placeholder, data):\n if not isinstance(data, np.ndarray) and not isinstance(data, list):\n return data\n if isinstance(data, list):\n data = np.array(data)\n\n placeholder_shape = [x or -1 for x in placeholder.shape.as_list()]\n\n assert _check_shape(placeholder_shape, data.shape), \\\n 'Shape of data {} is not compatible with shape of the placeholder {}'.format(data.shape, placeholder_shape)\n\n return np.reshape(data, placeholder_shape)", "def __convert_shape(self, shape: Tuple[int]) -> tf.TensorShape:\n result = [None]\n for i in range(1, len(shape)):\n result.append(shape[i])\n\n return tf.TensorShape(result)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
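Stand-alone restatement of the transformation in the record, with the module-level ORDER flag replaced by an explicit parameter (an assumption for the sketch): sort the extents ascending, then reverse them when the layout is C order.

def optimize_shape(shape, order='C'):
    # Returns a new list instead of mutating in place, unlike the record above.
    shape = sorted(shape)
    return shape[::-1] if order == 'C' else shape

print(optimize_shape([17, 256, 64]))        # [256, 64, 17]
print(optimize_shape([17, 256, 64], 'F'))   # [17, 64, 256]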
True if ghost layer length is not zero.
def has_ghosts(self): return not np.all(self.mesh.discretization.ghosts == 0)
[ "def empty(self):\n return len(self.layers) == 0", "def is_layered(self):\n count = 0\n for l in self.layers:\n if not l.is_empty():\n count += 1\n return count > 1", "def is_empty(self):\n return len(self.layers) == 1 and self.layer.is_empty()", "def is_ghost(self):\n\t\treturn False", "def is_trivial(self):\n return self.dims == 0", "def is_empty(self) -> bool:\n return self.num_grna() == 0", "def __nonzero__(self):\n return self.Length > 0", "def _is_empty(self, layer, pos):\n assert layer in self.keys\n return pos not in self.data[layer]", "def empty(self) -> bool:\n return self.w == 0 or self.h == 0", "def is_ghost(self):\n return self._is_ghost", "def _is_empty(shape):\n return F.shape_mul(shape) == 0", "def is_full_dimensional(self):\n\n return self.affine_dimension() == self.space_dimension()", "def is_empty(self):\n return len(self.vertices) == 0 and len(self.edges) == 0", "def empty(self):\n return self.numba_rtree._bounds_tree.shape[0] == 0", "def unbound(self) -> bool:\n return self._shape is None", "def isEmpty(self, gx, gy):\n if gx < 0 or gx >= DIM or gy < 0 or gy >= DIM:\n return False\n for piece in self.pieces:\n if piece.gx == gx and piece.gy == gy:\n return False\n return True", "def has_zero_grads(self):\n for params in self.parameters():\n assert params.grad.sum() == 0", "def is_valid(self):\n\n return (torch.sum(self, -1, keepdim=True) == 1).squeeze()", "def degenerate(self):\n return not self.visible or self.height == 0.0 or self.width == 0.0 or \\\n self.axis.mag() == 0.0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
True if target and current object are equal and have the same parent. Equal means same mesh, same shape and same domain.
def is_consistent_with(self, target): same_parent = self.parent() == target.parent() # Note FP. Is it really required to have the # same parent? Inclusion of all proc may be enough? return npw.equal(self.shape, target.shape).all() and same_parent
[ "def same_parent(self, parent):\n is_instance_of_step_parent = (\n inspect.isclass(parent)\n and isinstance(self._parent, parent)\n )\n return is_instance_of_step_parent or parent == self._parent", "def __eq__(self, other):\n parent_same = self.parent1.rid == other.parent1.rid \\\n and self.parent2.rid == other.parent2.rid\n\n parents_opposite = self.parent2.rid == other.parent1.rid \\\n and self.parent1.rid == other.parent2.rid\n\n return parent_same or parents_opposite", "def same_model(res1, res2):\n return res1.get_parent().get_parent() == res2.get_parent().get_parent()", "def __eq__(self, obj):\n if not isinstance(obj, Cube): return False\n return np.array_equal(obj.array, self.array)", "def isSameKindAs(self, *args):\n return _osgAnimation.RigGeometry_isSameKindAs(self, *args)", "def isSameKindAs(self, *args):\n return _osgAnimation.Bone_isSameKindAs(self, *args)", "def _is_equivalent(self, obj, node):\n return (node is obj) if isinstance(obj, Node) else (node == obj)", "def IsSameAs(*args, **kwargs):\n return _core_.Object_IsSameAs(*args, **kwargs)", "def __eq__(self, other: Block) -> bool:\n if len(self.children) == 0 and len(other.children) == 0:\n # Both self and other are leaves.\n return self.position == other.position and \\\n self.size == other.size and \\\n self.colour == other.colour and \\\n self.level == other.level and \\\n self.max_depth == other.max_depth\n elif len(self.children) != len(other.children):\n # One of self or other is a leaf while the other is not.\n return False\n else:\n # Both self and other have four children.\n for i in range(4):\n # The != operator also uses the __eq__ special method.\n if self.children[i] != other.children[i]:\n return False\n\n return True", "def identical_grid(self, other) -> bool:\n return (\n (\n self.crs is None\n or other.raster.crs is None\n or self.crs == other.raster.crs\n )\n and np.allclose(self.transform, other.raster.transform, atol=1e-06)\n and np.allclose(self.shape, other.raster.shape)\n )", "def equals(self, other: InputTransform) -> bool:\n return (\n super().equals(other=other)\n and self.approximate == other.approximate\n and self.tau == other.tau\n )", "def is_match(domain, target):\n return domain.n_vertices == target.n_vertices + 1 and domain.n_loops == target.n_loops \\\n and domain.n_hairs == target.n_hairs and domain.n_ws == target.n_ws", "def _is_parent(a, b):\n # type: (PredContext, PredContext) -> bool\n while b and a is not b:\n b = getattr(b, 'parent', None)\n return a is b", "def is_match(domain, target):\n return domain.n_vertices == target.n_vertices and domain.n_loops - 1 == target.n_loops and \\\n domain.n_hairs_a + 1 == target.n_hairs_a and domain.n_hairs_b + 1 == target.n_hairs_b \\\n and domain.sub_type == target.sub_type", "def same_chain(res1, res2):\n return (res1.get_parent() == res2.get_parent()) and same_model(res1, res2)", "def can_communicate_with(self, target):\n if self == target:\n return True\n msg = 'You try to connect topologies belonging to'\n msg += ' two different mpi tasks. 
Set taskids properly or use'\n msg += ' InterBridge.'\n assert self.task_id() == target.task_id(), msg\n\n # Parent communicator\n # Todo : define some proper conditions for compatibility\n # between topo_from, topo_to and parent:\n # - same size\n # - same domain\n # - common processus ...\n # At the time we check that both topo have\n # the same comm_origin.\n return self.is_consistent_with(target)", "def looks_same(self, other):\n\t\treturn self.mlparams.__dict__ == other.mlparams.__dict__ and \\\n\t\t\tself.toolparams.__dict__ == other.toolparams.__dict__ and \\\n\t\t\tself.workbasedir == other.workbasedir and \\\n\t\t\tself.toolname == other.toolname and \\\n\t\t\tself.workdir == other.workdir", "def __eq__(self, other):\n if not np.all(self.adjacency_matrix == other.adjacency_matrix):\n return False\n\n if self.name != other.name:\n return False\n\n return True", "def __eq__(self, other: object) -> bool:\n if not isinstance(other, PropertyReference):\n return NotImplemented\n if self.name != other.name:\n return False\n return self.node.get_path() == other.node.get_path()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
True if current topo is compliant with target.
def can_communicate_with(self, target): if self == target: return True msg = 'You try to connect topologies belonging to' msg += ' two different mpi tasks. Set taskids properly or use' msg += ' InterBridge.' assert self.task_id() == target.task_id(), msg # Parent communicator # Todo : define some proper conditions for compatibility # between topo_from, topo_to and parent: # - same size # - same domain # - common processus ... # At the time we check that both topo have # the same comm_origin. return self.is_consistent_with(target)
[ "def is_dedicated_node(self):\n return self.is_node() and not self.is_master()", "def can_reach_support(self, target):\n if self.territory.is_complex:\n return target in self.named_coast.neighbours\n\n if self.territory.is_coastal and target.is_coastal:\n return target in self.territory.shared_coasts\n\n return self.territory.adjacent_to(target) and \\\n target.accessible_by_piece_type(self)", "def is_target_remote():\n return sync_mode in (SyncMode.SENDER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)", "def checkClientMode():\n if 'TOR_PT_CLIENT_TRANSPORTS' in os.environ: return True\n if 'TOR_PT_SERVER_TRANSPORTS' in os.environ: return False\n raise EnvError('neither TOR_PT_{SERVER,CLIENT}_TRANSPORTS set')", "def isClientHost(self):\n return self.serverThread is not None", "def is_coordinator(self):\n try:\n self.coordinator\n except:\n return False\n\n return True", "def can_reach(self, target, named_coast=None):\n if target.is_complex and not named_coast:\n raise ValueError(\n 'Must specify coast if target is complex territory.'\n )\n if named_coast:\n return self.territory in named_coast.neighbours\n\n if self.territory.is_complex:\n return target in self.named_coast.neighbours\n\n if self.territory.is_coastal and target.is_coastal:\n return target in self.territory.shared_coasts\n\n return self.territory.adjacent_to(target) and \\\n target.accessible_by_piece_type(self)", "def is_strongly_connected(self):\n if self.order()==1:\n return True\n\n try:\n return self._backend.is_strongly_connected()\n\n except AttributeError:\n return len(self.strongly_connected_components()) == 1", "def isConnected(self) -> bool:\n return nx.is_connected(self.graph)", "def is_transitive(self):\r\n got_orb = False\r\n for x in self.orbits():\r\n if len(x) > 1:\r\n if got_orb:\r\n return False\r\n got_orb = True\r\n return got_orb", "def is_consistent_with(self, target):\n same_parent = self.parent() == target.parent()\n # Note FP. Is it really required to have the\n # same parent? 
Inclusion of all proc may be enough?\n return npw.equal(self.shape, target.shape).all() and same_parent", "def is_gentarget(self, target):\r\n raise NotImplementedError", "def is_connected(self):\n connected = False\n self.state = self.mesh.state()\n if self.state in (STATE_CHILD, STATE_ROUTER, STATE_LEADER, STATE_LEADER_SINGLE):\n connected = True\n return connected", "def is_adjacent(self, remote_host_name):\n # Check if a topology is defined, otherwise use fully connected\n if self.topology is None:\n return True\n\n if self.name in self.topology:\n if remote_host_name in self.topology[self.name]:\n return True\n else:\n return False\n else:\n logging.warning(\n \"Node {} is not in the specified topology and is therefore \"\n \"assumed to have no neighbors\".format(self.name)\n )\n return False", "def connectivity(self) -> Optional[bool]:\n return self._connectivity", "def is_peered_with(self, other: SkupperSite) -> bool:\n if not self.cluster.peering:\n return False\n\n for c in self.cluster.peering.connections:\n if (\n isinstance(\n c,\n (\n ClusterPeeringConnectionClusterRequesterV1,\n ClusterPeeringConnectionClusterAccepterV1,\n ),\n )\n ) and c.cluster.name == other.cluster.name:\n return True\n return False", "def can_reach_support(self, target):\n return self.territory.adjacent_to(target) and \\\n target.accessible_by_piece_type(self)", "def connected(self) -> bool:\n return self.transport is not None", "def _is_task_on_responsible_client(self):\n return get_client_id() == self.context.responsible_client" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Collect global indices of local meshes on each process of topo
def gather_global_indices(topo, toslice=True, root=None, comm=None): if comm is None: comm = topo.parent() size = comm.size start = topo.mesh.start() end = topo.mesh.stop() - 1 # communicator that owns the topology rank = comm.Get_rank() dimension = topo.domain.dimension iglob = npw.int_zeros((dimension * 2, size)) iglob_res = npw.int_zeros((dimension * 2, size)) iglob[0::2, rank] = start iglob[1::2, rank] = end # iglob is saved as a numpy array and then transform into # a dict of slices since mpi send operations are much # more efficient with numpy arrays. if root is None: comm.Allgather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT]) else: comm.Gather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT], root=root) if toslice: return utils.arrayToDict(iglob_res) else: return iglob_res
[ "def index_and_return_global_ids(self):\n \n assert( len(self.largest_mappings) == 1 )\n\n mapped_ids = []\n for global_id, frame_one_id in enumerate(self.largest_mappings[0]):\n self.mesh_one.get_element_with_frame_id(frame_one_id).global_id = global_id\n self.mesh_two.get_element_with_frame_id(self.largest_mappings[0][frame_one_id]).global_id = global_id\n mapped_ids.append(global_id)\n\n self.mesh_two.index_global_ids()\n self.mesh_one.index_global_ids() \n \n return mapped_ids", "def get_local_indices(self, part, ctx):\n return self.map_to_global(\n F.arange(0, self.local_size(part), ctx=ctx), part\n )", "def jax_synchronize_hosts():\n if jax.process_count() > 1:\n # Make sure all hosts stay up until the end of main.\n x = jnp.ones([jax.local_device_count()])\n x = jax.device_get(jax.pmap(lambda x: jax.lax.psum(x, 'i'), 'i')(x))\n assert x[0] == jax.device_count()", "def get_local_global_atom_index_mapping(\n molecules: List[MoleculeEntry],\n) -> Tuple[List[str], List[Bond], List[List[int]], List[Tuple[int, int]]]:\n\n global_species = []\n global_bonds = []\n\n local_to_global = []\n global_to_local = []\n\n n = 0\n for i, m in enumerate(molecules):\n global_species.extend(m.species)\n\n bonds = np.asarray(m.bonds) + n\n bonds = [tuple(b) for b in bonds.tolist()]\n global_bonds.extend(bonds)\n\n mp_l2g = [j + n for j in range(m.num_atoms)]\n local_to_global.append(mp_l2g)\n\n mp_g2l = [(i, j) for j in range(m.num_atoms)]\n global_to_local.extend(mp_g2l)\n\n n += m.num_atoms\n\n return global_species, global_bonds, local_to_global, global_to_local", "def _exchange_ghosts_local(self):\n for d in xrange(self._dim):\n self._exchange_ghosts_local_d(d)", "def getGlobalIndices( self, indices: list):\n result = indices.copy()\n for i,toAdd in enumerate(self._layout.starts):\n result[self._layout.dims_order[i]]=indices[i]+toAdd\n return result", "def get_dv_src_indices(self):\n if MPI is not None and self.comm.size > 1:\n local_ndvs = self.fea_assembler.getNumDesignVars()\n all_proc_ndvs = self.comm.gather(local_ndvs, root=0)\n all_proc_indices = []\n if self.comm.rank == 0:\n tot_ndvs = 0\n for proc_i in range(self.comm.size):\n local_ndvs = all_proc_ndvs[proc_i]\n proc_indices = np.arange(tot_ndvs, tot_ndvs + local_ndvs)\n all_proc_indices.append(proc_indices)\n tot_ndvs += local_ndvs\n local_dv_indices = self.comm.scatter(all_proc_indices, root=0)\n return local_dv_indices\n else:\n ndvs = len(self.options[\"initial_dv_vals\"])\n all_dv_indices = np.arange(ndvs)\n return all_dv_indices", "def gather_global_indices_overlap(topo=None, comm=None, dom=None,\n toslice=True, root=None):\n if topo is None:\n assert comm is not None and dom is not None\n size = comm.Get_size()\n rank = comm.Get_rank()\n dimension = dom.dimension\n iglob = npw.int_zeros((dimension * 2, size))\n iglob_res = npw.int_zeros((dimension * 2, size))\n iglob[1::2, rank] = -1\n if root is None:\n comm.Allgather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT])\n else:\n comm.Gather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT],\n root=root)\n if toslice:\n return utils.arrayToDict(iglob_res)\n else:\n return iglob_res\n\n else:\n return TopoTools.gather_global_indices(topo, toslice, root, comm)", "def gather_dof_coordinates(V: FunctionSpace, dofs: np.ndarray):\n x = V.tabulate_dof_coordinates()\n local_dofs = dofs[dofs < V.dofmap.index_map.size_local * V.dofmap.index_map_bs]\n coords = x[local_dofs]\n num_nodes = len(coords)\n glob_num_nodes = MPI.COMM_WORLD.allreduce(num_nodes, op=MPI.SUM)\n recvbuf = None\n if 
MPI.COMM_WORLD.rank == 0:\n recvbuf = np.zeros(3 * glob_num_nodes, dtype=np.float64)\n sendbuf = coords.reshape(-1)\n sendcounts = np.array(MPI.COMM_WORLD.gather(len(sendbuf), 0))\n MPI.COMM_WORLD.Gatherv(sendbuf, (recvbuf, sendcounts), root=0)\n glob_coords = MPI.COMM_WORLD.bcast(recvbuf, root=0).reshape((-1, 3))\n return glob_coords", "def loc_map(self):\n idx_map = -np.ones(shape=self.world_size, dtype=np.int16)\n for agent in self.agents:\n r, c = agent.loc\n idx_map[r, c] = int(agent.idx)\n return idx_map", "def globalindices(array, comm):\n start, end = globalrange(array, comm)\n globalsize = comm.bcast(end, root=comm.size - 1)\n\n if globalsize > 1024 * 1024 * 1024:\n dtype = 'i8'\n else:\n dtype = 'i4'\n\n return numpy.arange(start, end, dtype=dtype)", "def get_submaps(args, comm, data):\n autotimer = timing.auto_timer()\n if comm.comm_world.rank == 0:\n print('Scanning local pixels', flush=args.flush)\n start = MPI.Wtime()\n\n # Prepare for using distpixels objects\n nside = args.nside\n subnside = 16\n if subnside > nside:\n subnside = nside\n subnpix = 12 * subnside * subnside\n\n # get locally hit pixels\n lc = tm.OpLocalPixels()\n localpix = lc.exec(data)\n if localpix is None:\n raise RuntimeError(\n 'Process {} has no hit pixels. Perhaps there are fewer '\n 'detectors than processes in the group?'.format(\n comm.comm_world.rank))\n\n # find the locally hit submaps.\n localsm = np.unique(np.floor_divide(localpix, subnpix))\n\n comm.comm_world.barrier()\n stop = MPI.Wtime()\n elapsed = stop - start\n if comm.comm_world.rank == 0:\n print('Local submaps identified in {:.3f} s'.format(elapsed),\n flush=args.flush)\n return localpix, localsm, subnpix", "def compute_index_grid(self):\n vecs = [np.arange(n) for n in self.shape]\n grid_locs = np.stack([v.flatten() for v in np.meshgrid(*vecs, indexing='ij')], axis=0).reshape(\n (-1,) + self.shape)\n return grid_locs.astype(int)", "def get_locations(self):\n self.locations = {} # reset dictionary\n for node in self.extant_p:\n if node.host not in self.locations:\n self.locations.update({node.host: []})\n self.locations[node.host].append(node)", "def write_global_local_maps(dest,global_local,local_global):", "def _index_local_workflows( self ):\n \n self._workflows_by_name = {}\n self._workflows_by_config_path = {}\n\n for wf in self._all_local_workflows:\n # Index by name\n if wf.name in self._workflows_by_name:\n self._workflows_by_name[wf.name].append( wf )\n else:\n self._workflows_by_name[wf.name] = [wf]\n\n # Index by config path, note that there may be multiple\n # configuration paths that point to the same workflow. 
\n # \n # For example, if you ran molflow with the default config file and\n # ran from ~/.molflow/workflows, all workflows would show up twice\n # due to the './' entry and the '~/.molflow/workflows' entry.\n for config_path in wf.config_path:\n cp = str(config_path)\n if cp in self._workflows_by_config_path:\n self._workflows_by_config_path[cp].append( wf )\n else:\n self._workflows_by_config_path[cp] = [wf]\n\n # Check for duplicated names and warn.\n for wf_name in self._workflows_by_name.keys():\n if len( self._workflows_by_name[wf_name] ) > 1:\n print(\"A workflow named '{}' was found via multiple local workflow paths.\\nLocations found: {}\".format(wf_name, [i.config_path for i in self._workflows_by_name[wf_name]]))", "def _local_vertex_sets(self, d):\n\n # first we need for each cell its defining vertex indices\n cell_vertices = self._get_indices(d=self._dimension, dd=0)\n\n # then we need to know the possible local index combinations\n # that would define the `d`-dimensional mesh entities of each cell\n combs = itertools.combinations(range(self._dimension+1), self._n_vertices[d])\n # e.g. for a triangle and edges (`d=1`) this would be `[(0,1), (0,2), (1,2)]`\n # because the three edges of a triangle are defined by the\n # first-second (0,1), first-third (0,2) and second-third (1,2) \n # vertex of that triangle\n\n # now that we have that possible combinations we can simply take the\n # respective vertex indices from the cell vertices array\n vertex_sets = cell_vertices.take(list(combs), axis=1)\n\n return vertex_sets", "def get_main_branch_indices(self):\n\n assert self.halt is not None\n prog_main_index = self.halt_index\n prog_main_indices = self.halt.prop(\n 'progenitor.main.indices', self.halt_index)\n self.main_branch_indices = prog_main_indices\n return prog_main_indices", "def get_all_local_clustering_coef(g):\n local_cc = {}\n\n for n in nx.nodes(g):\n local_cc[n] = get_local_clustering_coef(g, n)\n\n return local_cc" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function does the same thing as gather_global_indices but may also work when topo is None. The function is useful if you need to collect global indices on a topo defined only on a subset of comm, where, for the procs not in this subset, topo will be equal to None. In such a case, comm and dom are required. This may happen when you want to build a bridge between two topologies that do not handle the same number of processes but with an overlap between the two groups of processes of the topologies. In that case, a call to gather_global_indices_overlap(topo, comm, dom) will work on all processes belonging to comm, topo being None or not. The values corresponding to ranks not in topo will be empty slices.
def gather_global_indices_overlap(topo=None, comm=None, dom=None, toslice=True, root=None): if topo is None: assert comm is not None and dom is not None size = comm.Get_size() rank = comm.Get_rank() dimension = dom.dimension iglob = npw.int_zeros((dimension * 2, size)) iglob_res = npw.int_zeros((dimension * 2, size)) iglob[1::2, rank] = -1 if root is None: comm.Allgather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT]) else: comm.Gather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT], root=root) if toslice: return utils.arrayToDict(iglob_res) else: return iglob_res else: return TopoTools.gather_global_indices(topo, toslice, root, comm)
[ "def gather_global_indices(topo, toslice=True, root=None, comm=None):\n if comm is None:\n comm = topo.parent()\n size = comm.size\n start = topo.mesh.start()\n end = topo.mesh.stop() - 1\n # communicator that owns the topology\n rank = comm.Get_rank()\n dimension = topo.domain.dimension\n iglob = npw.int_zeros((dimension * 2, size))\n iglob_res = npw.int_zeros((dimension * 2, size))\n iglob[0::2, rank] = start\n iglob[1::2, rank] = end\n # iglob is saved as a numpy array and then transform into\n # a dict of slices since mpi send operations are much\n # more efficient with numpy arrays.\n if root is None:\n comm.Allgather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT])\n else:\n comm.Gather([iglob[:, rank], MPI.INT], [iglob_res, MPI.INT],\n root=root)\n\n if toslice:\n return utils.arrayToDict(iglob_res)\n else:\n return iglob_res", "def globalindices(array, comm):\n start, end = globalrange(array, comm)\n globalsize = comm.bcast(end, root=comm.size - 1)\n\n if globalsize > 1024 * 1024 * 1024:\n dtype = 'i8'\n else:\n dtype = 'i4'\n\n return numpy.arange(start, end, dtype=dtype)", "def get_dv_src_indices(self):\n if MPI is not None and self.comm.size > 1:\n local_ndvs = self.fea_assembler.getNumDesignVars()\n all_proc_ndvs = self.comm.gather(local_ndvs, root=0)\n all_proc_indices = []\n if self.comm.rank == 0:\n tot_ndvs = 0\n for proc_i in range(self.comm.size):\n local_ndvs = all_proc_ndvs[proc_i]\n proc_indices = np.arange(tot_ndvs, tot_ndvs + local_ndvs)\n all_proc_indices.append(proc_indices)\n tot_ndvs += local_ndvs\n local_dv_indices = self.comm.scatter(all_proc_indices, root=0)\n return local_dv_indices\n else:\n ndvs = len(self.options[\"initial_dv_vals\"])\n all_dv_indices = np.arange(ndvs)\n return all_dv_indices", "def _global_index():\n var_and_type = var + Optional(type_)\n global_dec = Suppress(upkey(\"global\")) + index\n range_key_etc = Suppress(\",\") + Group(throughput) | Optional(\n Group(Suppress(\",\") + var_and_type).setResultsName(\"range_key\")\n ) + Optional(Suppress(\",\") + include_vars) + Optional(\n Group(Suppress(\",\") + throughput)\n )\n global_spec = (\n Suppress(\"(\")\n + primitive\n + Suppress(\",\")\n + Group(var_and_type).setResultsName(\"hash_key\")\n + range_key_etc\n + Suppress(\")\")\n )\n return Group(global_dec + global_spec).setName(\"global index\")", "def gather_dof_coordinates(V: FunctionSpace, dofs: np.ndarray):\n x = V.tabulate_dof_coordinates()\n local_dofs = dofs[dofs < V.dofmap.index_map.size_local * V.dofmap.index_map_bs]\n coords = x[local_dofs]\n num_nodes = len(coords)\n glob_num_nodes = MPI.COMM_WORLD.allreduce(num_nodes, op=MPI.SUM)\n recvbuf = None\n if MPI.COMM_WORLD.rank == 0:\n recvbuf = np.zeros(3 * glob_num_nodes, dtype=np.float64)\n sendbuf = coords.reshape(-1)\n sendcounts = np.array(MPI.COMM_WORLD.gather(len(sendbuf), 0))\n MPI.COMM_WORLD.Gatherv(sendbuf, (recvbuf, sendcounts), root=0)\n glob_coords = MPI.COMM_WORLD.bcast(recvbuf, root=0).reshape((-1, 3))\n return glob_coords", "def index_and_return_global_ids(self):\n \n assert( len(self.largest_mappings) == 1 )\n\n mapped_ids = []\n for global_id, frame_one_id in enumerate(self.largest_mappings[0]):\n self.mesh_one.get_element_with_frame_id(frame_one_id).global_id = global_id\n self.mesh_two.get_element_with_frame_id(self.largest_mappings[0][frame_one_id]).global_id = global_id\n mapped_ids.append(global_id)\n\n self.mesh_two.index_global_ids()\n self.mesh_one.index_global_ids() \n \n return mapped_ids", "def get_local_indices(self, part, ctx):\n return 
self.map_to_global(\n F.arange(0, self.local_size(part), ctx=ctx), part\n )", "def getGlobalIndices( self, indices: list):\n result = indices.copy()\n for i,toAdd in enumerate(self._layout.starts):\n result[self._layout.dims_order[i]]=indices[i]+toAdd\n return result", "def get_local_global_atom_index_mapping(\n molecules: List[MoleculeEntry],\n) -> Tuple[List[str], List[Bond], List[List[int]], List[Tuple[int, int]]]:\n\n global_species = []\n global_bonds = []\n\n local_to_global = []\n global_to_local = []\n\n n = 0\n for i, m in enumerate(molecules):\n global_species.extend(m.species)\n\n bonds = np.asarray(m.bonds) + n\n bonds = [tuple(b) for b in bonds.tolist()]\n global_bonds.extend(bonds)\n\n mp_l2g = [j + n for j in range(m.num_atoms)]\n local_to_global.append(mp_l2g)\n\n mp_g2l = [(i, j) for j in range(m.num_atoms)]\n global_to_local.extend(mp_g2l)\n\n n += m.num_atoms\n\n return global_species, global_bonds, local_to_global, global_to_local", "def get_submaps(args, comm, data):\n autotimer = timing.auto_timer()\n if comm.comm_world.rank == 0:\n print('Scanning local pixels', flush=args.flush)\n start = MPI.Wtime()\n\n # Prepare for using distpixels objects\n nside = args.nside\n subnside = 16\n if subnside > nside:\n subnside = nside\n subnpix = 12 * subnside * subnside\n\n # get locally hit pixels\n lc = tm.OpLocalPixels()\n localpix = lc.exec(data)\n if localpix is None:\n raise RuntimeError(\n 'Process {} has no hit pixels. Perhaps there are fewer '\n 'detectors than processes in the group?'.format(\n comm.comm_world.rank))\n\n # find the locally hit submaps.\n localsm = np.unique(np.floor_divide(localpix, subnpix))\n\n comm.comm_world.barrier()\n stop = MPI.Wtime()\n elapsed = stop - start\n if comm.comm_world.rank == 0:\n print('Local submaps identified in {:.3f} s'.format(elapsed),\n flush=args.flush)\n return localpix, localsm, subnpix", "def globalrange(array, comm):\n s = comm.allgather(len(array))\n start = sum(s[:comm.rank])\n end = start + s[comm.rank]\n return (start, end)", "def _get_zero_param_intra_parallel_group_ranks():\n return dist.get_all_ranks_from_group(group=_get_zero_param_intra_parallel_group())", "def domain_idx(domain, cpu_idx):\n global _dl\n domain = domain_type(domain)\n result = _dl.geopm_topo_domain_idx(domain, cpu_idx)\n if result < 0:\n raise RuntimeError(\"geopm_topo_domain_idx() failed: {}\".format(\n error.message(result)))\n return result", "def get_merged_indices(coords, ref_coords=None, kdtree=None, \n pool_size=None, separation=1/3600):\n # If the user didn't specify a KDTREE, \n if kdtree is None:\n try:\n from scipy import spatial\n except ImportError:\n raise ImportError(\n \"You must have 'scipy' installed to combine catalogs\")\n if ref_coords is not None:\n if len(ref_coords)==2:\n ref1,ref2 = ref_coords\n pos1 = np.array([ref1,ref2])\n pos1 = pos1.T\n elif len(ref_coords[0])==2:\n pos1 = ref_coords\n else:\n raise ValueError(\n \"Expected either a 2xN array or Nx2 array for ref_coords\")\n KDTree = spatial.cKDTree\n kdtree = KDTree(pos1)\n else:\n raise Exception(\"Either ref_coords or kdtree must be specified\")\n if pool_size is None:\n pool_size = -1\n \n if len(coords)==2:\n coord1, coord2 = coords\n pos2 = np.array([coord1,coord2])\n pos2 = pos2.T\n elif len(coords[0])==2:\n pos2 = coords\n else:\n raise ValueError(\"Expected either a 2xN array or Nx2 array for coords\")\n \n src_count1 = len(ref1)\n src_count2 = len(coord1)\n \n # Match all of the sources\n d2,idx = kdtree.query(pos2, n_jobs=pool_size,\n 
distance_upper_bound=separation)\n matches = np.isfinite(d2)\n idx1 = idx[matches]\n idx2 = np.where(matches)[0]\n # Flag all of the duplicate sources\n unique, inverse, counts = np.unique(\n idx1, return_inverse=True, return_counts=True)\n u = unique.copy()\n cidx = counts>1\n u[cidx]=-1\n didx = u[inverse]<0\n duplicates = np.arange(len(idx1))[didx]\n # Identify unmatched sources\n unmatched1 = np.setdiff1d(range(src_count1), idx1)\n unmatched2 = np.setdiff1d(range(src_count2), idx2)\n \n #return (idx1, unmatched1, duplicate1), (idx2, unmatched2, duplicate2)\n return (idx2, unmatched2),(idx1, unmatched1), duplicates", "def __global_index( self , active_index = None , global_index = None , ijk = None):\n\n set_count = 0\n if not active_index is None:\n set_count += 1\n\n if not global_index is None:\n set_count += 1\n\n if ijk:\n set_count += 1\n \n if not set_count == 1:\n raise ValueError(\"Exactly one of the kewyord arguments active_index, global_index or ijk must be set\")\n \n if not active_index is None:\n global_index = self._get_global_index1A( active_index )\n elif ijk:\n nx = self.getNX()\n ny = self.getNY()\n nz = self.getNZ()\n \n i,j,k = ijk\n\n if not 0 <= i < nx:\n raise IndexError(\"Invalid value i:%d Range: [%d,%d)\" % (i , 0 , nx)) \n\n if not 0 <= j < ny:\n raise IndexError(\"Invalid value j:%d Range: [%d,%d)\" % (j , 0 , ny)) \n \n if not 0 <= k < nz:\n raise IndexError(\"Invalid value k:%d Range: [%d,%d)\" % (k , 0 , nz)) \n\n global_index = self._get_global_index3( i,j,k)\n else:\n if not 0 <= global_index < self.getGlobalSize():\n raise IndexError(\"Invalid value global_index:%d Range: [%d,%d)\" % (global_index , 0 , self.getGlobalSize())) \n return global_index", "def _compute_global_elem_to_part_elem(\n nelements: int,\n part_id_to_elements: Mapping[PartID, np.ndarray],\n part_id_to_part_index: Mapping[PartID, int],\n element_id_dtype: np.dtype) -> np.ndarray:\n global_elem_to_part_elem = np.empty((nelements, 2), dtype=element_id_dtype)\n for part_id in part_id_to_elements.keys():\n elements = part_id_to_elements[part_id]\n global_elem_to_part_elem[elements, 0] = part_id_to_part_index[part_id]\n global_elem_to_part_elem[elements, 1] = np.indices(\n (len(elements),), dtype=element_id_dtype)\n\n return global_elem_to_part_elem", "def compute_global_dof(num_elem, elem, row, col):\n\n if (row % 2 == 0):\n row_ind = elem[:, row // 2] * 2\n else:\n row_ind = elem[:, row // 2] * 2 + 1\n \n if (col % 2 == 0):\n col_ind = elem[:, col // 2] * 2\n else:\n col_ind = elem[:, col // 2] * 2 + 1\n\n return row_ind, col_ind", "def get_used_indices(self):\n\n if self.npass==1:\n return numpy.arange(self.data1.shape[0])\n else:\n if not hasattr(self, 'used_indices'):\n raise RuntimeError(\"run a 2 pass calculation first\")\n\n return self.used_indices", "def _get_global_attn_indices(is_index_global_attn):\n # helper variable\n num_global_attn_indices = tf.math.count_nonzero(is_index_global_attn, axis=1)\n num_global_attn_indices = tf.cast(num_global_attn_indices, dtype=tf.constant(1).dtype)\n\n # max number of global attn indices in batch\n max_num_global_attn_indices = tf.reduce_max(num_global_attn_indices)\n\n # indices of global attn\n is_index_global_attn_nonzero = tf.where(is_index_global_attn)\n\n # helper variable\n is_local_index_global_attn = tf.range(max_num_global_attn_indices) < tf.expand_dims(\n num_global_attn_indices, axis=-1\n )\n\n # location of the non-padding values within global attention indices\n is_local_index_global_attn_nonzero = 
tf.where(is_local_index_global_attn)\n\n # location of the padding values within global attention indices\n is_local_index_no_global_attn_nonzero = tf.where(tf.math.logical_not(is_local_index_global_attn))\n\n return (\n max_num_global_attn_indices,\n is_index_global_attn_nonzero,\n is_local_index_global_attn_nonzero,\n is_local_index_no_global_attn_nonzero,\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return true if all mpi processes of child belong to parent
def is_parent(child, parent): # Get the list of processes assert child is not None assert parent is not None #child_ranks = [i for i in xrange(child.Get_size())] child_group = child.Get_group() parent_group = parent.Get_group() inter_group = MPI.Group.Intersect(child_group, parent_group) return child_group.Get_size() == inter_group.Get_size()
[ "def mpi_multi():\n comm, procs, rank = mpi_world()\n if procs > 1:\n return True\n else:\n return False", "def has_mpi_peer_processes():\n return mpi4py_available and MPI.COMM_WORLD.Get_size() > 1", "def contains_parent(self, pid):\n return pid in self._parent_ids", "def can_run_mpi(self):\n return True", "def children_born(log, mpid, np): \n sc,out = subprocess.getstatusoutput(\"ps --ppid %i -o pid,start\" % mpid)\n if sc is not 0:\n print (\" mpMonTools.children_born: no kids yet... Error, sc=%i\" % sc)\n return False\n\n ps_lines = out.splitlines()\n #print (\"ps_lines=\", ps_lines)\n nc = len(ps_lines)-1\n \n print (\" children_exist: nbr of children = [%i]\" % nc)\n if grepValue(log, \"FIRSTEVENT_ELAP_TIME\") is None:\n return False\n else:\n pass\n\n if nc>=np : #nbr of children is equal to nbr of procs required \n print (\"%i children workers forked! Registering them (creating ProcDicts) ...\" % np)\n ps_lines.pop(0)\n for line in ps_lines:\n ps_str = line.split()\n pid = int(ps_str[0])\n print (\"child [%i] born\" % pid, )\n if grepExist(log, \"%i-%i\" % (mpid, pid)):\n ProcDict(pid, start_time = _seconds(ps_str[1]))\n print (\"..... child WORKER [%i] added\" % pid)\n return True\n else:\n print (\"no children exist for parent: %s \" % mpid)\n return False", "def children_working(ppid):\n out = subprocess.getoutput(\"ps --ppid %i -o pid,state,vsize,rss,sz,start,cputime,etime\" % ppid)\n ps_lines = out.splitlines()\n ps_lines.pop(0)\n \n if len(ps_lines) > 0:\n for line in ps_lines:\n ps_str = line.split()\n pid = int(ps_str[0])\n if pid in mp_stat[\"cpid\"].keys():\n mp_stat[\"cpid\"][pid].add_ps_line(line)\n #print (\"child_stat.appended for kid: %i\" % pid )\n return True #ps returns something -> children still exist \n else:\n print (\" mpMonTools.children_working: no children exist for parent: %i\" % ppid)\n return False #ps returns nothing -> children either weren't born or died. \n return False", "def check_parent_processes_alive():\n cur_process = psutil.Process()\n parent = cur_process.parent()\n while True:\n time.sleep(1)\n if not parent.is_running():\n break\n\n logger.warning(\"Parent process is terminated abnormally. Process exits.\")\n cur_process.kill()", "def i_am_root():\n try:\n return True if mpi_rank() == 0 else False\n except AttributeError:\n # not running MPI\n return True", "def is_multigpu_child_process():\n return (dist.is_initialized() or \"TORCHELASTIC_RUN_ID\" in os.environ) and os.environ[\"LOCAL_RANK\"] != \"0\"", "def can_run_mpi():\n return False", "def is_known(self, child):\r\n return child in self._parents", "def __contains__(self, pid):\n return self.contains_child(pid) or self.contains_parent(pid)", "def using_mpi(self) -> bool:\r\n\r\n try:\r\n from mpi4py import MPI\r\n\r\n comm = MPI.COMM_WORLD\r\n return comm.size > 1\r\n\r\n except ModuleNotFoundError:\r\n return False", "def pure_mpi(self):\n return self.has_mpi and not self.has_omp", "def contains_child(self, pid):\n return pid in self._children_ids", "def childProcess(processId: int) -> bool:\n return processId == 0", "def is_parent_process_alive():\n if psutil is None:\n return True\n return psutil.pid_exists(os.getppid())", "def names_match_process_or_parents(proc, names):\n\n if proc is None:\n return False\n elif any(name == proc.name().lower() for name in names):\n return True\n elif proc.parent() is not None and proc.pid == proc.parent().pid:\n return False\n else:\n return names_match_process_or_parents(proc.parent(), names)", "def isChild(self):\n \n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Number of processes common to comm_1 and comm_2
def intersection_size(comm_1, comm_2): if comm_1 == MPI.COMM_NULL or comm_2 == MPI.COMM_NULL: return None group_1 = comm_1.Get_group() group_2 = comm_2.Get_group() inter_group = MPI.Group.Intersect(group_1, group_2) return inter_group.Get_size()
[ "def getNumberOfCommonLabels(variant1=[], variant2=[]):\n\n return len(commonLabels(variant1,variant2))", "def communities_with_protesters(partition, active_nodes):\n return len(set([partition[node] for node in active_nodes]))", "def _num_of_consolidated(self, observation):\n a = set(observation)\n b = set(np.arange(self.num_of_servers))\n intersect = b - a\n return len(intersect)", "def countMotHitCmp(motDistr1, motDistr2):\n\n return motDistr2.getNcentralHits() - motDistr1.getNcentralHits()", "def calc_process_cohesion(partitions, graph):\n ch = 0\n for part in partitions:\n crc = calc_community_relation_cohesion(part, graph)\n cic = calc_community_information_cohesion(part, graph)\n ch = ch + (crc * cic)\n ch = ch / len(partitions)\n return ch", "def count_common_connections(network, user_A, user_B):\n count = 0\n if user_A not in network or user_B not in network:\n return False\n for person in network[user_A][0]:\n if person in network[user_B][0]:\n count += 1\n return count", "def line_possibility_counter(spaces1, spaces2, given_spaces, ships):\n\tcount = 0\n\tfor ship in ships:\n\t\tcount += single_ship_line_possibility_counter(spaces1, spaces2, given_spaces, ship)\n\treturn count", "def calc_community_information_cohesion(partition, graph):\n pre_suc = list()\n for vertex in partition:\n pre_suc.extend(get_unique_predecessors_successors(vertex, graph))\n pre_suc = get_duplicates(pre_suc)\n if len(pre_suc) == 0:\n cic = 0\n else:\n cic = len(pre_suc) / len(partition)\n return cic", "def compare_comm(comm_1, comm_2):\n assert comm_1 != MPI.COMM_NULL\n assert comm_2 != MPI.COMM_NULL\n result = MPI.Comm.Compare(comm_1, comm_2)\n res = [MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR, MPI.UNEQUAL]\n return result == res[0]", "def num_procs():\n return MPI.COMM_WORLD.Get_size()", "def count_common_subgraphs(graph1, graph2, n1, n2,\n node_attrib='label', edge_attrib='label'):\n for graph in (graph1, graph2):\n assert nx.is_directed_acyclic_graph(graph)\n \n if graph1.node[n1][node_attrib] != graph2.node[n2][node_attrib]:\n return 0\n\n n1_children = dependency_children(graph1, n1, edge_attrib=edge_attrib)\n n2_children = dependency_children(graph2, n2, edge_attrib=edge_attrib)\n\n if not n1_children or not n2_children:\n return 0\n else:\n result = 1 # neutral element of multiplication\n for n1_target, n2_target in common_dependency_targets(graph1, graph2, n1, n2,\n node_attrib=node_attrib):\n result *= (count_common_subgraphs(graph1, graph2,\n n1_target, n2_target,\n node_attrib='label',\n edge_attrib='label') + 2)\n return result - 1", "def part1(programs):\n count = 0\n for program in programs:\n if program.connected(0)[0]:\n count += 1\n\n return count", "def commonCharacterCount(s1, s2):\n return sum(min(s1.count(x),s2.count(x)) for x in set(s1))", "def _num_conn_comp(graph):\n\n return nx.number_connected_components(graph)", "def compareInterface (dPDB1, dPDB2) :\n \n\tnbInter = 0\n\n\t\n\tfor chain in dPDB1 :\n\t\tif not chain == \"nchaine\":\n\t\t\t\n\t\t\tfor res in dPDB1[chain][\"position\"] :\n\t\t\t\t\n\t\t\t\t#If both residues belong to the interface, the number of interface shared residues increases\n\t\t\t\tif dPDB1[chain][res][\"bfactor\"] and dPDB2[chain][res][\"bfactor\"] :\n\t\t\t\t\tnbInter += 1\n\t\t\t\t\t\t\t\t\n\treturn nbInter", "def common_repos(self, user, other):\n s = 0.0\n for r in self.user_repos[user]:\n if r in self.user_repos[other]:\n s += 1.0\n return s", "def count_common_connections(network, user_A, user_B):\n if user_A not in network or user_B not in 
network:\n return False\n common_connections = 0\n for conn in network[user_A]['connections']:\n if conn in network[user_B]['connections']:\n common_connections += 1\n return common_connections", "def commonality(left_struc, right_struc):\n assert type(left_struc) is type(right_struc), (left_struc, right_struc)\n assert left_struc and right_struc, (left_struc, right_struc)\n if type(left_struc) is dict:\n (overlap, left, right) = compute_keysets(left_struc, right_struc)\n com = float(len(overlap))\n tot = len(overlap.union(left, right))\n else:\n assert type(left_struc) in (list, tuple), left_struc\n com = 0.0\n for elem in left_struc:\n if elem in right_struc:\n com += 1\n tot = max(len(left_struc), len(right_struc))\n\n return com / tot", "def _common_prefix(sequence1, sequence2):\n i = 0\n for elem1, elem2 in zip(sequence1, sequence2):\n if elem1 != elem2:\n return i\n i += 1\n\n # Return length of sequence if sequences are identical\n return min(len(sequence1), len(sequence2))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compare two mpi communicators. Returns true if the two communicators are handles for the same group of processes and for the same communication context.
def compare_comm(comm_1, comm_2): assert comm_1 != MPI.COMM_NULL assert comm_2 != MPI.COMM_NULL result = MPI.Comm.Compare(comm_1, comm_2) res = [MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR, MPI.UNEQUAL] return result == res[0]
[ "def compare_groups(comm_1, comm_2):\n assert comm_1 != MPI.COMM_NULL\n assert comm_2 != MPI.COMM_NULL\n result = MPI.Comm.Compare(comm_1, comm_2)\n res = [MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR, MPI.UNEQUAL]\n return result in res[:-1]", "def mpi_multi():\n comm, procs, rank = mpi_world()\n if procs > 1:\n return True\n else:\n return False", "def has_mpi_peer_processes():\n return mpi4py_available and MPI.COMM_WORLD.Get_size() > 1", "def is_parent(child, parent):\n # Get the list of processes\n assert child is not None\n assert parent is not None\n #child_ranks = [i for i in xrange(child.Get_size())]\n child_group = child.Get_group()\n parent_group = parent.Get_group()\n inter_group = MPI.Group.Intersect(child_group, parent_group)\n return child_group.Get_size() == inter_group.Get_size()", "def can_communicate_with(self, target):\n if self == target:\n return True\n msg = 'You try to connect topologies belonging to'\n msg += ' two different mpi tasks. Set taskids properly or use'\n msg += ' InterBridge.'\n assert self.task_id() == target.task_id(), msg\n\n # Parent communicator\n # Todo : define some proper conditions for compatibility\n # between topo_from, topo_to and parent:\n # - same size\n # - same domain\n # - common processus ...\n # At the time we check that both topo have\n # the same comm_origin.\n return self.is_consistent_with(target)", "def hybrid_mpi_omp(self):\n return self.has_omp and self.has_mpi", "def using_mpi(self) -> bool:\r\n\r\n try:\r\n from mpi4py import MPI\r\n\r\n comm = MPI.COMM_WORLD\r\n return comm.size > 1\r\n\r\n except ModuleNotFoundError:\r\n return False", "def _compare(smi1, smi2):\n return _canonicalize(smi1) == _canonicalize(smi2)", "def Mirrorprocs(p1, p2):\n return False", "def has_same_connectivity(self, other):\n if len(self.atoms) != len(other.atoms):\n raise Exception('Require the same number of atoms to test connectivity')\n\n for atom_s, atom_o in zip(self, other):\n if atom_s.symbol != atom_o.symbol or atom_s.idx != atom_o.idx:\n raise Exception('Atoms have to be in the same order to test connectivity')\n\n if set(atom_s.connections.values()) != set(atom_o.connections.values()):\n return False\n\n return True", "def _fake_message_compare(m1, m2):\r\n m1 = m1.serialize()\r\n m2 = m2.serialize()\r\n diff = False\r\n for i in range(len(m1)):\r\n if m1[i] is None:\r\n continue\r\n if m1[i] != m2[i]:\r\n diff = True\r\n break\r\n return not diff", "def isHandle(self):\n return self.type in mpi_handle_types", "def mutexPropositions(prop1, prop2, mutexActions):\n for a1 in prop1.getProducers():\n for a2 in prop2.getProducers():\n if Pair(a1, a2) not in mutexActions:\n return False\n return True", "def clusters_are_identical(one, two):\n if not len(one) == len(two):\n return False\n for subA, subB in zip(one, two):\n if not subA.ipg or not subB.ipg:\n return False\n if subA.ipg != subB.ipg:\n return False\n return True", "def _on_same_device(self, other: \"PArray\") -> bool:\n this_device = self._current_device_index\n return this_device in other._array", "def object_communicator():\n comm = MPI.COMM_WORLD", "def basic_compare(self, other: \"Molecule\") -> bool:\n return self.inchi_key[:14] == other.inchi_key[:14]", "def are_commuting(a: 'PauliOperator', b: 'PauliOperator') -> bool:\n if not isinstance(a, PauliOperator) or not isinstance(b, PauliOperator):\n raise Exception(\"Only supports PauliOperator\")\n\n if (a,b) in PauliOperator._anticommute_tbl:\n return False\n \n else:\n return True", "def compare_biosystem(biosystem1, biosystem2):\n if 
biosystem1['system_parameter']['time'] != biosystem2['system_parameter']['time']:\n return False\n list1, list2 = map(hash_gates_and_simulation, (biosystem1, biosystem2))\n if DEBUG:\n print debug_info(), list1, list2\n if sorted(list1) != sorted(list2):\n return False\n net1, net2 = map(BioSystemNetwork, (biosystem1, biosystem2))\n return sorted(net1.reaction_lines_hash) == sorted(net2.reaction_lines_hash)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compare the groups of two mpi communicators. Returns true if each comm handles the same group of mpi processes.
def compare_groups(comm_1, comm_2): assert comm_1 != MPI.COMM_NULL assert comm_2 != MPI.COMM_NULL result = MPI.Comm.Compare(comm_1, comm_2) res = [MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR, MPI.UNEQUAL] return result in res[:-1]
[ "def compare_comm(comm_1, comm_2):\n assert comm_1 != MPI.COMM_NULL\n assert comm_2 != MPI.COMM_NULL\n result = MPI.Comm.Compare(comm_1, comm_2)\n res = [MPI.IDENT, MPI.CONGRUENT, MPI.SIMILAR, MPI.UNEQUAL]\n return result == res[0]", "def mpi_multi():\n comm, procs, rank = mpi_world()\n if procs > 1:\n return True\n else:\n return False", "def same_group(self,i,j):\n if self.group_number(i) == self.group_number(j):\n return True\n else:\n return False", "def is_parent(child, parent):\n # Get the list of processes\n assert child is not None\n assert parent is not None\n #child_ranks = [i for i in xrange(child.Get_size())]\n child_group = child.Get_group()\n parent_group = parent.Get_group()\n inter_group = MPI.Group.Intersect(child_group, parent_group)\n return child_group.Get_size() == inter_group.Get_size()", "def __cmp__(self, other):\n if not isinstance(other, GaloisGroup_v1):\n return cmp(type(self), type(other))\n return cmp( (self.__number_field, self.__group),\n (other.__number_field, other.__group) )", "def clusters_are_identical(one, two):\n if not len(one) == len(two):\n return False\n for subA, subB in zip(one, two):\n if not subA.ipg or not subB.ipg:\n return False\n if subA.ipg != subB.ipg:\n return False\n return True", "def _check_collision(self, group1, group2):\n ids1 = self.collision_groups[group1]\n ids2 = self.collision_groups[group2]\n\n for coni in range(0, self._sim.data.ncon):\n con = self._sim.data.contact[coni]\n\n collision = con.geom1 in ids1 and con.geom2 in ids2\n collision_trans = con.geom1 in ids2 and con.geom2 in ids1\n\n if collision or collision_trans:\n return True\n return False", "def has_mpi_peer_processes():\n return mpi4py_available and MPI.COMM_WORLD.Get_size() > 1", "def equals(self, other):\n if not isinstance(other, PermutationGroup):\n return False\n\n set_self_gens = set(self.generators)\n set_other_gens = set(other.generators)\n\n # before reaching the general case there are also certain\n # optimisation and obvious cases requiring less or no actual\n # computation.\n if set_self_gens == set_other_gens:\n return True\n\n # in the most general case it will check that each generator of\n # one group belongs to the other PermutationGroup and vice-versa\n for gen1 in set_self_gens:\n if not other.contains(gen1):\n return False\n for gen2 in set_other_gens:\n if not self.contains(gen2):\n return False\n return True", "def _check_collision(self, group1, group2):\n\n ids1 = self.collision_groups[group1]\n ids2 = self.collision_groups[group2]\n\n for coni in range(0, self._data.ncon):\n con = self._data.contact[coni]\n\n collision = con.geom1 in ids1 and con.geom2 in ids2\n collision_trans = con.geom1 in ids2 and con.geom2 in ids1\n\n if collision or collision_trans:\n return True\n\n return False", "def is_normal_subgroup(self, other):\r\n if not(self<=other):\r\n return False\r\n if other.is_abelian():\r\n return True\r\n if other.index(self)==2:\r\n return True\r\n gens1 = self.group_gens\r\n gens2 = other.group_gens\r\n for g in gens2:\r\n for h in gens1:\r\n p = g * h * g**-1\r\n if not p in Set(self.group_elems):\r\n return False\r\n return True", "def intersection_size(comm_1, comm_2):\n if comm_1 == MPI.COMM_NULL or comm_2 == MPI.COMM_NULL:\n return None\n group_1 = comm_1.Get_group()\n group_2 = comm_2.Get_group()\n inter_group = MPI.Group.Intersect(group_1, group_2)\n return inter_group.Get_size()", "def using_mpi(self) -> bool:\r\n\r\n try:\r\n from mpi4py import MPI\r\n\r\n comm = MPI.COMM_WORLD\r\n return comm.size > 1\r\n\r\n except 
ModuleNotFoundError:\r\n return False", "def conversationIsGroup(message):\n if message.conversation == message.who:\n return False\n return True", "def test_distinct_grouping(self):\r\n\r\n # create second connection using the same backend\r\n self.create_connection(data={'identity': '77766655555',\r\n 'backend': self.conn1.backend})\r\n self.conns = Connection.objects.all()\r\n msg = self.create_outgoing_message(data={'connections': self.conns})\r\n grouped_identities = self.router.group_outgoing_identities(msg)\r\n backend_name = self.conn1.backend.name\r\n self.assertEqual(len(grouped_identities[backend_name]), 2)", "def __eq__(self, other):\n if isinstance(other, ViewerManagerGroup):\n return self.own_order == other.own_order\n else:\n return pyglet.graphics.OrderedGroup.__eq__(self, other)", "def can_communicate_with(self, target):\n if self == target:\n return True\n msg = 'You try to connect topologies belonging to'\n msg += ' two different mpi tasks. Set taskids properly or use'\n msg += ' InterBridge.'\n assert self.task_id() == target.task_id(), msg\n\n # Parent communicator\n # Todo : define some proper conditions for compatibility\n # between topo_from, topo_to and parent:\n # - same size\n # - same domain\n # - common processus ...\n # At the time we check that both topo have\n # the same comm_origin.\n return self.is_consistent_with(target)", "def hybrid_mpi_omp(self):\n return self.has_omp and self.has_mpi", "def _compare(smi1, smi2):\n return _canonicalize(smi1) == _canonicalize(smi2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the values of ranks in target from ranks in source.
def convert_ranks(source, target): assert source != MPI.COMM_NULL and target != MPI.COMM_NULL g_source = source.Get_group() g_target = target.Get_group() size_source = g_source.Get_size() r_source = [i for i in xrange(size_source)] res = MPI.Group.Translate_ranks(g_source, r_source, g_target) return {r_source[i]: res[i] for i in xrange(size_source)}
[ "def findRanks(toBeRanked, values):\n\treturn list(map(lambda e: findRank(e, values), toBeRanked))", "def find_vectors_mapping(source, target):\n return [np.argmax([abs(scalar_projection(s, t)) for t in target]) for s in source]", "def findClosestNodes(self, target: hash.hash.Hash):\n # TODO: make more efficient\n # See: http://stackoverflow.com/questions/30654398/implementing-find-node-on-torrent-kademlia-routing-table\n \n nodes = []\n \n for bucket in self.buckets:\n nodes = nodes + bucket.nodes\n\n nodes.sort(key=lambda x: nodes.distanceToHash(targetHash))\n\n return nodes[:config.K]", "def searchRange(self, nums, target):\n left = self.leftIndex(nums, target)\n right = self.rightIndex(nums,target)\n \n return [left, right]", "def find_range_for_target(self, target_value):\n result = None\n head_idx = 0\n tail_idx = 1\n current_sum = self.all_values[0] + self.all_values[1]\n list_length = len(self.all_values)\n while tail_idx < list_length and current_sum != target_value:\n print(\n f\"Iterating target={target_value}, current={current_sum}, considering={head_idx},{tail_idx} -> {self.all_values[head_idx: tail_idx + 1]}\"\n )\n # which way do we need to go ?\n if current_sum < target_value:\n # we need to include more values, so moving the tail\n tail_idx += 1\n current_sum += self.all_values[tail_idx]\n else:\n # we need to reduce the total, so moving the head\n current_sum -= self.all_values[head_idx]\n head_idx += 1\n # we might now have the two indexes pointing at the same place, which would be impossible\n if head_idx == tail_idx:\n tail_idx += 1\n current_sum += self.all_values[tail_idx]\n\n # and return the thing\n if current_sum == target_value:\n result = self.all_values[head_idx : tail_idx + 1]\n\n return result", "def _map_dims_(\n cls,\n source_array: List[int],\n target_array: List[int],\n source_idx: int,\n start_target_idx: int,\n source_to_target_map: DIMENSION_MAP,\n target_to_source_map: DIMENSION_MAP,\n ) -> Tuple[bool, int]:\n res, last_target_index = cls._can_reach_number_by_multiply(\n number_to_reach=source_array[source_idx], array=target_array, start_idx=start_target_idx\n )\n if not res:\n return (res, last_target_index)\n source_to_target_map[source_idx] = list(range(start_target_idx, last_target_index + 1))\n for idx in range(start_target_idx, last_target_index + 1):\n target_to_source_map[idx] = [source_idx]\n return (res, last_target_index)", "def get_scores(self):\n\n\t\tscores = np.dot(self.rankings, self.weights)\n\t\tranked_indices = np.argsort(scores)\n\t\tranked_sources = self.source_names[ranked_indices]\n\t\tranked_scores = sorted(scores)\n\t\tself.scores = {source:score for source, score in zip(ranked_sources, ranked_scores)}\n\n\t\treturn self.scores", "def _gen_matches(target_units, source_units, stoplist_set, features_size):\n for hits2positions in gen_hits2positions(\n target_units, source_units, stoplist_set, features_size):\n overhits2positions = {\n k: np.array(v) for k, v in hits2positions.items()\n if len(v) >= 2}\n for (t_ind, s_ind), positions in overhits2positions.items():\n yield (t_ind, s_ind, positions)", "def extract_targets(scores: [], quartiles: []) -> []:\n targets = [None] * len(scores)\n\n q1 = quartiles[0]\n q2 = quartiles[1]\n q3 = quartiles[2]\n\n for i in range(0, len(scores)):\n score = int(scores[i])\n if score < q1:\n targets[i] = 1\n elif score < q2:\n targets[i] = 2\n elif score < q3:\n targets[i] = 3\n else:\n targets[i] = 4\n # end for loop\n\n return targets", "def get_target_rank_list(daos_object):\n try:\n 
daos_object.get_layout()\n return daos_object.tgt_rank_list\n except DaosApiError as error:\n raise DaosTestError(\n \"Error obtaining target list for the object: {}\".format(\n error)) from error", "def find_miRNA_target(self, target_names):\n miRNAs = set()\n expr = defaultdict(list)\n supt = defaultdict(list)\n pmid = defaultdict(list)\n if self.tfdb is not None:\n t = (target_names[0],)\n res = self.tfdb.execute(\"SELECT * FROM mirnaInfo \"\n \"WHERE target = ? \", t).fetchall()\n if res:\n for r in res:\n miRNAs.add(r[1])\n expr[(r[1],target_names[0])].append(r[3])\n supt[(r[1],target_names[0])].append(r[4])\n pmid[(r[1],target_names[0])].append(str(r[5]))\n else:\n raise TargetNotFoundException\n \n if len(target_names)>1:\n for i in range(1,len(target_names)):\n t = (target_names[i],)\n res = self.tfdb.execute(\"SELECT * FROM mirnaInfo \"\n \"WHERE target = ? \", t).fetchall()\n if res:\n miRNAs = miRNAs & set([r[1] for r in res])\n for r in res:\n expr[(r[1],target_names[i])].append(r[3])\n supt[(r[1],target_names[i])].append(r[4])\n pmid[(r[1],target_names[i])].append(str(r[5]))\n else:\n raise TargetNotFoundException\n return miRNAs,expr,supt,pmid", "def get_target_array(inp: list, target_value: int):\n \n for i in inp:\n if i < target_value:\n pair = target_value - i\n if pair in inp:\n # print(f\"the first number= {i} the second number {pair}\")\n return[inp.index(i), inp.index(pair)]\n break", "def sinks(self):\n if self.rank < self.midpoint:\n partner = self.midpoint + (self.rank - self.left)\n if partner == self.right:\n partner -= 1\n else:\n partner = self.left + (self.rank - self.midpoint)\n if partner == self.midpoint:\n partner -= 1\n\n return {partner}", "def __classify(self, target):\n values_list = []\n n = len(self.problem)\n dif = list(set(self.problem)) # different numbers of the problem\n for _ in range(10):\n numbers = random.sample(range(500000, 1000000), len(dif))\n values = {dif[i]: numbers[i] for i in range(len(dif))}\n values[0], values[1] = 0, 1\n values_list.append(values)\n answers = _get_all_expr(self.problem, n, target)\n\n uid_table, uid_r1_table = {}, {}\n for expr in answers:\n uid = expr.unique_id()\n uid_table[uid] = expr\n uid_r1 = expr.unique_id_for_rule_1(values_list)\n if uid_r1 in uid_r1_table:\n self.__parent[uid] = uid_r1_table[uid_r1]\n self.__rank[uid] = 1\n else:\n self.__parent[uid] = uid\n uid_r1_table[uid_r1] = uid\n self.__rank[uid] = 2\n\n for expr in answers:\n uid1 = expr.unique_id()\n for expr2 in expr.all_equivalent_expression():\n uid2 = expr2.unique_id()\n self.__union(uid1, uid2)\n\n return_dict = {}\n for expr in answers:\n uid = expr.unique_id()\n return_dict[uid] = self.__root(uid)\n\n return answers, return_dict", "def as_paired_ranks(x, y):\n n = len(x)\n paired = zip(x,y)\n x = list(x)\n y = list(y)\n x.sort()\n y.sort()\n rank_val_map_x = dict(zip(x, range(n)))\n rank_val_map_y = dict(zip(y, range(n)))\n ranked = []\n for i in range(n):\n ranked += [[rank_val_map_x[paired[i][0]], rank_val_map_y[paired[i][1]]]]\n return ranked", "def _compute_sequence_item_matches(source_sequence, target_sequence):\n result = []\n\n len_source = len(source_sequence)\n len_target = len(target_sequence)\n num_matches = len_source if len_source < len_target else len_target\n\n # Rank each target item against each source item.\n # Each source item will have a list containing all the items in the\n # target sequence.\n match_candidates = {}\n for src_idx, src_item in enumerate(source_sequence):\n tgt_candidates = 
_compute_sequence_item_match_candidates(\n src_item, target_sequence)\n match_candidates[src_idx] = tgt_candidates\n\n # Compute the best target item match for each source item.\n # It's possible for multiple source items to be similar the same target\n # item.\n # This loop ensures the highest scoring source-target pairs are matched.\n matches = {}\n while len(matches) < num_matches and match_candidates:\n # Each iteration pops the top-scoring target item for each source item.\n round_picks = []\n for src_idx, tgt_candidates in match_candidates.iteritems():\n round_pick = tgt_candidates.pop(0)\n round_picks.append((round_pick.score, src_idx, round_pick))\n\n # The picks are sorted so a source-target pair with a higher similarity\n # is matched before another source-target that shares the same target.\n round_picks.sort()\n round_picks.reverse()\n\n # Create matches between unclaimed target items and source items.\n for _, src_idx, candidate in round_picks:\n if candidate.target_index in matches:\n # Target item has already been matched to a higher-scoring\n # (more similar) source item. The next iteration round will\n # attempt to match the source item with the next best target\n # item.\n continue\n matches[candidate.target_index] = (src_idx, candidate)\n del match_candidates[src_idx]\n\n for src_idx, match in matches.values():\n result.append((src_idx, match.target_index, match.hits, match.misses))\n\n return result", "def get_targets(\n self, source: Tuple[str, str], relation: Optional[str] = None\n ) -> List[Node]:\n return self.get_common_targets([source], relation)", "def find_targetnodes(self):\n\n self.connect_backwards()\n\n targetnodes = []\n for n in self.find_datanodes():\n if len(n.receives_from) > 0:\n targetnodes.append(n)\n return targetnodes", "def similarity_matrix(source, target):\n result = numpy.zeros((len(source), len(target)))\n for i, source_item in enumerate(source):\n for j, target_item in enumerate(target):\n result[i, j] = similarity(source_item, target_item)\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute drift score as the percentage of overlapping probabilities
def compute_drift_score(ref_col_prob, col_prob): return sum(abs(np.asarray(ref_col_prob) - np.array(col_prob)) * 100)
[ "def score(self):\n s = 100*self.coverage-10*self.overlapRatio-0.01*self.traveledDist\n return s", "def drift_score(self):\n if self.measured_val is None:\n return 0.0\n\n if self.rebalance_type == self.REBALANCE_TYPE_ABSOLUTE:\n return (self.measured_val - self.configured_val) / self.rebalance_thr\n else:\n return ((self.measured_val - self.configured_val) / self.configured_val) / self.rebalance_thr", "def ppp_score_percentage(self):\r\n objects = self.__get_objects()\r\n z1 = str(objects[1]).strip().split()\r\n z2 = float(z1[14]) # will be the floating point number 8.3\r\n db = binom.rvs(n=10, p=z2, size=10000)\r\n a = np.array(db)\r\n b = np.mean(a)*100/10\r\n return b", "def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )", "def eval_prob(rolls, fn):\n t = f = 0\n for roll in rolls:\n if fn(roll):\n t = t + 1\n else:\n f = f + 1\n total = t + f\n #print \"t={}, f={}, total={}\".format(t, f, total)\n return t / float(total) * 100.0 if total != 0 else None", "def estBoundMatchScore(truebounds,estbounds,penalty=2.0): \n fixedval = 100000000000\n scoremat1, scoremat2 = getScoreMat(truebounds[\"start\"], estbounds[\"start\"],fixedval), getScoreMat(truebounds[\"end\"], estbounds[\"end\"],fixedval) \n curscores = []\n for cind,cscoremat in enumerate([scoremat1,scoremat2]):\n truelen, estlen = (len(truebounds[\"start\"]), len(estbounds[\"start\"])) if cind == 0 else (len(truebounds[\"end\"]), len(estbounds[\"end\"])) \n tcumscores = matchPenalized(cscoremat,fixedval,penalty,truelen,estlen) \n zercount = sum(1 for item in tcumscores if abs(item) < 0.001)\n print \"inside: \",zercount\n curscores.extend(tcumscores) \n cumscore = sum(curscores) \n len2dist = {}\n for score in curscores:\n len2dist.setdefault(score,0)\n len2dist[score] += 1\n print len2dist \n print \"mean: \",np.mean(curscores)\n print \"std: \",np.std(curscores) \n exit(1) \n return np.mean(curscores)", "def compute_acc(best_segments, targets):\n n_correct = 0\n\n for topic_id in best_segments.keys():\n for i in range(len(best_segments[topic_id])):\n n_correct_element = 0\n for episode_id in best_segments[topic_id][i].keys():\n if any([does_overlap(best_segments[topic_id][i][episode_id], (target_timespan, target_timespan+60)) for target_timespan in targets[topic_id + '-' + episode_id]]):\n n_correct_element += 1\n\n if n_correct_element > 0:\n n_correct += 1\n break\n\n\n return n_correct/len(best_segments)", "def dice_score(ground_truth, prediction):\r\n\r\n # Normalize\r\n prediction /= np.amax(prediction)\r\n ground_truth /= np.amax(ground_truth)\r\n\r\n true_positive_mask = np.logical_and(ground_truth==1, prediction==1)\r\n false_positive_mask = np.logical_and(ground_truth==0, prediction==1)\r\n false_negative_mask = np.logical_and(ground_truth==1, prediction==0)\r\n\r\n TP = np.count_nonzero(true_positive_mask)\r\n FP = np.count_nonzero(false_positive_mask)\r\n FN = np.count_nonzero(false_negative_mask)\r\n\r\n DSC = 2*TP / (2*TP + FP + FN)\r\n\r\n return DSC", "def pct_match(self, s1, s2, comp_length):\n\n matches = self.max_freq[s1:s1+comp_length] \\\n == self.max_freq[s2:s2+comp_length]\n return np.ma.sum(matches) / np.ma.count(matches)", "def estPointMatchScore(truebounds,estbounds,penalty=2.0): \n fixedval = 100000000000\n scoremat = getScoreMat(truebounds, estbounds, fixedval)\n truelen, estlen = len(truebounds), len(estbounds)\n curscores = 
matchPenalized(scoremat,fixedval,penalty,truelen,estlen)\n #return np.mean(curscores)\n \n\n scoremat = np.zeros((len(truebounds),len(estbounds)),dtype=np.float64)\n for trind,trbound in enumerate(truebounds):\n for estind,estbound in enumerate(estbounds):\n scoremat[trind,estind] = abs(trbound - estbound)\n scores = []\n for trind in xrange(len(truebounds)):\n score = min(scoremat[trind,:])\n scores.append(score)\n print \"infom:\" \n print \"mean \",np.mean(scores)\n print \"median \",np.median(scores)\n print \"std \",np.std(scores)\n len2dist = {}\n for mylen in scores:\n len2dist.setdefault(mylen,0)\n len2dist[mylen] += 1 \n print len2dist\n print \"done\"\n len2dist[50] = 10\n for ind in xrange(8):\n scores.append(50)\n print np.mean(scores) \n #len2dist = {0.0: 10, 1.0: 14, 2.0: 7, 3.0: 3, 4.0: 8, 5.0: 5, 6.0: 7, 7.0: 2, 8.0: 6, 9.0: 5, 10.0: 2, 11.0: 5, 12.0: 2, 15.0: 1, 16.0: 1, 20.0: 3, 22.0: 3, 24.0: 1, 27.0: 1}\n exit(1) \n m = Munkres()\n scoremat2 = deepcopy(scoremat) \n indexes = m.compute(scoremat2)\n curscore = sum(scoremat[ind1,ind2] for ind1,ind2 in indexes)\n print \"curscore is: \",curscore\n exit(1)\n print curscore/float(min(len(truedoms),len(estdoms)))\n if len(truedoms) < len(estdoms):\n usedinds = set(eind for tind,eind in indexes)\n reminds = set(range(0,len(estdoms))).difference(usedinds)\n curscore += sum(min(scoremat[:,eind]) for eind in reminds) \n return curscore/float(max(len(truedoms),len(estdoms)))", "def score(i):\n a = assessments[i]\n peers = get_peers(i)\n if len(peers) == 0:\n return None\n # clamp 1/prior above by 10 (see note in 2016-03-ORA2-scoring)\n return np.mean([(min(10, float(n)/counts[a.points])\n if a.points == assessments[j].points else 0)\n for j in peers])", "def calculate_probability(self):\n return 0", "def percent_score(self):\n return self.score * 100", "def delta(tval, tp_confidences, fp_confidences, num_samples):\n tp_percentage = \\\n np.sum([1 for x in tp_confidences if x > tval]) / num_samples\n if fp_confidences:\n fp_percentage = np.sum([1 for x in fp_confidences if x > tval]) / \\\n len(fp_confidences)\n else:\n fp_percentage = 0\n optimal_tp = len(tp_confidences) / num_samples\n delta_value = (tp_percentage - optimal_tp) ** 2 + fp_percentage ** 2\n return delta_value, tp_percentage, fp_percentage", "def percentOverlap(x1, x2):\n nonZeroX1 = np.count_nonzero(x1)\n nonZeroX2 = np.count_nonzero(x2)\n minX1X2 = min(nonZeroX1, nonZeroX2)\n percentOverlap = 0\n if minX1X2 > 0:\n percentOverlap = float(np.dot(x1.T, x2)) / float(minX1X2)\n return percentOverlap", "def calculate_percent_match(primers,\n seq_count,\n exclude_seq_count=1):\n # Calculate percent of sequences that are 'hit' by each primer\n for n in range(len(primers)):\n # Calculate percent perfect match\n primers[n].percent_match=float(primers[n].match_count/seq_count)\n primers[n].non_specific_percent=\\\n float(primers[n].non_specific_hits/exclude_seq_count)\n \n return primers", "def mfpe(actual, prediction):\n actual, prediction = np.array(actual), np.array(prediction)\n return np.mean((actual - prediction) / actual) * 100", "def calculate_strict(self, predictor, X, Y):\n stats = [int(p == y) # success\n for y, p in ((Y[i], p) for i, p in\n enumerate(predictor.predict(X)))]\n score = reduce(lambda x, y: x + y, stats) / len(stats)\n return score * 100", "def computeOverPredict(predicted_scores, full_mapping_dict):\n # initialize the numerator and denominators\n over_guesses = 0.0\n total_wrong = 0.0\n \n # compare with the actual values\n for phrase in 
predicted_scores:\n # check if our prediction was correct or not\n if predicted_scores[phrase] != int(full_mapping_dict[phrase]):\n total_wrong += 1\n \n # did we over- or under-guess?\n if predicted_scores[phrase] > int(full_mapping_dict[phrase]):\n over_guesses += 1\n \n # return the ratio of over-guesses to incrorrect predictions \n return over_guesses / total_wrong" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Combine training and inference datasets as one data frame
def combine_train_infer(train_file, infer_dir): train_df = pd.read_feather(train_file) time_range = range(len([f for f in os.listdir(infer_dir) if 'feather' in f])) infer_df_list = [pd.read_feather(f'{infer_dir}/{t}.feather') for t in time_range] comb_df_list = [] train_df.index = [-1] * len(train_df) comb_df_list.append(train_df) for t in time_range: df = infer_df_list[t] df.index = [t] * len(df) comb_df_list.append(df) return pd.concat(comb_df_list), train_df, infer_df_list
[ "def load_train_and_dev() -> Tuple[pd.DataFrame, pd.DataFrame]:\n return load_split('train'), load_split('dev')", "def triples(self):\n return pd.concat((self._load_train(), self._load_valid(), self._load_test()))", "def _get_input_dataset(self):\n print(\"[INFO] Loading training data as tf.data.Dataset\")\n train = tf.data.Dataset.from_tensor_slices((self.training_set[\"path\"], self.training_set[\"diagnosis\"]))\n\n print(\"[INFO] Performing auto augmentation on training data...\")\n train_aug_fn = partial(preprocess.augment_image, image_size=self.image_size, format=self.train_image_suffix[1:])\n train = train.map(train_aug_fn)\n train = train.shuffle(buffer_size=10000)\n train = train.batch(batch_size=self.batch_size)\n\n print(\"[INFO] Loading validation data as tf.data.Dataset\")\n val_load_fn = partial(preprocess.load_images, image_size=self.image_size, format=self.val_image_suffix[1:])\n val = tf.data.Dataset.from_tensor_slices((self.validation_set[\"path\"], self.validation_set[\"diagnosis\"]))\n val = val.map(val_load_fn)\n val = val.shuffle(buffer_size=10000)\n val = val.batch(batch_size=self.batch_size)\n\n print(\"{t:<20}: {batch_size}\".format(t=\"Batch size\", batch_size=self.batch_size))\n print(\"{t:<20}: {image_size}\".format(t=\"Image size\", image_size=self.image_size))\n\n return train, val", "def build_all_datasets(\n cfg, tokenizer, train_valid_test_num_samples,\n):\n train_dataset = RetroQAFineTuneDataset(\n cfg.train_ds.get('file_name'),\n tokenizer,\n cfg.train_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.train_ds.get('seq_length'),\n cfg.train_ds.get('add_bos'),\n cfg.train_ds.get('add_eos'),\n train_valid_test_num_samples[0],\n cfg.train_ds.get('seed'),\n cfg.train_ds.get('neighbors'),\n )\n val_dataset = RetroQAFineTuneDataset(\n cfg.val_ds.get('file_name'),\n tokenizer,\n cfg.val_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.val_ds.get('seq_length'),\n cfg.val_ds.get('add_bos'),\n cfg.val_ds.get('add_eos'),\n train_valid_test_num_samples[1],\n cfg.val_ds.get('seed'),\n cfg.val_ds.get('neighbors'),\n )\n test_dataset = RetroQAFineTuneDataset(\n cfg.test_ds.get('file_name'),\n tokenizer,\n cfg.test_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.test_ds.get('seq_length'),\n cfg.test_ds.get('add_bos'),\n cfg.test_ds.get('add_eos'),\n train_valid_test_num_samples[2],\n cfg.test_ds.get('seed'),\n cfg.test_ds.get('neighbors'),\n )\n\n return train_dataset, val_dataset, test_dataset", "def to_DataFrames(self):\r\n # list of all features + appended response label\r\n labels = list(self.features) + [self.response]\r\n # need to reshape y to get correct shape to broadcast when stacking\r\n # training data\r\n df_tn = DataFrame(data = hstack(\r\n (self.X_train, self.y_train.reshape([self.y_train.shape[0], 1]))),\r\n columns = labels)\r\n # test data\r\n df_tt = DataFrame(data = hstack(\r\n (self.X_test, self.y_test.reshape([self.y_test.shape[0], 1]))),\r\n columns = labels)\r\n return df_tn, df_tt", "def __build_dataframe(self):\n\n paths = list(self.__model_accuracies.keys())\n validation_accuracies = list(self.__model_accuracies.values())\n\n # Building a dataframe with the prediction of each model.\n # Explanation: we read each csv, using only the Prediction column.\n # We then multiply that column for the valitation accuracy of the related\n # model. 
We merge every csv in one single dataframe.\n ensemble_df = pd.concat([pd.read_csv(path, usecols=[\"Prediction\"]).rename(\n columns={\"Prediction\": f\"Prediction{self.__model_names[i]}\"}) \\\n * validation_accuracies[i] for i, path in\n enumerate(paths)], axis=1)\n\n # Updating the index to match with the submission one:\n ensemble_df.index += 1\n\n # Printing the result\n print(ensemble_df)\n\n return ensemble_df", "def run_test(self):\n self.output_analytics = self.run_inference()\n self.output_df = pd.DataFrame(self.output_analytics)", "def get_model_comparison_dataframe(fitter):\n model_results = {}\n for posterior, weight, chain, evidence, model, data, extra in fitter.load():\n n = extra[\"name\"]\n if model_results.get(n) is None:\n model_results[n] = []\n i = posterior.argmax()\n a, s = weighted_avg_and_std(chain[:, 0], weights=weight)\n model_results[n].append([a, s, chain[i, 0], posterior[i], extra[\"realisation\"]])\n\n for label in model_results.keys():\n model_results[label] = pd.DataFrame(model_results[label], columns=[\"avg\", \"std\", \"max\", \"posterior\", \"realisation\"])\n\n # This shouldnt be necessary, but if a job or two gets killed or a node restarted, this will remove that realisation\n all_ids = pd.concat(tuple([m[[\"realisation\"]] for m in model_results.values()]))\n counts = all_ids.groupby(\"realisation\").size().reset_index()\n max_count = counts.values[:, 1].max()\n good_ids = counts.loc[counts.values[:, 1] == max_count, [\"realisation\"]]\n num_dropped = (counts.values[:, 1] != max_count).sum()\n if num_dropped:\n logging.warning(\n f\"Warning, {num_dropped} realisations did not have a fit for all models.\"\n + \" Rerun with fitter = Fitter(dir_name, remove_output=False) to fill in the missing fits\"\n )\n\n # Merge results\n summary = []\n for label, df in model_results.items():\n model_results[label] = pd.merge(good_ids, df, how=\"left\", on=\"realisation\")\n summary.append([label, np.mean(model_results[label][\"avg\"]), np.mean(model_results[label][\"std\"]), np.std(model_results[label][\"avg\"])])\n summary = pd.DataFrame(summary, columns=[\"Name\", \"Mean mean\", \"Mean std\", \"Std mean\"])\n\n return model_results, summary", "def build_train_eval_datasets(args):\n if args.trainset_type == 'png':\n assert args.train_flist is not None\n train_list = data.get_directories(args.train_flist)\n\n # train_list1 = []\n if args.eval_flist is None:\n train_list, eval_list = data.get_split_list(train_list, 0.9, random_perm=True)\n else:\n eval_list = data.get_directories(args.eval_flist)\n # test data\n if args.test_flist is not None:\n test_list = data.get_directories(args.test_flist)\n\n # Create dataset from training and evaluation files\n train_data = data.build_dataset(train_list, args, training=True)\n\n eval_data = data.build_dataset(eval_list, args, dir_flist=True, training=False)\n\n #test data\n if args.test_flist is not None:\n test_data = data.build_dataset(test_list, args, dir_flist=True, training=False)\n\n\n if args.test_flist is not None:\n return train_data, eval_data, test_data\n\n return train_data, eval_data, #test_data", "def fetchTrainingData(self, debug=True):\n\n training_datasets = []\n item_list = self.api.get_relation_data()\n\n for item in item_list:\n book1, book2 = self.__createBooksData(item)\n result = self.__createResult(item)\n training_datasets.append({\n 'book1': book1,\n 'book2': book2,\n 'result': result\n })\n\n return training_datasets", "def setup(self):\n # TODO check if need both dataset together\n 
self.train_dataset = ABSADataset(data_path=self.train_path, mode=self.in_mode, task=self.task, \n tokenizer=self.tokenizer, vocab=\"bert\")\n self.vocabulary = self.train_dataset.vocabulary\n\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=self.vocabulary)\n #self.train_restaurant = ABSADataset(data_path=RESTAURANT_TRAIN)\n #self.eval_restaurant = ABSADataset(data_path=RESTAURANT_DEV)", "def get_training_data(index):\n top_k = 18\n facts = dict()\n # codes, labels, new_entities = get_initial_wikidata_facts(dict(itertools.islice(entity_index.items(), 15)))\n codes, labels, new_entities = get_initial_wikidata_facts(index) # s or o from the book index\n facts.update(labels)\n index.update(new_entities) # take in the new entities \n sec_codes, sec_labels, sec_new_entities = get_secondary_wikidata_facts(codes, top_k)\n # define the possible KB relations for test time\n # best_rels_obj = sorted(codes.items(), key=lambda item: len(item[1]), reverse=True)[:top_k]\n desired_rels = list(sec_codes.keys())\n # search textual patterns between entities of found KB facts for the most prominent relations \n wikipedia_evidences = get_wikipedia_evidences(sec_labels)\n neg_data = prepare_neg_data(wikipedia_evidences, 5)\n data_statistics = {kb_rel:len(evidences.keys()) for kb_rel, evidences in wikipedia_evidences.items()}\n # [{row:..., seen_with:[...], column:..., label: 0 or 1}, ...]\n with open('data/final_dataset.json', 'w+') as outfile:\n # final_dataset = []\n for kb_rel, evidences in wikipedia_evidences.items():\n # ! reduce P279 and P31:\n if kb_rel in ['P279', 'P31']:\n subset = random.sample(evidences.items(),100)\n evidences = dict(subset)\n for pair, relations in evidences.items(): \n # ! remove the relation itself to avoid predicting simply its position\n # seen_with = [i for i in relations if not i.startswith('P')]\n for mention in relations:\n to_add = {\n 'entity_pair': pair, \n 'seen_with': relations, \n 'relations': desired_rels,\n 'relation': kb_rel\n }\n # ! query with textual mentions as in USchema is too hard and \n # ! not sufficient (prob. for depend. paths works).\n # ! Go only for KB relations as query, also on. 
neg samples\n json.dump(to_add, outfile) # last is always path\n outfile.write('\\n')\n\n return index, desired_rels", "def ConcatDF(train_set, test_set):\n return pd.concat([train_set, test_set], sort=True).reset_index(drop=True)", "def additional_training_datasets(self):\r\n return []", "def build_dataset():\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n\n trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True,\n transform=transform_train)\n train_loader = DataLoader(trainset, batch_size=128, shuffle=True, num_workers=4)\n\n testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True,\n transform=transform_test)\n test_loader = DataLoader(testset, batch_size=100, shuffle=False, num_workers=4)\n\n # classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\n return train_loader, test_loader", "def train_test_data_df(train_data_file, test_data_file):\n dtype_dict = {\n \"age\": np.int32,\n \"education-num\": np.int32,\n \"capital-gain\": np.int32,\n \"capital-loss\": np.int32,\n \"hours-per-week\": np.int32\n }\n cols = [i for i in range(15) if i != 2]\n train_data = pd.read_csv(train_data_file, sep=\", \", header=0, dtype=dtype_dict, na_values=\"?\", usecols=cols)\n train_data = train_data.dropna(axis=0, how=\"any\")\n test_data = pd.read_csv(test_data_file, sep=\", \", header=0, dtype=dtype_dict, na_values=\"?\", usecols=cols)\n test_data = test_data.dropna(axis=0, how=\"any\")\n return train_data, test_data", "def process_dataset(self):\n\n logger.info('\\n')\n logger.info('=' * 40)\n logger.info('=\\t DeepRank Data Set')\n logger.info('=')\n logger.info('=\\t Training data')\n for f in self.train_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.valid_database:\n logger.info('=\\t Validation data')\n for f in self.valid_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.test_database:\n logger.info('=\\t Test data')\n for f in self.test_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n logger.info('=' * 40 + '\\n')\n sys.stdout.flush()\n\n # check if the files are ok\n self.check_hdf5_files(self.train_database)\n\n if self.valid_database:\n self.valid_database = self.check_hdf5_files(\n self.valid_database)\n\n if self.test_database:\n self.test_database = self.check_hdf5_files(\n self.test_database)\n\n # create the indexing system\n # alows to associate each mol to an index\n # and get fname and mol name from the index\n self.create_index_molecules()\n\n # get the actual feature name\n if self.mapfly:\n self.get_raw_feature_name()\n else:\n self.get_mapped_feature_name()\n\n # get the pairing\n self.get_pairing_feature()\n\n # get grid shape\n self.get_grid_shape()\n\n # get the input shape\n self.get_input_shape()\n\n # get renormalization factor\n if self.normalize_features or self.normalize_targets or self.clip_features:\n if self.mapfly:\n self.compute_norm()\n else:\n self.get_norm()\n\n logger.info('\\n')\n logger.info(\" Data Set Info:\")\n logger.info(\n f' Augmentation : {self.use_rotation} rotations')\n logger.info(\n f' Training set : {self.ntrain} conformations')\n logger.info(\n f' Validation set : {self.nvalid} 
conformations')\n logger.info(\n f' Test set : {self.ntest} conformations')\n logger.info(f' Number of channels : {self.input_shape[0]}')\n logger.info(f' Grid Size : {self.data_shape[1]}, '\n f'{self.data_shape[2]}, {self.data_shape[3]}')\n sys.stdout.flush()", "def Zip(datasets):\n return tf.data.Dataset.zip(datasets)", "def split_train_test():\n mg = mongo_methods.MongoAcess()\n doc_list = mg.read_to_ml(10)\n cut_point = int(len(doc_list) * 0.7)\n train_ds = doc_list[:cut_point]\n test_ds = doc_list[cut_point:]\n # print('train ' + str(train_ds))\n # print('test ' + str(test_ds))\n return train_ds, test_ds" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Call the shell script that handles BLAST database formatting.
def format_blast(makeblastdb_path, fname): # The script is written in shell, so this function just calls it and # checks the output # Build the shell command cmd = ['bash', DBFORMAT_SCRIPT, makeblastdb_path, fname] # Execute the script # shell=False to ensure that we aren't executing commands from untrusted # sources p = subprocess.Popen( cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() return (out, err)
[ "def dbshell(args=\"\"):\n with cd(env.app):\n run(\"%s | %s %s dbshell\" % (args, get_binary('python'), MANAGE))", "def make_blastDB(name, infile, db_type):\n cline = \"makeblastdb -in \"+ infile +\" -dbtype \"+ db_type +\" -title \"+ \\\n infile +\" -out \"+ name +\" -parse_seqids\"\n child = subprocess.Popen(str(cline), stdout=subprocess.PIPE, shell=True)\n output, error = child.communicate()\n return output", "def dbshell_command(subparsers):\n\n parser = subparsers.add_parser('dbshell', help=dbshell.__doc__)\n parser.add_argument(\"-n\", \"--dry-run\", dest=\"dryrun\", default=False,\n action='store_true',\n help=\"does not actually run, just prints what would do instead\")\n parser.set_defaults(func=dbshell)", "def run_blastcmd(db, fstring, batch_locations):\n x = subprocess.run('printf \"{}\" {}| blastdbcmd -db {} -entry_batch -'.format(fstring, batch_locations, db),\n shell=True, universal_newlines = True, stdout=subprocess.PIPE)\n if x.stderr:\n print(\"ERROR running blastdbcmd on {} :\\n{}\".format(db, batch_locations, x.stderr))\n sys.exit()\n else:\n return [i for i in x.stdout.split('\\n') if '>' not in i and len(i) > 0]", "def main():\n\n # Connect to the latest schemas.\n db = DatabaseConnection(path_config='db_config.yaml')\n schema = db.get_latest_schema('prod_')\n schema_profil = db.get_latest_schema('source_internal_profil_')\n db.execute('SET search_path=\"' + schema + '\", \"' + schema_profil + '\";')\n timestamp = schema[schema.rfind('_') + 1:]\n print('[OK] Dumping from schemas \"%s\" and \"%s\"...' % (schema, schema_profil))\n\n # Read YAML configuration file.\n config = yaml_load('public_dumps.yaml')\n dir_save = config['save_directory']\n dumps = config['dumps']\n\n # Process all dumps.\n for dump_name in dumps:\n save_path = os.path.join(dir_save, '%s_%s.csv' % (dump_name, timestamp))\n db.dump_to_CSV(dumps[dump_name]['query'], save_path)\n print('[OK] Saved dump \"%s\" to %s' % (dump_name, save_path))\n\n stage_path = os.path.join(dir_save, dump_name + '.csv')\n shutil.copyfile(save_path, stage_path)\n print('[OK] Copied dump \"%s\" to %s' % (dump_name, stage_path))\n\n # Close database connection.\n db.close()", "def run_script():\n\n tables_dict = parse_table_data()\n source_dtypes = get_data_types(tables_dict)\n data_map = OrderedDict(zip(source_dtypes, POSTGRES_DTYPES_MAP))\n tables_dict = clean_data_types(tables_dict, data_map)\n\n create_tables = generate_table_scripts(tables_dict)\n create_pks = generate_primary_key_scripts(tables_dict)\n create_fks = generate_foreign_key_scripts(tables_dict)\n\n save_script(create_tables, 'wdb_create_tables.sql')\n save_script(create_pks, 'wdb_create_pks.sql')\n save_script(create_fks, 'wdb_create_fks.sql')", "def main():\n\n argparser = argparse.ArgumentParser\n\n parser = argparse.ArgumentParser(description='Dump F1 2019 tables in format suitable for documentation.')\n parser.add_argument('-f', '--format', default='rst', choices=['rst', 'markdown'],\n help='Format of tables to be dumped (default: rst).')\n\n args = parser.parse_args()\n\n if args.format == 'rst':\n dump_table_func = dump_table_rst\n else:\n dump_table_func = dump_table_markdown\n\n dump_tables(dump_table_func)", "def main():\n\n\timport sys\n\timport add2bib\n\n\toutput = sys.stdout\n\t\n\tfrom optparse import OptionParser\n\t\n\tusage = \"\"\"\n\t%prog [options]\n\texample: %prog -f h -bo BIB_DATABASE 0-324-23583-6 9780596529321\n\t\"\"\"\n\n\n\tparser = OptionParser(usage=usage, version =\"%prog \" + __version__)\n\tparser.add_option(\"-f\", 
\"--format\", action=\"store\",\n\t dest=\"format\", default='b',\n\t\t\t\t\t help=\"set format(s) of output\\nb: BibTeX\\nh: HTML\\nt: text\", metavar=\"FORMAT\")\n\tparser.add_option(\"-o\", \"--outfile\", action=\"store\", type=\"string\", dest=\"outfile\",\n\t\t\t\t\t help=\"Write formatted references to FILE\", metavar=\"FILE\")\n\tparser.add_option(\"-n\", \"--nuke\", action=\"store_true\", dest=\"overwrite\", default=False,\n\t\t\t\t\t help=\"CAUTION! silently overwrite outfile, default=%default\")\n\tparser.add_option(\"-b\", \"--backup\", action=\"store_true\", dest=\"backup\", default=False,\n\t\t\t\t\t help=\"backup FILE to FILE.bak, default=%default\")\n\tparser.add_option(\"-v\", \"--verbose\", action=\"store_true\",\n\t dest=\"verbose\", default=False,\n\t\t\t\t\t help=\"Print INFO messages to stdout, default=%default\")\n\tparser.add_option(\"-V\", \"--very_verbose\", action=\"store_true\",\n\t dest=\"very_verbose\", default=False,\n\t\t\t\t\t help=\"Print DEBUG messages to stdout, default=%default\")\n\n\t#parse options\n\t(options, args) = parser.parse_args()\n\t# set license, also checking imports\n\tset_license_key()\n\n\t# open output file for writing (default: stdout)\n\tif options.outfile:\n\t\tif options.backup and os.path.exists(options.outfile):\n\t\t\tshutil.copyfile(options.outfile,options.outfile+\".bak\")\n\t\tif options.overwrite or not os.path.exists(options.outfile):\n\t\t\toutput = open(options.outfile,'w')\n\t\telse:\n\t\t\tisbn2bib_logger.info(\"Appending to %s.\\n(Use -n option to nuke (overwrite) the old output file.)\"\n\t\t\t %options.outfile)\n\t\t\toutput = open(options.outfile,'a')\n\tprint args\n\tfor isbn in args:\n\t\tisbn = isbn.replace('-','')\n\t\tentry = make_entry(isbn)\n\t\toutput.write( str(entry) )\n\t\t\n\tif 'h' in options.format:\n\t\toutput.write( add2bib.html_format(entry) )\n\tif 't' in options.format:\n\t\toutput.write( add2bib.text_format(entry) )", "def main():\n if len(sys.argv) != 2:\n usage(sys.argv[0])\n\n dbfile = sys.argv[1]\n db = pickle.load(open(dbfile))\n for pdbid,dbtablist in db.iteritems():\n tabnum = 0\n while tabnum < len(dbtablist):\n tableau = dbtablist[tabnum]\n n = len(tableau)\n name = pdbid\n if len(dbtablist) > 1:\n name += str(tabnum)\n sys.stdout.write('%6s %4d\\n' % (name, n))\n for i in xrange(n):\n for j in xrange(i+1):\n sys.stdout.write(tableau[(i,j)] + ' ')\n sys.stdout.write('\\n')\n sys.stdout.write('\\n')\n tabnum += 1", "def main():\n count = 0\n\n # Read in the required files and filenames.\n predicted_proteins, protein_db, output_file_aug_to_fasta, \\\n output_file_proteins_to_db, blastp_output, output_to_file, \\\n overwrite = call_files()\n\n # Write all entries in the AUGUSTUS output to a FASTA file\n for record in split_records_aug(predicted_proteins):\n if count == 0:\n mode = 'w'\n else:\n mode = 'a'\n write_fasta(record, output_file_aug_to_fasta, mode)\n count += 1\n\n # Create a blast database and carry out a blastp search\n blast_db = blast_database(protein_db, 'prot', True,\n output_file_proteins_to_db, overwrite)\n\n blastp_file = blastp(output_file_proteins_to_db, output_file_aug_to_fasta,\n True, blastp_output, overwrite, 7)\n\n # Parse the blastp results for the desired information\n blast_results = parse_blastp_output(blastp_output)\n\n # Print the results\n print_output(blast_results)", "def main():\n task_init(authorization_action='runbibformat',\n authorization_msg=\"BibReformat Task Submission\",\n description=\"\"\"\nBibReformat formats the records and saves the produced 
outputs for\nlater retrieval.\n\nBibReformat is usually run periodically via BibSched in order to (1)\nformat new records in the database and to (2) reformat records for\nwhich the meta data has been modified.\n\nBibReformat has to be run manually when (3) format config files have\nbeen modified, in order to see the changes in the web interface.\n\nAlthough it is not necessary to run BibReformat to display formatted\nrecords in the web interface, BibReformat allows to improve serving\nspeed by precreating the outputs. It is suggested to run\nBibReformat for 'HB' output.\n\nOption -m cannot be used at the same time as option -c.\nOption -c prevents from finding records in private collections.\n\nExamples:\n bibreformat Format all new or modified records (in HB).\n bibreformat -o HD Format all new or modified records in HD.\n bibreformat -o HD,HB Format all new or modified records in HD and HB.\n\n bibreformat -a Force reformatting all records (in HB).\n bibreformat -c 'Photos' Force reformatting all records in 'Photos' collection (in HB).\n bibreformat -c 'Photos' -o HD Force reformatting all records in 'Photos' collection in HD.\n\n bibreformat -i 15 Force reformatting record 15 (in HB).\n bibreformat -i 15:20 Force reformatting records 15 to 20 (in HB).\n bibreformat -i 15,16,17 Force reformatting records 15, 16 and 17 (in HB).\n\n bibreformat -n Show how many records are to be (re)formatted.\n bibreformat -n -c 'Articles' Show how many records are to be (re)formatted in 'Articles' collection.\n\n bibreformat -oHB -s1h Format all new and modified records every hour, in HB.\n\"\"\", help_specific_usage=\"\"\" -o, --formats \\t Specify output format/s (default HB)\n -n, --noprocess \\t Count records to be formatted (no processing done)\nReformatting options:\n -a, --all \\t Force reformatting all records\n -c, --collection \\t Force reformatting records by collection\n -f, --field \\t Force reformatting records by field\n -p, --pattern \\t Force reformatting records by pattern\n -i, --id \\t Force reformatting records by record id(s)\nPattern options:\n -m, --matching \\t Specify if pattern is exact (e), regular expression (r),\n \\t partial (p), any of the words (o) or all of the words (a)\n\"\"\",\n version=__revision__,\n specific_params=(\"ac:f:p:lo:nm:i:\",\n [\"all\",\n \"collection=\",\n \"matching=\",\n \"field=\",\n \"pattern=\",\n \"format=\",\n \"noprocess\",\n \"id=\"]),\n task_submit_check_options_fnc=task_submit_check_options,\n task_submit_elaborate_specific_parameter_fnc=task_submit_elaborate_specific_parameter,\n task_run_fnc=task_run_core)", "def db_shell(ctx, db_key=None):\n ctx.run('pgcli -h {db_host} -d {db_name} -U {db_user}'.format(**get_database_settings(db_key)), pty=True)", "def setup_blast_db(input_file, input_type=\"fasta\", dbtype=\"prot\",\n title=\"blastdb\", out=\"blastdb\",\n makeblastdb_exe='', logger=None):\n if makeblastdb_exe == '':\n makeblastdb_exe = shutil.which(\"makeblastdb\")\n if logger:\n logger.info(\"makeblastdb executable: %s\", makeblastdb_exe)\n makedbcmd = str(\"{0} -in {1} -input_type {2} -dbtype {3} \" +\n \"-out {4}\").format(makeblastdb_exe,\n input_file,\n input_type,\n dbtype,\n out)\n if logger:\n logger.info(\"Making blast db: {0}\".format(makedbcmd))\n try:\n subprocess.run(makedbcmd, shell=sys.platform != \"win32\",\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, check=True)\n logging.debug(\"BLAST database '{0}' created here: {1}\".format(\n title, out))\n return 0\n except:\n if logger:\n logging.error(\"Something bad 
happened when trying to make \" +\n \"a blast database\")\n sys.exit(1)", "def format():\n isort = 'isort -rc *.py app/'\n yapf = 'yapf -r -i *.py app/'\n\n print('Running {}'.format(isort))\n subprocess.call(isort, shell=True)\n\n print('Running {}'.format(yapf))\n subprocess.call(yapf, shell=True)", "def syncdb(command=\"\"):\n return manage(\"syncdb --noinput %s\" % command)", "def format_bash(self,query_results):\n data=query_results.data\n \n name=\"ddb\"\n\n print (\"{0}_row_length={1}\".format(name,len(data)))\n print (\"{0}_column_length={1}\".format(name,len(query_results.columns)))\n print (\"\")\n\n column_index=0\n for column in query_results.columns:\n print(\"{0}_columns['{1}']='{2}'\".format(name,column_index,column))\n column_index+=1\n\n\n row_index=0\n for row in data:\n for column_index in range(0,len(query_results.columns)):\n print('{0}_data[{1}][{2}]=\"{3}\"'.format(name,row_index,column_index,row['data'][column_index]))\n row_index+=1\n # TODO return output for this\n return \"\"", "def run_sql_file(batch_id, sql_path, stdout_path, queue=\"broad\", project=\"imaging\"):\n \n # We start the job in a suspended state in order to enter\n # the job status into the database before the job starts\n #\n if os.path.isfile(stdout_path):\n os.remove(stdout_path)\n #\n # DK_ROOT is needed to point at the dotkit which supplies the \"use\"\n # function to the shell\n #\n # The \"use\" function is needed to configure Python 2.6 so we can\n # use MySQLDB\n #\n # We set PYTHON_EGG_CACHE to the web-server's egg cache in order\n # to get a writeable directory\n #\n if not os.environ.has_key(\"DK_ROOT\"):\n os.environ[\"DK_ROOT\"] = \"/broad/tools/dotkit\"\n remote_cmd = (\"source /broad/tools/dotkit/ksh/.dk_init;\"\n \"use Python-2.6;\"\n \"export PYTHON_EGG_CACHE=/imaging/analysis/People/imageweb/python_egg_cache;\"\n \"python %s -b %d -i %s\"%(__file__,batch_id, sql_path))\n cmd=[\". /broad/lsf/conf/profile.lsf;\",\n \"bsub\",\n \"-H\",\n \"-q\",\"%(queue)s\"%(locals()),\n \"-g\",\"/imaging/batch/%(batch_id)d\"%(locals()),\n \"-M\",\"1\",\n \"-P\",\"%(project)s\"%(locals()),\n \"-L\",\"/bin/bash\",\n \"-o\",stdout_path,\n \"\"\" \"%s\" \"\"\"%(remote_cmd)\n ]\n cmd = \" \".join(cmd)\n p=os.popen(cmd)\n output=p.read()\n exit_code=p.close()\n job=None\n if output:\n match = re.search(r'<([0-9]+)>',output)\n if len(match.groups()) > 0:\n job=int(match.groups()[0])\n else:\n raise RuntimeError(\"Failed to start job. Output is as follows: %s\"%(output))\n else:\n raise RuntimeError(\"Failed to start job: No output from bsub\")\n cursor = connection.cursor()\n statement = \"\"\"insert into sql_job\n (job_id, batch_id, sql_file)\n values (%(job)d, %(batch_id)d, '%(sql_path)s')\"\"\"%(locals())\n cursor.execute(statement)\n cursor.close()\n cmd = [\"bresume\", \"%d\"%(job)]\n cmd = \" \".join(cmd)\n p=os.popen(\". 
/broad/lsf/conf/profile.lsf;\"+cmd,'r')\n output=p.read()\n exit_code=p.close()\n return job", "def main():\n args = sys.argv[1:]\n\n # Enable debugging mode.\n if len(args) == 2 and args[1] == \"-d\":\n global DEBUG\n DEBUG = True\n del args[1]\n\n if len(args) != 1:\n write_error(\"Expecting exactly 1 command-line argument, got {}.\", len(args))\n\n # Load the query output from sql-to-json\n sql = read_input(args[0])\n execute_query(sql)", "def create_blast_db(self):\n print(\"Creating blast db\")\n if self.mask:\n command = 'dustmasker -in ' + self.seq_file + ' -infmt fasta '\n command += '-outfmt maskinfo_asn1_bin -out ' + self.seq_file + '_dust.asnb'\n subprocess.check_output(command, shell=True) # identifying low-complexity regions.\n\n command = 'makeblastdb -in ' + self.seq_file + ' -input_type fasta -dbtype nucl '\n command += '-mask_data ' + self.seq_file + '_dust.asnb '\n command += '-out ' + self.seq_file + ' -title \"Whole Genome without low-complexity regions\"'\n subprocess.check_output(command, shell=True) # Overwriting the genome file.\n else:\n command = 'makeblastdb -in ' + self.seq_file + ' -input_type fasta -dbtype nucl '\n command += '-out ' + self.seq_file + ' -title \"Whole Genome unmasked\"'\n subprocess.check_output(command, shell=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the tag of the first matched ListOfValues. For each element in ``series``, the result is the tag of the list of values in the dictionary of LoVs ``taglov`` that first matches the element with one of its values, OR the value from ``donor`` at the same index, OR ``na``.
def which_tag(series: pd.Series, taglov: Union[TagLoV, Any], na: Any, donor: pd.Series = None, method: Optional[Union[Callable, str]] = None, **kwargs): if series.empty: return series if not isinstance(taglov, TagLoV): taglov = TagLoV(taglov) lov_idx_plus = which_lov(series, taglov.lovs, method, **kwargs) tags_plus = np.array((na, *taglov.tags)) result = pd.Series(tags_plus[lov_idx_plus], index=series.index) if isinstance(donor, pd.Series): # take unmatched values from donor unmatched_idx = series.index[~lov_idx_plus.astype(bool)] if not unmatched_idx.empty: take_idx = unmatched_idx.intersection(donor.index) if not take_idx.empty: result[take_idx] = donor[take_idx] return result
[ "def tag_one(self, tokens, index, history):\n tag = None\n for tagger in self._taggers:\n tag = tagger.choose_tag(tokens, index, history)\n if tag is not None:\n break\n return tag", "def find_first_tag(self, tag):\n for lm, _ in self.search(tag=tag):\n return lm", "def useSeriesAbove(requestContext, seriesList, value, search, replace):\n newSeries = []\n\n for series in seriesList:\n newname = re.sub(search, replace, series.name)\n if max(series) > value:\n n = evaluateTarget(requestContext, newname)\n if n is not None and len(n) > 0:\n newSeries.append(n[0])\n\n return newSeries", "def find_tag_value(tag):\n for elem in tags:\n if elem['key'] == tag:\n return elem['value']\n return None", "def search_tags(tag_names, obj):\n for tag_name in tag_names:\n print(f\"{tag_name}:\", end=\" \")\n try:\n for tag in obj['Tags']:\n if tag['Key'] == tag_name:\n print(f\"{tag['Value']}\")\n break\n else:\n print(\"null\")\n except KeyError:\n print(\"null\")", "def get_series(series):\n vanused_koos = [1286262, 1302870, 1314323, 1323569, 1338858, 1351640, 1368511, 1385399, 1399637, 1412265, 1424073,\n 1434630, 1444522, 1455900, 1464476, 1472190, 1482247, 1493085, 1503743, 1513747, 1523486, 1534076,\n 1546304, 1558137, 1565662, 1570599, 1567749, 1554878, 1511303, 1476952, 1448075, 1425192, 1405996,\n 1393074, 1379237, 1401250, 1392720, 1383510, 1375190, 1366250, 1358850, 1350700, 1342920, 1338440,\n 1335740, 1333290, 1329660, 1325217, 1320174, 1315819, 1313271]\n vanused_15_19 = [90942, 96471, 99428, 100886, 100942, 100679, 101321, 100832, 101466, 103274, 104452, 105265,\n 106286, 106834, 106079, 105065, 102773, 101420, 99647, 99901, 101055, 104001, 107353, 111010,\n 111696, 109719, 108511, 107641, 104774, 102019, 99958, 99129, 98966, 99564, 101406, 102000, 102040,\n 101830, 102580, 102720, 102680, 101660, 98420, 93980, 87180, 79750, 73180, 68634, 64021, 61311,\n 59842]\n vanused_20_24 = [89850, 87798, 83872, 84855, 91326, 99889, 106678, 110052, 111684, 111447, 110685, 112309, 112824,\n 113620, 115187, 114229, 113763, 113566, 113628, 112455, 111154, 108428, 105989, 103767, 104027,\n 105679, 107063, 106471, 102906, 101515, 99551, 97600, 95731, 94786, 94477, 98280, 98050, 97910,\n 97620, 98550, 99070, 98770, 98510, 99560, 100160, 100710, 100000, 96669, 92203, 85651, 78493]\n vanused_25_29 = [108387, 109351, 110860, 106961, 101350, 95046, 91201, 88156, 90898, 97991, 105741, 111772, 114409,\n 116507, 116386, 116614, 118938, 119496, 119790, 120720, 120463, 120777, 121384, 121463, 119514,\n 117298, 112872, 107553, 100241, 95906, 93270, 93026, 94143, 95350, 95133, 99710, 98780, 97650,\n 97020, 96310, 95560, 94850, 94240, 93850, 94770, 95620, 95590, 95603, 96742, 97499, 98197]\n if series == \"Vanuserühmad kokku\":\n return vanused_koos\n elif series == \"15-19\":\n return vanused_15_19\n elif series == \"20-24\":\n return vanused_20_24\n elif series == \"25-29\":\n return vanused_25_29\n else:\n return []", "def tagify(x):\n\t\tm = tag_match.match(x)\n\t\tif m:\n\t\t\tg = m.groups()\n\n\t\t\tword = (g[0] if g[0] is not None else \"NA\")\n\t\t\ttag = (g[1] if g[1] is not None else \"NA\")\n\t\t\treturn (word,tag)\n\t\t\t#if g[1] is None: return (g[0], \"NA\")\n\t\t\t#else: return g\n\t\telse: return []", "def tag_values(self):\r\n return [x[1] for x in self.tags]", "def get_tag_value(\n service: str,\n tags: List[Any],\n tag_key: str,\n) -> str:\n capitalize = capitalize_tag_kv(service)\n matches = [\n t[f\"{'V' if capitalize else 'v'}alue\"]\n for t in tags\n if t[f\"{'K' if capitalize else 'k'}ey\"] == 
tag_key\n ]\n if len(matches) != 1:\n log_error(\n f\"Oops it looks like we're unable to find a match for tag {tag_key}.\"\n \"Please open an issue to help us get this fixed!\",\n )\n raise Abort()\n\n return matches[0]", "def label_encode(self, series):\n le = LabelEncoder()\n \n if type(series) == pd.core.series.Series:\n if series.isna().any():\n series = series.fillna(\"NaN\")\n image = le.fit_transform(series)\n image_nan = le.transform([\"NaN\"])[0]\n image = np.where(image == image_nan, np.nan, image)\n return image\n else:\n return le.fit_transform(series) # array", "def get_labels_best_tag(self):\n tags = list()\n for label in self.__labels:\n best = label.get_best()\n if best is not None:\n tags.append(best)\n else:\n tags.append(sppasTag(''))\n\n return tags", "def get_match_series(o, peptide):\n ret = {\"peptide\": peptide, \"orf_id\": o[\"orf_id\"]}\n return pd.Series(ret)", "def get_tag_options(label_matches):\r\n\ttag_options = []\r\n\tfor key in label_matches.keys():\r\n\t\tif key[1] not in tag_options:\r\n\t\t\ttag_options.append(key[1])\r\n\treturn tag_options", "def get_first_series(series_list):\n\n return min(series_list, key=lambda x: x.series_num).series_num", "def predict(self, tokens: TokenSeq) -> PosSeq:\n _, pos_tags = self.predict_greedy(tokens)\n # _, _, pos_tags = self.predict_viterbi(tokens)\n return pos_tags", "def find_series(self, key):\n # TODO: this could be more efficient if we pushed it down into Java\n return self.filter(lambda x: x[0] == key).first()[1]", "def _anytag_notnone_val(self, tag_name, estimators):\n for _, est in estimators:\n tag_val = est.get_tag(tag_name)\n if tag_val != \"None\":\n return tag_val\n return tag_val", "def findTagByKey(self, key):\n for cat in self.getCategories():\n for thg in cat.getThematicgroups():\n for tg in thg.getTaggroups():\n for t in tg.getTags():\n if t.getKey() == key:\n return t\n return None", "def get_latest_tag(self, repo: git.Repo) -> Tuple[Optional[\n git.refs.tag.TagReference], Optional[semantic_version.Version]]:\n raw_tag = self._search_strategy(\n repo=repo, branch=self._branch)\n if raw_tag is None:\n return None, None\n sem_tag = semantic_version.Version(\n tag_search_strategy.clean_tag_name(str(raw_tag)))\n return raw_tag, sem_tag" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
prepro 200x235x3 uint8 frame into 8300 (83x100) 1D float vector
def prepro(I): # """ prepro 200x235x3 uint8 frame into 8300 (83x100) 1D float vector """ I = I[35:200] # crop - remove 35px from start & 35px from end of image in x, to reduce redundant parts of image (i.e. after ball passes paddle) I = I[::2,::2,0] # downsample by factor of 2 I[I == 43] = 0 # erase background (background type 1) I[I != 0] = 1 # everything else (paddles, ball) just set to 1 return I.astype(np.float).ravel()
[ "def preprocess(self, frame: np.ndarray) -> torch.TensorType:\n tensor = cv.resize(frame, (self.IMGSZ, self.IMGSZ)) \n tensor = tensor.transpose(2, 0, 1)\n tensor = torch.from_numpy(tensor)\n tensor = torch.unsqueeze(tensor, 0)\n tensor = tensor.half() if self.half else tensor.float()\n tensor = tensor / 255.0\n tensor = tensor.to(self.device)\n\n return tensor", "def processing_data(raw_data):\n data = np.frombuffer(raw_data, np.uint8)\n data = np.reshape(data, [data.shape[0]//1029, -1])\n data = data[:, 5:]\n data = np.reshape(data, [1, -1])\n data = 256 * data[0, 0::2] + data[0, 1::2]\n data = 10 * (data / 65535)\n data = np.reshape(data, [-1, 8]).T\n return data", "def processing_data(raw_data):\n \n data = np.frombuffer(raw_data, np.uint8)\n data = np.reshape(data, [data.shape[0]//1029, -1])\n data = data[:, 5:]\n data = np.reshape(data, [1, -1])\n data = 256 * data[0, 0::2] + data[0, 1::2]\n data = 10 * (data / 65535)\n data = np.reshape(data, [-1, 8]).T\n return data", "def normalize_frames(frames):\n new_frames = frames.astype(np.float32)\n new_frames /= (255 / 2)\n new_frames -= 1\n\n return new_frames", "def vid2tensor( self, current_frame):", "def float_image_to_uint8(image):\n image = (image * 142.0) + 128.0\n return tf.cast(image, tf.uint8)", "def next_float(self) -> float:\n data = self.body[self.ptr : self.ptr + 4]\n self.ptr += 4\n data = sum(struct.unpack(\"f\", bytearray(data)))\n return data", "def sequence_to_vector(frame, sequence):\n return (sequence[0] * frame.x +\n sequence[1] * frame.y +\n sequence[2] * frame.z)", "def convert_to_vector(img_arr):\n img = img_arr[0:248, 0:248, 0]\n img = img.flatten()\n return img", "def to_uint8(f):\n from numpy import array, clip, uint8\n\n img = array(clip(f,0,255),uint8)\n return img", "def _byte_to_float(b):\n return (int(b) % 256) / 255.0", "def temperature_data_to_ndarray(frame):\n a = np.frombuffer(ffi.buffer(frame, np.dtype(np.float32).itemsize*768), dtype=np.float32)\n return a.reshape((24, 32))", "def pcm2float(data):\n if data.dtype == np.int16:\n data = data.astype(\"float32\")\n bits = np.iinfo(np.int16).bits\n data = data / (2**(bits - 1))\n return data", "def _image_to_vector(image):\n return image.flatten().astype(float)", "def decode_frame(self, buf):\n import numpy as np\n from cv2 import cvtColor\n\n w, h = self._resolution\n arr = np.fromstring(buf, 'uint8').reshape((h + h / 2, w))\n arr = cvtColor(arr, 93) # NV21 -> BGR\n return arr", "def ggml_fp16_to_fp32(x: np.float16) -> float:\n ...", "def uvflux(file,start,step) :\n doshift = False\n uvflags = \"sdlcef\"\n handle = uvopen(file,\"old\")\n uvset(handle,\"data\",\"channel\",1,float(start),float(step),float(step))\n PolIndx = [0] * 14\n sources = [\" \"] * 14\n isrc = -1\n nsrc = 0\n npol = 0\n fluxr = []\n fluxi = []\n amp = []\n amp2 = []\n rms2 = []\n ncnt = []\n PolMin = -9\n PolMax = 4\n for i in range(0,MAXPOL) :\n temp = [0.0] * MAXSRC\n fluxr.append(temp)\n temp = [0.0] * MAXSRC\n fluxi.append(temp)\n temp = [0.0] * MAXSRC\n amp.append(temp)\n temp = [0.0] * MAXSRC\n amp2.append(temp)\n temp = [0.0] * MAXSRC\n rms2.append(temp)\n temp = [0] * MAXSRC\n ncnt.append(temp)\n preamble,data,flags = uvread(handle)\n ipol = -20\n while(len(flags) > 0) :\n ipol = uvrdvri(handle,\"pol\",1)\n if(PolIndx[ipol] == 0) :\n npol += 1\n PolIndx[ipol] = npol\n ipol = PolIndx[ipol]\n t,l,update = uvprobvr(handle,\"source\")\n if(update) :\n source = uvgetvra(handle,\"source\")\n found = False\n if(isrc >= 0) :\n found = source == sources[isrc]\n if(not found) :\n 
if(source in sources) :\n isrc = sources.index(source)\n found = True\n if(not found) :\n nsrc += 1\n sources[nsrc - 1] = source\n for i in range(0,MAXPOL) :\n fluxr[i][nsrc] = 0.0\n fluxi[i][nsrc] = 0.0\n amp[i][nsrc] = 0.0\n amp2[i][nsrc] = 0.0\n rms2[i][nsrc] = 0.0\n ncnt[i][nsrc] = 0\n isrc = nsrc-1\n sig2 = uvinfo(handle,\"variance\")[0]\n for i in range(0,len(flags)) :\n if(flags[i]) :\n fluxr[ipol][isrc] += data[i].real\n fluxi[ipol][isrc] += data[i].imag\n rms2[ipol][isrc] += sig2\n temp = calculations.cabs(data[i])\n amp[ipol][isrc] += temp\n amp2[ipol][isrc] += temp*temp\n ncnt[ipol][isrc] += 1\n preamble,data,flags = uvread(handle)\n uvclose(handle)\n npol = 0\n p = [0] * MAXPOL\n pp = [0] * MAXPOL\n for j in range(PolMin,PolMax+1) :\n if(PolIndx[j] > 0) :\n p[npol] = j\n pp[npol] = PolIndx[j]\n npol = npol + 1\n for i in range(npol-1,1,-1) :\n print i\n if(abs(p[i]) < abs(p[i-1])) :\n t = p[i]\n p[i] = p[i-1]\n p[i-1] = t\n t = pp[i]\n pp[i] = pp[i-1]\n pp[i-1] = t\n PolCode = \"--\"\n retVal = []\n for isrc in range(0,nsrc) :\n source = sources[isrc]\n for i in range(0,npol) :\n ipol = pp[i]\n if(ncnt[ipol][isrc] > 0) :\n PolCode = polsc2p(p[i])\n fluxr[ipol][isrc] /= ncnt[ipol][isrc]\n fluxi[ipol][isrc] /= ncnt[ipol][isrc]\n vecaver = complex(fluxr[ipol][isrc],fluxi[ipol][isrc])\n vecscat = amp2[ipol][isrc] / (2*ncnt[ipol][isrc])- 0.5*(fluxr[ipol][isrc]**2+fluxi[ipol][isrc]**2)\n vecscat = math.sqrt(abs(vecscat))\n vecamp,vecpha = amphase(vecaver)\n scalamp = amp[ipol][isrc] / ncnt[ipol][isrc]\n scalscat = amp2[ipol][isrc] / ncnt[ipol][isrc]- (amp[ipol][isrc] / ncnt[ipol][isrc])**2\n scalscat = math.sqrt(abs(scalscat))\n sig2 = math.sqrt(rms2[ipol][isrc]/ncnt[ipol][isrc])\n retVal.append([source,PolCode,sig2,complex(fluxr[ipol][isrc],fluxi[ipol][isrc]),vecscat,scalamp,scalscat,ncnt[ipol][isrc]])\n return retVal", "def bgr2yuv422(im):\r\n\r\n # Avoid overflows in average calculations.\r\n im = im.astype(np.uint16)\r\n\r\n # Initialise the new image.\r\n imShape = im.shape\r\n converted = np.zeros((imShape[0], int(imShape[1]/2), 4))\r\n \r\n # Perform the conversion calculations.\r\n converted[:,:,0] = (0.2126*im[:,0:imShape[1]:2,2] + \r\n 0.7152*im[:,0:imShape[1]:2,1] + \r\n 0.0722*im[:,0:imShape[1]:2,0]) * (219.0/256.0) + 16.0\r\n converted[:,:,2] = (0.2126*im[:,1:imShape[1]:2,2] + \r\n 0.7152*im[:,1:imShape[1]:2,1] + \r\n 0.0722*im[:,1:imShape[1]:2,0]) * (219.0/256.0) + 16.0\r\n #print((((converted[:,:,0] + converted[:,:,2])/2.0)))\r\n converted[:,:,1] = (((im[:,0:imShape[1]:2,0]+im[:,1:imShape[1]:2,0])/2.0) -\\\r\n ((converted[:,:,0] + converted[:,:,2])/2.0)) * 0.5389 *\\\r\n (224.0/256.0) + 128\r\n converted[:,:,3] = (((im[:,0:imShape[1]:2,2]+im[:,1:imShape[1]:2,2])/2.0) -\\\r\n ((converted[:,:,0] + converted[:,:,2])/2.0)) * 0.635 *\\\r\n (224.0/256.0) + 128\r\n #print(converted.astype(np.uint8))\r\n # Return the converted image.\r\n return converted.astype(np.uint8)", "def ggml_fp32_to_fp16(x: float) -> np.float16:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test _arrange_test_result method with only one module.
def test_arrange_test_result_one_module(self): pass_1 = self._create_test_result(status=test_runner_base.PASSED_STATUS) pass_2 = self._create_test_result(status=test_runner_base.PASSED_STATUS) pass_3 = self._create_test_result(status=test_runner_base.PASSED_STATUS) fail_1 = self._create_test_result(status=test_runner_base.FAILED_STATUS) fail_2 = self._create_test_result(status=test_runner_base.FAILED_STATUS) ignore_1 = self._create_test_result(status=test_runner_base.IGNORED_STATUS) reporter_1 = result_reporter.ResultReporter() reporter_1.all_test_results.extend([pass_1, pass_2, pass_3]) reporter_2 = result_reporter.ResultReporter() reporter_2.all_test_results.extend([fail_1, fail_2, ignore_1]) info_dict = {} aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2]) expect_summary = {aei._STATUS_IGNORED_KEY : 1, aei._STATUS_FAILED_KEY : 2, aei._STATUS_PASSED_KEY : 3} self.assertEqual(expect_summary, info_dict[aei._TOTAL_SUMMARY_KEY])
[ "def test_arrange_test_result_multi_module(self):\n group_a_pass_1 = self._create_test_result(group_name='grpup_a',\n status=test_runner_base.PASSED_STATUS)\n group_b_pass_1 = self._create_test_result(group_name='grpup_b',\n status=test_runner_base.PASSED_STATUS)\n group_c_pass_1 = self._create_test_result(group_name='grpup_c',\n status=test_runner_base.PASSED_STATUS)\n group_b_fail_1 = self._create_test_result(group_name='grpup_b',\n status=test_runner_base.FAILED_STATUS)\n group_c_fail_1 = self._create_test_result(group_name='grpup_c',\n status=test_runner_base.FAILED_STATUS)\n group_c_ignore_1 = self._create_test_result(group_name='grpup_c',\n status=test_runner_base.IGNORED_STATUS)\n reporter_1 = result_reporter.ResultReporter()\n reporter_1.all_test_results.extend([group_a_pass_1, group_b_pass_1, group_c_pass_1])\n reporter_2 = result_reporter.ResultReporter()\n reporter_2.all_test_results.extend([group_b_fail_1, group_c_fail_1, group_c_ignore_1])\n\n info_dict = {}\n aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])\n expect_group_a_summary = {aei._STATUS_IGNORED_KEY : 0,\n aei._STATUS_FAILED_KEY : 0,\n aei._STATUS_PASSED_KEY : 1}\n self.assertEqual(\n expect_group_a_summary,\n info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_a'][aei._SUMMARY_KEY])\n\n expect_group_b_summary = {aei._STATUS_IGNORED_KEY : 0,\n aei._STATUS_FAILED_KEY : 1,\n aei._STATUS_PASSED_KEY : 1}\n self.assertEqual(\n expect_group_b_summary,\n info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_b'][aei._SUMMARY_KEY])\n\n expect_group_c_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 1,\n aei._STATUS_PASSED_KEY : 1}\n self.assertEqual(\n expect_group_c_summary,\n info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_c'][aei._SUMMARY_KEY])\n\n expect_total_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(expect_total_summary, info_dict[aei._TOTAL_SUMMARY_KEY])", "def test_per_module_scenarios(self):\n s = testscenarios.scenarios.per_module_scenarios(\n 'the_module', [\n ('Python', 'testscenarios'),\n ('unittest', 'unittest'),\n ('nonexistent', 'nonexistent'),\n ])\n self.assertEqual('nonexistent', s[-1][0])\n self.assertIsInstance(s[-1][1]['the_module'], tuple)\n s[-1][1]['the_module'] = None\n self.assertEqual(s, [\n ('Python', {'the_module': testscenarios}),\n ('unittest', {'the_module': unittest}),\n ('nonexistent', {'the_module': None}),\n ])", "def test_arrange_test_result_multi_runner(self):\n runner_a_pass_1 = self._create_test_result(runner_name='runner_a',\n status=test_runner_base.PASSED_STATUS)\n runner_a_pass_2 = self._create_test_result(runner_name='runner_a',\n status=test_runner_base.PASSED_STATUS)\n runner_a_pass_3 = self._create_test_result(runner_name='runner_a',\n status=test_runner_base.PASSED_STATUS)\n runner_b_fail_1 = self._create_test_result(runner_name='runner_b',\n status=test_runner_base.FAILED_STATUS)\n runner_b_fail_2 = self._create_test_result(runner_name='runner_b',\n status=test_runner_base.FAILED_STATUS)\n runner_b_ignore_1 = self._create_test_result(runner_name='runner_b',\n status=test_runner_base.IGNORED_STATUS)\n\n reporter_1 = result_reporter.ResultReporter()\n reporter_1.all_test_results.extend([runner_a_pass_1, runner_a_pass_2, runner_a_pass_3])\n reporter_2 = result_reporter.ResultReporter()\n reporter_2.all_test_results.extend([runner_b_fail_1, runner_b_fail_2, runner_b_ignore_1])\n info_dict = {}\n 
aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])\n expect_group_a_summary = {aei._STATUS_IGNORED_KEY : 0,\n aei._STATUS_FAILED_KEY : 0,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(\n expect_group_a_summary,\n info_dict[aei._TEST_RUNNER_KEY]['runner_a']['someModule'][aei._SUMMARY_KEY])\n\n expect_group_b_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 0}\n self.assertEqual(\n expect_group_b_summary,\n info_dict[aei._TEST_RUNNER_KEY]['runner_b']['someModule'][aei._SUMMARY_KEY])\n\n expect_total_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(expect_total_summary, info_dict[aei._TOTAL_SUMMARY_KEY])", "def test_get_results(self):\n pass", "def test_heading_order(self):\n assert test_results.get('heading-order') is None, test_results['heading-order'].help", "def test_sort_cards(input, result):\n from sort_cards import sort_cards\n assert sort_cards(input) == result", "def test_group(self):\n pass", "def test_result_group_can_be_sorted_by_other_metrics(\n self, result_group_roc: ResultGroup, result_1: Result, result_2: Result\n ):\n assert result_group_roc.results == [result_1, result_2]", "def test_iter_modules():\n modules = make_test_modules()\n args = MockedCommandLineArgs()\n\n expected_output = [\n (0, '01_section1'),\n (0, normpath('test_class/01_section1/01_module1')),\n (0, 'lecture1', 'en.txt', 'title'),\n ('en.txt', 'https://api.coursera.org/api/test-url', 'title')\n ]\n collected_output = []\n\n for module in _iter_modules(modules=modules, class_name='test_class',\n path='', ignored_formats=None, args=args):\n collected_output.append((module.index, module.name))\n for section in module.sections:\n collected_output.append((section.index, section.dir))\n for lecture in section.lectures:\n for resource in lecture.resources:\n collected_output.append((lecture.index, lecture.name,\n resource.fmt, resource.title))\n collected_output.append((resource.fmt, resource.url, resource.title))\n\n assert expected_output == collected_output", "def test_get_scenarios_expanded(self):\n pass", "def test_get_scenarios(self):\n pass", "def after_test(self, test_results):\n pass", "def test_require():", "def test_query_task_multi_results(self):\n pass", "def prepare_test(self):\n pass", "def test_query_task_results(self):\n pass", "def test_order_list(self):\n pass", "def test_1_uut2(executed_tests, test_result, uut, uut2):\n executed_tests.append(1)\n assert test_result", "def add_test_from_result(self, dbtestresult):\n testclass = module.get_class(dbtestresult.testimplementation)\n testclass.set_test_options()\n args, kwargs = parse_args(dbtestresult.arguments)\n testinstance = testclass(self.config)\n entry = TestEntry(testinstance, args, kwargs, False)\n self._add_with_prereq(entry)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test _arrange_test_result method with multiple modules.
def test_arrange_test_result_multi_module(self): group_a_pass_1 = self._create_test_result(group_name='grpup_a', status=test_runner_base.PASSED_STATUS) group_b_pass_1 = self._create_test_result(group_name='grpup_b', status=test_runner_base.PASSED_STATUS) group_c_pass_1 = self._create_test_result(group_name='grpup_c', status=test_runner_base.PASSED_STATUS) group_b_fail_1 = self._create_test_result(group_name='grpup_b', status=test_runner_base.FAILED_STATUS) group_c_fail_1 = self._create_test_result(group_name='grpup_c', status=test_runner_base.FAILED_STATUS) group_c_ignore_1 = self._create_test_result(group_name='grpup_c', status=test_runner_base.IGNORED_STATUS) reporter_1 = result_reporter.ResultReporter() reporter_1.all_test_results.extend([group_a_pass_1, group_b_pass_1, group_c_pass_1]) reporter_2 = result_reporter.ResultReporter() reporter_2.all_test_results.extend([group_b_fail_1, group_c_fail_1, group_c_ignore_1]) info_dict = {} aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2]) expect_group_a_summary = {aei._STATUS_IGNORED_KEY : 0, aei._STATUS_FAILED_KEY : 0, aei._STATUS_PASSED_KEY : 1} self.assertEqual( expect_group_a_summary, info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_a'][aei._SUMMARY_KEY]) expect_group_b_summary = {aei._STATUS_IGNORED_KEY : 0, aei._STATUS_FAILED_KEY : 1, aei._STATUS_PASSED_KEY : 1} self.assertEqual( expect_group_b_summary, info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_b'][aei._SUMMARY_KEY]) expect_group_c_summary = {aei._STATUS_IGNORED_KEY : 1, aei._STATUS_FAILED_KEY : 1, aei._STATUS_PASSED_KEY : 1} self.assertEqual( expect_group_c_summary, info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_c'][aei._SUMMARY_KEY]) expect_total_summary = {aei._STATUS_IGNORED_KEY : 1, aei._STATUS_FAILED_KEY : 2, aei._STATUS_PASSED_KEY : 3} self.assertEqual(expect_total_summary, info_dict[aei._TOTAL_SUMMARY_KEY])
[ "def test_arrange_test_result_one_module(self):\n pass_1 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n pass_2 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n pass_3 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n fail_1 = self._create_test_result(status=test_runner_base.FAILED_STATUS)\n fail_2 = self._create_test_result(status=test_runner_base.FAILED_STATUS)\n ignore_1 = self._create_test_result(status=test_runner_base.IGNORED_STATUS)\n reporter_1 = result_reporter.ResultReporter()\n reporter_1.all_test_results.extend([pass_1, pass_2, pass_3])\n reporter_2 = result_reporter.ResultReporter()\n reporter_2.all_test_results.extend([fail_1, fail_2, ignore_1])\n info_dict = {}\n aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])\n expect_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(expect_summary, info_dict[aei._TOTAL_SUMMARY_KEY])", "def test_arrange_test_result_multi_runner(self):\n runner_a_pass_1 = self._create_test_result(runner_name='runner_a',\n status=test_runner_base.PASSED_STATUS)\n runner_a_pass_2 = self._create_test_result(runner_name='runner_a',\n status=test_runner_base.PASSED_STATUS)\n runner_a_pass_3 = self._create_test_result(runner_name='runner_a',\n status=test_runner_base.PASSED_STATUS)\n runner_b_fail_1 = self._create_test_result(runner_name='runner_b',\n status=test_runner_base.FAILED_STATUS)\n runner_b_fail_2 = self._create_test_result(runner_name='runner_b',\n status=test_runner_base.FAILED_STATUS)\n runner_b_ignore_1 = self._create_test_result(runner_name='runner_b',\n status=test_runner_base.IGNORED_STATUS)\n\n reporter_1 = result_reporter.ResultReporter()\n reporter_1.all_test_results.extend([runner_a_pass_1, runner_a_pass_2, runner_a_pass_3])\n reporter_2 = result_reporter.ResultReporter()\n reporter_2.all_test_results.extend([runner_b_fail_1, runner_b_fail_2, runner_b_ignore_1])\n info_dict = {}\n aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])\n expect_group_a_summary = {aei._STATUS_IGNORED_KEY : 0,\n aei._STATUS_FAILED_KEY : 0,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(\n expect_group_a_summary,\n info_dict[aei._TEST_RUNNER_KEY]['runner_a']['someModule'][aei._SUMMARY_KEY])\n\n expect_group_b_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 0}\n self.assertEqual(\n expect_group_b_summary,\n info_dict[aei._TEST_RUNNER_KEY]['runner_b']['someModule'][aei._SUMMARY_KEY])\n\n expect_total_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(expect_total_summary, info_dict[aei._TOTAL_SUMMARY_KEY])", "def test_per_module_scenarios(self):\n s = testscenarios.scenarios.per_module_scenarios(\n 'the_module', [\n ('Python', 'testscenarios'),\n ('unittest', 'unittest'),\n ('nonexistent', 'nonexistent'),\n ])\n self.assertEqual('nonexistent', s[-1][0])\n self.assertIsInstance(s[-1][1]['the_module'], tuple)\n s[-1][1]['the_module'] = None\n self.assertEqual(s, [\n ('Python', {'the_module': testscenarios}),\n ('unittest', {'the_module': unittest}),\n ('nonexistent', {'the_module': None}),\n ])", "def test_iter_modules():\n modules = make_test_modules()\n args = MockedCommandLineArgs()\n\n expected_output = [\n (0, '01_section1'),\n (0, normpath('test_class/01_section1/01_module1')),\n (0, 'lecture1', 'en.txt', 'title'),\n ('en.txt', 
'https://api.coursera.org/api/test-url', 'title')\n ]\n collected_output = []\n\n for module in _iter_modules(modules=modules, class_name='test_class',\n path='', ignored_formats=None, args=args):\n collected_output.append((module.index, module.name))\n for section in module.sections:\n collected_output.append((section.index, section.dir))\n for lecture in section.lectures:\n for resource in lecture.resources:\n collected_output.append((lecture.index, lecture.name,\n resource.fmt, resource.title))\n collected_output.append((resource.fmt, resource.url, resource.title))\n\n assert expected_output == collected_output", "def test_get_results(self):\n pass", "def test_query_task_multi_results(self):\n pass", "def test_group(self):\n pass", "def test_result_group_can_be_sorted_by_other_metrics(\n self, result_group_roc: ResultGroup, result_1: Result, result_2: Result\n ):\n assert result_group_roc.results == [result_1, result_2]", "def test(): \n\t\treturn [\"vice.multizone\", \n\t\t\t[\n\t\t\t\tmig_matrix_row.test(run = False), \n\t\t\t\tmig_matrix.test(run = False), \n\t\t\t\tmig_specs.test(run = False), \n\t\t\t\tzone_array.test(run = False), \n\t\t\t\t_multizone.test(run = False), \n\t\t\t\tsrc_test(run = False) \n\t\t\t]\n\t\t]", "def test_multiple_drivers(self):\n\n self.multiple_drivers_helper(\"install\")\n self.multiple_drivers_helper(\"exact-install\")", "def test_list_group(self):\n pass", "def test_walk_modules():\n modules = make_test_modules()\n args = MockedCommandLineArgs()\n\n expected_output = [\n (0, '01_section1',\n 0, normpath('test_class/01_section1/01_module1'),\n 0, 'lecture1', normpath('test_class/01_section1/01_module1/01_lecture1_title.en.txt'),\n 'https://api.coursera.org/api/test-url')]\n collected_output = []\n\n for module, section, lecture, resource in _walk_modules(\n modules=modules, class_name='test_class',\n path='', ignored_formats=None, args=args):\n\n collected_output.append(\n (module.index, module.name,\n section.index, section.dir,\n lecture.index, lecture.name, lecture.filename(resource.fmt, resource.title),\n resource.url)\n )\n\n assert expected_output == collected_output", "def test_get_scenarios(self):\n pass", "def test_get_scenarios_expanded(self):\n pass", "def test_sort_cards(input, result):\n from sort_cards import sort_cards\n assert sort_cards(input) == result", "def after_test(self, test_results):\n pass", "def test_group_files():", "def test_cmp_multiple_by_id_tagged(self, get_compiler):\n get_compiler.compile(\"tasks.multiple\")\n assert get_compiler.tasklist\n\n calls = []\n for task in get_compiler.tasklist:\n for call in task.calls:\n calls.append(call)\n assert calls\n\n tests = (\n {\"module\": \"file\", \"function\": \"managed\",\n \"args\": [\"/etc/hosts\"], \"kwargs\": {\"src\": \"sugar://hosts\"}},\n )\n for call, test in zip(calls[8:9], tests):\n assert call.module == test[\"module\"]\n assert call.function == test[\"function\"]\n assert call.args == test[\"args\"]\n assert call.kwargs == test[\"kwargs\"]", "def process_module_list(self, modules):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test _arrange_test_result method with multiple runners.
def test_arrange_test_result_multi_runner(self): runner_a_pass_1 = self._create_test_result(runner_name='runner_a', status=test_runner_base.PASSED_STATUS) runner_a_pass_2 = self._create_test_result(runner_name='runner_a', status=test_runner_base.PASSED_STATUS) runner_a_pass_3 = self._create_test_result(runner_name='runner_a', status=test_runner_base.PASSED_STATUS) runner_b_fail_1 = self._create_test_result(runner_name='runner_b', status=test_runner_base.FAILED_STATUS) runner_b_fail_2 = self._create_test_result(runner_name='runner_b', status=test_runner_base.FAILED_STATUS) runner_b_ignore_1 = self._create_test_result(runner_name='runner_b', status=test_runner_base.IGNORED_STATUS) reporter_1 = result_reporter.ResultReporter() reporter_1.all_test_results.extend([runner_a_pass_1, runner_a_pass_2, runner_a_pass_3]) reporter_2 = result_reporter.ResultReporter() reporter_2.all_test_results.extend([runner_b_fail_1, runner_b_fail_2, runner_b_ignore_1]) info_dict = {} aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2]) expect_group_a_summary = {aei._STATUS_IGNORED_KEY : 0, aei._STATUS_FAILED_KEY : 0, aei._STATUS_PASSED_KEY : 3} self.assertEqual( expect_group_a_summary, info_dict[aei._TEST_RUNNER_KEY]['runner_a']['someModule'][aei._SUMMARY_KEY]) expect_group_b_summary = {aei._STATUS_IGNORED_KEY : 1, aei._STATUS_FAILED_KEY : 2, aei._STATUS_PASSED_KEY : 0} self.assertEqual( expect_group_b_summary, info_dict[aei._TEST_RUNNER_KEY]['runner_b']['someModule'][aei._SUMMARY_KEY]) expect_total_summary = {aei._STATUS_IGNORED_KEY : 1, aei._STATUS_FAILED_KEY : 2, aei._STATUS_PASSED_KEY : 3} self.assertEqual(expect_total_summary, info_dict[aei._TOTAL_SUMMARY_KEY])
[ "def test_arrange_test_result_multi_module(self):\n group_a_pass_1 = self._create_test_result(group_name='grpup_a',\n status=test_runner_base.PASSED_STATUS)\n group_b_pass_1 = self._create_test_result(group_name='grpup_b',\n status=test_runner_base.PASSED_STATUS)\n group_c_pass_1 = self._create_test_result(group_name='grpup_c',\n status=test_runner_base.PASSED_STATUS)\n group_b_fail_1 = self._create_test_result(group_name='grpup_b',\n status=test_runner_base.FAILED_STATUS)\n group_c_fail_1 = self._create_test_result(group_name='grpup_c',\n status=test_runner_base.FAILED_STATUS)\n group_c_ignore_1 = self._create_test_result(group_name='grpup_c',\n status=test_runner_base.IGNORED_STATUS)\n reporter_1 = result_reporter.ResultReporter()\n reporter_1.all_test_results.extend([group_a_pass_1, group_b_pass_1, group_c_pass_1])\n reporter_2 = result_reporter.ResultReporter()\n reporter_2.all_test_results.extend([group_b_fail_1, group_c_fail_1, group_c_ignore_1])\n\n info_dict = {}\n aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])\n expect_group_a_summary = {aei._STATUS_IGNORED_KEY : 0,\n aei._STATUS_FAILED_KEY : 0,\n aei._STATUS_PASSED_KEY : 1}\n self.assertEqual(\n expect_group_a_summary,\n info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_a'][aei._SUMMARY_KEY])\n\n expect_group_b_summary = {aei._STATUS_IGNORED_KEY : 0,\n aei._STATUS_FAILED_KEY : 1,\n aei._STATUS_PASSED_KEY : 1}\n self.assertEqual(\n expect_group_b_summary,\n info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_b'][aei._SUMMARY_KEY])\n\n expect_group_c_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 1,\n aei._STATUS_PASSED_KEY : 1}\n self.assertEqual(\n expect_group_c_summary,\n info_dict[aei._TEST_RUNNER_KEY]['someRunner']['grpup_c'][aei._SUMMARY_KEY])\n\n expect_total_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(expect_total_summary, info_dict[aei._TOTAL_SUMMARY_KEY])", "def test_arrange_test_result_one_module(self):\n pass_1 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n pass_2 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n pass_3 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n fail_1 = self._create_test_result(status=test_runner_base.FAILED_STATUS)\n fail_2 = self._create_test_result(status=test_runner_base.FAILED_STATUS)\n ignore_1 = self._create_test_result(status=test_runner_base.IGNORED_STATUS)\n reporter_1 = result_reporter.ResultReporter()\n reporter_1.all_test_results.extend([pass_1, pass_2, pass_3])\n reporter_2 = result_reporter.ResultReporter()\n reporter_2.all_test_results.extend([fail_1, fail_2, ignore_1])\n info_dict = {}\n aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])\n expect_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(expect_summary, info_dict[aei._TOTAL_SUMMARY_KEY])", "def test_query_task_multi_results(self):\n pass", "def _run(self, results):\n for test in results.keys():\n failmsg = \"Failure in \"+test+\"(\"+str(ttraj[test])+\"): \"\n self._single_test(self.ensemble, ttraj[test], results[test], failmsg)\n\n wrapped = wrap_traj(ttraj[test], self.wrapstart, self.wrapend)\n lentt = len(ttraj[test])\n\n failmsg = \"Failure in wrapped \"+test+\"(\"+str(ttraj[test])+\"): \"\n self._single_test(self.ensemble, wrapped, results[test], failmsg)\n\n failmsg = \"Failure in slice_ens \"+test+\"(\"+str(ttraj[test])+\"): 
\"\n self._single_test(self.slice_ens, wrapped, results[test], failmsg)", "def test_jobs_results(self):\n pass", "def execute_testsets(testsets):\n group_results = dict() #results, by group\n group_failure_counts = dict()\n total_failures = 0\n myinteractive = False\n\n for testset in testsets:\n mytests = testset.tests\n myconfig = testset.config\n mybenchmarks = testset.benchmarks\n\n #Make sure we actually have tests to execute\n if not mytests and not mybenchmarks:\n # no tests in this test set, probably just imports.. skip to next test set\n break\n\n myinteractive = True if myinteractive or myconfig.interactive else False\n\n #Run tests, collecting statistics as needed\n for test in mytests:\n #Initialize the dictionaries to store test fail counts and results\n if test.group not in group_results:\n group_results[test.group] = list()\n group_failure_counts[test.group] = 0\n\n result = run_test(test, test_config = myconfig)\n result.body = None # Remove the body, save some memory!\n\n if not result.passed: #Print failure, increase failure counts for that test group\n logging.error('Test Failed: '+test.name+\" URL=\"+test.url+\" Group=\"+test.group+\" HTTP Status Code: \"+str(result.response_code))\n\n if test.validators is not None:\n for validator in test.validators:\n if validator.passed == False:\n logging.warning(\" Validation Failed: \" + str(validator))\n\n #Increment test failure counts for that group (adding an entry if not present)\n failures = group_failure_counts[test.group]\n failures = failures + 1\n group_failure_counts[test.group] = failures\n\n else: #Test passed, print results\n logging.info('Test Succeeded: '+test.name+\" URL=\"+test.url+\" Group=\"+test.group)\n\n #Add results for this test group to the resultset\n group_results[test.group].append(result)\n\n # handle stop_on_failure flag\n if not result.passed and test.stop_on_failure is not None and test.stop_on_failure:\n print 'STOP ON FAILURE! 
stopping test set execution, continuing with other test sets'\n break\n\n for benchmark in mybenchmarks: # Run benchmarks, analyze, write\n if not benchmark.metrics:\n logging.debug('Skipping benchmark, no metrics to collect')\n continue\n\n logging.info(\"Benchmark Starting: \"+benchmark.name+\" Group: \"+benchmark.group)\n curl = configure_curl(benchmark, myconfig)\n benchmark_result = run_benchmark(curl, benchmark, myconfig)\n print benchmark_result\n logging.info(\"Benchmark Done: \"+benchmark.name+\" Group: \"+benchmark.group)\n\n if benchmark.output_file: # Write file\n write_method = OUTPUT_METHODS[benchmark.output_format]\n my_file = open(benchmark.output_file, 'w') # Overwrites file\n logging.debug(\"Benchmark writing to file: \" + benchmark.output_file)\n write_method(my_file, benchmark_result, benchmark, test_config = myconfig)\n my_file.close()\n\n if myinteractive:\n # a break for when interactive bits are complete, before summary data\n print \"===================================\"\n\n #Print summary results\n for group in sorted(group_results.keys()):\n test_count = len(group_results[group])\n failures = group_failure_counts[group]\n total_failures = total_failures + failures\n if (failures > 0):\n print u'Test Group '+group+u' FAILED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'\n else:\n print u'Test Group '+group+u' SUCCEEDED: '+ str((test_count-failures))+'/'+str(test_count) + u' Tests Passed!'\n\n return total_failures", "def after_test(self, test_results):\n pass", "def test_1_uut2(executed_tests, test_result, uut, uut2):\n executed_tests.append(1)\n assert test_result", "def test_get_results(self):\n pass", "def test_result_group_can_be_sorted_by_other_metrics(\n self, result_group_roc: ResultGroup, result_1: Result, result_2: Result\n ):\n assert result_group_roc.results == [result_1, result_2]", "def test_2_uut2(executed_tests, test_result, uut, uut2):\n executed_tests.append(2)\n assert test_result", "def perform_all_test(self, debug=False):\n result_list = self.perform_all_test_no_reduce(debug)\n return self.reduce_test_result(result_list)", "def test_list_runs(self):\n pass", "def _handler_test_run_tests(self, *args, **kwargs):\n next_state = None\n result = None\n\n tc_pass = False\n tt_pass = False\n tp_pass = False\n tc_result = None\n tt_result = None\n tp_result = None\n\n test_result = {}\n\n try:\n tc_pass, tc_result = self._do_cmd_resp('tc', timeout=200)\n tt_pass, tt_result = self._do_cmd_resp('tt', timeout=200)\n tp_pass, tp_result = self._do_cmd_resp('tp', timeout=200)\n \n except Exception as e:\n test_result['exception'] = e\n test_result['message'] = 'Error running instrument tests.'\n \n finally:\n test_result['cond_test'] = 'Passed' if tc_pass else 'Failed'\n test_result['cond_data'] = tc_result\n test_result['temp_test'] = 'Passed' if tt_pass else 'Failed'\n test_result['temp_data'] = tt_result\n test_result['pres_test'] = 'Passed' if tp_pass else 'Failed'\n test_result['pres_data'] = tp_result\n test_result['success'] = 'Passed' if (tc_pass and tt_pass and tp_pass) else 'Failed'\n \n self._driver_event(DriverAsyncEvent.TEST_RESULT, test_result)\n next_state = SBE37ProtocolState.COMMAND\n \n return (next_state, result)", "def test_executor() -> None:\n for test in TESTS:\n assert executor(test[0]) == test[1]", "def test_batch(self):\n pass", "async def compare_result(self, query_type, test, result):\n if self.per_test_insertion:\n expected = self._translate_column_names(test['expected'])\n else:\n expected = 
test['expected']\n\n for key, value in expected.items():\n if value == 'ignore':\n continue\n\n if not isinstance(result, dict) or key not in result:\n self.num_fails += 1\n self.failed_tests.append(test['name'])\n\n print(' Expected: \"{}\": {}'.format(key, value))\n print(' Result: \"{}\": {}'.format(key, result))\n print(' Status: Failed')\n return\n\n if not self.compare_values(value, result[key]):\n time.sleep(self.sleep_time * 3)\n query_ = test['query']\n if isinstance(query_, dict):\n query_.update({\"bypass-cache\": True})\n try:\n result2 = await self.execute_query(query_type, test)\n result2 = ujson.loads(result2)\n if self.compare_values(value, result2[key]):\n print(\" Passed at second try\")\n continue\n except SlicingDiceException as e:\n print(str(e))\n\n self.num_fails += 1\n self.failed_tests.append(test['name'])\n\n print(' Expected: \"{}\": {}'.format(key, value))\n print(' Result: \"{}\": {}'.format(key, result[key]))\n print(' Status: Failed')\n return\n\n self.num_successes += 1\n\n print(' Status: Passed')", "def test_multi_step_run(self, client):\n workflow_name = 'test multistep workflow run'\n specification = Yaml.serialize({\n 'name': workflow_name,\n 'entry': 'step-0',\n 'steps': {\n 'step-0': {\n 'operation': 'flux:test-operation',\n 'postoperation': [{\n 'actions': [{\n 'action': 'execute-step',\n 'step': 'step-1',\n }],\n }],\n },\n 'step-1': {\n 'operation': 'flux:test-operation',\n 'postoperation': [{\n 'actions': [{\n 'action': 'execute-step',\n 'step': 'step-2',\n }],\n }],\n },\n 'step-2': {\n 'operation': 'flux:test-operation',\n },\n },\n })\n resp1 = self._setup_workflow(client, workflow_name,\n specification=specification)\n self.assertEqual('OK', resp1.status)\n workflow_id = resp1.content['id']\n\n resp = self._setup_run(client, workflow_id)\n self.assertEqual('OK', resp.status)\n run_id = resp.content['id']\n\n result = self._poll_run_status(client, run_id, 'completed', include=['executions'])\n\n run_ended = result.pop('ended')\n run_started = result.pop('started')\n self.assertTrue(run_ended >= run_started)\n\n ancestor_ids = []\n for execution in result['executions']:\n ancestor_ids.append(execution.pop('id'))\n execution_ended = execution.pop('ended')\n execution_started = execution.pop('started')\n\n self.assertTrue(execution_ended >= execution_started)\n self.assertTrue(execution_ended >= run_started)\n self.assertTrue(run_ended >= execution_started)\n\n expected = {\n 'id': run_id,\n 'name': workflow_name,\n 'parameters': None,\n 'workflow_id': workflow_id,\n 'products': {},\n 'status': 'completed',\n 'executions': [\n {\n 'execution_id': 1,\n 'ancestor_id': None,\n 'step': 'step-0',\n 'name': 'Test Operation',\n 'status': 'completed',\n },\n {\n 'execution_id': 2,\n 'ancestor_id': ancestor_ids[0],\n 'step': 'step-1',\n 'name': 'Test Operation',\n 'status': 'completed',\n },\n {\n 'execution_id': 3,\n 'ancestor_id': ancestor_ids[1],\n 'step': 'step-2',\n 'name': 'Test Operation',\n 'status': 'completed',\n },\n ]\n }\n self.assertEquals(expected, result)", "def run_tests(self):\n results = None\n for test in self.testList:\n results = test.run_tests()\n if test.parameters in self.testRecords:\n self.testRecords[test.parameters].append(results)\n else:\n self.testRecords[test.parameters] = results\n\n return results" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A Helper to create TestResult
def _create_test_result(self, **kwargs):
    test_info = test_runner_base.TestResult(**RESULT_TEST_TEMPLATE._asdict())
    return test_info._replace(**kwargs)
[ "def _makeResult(self):\n return TestResultAdaptor(self.test_results, self.stream, self.descriptions, self.verbosity)", "def _makeResult(self):\r\n return _XMLTestResult(self.stream, self.descriptions, \\\r\n self.verbosity, self.elapsed_times)", "def test_make_result( self ):\n sentenceIndex = 3\n wordIndex = 2\n text = 'taco'\n id = 123456789\n result = self.object.make_result( sentenceIndex, wordIndex, text, id )\n\n self.assertIsInstance( result, Result )\n self.assertEqual( result.sentence_index, sentenceIndex )\n self.assertEqual( result.word_index, wordIndex )\n self.assertEqual( result.text, text )\n self.assertEqual( result.id, id )\n self.assertEqual( result.type, 'user', \"Result is correct type\" )", "def _makeResult(self):\n\n result = super(CustomTextTestRunner, self)._makeResult()\n result.test_case_count = self.test_case_count\n return result", "def test_result(self):\n return self._test_result", "def create_result_entry(self, job_id, test_id, test):", "def create_success(test, time):\n return _TestInfo(test, time)", "def create_result(self, **kwargs):\r\n defaults = {\r\n \"tester\": self.user,\r\n \"environment\": self.envs[0]\r\n }\r\n defaults.update(kwargs)\r\n if \"runcaseversion\" not in defaults:\r\n defaults[\"runcaseversion\"] = self.create_rcv()\r\n return self.F.ResultFactory.create(**defaults)", "def create_test_result(\n cls,\n result: bool,\n reason: str=None,\n text_tokens: Union[Tuple[Any], List[Any], Set[Any]]=(),\n tooltip: Union[int, str, CommonLocalizationUtils.LocalizedTooltip]=None,\n tooltip_tokens: Iterator[Any]=(),\n icon=None,\n influence_by_active_mood: bool=False\n ) -> CommonTestResult:\n return CommonTestResult(\n result,\n reason=reason.format(*text_tokens) if reason is not None else reason,\n tooltip_text=tooltip,\n tooltip_tokens=tooltip_tokens,\n icon=icon,\n influenced_by_active_mood=influence_by_active_mood\n )", "def test_createResultType(self):\n line = \"Output Folder == {}\".format(os.path.join('..', 'results'))\n data = f.TuflowFactory.createResultType(line, self.parent)[0]\n self.assertIsInstance(data, TuflowPart)\n self.assertIsInstance(data, TuflowFile)\n self.assertIsInstance(data, ResultFile)\n self.assertEqual(data.obj_type, 'result')\n self.assertEqual(data.filename, '')\n self.assertEqual(data.extension, '')\n self.assertEqual(data.relative_root, os.path.join('..', 'results') + os.sep)\n self.assertEqual(data.root, self.fake_root)\n self.assertEqual(data.filenameAndExtension(), '')\n\n line = \"Write Check Files == {}\".format(os.path.join('..', 'checks') + os.sep)\n data = f.TuflowFactory.createResultType(line, self.parent)[0]\n self.assertIsInstance(data, TuflowPart)\n self.assertIsInstance(data, TuflowFile)\n self.assertIsInstance(data, ResultFile)\n self.assertEqual(data.obj_type, 'result')\n self.assertEqual(data.filename, '')\n self.assertEqual(data.extension, '')\n self.assertEqual(data.relative_root, os.path.join('..', 'checks') + os.sep)\n self.assertEqual(data.root, self.fake_root)\n self.assertEqual(data.filenameAndExtension(), '')\n# self.assertTrue(data.filename_is_prefix)\n\n line = \"Log Folder == log\"\n data = f.TuflowFactory.createResultType(line, self.parent)[0]\n self.assertIsInstance(data, TuflowPart)\n self.assertIsInstance(data, TuflowFile)\n self.assertIsInstance(data, ResultFile)\n self.assertEqual(data.obj_type, 'result')\n self.assertEqual(data.filename, '')\n self.assertEqual(data.extension, '')\n self.assertEqual(data.relative_root, 'log' + os.sep)\n self.assertEqual(data.root, 
self.fake_root)\n self.assertEqual(data.filenameAndExtension(), '')", "def test_get_results(self):\n pass", "def _create_result(\n self, raw_data: T, processed_data: List[TestGroupReport]\n ) -> ImportedResult:\n raise NotImplementedError", "def get_result(test: TestRun):\n\n try:\n results = test.results\n results['results_log'] = test.results_log.as_posix()\n\n except (TestRunError, TestRunNotFoundError) as err:\n results = {'id': test.full_id}\n for field in BASE_FIELDS[1:]:\n results[field] = None\n\n results['result'] = \"Test not found: {}\".format(err)\n\n return results", "def _CreateTestResult(self) -> paranoid_pb2.TestResultsEntry:\n\n if self.severity is None:\n raise KeyError(\"Please specify self.severity for %s.\" % self.check_name)\n return paranoid_pb2.TestResultsEntry(\n severity=self.severity, test_name=self.check_name, result=False)", "def get_test_result(self):\n failure_info_list = self._outcome.errors[1][1]\n if not failure_info_list:\n return 'OK'\n elif failure_info_list[0].__name__ == 'AssertionError':\n return 'FAIL'\n else: # 'NameError'\n return 'ERROR'", "def _makeResult(self):\r\n self.setResult(None) # delete existing result\r\n self.setResult(testresult.MarbleTestResult())", "def test_result_summary_specific(self):\r\n rcv = self.F.RunCaseVersionFactory()\r\n self.F.ResultFactory(runcaseversion=rcv, status=\"passed\")\r\n\r\n rcv2 = self.F.RunCaseVersionFactory()\r\n\r\n self.assertEqual(\r\n rcv2.result_summary(),\r\n {\r\n \"passed\": 0,\r\n \"failed\": 0,\r\n \"blocked\": 0,\r\n \"invalidated\": 0,\r\n }\r\n )", "def test_get_results(tfg):\n\n assert tfg.get_results()", "def test_formatResult(self):\r\n x = self.FWP({'x': 3})\r\n self.assertEqual(x.formatResult(3), '3')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
generate a list of viable coordinates for mines, and randomly choose them.
def generate_mines(self, number):
    mine_locations = []
    available_places = [[j, i]
                        for i in xrange(0, self.x) for j in xrange(0, self.y)]
    while number > 0:
        # the chosen coordinate for a mine is appended into the list and is
        # removed from the list of choices to prevent duplicates.
        choice = random.choice(available_places)
        available_places.remove(choice)
        mine_locations.append(choice)
        number -= 1
    return mine_locations
[ "def random_coords(coord_list: typing.List[Coordinate]) -> Coordinate:\n return random.choice(coord_list)", "def generate_special_coords(self):\n special_coord = []\n for _ in range(2):\n coord = random.randint(1, 7)\n while coord == 3:\n coord = random.randint(1, 7)\n special_coord.append(coord)\n return special_coord", "def get_starting_positions_for_players(self, qty):\n positions = []\n for i in range(qty):\n x = random.choice(range(self.w))\n y = random.choice(range(self.h))\n while self.out_of_bounds(x, y) or (x,y) in positions:\n x = random.choice(range(self.w))\n y = random.choice(range(self.h))\n positions.append((x,y))\n\n return positions", "def random_coordinates():\n return Coordinates(random.randint(0, 14), random.randint(0, 14))", "def coordinates_generator(total, min_x, max_x, min_y, max_y):\n coordinates = []\n for i in range(total):\n x = randint(min_x, max_x)\n y = randint(min_y, max_y)\n coordinates.append((x, y))\n return coordinates", "def add_mines(self):\n for x, y in sample(list(itertools.product(range(self.width), range(self.height))), self.num_mines):\n self.grid[y][x] = self.mine", "def get_random_coordinates(self):\n array_shape = np.shape(self.cells) # type: tuple\n points_on_island = []\n for i in range(1, array_shape[0] - 1):\n for j in range(1, array_shape[1] - 1):\n points_on_island.append((i, j))\n random.shuffle(points_on_island)\n return points_on_island", "def seed_mines(self, cell_clicked):\r\n self._mine_locs = [] # array to keep track of mine locations\r\n rem_mines = self._num_mines\r\n while rem_mines > 0:\r\n # choose a random location\r\n random_loc = (random.randrange(self._height),\r\n random.randrange(self._width))\r\n if random_loc == cell_clicked:\r\n # don't put a mine in the cell clicked\r\n continue\r\n elif self._minefield[random_loc[0]][random_loc[1]] == 9:\r\n # skip the cell if there's already a mine there\r\n continue\r\n else:\r\n # place a mine\r\n self._minefield[random_loc[0]][random_loc[1]] = 9\r\n self._mine_locs.append(random_loc)\r\n rem_mines -= 1\r\n self._mine_locs = sorted(self._mine_locs)", "def generate_positions_by_minimum_distance(shape, num_bots, min_distance):\n x_pos = []\n y_pos = []\n \n x_points = list(np.arange(10, shape[0] - 10))\n y_points = list(np.arange(10, shape[1] - 10))\n while len(x_pos) < num_bots and len(x_points) > 0 and len(y_points) > 0:\n ind_x = random.choice(range(len(x_points))) #select random point\n ind_y = random.choice(range(len(y_points)))\n x = x_points.pop(ind_x)\n y = y_points.pop(ind_y)\n minimum = float('inf') #keep track of minimum distance\n for point in range(len(x_pos)):\n distance = sqrt((x_pos[point] - x)**2 + (y_pos[point] - y)**2) \n if distance < minimum:\n minimum = distance\n if minimum <= min_distance:\n continue\n else:\n x_pos.append(x)\n y_pos.append(y)\n if len(x_pos) < num_bots:\n print \"SHIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIi\", zip(x_pos, y_pos)\n return None\n else:\n #print \"Remaining points: %s %s\" % (len(x_points), len(y_points))\n return zip(x_pos, y_pos)", "def _random_coords(self):\n coords = array(range(0, self.d))\n shuffle(coords)\n return coords", "def place_mines(board_size, num_mines):\n mines_placed = 0\n board = np.zeros((board_size, board_size), dtype=int)\n while mines_placed < num_mines:\n rnd = randint(0, board_size * board_size)\n x = int(rnd / board_size)\n y = int(rnd % board_size)\n if is_valid(x, y):\n if not is_mine(board, x, y):\n board[x, y] = MINE\n mines_placed += 1\n return board", "def __get_random_hotspot(self):\n x_min = 
self.occupancy_map.info.origin.position.x\n x_max = x_min + self.occupancy_map.info.width * self.occupancy_map.info.resolution\n y_min = self.occupancy_map.info.origin.position.y\n y_max = y_min + self.occupancy_map.info.height * \\\n self.occupancy_map.info.resolution\n # This might bes a bit strange, but we have the following problem:\n # some simulators need a square version of the same map. A square version\n # will have other x_max or y_max and thus the random hotspots will be different.\n # TO prevent this, we will always take only the max value of either x_max or y_max.\n # This will be the same for the square version and the not-square version (of the same map).\n max_value = max(x_max, y_max)\n\n # search for a not occupied position\n while True:\n # previously: x = random.uniform(x_min, x_max) # see problem description above\n x = random.uniform(x_min, max_value)\n # previously: y = random.uniform(y_min, y_max) # see problem description above\n y = random.uniform(y_min, max_value)\n # due to the workaround for the problem above, it can be that the value is out\n # of map for the not square map version. We need to skip this (the square\n # map version will skip it due to occupied cell...):\n if x <= x_max and y <= y_max:\n cell_x = min(int(\n (x - x_min) / self.occupancy_map.info.resolution), self.occupancy_map.info.width - 1)\n cell_y = min(int(\n (y - y_min) / self.occupancy_map.info.resolution), self.occupancy_map.info.height - 1)\n if not self.__cell_is_occupied(cell_x, cell_y):\n break\n spread = random.uniform(0.5, 1.0)\n return (x, y, spread)", "def Generate_Random( self ):\n print( 'Generating Random coordinates' )\n stands = self.Data.Stand.keys()\n stands.sort()\n for s in stands:\n trees = self.Data.Stand[s].Tree.keys()\n trees.sort()\n for t in trees:\n self.Data.Stand[s].Tree[t].X = random.uniform( 0, 208.71 )\n self.Data.Stand[s].Tree[t].Y = random.uniform( 0, 208.71 )", "def __pos_mines(self):\n mines_pos_1d = np.random.choice(self.n_grid ** 2, self.n_mines, replace=False)\n mines_pos = list(map(lambda x: divmod(x, self.n_grid), mines_pos_1d))\n for r, c in mines_pos:\n self._mine_map.curr[r][c] = -1\n for di, dj in self.neighbors:\n i, j = r + di, c + dj\n if i < 0 or i >= self.n_grid or j < 0 or j >= self.n_grid:\n continue\n elif self._mine_map.curr[i][j] == -1:\n continue\n else:\n # adding one adjacent bomb\n self._mine_map.curr[i][j] += 1", "def place_mines(self):\n total_mines_to_place = MINES_IN_GAME\n while total_mines_to_place > 0:\n rand_x = random.choice(range(GRID_DIMS))\n rand_y = random.choice(range(GRID_DIMS))\n if self.mine_board[rand_x][rand_y] != 'M':\n self.mine_board[rand_x][rand_y] = 'M'\n total_mines_to_place -= 1\n self.add_hint_numbers(rand_x, rand_y)", "def generateCoordinate():\n # return 2 * random() - 1\n return random.uniform(0.00001, 1.00001)", "def sample_random_position(self):\n return np.random.uniform(low=MIN_GOAL_COORDS, high=MAX_GOAL_COORDS)", "def generate_random_locations(self, location_num: int) -> List[Tuple[int, int]]:\n locations = list()\n for i in range(location_num):\n rows, cols = self.grid_size()\n new_place = (random.randint(0, rows-1), random.randint(0, cols-1))\n if new_place not in locations:\n locations.append(new_place)\n return locations", "def generate(self):\n coordslist = []\n allpositions = []\n\n aiwords = self.choosewords()\n\n while self.aiboats != []: #loops until all ships have been placed\n row, column = randrange(1,10), randrange(1,10)\n orientation = bool(getrandbits(1)) #orientation is either True 
or False and chosen at random\n aiships = []\n currentpositions = []\n \n for i in range(self.aiboats[0]):\n if orientation and row + self.aiboats[0] < 10:\n currentpositions.append(str(row + i) + str(column))\n elif not orientation and column + self.aiboats[0] < 10:\n currentpositions.append(str(row) + str(column + i))\n\n if not set(allpositions).intersection(set(currentpositions)):\n #intersection() checks if any item from set 2 is in set 1. if that is not the case it returns False\n for i, position in enumerate(currentpositions):\n allpositions.append(position)\n aiships.append(position + aiwords[0][i])\n\n if aiships != []:\n for pos in aiships:\n coordslist.append(pos)\n self.aiboats.remove(self.aiboats[0])\n aiwords.remove(aiwords[0])\n\n return coordslist" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Open neighbours if the flag number matches the count.
def special_open_neighbours(self, y, x):
    if self.table_state[y][x] != "-" and self.table_state[y][x] == self.flags_nearby(y, x):
        l = [[ye, xe] for xe in range(
            x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0]
        for ye, xe in l:
            if xe >= self.x or ye >= self.y:  # do not open out of bounds
                continue
            # if it is a bomb but not flagged
            if self.final_table[ye][xe] == Minesweeper.BOMB and self.table_state[ye][xe] != Minesweeper.FLAG:
                self.show_answer_board([ye, xe])
                print "KABOOM!"
                return Minesweeper.IS_A_BOMB
        self.open_neighbours(y, x)
        self.print_table(self.table_state)
        return Minesweeper.NOT_A_BOMB
[ "def count_neighbor_flags(self, i, j):\n return np.count_nonzero(self.flags[(i-1 if i > 0 else 0):i+2, (j-1 if j > 0 else 0):j+2])", "def closedIsland1(self, grid: List[List[int]]) -> int:\n dic = [(1, 0), (0, 1), (-1, 0), (0, -1)]\n m, n = len(grid), len(grid[0])\n count = 0\n\n def dfs(x, y):\n grid[x][y] = 1\n if x in {0, m - 1} or y in {0, n - 1}:\n self.flag = 0\n\n for p in dic:\n a, b = p\n # 下面必须写成x+a和y+b,不能写成x和y,否则边缘上的数据遍历不到\n if 0 <= x + a < m and 0 <= y + b < n and grid[x + a][y + b] == 0:\n dfs(x + a, y + b)\n\n for i in range(m):\n for j in range(n):\n if grid[i][j] == 0:\n self.flag = 1\n dfs(i, j)\n count += self.flag\n\n return count", "def open_neighbours(self, y, x):\n if [y, x] in self.mine_locations:\n return [y, x]\n # generate neighbours with positive indexes\n l = [[ye, xe] for xe in range(\n x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0]\n for ye, xe in l:\n # if the indexes are out of the game table, skip\n if xe >= self.x or ye >= self.y:\n continue\n # if the current coordinates are still untouched, update their values\n if self.table_state[ye][xe] == '-':\n self.table_state[ye][xe] = self.final_table[ye][xe]\n # if the coordinate has a value of 0, recursively open it's neighbours.\n if self.final_table[ye][xe] == '0':\n self.open_neighbours(ye, xe)", "def findNeighbor(grid, x, y):\n count = 0\n\n if x > 0 and grid[x-1][y] == 1:\n count += 2\n if y > 0 and grid[x][y-1] == 1:\n count += 2\n return count", "def _find_openings(self) -> List[List[Coord_T]]:\r\n openings = []\r\n blanks_to_check = {\r\n c for c in self.all_coords if self.completed_board[c] == CellNum(0)\r\n }\r\n while blanks_to_check:\r\n orig_coord = blanks_to_check.pop()\r\n # If the coordinate is part of an opening and hasn't already been\r\n # considered, start a new opening.\r\n opening = {orig_coord} # Coords belonging to the opening\r\n check = {orig_coord} # Coords whose neighbours need checking\r\n while check:\r\n coord = check.pop()\r\n nbrs = set(self.get_nbrs(coord))\r\n check |= {\r\n c for c in nbrs - opening if self.completed_board[c] == CellNum(0)\r\n }\r\n opening |= nbrs\r\n openings.append(sorted(opening))\r\n blanks_to_check -= opening\r\n return openings", "def numIslands(grid):\n # count to store each new island found\n count = 0\n # If the grid is empty, return 0\n if not grid:\n return count\n\n y_max = len(grid)\n x_max = len(grid[0])\n \n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == '1':\n dfs(grid, i, j)\n count += 1\n return count", "def testNeighbours(self):\n test_zeros = np.zeros((3,3))\n test_ones = np.ones((3,3))\n\n self.assertEqual(count_live(test_zeros, (1, 1)), 0)\n self.assertEqual(count_live(test_ones, (1, 1)), 8)", "def obstacle_count(self):\n self.wide_scan(count = 6)\n found_something = False\n counter = 0\n threshold = 50\n for distance in self.scan:\n if distance and distance < threshold and not found_something:\n found_something = True\n counter +=1\n print (\"\\n----Object # %d found, I think----\\n\" % counter)\n if distance and distance > threshold and found_something:\n found_something = False\n counter += 0\n print(\"\\n----I see %d objects----\\n\" % counter)\n return counter", "def count_neighbours(self, x, y, stop_at=8):\n possible_locations = [\n (x - 1, y - 1),\n (x, y - 1),\n (x + 1, y - 1),\n (x - 1, y),\n (x + 1, y),\n (x - 1, y + 1),\n (x, y + 1),\n (x + 1, y + 1),\n ]\n\n count = 0\n for x, y in possible_locations:\n if self.seat_occupied(x, y):\n count += 1\n if count >= stop_at:\n 
break\n return count", "def count_alive_neighbors(grid, x, y):\n height = len(grid)\n width = len(grid[0])\n alive_count = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n neighbor_x = x + i\n neighbor_y = y + j\n if i == 0 and j == 0:\n continue\n elif neighbor_x < 0 or neighbor_y < 0 or neighbor_y >= height or neighbor_x >= width:\n # Edges are considered alive. Makes map more likely to appear naturally closed.\n alive_count += 1\n elif grid[neighbor_y][neighbor_x] == 1:\n alive_count += 1\n return alive_count", "def add_to_open(open, neighbour):\n for node in open:\n if neighbour == node and neighbour.f >= node.f:\n # Will not add if there already exists the same node in open that has lower f value\n return False\n\n return True", "def _mark_neighbors(self, cell):\n width, height = self.width, self.height\n x, y = cell\n for xoff, yoff in [(-1,-1), (-1,0), (-1,1), (0,-1), (0,1), (1,-1), (1, 0), (1,1)]:\n nx, ny = (x + xoff) % width, (y + yoff) % height\n if (nx,ny) in self.neighbors:\n self.neighbors[(nx,ny)] += 1\n else:\n self.neighbors[(nx,ny)] = 1", "def obstacle_count(self):\n self.wide_scan()\n found_something = False\n counter = 0\n for distance in self.scan:\n if distance and distance < 200 and not found_something:\n found_something = True\n counter += 1\n print(\"Object # %d found, I think\" % counter)\n if distance and distance > 200 and found_something:\n found_something = False\n print(\"\\n----I SEE %d OBJECTS----\\n\" % counter)", "def n_neighbors(self,n):\n return sum(1 for x in self.hex.get_neighbors_ring(n) if x is not None and x.is_occupied == 1)", "def checkNeighbours(data):\n features = 0\n background = 0\n neighbours = [data[0,0],data[0,1],data[0,2],data[1,2],data[2,2],data[2,1],data[2,0],data[1,0]]\n fourConnected = False\n lastPoint = neighbours[-1] #Needed for checking a complete transition cycle\n for n in neighbours:\n if not n:\n features += 1\n elif fourConnected:\n background += 1\n\n fourConnected = not fourConnected\n lastPoint = n\n\n for pos,corner in enumerate(corners):\n if numpy.alltrue(data == corner):\n cornerPos = pos+1\n break\n else:\n cornerPos = 0\n return (features,background,cornerPos)", "def check_neighbours(self, i,j):\r\n \r\n if (i == 0):\r\n if (self.objects[i+1, j] == 1 or self.objects[i, j+1] == 1 or self.objects[i, j-1] == 1):\r\n return True\r\n elif (i == self.N-1):\r\n if (self.objects[i-1, j] == 1 or self.objects[i, j+1] == 1 or self.objects[i, j-1] == 1):\r\n return True\r\n elif (j == 0):\r\n if (self.objects[i+1, j] == 1 or self.objects[i-1, j] == 1 or self.objects[i, j+1] == 1 or self.objects[i, self.N-1] == 1) :\r\n return True\r\n elif (j == self.N-1):\r\n if (self.objects[i+1, j] == 1 or self.objects[i-1, j] == 1 or self.objects[i, j-1] == 1 or self.objects[i, 0] == 1):\r\n return True\r\n elif (self.objects[i+1, j] == 1 or self.objects[i-1, j] == 1 or self.objects[i, j+1] == 1 or self.objects[i, j-1] == 1):\r\n return True\r\n return False", "def all_bees_raised_flag(self):\n pos, com, success = self.perception\n if len(pos) > 0:\n return all(map(lambda x: x[1][\"flag\"] == (self.nr_of_possible_neighbors + 1), com))\n else:\n return True", "def count_land_neighbours(self):\n\t\tglobal neibs\n\t\tneibs = np.zeros((hh,wh),int)\n\t\t\n\t\t# Calculate the number of adjacent grids which are lands\n\t\tfor x in range(1,h+1): \n\t\t\tfor y in range(1,w+1):\n\t\t\t\tneibs[x,y] = lscape[x-1,y] \\\n\t\t\t\t\t+ lscape[x+1,y] \\\n\t\t\t\t\t+ lscape[x,y-1] \\\n\t\t\t\t\t+ lscape[x,y+1]", "def count_islands(matrix):\n visited = 
init_visited(matrix)\n num_islands = 0\n for i in range(len(matrix)):\n for j in range(len(matrix)):\n if matrix[i][j] and not visited[i][j]:\n check_neighbours(matrix, (i, j), visited)\n num_islands += 1\n # print(visited)\n return num_islands" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Open neighbours if the current coordinates are 0 and neighbours are untouched. Recursively opens if the neighbours are also 0.
def open_neighbours(self, y, x):
    if [y, x] in self.mine_locations:
        return [y, x]
    # generate neighbours with positive indexes
    l = [[ye, xe] for xe in range(
        x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0]
    for ye, xe in l:
        # if the indexes are out of the game table, skip
        if xe >= self.x or ye >= self.y:
            continue
        # if the current coordinates are still untouched, update their values
        if self.table_state[ye][xe] == '-':
            self.table_state[ye][xe] = self.final_table[ye][xe]
            # if the coordinate has a value of 0, recursively open it's neighbours.
            if self.final_table[ye][xe] == '0':
                self.open_neighbours(ye, xe)
[ "def special_open_neighbours(self, y, x):\n if self.table_state[y][x] != \"-\" and self.table_state[y][x] == self.flags_nearby(y, x):\n l = [[ye, xe] for xe in range(\n x - 1, x + 2) if xe >= 0 for ye in range(y - 1, y + 2) if ye >= 0]\n for ye, xe in l:\n if xe >= self.x or ye >= self.y: # do not open out of bounds\n continue\n # if it is a bomb but not flagged\n if self.final_table[ye][xe] == Minesweeper.BOMB and self.table_state[ye][xe] != Minesweeper.FLAG:\n self.show_answer_board([ye, xe])\n print \"KABOOM!\"\n return Minesweeper.IS_A_BOMB\n self.open_neighbours(y, x)\n self.print_table(self.table_state)\n return Minesweeper.NOT_A_BOMB", "def _find_openings(self) -> List[List[Coord_T]]:\r\n openings = []\r\n blanks_to_check = {\r\n c for c in self.all_coords if self.completed_board[c] == CellNum(0)\r\n }\r\n while blanks_to_check:\r\n orig_coord = blanks_to_check.pop()\r\n # If the coordinate is part of an opening and hasn't already been\r\n # considered, start a new opening.\r\n opening = {orig_coord} # Coords belonging to the opening\r\n check = {orig_coord} # Coords whose neighbours need checking\r\n while check:\r\n coord = check.pop()\r\n nbrs = set(self.get_nbrs(coord))\r\n check |= {\r\n c for c in nbrs - opening if self.completed_board[c] == CellNum(0)\r\n }\r\n opening |= nbrs\r\n openings.append(sorted(opening))\r\n blanks_to_check -= opening\r\n return openings", "def closedIsland1(self, grid: List[List[int]]) -> int:\n dic = [(1, 0), (0, 1), (-1, 0), (0, -1)]\n m, n = len(grid), len(grid[0])\n count = 0\n\n def dfs(x, y):\n grid[x][y] = 1\n if x in {0, m - 1} or y in {0, n - 1}:\n self.flag = 0\n\n for p in dic:\n a, b = p\n # 下面必须写成x+a和y+b,不能写成x和y,否则边缘上的数据遍历不到\n if 0 <= x + a < m and 0 <= y + b < n and grid[x + a][y + b] == 0:\n dfs(x + a, y + b)\n\n for i in range(m):\n for j in range(n):\n if grid[i][j] == 0:\n self.flag = 1\n dfs(i, j)\n count += self.flag\n\n return count", "def check_neighbours(self, i,j):\r\n \r\n if (i == 0):\r\n if (self.objects[i+1, j] == 1 or self.objects[i, j+1] == 1 or self.objects[i, j-1] == 1):\r\n return True\r\n elif (i == self.N-1):\r\n if (self.objects[i-1, j] == 1 or self.objects[i, j+1] == 1 or self.objects[i, j-1] == 1):\r\n return True\r\n elif (j == 0):\r\n if (self.objects[i+1, j] == 1 or self.objects[i-1, j] == 1 or self.objects[i, j+1] == 1 or self.objects[i, self.N-1] == 1) :\r\n return True\r\n elif (j == self.N-1):\r\n if (self.objects[i+1, j] == 1 or self.objects[i-1, j] == 1 or self.objects[i, j-1] == 1 or self.objects[i, 0] == 1):\r\n return True\r\n elif (self.objects[i+1, j] == 1 or self.objects[i-1, j] == 1 or self.objects[i, j+1] == 1 or self.objects[i, j-1] == 1):\r\n return True\r\n return False", "def requires_neighbours():\n return False", "def neighbors(self):\n \n # find 0 - blank square\n \n x0 = None\n y0 = None\n \n for i in range(4):\n for j in range(4):\n if self.get_tile(i,j) == 0:\n y0 = i\n x0 = j\n\n if x0 == None or y0 == None:\n return []\n \n neighbor_list = []\n \n # move 0 to the right\n if x0 < 3:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0,x0+1)\n new_position.set_tile(y0,x0+1,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'r'\n neighbor_list.append(new_position)\n # move 0 to the left\n if x0 > 0:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0,x0-1)\n new_position.set_tile(y0,x0-1,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'l'\n neighbor_list.append(new_position)\n # move 0 up\n if y0 > 
0:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0-1,x0)\n new_position.set_tile(y0-1,x0,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'u'\n neighbor_list.append(new_position)\n # move 0 down\n if y0 < 3:\n new_position = Position(self.tiles)\n temp = new_position.get_tile(y0+1,x0)\n new_position.set_tile(y0+1,x0,0)\n new_position.set_tile(y0,x0,temp)\n new_position.directiontomoveto = 'd'\n neighbor_list.append(new_position)\n \n return neighbor_list", "def neighbours(self):\n for y in range(self.y - 1, self.y + 2):\n for x in range(self.x - 1, self.x + 2):\n if self.x != x or self.y != y:\n neighbour = self.g.at(x, y)\n if neighbour:\n yield neighbour", "def check_neighbours(self, node):\n for i in range(8): # loop through all neighbours\n neighbour_index = self.index_from_direction(node.index, i)\n neighbour = self.get_node_from_index(neighbour_index)\n # set extra_depth bassed on direction\n if (i == DIRECTION_UP or i == DIRECTION_RIGHT or\n i == DIRECTION_DOWN or i == DIRECTION_LEFT):\n extra_depth = 1\n else: # diagonal direction\n extra_depth = SQRT_TWO\n if neighbour is None: # neighbour not in list yet\n if self.check_valid(neighbour_index):\n neighbour = Node(neighbour_index,\n parent_index=node.index,\n depth=node.depth+extra_depth)\n self.frontier_nodes.appendleft(neighbour)\n dist = euclidean_distance(*(self.xy_from_index(neighbour_index)+\n self.xy_from_index(self.index_goal)))\n item = (dist, neighbour)\n heappush(self.frontier_nodes_pl, item)\n #self.cells.appendleft(self.point_from_node(neighbour))\n self.nodes.append(neighbour)\n self.index_nodes_map[neighbour_index] = neighbour\n if neighbour_index == self.index_goal:\n return True # goal found\n\n else: # neighbour exists in list\n if neighbour.visited:\n continue # already dealt with this neighbour, so skip to next loop\n if neighbour.depth > node.depth + extra_depth:\n neighbour.depth = node.depth + extra_depth\n neighbour.parent_index = node.index\n\n #if neighbour is not None:\n # cells = GridCells()\n # cells.header.frame_id = \"map\"\n # cells.cell_height = 0.1\n # cells.cell_width = 0.1\n # cells.cells = self.cells\n # self.cells_pub.publish(cells)\n\n node.visited = True # node 'visited' after checking all of its neighbours\n return False # goal not found", "async def open(code: int, col: int, row: int, double_click: bool = False):\n OPENED = []\n\n async def update_opened(code: int, col: int, row: int):\n await db_spot.filter(code=code, col=col, row=row).update(opened=True)\n spot = await spot_pydantic.from_queryset_single(db_spot.get(\n code=code, col=col, row=row))\n spot = spot.dict()\n return spot\n\n async def open_zeros(spot: dict, code: int, size: tuple):\n \"\"\"Recursive function that open's all the zero's neighbors and their neighbors if they are zero.\n\n Args:\n spot (dict): The dictionary of a spot_pydantic model\n code (int): The game the spot belongs to\n\n Returns:\n list[dict]: A list of all the spots that are opened\n \"\"\"\n col, row = spot[\"col\"], spot[\"row\"]\n n_cols, n_rows = size\n\n if spot[\"n_mines\"] != 0:\n spot = await update_opened(code=code, col=col, row=row)\n\n OPENED.append(spot)\n\n else:\n neighbors = [\n (col-1, row-1), (col, row-1), (col+1, row-1),\n (col-1, row), (col+1, row),\n (col-1, row+1), (col, row+1), (col+1, row+1)\n ]\n\n if spot[\"opened\"] == False and spot[\"flagged\"] == False:\n spot = await update_opened(code, col, row)\n\n OPENED.append(spot)\n for nb in neighbors:\n if 0 <= nb[0] < n_rows and 0 <= 
nb[1] < n_cols:\n nb = await spot_pydantic.from_queryset_single(db_spot.get(code=code, col=nb[0], row=nb[1]))\n nb = nb.dict()\n if nb[\"opened\"] == False and nb[\"flagged\"] == False:\n await open_zeros(nb, code, size)\n\n spot = await spot_pydantic.from_queryset_single(\n db_spot.get(code=code, col=col, row=row))\n spot_dict = spot.dict()\n\n if spot_dict[\"mine\"] == True and spot_dict[\"flagged\"] == False:\n # TODO define better return value for frontend\n return {\"game_status\": \"lost\"}\n\n ms = await minesweeper_pydantic.from_queryset_single(db_minesweeper.get(code=code))\n ms = ms.dict()\n size = (ms[\"n_cols\"], ms[\"n_rows\"])\n\n # TODO make sure to open only if it is a double click or the field is not yet opened\n if not double_click:\n await open_zeros(spot_dict, code, size)\n spots = await spot_pydantic.from_queryset(\n db_spot.filter(code=code, mine=False, opened=False))\n spots = [spot.dict() for spot in spots]\n if len(spots) == 0:\n return {\"status\": \"You Won!\"}\n return OPENED", "def _dfs(grid, i, j):\n grid[i][j] = False\n for x in range(i - 1, i + 2):\n for y in range(j - 1, j + 2):\n if (abs((x + y) - (i + j)) == 1) and _is_valid_land(x, y, grid):\n _dfs(grid, x, y)", "def create_neighbors(self):\n for row in self._grid:\n for cell in row:\n #\n # There are some nine situations that we have to account for:\n #\n # 1. upper left corner (3 neighbors)\n # 2. rest of the top row (5 neighbors)\n # 3. upper right corner (3 neighbors)\n # 4. far left side (5 neighbors)\n # 5. normal cells (8 neighbors)\n # 6. far right side (5 neighbors)\n # 7. lower left corner (3 neighbors)\n # 8. rest of bottom row (5 neighbors)\n # 9. lower right corner (3 neighbors)\n #\n row = cell.get_row()\n column = cell.get_column()\n #print(f'({row},{column})')\n # top row\n if row == 0:\n if column == 0:\n #print('upper left')\n cell.add_neighbor(self._grid[row][column + 1])\n cell.add_neighbor(self._grid[row + 1][column])\n cell.add_neighbor(self._grid[row + 1][column + 1])\n elif column < (self._columns - 1):\n #print('upper')\n cell.add_neighbor(self._grid[row][column - 1])\n cell.add_neighbor(self._grid[row][column + 1])\n cell.add_neighbor(self._grid[row + 1][column - 1])\n cell.add_neighbor(self._grid[row + 1][column])\n cell.add_neighbor(self._grid[row + 1][column + 1])\n else:\n #print('upper right')\n cell.add_neighbor(self._grid[row][column - 1])\n cell.add_neighbor(self._grid[row + 1][column - 1])\n cell.add_neighbor(self._grid[row + 1][column])\n # middle area\n elif row < (self._rows - 1):\n if column == 0:\n #print('far left side')\n cell.add_neighbor(self._grid[row][column + 1])\n cell.add_neighbor(self._grid[row + 1][column])\n cell.add_neighbor(self._grid[row + 1][column + 1])\n cell.add_neighbor(self._grid[row - 1][column])\n cell.add_neighbor(self._grid[row - 1][column + 1])\n elif column < (self._columns - 1):\n #print('normal')\n cell.add_neighbor(self._grid[row][column + 1])\n cell.add_neighbor(self._grid[row][column - 1])\n cell.add_neighbor(self._grid[row + 1][column])\n cell.add_neighbor(self._grid[row + 1][column + 1])\n cell.add_neighbor(self._grid[row - 1][column - 1])\n cell.add_neighbor(self._grid[row - 1][column])\n cell.add_neighbor(self._grid[row - 1][column + 1])\n cell.add_neighbor(self._grid[row + 1][column - 1])\n else:\n #print('far right side')\n cell.add_neighbor(self._grid[row][column - 1])\n cell.add_neighbor(self._grid[row + 1][column])\n cell.add_neighbor(self._grid[row + 1][column - 1])\n cell.add_neighbor(self._grid[row - 1][column])\n 
cell.add_neighbor(self._grid[row - 1][column - 1])\n # bottom row\n else:\n if column == 0:\n #print('lower left')\n cell.add_neighbor(self._grid[row][column + 1])\n cell.add_neighbor(self._grid[row - 1][column])\n cell.add_neighbor(self._grid[row - 1][column + 1])\n elif column < (self._columns - 1):\n #print('lower')\n cell.add_neighbor(self._grid[row][column - 1])\n cell.add_neighbor(self._grid[row][column + 1])\n cell.add_neighbor(self._grid[row - 1][column - 1])\n cell.add_neighbor(self._grid[row - 1][column])\n cell.add_neighbor(self._grid[row - 1][column + 1])\n else:\n #print('lower right')\n cell.add_neighbor(self._grid[row][column - 1])\n cell.add_neighbor(self._grid[row - 1][column - 1])\n cell.add_neighbor(self._grid[row - 1][column])", "def get_neighbours(self, grid):\n\t\tfor diff in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n\t\t\tres = Vector((self.row, self.col)) + diff\n\t\t\tif res[0] >= 0 and res[1] >= 0 and res[0] < len(grid) and res[1] < len(grid[0]):\n\t\t\t\tyield grid[res[0]][res[1]]", "def _check_neighbors(self):\n for direction, dir_info in self.DIRECTIONS.items():\n pos = Point(\n self.position.x + dir_info[\"mask\"][0],\n self.position.y + dir_info[\"mask\"][1]\n )\n status = self.move(direction)\n self.grid[status].add(pos)\n if status in (1, 2):\n # moved\n self.move(dir_info[\"opposite\"])\n yield pos", "def get_neighbours(self) -> Generator['Position', None, None]:\n for dc in range(-1, 2):\n for dy in range(-1, 2):\n if dc != 0 or dy != 0:\n p = self + Vector2(dc, dy)\n if p.is_valid():\n yield p", "def neighbours(self):\n\n neighbours = []\n root = self.root\n if self == root:\n return neighbours\n\n ########################\n # IMMEDIATELY ADJACENT #\n sizes = [self.maxs[0] - self.mins[0], self.maxs[1] - self.mins[1]]\n coords = [(self.mins[0] + sizes[0] / 2, self.maxs[1] + sizes[1] / 2,),\n (self.maxs[0] + sizes[0] / 2, self.mins[1] + sizes[1] / 2,),\n (self.mins[0] + sizes[0] / 2, self.mins[1] - sizes[1] / 2,),\n (self.maxs[0] - sizes[0] / 2, self.mins[1] + sizes[1] / 2,),]\n # loop through top, right, bottom, left\n for i in range(4):\n x, y = coords[i]\n query_quad = root.query_xy(x, y)\n if query_quad is not None:\n same_size_idx = query_quad.location[: self.tree_depth]\n same_size_quad = root[same_size_idx]\n neighbours += list(self._get_border_children(same_size_quad, i))\n\n #############\n # DIAGONALS #\n root_sizes = [root.maxs[0] - root.mins[0], root.maxs[1] - root.mins[1]]\n xs, ys = (root_sizes / 2 ** root.max_tree_depth) / 2\n neighbours += [\n root.query_xy(self.mins[0] - xs, self.mins[1] - ys), # TL\n root.query_xy(self.maxs[0] + xs, self.mins[1] - ys), # TR\n root.query_xy(self.mins[0] - xs, self.maxs[1] + ys), # BL\n root.query_xy(self.maxs[0] + xs, self.maxs[1] + ys), # BR\n ]\n\n unique_neighbours = list(set(neighbours))\n try:\n unique_neighbours.remove(self)\n except ValueError:\n pass\n\n return unique_neighbours", "def get_neighbors(start_square, visited=[]):\n neighbors = []\n\n # loop over possible x values\n for i in [start_square.x - 1, start_square.x, start_square.x + 1]:\n\n # drop neighbors outside of our region of interest\n if i < 0 or i > MAX_X:\n continue\n\n # loop over possible y values\n for j in [start_square.y - 1, start_square.y, start_square.y + 1]:\n\n # drop neighbors outside of our region of interest\n if j < 0 or j > MAX_Y:\n continue\n\n # Ignore ourself\n if i == start_square.x and j == start_square.y:\n continue\n\n # Ignore corner pieces\n if i == start_square.x - 1 and j != start_square.y:\n continue\n if i == 
start_square.x + 1 and j != start_square.y:\n continue\n\n # Deal with barriers\n found = False\n for square in visited:\n if square.pos == [i, j]:\n found = True\n break\n if found:\n continue\n\n neighbors.append(Square(i, j))\n\n return neighbors", "def mark_neighbouring_squares(square):\n i, j = square\n if (i, j) not in visited_squares and grid[i][j] == \"1\":\n visited_squares.add((i, j))\n if i > 0:\n mark_neighbouring_squares((i - 1, j))\n if i < 127:\n mark_neighbouring_squares((i + 1, j))\n if j > 0:\n mark_neighbouring_squares((i, j - 1))\n if j < 127:\n mark_neighbouring_squares((i, j + 1))", "def setNeighbors(self):\n for cellIndex in range(len(self.cells)):\n cell = self.cells[cellIndex]\n\n #Checks the 8 cells around the living one. \n for neighborsX in range(cell.x - 1, cell.x + 2):\n for neighborsY in range(cell.y - 1, cell.y + 2):\n\n #If the position is outside the world, loop around.\n neighborsX = neighborsX % self.screen.worldSize\n neighborsY = neighborsY % self.screen.worldSize\n\n #Skipping itself. Becouse we do not want to calculate itself as a neighbor\n if(neighborsX == cell.x and neighborsY == cell.y):\n continue\n else:\n #Checks if a cell exist at neighborsX, neighborsY\n cellToCheck = self.getCellFromPosition(neighborsX, neighborsY)\n if(cellToCheck != False):\n #Add one to the neighbor var if there already exist and cell for the given position.\n cellToCheck.numOfNeighbor += 1\n else:\n #Creates a new cell if it do not exist any.\n newCell = Cell(self.screen, neighborsX, neighborsY, True)\n newCell.numOfNeighbor += 1\n self.cells.append(newCell)", "def live_neighbours(self):\r\n\r\n # loop over whole grid\r\n for i in range(self.N):\r\n for j in range(self.N):\r\n\r\n number_of_live_neighbours = 0 # total number of live neighbours\r\n\r\n # loop over all neighbours\r\n for x in [i - 1, i, i + 1]:\r\n for y in [j - 1, j, j + 1]:\r\n\r\n if (x == i and y == j):\r\n continue # skip current point, only count neighbours\r\n\r\n if (x != self.N and y != self.N):\r\n number_of_live_neighbours += self.old_grid[x][y]\r\n\r\n # remaining branches handle the case where the neighbour is off the end of the grid\r\n # in this case, we loop back round such that the grid becomes a \"toroidal array\"\r\n elif (x == self.N and y != self.N):\r\n number_of_live_neighbours += self.old_grid[0][y]\r\n elif (x != self.N and y == self.N):\r\n number_of_live_neighbours += self.old_grid[x][0]\r\n else:\r\n number_of_live_neighbours += self.old_grid[0][0]\r\n\r\n # store number of neighbours in corresponding matrix\r\n self.live_neighbours_grid[i][j] = number_of_live_neighbours" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Come here when the coordinates do not have a bomb. Update the table_state with the selected coordinate.
def tease_user(self, y, x):
    self.table_state[y][x] = self.final_table[y][x]
    # if there are no neighbouring 0s, open neighbours
    if self.table_state[y][x] == '0':
        self.open_neighbours(y, x)
    self.print_table(self.table_state)
[ "def _update_clicked_cell(self, event):\n if self._app.is_active() or not self._inside_range(event):\n return\n\n row, col = self._get_coord(event)\n if self.coords((row, col)): # coords method inherited from Canvas\n self.display_cell_as_dead(row, col)\n else:\n self.display_cell_as_alive(row, col)\n self._app.update_cell_in_board_data(row, col)", "def mouseClick(self, event):\n if self.editMode:\n self.applyEditing(event)\n self.clearEditCursor(event)\n return\n x = (event.y - self.margin) // self.cellSize\n y = (event.x - self.margin) // self.cellSize\n if self.checkFree(x, y) == self.colors['busy']:\n return # clicked busy position\n self.onBoard += 1\n self.refreshScore()\n self.history.append((\n self.setBusy(x, y),\n self.addPentomino(x, y)\n ))\n if self.onBoard == self.expectedBest:\n self.gameOver()", "def update_state(self):\n self.reset_state()\n for piece in self.pieces:\n coordinates = piece.get_block_positions()\n for coor in coordinates:\n x, y = coor\n self.state[y][x] = piece", "def set_state(self, new_state):\n for x, y in doublerange(self.size):\n self.change_cell(x, y, new_state[x][y])", "def table_move_update():\n pos = self.variables.table.get_current_position()\n self.table_move_ui.x_move.setProperty(\"value\", int(pos[0]))\n self.table_move_ui.y_move.setProperty(\"value\", int(pos[1]))\n self.table_move_ui.z_move.setProperty(\"value\", int(pos[2]))", "def ensure_state_in_qtable(self, state):\n if state not in self.qtable:\n self.qtable[state.get_copy()] = {action:0.0 for action in self.action_space}", "def pick(self, obj_height_from_table):\n init_x = self.x\n init_y = self.y\n init_z = self.z\n obj_z = self.table_z + obj_height_from_table*self.disk_height\n \n #open gripper\n self.gripper.command_position(100)\n \n #drop to given height\n self.move_to(init_x, init_y, obj_z)\n \n #close gripper\n self.gripper.command_position(0)\n \n #return to initial position\n self.move_to(init_x, init_y, init_z)", "def setCell(self, (xIndex, yIndex)):\n changed = self.grid[xIndex][yIndex] == False\n self.grid[xIndex][yIndex] = True\n if changed:\n self.drawSquare((xIndex, yIndex))", "def selection_board_maintenance(self,x_cor,y_cor):\t\t\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.display.quit()\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tquit() \r\n\r\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\r\n\t\t\t\t#print(\"mouse is pressed\")\r\n\t\t\t\t#everything begins here\r\n\t\t\t\tx_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'selection_bar')\r\n\t\t\t\t#print(who_is_clicked)\r\n\t\t\t\tif (self.selected_from_selection_bar + self.selected_from_board):\r\n\t\t\t\t\t#print(\"inside selected item one\")\r\n\t\t\t\t\tif Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked):\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tif self.pieces[piece].availability:\r\n\t\t\t\t\t\t\tself.selected_from_board = False\r\n\t\t\t\t\t\t\tself.selected_from_selection_bar = True\r\n\r\n\t\t\t\t\t\t\t#update \r\n\t\t\t\t\t\t\tself.selected_piece = piece\r\n\t\t\t\t\t\t\tself.selected_position =Helping_Class.selection_bar_reverse_mapping[piece]\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t#print(\"nothing is selected\")\r\n\t\t\t\t\t#check if clicked on his piece change then select it\r\n\t\t\t\t\tif 
Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked):\r\n\r\n\t\t\t\t\t\tif self.pieces[piece].availability:\r\n\t\t\t\t\t\t\tself.selected_from_selection_bar = True\r\n\r\n\t\t\t\t\t\t\t#update \r\n\t\t\t\t\t\t\tself.selected_piece = piece\r\n\t\t\t\t\t\t\tself.selected_position =(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t#print(self.selected_piece,self.selected_position,self.selected_from_selection_bar)\r\n\r\n\t\t\t\t\t\t\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\t\t\r\n\t\t\t\t\r\n\t\t\telse:\r\n\t\t\t\t#color change\r\n\t\t\t\t#who_is_clicked is dummy variable as no click has occurred\r\n\t\t\t\tx_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'selection_bar')\r\n\r\n\t\t\t\tself.blit_piece = [(x_adjusted,y_adjusted),piece]", "def make_cell_change(self, x, y):\n self.cells[x][y] = 1 if not self.cells[x][y] else 0", "def update_board(self, coordinate, hit):\n \n if hit:\n self.board_state[coordinate.row_idx][coordinate.col_idx] = \"H\"\n else:\n self.board_state[coordinate.row_idx][coordinate.col_idx] = \"M\"", "def change_cell(self,pos,purpose):\n coord = self.cell2state(pos)\n try:\n cell = self.window.cell_dict[coord]\n if cell.purpose == 'start':\n self.made_start = False\n elif cell.purpose == 'finish':\n self.made_finish = False\n cell.assign_purpose(purpose) \n except:\n pass", "def place_on_table(self):\n self.move_to_pos([3, 3])\n self.free()", "def update_state(self):\n rows = range(self.rows)\n cols = range(self.cols)\n self.state = [[self.squares[row][col].value for col in cols] for row in rows]", "def picked_up(self):\n\n self.position[0] = -100\n self.position[1] = -100", "def switch_to_next_geometry(self):", "def changeState(self, xyPoints):\n numPoints = len(xyPoints)\n selInd = random.randint(0, self.numToSelect-1)\n remInd = random.randint(self.numToSelect, len(xyPoints) - 1)\n xyPoints[selInd], xyPoints[remInd] = tuple(xyPoints[remInd]), tuple(xyPoints[selInd])\n self.setEnergyForOnePoint(xyPoints, selInd)", "def _set_state_coordinates(atomic_entity, width, height):\n state_entity = atomic_entity.get(\"children\")[0]\n parent_coor = atomic_entity[\"coordinates\"]\n state_entity[\"coordinates\"] = {\n \"x\": parent_coor[\"x\"] + (parent_coor[\"width\"] - width) / 2,\n \"y\": parent_coor[\"y\"] - (height / 2),\n \"width\": width,\n \"height\": height,\n }", "def update_game_state(db_cursor: sqlite3.Cursor,\n move_id: int,\n command: Command,\n gridpoint_row: int,\n gridpoint_col: int,\n tweet_id: str) -> None:\n db_cursor.execute(\n _UPDATE_STATE_QUERY,\n (move_id, command.name, gridpoint_row, gridpoint_col, tweet_id)\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that checks if the file at the provided url exists.
def file_exist(file_url):
    try:
        response = requests.head(file_url)
        if 200 <= response.status_code < 300:
            return True
        return False
    except ConnectionError:
        return False
[ "def remote_file_exists(url):\n status = requests.head(url).status_code\n\n if status == 200:\n return True\n else:\n raise RemoteFileDoesntExist", "def url_exists(url):\n request = requests.get(url)\n if request.status_code == 200:\n exist = True\n else:\n exist = False\n return exist", "def _url_exists(self, url):\n return url_exists(url)", "def exists(self, url):\n return (self.base_path / url).exists()", "def url_exists(url):\n # Check for URLs we can't validate\n if url.startswith(\"https://kiwiirc.com\"):\n return True\n if url.startswith(\"https://www.projectcalico.org\"):\n return True\n\n try:\n urllib2.urlopen(url)\n return True\n except urllib2.HTTPError, e:\n print_bullet(\"Hit error reading %s: %s\" % (url, e))\n return False\n except urllib2.URLError, e:\n print_bullet(\"Hit error reading %s: %s\" % (url, e))\n return False", "def download_if_not_exists(filename, url):\n if not os.path.exists(filename):\n download_file(filename, url)\n return True\n return False", "def url_exist(url:str) -> bool:\r\n with closing(requests.head(url, allow_redirects=True)) as r:\r\n return r.ok", "def checkURL(self, url):\n\n try:\n rsp = self.b.open(url)\n except:\n return False\n\n return True", "def check_remote_file_exists(url, login=None, password=None):\r\n credentials = None\r\n if login and password:\r\n credentials = login, password\r\n\r\n response = requests.get(url,\r\n stream=True,\r\n verify=False,\r\n auth=credentials)\r\n if response.status_code >= 400 or response.status_code < 200:\r\n raise Exception('Returned wrong status code: {}'.format(response.status_code))\r\n\r\n response.close()", "def check_remote_file_exists(url, login=None, password=None):\n credentials = None\n if login and password:\n credentials = login, password\n\n response = requests.get(url,\n stream=True,\n verify=False,\n auth=credentials)\n if response.status_code >= 400 or response.status_code < 200:\n raise Exception('Returned wrong status code: {}'.format(response.status_code))\n\n response.close()", "def is_file_exists(self):\n pass", "def file_url(self, url):\n return self.is_regex_url(url, self.is_file_regex)", "def path_exists(path):\n if path.startswith('http://') or path.startswith('https://'):\n return True\n\n return isfile(path)", "def check_dataset(filename=None, url=None):\n try:\n with open(filename, 'r'):\n exists = True\n except IOError:\n exists = False\n\n if not exists:\n retreive_dataset(filename, url)", "def IsFile( self, path ) :\n \n # modules:\n import urllib.request\n import urllib.error\n \n # try to open file, fails if not present:\n try :\n # try to open:\n urllib.request.urlopen( self.protocol+path )\n # ok:\n return True\n except urllib.error.HTTPError :\n # something wrong, assume that means file not present ..\n return False\n except :\n # unknown error, raise it and break:\n raise\n #endtry", "def check_file_existence(location):\n return os.path.isfile(location)", "def file_exist() -> bool:\n pass", "def check_file_existence(self, filename):\n try:\n for sample in TimeoutingSampler(\n config.GAHOOKS_TIMEOUT, 1, self.machine.fs.exists,\n \"/tmp/%s\" % filename\n ):\n if sample:\n return True\n except APITimeout:\n return False", "def _is_already_downloaded(url, filename):\n try:\n # Note: it's in local timezone\n local_date = datetime.fromtimestamp(os.path.getmtime(filename)).date()\n except FileNotFoundError:\n return False\n # To keep it simple, we just compare dates.\n return (local_date - get_date(url)).days >= 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that returns the appropriate hash based on the file URL.
def get_hash(file_url):
    file_extension = os.path.splitext(file_url)[1]
    return str(HASHES.get(file_extension))
[ "def getHash(url):\n #return hashlib.sha256(url).hexdigest()\n\n # NOTE: debugging\n return url", "def read_hash_from_url(url_hash):\n user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'\n headers = {'User-Agent':user_agent,}\n\n aux_req = request.Request(url_hash, None, headers)\n response = request.urlopen(aux_req)\n hashes = response.read()\n\n # expecting only one hash, and not interested in the filename:\n online_hash, _ = hashes.decode('ascii').split()\n\n return online_hash", "def hash_file(filepath: str) -> str:\n md5 = hashlib.md5()\n acc_hash(filepath, md5)\n return md5.hexdigest()", "def hash_file(pathname):\n h = hashlib.sha256()\n with open(pathname, 'rb') as ifile:\n h.update(ifile.read())\n return h.digest()", "def get_file_content_hash(file_path):\n with open(file_path) as content:\n hasher = hashlib.sha256()\n hasher.update(content.read())\n return hasher.hexdigest()", "def url_hash(self, url:Union[URI, str, int]):\n h = hashlib.sha256()\n h.update(str(url).encode())\n return int(h.hexdigest(), 16) % self.max_nodes", "def get_file_hash(self, filepath):\n if filepath not in self._file_hash_cache:\n self._file_hash_cache[filepath] = self.static_file_hash(filepath)\n return self._file_hash_cache[filepath]", "def calculate_hash(filepath, hash_name):\n\n hash_name = hash_name.lower()\n if not hasattr(hashlib, hash_name):\n raise Exception('Hash algorithm not available : {}'\\\n .format(hash_name))\n\n with open(filepath, 'rb') as f:\n checksum = getattr(hashlib, hash_name)()\n for chunk in iter(lambda: f.read(4096), b''):\n checksum.update(chunk)\n\n return checksum.hexdigest()", "def hash_for_file(filepath, hash_id):\n _deprecation(\"Call to deprecated function hash_for_file(), should use hash_file().\")\n return hash_file(filepath, hash_id)", "def hash_file(cls, filepath: str) -> str:\n hasher = cls()\n with open(filepath, \"rb\") as fd:\n # process files in chunks so that large files won't cause excessive memory consumption.\n size = 1024 * 1024 # chunk size 1MB\n chunk = fd.read(size)\n while chunk:\n hasher.update(chunk)\n chunk = fd.read(size)\n\n return hasher.string_digest()", "def _hash_file_content(self, path):\n hasher = hashlib.sha1()\n with open(path, 'rb') as file:\n buffer = file.read(self.hash_block_size)\n while len(buffer) > 0:\n hasher.update(buffer)\n buffer = file.read(self.hash_block_size)\n return hasher.hexdigest()", "def file_hash(filename):\n # Open the file, read contents as bytes.\n # Calculate, return SHA1 has on the bytes from the file.\n with open(filename, 'rb') as fobj:\n contents = fobj.read()\n return hashlib.sha1(contents).hexdigest()", "def hash_file(path):\n hasher = hashlib.sha256()\n with open(path, 'rb') as f:\n buffer = f.read(BLOCK_SIZE)\n while len(buffer) > 0:\n hasher.update(buffer)\n buffer = f.read(BLOCK_SIZE)\n return base64.urlsafe_b64encode(hasher.digest()[:12]).decode('utf-8')", "def calculate_hash(filename, raise_on_not_found = False):\n if not is_file(filename) and not raise_on_not_found:\n return \"NOTFOUND\"\n\n with open(filename, \"rb\") as file:\n sha256 = hashlib.sha256()\n buf = file.read(128)\n while len(buf) > 0:\n sha256.update(buf)\n buf = file.read(128)\n return str(binascii.hexlify(sha256.digest()), \"utf8\")", "def get_hash_file_info(self, input_file, needs_hash):\n assert os.path.isabs(input_file)\n orig_filepath = self._get_orig_file(input_file) or input_file\n hash_file = self._get_hash_file(orig_filepath)\n if not os.path.isfile(hash_file):\n if 
needs_hash:\n raise RuntimeError(\n \"ERROR: Hash file not found: {}\".format(hash_file))\n else:\n hash = self._hash_type.create_empty()\n hash.filepath = orig_filepath\n else:\n # Load the hash.\n with open(hash_file, 'r') as f:\n hash = self._hash_type.create(\n f.read().strip(), filepath=orig_filepath)\n return (hash, orig_filepath)", "def get_md5_hash(file_path: str) -> str:\n from hashlib import md5\n\n # local file\n if file_path.startswith('/'):\n return md5(open(file_path, 'rb').read()).hexdigest()\n\n # remote file\n httpresponse = url_is_alive(file_path)\n if not httpresponse:\n error_open_mess(file_path)\n return ''\n\n md5hash = md5()\n max_file_size = 100 * 1024 * 1024\n total_read = 0\n while True:\n data = httpresponse.read(4096)\n total_read += 4096\n\n if not data or total_read > max_file_size:\n break\n\n md5hash.update(data)\n\n httpresponse.close()\n return md5hash.hexdigest()", "def _hash_uri(self, uri):\n return hashlib.md5(self._c14n_uri(uri)).digest()", "def hash(file_chunk):\n hash_algo = sha256()\n file_hash = hash_algo.update(file_chunk)\n file_hash = hash_algo.hexdigest()\n\n first_bytes = file_hash[:4]\n last_bytes = file_hash[-4:]\n\n return first_bytes + last_bytes", "def _get_url_hashes(path):\n urls = _read_text_file(path)\n\n def url_hash(u):\n h = hashlib.sha1()\n try:\n u = u.encode(\"utf-8\")\n except UnicodeDecodeError:\n logging.error(\"Cannot hash url: %s\", u)\n h.update(u)\n return h.hexdigest()\n\n return {url_hash(u): True for u in urls}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Newton's second law of motion for measuring stopping distance. From Newton's second law of motion, the stopping distance of an object in motion, like a car, is d = (1/2)(v0^2/(mu*g)). The friction coefficient mu measures how slick a road is, with a default of 0.3.
def stopping_length_function(initial_velocity=120, friction_coefficient=0.3):
    g = 9.81
    v0 = initial_velocity/3.6
    mu = friction_coefficient
    return (1/2)*(v0**2/(mu*g))
[ "def friction(self):\n deceleration = 3\n if(self.speed > self.currentFrontSpeed * 1.5): deceleration = 25\n if(self.speed > self.currentFrontSpeed * 1.75): deceleration = 50\n if(self.speed > self.currentFrontSpeed * 2): deceleration = 100\n \n if self.speed > 0: self.speed -= deceleration\n else: self.speed += deceleration\n if abs(self.speed) < 5: self.speed = 0", "def calc_force_from_damping(v, damping, masses):\n F = masses*damping*np.diff(v, 0)\n\n return F", "def duty_cycle_by_force(newton: float, profile: GripForceProfile) -> float:\n if profile.min <= newton <= profile.max:\n return sum(ele[1] * (newton ** ele[0]) for ele in profile.polynomial)\n else:\n raise ValueError(\"Gripper force out of bounds\")", "def dampingForce(lamda,p,m ):\n return -lamda*p/m", "def f(r):\n x,y,vx,vy = r\n rval = np.sqrt(x**2 + y**2) # magnitude of distance to origin\n dxdt = vx\n dydt = vy\n # x and y componenents of Newton's law of gravitation\n dvxdt = -(G*M/rval**3)*x\n dvydt = -(G*M/rval**3)*y\n return np.array([dxdt,dydt,dvxdt,dvydt])", "def calculate_drag_coefficient(self):\n \n self.Cd = (56.11 * self.ad**2\n - 15.28 * self.ad\n + 1.3\n - 0.0005465 / self.ad)\n self.Cd[self.ad < 0.006] = 1.2\n \n self.Cd_veg = 0.5 * self.Cd * self.veg", "def _damping_force(self, y):\n force = (\n -self.cRUB\n * (self.radial_displ_vel_node)\n * y\n / abs(self.radial_displ_vel_node)\n )\n return force", "def cal_dry_mass(init_mass: float, isp: float, target_dv: float) -> float:\n mass_ratio = math.e ** (target_dv / isp / KG)\n return init_mass / mass_ratio", "def compute_chezy_friction_coeff(Cz):\n return 1./ ( Cz ** 2 )", "def circularVelocity(self):\n return math.sqrt(GM/self.a)", "def advection_1d(var,vel,dz,dt,NN):\n \n # check cfl for advection and diffusion\n cfl = 0.5\n dtc = cfl*dz/(np.max(np.abs(vel)))\n dt = np.min([dt, dtc])\n \n # ghost cells required of my artificial boundary conditions:\n # non-reflecting Neumann type boundary conditions are implemented\n vargh = np.insert(var, [0,NN], [var[0],var[-1]]) \n velgh = np.insert(vel, [0,NN], [vel[0],vel[-1]])\n \n theta = np.ones(NN+2)\n theta[np.where(velgh<0)] = -1\n \n # calculate slopes for the flux limiter (phi)\n TVD_r = vargh[1:]\n TVD_r2 = np.insert(vargh[2:],np.shape(vargh[2:])[0],vargh[-1])\n TVD_m = vargh[:-1]\n TVD_l = np.insert(vargh[:-2],0,vargh[0])\n \n r_TVDup = (TVD_r2-TVD_r)/(TVD_r-TVD_m)\n r_TVDdown = (TVD_m-TVD_l)/(TVD_r-TVD_m)\n \n r_TVD = r_TVDdown\n r_TVD[np.where(theta[1:]<0)] = r_TVDup[np.where(theta[1:]<0)]\n r_TVD[np.where(np.diff(TVD_m)==0)] = 1\n r_TVD[0] = 1\n r_TVD[-1] = 1\n \n # define Flux Limiter function (Van Leer)\n phi = (r_TVD + np.abs(r_TVD))/(1 + np.abs(r_TVD))\n phi_r = phi[1:]\n phi_l = phi[:-1]\n \n # think about my ghost cells\n TVD_r = vargh[2:]\n TVD_l = vargh[:-2]\n \n # compute fluxes for TVD\n F_rl = .5*((1+theta[1:-1])*vel*var + (1-theta[1:-1])*vel*TVD_r)\n F_rh = .5*vel*(var + TVD_r) - .5*vel*vel*dt/dz*(TVD_r-var)\n \n F_ll = .5*((1+theta[1:-1])*vel*TVD_l + (1-theta[1:-1])*vel*var)\n F_lh = .5*vel*(TVD_l+var) - .5*vel*vel*dt/dz*(var-TVD_l)\n \n # do the job\n F_right = F_rl + phi_r*(F_rh - F_rl)\n F_left = F_ll + phi_l*(F_lh - F_ll)\n \n vari = var - dt*(F_right-F_left)/dz\n \n # might want to add a check for imaginary numbers...\n \n return vari,dt", "def calc_repulsion_force(rij,rad,sigma,eps):\n\n rep_force = 48.0*eps*(sigma/(rij-rad))**(13)\n\n return rep_force;", "def newton_raphson(x1, fx1, f1):\n return x1 - f1/fx1", "def radial_velocity(self, mjd):\n ma = self.mean_anomaly(mjd)\n kcirc = 
2.*np.pi*self['A1']*self.evaluate('FB', mjd)\n e1, e2 = self['EPS1'], self['EPS2']\n vrad = kcirc*(np.cos(ma)+e1*np.sin(2*ma)+e2*np.cos(2*ma))\n return vrad", "def car_dynamics(self,x, t, u, p):\n # f = vehicle_dynamics_ks(x, u, p)\n f = vehicle_dynamics_st(x, u, p)\n # f = vehicle_dynamics_std(x, u, p)\n # f = vehicle_dynamics_mb(x, u, p)\n return f", "def get_speed(self):\r\n\t\tvel = self.vehicle.get_velocity()\r\n\t\treturn 3.6 * math.sqrt(vel.x ** 2 + vel.y ** 2 + vel.z ** 2)", "def leapfrog(xn: numpy.array, vn: numpy.array, dt: float, m: float):\n xnp1 = xn + vn * dt + force(xn, m) / m * dt**2 / 2\n vnp1 = vn + (force(xn, m) + force(xnp1, m)) / m * dt / 2\n return xnp1, vnp1", "def calculate_forces(v0, mu, density_m, CD, diameter_b, \\\n area_b, volume_b, density_b, \\\n dt, T):\n \n # Gravitational const. m/s^2\n g = 9.81 \n # Proportionality constant for\n # Reynolds number\n Re_const = diameter_b*density_m/mu\n \n a_s = 3*math.pi*diameter_b*mu/(density_b*volume_b)\n a_q = 0.5*CD*density_m*area_b/(density_b*volume_b)\n b = g*(density_m/density_b - 1.0)\n \n # Numerical solution gives velocity as \n # a function of time.\n v, t = vm.solver(v0, a_s, a_q, b, Re_const, T, dt) \n\n # Initialize vectors\n Fg = zeros(len(v))\n Fb = zeros(len(v))\n Fd = zeros(len(v))\n\n # Loop over time steps\n for n in range(0, len(v)):\n # Evaluate Reynolds number\n Re = Re_const*v[n] \n \n # Gravity force\n Fg[n] = -density_b*volume_b*g\n # Bouyancy force\n Fb[n] = density_m*g*volume_b\n \n # Drag force\n if abs(Re) < 1:\n # If Re < 1, use Stokes' drag force \n Fd[n] = -3.0*math.pi*diameter_b*mu*v[n]\n else:\n # If Re >= 1, use the quadratic\n # drag force\n Fd[n] = -0.5*CD*density_m*area_b*abs(v[n])*v[n]\n\n \n return Fg, Fb, Fd, t", "def dfp(x, t):\n k = 1. #ratio of gravity acceleration to length of pendulum\n d = 0.1 #damping coefficient\n A = 1.5 #amplitude of forcing\n t = float(t)\n x = array(x)\n if x.shape != (2,):\n raise ValueError(\"The state of the pendulum is described by a vector with two elements\")\n return array([ x[1],\n - k*sin(x[0]) - d*x[1] + A*sin(t) ])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Integration function using scitools.StringFunction to do integration.
>>> integration.py 'sin(x)' 0 pi/2
def integrate_function():
    def midpoint_integration(f, a, b, n=100):
        h = (b - a)/float(n)
        I = 0
        for i in range(n):
            I += f(a + i*h + 0.5*h)
        return h*I

    f_formula = sys.argv[1]
    a = eval(sys.argv[2])
    b = eval(sys.argv[3])
    if len(sys.argv) >= 5:
        n = int(sys.argv[4])
    else:
        n = 200

    from scitools.StringFunction import StringFunction
    f = StringFunction(f_formula)  # turn formula into f(x) func.
    """
    >>> g = StringFunction('A*exp(-a*t)*sin(omega*x)',
                           independent_variable='t',
                           A=1, a=0.1, omega=pi, x=0.5)
    >>> g.set_parameters(omega=0.1)
    >>> g.set_parameters(omega=0.1, A=5, x=0)
    >>> g(0)
    0.0
    >>> g(pi)
    2.8382392288852166e-15
    """
    I = midpoint_integration(f, a, b, n)
    print("Integral of {:s} on [{:g}, {:g}] with n ={:d}: {:g}"
          .format(f_formula, a, b, n, I))
[ "def sin(x):", "def main():\n try:\n f_formula = sys.argv[1]\n a = eval(sys.argv[2])\n b = eval(sys.argv[3])\n n = int(sys.argv[4])\n except IndexError:\n print 'Usage: %s f-formula a b n' % sys.argv[0]\n sys.exit(1)\n\n from scitools.std import StringFunction\n f = StringFunction(f_formula)\n I = integrate(f, a, b, n)\n print I", "def integrate(f, inf_lim, sup_lim):\n function = get_function_from_text(f)\n return sp_integrate.quad(function, inf_lim, sup_lim)[0]", "def f(x):\n global input_function\n replaced = str(input_function).replace('x', str(x))\n replaced = replaced.replace('e', str(np.math.e))\n return eval(replaced)", "def scintegral(f,s,a,b):\n\tx = sc.linspace(0,10,1000)\n\ti_trapez = integ.trapz(f(x), x)\n\ti_simpson = integ.simps(f(x), x)\n\ti_quad = integ.quad(f, 0, 10)\n\ti_quad2 = i_quad[0]\n\t\n\tprint \"integ.trapz() bei \" + str(s) + \" Stützpunkten: \" + str(i_trapez)\n\tprint \"integ.simps() bei \" + str(s) + \" Stützpunkten: \" + str(i_simpson)\n\tprint \"integ.quad() bei \" + str(s) + \" Stützpunkten: \" + str(i_quad2)\n\t\n\tintegral_pre = [i_quad2, i_trapez, i_simpson]\n\treturn integral_pre", "def sin(self, a):\n return math.sin(a)", "def singularityintegrate(f, x):\n\n if not f.has(SingularityFunction):\n return None\n\n if isinstance(f, SingularityFunction):\n x, a, n = f.args\n if n.is_positive or n.is_zero:\n return SingularityFunction(x, a, n + 1)/(n + 1)\n elif n in (-1, -2):\n return SingularityFunction(x, a, n + 1)\n\n if f.is_Mul or f.is_Pow:\n\n expr = f.rewrite(DiracDelta)\n expr = integrate(expr, x)\n return expr.rewrite(SingularityFunction)\n return None", "def sin(x):\n return SinOp(x)", "def sin(x):\n raise NotImplementedError", "def sin(x):\n if isinstance(x, int):\n x = Expression(x)\n return _sin(x)", "def sin(a):\n return math.sin(a)", "def integrand(x):\n return np.exp(x * x) * special.erfc(x)", "def j0i(x):\n def integrand(phi):\n return math.cos(x * math.sin(phi))\n return (1.0/math.pi) * quad(integrand, 0, math.pi)[0]", "def integrate(self, *args, **kwargs):\n from sympy.integrals.integrals import integrate\n return integrate(self, *args, **kwargs)", "def integrate(self, z):\n return simps(simps(z, self.y), self.x)", "def sin_function():\n f = lambda x: math.sin(x)\n return f", "def integrate_fn(f, x_low, x_high, num=50):\n pass", "def _integrate_0_2pi_phis(self, expr):\n\n phi_s = sp.Symbol('phi_s')\n\n # replace first all odd powers of sin(phi_s) as these are\n # all zero for the integral\n replacements1 = [(sp.sin(phi_s) ** i, 0.)\n for i in range(1, self.SRF.ncoefs +\n self.V.ncoefs + 1) if i % 2 == 1]\n\n # then substitute the sine**2 by 1-cos**2\n replacements1 = (replacements1 +\n [(sp.sin(phi_s) ** i,\n expand((1. -\n sp.cos(phi_s) ** 2) ** sp.Rational(i, 2)))\n for i in range(2, self.SRF.ncoefs +\n self.V.ncoefs + 1) if i % 2 == 0])\n\n res = expand(expr.xreplace(dict(replacements1)))\n\n # replacements need to be done simultaneously, otherwise all\n # remaining sin(phi_s)**even will be replaced by 0\n\n # integrate the cosine terms\n replacements3 = [(sp.cos(phi_s) ** i, self._cosintegral(i))\n for i in range(1, self.SRF.ncoefs +\n self.V.ncoefs + 1)]\n\n res = expand(res.xreplace(dict(replacements3)))\n return res", "def sine(B):\n sin = math.sin\n pi = math.pi\n \n def f(x):\n return B*sin(pi*x)\n return f" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
user enters an integer and the corresponding link is deleted
def delete_registry(self) -> None:
    self.view_registry()
    links = self.load_links()[0]
    try:
        url_to_delete = links[abs(int(input("Enter no. of URL to delete: ")))]
    except IndexError:
        print('Item not found - Nothing was deleted')
        return
    with open(URL_FILE, 'w') as f:
        for link in links:
            if(link != url_to_delete):
                f.write(link+'\n')
[ "def delete_link(self, link):", "def unlink(self, link_id):", "def remove_link():", "def delete_secret_link(link_id):\n\n Secret_Link.objects.filter(link_id=link_id).delete()", "def deleteLink(self):\n # get selected index\n index = self.getSelectedIndex(\"delete\")\n # check selection\n if index is not None:\n # prevent last link to use reference beacon from being deleted\n if self.isLastAnchorLink(index):\n QMessageBox.warning(\n self, \n \"Last Link To Reference Beacon\", \n \"Cannot remove last link to reference beacon\"\n )\n return\n # recursively delete dependant links\n self.deleteLinkDependants(self.beardistChain[index][0][3])\n # delete link\n del self.beardistChain[index] \n self.updateBearDistChainDependants()\n if len(self.beardistChain) == 0: \n self.pshbtn_chain_finish.setEnabled(False)\n self.pshbtn_chain_edt.setEnabled(False)\n self.pshbtn_chain_del.setEnabled(False)", "def delete_entry():\n u_id = request.args(0) or redirect(URL('moderation', 'new_entries'))\n db(db.lioli_main.unique_id == u_id).delete()\n redirect(URL('new_entries'))\n return dict()", "def deleteEntry(entry_id):", "def delete(identifier):", "def delete_link(self):\n self.link_layout.links_list.remove_widget(self)\n self.link_layout.links.remove(self.text)\n utils.update_data()\n utils.data[self.link_layout.parent_screen.name]['links'] = self.link_layout.links\n utils.save_project_data(utils.data[self.link_layout.parent_screen.name],\n f\"{utils.data[self.link_layout.parent_screen.name]['proj_path']}/project_data.json\")", "def delete_inventory():\r\n strIDDel = input('Which ID would you like to delete?: ').strip()\r\n while ValueError:\r\n try: \r\n int(strIDDel)\r\n break\r\n except ValueError:\r\n strIDDel = input('Error: ID must be numeric. Enter ID: ').strip()\r\n return strIDDel", "def bestiary_unlink(request, pk, ci=None):\n bestiary = Bestiary.objects.filter(owner=request.user, id=pk).first()\n if ci:\n creature = Creature.objects.filter(owner=request.user, id=ci).first()\n # Get the CreatureQuantity object that describes the link\n obj = CreatureQuantity.objects.filter(bestiary=bestiary, creature=creature, owner=request.user)\n if len(obj) == 1:\n # and delete it\n obj.delete()\n else:\n bestiary.creatures.clear()\n return HttpResponseRedirect(reverse('bestiary-detail', kwargs={'pk': pk}))", "def sys_upload_link_remove(request):\n content_type = 'application/json; charset=utf-8'\n result = {}\n\n token = request.POST.get('t')\n if not token:\n result = {'error': _(u\"Argument missing\")}\n return HttpResponse(json.dumps(result), status=400, content_type=content_type)\n\n UploadLinkShare.objects.filter(token=token).delete()\n result = {'success': True}\n return HttpResponse(json.dumps(result), content_type=content_type)", "def delete(self, _id):", "def delete_secret_link(self, identity, id_, link_id, links_config=None, uow=None):\n record, parent = self.get_parent_and_record_or_draft(id_)\n\n # Permissions\n self.require_permission(identity, \"manage\", record=record)\n\n # Fetching\n link_ids = [link.link_id for link in parent.access.links]\n if str(link_id) not in link_ids:\n raise LookupError(str(link_id))\n\n link_idx = link_ids.index(link_id)\n link = parent.access.links[link_idx].resolve()\n\n # Deletion\n parent.access.links.pop(link_idx)\n link.revoke()\n\n # Commit\n uow.register(RecordCommitOp(parent))\n if record:\n uow.register(RecordCommitOp(record))\n\n # Index all child records of the parent\n self._index_related_records(record, parent, uow=uow)\n\n return True", "def 
delete_links(self):\n link_names = get_selected_list(self.link_box)\n if link_names:\n self.link_obj.delete_links(link_names)\n self.populate_links()", "def delete_secret_link(\n self,\n id_,\n identity,\n link_id,\n links_config=None,\n ):\n record, parent = self._get_record_and_parent_by_id(id_)\n\n # Permissions\n self.require_permission(identity, \"manage\", record=record)\n\n # Fetching\n link_ids = [link.link_id for link in parent.access.links]\n if str(link_id) not in link_ids:\n raise LookupError(str(link_id))\n\n link_idx = link_ids.index(link_id)\n link = parent.access.links[link_idx].resolve()\n\n # Deletion\n parent.access.links.pop(link_idx)\n link.revoke()\n\n # Commit and index\n parent.commit()\n if record:\n record.commit()\n\n db.session.commit()\n self._index_related_records(record, parent)\n\n return True", "def deleterecord(phones,username,phonenum):\r\n if username in phones:\r\n del phones[username]\r\n else:\r\n raise ValueError(\"This username are not exist\")", "def delete_link(self, word):\n meaning = self.word2meaning[word]\n print(str(self.unique_id) + \" forgot \" +\n str(word) + \" for \" + str(meaning))\n del self.word2meaning[word]\n del self.meaning2word[meaning]\n del self.wordsuccess[word]\n\n # If the agent was the only one using the word, delete the word\n if len(self.model.vocabulary[meaning][word]) == 1:\n del self.model.vocabulary[meaning][word]\n # Else simply remove the agent\n else:\n self.model.vocabulary[meaning][word].remove(self.unique_id)", "def _lnk_delete_link(self, link_name):\n translated_name = '/' + self._trajectory_name + '/' + link_name.replace('.','/')\n link = self._hdf5file.get_node(where=translated_name)\n link._f_remove()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculating the psi operator for the transport and production of the enstrophy
def psi_enstrophy(
        Tau,                # SGS; (6,64,64,64)
        h    = False,       # spatial step size
        flag = True):       # spectral flag; default is gradient tool
    #---------------------------------------------------------------------#
    # Default variables                                                   #
    #---------------------------------------------------------------------#
    if h is False:
        Pi = np.pi
        N = 64
        h = (2.0*Pi)/N
    #---------------------------------------------------------------------#
    # Preallocation variables                                             #
    #---------------------------------------------------------------------#
    dim = np.shape(Tau)[1]
    Psi = np.zeros((9, dim, dim, dim))
    #---------------------------------------------------------------------#
    # Calculating psi using spectral methods                              #
    #---------------------------------------------------------------------#
    if flag is False:
        kspec = np.fft.fftfreq(dim) * dim
        Kfield = np.array(np.meshgrid(kspec, kspec, kspec, indexing='ij'))
        #-----------------------------------------------------------------#
        # Psi_{11}                                                        #
        #-----------------------------------------------------------------#
        Psi[0] = np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[2])).real -\
                 np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[1])).real
        #-----------------------------------------------------------------#
        # Psi_{12}                                                        #
        #-----------------------------------------------------------------#
        Psi[1] = np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[4])).real -\
                 np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[3])).real
        #-----------------------------------------------------------------#
        # Psi_{13}                                                        #
        #-----------------------------------------------------------------#
        Psi[2] = np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[5])).real -\
                 np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[4])).real
        #-----------------------------------------------------------------#
        # Psi_{21}                                                        #
        #-----------------------------------------------------------------#
        Psi[3] = np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[0])).real -\
                 np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[2])).real
        #-----------------------------------------------------------------#
        # Psi_{22}                                                        #
        #-----------------------------------------------------------------#
        Psi[4] = np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[1])).real -\
                 np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[4])).real
        #-----------------------------------------------------------------#
        # Psi_{23}                                                        #
        #-----------------------------------------------------------------#
        Psi[5] = np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(Tau[2])).real -\
                 np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[5])).real
        #-----------------------------------------------------------------#
        # Psi_{31}                                                        #
        #-----------------------------------------------------------------#
        Psi[6] = np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[1])).real -\
                 np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[0])).real
        #-----------------------------------------------------------------#
        # Psi_{32}                                                        #
        #-----------------------------------------------------------------#
        Psi[7] = np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[3])).real -\
                 np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[1])).real
        #-----------------------------------------------------------------#
        # Psi_{33}                                                        #
        #-----------------------------------------------------------------#
        Psi[8] = np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(Tau[4])).real -\
                 np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(Tau[2])).real
    #---------------------------------------------------------------------#
    # Calculating psi using gradient tool                                 #
    #---------------------------------------------------------------------#
    else:
        #-----------------------------------------------------------------#
        # Psi_{11}                                                        #
        #-----------------------------------------------------------------#
        Psi[0] = np.gradient(Tau[2], h, edge_order=2)[1] -\
                 np.gradient(Tau[1], h, edge_order=2)[0]
        #-----------------------------------------------------------------#
        # Psi_{12}                                                        #
        #-----------------------------------------------------------------#
        Psi[1] = np.gradient(Tau[4], h, edge_order=2)[1] -\
                 np.gradient(Tau[3], h, edge_order=2)[0]
        #-----------------------------------------------------------------#
        # Psi_{13}                                                        #
        #-----------------------------------------------------------------#
        Psi[2] = np.gradient(Tau[5], h, edge_order=2)[1] -\
                 np.gradient(Tau[4], h, edge_order=2)[0]
        #-----------------------------------------------------------------#
        # Psi_{21}                                                        #
        #-----------------------------------------------------------------#
        Psi[3] = np.gradient(Tau[0], h, edge_order=2)[0] -\
                 np.gradient(Tau[2], h, edge_order=2)[2]
        #-----------------------------------------------------------------#
        # Psi_{22}                                                        #
        #-----------------------------------------------------------------#
        Psi[4] = np.gradient(Tau[1], h, edge_order=2)[0] -\
                 np.gradient(Tau[4], h, edge_order=2)[2]
        #-----------------------------------------------------------------#
        # Psi_{23}                                                        #
        #-----------------------------------------------------------------#
        Psi[5] = np.gradient(Tau[2], h, edge_order=2)[0] -\
                 np.gradient(Tau[5], h, edge_order=2)[2]
        #-----------------------------------------------------------------#
        # Psi_{31}                                                        #
        #-----------------------------------------------------------------#
        Psi[6] = np.gradient(Tau[1], h, edge_order=2)[2] -\
                 np.gradient(Tau[0], h, edge_order=2)[1]
        #-----------------------------------------------------------------#
        # Psi_{32}                                                        #
        #-----------------------------------------------------------------#
        Psi[7] = np.gradient(Tau[3], h, edge_order=2)[2] -\
                 np.gradient(Tau[1], h, edge_order=2)[1]
        #-----------------------------------------------------------------#
        # Psi_{33}                                                        #
        #-----------------------------------------------------------------#
        Psi[8] = np.gradient(Tau[4], h, edge_order=2)[2] -\
                 np.gradient(Tau[2], h, edge_order=2)[1]

    return Psi
[ "def _phi2psi(self):\n try:\n locq = self.param_q(self.rhotor)\n except:\n self._readeqdsk(self.shot)\n locq = self.param_q(self.rhotor)\n \n locphi = self.rhotor**2\n psi = integrate.cumtrapz(1/locq,locphi)\n psi = np.concatenate([[0], psi])\n psi = psi/max(psi)\n self.param_psi = interpolate.interp1d(self.rhotor, psi) \n \n\n # tmpnum=100000\n # locq = self.param_q(np.linspace(0,1,tmpnum)) #augmenting precision near the core\n # locphi = self.rhotor**2\n # locphi_p = interpolate.interp1d(np.linspace(0,1,len(locphi)),locphi)\n # locphi = locphi_p(np.linspace(0,1,tmpnum))\n # psi = integrate.cumtrapz(1/locq,locphi)\n # psi = np.concatenate([[0], psi])\n # psi = psi/max(psi)\n # rhopsi = psi**0.5\n # self.param_psi = interpolate.interp1d(np.linspace(0,1,tmpnum), rhopsi)", "def exptomo(self, psi):\n return np.exp(1j*psi * self.voxelsize * self.wavenumber())", "def _phi2psi(self):\n try:\n locq = self.param_q(self.rhotor)\n except:\n self._readeqdsk(self.shot)\n locq = self.param_q(self.rhotor)\n \n locphi = self.rhotor**2\n psi = integrate.cumtrapz(1/locq,locphi)\n psi = np.concatenate([[0], psi])\n psi = psi/max(psi)\n self.param_psi = interpolate.interp1d(self.rhotor, psi)", "def cal_phi(self):\n\n if not self.check_def(['E','px','py','pz']):\n sys.exit('Particle error: Quadri impulsion not define (error for phi routine)')\n\n if(self.px>0):\n self.phi=math.atan(self.py/self.px)\n elif(self.px<0):\n self.phi=math.atan(self.py/self.px)+math.pi\n elif(self.py>0): #remind that p(1)=0\n self.phi=math.pi/2.0\n elif(self.py<0): # remind that p(1)=0\n self.phi=-math.pi/2.0\n else:\n print \"Warning self.phi not properly defined put value to 0\"\n self.phi=0\n \n if(self.phi<0):\n self.phi=self.phi+2*math.pi\n\n return self.phi", "def get_mfp(self, T):\n\n self.air.T = T\n self.air.set_TempPres_dependents()\n\n self.mfp = (\n (np.sqrt(2.) * np.pi * self.air.d ** 2. 
* self.air.n) ** -1.\n )\n\n return self.mfp", "def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)", "def calc_orbitals(self):\n\n assert len(self.veff) != 0, \"Veff is not set\"\n\n Nelem = self.grid.Nelem\n\n if self.Nmo != 0:\n\n #Inverse volume element\n W = spdiags(data=self.grid.w, diags=0, m=Nelem, n=Nelem)\n W = csc_matrix(W)\n\n #Build effective potential operator\n Veff = spdiags(data= W @ self.veff, diags=0, m=Nelem, n=Nelem)\n\n if self.H0 is None:\n self.hamiltionian()\n\n #Construct Hamiltonian\n H = self.H0 + Veff\n\n #Solve eigenvalue problem\n self.eig, self.phi = eigs(spsolve(W, H), k=self.Nmo, sigma=self.e0, v0=self.opt[\"v0\"])\n self.eig = self.eig.real\n self.phi = self.phi.real\n e0 = self.e0\n\n while np.isnan(self.phi).all() != np.zeros_like(self.phi).all():\n e0 = e0 - 0.1\n self.eig, self.phi = eigs(spsolve(W, H), k=self.Nmo, sigma=e0, v0=self.opt[\"v0\"])\n self.eig = self.eig.real\n self.phi = self.phi.real\n\n #Check for degenerate and nearly degenerate orbitals\n for i in range(self.Nmo-1):\n for j in range(i+1, self.Nmo):\n if np.abs(self.eig[i]-self.eig[j]) < 1e-9:\n even = self.phi[:, i] + self.grid.mirror(self.phi[:,i]) + self.phi[:,j] + self.grid.mirror(self.phi[:,j])\n odd = self.phi[:, i] - self.grid.mirror(self.phi[:,i]) + self.phi[:,j] - self.grid.mirror(self.phi[:,j])\n self.phi[:, i] = even/norm(even)\n self.phi[:, j] = odd/norm(odd)\n\n\n if self.SYM is True:\n for i in range(self.Nmo):\n if self.phi[:,i].T @ self.grid.mirror(self.phi[:,i]) > 0:\n\n self.phi[:,i] = self.phi[:, i] + self.grid.mirror(self.phi[:,i])\n self.phi[:,i] = self.phi[:, i] / norm(self.phi[:, i])\n\n else:\n\n self.phi[:, i] = self.phi[:, i] - self.grid.mirror(self.phi[:,i])\n \n else:\n self.eig = -1 / spacing(1)", "def _calc_enthalpy(self):\r\n \r\n if self.temperature==None:\r\n raise BadStreamError, '%s Stream temperature needs to be defined to calculate enthalpy.' % self.name\r\n if self.pressure==None:\r\n raise BadStreamError, '%s Stream pressure needs to be defined to calculate enthalpy.' % self.name\r\n if self.composition ==None:\r\n raise BadStreamError, '%s Stream composition needs to be defined to calculate enthalpy.' 
% self.name\r\n conv = uc.UnitConverter()\r\n \r\n #get the specific enthalpy -- for the vector case will need to loop through and build the enthalpy vectors\r\n getattr(self, \"_calc_spec_enthalpy_%s\" % self.mode)()\r\n\r\n #self.ct_setcomp(self.composition)\r\n #self.ctphase.set(T = conv.convert_units(self.temperature[0], self.temperature[1], 'K'), P = conv.convert_units(self.pressure[0], self.pressure[1], 'Pa'))\r\n \r\n #Cantera output is J/kmol or J/kg, so conversions must follow this for molar and mass flow rates.\r\n if self.basis == 'molar':\r\n #convert to kmol/s:\r\n \r\n flow = conv.convert_units(self.flowrate[0], self.flowrate[1], 'kmol/s')\r\n\r\n elif self.basis == 'mass':\r\n #convert to kg/s\r\n flow = conv.convert_units(self.flowrate[0], self.flowrate[1], 'kg/s')\r\n \r\n \r\n elif self.basis == \"gas_volume\":\r\n val = conv.convert_units(self.flowrate[0], self.flowrate[1], 'm^3/s')\r\n p = conv.convert_units(self.pressure[0], self.pressure[1], 'Pa')\r\n T = conv.convert_units(self.temperature[0], self.temperature[1], 'K')\r\n flow = val*p/(8.314*T)/1000\r\n \r\n elif self.basis == \"std_gas_volume\":\r\n val = conv.convert_units(self.flowrate[0], self.flowrate[1], 'm^3/s')\r\n p = conv.convert_units(self.std_pressure[0], self.std_pressure[1], 'Pa')\r\n T = conv.convert_units(self.std_temperature[0], self.std_temperature[1], 'K')\r\n flow = val*p/(8.314*T)/1000\r\n\r\n enthalpy = flow*self.spec_enthalpy\r\n self.enthalpy = (enthalpy, 'J/s')", "def internalenergy(temp,pres):\n g = liq_g(0,0,temp,pres)\n g_t = liq_g(1,0,temp,pres)\n g_p = liq_g(0,1,temp,pres)\n u = g - temp*g_t - pres*g_p\n return u", "def flux_qubit_potential(self):\n return -self.Ej * cos(self.phis - 2. * pi * self.phi) + self.El/2. * (self.phis) ** 2", "def angact_iso(x,params):\n GM = Grav*params[0]\n E = H_iso(x,params)\n r,p,t,vr,vphi,vt=cart2spol(x)\n st=np.sin(t)\n Lz=r*vphi*st\n L=np.sqrt(r*r*vt*vt+Lz*Lz/st/st)\n if(E>0.): # Unbound\n return (np.nan,np.nan,np.nan,np.nan,np.nan,np.nan)\n Jr=GM/np.sqrt(-2*E)-0.5*(L+np.sqrt(L*L+4*GM*params[1]))\n action = np.array([Jr,Lz,L-abs(Lz)])\n\n c=GM/(-2*E)-params[1]\n e=np.sqrt(1-L*L*(1+params[1]/c)/GM/c)\n eta=np.arctan2(r*vr/np.sqrt(-2.*E),params[1]+c-np.sqrt(params[1]**2+r*r))\n OmR=np.power(-2*E,1.5)/GM\n Omp=0.5*OmR*(1+L/np.sqrt(L*L+4*GM*params[1]))\n thetar=eta-e*c*np.sin(eta)/(c+params[1])\n\n if(abs(vt)>1e-10):\n psi=np.arctan2(np.cos(t),-np.sin(t)*r*vt/L)\n else:\n psi=np.pi/2.\n a=np.sqrt((1+e)/(1-e))\n ap=np.sqrt((1+e+2*params[1]/c)/(1-e+2*params[1]/c))\n F = lambda x,y: np.pi/2.-np.arctan(np.tan(np.pi/2.-0.5*y)/x) if y>np.pi/2. \\\n else -np.pi/2.+np.arctan(np.tan(np.pi/2.+0.5*y)/x) if y<-np.pi/2. 
\\\n else np.arctan(x*np.tan(0.5*y))\n\n thetaz=psi+Omp*thetar/OmR-F(a,eta)-F(ap,eta)/np.sqrt(1+4*GM*params[1]/L/L)\n\n LR=Lz/L\n sinu = LR/np.sqrt(1.-LR**2)/np.tan(t)\n u = 0\n if(sinu>1.):\n u=np.pi/2.\n elif(sinu<-1.):\n u = -np.pi/2.\n else:\n u = np.arcsin(sinu)\n if(vt>0.):\n u=np.pi-u\n thetap=p-u+np.sign(Lz)*thetaz\n angle = np.array([thetar,thetap,thetaz])\n return np.concatenate((action,angle % (2.*np.pi)))", "def psi(self, x, y):\n self._check_size_x(x)\n features, edges = self.get_features(x), self.get_edges(x)\n n_nodes = features.shape[0]\n\n if isinstance(y, tuple):\n # y is result of relaxation, tuple of unary and pairwise marginals\n unary_marginals, pw = y\n unary_marginals = unary_marginals.reshape(n_nodes, self.n_states)\n # accumulate pairwise\n pw = pw.reshape(-1, self.n_states, self.n_states).sum(axis=0)\n else:\n y = y.reshape(n_nodes)\n gx = np.ogrid[:n_nodes]\n\n #make one hot encoding\n unary_marginals = np.zeros((n_nodes, self.n_states), dtype=np.int)\n gx = np.ogrid[:n_nodes]\n unary_marginals[gx, y] = 1\n\n ##accumulated pairwise\n pw = np.dot(unary_marginals[edges[:, 0]].T,\n unary_marginals[edges[:, 1]])\n\n unaries_acc = np.dot(unary_marginals.T, features)\n pw = pw + pw.T - np.diag(np.diag(pw)) # make symmetric\n\n psi_vector = np.hstack([unaries_acc.ravel(),\n pw[np.tri(self.n_states, dtype=np.bool)]])\n return psi_vector", "def ScintillationPhotons(self,E):\n return E/self.Ws()", "def psi_exp(self):\n return [t[1] for t in self.psis]", "def finiteKPerpdispRel(w):\r\n #Various plasma frequencies\r\n Pi_he=np.sqrt((ne*heRatio)*q_p**2/(eps_0*4*m_amu)) #Helium plasma frequency\r\n Pi_ne=np.sqrt((ne*neRatio)*q_p**2/(eps_0*20*m_amu)) #Neon plasma frequency\r\n Pi_e=np.sqrt(ne*q_e**2/(eps_0*m_e)) #Electron plasma frequency\r\n Omega_he=q_p*magB/(4*m_amu) #Helium cyclotron frequency\r\n Omega_ne=q_p*magB/(20*m_amu) #Neon cyclotron frequency\r\n Omega_e=q_e*magB/(m_e) #Electron cyclotron frequency\r\n \r\n #R,L and P\r\n R=1-((Pi_e**2/w**2)*(w/(w+Omega_e)))-((Pi_he**2/w**2)*(w/(w+Omega_he)))-((Pi_ne**2/w**2)*(w/(w+Omega_ne))) #Right-hand polarized wave\r\n L=1-((Pi_e**2/w**2)*(w/(w-Omega_e)))-((Pi_he**2/w**2)*(w/(w-Omega_he)))-((Pi_ne**2/w**2)*(w/(w-Omega_ne))) #Left-hand polarized wave\r\n P=1-(Pi_e**2/(w*(w+1j*nu_e)))-(Pi_he**2/w**2)-(Pi_ne**2/w**2) #Unmagnetized plasma\r\n\r\n #S and D\r\n S=(R+L)/2\r\n D=(R-L)/2\r\n \r\n #u=w**2/c**2\r\n u=(w/c)**2\r\n \r\n #g_perp=k_perp**2\r\n gPerp=kPerp**2\r\n \r\n #Cubic equation coefficients\r\n bTerm=(gPerp*S/P)+(2*gPerp)-(L*u)-(R*u)\r\n cTerm=(2*gPerp*gPerp*S/P)-(gPerp*R*L*u/P)-(gPerp*S*u)+(gPerp*gPerp)-(gPerp*L*u)-(gPerp*R*u)+(R*L*u*u)\r\n dTerm=(gPerp*gPerp*gPerp*S/P)-(gPerp*gPerp*R*L*u/P)-(gPerp*gPerp*S*u)+(gPerp*R*L*u*u)\r\n \r\n #Depressed cubic equation coefficients\r\n pTerm=(3*cTerm-bTerm*bTerm)/3\r\n qTerm=(2*bTerm*bTerm*bTerm-9*bTerm*cTerm+27*dTerm)/27\r\n \r\n #kPar\r\n kPar=0\r\n if 4*pTerm*pTerm*pTerm+27*qTerm*qTerm>0:\r\n #Single real root\r\n term1=(-qTerm/2+np.sqrt((qTerm*qTerm/4)+(pTerm*pTerm*pTerm/27)))**(1/3)\r\n term2=(-qTerm/2-np.sqrt((qTerm*qTerm/4)+(pTerm*pTerm*pTerm/27)))**(1/3)\r\n realRoot=term1+term2\r\n \r\n #Convert back to original cubic\r\n gPar=realRoot-bTerm/3\r\n \r\n #Calcualte kPar\r\n kPar=np.sqrt(gPar)\r\n \r\n else:\r\n #arccos term\r\n arccosTerm=np.arccos((3*qTerm/(2*pTerm))*np.sqrt(-3/pTerm))\r\n #cos term\r\n k=0\r\n cosTerm=np.cos((1/3)*arccosTerm-2*np.pi*k/3)\r\n \r\n #Real root\r\n realRoot=2*np.sqrt(-pTerm/3)*cosTerm\r\n \r\n #Convert back to original cubic\r\n 
gPar=realRoot-bTerm/3\r\n \r\n #Calcualte kPar\r\n kPar=np.sqrt(gPar)\r\n \r\n return kPar", "def _system_of_equations_desoto(params, specs):\n\n # six input known variables\n Isc, Voc, Imp, Vmp, beta_oc, alpha_sc, EgRef, dEgdT, Tref, k = specs\n\n # five parameters vector to find\n IL, Io, a, Rsh, Rs = params\n\n # five equation vector\n y = [0, 0, 0, 0, 0]\n\n # 1st equation - short-circuit - eq(3) in [1]\n y[0] = Isc - IL + Io * np.expm1(Isc * Rs / a) + Isc * Rs / Rsh\n\n # 2nd equation - open-circuit Tref - eq(4) in [1]\n y[1] = -IL + Io * np.expm1(Voc / a) + Voc / Rsh\n\n # 3rd equation - Imp & Vmp - eq(5) in [1]\n y[2] = Imp - IL + Io * np.expm1((Vmp + Imp * Rs) / a) \\\n + (Vmp + Imp * Rs) / Rsh\n\n # 4th equation - Pmp derivated=0 - eq23.2.6 in [2]\n # caution: eq(6) in [1] has a sign error\n y[3] = Imp \\\n - Vmp * ((Io / a) * np.exp((Vmp + Imp * Rs) / a) + 1.0 / Rsh) \\\n / (1.0 + (Io * Rs / a) * np.exp((Vmp + Imp * Rs) / a) + Rs / Rsh)\n\n # 5th equation - open-circuit T2 - eq (4) at temperature T2 in [1]\n T2 = Tref + 2\n Voc2 = (T2 - Tref) * beta_oc + Voc # eq (7) in [1]\n a2 = a * T2 / Tref # eq (8) in [1]\n IL2 = IL + alpha_sc * (T2 - Tref) # eq (11) in [1]\n Eg2 = EgRef * (1 + dEgdT * (T2 - Tref)) # eq (10) in [1]\n Io2 = Io * (T2 / Tref)**3 * np.exp(1 / k * (EgRef/Tref - Eg2/T2)) # eq (9)\n y[4] = -IL2 + Io2 * np.expm1(Voc2 / a2) + Voc2 / Rsh # eq (4) at T2\n\n return y", "def energy_tot(P,F,H,molecule):\n return energy_el(P,F,H) + energy_n(molecule)", "def idealOpAmp():", "def _energy(E, q, p, a, mu):\n return (\n a ** 4 * (-(E ** 2) + mu ** 2 + 2 * p[0] ** 2)\n + 8 * a * E * p[2] * q[0]\n - a ** 2\n * (\n 2 * p[2] ** 2\n - 2 * p[1] ** 2\n + q[0]\n * (\n mu ** 2 * (2 - 3 * q[0])\n - 4 * p[0] ** 2 * (-2 + q[0])\n + E ** 2 * (2 + 3 * q[0])\n )\n )\n + 2\n * q[0]\n * (\n p[1] ** 2 * (-2 + q[0])\n + q[0]\n * (\n p[0] ** 2 * (-2 + q[0]) ** 2\n + q[0] * (mu ** 2 * (-2 + q[0]) - E ** 2 * q[0])\n )\n )\n - (a ** 2 + (-2 + q[0]) * q[0])\n * (\n a ** 2 * (E - mu) * (E + mu) * np.cos(2 * q[1])\n - 2 * p[2] ** 2 * (1 / (np.sin(q[1]) ** 2))\n )\n ) / (\n (a ** 2 + (-2 + q[0]) * q[0])\n * (a ** 2 + 2 * q[0] ** 2 + a ** 2 * np.cos(2 * q[1]))\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the coordinates of this atom. Returns
def get_coords(self):
    return self.coords
[ "def get_coords(self):\n return tuple(self.coords)", "def get_coords(self):\n assert all(atom.idx is not None for atom in self)\n atoms = self.atoms[:]\n atoms.sort(key=lambda a: a.idx)\n return np.array([atom.coords for atom in atoms])", "def getCoordinates(self):\n return self.getSelection(\"all\").getCoordsets()", "def coords(self):\n\n return self.__get_gps_location()", "def get_coordinates(self):\n return np.array([(n.x, n.y) for n in self.nodes])", "def get_coordinates(self):\r\n\r\n return traci.vehicle.getPosition(self.id)", "def coordinates(self) -> tuple:\n return (self.lat, self.lon)", "def coordinates(self):\n\t\tlocation = self.current['geometry']['location']\n\t\treturn location['lat'], location['lng']", "def getCoords(self):\n\n if self._coords is not None:\n return self._coords[self._acsi].copy()", "def coords(self):\n return (self.x1, self.y1, self.x2, self.y2)", "def getCoords(self):\n try:\n return self.coords\n except AttributeError:\n raise AttributeError, \"Entity has coordinates not set.\"", "def get_coord(self):\n return self.coord", "def realCoordinates(self):\n return self.__realCoordinates", "def getMachineCoordinates(self):\n return (self.x, self.y, self.z)", "def get_coords(self) -> Tuple[int]:\r\n return self.file, self.rank", "def mpiCoords(self):\n return self._current_manager.mpiCoords", "def coordinates_attrs(self):\n return self._get_attrs('coordinates')", "def coords(self):\n return self._coords[self._model, :]", "def xy(self) -> Tuple[float, float]:\n return (self.x, self.y)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the x coordinate of this atom. Returns
def get_x(self):
    return self.coords[0]
[ "def get_x(self):\n return self.__x_position", "def getXPosition(self):\n return self._x", "def get_pos_x(self):\n return self.__pos_x", "def x(self):\n return self.coords[0]", "def get_x(self) -> int:\n return self.__x", "def x(self):\n return _libsbml.Point_x(self)", "def pos_x(self) -> int:\n return self._pos_x", "def X(self) -> int:\n return self.m_point.x()", "def getX(self):\n return _libsbml.BoundingBox_getX(self)", "def get_origin_x_position(self):\n return self.origin_coordinates[0]", "def origin_x(self):\n return self._origin[0]", "def get_item_x(self):\n return self.item.x", "def get_ship_x(self):\n return self.x", "def getXOffset(self):\n return _libsbml.Point_getXOffset(self)", "def position(self):\n return self.x", "def tile_x(self):\n return self._tile_x", "def x ( self ) :\n return self.xvar", "def getX(self):\n return self.components[0]", "def getXViewpoint(self):\r\n return self._xViewpoint" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the z coordinate of this atom. Returns
def get_z(self):
    return self.coords[2]
[ "def getZ(self):\n\t\treturn self.coords.z", "def z(self):\n return self.coords[2]", "def get_z(self):\n return self._z", "def get_z(self) -> int:\n return self.__z", "def getZ(self):\n return _libsbml.BoundingBox_getZ(self)", "def z(self):\n return self.position[2]", "def z(self):\n return _libsbml.Point_z(self)", "def z ( self ) :\n return self.zvar", "def get_zaxis(self):\n return self.zaxis", "def tile_z(self):\n return self._tile_z", "def getZ(self):\n return self.getAcceleration(self.Axes.kZ)", "def z_axis(self):\n return self.z.coordinate_system.axes[0]", "def num_z(self):\n return self._xyzct['Z']", "def getZOffset(self):\n return _libsbml.Point_getZOffset(self)", "def getZOrder(self):\n\t\treturn self._zOrder", "def zx(self):\n return Vector.outer(self.z, self.x)", "def getz_index(self):\n return self._getz_index", "def zy(self):\n return Vector.outer(self.z, self.y)", "def z(self):\n return self._translation[2, 0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the mass of this atom. Returns
def get_mass(self):
    return self.m
[ "def get_mass(self):\n\n return self.__mass", "def mass(self):\n return self._mass", "def get_mass(self):\n _pal.lib.geometry_get_mass.restype = c.c_float\n return _pal.lib.geometry_get_mass(self._geometry)", "def get_mass(self):\n return sum([cell.mass for cell in self.cells])", "def get_mass(self) -> float:\n mass = 0\n if hasattr(self, \"SOLUTEATOM\"):\n for i in self.SOLUTEATOM.content:\n mass += i.MASS\n return mass", "def mass(self):\r\n return Box2DEngine.Body_GetMass(self.id)", "def mass(self):\n return maspy.peptidemethods.calcPeptideMass(self.sequence)", "def mass(self):\n formula = self.formula(as_dict=True)\n mass = 0\n for element, number in formula.items():\n element = Atom(element) # PT.x requires Atom objects\n mass += PT.get_mass(element) * number\n return f\"{mass:.2f} g mol⁻¹\"", "def getMass(self):\n return self.mass", "def atomic_mass(a):\n\n return a.GetMass()", "def total_mass(self):\n return self._total_mass", "def mass(self):\n\t\treturn self.volume*self.density", "def get_mass(self) -> pd.Series:\n return pd.Series(map(float, self.database.mass), name=\"mass\", index=self.database.time)", "def mass(self):\n if self._propagator is not None:\n return self._propagator._propagator_num.getInitialState().getMass()\n else:\n err_msg = \"Mass of \" + self.namespace + \" not initialized. Build propagator method has to be called first!\"\n raise AttributeError(err_msg)", "def get_mass(self, body_id: str):\n return self._get_physical_property(body_id, 'mass')", "def get_mass(self, total=False):\n if self.n == 0:\n return 0.0\n\n grid_cid0 = self.grid.get_positions()\n p1 = grid_cid0[self.node_ids[:, 0]]\n p2 = grid_cid0[self.node_ids[:, 1]]\n L = p2 - p1\n rho = self.model.Materials.get_rho(self.material_id)\n mass = norm(L, axis=1) * self.A * rho + self.nsm\n if total:\n return mass.sum()\n else:\n return mass", "def particleMass(self):\n return self.params['particleMass']", "def _get_mass(self, geom: MjcfElement, volume: float) -> float:\n if geom.mass:\n return geom.mass\n density = geom.density if geom.density else self._default_density\n return volume * density", "def mass_energy(self) -> u.J:\n return self._get_particle_attribute(\n \"mass_energy\",\n unit=u.J,\n default=np.nan * u.J,\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the van der Waals radius of this atom. Returns
def get_van_Der_Waals_radius(self):
    return self.van_Der_Waals_radius
[ "def get_radius(self):\n return self.radius", "def outer_radius(self):\n return self._outer_radius", "def radius(self):\n return self._radius", "def get_radius(self):\n return self._handler.get_radius()", "def get_radius(self):\n if self.no_dist is False:\n dist = self.distance\n radius = (dist * self.ang_size / 60. *\n np.pi/180. * ct._kpc_over_pc_)/2.\n self.radius = radius\n else:\n self.radius = -1 # use -1 to indicate unknown diameter\n\n return self.radius", "def getRadius(self):\n return self.__radius", "def inner_radius(self):\n return self._inner_radius", "def get_radius(self):\n return self.__size * Asteroid.ASTEROID_RADIUS_FACTOR + Asteroid.ASTEROID_RADIUS_NORMALIZER", "def get_radius(self):\r\n return self._handler.get_radius()", "def outer_radius(self) -> Quantity:\n return self._outer_rad", "def equivalent_radius(self):\n\n return np.sqrt(self.area / np.pi)", "def radius_of_gyration(self):\n\n center_of_mass = self.center_of_mass\n atoms = self._atoms\n square_deviation = sum(\n [atom.distance_to(center_of_mass) ** 2 for atom in atoms]\n )\n mean_square_deviation = square_deviation / len(atoms)\n return math.sqrt(mean_square_deviation)", "def eggleton_roche_radius(self):\n return self.eggleton_roche_over_separation() * self.separation()", "def getRadius(self):\n return self.get('sphere.radius')", "def cell_radius(self):\n return self.cell.mechanics.radius", "def triad_radius(self):\n return self._triad_radius", "def _get_equatorial_radius(self):\n return self._equatorial_radius", "def radius(self):\n return float(np.mean(spsd.cdist(self.mContour, [self.centroid()])))", "def rotor_radius(self):\n return self.rotor_diameter / 2.0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the Euler tensor of this atom. Returns
def get_euler(self):
    return array([ coord * self.coords for coord in self.coords ])
[ "def euler_x(self):\n return self._euler_x", "def euler_z(self):\n return self._euler_z", "def euler_y(self):\n return self._euler_y", "def imu_get_euler(self):\n return self.imu.get_euler()", "def inertia_tensor(self):\n\n mu = self.moments_central\n a = mu[0, 2]\n b = -mu[1, 1]\n c = mu[2, 0]\n return np.array([[a, b], [b, c]]) * u.pix**2", "def get_inertia_tensor(self):\n\n Inertia_Tensor = np.zeros((3, 3))\n\n for i in range(self.natom):\n Inertia_Tensor[0][0] += self.mwcart[i][1] * self.mwcart[i][1] + self.mwcart[i][2] * self.mwcart[i][2]\n Inertia_Tensor[1][1] += self.mwcart[i][0] * self.mwcart[i][0] + self.mwcart[i][2] * self.mwcart[i][2]\n Inertia_Tensor[2][2] += self.mwcart[i][0] * self.mwcart[i][0] + self.mwcart[i][1] * self.mwcart[i][1]\n Inertia_Tensor[0][1] += self.mwcart[i][0] * self.mwcart[i][1]\n Inertia_Tensor[0][2] += self.mwcart[i][0] * self.mwcart[i][2]\n Inertia_Tensor[1][2] += self.mwcart[i][1] * self.mwcart[i][2]\n\n Inertia_Tensor[1][0] = Inertia_Tensor[0][1]\n Inertia_Tensor[2][0] = Inertia_Tensor[0][2]\n Inertia_Tensor[2][1] = Inertia_Tensor[1][2]\n\n self.moi = Inertia_Tensor\n\n return Inertia_Tensor", "def euler_integrator(self, t, y, tau):\n\n return self.plant.rhs(t, y, tau)", "def getTensor(self):\n\t\treturn self.cur_tensor", "def eulerAxis(Q):\n x = Q[2, 1] - Q[1, 2]\n y = Q[0, 2] - Q[2, 0]\n z = Q[1, 0] - Q[0, 1]\n r = math.sqrt(x**2 + y**2 + z**2)\n t = math.atan2(r, Q[0, 0] + Q[1, 1] + Q[2, 2] -1) \n\n if abs(r) < 1e-8:\n # The matrix is almost and identity matrix producing no\n # rotation, we can represent it with a null euler axis,\n # to avoid dividing by 0\n return 0, 0, 0\n\n eX = t*x/r\n eY = t*y/r\n eZ = t*z/r\n\n return eX, eY, eZ", "def euler_characteristic(self):\n return Integer(self.degree() * 2 -\n sum(sum(j - 1 for j in self.profile(i))\n for i in range(self.length())))", "def inverse_e(self, e) -> tf.Tensor:\n return e", "def getEta(self):\n self.__eta = 3./8.*(1. - self.__alpha0 - self.__alpha1 - 2.*self.__beta)\n if self.__eta<0.: self.__eta=0. # erreur d'arrondi\n return self.__eta", "def euler_from_quaternion(quaternion, axes='sxyz'):\r\n return euler_from_matrix(quaternion_matrix(quaternion), axes)", "def euler(faces, edges, verticies):\n\n # Return the calculated value\n return verticies + edges - faces", "def inertia(self):\n return (self.rank - self.neig, self.neig, self.n - self.rank)", "def tensor_base(self):\n return self._tensor_base", "def getEtalonTemp(self):\n return float(self.laserQuery('ET'))", "def inertia_tensor(self, masswt=True, zero=ZERO):\n return self.inertia_tensor_partial(range(self.natom()), masswt, zero)", "def euler(x, t, h, f):\n return x + h * f(x, t)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the symbol of this atom. Returns
def get_symbol(self):
    return self.symbol
[ "def symbol(self):\n return self._symbol", "def getSymbol(self):\n return self.data[1:-1]", "def symbol(self, name):\n return self.symbols[name]", "def get_atomic_symbol(self, atom):\n atomic_number = self.get_atomic_number(atom)\n return get_symbol(atomic_number)", "def symbol_id(self) -> str:\n return self._symbol", "def getSymbol(self):\n return _libsbml.InitialAssignment_getSymbol(self)", "def GetChemicalSymbol(self):\n return self.symbol", "def getElementSymbol(self):\n dataDict = self.__dict__\n yy = self\n while yy is not None:\n xx = yy\n yy = xx.findFirstChemAtomSet()\n \n result = xx.findFirstChemAtom().elementSymbol\n return result", "def symbol(self):\r\n return self.__current_token", "def symbol(self) -> str:\n return self.current_token", "def get_display_symbol(self):\n return self.symbol if self.display_symbol is None else self.display_symbol", "def getAiSymbol(self) -> str:\n return self.ai.getSymbol()", "def atomic_symbol(self, atomic_number):\n return self.GetSymbol(atomic_number)", "def getPlayerSymbol(self) -> str:\n return self.player.getSymbol()", "def symbol(self, **kw):\n if not kw:\n raise ValueError(u\"'symbol' needs keyword arguments\")\n res = self.find_symbols(**kw)\n if len(res)==1:\n return res[0]\n else:\n return res", "def getPrimarySymbol(self) -> ghidra.program.model.symbol.Symbol:\n ...", "def read_symbol(self) -> str:\n return self.tape[self.current_position]", "def symbolic_name(self):\n return self._symbolic_name", "def atom(self):\n\n result = self.current_char\n pos = self.pos\n self.next()\n\n if self.current_char is not None and self.current_char.isalpha():\n nresult = result + self.current_char\n if nresult in TOT_SYMBOLS:\n self.next()\n return nresult\n\n if result in TOT_SYMBOLS:\n return result\n else:\n raise LexerException(pos, '{} is not a valid atomic symbol'.format(result))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the chain sequence number of the amino acid this atom belongs to. Returns
def get_ammino_chain_seq(self):
    return self.ammino_chain_seq
[ "def get_sequence_num(self):\n return self._sequence_num", "def sequence_number(self):\n return self._sequence_number", "def sequence_num(self):\n return self._sequence_num", "def seq_no(self):\n return self._seq_no", "def get_tx_seq(self, ac):\n cur = self._get_cursor()\n cur.execute(self.sql['tx_seq'], [ac])\n try:\n return cur.fetchone()['seq']\n except TypeError:\n return None", "def attempt_sequence_number(self):\n return self._attempt_sequence_number", "def chain_serial(self):\n return self.structure.chain_serial[self.mask]", "def chain_id(self) -> str:\n return pulumi.get(self, \"chain_id\")", "def __get_sequence_number(self):\n if self.counter > 999:\n self.counter = 0\n else:\n self.counter += 1\n\n str_sequence_num = self.counter + 256\n str_hex_sequence_num = hex(str_sequence_num)[2:]\n return str_hex_sequence_num", "def get_chain_sequence_and_numbering(self, chain_id, *args, **varargs):\n chain = self.structure[0][chain_id]\n return get_chain_sequence_and_numbering(chain, *args, **varargs)", "def sequence(self):\n return self._seqname", "def getResidueNumber(self, iAtom):\n return self._getResiduePointer(iAtom)+1", "def ReceiveSequenceNumber(self):\n return self._get_attribute('receiveSequenceNumber')", "def get_msg_seq_num(self):\n return self.__msg_seq_num", "def get_atomic_number(self, atom):\n self._check_atom_number(atom)\n return self.atomic_numbers[atom]", "def seq(self):\n return self._seq", "def residueNumber(self,i):\n assert(i >= 0 and i < self.nAtoms())\n assert(self._c_structure is not NULL)\n return freesasa_structure_atom_res_number(self._c_structure,i)", "def get_seq_from_pdbchain(chain):\n type_chain = check_type(chain)\n if type_chain == \"protein\":\n three_res_list = []\n for res in chain:\n residues_atoms = res.get_atoms()\n for atom in residues_atoms:\n if atom.get_name() == 'CA':\n residue = atom.get_parent()\n three_res_list.append(residue.get_resname())\n return three_to_one(three_res_list) # three_to_one function\n else:\n nucleic_acid_res = []\n for res in chain:\n residues_atoms = res.get_atoms()\n for atom in residues_atoms:\n if atom.get_name() == 'P':\n residue = atom.get_parent()\n nucleic_acid_res.append(residue.get_resname())\n nucleic_acid_seq = [x[2] for x in nucleic_acid_res]\n return \"\".join(nucleic_acid_seq)", "def sequence_number(self):\n return self._annotations.get(EventData.PROP_SEQ_NUMBER, None)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the Euclidean distance from this atom to the given atom. Returns
def get_euclid_distance_to(self, atom):
    return linalg.norm(self.get_coords() - atom.get_coords())
[ "def distance(cls, atom_1, atom_2):\n\t\t\n\t\treturn np.linalg.norm((atom_1-atom_2).atom_loc)", "def __get_distance(self, game_object):\n obj_x, obj_y = game_object.get_coordinates()\n self_x, self_y = self._coordinates\n\n inner = (obj_x-self_x)**2 + (obj_y-self_y)**2\n return math.sqrt(inner)", "def distance(self, x, c):\n return self.distance_metric(x, c)", "def __findEclidDist__(self, row, col):\r\n dist = np.sqrt( (row**2 + col**2 ))\r\n dist = np.round( dist )\r\n return dist", "def get_distance(self):\n return _TOF_LIBRARY.getDistance(self._dev)", "def get_distance_from_root(self):\n\n return self.distance_from_root", "def distance(self, x: int, y: int) -> float:\n return math.sqrt((x - self.x) ** 2 + (y - self.y) ** 2)", "def get_distance(self, star):\n if self == star:\n return 0\n\n a_car = self.get_cartesian_coords()\n b_car = star.get_cartesian_coords()\n dab = math.degrees(math.acos(a_car[0] * b_car[0] +\n a_car[1] * b_car[1] +\n a_car[2] * b_car[2]))\n return dab", "def distance(self):\n return self.value * len(self.alignment.query)", "def distance_to(self, x):\n return np.linalg.norm(np.array(x) - self.closest_point_to(x))", "def distance_to(self, circle):\n diff = tuple(map(sub, self.pos, circle.pos))\n return math.hypot(*diff)", "def _euclid_distance(self, A, B, axis=1):\n return np.linalg.norm(A - B, axis=axis)", "def distance(self) -> float:\n xdiff = self.position[0] - self.target[0]\n ydiff = self.position[1] - self.target[1]\n zdiff = self.position[2] - self.target[2]\n res = math.sqrt(xdiff * xdiff + ydiff * ydiff + zdiff * zdiff)\n if res == 0:\n res = 0.001\n return res", "def getElectricalDistance(self, neighborID):\n\n if not neighborID in self.Neighbors: # neighborID is not a neighbor\n return -1\n\n for n in range(len(self.Neighbors)):\n if self.Neighbors[n] == neighborID:\n break;\n\n return self.ElectricalDistanceToNeighbors[n]", "def __compute_distance(self, x, centroid):\n \n diff = x - centroid\n return np.sqrt(np.dot(diff.T, diff))", "def distance(self, other: 'Cell') -> float:\n return math.sqrt(((other.x - self.x) ** 2) + ((other.y - self.y) ** 2))", "def _getDistance(self, E):\n return self.a * (1 - self.e * math.cos(E))", "def distance(self, c1, c2):\r\n x = (c2.x - c1.x) ** 2\r\n y = (c2.y - c1.y) ** 2\r\n d = int(round(math.sqrt(x + y)))\r\n return d", "def distance_to(self, point):\n return np.linalg.norm(self.center - point)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a PLaSM cuboid with a color an put it on this atom coords.
def plasm_cube(self, size=0.1, color=WHITE): return COLOR(color)(T([1,2,3])(self.coords)(CUBOID([size, size, size])))
[ "def make_cube(self, color=Cube.Colors.RED):\n cube = Cube(self.__canvas, (100, 100), color)\n self.__cubes.append(cube)\n\n return cube", "def space_to_draw_cube(city, color) :\n (x,y) = CityLocs[city]\n ColorOffsets = {'red':(0,0),'blue': (15,0),'black':(0,15),'yellow':(15,15)}\n return (x + ColorOffsets[color][0], y + ColorOffsets[color][1])", "def create_cube(bm, size, position):\n post = cube(bm, *size)\n bmesh.ops.translate(bm, verts=post[\"verts\"], vec=position)\n return post", "def make_cube():\n pm.polyCube(name=\"Nisse\")\n return None", "def makeSphereCuboid():\n Nx, Ny, Nz = 256, 256, 256\n Nx2, Ny2, Nz2 = int(Nx/2), int(Ny/2), int(Nz/2)\n # fill with zeros\n a = np.zeros(Nx*Ny*Nz, np.uint8).reshape(Nx, Ny, Nz)\n # set data\n for k in range(Nz):\n for j in range(Ny):\n for i in range(Nx):\n # inside cuboid?\n if abs(i-Nx2) < 40 and abs(j-Ny2) < 30 and abs(k-Nz2) < 20:\n a[i][j][k] = min(2*(-Nx2 + 40 + i), 255)\n else:\n # inside sphere?\n if ((i-Nx2)*(i-Nx2) + (j-Ny2)*(j-Ny2) + \n (k-Nz2)*(k-Nz2)) < 100*100:\n a[i][j][k] = 50\n # create dir if not exists \n imgDir = 'sphere-cuboid'\n if not os.path.exists(imgDir):\n os.makedirs(imgDir)\n # write images\n for i in range(Nz):\n # create image from data\n im = Image.fromarray(a[i])\n # save file\n fileName = os.path.join(imgDir, 'sphere-cuboid%03d.png' % i)\n im.save(fileName)", "def assign_mapcuboid(self, xs, ys, zs, radius, grid_type):\n\tself.mapgrid = Grid(grid_type, radius, 2*radius, -1.)\n\tself.grid_type = grid_type\n self.mapcuboid = MapCuboid(xs, ys, zs, radius)\n self.mapcuboid.assign_cuboid_values()\n self.build_mapgrid(self.mapcuboid)\n\t\n\tself.mapgrid.set_xyz_ranges(self.mapcuboid.xmin, self.mapcuboid.xmax,\\\n self.mapcuboid.ymin, self.mapcuboid.ymax,\\\n self.mapcuboid.zmin, self.mapcuboid.zmax)\n\tself.mapgrid.generate_cubic_grid(self.mappoints, self.map_radius)\n \n\tself.mapgrid.set_map_threshold(self.map_threshold)\n\tself.density_sum = self.mapgrid.get_map_density_sum()\n\t\t\n\tif self.mapgrid.mapcells:\n\t self.ns_mapgrid = NeighborSearch(self.mapgrid.mapcells)\n\telse:\n\t self.ns_mapgrid = []", "def __drawCube(self):\n self.cubePos = [[[(160, 160), (200, 160), (240, 160)],\n [(160, 200), (200, 200), (240, 200)],\n [(160, 240), (200, 240), (240, 240)]],\n [[(400, 160), (440, 160), (480, 160)],\n [(400, 200), (440, 200), (480, 200)],\n [(400, 240), (440, 240), (480, 240)]],\n [[(280, 160), (320, 160), (360, 160)],\n [(280, 200), (320, 200), (360, 200)],\n [(280, 240), (320, 240), (360, 240)]],\n [[(40, 160), (80, 160), (120, 160)],\n [(40, 200), (80, 200), (120, 200)],\n [(40, 240), (80, 240), (120, 240)]],\n [[(160, 40), (200, 40), (240, 40)],\n [(160, 80), (200, 80), (240, 80)],\n [(160, 120), (200, 120), (240, 120)]],\n [[(160, 280), (200, 280), (240, 280)],\n [(160, 320), (200, 320), (240, 320)],\n [(160, 360), (200, 360), (240, 360)]]]\n self.cubeColor = {1: 'green', 2: 'blue', 3: 'red', 4: 'orange',\\\n 5: 'white', 6: 'yellow'}\n for x in range(6):\n for y in range(3):\n for z in range(3):\n pos = self.cubePos[x][y][z]\n color = self.cubeColor[self.cube.cube[x][y][z]]\n self.cv.create_rectangle(pos[0], pos[1], pos[0]+40, pos[1]+40,\n fill=color, width='2')", "def swop_cube(coords):\n\tif len(coords) == 3:\n\t\t[x, y, z] = coords\n\t\tif get_cube(x, y, z) == \"#\":\n\t\t\tspace[z][x][y] = \".\"\n\t\telse:\n\t\t\tspace[z][x][y] = \"#\"\n\telse:\n\t\t[x, y, z, w] = coords\n\t\tif get_cube(x, y, z, w) == \"#\":\n\t\t\tspace[w][z][x][y] = \".\"\n\t\telse:\n\t\t\tspace[w][z][x][y] = \"#\"", "def 
create_cube():\n new_cube = RubicsCube2x2()\n show_cube_console(new_cube)\n\n seed = [10, 9, 17, 14, 11, 8, 3, 2, 17, 3, 9, 7, 15, 4, 14, 14, 3, 3, \\\n 13, 7, 15, 9, 14, 13, 11, 17, 7, 10, 5, 16, 11, 5, 7, 10, 14, \\\n 7, 17, 7, 8, 6, 12, 3, 6, 1, 16, 12, 5, 13, 3, 4]\n for move in seed:\n new_cube.do_move(move)\n return new_cube", "def generate_cubo(self, dimensao):\r\n self.numero_nodos = 0\r\n self.numero_bloqueados = 0\r\n self.porcentagem_bloqueada = 0\r\n\r\n try:\r\n self.dimensao_X = int(dimensao)\r\n self.dimensao_Y = int(dimensao)\r\n self.dimensao_Z = int(dimensao)\r\n except:\r\n print(\" Dimensões erradas, setadas para default 3x3x3\")\r\n self.generate_cubo(DEFAULT)\r\n return\r\n\r\n for i in range(self.dimensao_X):\r\n for j in range(self.dimensao_Y):\r\n for k in range(self.dimensao_Z):\r\n self.cubo[(i, j, k)] = Node( (i, j, k) )\r\n self.numero_nodos += 1\r\n if k != 0 : # Condições de contorno, altura igual a zero\r\n self.cubo[(i, j, k)].vizinho_abaixo = self.cubo[(i, j, k-1)] # Seta vizinho Abaixo\r\n self.cubo[(i, j, k-1)].vizinho_acima = self.cubo[(i, j, k)] # Seta vizinho Acima\r\n\r\n if j != 0 : # COndicções de contorno, profuntidade igual a zero\r\n self.cubo[(i, j, k)].vizinho_atras = self.cubo[(i, j-1, k)] # Seta vizinho Atras\r\n self.cubo[(i, j-1, k)].vizinho_frente = self.cubo[(i, j, k)] # Seta vizinho a Frente\r\n\r\n if i != 0 : # Condições de contornor, posição igual a zero\r\n self.cubo[(i, j, k)].vizinho_esquerda = self.cubo[(i-1, j, k)] # Seta vizinho a Esquerda\r\n self.cubo[(i-1, j, k)].vizinho_direita = self.cubo[(i, j, k)] # Seta vizinho a Direita\r", "def drawcube_old():\n allpoints = list(zip(CUBE_POINTS, CUBE_COLORS))\n\n GL.glBegin(GL.GL_QUADS)\n for face in CUBE_QUAD_VERTS:\n for vert in face:\n pos, color = allpoints[vert]\n GL.glColor3fv(color)\n GL.glVertex3fv(pos)\n GL.glEnd()\n\n GL.glColor3f(1.0, 1.0, 1.0)\n GL.glBegin(GL.GL_LINES)\n for line in CUBE_EDGES:\n for vert in line:\n pos, color = allpoints[vert]\n GL.glVertex3fv(pos)\n\n GL.glEnd()", "def __init__(self, grid, color):\r\n self.pos = (15, 10)\r\n self.apple = pygame.Surface((grid.cella_meret, grid.cella_meret))\r\n self.color = color.red", "def buildCube(self, x_center, y_center, z_center, step, cote):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n for i in range(-cote/2, cote/2, step):\n for j in range(-cote/2, cote/2, step):\n for k in range(-cote / 2, cote / 2, step):\n self.z_cube_vector.append(k + z_center)\n self.y_cube_vector.append(j + y_center)\n self.x_cube_vector.append(i + x_center)\n\n ax.scatter(self.x_cube_vector, self.y_cube_vector, self.z_cube_vector, s=8, c=\"g\", depthshade=True)\n ax.set_xlabel(\"X\")\n ax.set_ylabel(\"Y\")\n ax.set_zlabel(\"Z\")\n ax.set_title(\"Le cube\")\n plt.show()", "def paint_square(self, pos, color, cr):\n cr.set_source_rgb(*color)\n i, j = pos\n cr.rectangle(i*DOT_SIZE+1, j*DOT_SIZE-1, DOT_SIZE-2, DOT_SIZE-2)\n cr.fill()", "def place_cube(self,\n cube_xy,\n player=None,\n weight=1,\n azimuth=None,\n return_azimuth=False):\n\n self.color_idx += 1\n if self.color_idx == len(self.colors):\n self.color_idx = 0\n if azimuth is None:\n azimuth = np.random.randint(0, 180)\n else:\n assert azimuth >= 0 and azimuth <= 180\n cube_rot = self.p0.getQuaternionFromEuler([\n 0, 0, np.deg2rad(azimuth)\n ]) # rotated around which axis? 
# np.deg2rad(90)\n\n alpha = 1 # this could be set to .5 for some transparency\n\n if weight == 1:\n if player is None or self.four_colors:\n color = self.colors[self.color_idx] + [alpha]\n elif player == Player.Player:\n color = [0, 0, 1, 1]\n if DEBUG:\n print(\"Player putting down cube at\", cube_xy)\n elif player == Player.Enemy:\n color = [1, 0, 0, 1]\n if DEBUG:\n print(\"Opponent putting down cube at\", cube_xy)\n elif player == Player.Starter:\n color = [0, 0, 0, 1]\n if self.dark:\n color = [1, 1, 1, 1]\n if DEBUG:\n print(\"Starter cube at\", cube_xy)\n else:\n color = WEIGHT_COLORS[weight]\n\n max_z = self.find_highest_z(cube_xy, azimuth)\n\n cube_pos = [cube_xy[0], cube_xy[1], max_z + 1.0001]\n # print (\"placing cube at\",cube_pos)\n\n cube_visual = self.p0.createVisualShape(\n shapeType=self.p0.GEOM_BOX,\n rgbaColor=color,\n halfExtents=[1, 1, 1]\n # specularColor=[0.4, .4, 0],\n )\n\n cube = self.p0.createMultiBody(\n baseMass=weight,\n # baseInertialFramePosition=[0, 0, 0],\n baseCollisionShapeIndex=self.cube_collision,\n baseVisualShapeIndex=cube_visual,\n basePosition=cube_pos,\n baseOrientation=cube_rot,\n useMaximalCoordinates=True)\n\n self.cubes.append(cube)\n\n if max_z > self.current_max_z:\n self.current_max_z = np.around(max_z)\n out = True\n else:\n out = False\n\n if not return_azimuth:\n return out\n else:\n return out, azimuth", "def point(loc: Vec3, size: int = 10, color: str = \"\"):\n col = get_color(color)\n renderer.draw_rect_3d(loc, size, size, True, col, True)", "def cube(loader, x, y, z, r):\r\n\tmodel = loader.loadModel(mydir + \"/models/cube\")\r\n\tmodel.setScale(r)\r\n\tmodel.setPos(x, y, z)\r\n\tmodel.setColor(1.0, 1.0, 1.0, 1.0)\r\n\treturn model", "def cube_colors(self, cubes):\n n = cubes.shape[0]\n col = np.zeros((n ** 3, 3))\n terrain_col = (66, 244, 72)\n empty_col = self.background\n for i in range(n):\n for j in range(n):\n for k in range(n):\n c = cubes[i, j, k]\n col[i * n ** 2 + j * n + k] = empty_col if c.state == 'empty' else terrain_col\n self.wireframe_col = col", "def __init__(self, cube_size, time_range):\n\n # cubesize is in z,y,x for interactions with tile/image data\n self.zdim, self.ydim, self.xdim = self.cubesize = [cube_size[2], cube_size[1], cube_size[0]]\n self.time_range = time_range\n self._newcube = False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks that the GsmModem in PDU mode accepts outgoing SMS, when the text is within ASCII chars 22 126.
def testSendSmsPduMode(self): # setup expectation to raise a timeout error with prompt err = errors.GsmReadTimeoutError(">") when(self.mockDevice).read_lines().thenRaise(err).thenReturn(self.oklines) self.gsm.send_sms("1234", "Test Message") # must see command with size verify(self.mockDevice, times=1).write("AT+CMGS=21\r") # must see command with text and terminating char verify(self.mockDevice, times=1).write("00110004A821430000AA0CD4F29C0E6A96E7F3F0B90C\x1a") # allow any number of reads verify(self.mockDevice, atleast=1).read_lines() verifyNoMoreInteractions(self.mockDevice)
[ "def _check_voice_message(self, text):\n\t\tif self._gsm0338_length(text) > 160:\n\t\t\traise SMSTradeError(u'too many GSM characters in message')", "def is_gsm_text(text):\n try:\n tx = text.encode(\"gsm0338\")\n print(tx, type(tx), len(tx))\n except UnicodeError:\n return False\n except:\n traceback.print_exc(file=sys.stdout)\n return False\n\n return True", "def _check_unicode_message(text):\n\t\tfor char in text:\n\t\t\tcode = ord(char)\n\t\t\tif (0xd800 <= code <= 0xdfff) or (code > 0xffff):\n\t\t\t\traise SMSTradeError(u\"the message can not be represented in UCS2\")\n\t\tif len(text) > 70:\n\t\t\traise SMSTradeError(u\"too many characters in message, unicode SMS may contain up to 70 characters\")", "def IsOneSMSMessage(value):\r\n value = escape.to_unicode(value)\r\n utf16_count = len(value.encode('utf-16-be')) / 2\r\n if _gsm_re.search(value):\r\n return utf16_count <= MAX_GSM_CHARS\r\n\r\n return utf16_count <= MAX_UTF16_CHARS", "def testSendSmsPduModeError(self):\n\n # setup expectation to raise a non-timeout error with prompt\n when(self.mockDevice).read_lines().thenRaise(Exception(\"something other than timeout\"))\n self.gsm.send_sms(\"1234\", \"Test Message\")\n \n # must see command with size\n verify(self.mockDevice, times=1).write(\"AT+CMGS=21\\r\")\n # must see command to break out of command prompt\n verify(self.mockDevice, times=1).write(\"\\x1b\")\n # must NOT see command with text and terminating char\n verify(self.mockDevice, times=0).write(\"00110004A821430000AA0CD4F29C0E6A96E7F3F0B90C\\x1a\")\n # allow any number of reads\n verify(self.mockDevice, atleast=1).read_lines()\n verifyNoMoreInteractions(self.mockDevice)", "def test_outgoing_unicode_characters(self):\r\n message = self.create_outgoing_message()\r\n config = {\r\n \"sendsms_params\": {\"smsc\": \"usb0-modem\",\r\n \"from\": \"+SIMphonenumber\",\r\n \"username\": \"rapidsms\",\r\n \"password\": \"CHANGE-ME\"},\r\n \"charset\": \"UTF-8\",\r\n }\r\n backend = KannelBackend(None, \"kannel\", **config)\r\n kwargs = backend.prepare_request(1, message.text,\r\n [message.connections[0].identity], {})\r\n data = kwargs['params']\r\n self.assertEqual(data['text'].decode('UTF-8'), message.text)", "def test_number_verification_contains_letter(self):\n\n sms = SmsModel('testuser', 'testpassword')\n sms.dry = True\n try:\n sms.send_sms(\"+420a23456789\",\"testsms\")\n self.fail('sms.send_sms(\"+420a23456789\",\"testsms\") should throw WrongNumberException')\n except WrongNumberException as e:\n pass\n except:\n self.fail('sms.send_sms(\"+420a23456789\",\"testsms\") should throw WrongNumberException')", "def send_sms_via_modem(self, mobile, text=\"\"):\n\n mobile = self.sanitise_phone(mobile)\n\n # Add '+' before country code\n mobile = \"+\" + mobile\n\n try:\n self.modem.send_sms(mobile, text)\n return True\n except:\n return False", "def _check_binary_message(text):\n\t\ttry:\n\t\t\tlength = len(text.lower().decode('hex'))\n\t\t\tif length > 140:\n\t\t\t\traise SMSTradeError(u'too many bytes in message, binary messages may contain up to 140 bytes')\n\t\texcept:\n\t\t\traise SMSTradeError('message cannot be encoded as bytes')", "def test_sms_send(self):\n pass", "def compose_sms(driver, input_text):\r\n if keywords.check_exist(driver, locators.Common.com_discard_mssage, timeout=10):\r\n keywords.click(driver, locators.Common.com_discard_button)\r\n if keywords.check_exist(driver, locators.Common.com_enter_mssage, timeout=30):\r\n keywords.send_keys(driver, locators.Common.com_enter_mssage, input_text)\r\n 
logging.info('Text has been entered in the textbox')\r\n return True\r\n else:\r\n logging.error('Could not find enter message')\r\n return False\r\n else:\r\n logging.error('Could not find discard message button')\r\n return False", "def astral(msg):\r\n return any(ord(c) > 0xFFFF for c in msg)", "def testIsOneSMSMessage(self):\r\n self.assertTrue(IsOneSMSMessage(''))\r\n self.assertTrue(IsOneSMSMessage('a' * MAX_GSM_CHARS))\r\n self.assertFalse(IsOneSMSMessage('a' * MAX_GSM_CHARS + 'a'))\r\n self.assertTrue(IsOneSMSMessage('Ñ' * MAX_GSM_CHARS))\r\n self.assertFalse(IsOneSMSMessage('Ñ' * MAX_GSM_CHARS + 'Ñ'))\r\n self.assertTrue(IsOneSMSMessage('\\n' * MAX_GSM_CHARS))\r\n self.assertFalse(IsOneSMSMessage('\\n' * MAX_GSM_CHARS + '\\r'))\r\n self.assertTrue(IsOneSMSMessage('Ω' * MAX_UTF16_CHARS))\r\n self.assertFalse(IsOneSMSMessage('Ω' * MAX_UTF16_CHARS + '-'))\r\n self.assertTrue(IsOneSMSMessage('[' * MAX_UTF16_CHARS))\r\n self.assertFalse(IsOneSMSMessage('[' * MAX_UTF16_CHARS + '1'))\r\n self.assertTrue(IsOneSMSMessage('朋' * MAX_UTF16_CHARS))\r\n self.assertFalse(IsOneSMSMessage('朋' * MAX_UTF16_CHARS + '_'))\r\n self.assertTrue(IsOneSMSMessage('👍' * (MAX_UTF16_CHARS / 2)))\r\n self.assertFalse(IsOneSMSMessage('👍' * (MAX_UTF16_CHARS / 2) + '\\n'))\r\n\r\n self.assertTrue(IsOneSMSMessage(SMSUtilTestCase._gsm_chars + '01234567890123456789012345678901234567890123'))\r\n self.assertFalse(IsOneSMSMessage(SMSUtilTestCase._gsm_chars + '012345678901234567890123456789012345678901234'))", "def test_send_test_sms(self):\n pass", "def _validate_ascii(message):\n return all(ord(c) < 128 for c in message)", "def check_sms_send_status(self, message='usb_sms_test'):\n self.dut.droid.smsSendTextMessage(self.receiver_number, message, False)\n self.log.info('Waiting for SMS sent event')\n test_status = wait_for_sms_sent_success(self.log, self.dut)\n if not test_status:\n raise Exception('Failed to send SMS')", "def test_5g_nsa_sms_mo_mt(self):\n ads = self.android_devices\n if not provision_device_for_5g(self.log, ads):\n return False\n\n if not _sms_test_mo(self.log, ads):\n return False\n\n if not verify_5g_attach_for_both_devices(self.log, ads):\n return False\n\n self.log.info(\"PASS - SMS test over 5G NSA validated\")\n return True", "def test_send_sms_report(self):\n pass", "def isvalidport(txt):\n return txt.isdigit() and int(txt) <= 65535 and int(txt) >= 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks that the GsmModem in PDU mode does not send message if error, when the text is within ASCII chars 22 126.
def testSendSmsPduModeError(self): # setup expectation to raise a non-timeout error with prompt when(self.mockDevice).read_lines().thenRaise(Exception("something other than timeout")) self.gsm.send_sms("1234", "Test Message") # must see command with size verify(self.mockDevice, times=1).write("AT+CMGS=21\r") # must see command to break out of command prompt verify(self.mockDevice, times=1).write("\x1b") # must NOT see command with text and terminating char verify(self.mockDevice, times=0).write("00110004A821430000AA0CD4F29C0E6A96E7F3F0B90C\x1a") # allow any number of reads verify(self.mockDevice, atleast=1).read_lines() verifyNoMoreInteractions(self.mockDevice)
[ "def _check_voice_message(self, text):\n\t\tif self._gsm0338_length(text) > 160:\n\t\t\traise SMSTradeError(u'too many GSM characters in message')", "def _check_unicode_message(text):\n\t\tfor char in text:\n\t\t\tcode = ord(char)\n\t\t\tif (0xd800 <= code <= 0xdfff) or (code > 0xffff):\n\t\t\t\traise SMSTradeError(u\"the message can not be represented in UCS2\")\n\t\tif len(text) > 70:\n\t\t\traise SMSTradeError(u\"too many characters in message, unicode SMS may contain up to 70 characters\")", "def is_gsm_text(text):\n try:\n tx = text.encode(\"gsm0338\")\n print(tx, type(tx), len(tx))\n except UnicodeError:\n return False\n except:\n traceback.print_exc(file=sys.stdout)\n return False\n\n return True", "def testSendSmsPduMode(self):\n \n # setup expectation to raise a timeout error with prompt\n err = errors.GsmReadTimeoutError(\">\")\n when(self.mockDevice).read_lines().thenRaise(err).thenReturn(self.oklines)\n self.gsm.send_sms(\"1234\", \"Test Message\")\n \n # must see command with size\n verify(self.mockDevice, times=1).write(\"AT+CMGS=21\\r\")\n # must see command with text and terminating char\n verify(self.mockDevice, times=1).write(\"00110004A821430000AA0CD4F29C0E6A96E7F3F0B90C\\x1a\")\n # allow any number of reads\n verify(self.mockDevice, atleast=1).read_lines()\n verifyNoMoreInteractions(self.mockDevice)", "def _check_binary_message(text):\n\t\ttry:\n\t\t\tlength = len(text.lower().decode('hex'))\n\t\t\tif length > 140:\n\t\t\t\traise SMSTradeError(u'too many bytes in message, binary messages may contain up to 140 bytes')\n\t\texcept:\n\t\t\traise SMSTradeError('message cannot be encoded as bytes')", "def test_smpp_drop_on_unknown_char(self):\n\n pduHex = '000000250000000400000000000000020005003200010033000000000000010008000200a7'\n pdu = self.getPDU(pduHex)\n\n # Asserts\n self.assertEqual(CommandId.submit_sm, pdu.id)\n self.assertEqual(CommandStatus.ESME_ROK, pdu.status)", "def test_number_verification_contains_letter(self):\n\n sms = SmsModel('testuser', 'testpassword')\n sms.dry = True\n try:\n sms.send_sms(\"+420a23456789\",\"testsms\")\n self.fail('sms.send_sms(\"+420a23456789\",\"testsms\") should throw WrongNumberException')\n except WrongNumberException as e:\n pass\n except:\n self.fail('sms.send_sms(\"+420a23456789\",\"testsms\") should throw WrongNumberException')", "def message_check(self, message):\n if(message == \"\"):\n return False\n\n if(len(message) > 256):\n return False\n\n return True", "def check_sms_send_status(self, message='usb_sms_test'):\n self.dut.droid.smsSendTextMessage(self.receiver_number, message, False)\n self.log.info('Waiting for SMS sent event')\n test_status = wait_for_sms_sent_success(self.log, self.dut)\n if not test_status:\n raise Exception('Failed to send SMS')", "def _valid_msg(self) -> bool:\n if len(self.content['message']) <= 140:\n return True\n\n return False", "def valid_message(msg: str) -> bool:\n return bool(msg.replace(' ', '')) and len(msg) < 100", "def _validate_ascii(message):\n return all(ord(c) < 128 for c in message)", "def IsOneSMSMessage(value):\r\n value = escape.to_unicode(value)\r\n utf16_count = len(value.encode('utf-16-be')) / 2\r\n if _gsm_re.search(value):\r\n return utf16_count <= MAX_GSM_CHARS\r\n\r\n return utf16_count <= MAX_UTF16_CHARS", "def test_outgoing_unicode_characters(self):\r\n message = self.create_outgoing_message()\r\n config = {\r\n \"sendsms_params\": {\"smsc\": \"usb0-modem\",\r\n \"from\": \"+SIMphonenumber\",\r\n \"username\": \"rapidsms\",\r\n \"password\": \"CHANGE-ME\"},\r\n 
\"charset\": \"UTF-8\",\r\n }\r\n backend = KannelBackend(None, \"kannel\", **config)\r\n kwargs = backend.prepare_request(1, message.text,\r\n [message.connections[0].identity], {})\r\n data = kwargs['params']\r\n self.assertEqual(data['text'].decode('UTF-8'), message.text)", "def validate_message(self, message):\n if len(message) > 140:\n raise Exception(\"Mensagem inválida: excede 140 caracteres\")", "def __check(self, msg):\n msg = bytearray(msg)\n # Check that header is correct\n if msg[:2] != b'\\xFB\\xBF':\n return False\n # Check that ending is correct\n elif msg[-1:] != b'\\xED':\n return False\n # Check that check byte is correct\n elif msg[-2:-1] != bytes([sum(msg[2:-2]) % 256]):\n return False\n else:\n return True", "def testIsOneSMSMessage(self):\r\n self.assertTrue(IsOneSMSMessage(''))\r\n self.assertTrue(IsOneSMSMessage('a' * MAX_GSM_CHARS))\r\n self.assertFalse(IsOneSMSMessage('a' * MAX_GSM_CHARS + 'a'))\r\n self.assertTrue(IsOneSMSMessage('Ñ' * MAX_GSM_CHARS))\r\n self.assertFalse(IsOneSMSMessage('Ñ' * MAX_GSM_CHARS + 'Ñ'))\r\n self.assertTrue(IsOneSMSMessage('\\n' * MAX_GSM_CHARS))\r\n self.assertFalse(IsOneSMSMessage('\\n' * MAX_GSM_CHARS + '\\r'))\r\n self.assertTrue(IsOneSMSMessage('Ω' * MAX_UTF16_CHARS))\r\n self.assertFalse(IsOneSMSMessage('Ω' * MAX_UTF16_CHARS + '-'))\r\n self.assertTrue(IsOneSMSMessage('[' * MAX_UTF16_CHARS))\r\n self.assertFalse(IsOneSMSMessage('[' * MAX_UTF16_CHARS + '1'))\r\n self.assertTrue(IsOneSMSMessage('朋' * MAX_UTF16_CHARS))\r\n self.assertFalse(IsOneSMSMessage('朋' * MAX_UTF16_CHARS + '_'))\r\n self.assertTrue(IsOneSMSMessage('👍' * (MAX_UTF16_CHARS / 2)))\r\n self.assertFalse(IsOneSMSMessage('👍' * (MAX_UTF16_CHARS / 2) + '\\n'))\r\n\r\n self.assertTrue(IsOneSMSMessage(SMSUtilTestCase._gsm_chars + '01234567890123456789012345678901234567890123'))\r\n self.assertFalse(IsOneSMSMessage(SMSUtilTestCase._gsm_chars + '012345678901234567890123456789012345678901234'))", "def checkOK(self,msg):\r\n #also check that the reply is what was expected\r\n if msg[0:2] != 'OK':\r\n if msg=='': logging.error('No reply from LS100'); sys.stdout.flush()\r\n else: logging.error('Error message from LS100:' + self.codes[msg]); sys.stdout.flush()\r\n return False\r\n else:\r\n return True", "async def check_message(self, msg):\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if the content type is valid.
def is_valid_content_type(cls, content_type: str) -> bool: return content_type in cls.CONTENT_TYPES.value
[ "def _is_valid_ct(content_type: str) -> bool:\n content_type = content_type.strip()\n return _is_valid_regex(CT_CONTENT_TYPE_REGEX_PATTERN, content_type)", "def _is_valid_content_type_format(content_type: str) -> bool:\n return (\n _is_valid_ct(content_type)\n or _is_valid_pt(content_type)\n or _is_valid_set(content_type)\n or _is_valid_list(content_type)\n or _is_valid_dict(content_type)\n or _is_valid_union(content_type)\n or _is_valid_optional(content_type)\n )", "def is_valid_content_type(content_type: str, expected_content_type: str) -> bool:\n return (content_type is not None) and (content_type.strip().lower() == expected_content_type)", "def _ValidateType(self):\n entity_type_str = str(self.entity['type'])\n type_parse = entity_type_str.split('/')\n\n if len(type_parse) != 2:\n print('Type improperly formatted:', entity_type_str)\n return False\n\n namespace = type_parse[0]\n entity_type = type_parse[1]\n\n if self.universe.GetEntityTypeNamespace(namespace) is None:\n print('Invalid namespace:', namespace)\n return False\n\n if self.universe.GetEntityType(namespace, entity_type) is None:\n print('Invalid entity type:', entity_type)\n return False\n\n return True", "def is_schema_types_valid(self):\n valid_types = {\"string\", \"int\", \"float\", \"datetime\", \"boolean\"}\n invalid_types = []\n if self.schema_content:\n for dataset in self.schema_content:\n attributes = self.schema_content.get(dataset)\n for attr in attributes.values():\n type_to_validate = attr.get(\"type\")\n if type_to_validate not in valid_types:\n invalid_types.append(type_to_validate)\n\n if invalid_types:\n error_message, error_code = Errors.modeling_rule_schema_types_invalid(\n invalid_types\n )\n if self.handle_error(\n error_message, error_code, file_path=self.file_path\n ):\n self._is_valid = False\n return False\n return True", "def _is_valid_pt(content_type: str) -> bool:\n content_type = content_type.strip()\n return content_type in SPECIFICATION_PRIMITIVE_TYPES", "def is_readable(self, content_type):\n return False", "def is_valid_mime_type_format(self, mime_type: str) -> bool:\n return mime_type in mimetypes.types_map.values()", "def isValid(self):\n return isValidBlob(self.format_, self)", "def is_well_formed(self):\n try:\n self.validate()\n except XmrsError:\n return False\n return True", "def _is_valid_support_type(self) -> bool:\n try:\n pack_meta_file_content = json.loads(self._read_file_content(self.pack_meta_file))\n if pack_meta_file_content[PACK_METADATA_SUPPORT] not in SUPPORT_TYPES:\n self._add_error(Errors.pack_metadata_invalid_support_type(), self.pack_meta_file)\n return False\n self.support = pack_meta_file_content[PACK_METADATA_SUPPORT]\n except (ValueError, TypeError):\n if self._add_error(Errors.pack_metadata_isnt_json(self.pack_meta_file), self.pack_meta_file):\n return False\n\n return True", "def checkContentType(ctype, mtype, ext):\n\text_video = ['mpg', 'mpeg', 'avi']\n\text_audio = ['mp3', 'ogg']\n\text_image = ['jpeg', 'jpg', 'gif', 'png']\n\n\tmediaName = MEDIA_TYPE[int(mtype) -1][1]\n\t#if ctype.partition('/')[0] != mediaName:\n\tif ctype.split('/')[0] != mediaName:\n\t\treturn False\n\tif mediaName == 'audio' and not ext_audio.count(ext) :\n\t\treturn False\n\telif mediaName == 'video' and not ext_video.count(ext):\n\t\treturn False\n\telif mediaName == 'image' and not ext_image.count(ext):\n\t\treturn False\n\telse:\n\t\treturn True", "def _is_compositional_type(content_type: str) -> bool:\n for valid_compositional_type in (\n SPECIFICATION_COMPOSITIONAL_TYPES + 
PYTHON_COMPOSITIONAL_TYPES\n ):\n if content_type.startswith(valid_compositional_type):\n return True\n return False", "def _is_valid_set(content_type: str) -> bool:\n content_type = content_type.strip()\n\n if not content_type.startswith(\"pt:set\"):\n return False\n\n if not _has_matched_brackets(content_type):\n return False\n\n if not _has_brackets(content_type):\n return False\n\n sub_types = _get_sub_types_of_compositional_types(content_type)\n if len(sub_types) != 1:\n return False\n\n sub_type = sub_types[0]\n return _is_valid_pt(sub_type)", "def checkFileFormat():\n #Check that a MIME file is being read\n if self.__fileRef == None:\n self.__openFile()\n if (\"Mime-Version: 1.0\" in self.__fileRef.read()) == False:\n raise OurInFileException\n else:\n return True", "def is_valid(self):\n return (self.time is not None\n and self.author is not None\n and self.content is not None)", "def is_valid(self):\n return self._service_endpoint is not None and self._type is not None", "def is_content_malformed(self):\n return self._tag == 'content_malformed'", "def is_valid_type(self, question_type):\n\t\treturn question_type in self.valid_types" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constructor for facebook sdk
def init_fb(self, **kwargs): try: self.graph = facebook.GraphAPI(access_token=fb_token, version='2.4') except Exception as e: sys.exit(str(e))
[ "def __init__(self):\n try:\n access_token = facebook.is_connected(request.cookies)[\"access_token\"]\n except:\n raise Exception(\"Not authed.\")\n self.graph = facebook.GraphAPI(access_token)\n self.user = self.graph.get_object(\"me\")", "def initialize_facebook():\n session = FacebookSession(APP_ID, APP_SECRET, ACCESS_TOKEN)\n return FacebookAdsApi(session)", "def __init__(self, page_access_token, api_ver=None):\n if api_ver:\n assert isinstance(api_ver, (int, float)), \"type of api version is not float or integer\"\n else:\n api_ver = 2.9\n self.URL = 'https://graph.facebook.com/v{}/'.format(api_ver) + '{}'\n\n self.Access_Token = page_access_token", "def init_facebook(self, request):\n\n # initial facebook request comes in as a POST with a signed_request\n if u'signed_request' in request.POST:\n signed_request = request.POST.get('signed_request')\n if self.load_signed_request(signed_request):\n request.session['u'] = {\n 'signed_request': signed_request,\n 'expires': datetime.now() + timedelta(minutes=1440)\n }\n elif 'u' in request.session and request.session['u']['expires'] > datetime.now():\n self.load_signed_request(request.session['u']['signed_request'])\n\n if self.user_id:\n self.graph = GraphAPI(self.access_token)\n self.fql = FQLAPI(self.access_token)", "def facebook(self, facebook):\n\n self._facebook = facebook", "def init_facebook(self):\n facebook = Facebook()\n user = None\n\n # initial facebook request comes in as a POST with a signed_request\n if u'signed_request' in self.request.POST:\n facebook.load_signed_request(self.request.get('signed_request'))\n # we reset the method to GET because a request from facebook with a\n # signed_request uses POST for security reasons, despite it\n # actually being a GET. in webapp causes loss of request.POST data.\n self.request.method = u'GET'\n self.set_cookie(\n 'u', facebook.user_cookie, datetime.timedelta(minutes=1440))\n elif 'u' in self.request.cookies:\n facebook.load_signed_request(self.request.cookies.get('u'))\n\n # try to load or create a user object\n if facebook.user_id:\n print >> sys.stderr, 'getting user: ' + str(facebook.user_id)\n user = User.get_by_key_name(facebook.user_id)\n if user:\n # update stored access_token\n if facebook.access_token and \\\n facebook.access_token != user.access_token:\n user.access_token = facebook.access_token\n user.put()\n # refresh data if we failed in doing so after a realtime ping\n if user.dirty:\n user.refresh_data()\n # restore stored access_token if necessary\n if not facebook.access_token:\n facebook.access_token = user.access_token\n\n if not user and facebook.access_token:\n me = facebook.api(u'/me', {u'fields': _USER_FIELDS})\n try:\n\n if me.has_key( u'friends' ):\n friends = [ usr1[u'id'] for usr1 in me[u'friends'][u'data'] ]\n else:\n friends = []\n\n# print >> sys.stderr, 'CREATING USER: ' + str(facebook.user_id)\n# print >> sys.stderr, 'NAME: ' + str( me[u'name'].encode('ascii', 'ignore') )\n# print >> sys.stderr, 'GENDER: ' + str( me[u'gender'] )\n# if me.has_key( u'friends' ):\n# for user in me[u'friends'][u'data']:\n# print >> sys.stderr, ' Friend: ' + str( user[u'id'] ) + ' -' + str( user[u'name'].encode('ascii', 'ignore') )\n# pprint.pprint( me, sys.stderr);\n\n user = User(key_name=facebook.user_id,\n user_id=facebook.user_id, friends=friends,\n access_token=facebook.access_token, name=me[u'name'],\n email=me.get(u'email'), picture=me[u'picture'][u'data'][u'url'],\n gender=me[u'gender'])\n user.put()\n# pprint.pprint( user, sys.stderr);\n\n except KeyError, 
ex:\n raise # ignore if can't get the minimum fields\n\n self.facebook = facebook\n self.user = user", "def __init__(self, api_key, api_secret, callback_url):\n # Credientials\n self.URI_SCHEME = \"https\"\n self.API_ENDPOINT = \"api.linkedin.com\"\n self.REQUEST_TOKEN_URL = \"/uas/oauth/requestToken\"\n self.ACCESS_TOKEN_URL = \"/uas/oauth/accessToken\"\n self.REDIRECT_URL = \"/uas/oauth/authorize\"\n self.version = \"1.0\"\n self.signature_method = \"HMAC-SHA1\" # as I said\n self.BASE_URL = \"%s://%s\" % (self.URI_SCHEME, self.API_ENDPOINT)\n \n self.API_KEY = api_key\n self.API_SECRET = api_secret\n self.CALLBACK_URL = callback_url\n self.request_token = None # that comes later\n self.access_token = None # that comes later and later\n \n self.request_token_secret = None\n self.access_token_secret = None\n \n self.verifier = None\n self.error = None\n\n self.request_oauth_nonce = None\n self.request_oauth_timestamp = None\n self.access_oauth_nonce = None\n self.access_oauth_timestamp = None\n self.request_oauth_error = None\n self.access_oauth_error = None", "def __init__(self, oauth_consumer_token=None, oauth_access_token=None):\n self.consumer_token = oauth_consumer_token\n self.access_token = oauth_access_token", "def __init__(self, client_id, token, scope=[\"activity\", \"heartrate\", \"location\", \"nutrition\", \"profile\", \"settings\", \"sleep\", \"social\", \"weight\"]):\n\n\t\tif token['access_token'] == \"\":\n\t\t\t# We need to fetch a token for the user.\n\t\t\tprint(\"Note: looks like we don't have an access token yet. Let's fetch one.\")\n\n\t\t\tself.client = MobileApplicationClient(client_id)\n\t\t\tself.fitbit = OAuth2Session(client_id, client=self.client, scope=scope)\n\n\t\t\tauthorization_base_url = \"https://www.fitbit.com/oauth2/authorize\"\n\n\t\t\tauthorization_url, state = self.fitbit.authorization_url(authorization_base_url)\n\n\t\t\tprint(\"Please go to the following authorization URL: {}\".format(authorization_url))\n\n\t\t\traw_callback_url = input(\"Paste callback URL you get back here: \")\n\n\t\t\tself.fitbit.token_from_fragment(raw_callback_url)\n\t\t\tself.token = self.fitbit.token['access_token']\n\n\t\t\tprint(self.fitbit.token)\n\n\t\telse:\n\t\t\t# We've got an access token, and we'll use it.\n\t\t\tself.client = MobileApplicationClient(client_id)\n\t\t\tself.fitbit = OAuth2Session(client_id, client=self.client, scope=scope, token=token)\n\t\t\tself.token = token['access_token']", "def __init__(self, oauth=None, client_id=None):\n\t\tself.oauth = oauth\n\t\tself.client_id = client_id or self.default_client_id", "def facebook(self):\n try:\n from facebook import Facebook\n except ImportError:\n log.warning(\"PyFacebook is not installed!\")\n else:\n if self.user and self.user.profile.uses_facebook_connect:\n # This implies, that the correct cookies must be set. We don't\n # double check for that.\n api_key = get_app().cfg['facebook/api_key']\n secret_key = get_app().cfg['facebook/secret_key']\n facebook = Facebook(api_key, secret_key)\n # Setting the cookie values\n # It's so cool to have no private attributes. 
(;\n facebook.uid = self.session['fb_user_id']\n facebook.session_key = self.session['fb_session_id']\n return facebook", "def __init__(self, consumer_key, consumer_secret, access_token,\n access_token_secret, **kwargs):\n self.consumer_key = consumer_key\n self.consumer_secret = consumer_secret\n self.access_token = access_token\n self.access_token_secret = access_token_secret\n super().__init__(**kwargs)", "def __init__(self, access_token, db_path, id_list):\n self.access_token = access_token\n self.db_path = db_path\n self.id_list = id_list\n\n g = facebook.GraphAPI(self.access_token, version='2.3')\n self.g = g\n\n # connect to database\n con = lite.connect(self.db_path)\n self.con = con\n\n with con:\n # create cursor to the database\n cur = con.cursor()\n self.cur = cur\n # create tables for posts, comments, post likes and people if not exists\n cur.execute(\n \"CREATE TABLE IF NOT EXISTS Posts(post_id TEXT PRIMARY KEY, status_id TEXT, content TEXT, \"\n \"person_hash_id TEXT, published_date TEXT, last_comment_date TEXT, post_type TEXT, status_type TEXT, \"\n \"post_link TEXT, link TEXT, video_link TEXT, picture_link TEXT, link_name TEXT, link_caption TEXT, \"\n \"link_description TEXT, comment_count INTEGER, share_count INTEGER, like_count INTEGER, \"\n \"love_count INTEGER, wow_count INTEGER, haha_count INTEGER, sad_count INTEGER, angry_count INTEGER, \"\n \"mentions_count INTEGER, mentions TEXT, location TEXT, date_inserted TEXT)\")\n cur.execute(\n \"CREATE TABLE IF NOT EXISTS Comments(comment_id TEXT PRIMARY KEY, person_hash_id TEXT, post_id TEXT, \"\n \"comment_content TEXT, comment_date TEXT, like_count INTEGER)\")\n cur.execute(\n \"CREATE TABLE IF NOT EXISTS Post_likes(like_id TEXT PRIMARY KEY, person_hash_id TEXT, post_id TEXT)\")\n cur.execute(\n \"CREATE TABLE IF NOT EXISTS People(person_hash_id TEXT PRIMARY KEY, person_id TEXT, person_name TEXT)\")", "def __init__(self, oauth_client: Any = None, consumer_key: str = None, consumer_secret: str = None, callback_url: str = None) -> None:\n self.callback_url = callback_url\n if oauth_client:\n self.oauth_client = oauth_client\n elif (consumer_key is not None) and (consumer_secret is not None):\n consumer = oauth2.Consumer(key=consumer_key, secret=consumer_secret)\n self.oauth_client = oauth2.Client(consumer)\n else:\n raise Exception(\"Please supply either an oauth_client argument or a consumer_key + consumer_secret pair\")", "def __init__(self):\n self.authurl = Config().auth\n self.baseurl = Config().api\n self.s = Session()\n self.s.headers = {'Accept': 'application/json'}\n data = {\"grant_type\": \"client_credentials\", \"scope\": \"/read-public\", \"client_id\": Config().client_id,\n \"client_secret\": Config().client_secret}\n r = self.s.request(method=\"post\", url=self.authurl, data=data)\n self.s.headers = {'Accept': 'application/json', \"Access token\": r.json()[\"access_token\"]}", "def __init__(self, api_key=None, shared_secret=None, httplib2_inst=None):\n self.api_key = api_key\n self.secret = shared_secret\n self.http = httplib2_inst or Http()\n self.uri = PUBLIC_URI", "def __init__(self, login_url, text=Copy.login_with_facebook):\n super(FacebookLoginAnchor, self).__init__(login_url, text)\n\n self.append_class(self.FACEBOOK_LOGIN_ANCHOR_CLASS)", "def __init__(self, url, user=None, password=None, oauth=False, oauth_access_token=None,verify=None, loadmetadata=False):\n\n #self.http = httplib2.Http()\n if url[-1] != '/': url += '/'\n self.url = url\n self.oauth = oauth\n self.oauth_access_token = 
oauth_access_token\n if verify is None:\n self.verify = certifi.where()\n else:\n self.verify = verify\n if user and password:\n self.authenticated = True\n self.user = user\n self.password = password\n self.oauth = False\n self.initauth()\n else:\n self.authenticated = False\n self.user = None\n self.password = None\n self.initauth()\n self.loadmetadata = loadmetadata", "def __init__(self, api_key, api_secret):\n self.api_key = api_key\n self.api_secret = api_secret" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save event to database
def save_event(self, data): rdb.table(self.rdb_table).insert(data)
[ "def __save_event(cls, e: Event) -> None:\n statement = 'INSERT OR REPLACE INTO events VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'\n values = e.to_db_tuple()\n\n cls.__get_cursor().execute(statement, values)\n # cls.__connection.commit()", "def save(self, event):\n self.saved_events.append(event)", "def insert_event_to_db(self):\n try:\n events_coll.insert_one(self.event_info_to_dic())\n except Exception as e:\n print(e)", "def add_event(self, event):\n sql = \"\"\"INSERT INTO event(title, description, event_date, event_time)\n VALUES(%s, %s, %s, %s)\"\"\"\n arguments = (event.title, event.description, event.event_date, event.event_time)\n self.db.initialize_connection()\n self.db.cursor.execute(sql, arguments)\n self.db.connection.commit()\n self.db.close_connection()", "def writeEvent(self):\n\t\ttry:\n\t\t\tif self.dataFileHnd:\n\t\t\t\tself.dataFileHnd.writeRecord( (self.mdList())+[self.eventData] )\n\t\texcept sqlite3.OperationalError, err:\n\t\t\t# If the db is locked, wait 1 s and try again.\n\t\t\tprint err\n\t\t\ttime.sleep(1)\n\t\t\tself.writeEvent()\n\t\t# else:\n\t\t# \traise MissingMDIOError(\"Meta-data I/O object not initialized.\")", "def save_data(self):\n db.session.add(self)\n db.session.commit( )", "async def _store_incoming(self, event, db):\n ts = event.get('ts') or time.time()\n user = event.get('user')\n\n if isinstance(user, dict):\n user = user.get('id')\n\n logger.debug('Saving incoming event %s from %s', event['type'], user)\n await database.__dict__[db.type].dispatcher.save_incoming_event(\n ts=ts, user=user, event=event, db=db\n )\n\n await db.commit()", "def save(self):\n # send data to be saved by another job\n save_callevent.delay(self.data)", "def save(self):\n file = Path(\"config/event_{0}.json\".format(self.name))\n try:\n file.write_text(self.toJSON())\n except Exception as err:\n raise(err)", "def save(self)->None:\n database.cursor.execute(\"INSERT INTO meetups(topic,happening_date,tags,location,images,body) VALUES(%s,%s,%s,%s,%s,%s) RETURNING id\", (\n self.topic,\n self.happening_on,\n self.tags,\n self.location,\n self.images,\n self.body\n ))\n super().save()", "def send(self, event):\r\n try:\r\n self.collection.insert(event, manipulate=False)\r\n except PyMongoError:\r\n # The event will be lost in case of a connection error.\r\n # pymongo will re-connect/re-authenticate automatically\r\n # during the next event.\r\n msg = 'Error inserting to MongoDB event tracker backend'\r\n log.exception(msg)", "def store_event(self, event: EventLogEntry) -> None:\n check.inst_param(event, \"event\", EventLogEntry)\n insert_event_statement = self.prepare_insert_event(event)\n run_id = event.run_id\n\n with self.run_connection(run_id) as conn:\n conn.execute(insert_event_statement)\n\n if event.is_dagster_event and event.dagster_event.asset_key: # type: ignore\n check.invariant(\n event.dagster_event_type in ASSET_EVENTS,\n \"Can only store asset materializations, materialization_planned, and\"\n \" observations in index database\",\n )\n\n event_id = None\n\n # mirror the event in the cross-run index database\n with self.index_connection() as conn:\n result = conn.execute(insert_event_statement)\n event_id = result.inserted_primary_key[0]\n\n self.store_asset_event(event, event_id)\n\n if event_id is None:\n raise DagsterInvariantViolationError(\n \"Cannot store asset event tags for null event id.\"\n )\n\n self.store_asset_event_tags(event, event_id)\n\n if event.is_dagster_event and event.dagster_event_type in ASSET_CHECK_EVENTS:\n 
self.store_asset_check_event(event, None)", "def _write_event(self, event):\n if self.run is None:\n self.add_run()\n with RecordWriter(self._record_file, \"ab\") as fh:\n fh.write(event)", "def add_event(event):\n backend_lock.acquire()\n sql_session = get_session()\n sql_session.add(event)\n sql_session.commit()\n backend_lock.release()", "def save_alert(self, alert):\n self.database_list.append(alert) # fake database for demo", "def save_changes(self, data):\n db.session.add(data)\n db.session.commit()", "def onSave(self):\n self.triggerEvent(self.EVENT_SAVE_BUTTON_CLICKED)", "def save(self):\n self.emit(\"save\", self.data)", "def save_evc(self, evc):\n self.box.data[evc.id] = evc.as_dict()\n\n content = {'namespace': self.namespace,\n 'box_id': self.box.box_id,\n 'data': self.box.data,\n 'callback': self._save_evc_callback}\n\n event = KytosEvent(name='kytos.storehouse.update', content=content)\n self.controller.buffers.app.put(event)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterate through all events pages
def get_events(self): url = '/v2.4/'+self.page_id+'/events' data = self.graph.request(url) while 'next' in data['paging'].keys(): print data['paging']['next'] data = self.graph.request(url, args={ 'limit' : 100, 'after' : data['paging']['cursors']['after'] }) return data
[ "def _events(self):\n try:\n latest_event = Event.objects.latest('start_time')\n last_update = latest_event.start_time\n except Event.DoesNotExist:\n last_update = timezone.make_aware(datetime.min,\n timezone.get_default_timezone())\n\n self.stdout.write('Pulling events from OffTheGrid facebook page')\n fbconsole.ACCESS_TOKEN = '{}|{}'.format(FB_APP_ID, FB_APP_SECRET)\n events = fbconsole.get('/OffTheGridSF/events')['data']\n\n for event in events:\n if parse(event['start_time']) > last_update:\n yield event", "def display_pages(self, title):\n log.debug(\"obj_dict[Event]\")\n for item in self.report.obj_dict[Event].items():\n log.debug(\" %s\" % str(item))\n event_handle_list = self.report.obj_dict[Event].keys()\n event_types = []\n for event_handle in event_handle_list:\n event = self.report.database.get_event_from_handle(event_handle)\n event_types.append(str(event.get_type()))\n with self.report.user.progress(_(\"Narrated Web Site Report\"),\n _(\"Creating event pages\"), \n len(event_handle_list) + 1) as step:\n self.EventListPage(self.report, title, event_types, event_handle_list)\n\n for event_handle in event_handle_list:\n step()\n self.EventPage(self.report, title, event_handle)", "def test_logevents_mainpage(self):\n mysite = self.get_site()\n mainpage = self.get_mainpage()\n for entry in mysite.logevents(page=mainpage, total=3):\n self.assertEqual(entry.page().title(), mainpage.title())\n self.assertEqual(entry.page(), mainpage)", "def scrape_events(meta_url, collection):\r\n options = Options()\r\n options.add_argument('--headless')\r\n driver = webdriver.Firefox(options=options)\r\n driver.get(meta_url)\r\n soup = BeautifulSoup(driver.page_source, 'html.parser')\r\n meta_dropdown = soup.find('select', {'name': 'meta'}) # get drop down selector for meta\r\n selected_meta = meta_dropdown.find('option', selected=True) # get current meta\r\n \r\n def get_next(d, class_name):\r\n \"\"\"Check if the next button is still valid\"\"\"\r\n try:\r\n button = d.find_elements_by_class_name('Nav_PN')[-1]\r\n return button if button.text == 'Next' else False\r\n except Exception as e:\r\n return False\r\n \r\n page = 1\r\n while True:\r\n print(f'\\nScraping event page {page}...')\r\n next_btn = get_next(driver, 'Nav_PN')\r\n soup = BeautifulSoup(driver.page_source, 'html.parser') # make some soup\r\n \r\n for event in soup.find_all(class_='Stable')[2].find_all(class_='hover_tr'): # 10 events list table\r\n \"\"\"\r\n This loop iterates through event table rows, pulling out an ID number,\r\n the star rating and the date of the event\r\n \"\"\"\r\n link = event.a # associated hyperlink\r\n eid = re.search(r\"e=(\\d+)&\", link['href']).group(1) # unique id number\r\n stars = event.find(class_='O16').find_all('img') # star rating / level\r\n collection.insert_one({\r\n 'id': eid,\r\n 'name': link.text,\r\n 'date': event.find(class_='S10').text,\r\n 'level': 4 if 'bigstar' in stars[0]['src'] else len(stars),\r\n 'link': mtgtop8_url.format(link['href']),\r\n 'meta': selected_meta.text\r\n })\r\n \r\n if next_btn:\r\n next_btn.click()\r\n page += 1\r\n sleep(1)\r\n else:\r\n print('\\n\\n')\r\n driver.close()\r\n break", "def get_event_list(self):\n\t\tif self.event_list == []:\n\t\t\ttry:\n\t\t\t\tresponse = urllib.urlopen(self.url)\n\t\t\texcept HTTPError as e:\n\t\t\t\tprint ('Http error. Error code: ', e.code)\n\t\t\texcept URLError as e:\n\t\t\t\tprint( 'Url Error. 
Reason: ', e.reason)\n\t\t\telse:\n\t\t\t\tself.page_content = html.fromstring(urllib.urlopen(self.url).read())\n\t\t\t\tself.select_page_content(self.page_content)\n\t\t\n\t\treturn self.event_list", "def show_events_list():\r\n\tevents_list = Page.objects.filter(tags='events').order_by('-created')\r\n\treturn {'events_list': events_list}", "def events(self) -> [EventPage]:\n\n # Get list of live event pages that are descendants of this page\n events: [EventPage] = EventPage.objects.live().descendant_of(self)\n\n # Order by date\n return events.order_by(\"-time\")", "def events(request):\n events = []\n\n # Get all upcoming events.\n upcoming = Event.objects.filter(start_date__gte=datetime.date.today)\n upcoming = upcoming.order_by('start_date')\n\n for e in upcoming:\n events.append(e)\n\n context = collect_events(events)\n return render(request, 'events.html', context)", "def __show_all_events(self):\n for event in self.events_list:\n self.__print_events_info(event)\n print()", "def test_event_page(self):\n res = self.client.get('/events')\n data = res.data.decode('utf-8')\n assert res.status == '200 OK'\n assert 'Upcoming Events' in data", "def _get_logbook_page(\n self, path: str, args: Optional[dict] = None\n ) -> Iterator[dict]:\n end_of_events = False\n\n while not end_of_events:\n res = self._get(path, args=args)\n end_of_events = res[\"endOfEvents\"]\n args = {\"cursor\": res[\"nextCursor\"]}\n\n yield from res[\"events\"]", "def _iter_events(self) -> Generator:\n response = self.client.call()\n events: list = response.json()\n\n if not events:\n return []\n\n while True:\n yield events\n last = events.pop()\n self.client.set_next_run_filter(last['@timestamp'])\n response = self.client.call()\n events = response.json()\n try:\n events.pop(0)\n assert events\n except (IndexError, AssertionError):\n LOG('empty list, breaking')\n break", "def scrape_events(path, urls):\n seen_ids = set()\n result = []\n for url in urls:\n # Get all of the Network requests being sent out\n print(f'Processing {url}')\n driver.get(url)\n browser_log = driver.get_log('performance') \n events = [process_browser_log_entry(entry) for entry in browser_log]\n results = []\n # Find the Network request that sends a GET request to EventBrite API\n for event in events:\n if event['method'] == 'Network.responseReceived':\n # print(event)\n if 'event_ids' in event['params']['response']['url']:\n results.append(event)\n # Get the GET request URL\n get_url = \"\"\n # TODO: Sometimes returning 0 or more than 1... 
I'm not sure why :(\n if len(results) >= 1:\n get_url = results[0]['params']['response']['url']\n # Get the GET request response JSON\n json_response = get_request(get_url)\n event_list = json_response['events']\n # Find unique events in the response JSON \n unique_event_list = []\n for event in event_list:\n if event['id'] not in seen_ids:\n seen_ids.add(event['id'])\n unique_event_list.append(event)\n parsed_events = parse_event_page(unique_event_list)\n result.extend(parsed_events)\n else:\n print(results)\n print('yikes something went wrong')\n\n driver.close()\n return result\n # save_events(path, result)", "def slurp_events(self):\n while self.has_event():\n self.get_event()", "def display_all_evets():\n\n view.print_all_events(events.Event.get_events())", "def list_event(request):\n event_list = Event.objects.all()\n paginator = Paginator(event_list, 5)\n\n try:\n page = int(request.GET.get('page', '1'))\n except ValueError:\n page = 1\n\n # If page request (9999) is out of range, deliver last page of results.\n try:\n event_list = paginator.page(page)\n except (EmptyPage, InvalidPage):\n event_list = paginator.page(paginator.num_pages)\n\n context = {'event_list': event_list }\n return render_to_response('event_list.html',\n context,\n context_instance=RequestContext(request))", "def test_get_all_events_page(self):\n test_user = User.objects.create(username=\"TestUser\",\n password=\"TestPassword\")\n membership = Membership.objects.create(user_id='1')\n self.client.force_login(test_user)\n test_event = Event.objects.create(\n title=\"TestEvent\",\n description=\"Test Event Works\",\n price=None,\n age_range=\"Adults\",\n address=\"70 Castle Street\",\n town=\"Hamilton\",\n post_code=\"ML3 3PU\",\n event_type=\"Arts and Crafts\",\n event_date_begins=\"2019-10-31\",\n event_date_ends=\"2019-10-31\",\n event_time_begins=\"18:00:00\",\n event_time_ends=\"19:00:00\",\n event_day=\"Tuesday\",\n max_participants=\"25\",\n event_host=test_user,\n image=None\n )\n page = self.client.get(\"/search/\")\n self.assertEqual(page.status_code, 200)\n self.assertTemplateUsed(page, \"view_all_events.html\")", "def _next_page(self):\n if self.next_page_token is None:\n raise LastPage\n next_result = self._caller._poll_for_decision_task(\n next_page_token=self.next_page_token,\n reverse_order=True)\n self.next_page_token = self._get_next_page_token(next_result)\n self._events.extend(next_result['events'])\n return next_result['events']", "def events(self) -> Generator[dict, None, None]:\n\n for audit_file, audit_type in self.identified_files.items():\n temp_file_path = f\"{self.tempdir.name}/{audit_file}\"\n\n if audit_type == \"stateagentinspector\":\n yield from self.parse_agent_events(temp_file_path)\n\n # If we have atleast the hits.json file, we can make alert nodes\n if self.alert_files[\"hits.json\"]:\n yield from self.parse_alert_files(self.tempdir.name)\n\n self.tempdir.cleanup()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Setup the Binary Sensor platform fo EnOcean.
def setup_platform(hass, config, add_devices, discovery_info=None): dev_id = config.get(CONF_ID, None) devname = config.get(CONF_NAME, "EnOcean binary sensor") add_devices([EnOceanBinarySensor(dev_id, devname)])
[ "def setUp(self):\n self.platform = wirelesstagpy.WirelessTags(username=USERNAME, password=PASSWORD)\n self.tag_outdoor = wirelesstagpy.SensorTag(MOCK.OUTDOOR_PROBE, self.platform)\n self.platform._tags[\"fake-1\"] = self.tag_outdoor # pylint: disable=protected-access", "def setUp(self):\n self.platform = wirelesstagpy.WirelessTags(username=USERNAME, password=PASSWORD)", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n host = config[CONF_HOST]\n monitored_variables = config[CONF_MONITORED_VARIABLES]\n\n charger = openevsewifi.Charger(host)\n\n entities = [\n OpenEVSESensor(charger, description)\n for description in SENSOR_TYPES\n if description.key in monitored_variables\n ]\n\n add_entities(entities, True)", "def setup_sensors(self):\n super(EddRoach2ProductController, self).setup_sensors()\n self._firmware_server_sensor = Sensor.string(\n \"firmware-server\",\n description=\"The address of the firmware server started by this product\",\n default=\"\",\n initial_status=Sensor.UNKNOWN)\n self.add_sensor(self._firmware_server_sensor)\n self._parent.mass_inform(Message.inform('interface-changed'))", "def setup_platform(hass, config, add_entities, discovery_info=None):\n _LOGGER.debug('Setup__sensor__')\n conf_name = hass.data[DOMAIN]['conf_name']\n sensors = hass.data[DOMAIN]['bsensors']\n # All data was correct and sensor initialized\n dev = []\n for variable in sensors:\n dev.append(SauresSensor(\n conf_name, variable[0], variable[2],\n SENSOR_TYPES[variable[1]][0],\n SENSOR_TYPES[variable[1]][1],\n SENSOR_TYPES[variable[1]][2]))\n add_entities(dev, True)", "def setup(hass, config):\n import insteon\n\n username = config[DOMAIN][CONF_USERNAME]\n password = config[DOMAIN][CONF_PASSWORD]\n api_key = config[DOMAIN][CONF_API_KEY]\n\n global INSTEON\n INSTEON = insteon.Insteon(username, password, api_key)\n\n if INSTEON is None:\n _LOGGER.error(\"Could not connect to Insteon service\")\n return False\n\n discovery.load_platform(hass, 'light', DOMAIN, {}, config)\n\n return True", "def setup_platform(hass, config, add_devices, discovery_info=None):\n sensors = []\n for coil in config.get(CONF_COILS):\n sensors.append(ModbusHASBinarySensor(\n hass, \n coil.get(CONF_NAME),\n coil.get(CONF_SLAVE),\n coil.get(CONF_COIL)))\n add_devices(sensors)", "def setup_platform(hass, config, add_devices, discovery_info=None):\r\n pull_mode = config[CONF_PULL_MODE]\r\n invert_logic = config[CONF_INVERT_LOGIC]\r\n\r\n iopi = IOPi(config.get(CONF_I2C_ADDRESS), True)\r\n\r\n binary_sensors = []\r\n pins = config[CONF_PINS]\r\n\r\n for pin_num, pin_name in pins.items():\r\n binary_sensors.append(abelectronicsiopiBinarySensor(pin_name, pin_num, pull_mode, invert_logic, iopi))\r\n add_devices(binary_sensors, True)", "def platform_setup():\n return PlatformSetupFixture()", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n host: str = config[CONF_HOST]\n port: int = config[CONF_PORT]\n name: str = config[CONF_NAME]\n url = f\"http://{host}:{port}/api/LiveData.xml\"\n\n gateway = Ted5000Gateway(url)\n\n # Get MUT information to create the sensors.\n gateway.update()\n\n entities = []\n for mtu in gateway.data:\n for description in SENSORS:\n entities.append(Ted5000Sensor(gateway, name, mtu, description))\n\n add_entities(entities)", "def setup(self):\n\n 
self._enable_torque(self._reg.TORQUE_ENABLE)\n self.change_operating_mode(self._reg.MODE_EXT_POSI)\n # set to max velocity\n self.change_veloity(self._default_velocity)", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n stop_id = config[CONF_STOP_ID]\n api_key = config[CONF_API_KEY]\n route = config.get(CONF_ROUTE)\n destination = config.get(CONF_DESTINATION)\n name = config.get(CONF_NAME)\n\n data = PublicTransportData(stop_id, route, destination, api_key)\n add_entities([TransportNSWSensor(data, stop_id, name)], True)", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discover_info: DiscoveryInfoType | None = None,\n) -> None:\n name = config[CONF_NAME]\n host = config[CONF_HOST]\n port = config[CONF_PORT]\n\n avr = hkavr.HkAVR(host, port, name)\n avr_device = HkAvrDevice(avr)\n\n add_entities([avr_device], True)", "def setup_platform(hass, config, add_entities, discovery_info=None):\n zigbee_device = hass.data[DOMAIN]\n add_entities([XBeeSwitch(XBeeDigitalOutConfig(config), zigbee_device)])", "def setup_platform(hass, config, add_entities, discovery_info=None):\n name = hass.data[STE_DOMAIN]['name']\n ste_data = hass.data[STE_DOMAIN]['ste_data']\n\n add_entities([StiebelEltron(name, ste_data)], True)", "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n if discovery_info is None:\n return\n add_entities([EufyHomeSwitch(discovery_info)], True)", "def __init__(self, data, controller):\n super().__init__(data, controller)\n self.__state = False\n\n self.type = 'parking brake sensor'\n self.hass_type = 'binary_sensor'\n self.sensor_type = 'power'\n\n self.name = self._name()\n\n self.uniq_name = self._uniq_name()\n self.bin_type = 0x1\n self.update()", "def setup_platform(hass, config, add_devices, discovery_info=None):\n from pysnmp.hlapi import (\n getCmd, CommunityData, SnmpEngine, UdpTransportTarget, ContextData,\n ObjectType, ObjectIdentity)\n\n name = config.get(CONF_NAME)\n host = config.get(CONF_HOST)\n\n errindication, _, _, _ = next(\n getCmd(SnmpEngine(),\n CommunityData('public', mpModel=0),\n UdpTransportTarget((host, 161)),\n ContextData(),\n ObjectType(ObjectIdentity(BASEOID))))\n\n if errindication:\n _LOGGER.error(\"Please check the details in the configuration file\")\n return False\n else:\n data = BrotherQLData(host)\n add_devices([BrotherQLSensor(data, name)], True)", "def initialize(self):\n self.log.info(\"Initialize raspPinball hardware.\")\n\n self.config = self.machine.config['rasppinball']\n self.machine.config_validator.validate_config(\"rasppinball\", self.config)\n print(\"***************************\")\n print(self.config)\n #self.machine_type = (\n # self.machine.config['hardware']['driverboards'].lower())\n\n self._connect_to_hardware()\n\n\n # keypad\n self._kp = Keypad()\n self.old_key = \"\"\n self.key = \"\"\n # leds\n self.init_strips()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load an ARFF File from a file.
def load(filename): o = open(filename) s = o.read() a = ArffFile.parse(s) o.close() return a
[ "def LoadFromFile(self, filename):\n f = open(filename, 'r')\n contents = f.read()\n self.LoadFromData(contents)\n f.close()", "def load(self, arffile=None):\n inputstream = _get_file_object(arffile)\n if inputstream is None:\n inputstream = self.inputstream\n if inputstream is None:\n return False\n\n arff_data = loadarff(inputstream)\n self.data = arff_data[0]\n self.attributes = arff_data[1]\n return True", "def loadFastaFile(self, faFile):\n rows = [(faRec.id, str(faRec.seq))\n for faRec in SeqIO.parse(faFile, \"fasta\")]\n self.loads(rows)", "def from_file(file: str):\n with open(file, \"rb\") as elf_file:\n bytes = bytearray(elf_file.read())\n\n return ELF(file, bytes)", "def load(self, filename):\n aead_f = open(filename, \"rb\")\n buf = aead_f.read(1024)\n if buf.startswith(YHSM_AEAD_CRLF_File_Marker):\n buf = YHSM_AEAD_File_Marker + buf[len(YHSM_AEAD_CRLF_File_Marker):]\n if buf.startswith(YHSM_AEAD_File_Marker):\n if buf[len(YHSM_AEAD_File_Marker)] == chr(1):\n # version 1 format\n fmt = \"< I %is\" % (pyhsm.defines.YSM_AEAD_NONCE_SIZE)\n self.key_handle, self.nonce = struct.unpack_from(fmt, buf, len(YHSM_AEAD_File_Marker) + 1)\n self.data = buf[len(YHSM_AEAD_File_Marker) + 1 + struct.calcsize(fmt):]\n else:\n raise pyhsm.exception.YHSM_Error('Unknown AEAD file format')\n else:\n # version 0 format, just AEAD data\n self.data = buf[:pyhsm.defines.YSM_MAX_KEY_SIZE + pyhsm.defines.YSM_BLOCK_SIZE]\n aead_f.close()", "def load(cls, from_file):\n raise NotImplementedError", "def load_file(self):\n with open(self.src_f, 'r') as src:\n self.src_txt = src.readlines()", "def from_file(cls, fileobj):\n raise NotImplementedError('from_file not implemented')", "def from_file(f, origin=None, rdclass=dns.rdataclass.IN,\n relativize=True, zone_factory=Zone, filename=None,\n allow_include=True, check_origin=True):\n\n with contextlib.ExitStack() as stack:\n if isinstance(f, str):\n if filename is None:\n filename = f\n f = stack.enter_context(open(f))\n return from_text(f, origin, rdclass, relativize, zone_factory,\n filename, allow_include, check_origin)", "def parser(path):\n\t\n\tdata = Arff()\n\tdata.read_arff(path)\n\t\n\treturn data", "def _read_from_file(self, filename):\n ff = fits.open(filename)\n # Load the normalized intensity\n self.norm_int = ff[0].data\n # Load the other parameters\n self.lam = ff[1].data['lam']\n self.lam_unit = ff[1].columns['lam'].unit\n self.theta = ff[2].data['theta']\n self.taux = ff[3].data['taux']\n # Set halo type\n self.description = filename", "def loadFile(self):\n fhex = open(self.mFileName,'r')\n for l in fhex.readlines():\n rec = self.parseRecord(l)\n if rec[0]==0: # data record\n self.processRecord(rec)\n elif rec[0]==1: # end of file\n break \n elif rec[0]==3: # start address; Start Segment Address Record\n self.mStartAddress = rec[1]\n elif rec[0]==2: # Extended Segment Address Record\n self.mOffset = (rec[3][0]<<8+rec[3][1]) << 4\n elif rec[0]==4: # Extended Linear Address Record\n self.mOffset = rec[3][0]<<8+rec[3][1] << 16\n print len(self.mBinData)\n for elem in self.mBinData:\n print elem[\"startaddr\"],elem[\"endaddr\"]\n fhex.close()", "def load(self, filename):\n with open(filename, 'rb') as f:\n self._read_set(PeekFile(f), 0, self._data)", "def load(self, file):\n\n with open(file, 'r') as f:\n self._lines = Lines(f.read().splitlines())\n\n self._parse()", "def from_file(cls, filename: str) -> \"OntoALAConfig\":\n with open(filename, \"r\") as config_file:\n config_dict = yaml.load(config_file, Loader=yaml.FullLoader)\n return 
OntoALAConfig(\n knowledge_file=config_dict[\"knowledge-file\"],\n )", "def load(self, file_name):\n self.file_name = file_name\n self.frd = FRDFile(file_name)\n self._build_node_kon()\n self._build_step_idx()", "def from_file(cls, filename, code='', cif=False):\n if cif:\n a = convert_cif_to_ampal(cif=filename, path=True)\n a.id = code\n else:\n a = convert_pdb_to_ampal(pdb=filename, path=True, pdb_id=code)\n instance = cls(assembly=a)\n return instance", "def parse_arff()->dict:\n lines = []\n for line in fileinput.input():\n lines.append(line)\n data = arff.loads(\"\\n\".join(lines))\n dataset: Dataset = data\n return dataset", "def from_file(cls, filename):\n range_string = Path(filename).open().read()\n return cls(range_string)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Define a new attribute. atype has to be one of 'numeric', 'string', or 'nominal'. For nominal attributes, pass the possible values as data.
def define_attribute(self, name, atype, data=None): self.attributes.append(name) self.attribute_types[name] = atype self.attribute_data[name] = data
[ "def addAttr(*args, attributeType: Union[AnyStr, bool]=\"\", binaryTag: Union[AnyStr, bool]=\"\",\n cachedInternally: bool=True, category: Union[AnyStr, List[AnyStr], bool]=\"\",\n dataType: Union[AnyStr, List[AnyStr], bool]=\"\", defaultValue: Union[float,\n bool]=0.0, disconnectBehaviour: Union[int, bool]=0, enumName: Union[AnyStr,\n bool]=\"\", exists: bool=True, fromPlugin: bool=True, hasMaxValue: bool=True,\n hasMinValue: bool=True, hasSoftMaxValue: bool=True, hasSoftMinValue: bool=True,\n hidden: bool=True, indexMatters: bool=True, internalSet: bool=True, keyable:\n bool=True, longName: Union[AnyStr, bool]=\"\", maxValue: Union[float, bool]=0.0,\n minValue: Union[float, bool]=0.0, multi: bool=True, niceName: Union[AnyStr,\n bool]=\"\", numberOfChildren: Union[int, bool]=0, parent: Union[AnyStr, bool]=\"\",\n proxy: Union[AnyStr, bool]=\"\", readable: bool=True, shortName: Union[AnyStr,\n bool]=\"\", softMaxValue: Union[float, bool]=0.0, softMinValue: Union[float,\n bool]=0.0, storable: bool=True, usedAsColor: bool=True, usedAsFilename: bool=True,\n usedAsProxy: bool=True, writable: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[None, Any]:\n pass", "def setatt(self,staname, newatt, newvalue):\n try:\n self.attributes.loc[staname, newatt] = newvalue\n except KeyError:\n pass", "def add_attribute(a, name, other):\n raise TypeError(\"can't add new attribute\")", "def new_attribute_definition(self, name, owner, type, definition):\r\n adh = ct.c_void_p(None)\r\n dll.kvaDbAddAttributeDefinition(self._handle, ct.byref(adh))\r\n dll.kvaDbSetAttributeDefinitionType(adh, type.value)\r\n dll.kvaDbSetAttributeDefinitionName(adh, name.encode('utf-8'))\r\n dll.kvaDbSetAttributeDefinitionOwner(adh, owner)\r\n\r\n atr_def = AttributeDefinition(self, adh, definition)\r\n return atr_def", "def attr_type(self, attr_type):\n\n self._attr_type = attr_type", "def opt_attr(self, attr_name, _type=None, value=None):\n self.attributes[attr_name] = Attribute(_type, value)\n self.optional_attrs.append(attr_name)", "def create_attribute(self):", "def req_attr(self, attr_name, _type=None, value=None):\n self.attributes[attr_name] = Attribute(_type, value)\n self.required_attrs.append(attr_name)", "def create_attribute(owner_name, att_name, context=ast.Load(), line=0, column=0):\n attribute = ast.Attribute()\n attribute.attr = att_name\n attribute.ctx = context\n attribute.lineno = line\n attribute.col_offset = column\n\n if isinstance(owner_name, str):\n attribute_name = ast.Name()\n attribute_name.ctx = ast.Load()\n attribute_name.id = owner_name\n attribute_name.lineno = line\n attribute_name.col_offset = column\n\n attribute.value = attribute_name\n else:\n attribute.value = owner_name\n\n return attribute", "def setAttribute( self, aname, value ):\n self.attributes[aname] = value\n return S_OK()", "def add_attribute(self, attr):\n self.attrs.add_attribute(attr)", "def add_attribute(self, attribute, key=None):\n if isinstance(attribute, TileType):\n key = attribute.name if key is None else key\n self.attributes[key] = attribute\n else:\n key = key if key is not None else str(id(attribute))\n self.attributes[key] = String(key, attribute)", "def visit_AttributeDeclaration(self, node):\n attr_type = node.type or 'object'\n self.extend_ops([\n (SetLineno, node.lineno),\n (DUP_TOP, None), #cls._add_user_attribute(name, type, is_event)\n (LOAD_CONST, node.name),\n (LOAD_NAME, attr_type),\n (LOAD_CONST, node.is_event),\n (CALL_FUNCTION, 0x0003),\n (POP_TOP, None),\n ])", "def 
create_attribute(self, name, value=\"\"):\n self.database.Metadata.create_attribute(self.name + name, value=value)", "def addAttribute(self, att):\r\n\t\t\r\n\t\tself.attributes.append(att)", "def add_attribute(self):\n pass", "def add_attribute(self, name, value):\n\t\tif name in self.__attr_hash:\n#\t\t\tattribue already exists\n\t\t\ta = self.__attr_hash[name]\n\t\t\tif name == 'class':\n#\t\t\t\t'class' is a magic attribute\n\t\t\t\tif a['value']:\n\t\t\t\t\tvalue = ' ' + value\n\t\t\t\ta['value'] += value\n\t\t\telse:\n\t\t\t\ta['value'] = value\n\t\telse:\n\t\t\ta = {'name': name, 'value': value}\n\t\t\tself.__attr_hash[name] = a\n\t\t\tself.attributes.append(a)", "def add_data_attribute(self, name, data):\n # Get the new data's dimensions.\n data_height = -1\n if isinstance(data, np.ndarray):\n if data.ndim == 1:\n data_width = data.shape[0]\n if data.ndim > 1:\n data_width = data.shape[0]\n data_height = data.shape[1]\n # Check if n_samples has been set yet. If not, set it.\n # Otherwise, check that the dimensions match.\n if self.n_samples < 0:\n self.n_samples = data_height\n elif self.n_samples != data_height:\n raise ValueError('Cannot add attribute. New attribute has ' +\n 'a different number of samples than the other attributes.')\n if data.ndim == 3:\n # Add or update ourself.\n setattr(self, 'n_sectors', data.shape[2])\n self._object_attributes += ['n_sectors']\n else:\n # We only allow numpy arrays as data attributes.\n raise TypeError('Invalid data attribute type. Data attributes must ' +\n 'be numpy arrays.')\n\n # Check if n_pings has been set yet. If not, set it. Otherwise,\n # check that the dimensions match. When checking if dimensions\n # match, we allow a match on the number of pings OR the number of\n # samples since a 1d data attribute can be on either axis.\n if self.n_pings < 0:\n self.n_pings = data_width\n elif self.n_pings != data_width and self.n_samples != data_width:\n raise ValueError('Cannot add attribute. The new attribute has '\n 'a different number of pings or samples than the other attributes.')\n\n # Add the name to our list of attributes if it doesn't already exist.\n if name not in self._data_attributes:\n self._data_attributes.append(name)\n\n # Add or update ourself.\n setattr(self, name, data)\n\n # update the shape attribute\n self.shape = self._shape()", "def visit_AttributeDeclaration(self, node):\n attr_type = node.type or 'object'\n self.code_ops.extend([\n (SetLineno, node.lineno),\n (DUP_TOP, None), # cls._add_user_attribute(name, type, is_event)\n (LOAD_CONST, node.name),\n (LOAD_NAME, attr_type),\n (LOAD_CONST, node.is_event),\n (CALL_FUNCTION, 0x0003),\n (POP_TOP, None),\n ])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the overall ignorance
def update_overall_ignorance(overall_ignorance, object_ignorance, rate=0.05): return (1-rate)*overall_ignorance + rate*object_ignorance
[ "def update_potential(self):\n pass", "def dummy_update( self ):\r\n pass", "def updateAll(self):\n self.elevatorInterfaceVis.updateElevators(self.elevatorInterface)\n stats = self.elevatorInterface.bridge.getStats()\n self.infoPanel.updateStats(stats)", "def update_inhibition(self) -> None:\n if self.spec.inhibition_type == \"fffb\":\n self.calc_fffb_inhibition()\n else:\n self.calc_kwta_inhibition()\n\n self.units.update_inhibition(torch.Tensor(self.size).fill_(self.gc_i))", "def report_update():\r\n resources[\"water\"] = resources[\"water\"] - MENU[order][\"ingredients\"][\"water\"]\r\n resources[\"milk\"] = resources[\"milk\"] - MENU[order][\"ingredients\"][\"milk\"]\r\n resources[\"coffee\"] = resources[\"coffee\"] - MENU[order][\"ingredients\"][\"coffee\"]\r\n resources[\"money\"] = resources[\"money\"] + total", "def test_ipam_vlans_update(self):\n pass", "def exclude(self):\n\n self.eod.value = 0\n self.public.value = 0", "def test_update_impact_level(self):\n pass", "def update_percent(self):", "def test_update_incident_involvement(self):\n pass", "def update_analysis(self) -> None:\n\t\tcore.BNUpdateAnalysis(self.handle)", "def overwrite_status(self):\n pass", "def test_update_antivirus_settings(self):\n pass", "def _update_energies(self) -> NoReturn:\n self._currentTotPot = self.calculate_total_potential_energy()\n self._currentTotKin = self.calculate_total_kinetic_energy()\n self._currentTotE = self._currentTotPot if (np.isnan(self._currentTotKin)) else np.add(self._currentTotKin,\n self._currentTotPot)", "def update(self):\n self.updateNeuronCounters()\n self.updateAstrocyteActivations()\n self.performAstrocyteActions()", "def update(self):\n self.wall_list.update()\n self.enemy_list.update()\n self.sludge.update()\n self.consumeable.update()\n self.can_climb.update()", "def update_donation():", "def module_update_non_admin_invisible_any(self):\n self.test_runner.run_module_update_non_admin_invisible_any()", "def _update_level_data(self):\n\t\t# taxes, inhabitants\n\t\tself.tax_base = self.session.db.get_settler_tax_income(self.level)\n\t\tself.inhabitants_max = self.session.db.get_settler_inhabitants_max(self.level)\n\t\tif self.inhabitants > self.inhabitants_max: # crop settlers at level down\n\t\t\tself.inhabitants = self.inhabitants_max\n\n\t\t# consumption:\n\t\t# Settler productions are specified to be disabled by default in the db, so we can enable\n\t\t# them here per level.\n\t\tcurrent_lines = self.get_production_lines()\n\t\tfor (prod_line,) in self.session.db.get_settler_production_lines(self.level):\n\t\t\tif not self.has_production_line(prod_line):\n\t\t\t\tself.add_production_by_id(prod_line)\n\t\t\t# cross out the new lines from the current lines, so only the old ones remain\n\t\t\tif prod_line in current_lines:\n\t\t\t\tcurrent_lines.remove(prod_line)\n\t\tfor line in current_lines[:]: # iterate over copy for safe removal\n\t\t\t# all lines, that were added here but are not used due to the current level\n\t\t\tself.remove_production_by_id(line)\n\t\t# update instance graphics\n\t\tself.update_action_set_level(self.level)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the focus image at the target position
def check_target_position(environment, target_xy, fovea): temp_fovea = Fovea(target_xy, fovea.size, [0, 0, 0], fovea.unit) temp_image = temp_fovea.get_focus_image(environment) return temp_image
[ "def click_img(self, target_img):\n pos = imagesearch_loop(target_img, timesample=0.5)\n if pos[0] == -1:\n print(\"No image found\")\n else:\n self.click(pos)", "def extract_target_pixel_location(self):\n #Respective Image location\n pixel_array = self.imageprepare(self.image_path)\n\n #Select less_than_target color point --> must be calibrated\n #?? Should we use an abstract class here instead of an if statment ??\n if self.color == \"g\":\n less_than_target = .15\n else:\n raise ValueError(\"Unknown color value\")\n\n #Chooses target pixels as well as it's location\n target_pixels = []\n for pixel in enumerate(pixel_array):\n if pixel[1] < less_than_target:\n target_pixels.append(pixel[0])\n\n return target_pixels", "def _identify_target(self):\n \n # change the cursor for the drawing area\n x_cursor = gtk.gdk.Cursor(gtk.gdk.X_CURSOR)\n self.drawing_area.window.set_cursor(x_cursor)\n \n # set the drawing area mode\n self.drawing_area_mode = \"IDENTIFY_TARGET\"\n \n #clear the screen\n if self.box_drawn == True:\n self.redraw_current_image()", "def focus(self):\n self.image_window.focus_set()", "def GetBitmapFocus(self):\n\n return self.bmpFocus", "def get_target(img):\n # Find the contours\n # We only care about external contours\n contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n # Get the minimum bounding box of each target and filter\n target = filter_targets([(contour, cv2.minAreaRect(contour)) for contour in contours])\n # Simplify its contours\n if target:\n simple_contour = simplify_contour(target[0], 8)\n return (simple_contour, target[1]) if simple_contour is not None else None\n return None", "def focus_target(self):\n return self.widget", "def focus(self):\n\n # Getting the microscope height\n current_z = self.microscope.position(2)\n\n # Tabs of maximum match value and their location during the process\n vals = []\n locs = []\n\n # Getting the maxvals and their locations\n for i in self.template:\n\n res, val, loc = templatematching(self.cam.frame, i)\n locs += [loc]\n\n if res:\n # Template has been detected\n vals += [val]\n else:\n # Template has not been detected, val set at 0\n vals += [0]\n\n # Search of the highest value, indicating which template image match the best the current image\n maxval = max(vals)\n\n if maxval != 0:\n # At least one template has been detected, setting the microscope at corresponding height\n index = vals.index(maxval)\n loc = locs[index]\n focus_height = current_z + len(self.template) // 2 - index\n self.microscope.absolute_move(focus_height, 2)\n self.microscope.wait_motor_stop(2)\n dep = len(self.template) // 2 - index\n else:\n # No template has been detected, focus can not be achieved\n raise ValueError('The template image has not been detected.')\n\n return maxval, dep, loc", "def grab_focus(self):\r\n scene = self.get_scene()\r\n if scene and scene._focus_sprite != self:\r\n scene._focus_sprite = self", "def cursor_on_image_pixcoords_and_value(self, p):\n #p = self.mapToScene(e.pos())\n ix, iy = int(floor(p.x())), int(floor(p.y()))\n v = None\n arr = self.arr\n if ix<0\\\n or iy<0\\\n or iy>arr.shape[0]-1\\\n or ix>arr.shape[1]-1: pass\n else: v = self.arr[iy,ix]\n return ix, iy, v", "def search_given_picture_in_area_and_give_pos(target_pic, search_area, full_screen=False, debug_mode=False, debug_pic=None):\n # TODO Finish the comments\n if debug_mode:\n screen_shot = debug_pic\n if debug_pic is None:\n raise ValueError(\"Missing debug picture\")\n else:\n screen_shot = ImageGrab.grab()\n screen_shot = 
np.array(screen_shot)[:, :, :3] # sometimes PNG files can have 4 channels, which are not needed here\n ((left, top), (right, bottom)) = search_area\n if full_screen:\n search_window = screen_shot\n else:\n search_window = screen_shot[top: bottom, left: right]\n\n target = np.array(target_pic)[:, :, :3] # sometimes PNG files can have 4 channels, which are not needed here\n\n for line_index in range(search_window.shape[0]):\n for pixel_index in range(search_window.shape[1]):\n if search_window.shape[0] - line_index > target.shape[0] and search_window.shape[1] - pixel_index > target.shape[1]:\n if (search_window[line_index][pixel_index] == target[0][0]).all():\n if (target == search_window[line_index:line_index + target.shape[0],\n pixel_index: pixel_index + target.shape[1]]).all():\n if full_screen:\n return (pixel_index, line_index), (pixel_index + target.shape[1], line_index + target.shape[0])\n else:\n return (left + pixel_index, top + line_index), (left + pixel_index + target.shape[1], top + line_index + target.shape[0])\n return None\n\n # TODO Optimize this function. It now needs 3.7s to scan 1920*1080 screen\n\n # TODO Modify this function to threshold version", "def focus_cb(self, shell, channel):\n # Reflect transforms, colormap, etc.\n fitsimage = channel.fitsimage\n image = channel.get_current_image()\n if image is not None:\n chname = channel.name\n thumbkey = self._get_thumb_key(chname, image)\n new_highlight = set([thumbkey])\n\n if self.have_thumbnail(fitsimage, image):\n # schedule an update of the thumbnail to pick up changes\n self.redo_delay(fitsimage)\n else:\n # no image has the focus\n new_highlight = set([])\n\n if self.highlight_tracks_keyboard_focus:\n self.update_highlights(self._tkf_highlight, new_highlight)\n self._tkf_highlight = new_highlight", "def target_in_camera(self):\r\n raise NotImplementedError", "def draw_target(img, target):\n box = np.int0(cv2.boxPoints(target[1]))\n cv2.drawContours(img, [target[0]], -1, (0, 0, 255), 3)\n cv2.drawContours(img, [box], -1, (0, 255, 255), 2)", "def get_current_image(self):\r\n return self.images[self.frame]", "def get_active_target(self, inp_hist):\n go = inp_hist[:, 0]\n curr_targ = inp_hist[:, 3:5]\n next_targ = inp_hist[:, 5:7]\n return curr_targ * (1 - go[:, None]) + next_targ * go[:, None]", "def prep_robot_target(self):\n x = int(self.robot.target_x)\n y = int(self.robot.target_y)\n target_str = f\"Target (X,Y): {str(x)}, {str(y)}\"\n # Prepare the image and positions it on the screen\n self.target_image = self.font.render(target_str, True, self.text_color, self.bg_color)\n self.target_rect = self.target_image.get_rect()\n self.target_rect.left = self.location_rect.left\n self.target_rect.top = self.location_rect.bottom + self.line_gap", "def click_b(self, event, x, y, flags, params):\n if event == cv2.EVENT_LBUTTONDOWN:\n self.image_b_coordinates = (x, y)\n print(\"ImageB selected coordinates =\", self.image_b_coordinates)\n return x, y", "def get_click_pos(file_path, threshold=0.7):\n res = screen_shot(file_path)\n print(res.max())\n if res.max() > threshold:\n image = cv2.imread(file_path)\n h, w = image.shape[:2]\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n top_left = max_loc\n click_pos = (250 + top_left[0] + w / 2 + random.choice([1, 2, 3, 4, 5, 6]),\n 140 + top_left[1] + h / 2 + random.choice([1, 2, 3, 4, 5, 6]))\n return click_pos\n else:\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if target area is free
def check_free_space(environment, target_xy, fovea): temp_image = check_target_position(environment, target_xy, fovea) if np.array_equal(temp_image, np.zeros(temp_image.shape)): return True else: return False
[ "def is_free(self) -> bool:\n return self.places < self.total", "def is_free(self,x,y):\n self.insist_valid(x,y) \n return self.grid[y][x] == \" \"", "def is_free(self):\n return self.name == 'free' # (No coverage)", "def _isFree(self, pos:ndarray):\n map = self._state.getMap()\n return map.isInside(pos) and map.get(pos) != \"*\" \\\n and (self._parameters['allow_robot_overlap'] or not(self._isRobot(pos)))", "def is_free(self):\n return self._size > 0", "def guard_occupy_transition(self):\n if not self.get_free_positions:\n return True", "def is_free(self, location):\n if not 0 <= location[0] < self.size or not 0 <= location[1] < self.size:\n return False\n for car in self.cars:\n if location in car.locations_list():\n return False\n\n return True", "def isFree(point):\n global grid\n for i in point:\n if i < 0:\n return False\n try:\n value = grid[point[0]][point[1]][point[2]]\n # print value\n except:\n print \"point \", point, \"lies outside of grid\"\n value = False\n\n return value", "def checkFree(self, x, y):\n for i in range(self.numPieces):\n new_x = x + self.pos[self.rotation][i][0]\n new_y = y + self.pos[self.rotation][i][1]\n if not self.checkAvailable(new_x, new_y):\n return self.colors['busy']\n return self.colors['free']", "def _is_free_obstacle(self, obstacle):\n\n obst_coords = Obstacle.obs_to_tuple(obstacle)\n delta = obstacle.r + 0.03\n\n for prev_obstacle in self.obstacles:\n if prev_obstacle:\n obs = Obstacle.obs_to_tuple(prev_obstacle)\n if Utils.dist(obs, obst_coords) < self.epsilon:\n return False\n\n if Utils.dist(obst_coords, self.target) < delta or Utils.dist(obst_coords, self.source) < delta:\n return False\n\n return True", "def is_full(self):\n return self.remaining_space_in_hold() == 0", "def eligible(self, total: int) -> bool:\n assert total > 0\n return (self.count / total) < self.target", "def _is_target_reached(block, target):\n target_center_x = target[unity_constants.POSITION_X_FEATURE_INDEX]\n target_center_y = target[unity_constants.POSITION_Y_FEATURE_INDEX]\n center_x = block[unity_constants.POSITION_X_FEATURE_INDEX]\n center_y = block[unity_constants.POSITION_Y_FEATURE_INDEX]\n cos_theta = block[unity_constants.COSINE_ANGLE_FEATURE_INDEX]\n sin_theta = block[unity_constants.SINE_ANGLE_FEATURE_INDEX]\n width = block[unity_constants.WIDTH_FEATURE_INDEX]\n height = block[unity_constants.HEIGHT_FEATURE_INDEX]\n x1, y1 = geometry.rotate_rectangle_corner(\n center_x + width / 2, center_y - height / 2,\n center_x, center_y, cos_theta, sin_theta)\n x2, y2 = geometry.rotate_rectangle_corner(\n center_x + width / 2, center_y + height / 2,\n center_x, center_y, cos_theta, sin_theta)\n x3, y3 = geometry.rotate_rectangle_corner(\n center_x - width / 2, center_y + height / 2,\n center_x, center_y, cos_theta, sin_theta)\n x4, y4 = geometry.rotate_rectangle_corner(\n center_x - width / 2, center_y - height / 2,\n center_x, center_y, cos_theta, sin_theta)\n return geometry.is_point_in_rectangle(\n x1, y1, x2, y2, x3, y3, x4, y4, target_center_x, target_center_y,)", "def checkAvailable(self, x, y):\n return 0 <= x < self.rows and 0 <= y < self.cols and not self.gridBusy[x][y]", "def is_occupied(self):\n if (self.occupant != None):\n return True\n else:\n return False", "def is_blocked(self,x,y):\n return not self.is_free(x,y)", "def is_center_taken(self, candid):\n for t in self.TRACKERS:\n if sqrt( (t.x-candid.x)**2 + (t.y-candid.y)**2 ) <= 2:\n # print(\"[-] (\", candid.x, candid.y, \") is taken\")\n return True\n # print(\"[+] (\", candid.x, candid.y, \") is 
free\")\n return False", "def check_position_free(self, position):\n return self.board[position] == -1", "def has_free_cell(self):\n return self.free_cells[-1] is None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Evaluate loss and gradient for the three-layer convolutional network.
def loss(self, X, y=None): W1, b1 = self.params['W1'], self.params['b1'] W2, b2 = self.params['W2'], self.params['b2'] W3, b3 = self.params['W3'], self.params['b3'] # pass conv_param to the forward pass for the convolutional layer filter_size = W1.shape[2] conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2} # pass pool_param to the forward pass for the max-pooling layer pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2} scores = None ############################################################################ # TODO: Implement the forward pass for the three-layer convolutional net, # # computing the class scores for X and storing them in the scores # # variable. # ############################################################################ cnn_out, cnn_cache = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param) hidden_out, hidden_cache = affine_relu_forward(cnn_out, W2, b2) scores, scores_cache = affine_forward(hidden_out, W3, b3) ############################################################################ # END OF YOUR CODE # ############################################################################ if y is None: return scores loss, grads = 0, {} ############################################################################ # TODO: Implement the backward pass for the three-layer convolutional net, # # storing the loss and gradients in the loss and grads variables. Compute # # data loss using softmax, and make sure that grads[k] holds the gradients # # for self.params[k]. Don't forget to add L2 regularization! # ############################################################################ # Compute loss and gradients loss, dscores = softmax_loss(scores, y) dhidden, grads['W3'], grads['b3'] = affine_backward(dscores, scores_cache) dcnn, grads['W2'], grads['b2'] = affine_relu_backward(dhidden, hidden_cache) dX, grads['W1'], grads['b1'] = conv_relu_pool_backward(dcnn, cnn_cache) # Regularization loss = loss + 0.5*self.reg*np.sum(self.params['W3']**2) loss = loss + 0.5*self.reg*np.sum(self.params['W2']**2) loss = loss + 0.5*self.reg*np.sum(self.params['W1']**2) grads['W3'] = grads['W3'] + self.reg * self.params['W3'] grads['W2'] = grads['W2'] + self.reg * self.params['W2'] grads['W1'] = grads['W1'] + self.reg * self.params['W1'] ############################################################################ # END OF YOUR CODE # ############################################################################ return loss, grads
[ "def loss_func_c3():\n ub = global_model(Zb_elem_tensor) # ub constains element boundary value tensor, shape: (n_elem, 2, 1)\n dub_dx = K.gradients(ub, Zb_elem_tensor) # dub_dx contains du/dx tensor on boundary, shape: (n_elem,2,1)\n dub_dx_2 = K.gradients(dub_dx, Zb_elem_tensor) # second derivative tensor, shape: (n_elem,2,1)\n dub_dx_3 = K.gradients(dub_dx_2, Zb_elem_tensor) # third derivative tensor, shape: (n_elem,2,1)\n\n C0_concat = K.concatenate(ub, 0) # shape: (n_elem*2, 1)\n C1_concat = K.concatenate(dub_dx, 0) # shape: (n_elem*2, 1)\n C2_concat = K.concatenate(dub_dx_2, 0) # shape: (n_elem*2, 1)\n C3_concat = K.concatenate(dub_dx_3, 0) # shape: (n_elem*2,1)\n\n C0_residual = K.dot(bound_assembly_tensor, C0_concat) # shape: (n_elem-1, 1)\n C1_residual = K.dot(bound_assembly_tensor, C1_concat) # shape: (n_elem-1, 1)\n C2_residual = K.dot(bound_assembly_tensor, C2_concat) # shape: (n_elem-1, 1)\n C3_residual = K.dot(bound_assembly_tensor, C3_concat) # shape: (n_elem-1, 1)\n\n this_loss = K.sum(K.square(C0_residual)) + K.sum(K.square(C1_residual)) \\\n + K.sum(K.square(C2_residual)) + K.sum(K.square(C3_residual))\n return this_loss", "def three_layer_neuralnetwork(X, model, y=None, reg=0.0,verbose=0):\n \n # Unpack weights\n W1, b1, W2, b2, W3, b3 = model['W1'], model['b1'], model['W2'], model['b2'],model['W3'],model['b3']\n dW1,dW2,dW3,db1,db2,db3=np.zeros_like(W1),np.zeros_like(W2),np.zeros_like(W3),np.zeros_like(b1),np.zeros_like(b2),np.zeros_like(b3)\n \n N,D = X.shape\n C = b3.shape[0]\n assert W1.shape[0] == D, ' W1 2nd dimenions must match number of features'\n\n scores = np.zeros((N, C)) #declaring score variable to store scores\n #print scores.shape\n \n #'''\n #################################### written by me! ###########################################\n # Compute the forward pass\n \n #layer 1\n a1, cache_a1 = affine_forward(X,W1,b1)\n a1_out, cache_relu1 = relu_forward(a1)\n \n #layer 2\n a2, cache_a2 = affine_forward(a1_out,W2,b2)\n a2_out, cache_relu2 = relu_forward(a2)\n\n #layer 3\n scores, cache_a3 = affine_forward(a2_out,W3,b3)\n\n \n #################################################################################################\n #'''\n \n if verbose:\n print ['Layer {} Variance = {}'.format(i+1, np.var(l[:])) for i,l in enumerate([a1, a2, cache_a3[0]])][:]\n if y is None:\n return scores\n\n ########### compute the gradients ###########\n #softmax layer and dout\n data_loss, dout = softmax_loss(scores, y) \n\n #http://cs231n.github.io/neural-networks-case-study/\n reg_loss = (0.5 * reg * np.sum(W1 * W1) + 0.5 * reg * np.sum(W2 * W2)+0.5 * reg * np.sum(W3 * W3))\n\n\n #'''\n #################################### written by me! 
###########################################\n # Compute the backward pass\n #layer 3 \n dout, dW3, db3 = affine_backward(dout, cache_a3)\n dW3 += reg * W3\n\n #layer 2\n dout = relu_backward(dout, cache_relu2)\n dout, dW2, db2 = affine_backward(dout, cache_a2)\n dW2 += reg * W2\n \n #layer 1\n dout = relu_backward(dout, cache_relu1)\n dout, dW1, db1 = affine_backward(dout, cache_a1) \n dW1 += reg * W1 \n ##################################################################################################\n #'''\n \n loss = data_loss + reg_loss\n grads = {'W1': dW1, 'b1': db1, 'W2': dW2, 'b2': db2,'W3':dW3,'b3':db3}\n \n return loss, grads", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n \n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n \n scores = None \n cache = {}\n # def conv_relu_pool_forward(x, w, b, conv_param, pool_param): return out, cache;\n out, cache['layer1'] = layer_utils.conv_relu_pool_forward(X, W1, b1, conv_param, pool_param) \n # def affine_relu_forward(x, w, b): return out, cache;\n out, cache['layer2'] = layer_utils.affine_relu_forward(out, W2, b2)\n # def affine_forward(x, w, b): return out, cache;\n scores, cache['layer3'] = layers.affine_forward(out, W3, b3)\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! 
#\n ############################################################################\n \n loss, grads = 0, {}\n\n # def softmax_loss(x, y): return loss, dscore;\n loss, dscores = layers.softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3))\n\n # def affine_backward(dout, cache): return dx, dw, db;\n dout, dW3, db3 = layers.affine_backward(dscores, cache['layer3']) \n # def affine_relu_backward(dout, cache): return dx, dw, db;\n dout, dW2, db2 = layer_utils.affine_relu_backward(dout, cache['layer2'])\n # def conv_relu_pool_backward(dout, cache): return dx, dw, db;\n dout, dW1, db1 = layer_utils.conv_relu_pool_backward(dout, cache['layer1'])\n\n # reg\n grads['W3'], grads['b3'] = dW3 + self.reg * W3, db3\n grads['W2'], grads['b2'] = dW2 + self.reg * W2, db2\n grads['W1'], grads['b1'] = dW1 + self.reg * W1, db1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def test_gradcheck_3d():\n run_gradcheck_3d(propagator=scalarprop)", "def three_layer_neuralnetwork(X, model, y=None, reg=0.0,verbose=0):\n \n # Unpack weights\n W1, b1, W2, b2, W3, b3 = model['W1'], model['b1'], model['W2'], model['b2'],model['W3'],model['b3']\n N,D= X.shape\n\n assert W1.shape[0] == D, ' W1 2nd dimenions must match number of features'\n \n dW1,dW2,dW3,db1,db2,db3=np.zeros_like(W1),np.zeros_like(W2),np.zeros_like(W3),np.zeros_like(b1),np.zeros_like(b2),np.zeros_like(b3)\n # Compute the forward pass\n \n '''\n AffineLayer = X.dot(W1)+b1 \n ReluLayer,_ = relu_forward(AffineLayer)\n AffineLayer2 = ReluLayer.dot(W2) + b2\n ReluLayer2,_ = relu_forward(AffineLayer2)\n AffineLayer3 = ReluLayer2.dot(W3) + b3\n scores = AffineLayer3\n \n print X.shape\n print W1.shape\n print b1.shape\n print W2.shape\n print b2.shape\n print W3.shape\n print b3.shape\n '''\n affine_out1,cache1 = affine_forward(X, W1, b1)\n relu_out1,cache_relu1 = relu_forward(affine_out1)\n \n affine_out2,cache2 = affine_forward(relu_out1, W2, b2)\n relu_out2,cache_relu2 = relu_forward(affine_out2)\n \n affine_out3,cache3 = affine_forward(relu_out2, W3, b3)\n scores = affine_out3\n\n #if verbose:\n #print ['Layer {} Variance = {}'.format(i+1, np.var(l[:])) for i,l in enumerate([a1, a2, cache3[0]])][:]\n if y is None:\n return scores\n data_loss,d_softmax = softmax_loss(scores,y)\n data_loss += reg * (np.sum(W1*W1) + np.sum(W2*W2) + np.sum(W3*W3))\n '''\n max_scores = np.max(scores)\n scores -= max_scores\n correct_class_scores = scores[y,np.arange(N)]\n exp_score = np.exp(scores)\n sumexp = np.sum(exp_score,axis=0)\n loss_i = -correct_class_scores + np.log(sumexp)\n loss = np.sum(loss_i) / N \n ''' \t\n # Compute the backward pass\n \n d_affine_out3, dW3, db3 = affine_backward(d_softmax, cache3) \n d_relu2 = relu_backward(d_affine_out3, cache_relu2)\n \n d_affine_out2, dW2, db2 = affine_backward(d_relu2, cache2) \n d_relu1 = relu_backward(d_affine_out2, cache_relu1)\n \n d_affine_out1, dW1, db1 = affine_backward(d_relu1, cache1) \n \n #\n reg_loss = 0\n\n loss = data_loss + reg_loss\n grads = {'W1': dW1, 'b1': db1, 'W2': dW2, 'b2': db2,'W3':dW3,'b3':db3}\n \n return loss, grads", "def three_layer_convnet(x, params):\n conv_w1, conv_b1, conv_w2, conv_b2, fc_w, fc_b = params\n scores = None\n ################################################################################\n # Implement the forward pass for the three-layer ConvNet. 
#\n ################################################################################\n conv1 = F.conv2d(x, conv_w1, conv_b1, stride=1, padding=2)\n relu1 = F.relu(conv1)\n conv2 = F.conv2d(relu1, conv_w2, conv_b2, stride=1, padding=1)\n relu2 = F.relu(conv2)\n relu_flatten = flatten(relu2)\n scores = relu_flatten.mm(fc_w) + fc_b\n ################################################################################\n # END OF YOUR CODE #\n ################################################################################\n return scores", "def check_layer_gradient(layer, x, delta=1e-5, tol=1e-4):\n output = layer.forward(x)\n np.random.seed(10)\n #output_weight = np.random.randn(*output.shape)\n output_weight = np.ones_like(output)\n #print('output_weight',output_weight)\n\n def helper_func(x):\n output = layer.forward(x)\n loss = np.sum(output * output_weight)\n #print('loss',loss)\n d_out = np.ones_like(output) * output_weight\n grad = layer.backward(d_out)\n return loss, grad\n\n return check_gradient(helper_func, x, delta, tol)", "def checkBatchGradient():\n\n from mynnet import InputLayer\n\n n,b,d,o = (1, 4, 3, 7) # sequence length, batch size, hidden size, output size\n input_size = 10\n \n lstm = create_cell(input_size, (n,b,d,o))\n\n X = np.random.randn(n,b,input_size)\n c0 = np.random.randn(b,d)\n \n print \"c0:\", c0\n\n # batch forward backward\n H, Ct = lstm.forward(X, c0)\n wrand = np.random.randn(*H.shape)\n loss = np.sum(H * wrand) # weighted sum is a nice hash to use I think\n dH = wrand\n dX, dW, dV, dc0 = lstm.backward(dH)\n\n def fwd():\n h, _ = lstm.forward(X, c0)\n return np.sum(h * wrand)\n\n # now gradient check all\n delta = 1e-7\n rel_error_thr_warning = 1e-2\n rel_error_thr_error = 1\n tocheck = [X, lstm.W, lstm.V, c0]\n grads_analytic = [dX, dW, dV, dc0]\n names = ['X', 'W', 'V', 'c0']\n for j in xrange(len(tocheck)):\n mat = tocheck[j]\n dmat = grads_analytic[j]\n name = names[j]\n # gradcheck\n for i in xrange(mat.size):\n old_val = mat.flat[i]\n mat.flat[i] = old_val + delta\n loss0 = fwd()\n mat.flat[i] = old_val - delta\n loss1 = fwd()\n mat.flat[i] = old_val\n\n grad_analytic = dmat.flat[i]\n grad_numerical = (loss0 - loss1) / (2 * delta)\n\n if grad_numerical == 0 and grad_analytic == 0:\n rel_error = 0 # both are zero, OK.\n status = 'OK'\n elif abs(grad_numerical) < 1e-7 and abs(grad_analytic) < 1e-7:\n rel_error = 0 # not enough precision to check this\n status = 'VAL SMALL WARNING'\n else:\n rel_error = abs(grad_analytic - grad_numerical) / abs(grad_numerical + grad_analytic)\n status = 'OK'\n if rel_error > rel_error_thr_warning: status = 'WARNING'\n if rel_error > rel_error_thr_error: status = '!!!!! 
NOTOK'\n\n # print stats\n print '%s checking param %s index %s (val = %+8f), analytic = %+8f, numerical = %+8f, relative error = %+8f' \\\n % (status, name, `np.unravel_index(i, mat.shape)`, old_val, grad_analytic, grad_numerical, rel_error)", "def loss_func_c1():\n output_tensor = global_model(Zb_elem_tensor)\n # now output_tensor contains list of output tensors, shape: (n_elem, 2, 1)\n\n grad_dudx = K.gradients(output_tensor, Zb_elem_tensor)\n # now grad_dudx contains gradients,\n # shape is (n_elem, 2, 1)\n # grad_dudx is a list of (2,1) arrays\n\n C0_concat = K.concatenate(output_tensor, 0) # shape (n_elem*2, 1)\n C1_concat = K.concatenate(grad_dudx, 0) # shape (n_elem*2, 1)\n\n C0_residual = K.dot(bound_assembly_tensor, C0_concat) # shape: (n_elem-1, 1)\n C1_residual = K.dot(bound_assembly_tensor, C1_concat) # shape: (n_elem-1, 1)\n\n this_loss = K.sum(K.square(C0_residual)) + K.sum(K.square(C1_residual))\n return this_loss", "def test_nn_deep_backward_propagation_gradient_checking_relu():\n layers = np.random.randint(2, 5, np.random.randint(3, 5))\n layer_dims, activations = deep._split_layer_dims_activations(layers)\n m = np.random.randint(1, 10)\n X = np.random.randn(layer_dims[0], m)\n Y = np.random.randint(0, 2, size=(layer_dims[-1], m))\n\n # 1) Compute gradients with nn.deep._backward_propagation()\n parameters = deep._initialize_parameters(layer_dims)\n Y_computed, cache = deep._forward_propagation(X, parameters, activations)\n gradients = deep._backward_propagation(cache, Y_computed, Y, activations)\n\n # 2) Compute gradients manually\n epsilon = 10**-6\n for key, parameter in parameters.items():\n\n if key[0] == 'W':\n continue\n for index, element in np.ndenumerate(parameter):\n # Get J(..., x + e, ...)\n parameter[index] = element + epsilon\n Y_computed, _ = deep._forward_propagation(X, parameters, activations)\n cost_plus = deep._compute_cost(Y_computed, Y)\n\n # Get J(..., x - e, ...)\n parameter[index] = element - epsilon\n Y_computed, _ = deep._forward_propagation(X, parameters, activations)\n cost_minus = deep._compute_cost(Y_computed, Y)\n\n # Estimate gradient\n estimated_gradient = (cost_plus - cost_minus) / (2 * epsilon)\n gradient = gradients['d' + str(key)][index]\n\n # Relative Error: http://cs231n.github.io/neural-networks-3/\n if np.abs(gradient) != 0 or np.abs(estimated_gradient) != 0:\n relative_error = np.abs(gradient - estimated_gradient) \\\n / max(np.abs(gradient), np.abs(estimated_gradient))\n else:\n relative_error = np.abs(gradient - estimated_gradient)\n\n print('Parameter : ' + key)\n print('index : ' + str(index))\n print('Backpropagation : ' + str(gradient))\n print('Gradient Checking : ' + str(estimated_gradient))\n print('Relative Error : ' + str(relative_error))\n print()\n\n assert relative_error < 10 ** -4\n\n # Reset parameter\n parameter[index] = element", "def run_net_3_conv_conv_fully_connected_softmax_with_relu():\n functs = functions.Functions()\n training_data, validation_data, test_data = cnn.load_data_shared()\n\n fcn_n_inp, fcn_n_out = 40 * 4 * 4, 100\n sl_n_inp, sl_n_out = 100, 10\n epochs, mini_batch_size, eta, lmbda = 60, 10, 0.03, 0.1\n\n conv_1_image = (mini_batch_size, 1, 28, 28)\n conv_1_filter = (20, 1, 5, 5)\n\n conv_2_image = (mini_batch_size, 20, 12, 12)\n conv_2_filter = (40, 20, 5, 5)\n\n layers = [\n cnn.ConvPoolLayer(conv_1_filter, conv_1_image, activation_fn=functs.relu),\n cnn.ConvPoolLayer(conv_2_filter, conv_2_image, activation_fn=functs.relu),\n cnn.FullyConnectedLayer(fcn_n_inp, fcn_n_out, 
activation_fn=functs.relu),\n cnn.SoftmaxLayer(sl_n_inp, sl_n_out),\n ]\n net = network_test_3.NetworkTest3(layers, mini_batch_size)\n net.stochastic_gradient_descent(training_data, epochs, mini_batch_size, eta, validation_data, test_data,\n lmbda=lmbda,\n print_mini_batch_iteration=False)", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def gradient_descent_epoch(train_data, train_labels, learning_rate, batch_size, params, forward_prop_func, backward_prop_func):\n\n # *** START CODE HERE ***\n #update rule for batch gradient descent is to select batches w/out replacement:\n #for each, calculate output, calculate loss and update parameter\n\n train_size = train_data.shape[0]\n cur_idx = 0\n while(train_size-cur_idx > batch_size): #run with full batch\n cur_data = train_data[cur_idx:cur_idx+batch_size]\n cur_labels = train_labels[cur_idx:cur_idx+batch_size]\n losses = backward_prop_func(cur_data,cur_labels,params,forward_prop_func)\n #updates\n params['W2'] = params['W2'] - (learning_rate * losses['W2'])\n params['W1'] = params['W1'] - (learning_rate * losses['W1'])\n params['b2'] = params['b2'] - (learning_rate * losses['b2'])\n params['b1'] = params['b1'] - (learning_rate * losses['b1'])\n\n cur_idx+=batch_size\n\n #last batch\n cur_data = train_data[cur_idx:]\n cur_labels = train_labels[cur_idx:]\n losses = backward_prop_func(cur_data,cur_labels,params,forward_prop_func)\n #updates\n params['W2'] = params['W2'] - (learning_rate * losses['W2'])\n params['W1'] = params['W1'] - (learning_rate * losses['W1'])\n params['b2'] = params['b2'] - (learning_rate * losses['b2'])\n params['b1'] = params['b1'] - (learning_rate * losses['b1'])\n\n # *** END CODE HERE ***\n\n # This function does not return anything\n return", "def compute_gradients(self, loss: Mapping[str, Any]) -> ParamDictType:", "def a3c_loss(R, v_pred, pi, pi_one_hot, vloss_frac, ploss_frac, hregu_frac):\n pi = pi + 1e-8; # To avoid log zero\n pi_one_hot = pi_one_hot + 1e-8;\n with tf.name_scope('a3c_loss'):\n with tf.name_scope('value_mse_loss'):\n v_mse_loss = 0.5 * tf.reduce_sum(tf.square(R - v_pred));\n with tf.name_scope('policy_loss'):\n policy_loss = tf.reduce_sum(tf.log(pi_one_hot) * tf.stop_gradient(R - v_pred));\n with tf.name_scope('entropy'):\n entropy = -tf.reduce_sum(pi * tf.log(pi));\n with tf.name_scope('weighted_loss'):\n loss = vloss_frac * v_mse_loss - (ploss_frac * policy_loss + \n hregu_frac * entropy); \n \n return loss;", "def check_layer_gradient(layer, x, delta=1e-5, tol=1e-4):\n output = layer.forward(x)\n if isinstance(output, list):\n output = output[0]\n output_weight = CP.cp.random.randn(*output.shape)\n\n def helper_func(x):\n output = layer.forward(x)\n if isinstance(output, list):\n output = output[0]\n loss = CP.cp.sum(output * output_weight)\n d_out = CP.cp.ones_like(output) * output_weight\n grad = layer.backward(d_out)\n return loss, grad\n\n return check_gradient(helper_func, x, delta, tol)", "def test_gradientW(self):\n n, h, m, t = 3, 2, 4, 5\n init = np.zeros((m, h))\n cell = pvml.RNNBasicCell(n, h)\n X = np.random.randn(m, t, n)\n H = cell.forward(X, np.zeros((m, h)))\n L = H.sum()\n DZ = cell.backward(H, np.ones_like(H), init)[0]\n gradients = cell.parameters_grad(X, H, DZ, init)\n eps = 1e-7\n for p in range(3):\n for index in np.ndindex(*gradients[p].shape):\n backup = cell.parameters()[p][index]\n with self.subTest(parameter=p, index=index):\n 
cell.parameters()[p][index] += eps\n H1 = cell.forward(X, init)\n L1 = H1.sum()\n D = (L1 - L) / eps\n self.assertAlmostEqual(gradients[p][index], D, 5)\n cell.parameters()[p][index] = backup", "def _eval_loss_and_grads(self, img):\n # 0 learning phase for 'test'\n return self.overall_loss_grad_function([img, 0])", "def verify_gradients(self):\n\n print 'WARNING: calling verify_gradients reinitializes the learner'\n\n rng = np.random.mtrand.RandomState(1234)\n\n self.seed = 1234\n self.sizes = [4, 5]\n self.initialize(20, 3)\n example = (rng.rand(20) < 0.5, 2)\n input, target = example\n epsilon = 1e-6\n self.lr = 0.1\n self.decrease_constant = 0\n\n self.fprop(input, target)\n self.bprop(input, target) # compute gradients\n\n import copy\n emp_grad_weights = copy.deepcopy(self.weights)\n\n for h in range(len(self.weights)):\n for i in range(self.weights[h].shape[0]):\n for j in range(self.weights[h].shape[1]):\n self.weights[h][i, j] += epsilon\n a = self.fprop(input, target)\n self.weights[h][i, j] -= epsilon\n\n self.weights[h][i, j] -= epsilon\n b = self.fprop(input, target)\n self.weights[h][i, j] += epsilon\n\n emp_grad_weights[h][i, j] = (a - b) / (2. * epsilon)\n\n print 'grad_weights[0] diff.:', np.sum(np.abs(self.grad_weights[0].ravel() - emp_grad_weights[0].ravel())) / \\\n self.weights[0].ravel().shape[0]\n print 'grad_weights[1] diff.:', np.sum(np.abs(self.grad_weights[1].ravel() - emp_grad_weights[1].ravel())) / \\\n self.weights[1].ravel().shape[0]\n print 'grad_weights[2] diff.:', np.sum(np.abs(self.grad_weights[2].ravel() - emp_grad_weights[2].ravel())) / \\\n self.weights[2].ravel().shape[0]\n\n emp_grad_biases = copy.deepcopy(self.biases)\n for h in range(len(self.biases)):\n for i in range(self.biases[h].shape[0]):\n self.biases[h][i] += epsilon\n a = self.fprop(input, target)\n self.biases[h][i] -= epsilon\n\n self.biases[h][i] -= epsilon\n b = self.fprop(input, target)\n self.biases[h][i] += epsilon\n\n emp_grad_biases[h][i] = (a - b) / (2. * epsilon)\n\n print 'grad_biases[0] diff.:', np.sum(np.abs(self.grad_biases[0].ravel() - emp_grad_biases[0].ravel())) / \\\n self.biases[0].ravel().shape[0]\n print 'grad_biases[1] diff.:', np.sum(np.abs(self.grad_biases[1].ravel() - emp_grad_biases[1].ravel())) / \\\n self.biases[1].ravel().shape[0]\n print 'grad_biases[2] diff.:', np.sum(np.abs(self.grad_biases[2].ravel() - emp_grad_biases[2].ravel())) / \\\n self.biases[2].ravel().shape[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply a one-mode gate G in place to the process matrix T in mode i
def _apply_one_mode_gate(G, T, i): T[i] *= G return T
[ "def _apply_two_mode_gate(G, T, i, j):\n (T[i], T[j]) = (G[0, 0] * T[i] + G[0, 1] * T[j], G[1, 0] * T[i] + G[1, 1] * T[j])\n return T", "def _apply_gate(self, mat, modes):\n\n args = [mat, self._state, self._pure, modes, self._num_modes, self._trunc]\n if self._mode == 'blas':\n self._state = ops.apply_gate_BLAS(*args)\n elif self._mode == 'einsum':\n self._state = ops.apply_gate_einsum(*args)\n else:\n raise NotImplementedError", "def apply_gate(self, gate, target):\n program.apply_gate(\n self.queue,\n [int(2**self.num_qubits / 2)],\n None,\n self.buffer.data,\n np.int32(target),\n self.dtype(gate.a),\n self.dtype(gate.b),\n self.dtype(gate.c),\n self.dtype(gate.d)\n )", "def gate_matrix_1q(self, qid, gate_matrix):\n ops = [tf.eye(2, dtype=tf.complex64) for i in range(self._n)]\n ops[qid] = gate_matrix\n for i in range(len(ops)):\n if i == 0:\n out = ops[i]\n else:\n out = self.kronecker_product(out, ops[i])\n full_matrix = out\n return full_matrix", "def iSWAP(invert):\n gate = np.zeros(4 * 4, dtype=complex)\n gate = gate.reshape(4, 4)\n\n gate[0][0] = 1\n if not invert:\n gate[1][2] = 1j\n gate[2][1] = 1j\n else:\n gate[1][2] = -1j\n gate[2][1] = -1j\n gate[3][3] = 1\n\n return gate", "def forward(A0, n):\n\n A = A0\n for L in range(1, n+1): # op_i produces A_i\n A = tf.tanh(A, name=\"A\"+str(L))\n return A", "def SWAP():\n gate = np.zeros(4 * 4, dtype=complex)\n gate = gate.reshape(4, 4)\n\n gate[0][0] = 1\n gate[1][2] = 1\n gate[2][1] = 1\n gate[3][3] = 1\n\n return gate", "def optimize_generator(self):\n self.optimizer_g_a.step()\n self.optimizer_g_b.step()", "def traverse(op):\n # inline all one-to-one-mapping operators except the last stage (output)\n if tag.is_injective(op.tag):\n if op not in s.outputs:\n s[op].compute_inline()\n for tensor in op.input_tensors:\n if isinstance(tensor.op, tvm.tensor.ComputeOp) and tensor.op not in scheduled_ops:\n traverse(tensor.op)\n\n if 'conv2d_transpose_nchw' in op.tag:\n C = op.output(0)\n\n N, OC, OH, OW = C.op.axis\n rc, ry, rx = C.op.reduce_axis\n\n OH, oh = s[C].split(OH, factor=2)\n OC, oc = s[C].split(OC, factor=32)\n IC, ic = s[C].split(rc, factor=32)\n\n s[C].reorder(N, OC, OH, OW, oc, IC, ry, rx, ic)\n N = s[C].fuse(N, OC)\n s[C].vectorize(oc)\n s[C].parallel(N)\n\n scheduled_ops.append(op)", "def test_one_qubit_gate_multiplication(backend):\n import qibo\n original_backend = qibo.get_backend()\n qibo.set_backend(backend)\n gate1 = gates.X(0)\n gate2 = gates.H(0)\n final_gate = gate1 @ gate2\n assert final_gate.__class__.__name__ == \"Unitary\"\n target_matrix = (np.array([[0, 1], [1, 0]]) @\n np.array([[1, 1], [1, -1]]) / np.sqrt(2))\n np.testing.assert_allclose(final_gate.unitary, target_matrix)\n\n final_gate = gate2 @ gate1\n assert final_gate.__class__.__name__ == \"Unitary\"\n target_matrix = (np.array([[1, 1], [1, -1]]) / np.sqrt(2) @\n np.array([[0, 1], [1, 0]]))\n np.testing.assert_allclose(final_gate.unitary, target_matrix)\n\n gate1 = gates.X(1)\n gate2 = gates.X(1)\n assert (gate1 @ gate2).__class__.__name__ == \"I\"\n assert (gate2 @ gate1).__class__.__name__ == \"I\"\n qibo.set_backend(original_backend)", "def _run_accelerated(queue, t_sorted, t_order, t_inv, p_inv, p_inv_t,\n conns, idx, indptr, n_steps):\n from numba import njit\n try:\n from numba.core.errors import NumbaPendingDeprecationWarning\n except ModuleNotFoundError:\n from numba.errors import NumbaPendingDeprecationWarning\n warnings.simplefilter('ignore', category=NumbaPendingDeprecationWarning)\n\n @njit\n def wrapper(queue, t_sorted, t_order, t_inv, 
p_inv, p_inv_t, conns,\n idx, indptr, n_steps):\n count = 0\n while (len(queue) > 0) and (count < n_steps):\n # Find throat at the top of the queue\n t = hq.heappop(queue)\n # Extract actual throat number\n t_next = t_sorted[t]\n t_inv[t_next] = count\n # If throat is duplicated\n while len(queue) > 0 and queue[0] == t:\n # Note: Preventing duplicate entries below might save some time\n t = hq.heappop(queue)\n # Find pores connected to newly invaded throat\n Ps = conns[t_next]\n # Remove already invaded pores from Ps\n Ps = Ps[p_inv[Ps] < 0]\n if len(Ps) > 0:\n p_inv[Ps] = count\n p_inv_t[Ps] = t_next\n for i in Ps:\n Ts = idx[indptr[i]:indptr[i+1]]\n Ts = Ts[t_inv[Ts] < 0]\n for i in set(Ts): # set(Ts) to exclude repeated neighbor throats\n hq.heappush(queue, t_order[i])\n count += 1\n return t_inv, p_inv, p_inv_t\n\n return wrapper(queue, t_sorted, t_order, t_inv, p_inv, p_inv_t, conns,\n idx, indptr, n_steps)", "def problem_reduction_single(self, i, val):\n y_update = - val * self.A.getcol(i).toarray().flatten()\n self.y += y_update\n self.A = sparse.hstack([self.A[:, :i], self.A[:, i + 1:]], format='csr')\n z_index = self.mask.searchsorted(i)\n self.mask = np.insert(self.mask, z_index, i)\n self.z = np.insert(self.z, z_index, val)", "def insert_single_gate(self,gate,qbit,ind):\n for i in range(self.n):\n if i == qbit:\n self.qubits[i].circ.insert(ind,gate)\n continue\n self.qubits[i].circ.insert(ind,I())\n self.control_list.insert(ind,I())", "def transform(self,G):\n\n n = len(self.G_train_)\n nt = len(G)\n #Ks = sp.zeros((n,1))\n kernel_matrix = sp.zeros((nt,n))\n \n# for j in range(n):\n# Ks[j] = sp.sqrt(aGMKernel(self.G_train_[j],self.G_train_[j],self.alpha,self.gamma))\n# \n# for i in range(nt):\n# Kts = sp.sqrt(aGMKernel(G[i],G[i],self.alpha,self.gamma))\n# for j in range(n):\n# kernel_matrix[i,j] = aGMKernel(G[i],self.G_train_[j],self.alpha,self.gamma)/Kts/Ks[j]\n \n for i in range (nt):\n for j in range(n):\n kernel_matrix[i,j] = aGMKernel(G[i],self.G_train_[j],self.alpha, self.gamma)\n \n \n return kernel_matrix", "def calcT1(g2, g1):\n idop = FermiOp(g2.orbs, 3, 3)\n idop.data = np.eye(int(binom(g2.orbs, 3)))\n\n return p2N(g2, 3) - p2N(g1, 3) + idop", "def Swap():\n\n return Operator((1.0 + 0.0j) * np.array([[[[ 1.0, 0.0],\n [ 0.0, 0.0]],\n [[ 0.0, 0.0],\n [ 1.0, 0.0]]],\n [[[ 0.0, 1.0],\n [ 0.0, 0.0]],\n [[ 0.0, 0.0],\n [ 0.0, 1.0]]]]))", "def ggml_tanh_inplace(ctx: ffi.CData, a: ffi.CData) -> ffi.CData:\n ...", "def trans_o(self):\n temp_array = []\n for j in range(self.O.shape[1]):\n for i in range(self.V.shape[1]):\n if self.V[0, i] == self.O[0, j]:\n temp_array.append(i)\n self.O = mat(temp_array)", "def apply_single_gate(self, gate, q_id):\n q = self.shared_dict.get_queues_for_ids([q_id])[0]\n q.put([SINGLE_GATE, gate, q_id])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply a two-mode gate G in place to the process matrix T in modes i and j
def _apply_two_mode_gate(G, T, i, j): (T[i], T[j]) = (G[0, 0] * T[i] + G[0, 1] * T[j], G[1, 0] * T[i] + G[1, 1] * T[j]) return T
[ "def _apply_one_mode_gate(G, T, i):\n\n T[i] *= G\n return T", "def _apply_gate(self, mat, modes):\n\n args = [mat, self._state, self._pure, modes, self._num_modes, self._trunc]\n if self._mode == 'blas':\n self._state = ops.apply_gate_BLAS(*args)\n elif self._mode == 'einsum':\n self._state = ops.apply_gate_einsum(*args)\n else:\n raise NotImplementedError", "def iSWAP(invert):\n gate = np.zeros(4 * 4, dtype=complex)\n gate = gate.reshape(4, 4)\n\n gate[0][0] = 1\n if not invert:\n gate[1][2] = 1j\n gate[2][1] = 1j\n else:\n gate[1][2] = -1j\n gate[2][1] = -1j\n gate[3][3] = 1\n\n return gate", "def apply_2opt_exchange(visit_order, i, j):\n\n tmp = visit_order[i + 1: j + 1]\n tmp.reverse()\n visit_order[i + 1: j + 1] = tmp\n\n return visit_order", "def Swap():\n\n return Operator((1.0 + 0.0j) * np.array([[[[ 1.0, 0.0],\n [ 0.0, 0.0]],\n [[ 0.0, 0.0],\n [ 1.0, 0.0]]],\n [[[ 0.0, 1.0],\n [ 0.0, 0.0]],\n [[ 0.0, 0.0],\n [ 0.0, 1.0]]]]))", "def SWAP():\n gate = np.zeros(4 * 4, dtype=complex)\n gate = gate.reshape(4, 4)\n\n gate[0][0] = 1\n gate[1][2] = 1\n gate[2][1] = 1\n gate[3][3] = 1\n\n return gate", "def _swap_rows(self, i, j):\n L = np.eye(3, dtype=\"int_\")\n L[i, i] = 0\n L[j, j] = 0\n L[i, j] = 1\n L[j, i] = 1\n self._L.append(L.copy())\n self._A[:] = np.dot(L, self._A)", "def move_multi_wire_gates(self, operator_grid):\n n = operator_grid.num_layers\n i = -1\n while i < n - 1:\n i += 1\n\n this_layer = operator_grid.layer(i)\n layer_ops = _remove_duplicates(this_layer)\n other_layer = [None] * operator_grid.num_wires\n\n for j in range(len(layer_ops)):\n op = layer_ops[j]\n\n if op is None:\n continue\n\n # translate wires to their indices on the device\n wire_indices = self.active_wires.indices(op.wires)\n\n if len(op.wires) > 1:\n\n sorted_wires = wire_indices.copy()\n sorted_wires.sort()\n\n blocked_wires = list(range(sorted_wires[0], sorted_wires[-1] + 1))\n\n for k in range(j + 1, len(layer_ops)):\n other_op = layer_ops[k]\n\n if other_op is None:\n continue\n\n # translate wires to their indices on the device\n other_wire_indices = self.active_wires.indices(other_op.wires)\n other_sorted_wire_indices = other_wire_indices.copy()\n other_sorted_wire_indices.sort()\n other_blocked_wires = list(\n range(other_sorted_wire_indices[0], other_sorted_wire_indices[-1] + 1)\n )\n\n if not set(other_blocked_wires).isdisjoint(set(blocked_wires)):\n op_indices = [\n idx for idx, layer_op in enumerate(this_layer) if layer_op == op\n ]\n\n for l in op_indices:\n other_layer[l] = op\n this_layer[l] = None\n\n break\n\n if not all([item is None for item in other_layer]):\n operator_grid.insert_layer(i + 1, other_layer)\n n += 1", "def traverse(op):\n # inline all one-to-one-mapping operators except the last stage (output)\n if tag.is_injective(op.tag):\n if op not in s.outputs:\n s[op].compute_inline()\n for tensor in op.input_tensors:\n if isinstance(tensor.op, tvm.tensor.ComputeOp) and tensor.op not in scheduled_ops:\n traverse(tensor.op)\n\n if 'conv2d_transpose_nchw' in op.tag:\n C = op.output(0)\n\n N, OC, OH, OW = C.op.axis\n rc, ry, rx = C.op.reduce_axis\n\n OH, oh = s[C].split(OH, factor=2)\n OC, oc = s[C].split(OC, factor=32)\n IC, ic = s[C].split(rc, factor=32)\n\n s[C].reorder(N, OC, OH, OW, oc, IC, ry, rx, ic)\n N = s[C].fuse(N, OC)\n s[C].vectorize(oc)\n s[C].parallel(N)\n\n scheduled_ops.append(op)", "def compute_mode_2(self, half_ub_size, block_id):\n tik_instance = self.tik_instance\n indices_ub = tik_instance.Tensor(self.indices_dtype, ((half_ub_size + UB_2K_SIZE) // 
self.indices_dsize,),\n name=\"indices_ub\", scope=tik.scope_ubuf)\n res_ub = tik_instance.Tensor(self.params_dtype, ((half_ub_size + BLOCK_SIZE) // self.params_dsize,),\n name=\"res_ub\", scope=tik.scope_ubuf)\n x_ub = tik_instance.Tensor(self.params_dtype, (CACHE_UB_SIZE // self.params_dsize,),\n name=\"x_ub\", scope=tik.scope_ubuf)\n # cache params data in UB from gm\n tik_instance.data_move(x_ub, self.x, 0, 1, ceil_value(self.params_total, self.block_elem), 0, 0)\n\n # 1. indices_num_each_core: indices_row_num_once * indices_loop_num + indices_row_num_last\n # a. process indices_row_num_once * indices_loop_num\n with tik_instance.for_range(0, self.indices_loop_num) as indices_loop_i:\n indices_num_offset = block_id * self.indices_num_each_core + indices_loop_i * self.indices_row_num_once\n # move indices data to ub from gm\n tik_instance.data_move(indices_ub, self.indices[indices_num_offset * self.indices_last_dim], 0, 1,\n ceil_value(self.indices_row_num_once * self.indices_last_dim * self.indices_dsize,\n BLOCK_SIZE), 0, 0)\n\n # indices_row_num_once: row_num_once_ub * inner_loop_num + row_num_once_tail_ub\n # a1. process row_num_once_ub * inner_loop_num\n with tik_instance.if_scope(self.inner_loop_num > 0):\n self.process_loop_mode_2(self.inner_loop_num, indices_num_offset, indices_ub, res_ub, x_ub)\n\n # a2. process row_num_once_tail_ub\n with tik_instance.if_scope(self.row_num_once_tail_ub > 0):\n inner_indices_offset = self.inner_loop_num * self.row_num_once_ub\n self.process_last_mode_2(self.row_num_once_tail_ub, indices_num_offset, inner_indices_offset,\n indices_ub, res_ub, x_ub)\n\n # b. indices_row_num_last: row_num_once_ub * inner_loop_num_last + row_num_last_tail_ub\n with tik_instance.if_scope(self.indices_row_num_last > 0):\n indices_num_offset = block_id * self.indices_num_each_core + \\\n self.indices_loop_num * self.indices_row_num_once\n # copy indices data to ub from gm\n tik_instance.data_move(indices_ub, self.indices[indices_num_offset * self.indices_last_dim], 0, 1,\n ceil_value(self.indices_row_num_last * self.indices_last_dim * self.indices_dsize,\n BLOCK_SIZE), 0, 0)\n\n # b1. process row_num_once_ub * inner_loop_num_last\n with tik_instance.if_scope(self.inner_loop_num_last > 0):\n self.process_loop_mode_2(self.inner_loop_num_last, indices_num_offset, indices_ub, res_ub, x_ub)\n\n # b2. 
process row_num_last_tail_ub\n with tik_instance.if_scope(self.row_num_last_tail_ub > 0):\n inner_indices_offset = self.inner_loop_num_last * self.row_num_once_ub\n self.process_last_mode_2(self.row_num_last_tail_ub, indices_num_offset, inner_indices_offset,\n indices_ub, res_ub, x_ub)\n\n with tik_instance.if_scope(tik.all(self.indices_num_remaining > 0, block_id == self.tail_process_core)):\n self.process_remaining_tail_mode_2(indices_ub, res_ub, x_ub)", "def test_two_qubit_gate_multiplication(backend):\n import qibo\n original_backend = qibo.get_backend()\n qibo.set_backend(backend)\n theta, phi = 0.1234, 0.5432\n gate1 = gates.fSim(0, 1, theta=theta, phi=phi)\n gate2 = gates.SWAP(0, 1)\n final_gate = gate1 @ gate2\n target_matrix = (np.array([[1, 0, 0, 0],\n [0, np.cos(theta), -1j * np.sin(theta), 0],\n [0, -1j * np.sin(theta), np.cos(theta), 0],\n [0, 0, 0, np.exp(-1j * phi)]]) @\n np.array([[1, 0, 0, 0], [0, 0, 1, 0],\n [0, 1, 0, 0], [0, 0, 0, 1]]))\n np.testing.assert_allclose(final_gate.unitary, target_matrix)\n\n # Check that error is raised when target qubits do not agree\n with pytest.raises(NotImplementedError):\n final_gate = gate1 @ gates.SWAP(0, 2)\n # Reset backend for other tests\n qibo.set_backend(original_backend)", "def _run_accelerated(queue, t_sorted, t_order, t_inv, p_inv, p_inv_t,\n conns, idx, indptr, n_steps):\n from numba import njit\n try:\n from numba.core.errors import NumbaPendingDeprecationWarning\n except ModuleNotFoundError:\n from numba.errors import NumbaPendingDeprecationWarning\n warnings.simplefilter('ignore', category=NumbaPendingDeprecationWarning)\n\n @njit\n def wrapper(queue, t_sorted, t_order, t_inv, p_inv, p_inv_t, conns,\n idx, indptr, n_steps):\n count = 0\n while (len(queue) > 0) and (count < n_steps):\n # Find throat at the top of the queue\n t = hq.heappop(queue)\n # Extract actual throat number\n t_next = t_sorted[t]\n t_inv[t_next] = count\n # If throat is duplicated\n while len(queue) > 0 and queue[0] == t:\n # Note: Preventing duplicate entries below might save some time\n t = hq.heappop(queue)\n # Find pores connected to newly invaded throat\n Ps = conns[t_next]\n # Remove already invaded pores from Ps\n Ps = Ps[p_inv[Ps] < 0]\n if len(Ps) > 0:\n p_inv[Ps] = count\n p_inv_t[Ps] = t_next\n for i in Ps:\n Ts = idx[indptr[i]:indptr[i+1]]\n Ts = Ts[t_inv[Ts] < 0]\n for i in set(Ts): # set(Ts) to exclude repeated neighbor throats\n hq.heappush(queue, t_order[i])\n count += 1\n return t_inv, p_inv, p_inv_t\n\n return wrapper(queue, t_sorted, t_order, t_inv, p_inv, p_inv_t, conns,\n idx, indptr, n_steps)", "def trans_o(self):\n temp_array = []\n for j in range(self.O.shape[1]):\n for i in range(self.V.shape[1]):\n if self.V[0, i] == self.O[0, j]:\n temp_array.append(i)\n self.O = mat(temp_array)", "def prepare_multimode(self, state, modes):\n if isinstance(modes, int):\n modes = [modes]\n\n n_modes = len(modes)\n pure_shape = tuple([self._trunc]*n_modes)\n mixed_shape = tuple([self._trunc]*(2*n_modes))\n pure_shape_as_vector = tuple([self._trunc**n_modes])\n mixed_shape_as_matrix = tuple([self._trunc**n_modes]*2)\n\n # Do consistency checks\n if self._checks:\n if state.shape != pure_shape and state.shape != mixed_shape \\\n and \\\n state.shape != pure_shape_as_vector and state.shape != mixed_shape_as_matrix:\n raise ValueError(\"Incorrect shape for state preparation\")\n if len(modes) != len(set(modes)):\n raise ValueError(\"The specified modes cannot appear multiple times.\")\n\n # reshape to support input both as tensor and 
vector/matrix\n if state.shape == pure_shape_as_vector:\n state = state.reshape(pure_shape)\n elif state.shape == mixed_shape_as_matrix:\n state = state.reshape(mixed_shape)\n\n if self._num_modes == n_modes:\n # Hack for marginally faster state preparation\n self._state = state.astype(ops.def_type)\n self._pure = bool(state.shape == pure_shape)\n else:\n if self._pure:\n self._state = ops.mix(self._state, self._num_modes)\n self._pure = False\n\n if state.shape == pure_shape:\n state = ops.mix(state, len(modes))\n\n # Take the partial trace\n # todo: For performance the partial trace could be done directly from the pure state. This would of course require a better partial trace function...\n reduced_state = ops.partial_trace(self._state, self._num_modes, modes)\n\n # Insert state at the end (I know there is also tensor() from ops but it has extra aguments wich only confuse here)\n self._state = np.tensordot(reduced_state, state, axes=0)\n\n # unless the preparation was meant to go into the last modes in the standard order, we need to swap indices around\n if modes != list(range(self._num_modes-len(modes), self._num_modes)):\n mode_permutation = [x for x in range(self._num_modes) if x not in modes] + modes\n if self._pure:\n scale = 1\n index_permutation = mode_permutation\n else:\n scale = 2\n index_permutation = [scale*x+i for x in mode_permutation for i in (0, 1)] #two indices per mode if we have pure states\n index_permutation = np.argsort(index_permutation)\n\n self._state = np.transpose(self._state, index_permutation)", "def two_bs2x4_transform_opt(t1, r1, t2, r2, input_state):\n size = len(input_state)\n out = np.zeros((size,) * 4, dtype=complex)\n\n def coef(k1, k2, k3, k4):\n return t1 ** k2 * (1j * r1) ** k1 * t2 ** k4 * (1j * r2) ** k3 / (factorial(k1) * factorial(k2) * factorial(k3) * factorial(k4))\n\n # index 'i' = (m,n,k,l)\n for i in np.ndindex(size, size, size, size):\n if i[2] <= i[0] and i[3] <= i[1] and i[0] + i[1] < size:\n out[i[2], i[0] - i[2], i[3], i[1] - i[3]] = coef(i[2], i[0] - i[2], i[3], i[1] - i[3]) * input_state[i[0], i[1]] * factorial(i[0]) * factorial(i[1])\n\n return out", "def test_one_qubit_gate_multiplication(backend):\n import qibo\n original_backend = qibo.get_backend()\n qibo.set_backend(backend)\n gate1 = gates.X(0)\n gate2 = gates.H(0)\n final_gate = gate1 @ gate2\n assert final_gate.__class__.__name__ == \"Unitary\"\n target_matrix = (np.array([[0, 1], [1, 0]]) @\n np.array([[1, 1], [1, -1]]) / np.sqrt(2))\n np.testing.assert_allclose(final_gate.unitary, target_matrix)\n\n final_gate = gate2 @ gate1\n assert final_gate.__class__.__name__ == \"Unitary\"\n target_matrix = (np.array([[1, 1], [1, -1]]) / np.sqrt(2) @\n np.array([[0, 1], [1, 0]]))\n np.testing.assert_allclose(final_gate.unitary, target_matrix)\n\n gate1 = gates.X(1)\n gate2 = gates.X(1)\n assert (gate1 @ gate2).__class__.__name__ == \"I\"\n assert (gate2 @ gate1).__class__.__name__ == \"I\"\n qibo.set_backend(original_backend)", "def test_enhancement_matrices(self):\n\n # Original solution order\n solution_order = 1\n\n # Modes we want to use from the neighbor\n modes = [1]\n\n # Get the enhancement matrix\n Al, Alinv, Ar, Arinv = enhance.enhancement_matrices(\n solution_order, modes)\n\n # Make sure both left and right enhancements are correct\n npt.assert_array_almost_equal(Al, np.array([[1., 0., 0.],\n [0., 1., 0.],\n [0., 1., 6.]]),\n decimal=13)\n\n npt.assert_array_almost_equal(Ar, np.array([[1., 0., 0.],\n [0., 1., 0.],\n [0., 1., -6.]]),\n decimal=13)", "def 
migrateBetweenTwoPops(self, mode, popIndex1, popIndex2, individualIndexs1, individualIndexs2):\n if mode == 'replace':\n for i in range(len(individualIndexs1)):\n self.model[popIndex2].pop[individualIndexs2[i]] = copy.deepcopy(\n self.model[popIndex1].pop[individualIndexs1[i]]) # 要深copy才保险\n elif mode == 'exchange':\n for i in range(len(individualIndexs1)):\n self.model[popIndex1].pop[individualIndexs1[i]], self.model[popIndex2].pop[individualIndexs2[i]] = \\\n self.model[popIndex2].pop[individualIndexs2[i]], self.model[popIndex1].pop[individualIndexs1[i]]", "def perform_gauss_jordan_elimination(m, show):\n if show:\n print(\"Initial State\")\n print_matrix(m)\n\n r, c = 0, 0\n rows = len(m)\n cols = len(m[0])\n\n if show:\n print(\"rows: %s cols: %s\"%(rows, cols))\n\n while True:\n _swap = False\n\n if show:\n print(\"r %s c %s\"%(r, c))\n\n ## Check Pivot\n if m[r][c] == 0:\n ## Swap\n for i in range(rows):\n if r != i and i > r: ## Avoid comparing the same row and do not swap to upper rows\n if m[i][c] == 1 and not _swap: ## Check if a swap is not performed before in the same column\n if show:\n print(\"Swapping %s %s and %s %s\"%(r, m[r], i, m[i]))\n #m = swap(m,r,i)\n temp = m[r]\n m[r] = m[i]\n m[i] = temp\n _swap = True\n if show:\n print_matrix(m)\n if not _swap: ## If not swap, means there is no 1 to swap, so go to the next column\n c+=1\n\n if m[r][c] == 1:\n ## XOR\n for i in range(rows):\n if r != i: ## Avoid comparing the same row\n if m[i][c] == 1:\n if show:\n print(\"XOR Row %s: %s into Row %s: %s\"%(r, m[r], i, m[i]))\n for e in range(len(m[0])):\n m[i][e] ^= m[r][e]\n if show:\n print_matrix(m)\n\n ## Increase row and column\n r+=1\n c+=1\n\n ## break condition if all rows or all columns (except the augmented column are treated)\n if r == rows or c >= cols-1:\n break\n \n return m" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of the columns that are in our features dataframe that should not be used in prediction. These are essentially either metadata columns (team name, for example), or potential target variables that include the outcome. We want to make sure not to use the latter, since we don't want to use information about the current game to predict that same game.
def get_non_feature_columns(): return ['teamid', 'op_teamid', 'matchid', 'competitionid', 'seasonid', 'goals', 'op_goals', 'points', 'timestamp', 'team_name', 'op_team_name']
[ "def only_feature_columns(self) -> list:\n return self._tf_feature_cols", "def no_export_feature_columns(self) -> list:\n return self._tf_no_export_cols", "def get_features(self, df):\n return df.drop(df.columns[self.target_col], axis=1)", "def missing_columns(self):\r\n _missing_columns = set(self.reqd_columns).difference(set(self.all_columns))\r\n return list(_missing_columns)", "def get_feature_columns(all_cols):\n return [col for col in all_cols if col not in get_non_feature_columns()]", "def _unselected_columns(self, X):\n X_columns = list(X.columns)\n return [column for column in X_columns if\n column not in self._selected_columns]", "def _get_target_only_columns(self, df: DataFrame) -> DataFrame:\n target_table_columns = self.target_table.get_columns()\n \n # if mutation of incoming df is desired, make a deepcopy here\n filtered_df = df\n for column in filtered_df.columns:\n if column not in target_table_columns:\n print(f'dropping unused column \"{column}\"')\n filtered_df = filtered_df.drop(column)\n \n return filtered_df", "def colnames_excluded(self):\n return [col\n for col, keep in zip(self.cache.colnames, self._colmask) \n if not keep]", "def get_cols(df):\n meta = get_metafeatures(df)\n categorical_columns = meta.loc[meta['type'] == 'object', 'column'].tolist()\n cols_to_drop = meta.loc[meta['missing'] > 0.5, 'column'].tolist()\n logging.debug('%s categorical columns found', len(categorical_columns))\n logging.debug('%s columns will be dropped', len(cols_to_drop))\n return categorical_columns, cols_to_drop", "def get_valid_columns(self):\r\n return self.target_columns", "def get_not_null_columns(cls):\n column_list = cls.get_columns()\n return [column for column in column_list if column.not_null]", "def ignored_columns(self):\n return self._parms.get(\"ignored_columns\")", "def drop_extra_columns(self):\n table = self.data.loc[:, self._required_columns]\n return self.as_dataframe(table)", "def old_non_pk_column_list(self):\n return [\n col.name\n for col in self._old_table.column_list\n if col.name not in self._pk_for_filter\n and col.name not in self.dropped_column_name_list\n ]", "def get_cols_drop():", "def list_feature_drop(self):\n \n list_to_drop = list()\n list_not_in_df = list()\n \n #-------------------------------------------------------------------------\n # Columns are checked to be into df_invoice_line dataframe\n #-------------------------------------------------------------------------\n for col in self._list_feature_to_drop:\n if col in self.df_invoice_line.columns:\n list_to_drop.append(col)\n else:\n list_not_in_df.append(col)\n \n if 0 == len(list_to_drop):\n self.strprint(\"\\n*** ERROR : no element in list belonging to dataframe!\")\n else:\n if len(self._list_feature_to_drop) != len(list_to_drop):\n self.strprint(\"\\n*** WARNING : followings features do not belong to \\\n dataframe : {}\".format(list_not_in_df))\n else:\n pass\n list_col_keep \\\n = [col for col in self.df_invoice_line.columns \\\n if col not in list_to_drop]\n s\n self.df_invoice_line = self.df_invoice_line[list_col_keep]\n return", "def drop_cols(features, cols_to_drop):\n print('Dropping specific column(s)...', end=\" \")\n features = features.drop(cols_to_drop, axis=1)\n print('Finished.')\n return features", "def get_all_hidden_columns(self):\n visible_columns_list = []\n column_headers = self.driver.find_elements_by_xpath('//thead/tr/th')\n for i in range(len(column_headers)):\n if column_headers[i].get_attribute('class') == 'ng-scope ng-hide':\n visible_columns_list.append(i + 
1)\n return visible_columns_list", "def _remove_columns_from_metadata(metadata, excluded_columns):\n generated = schema_utils.schema_as_feature_spec(metadata.schema)\n new_feature_spec = {\n name: spec\n for name, spec in generated.feature_spec.items()\n if name not in excluded_columns\n }\n new_domains = {\n name: spec\n for name, spec in generated.domains.items()\n if name not in excluded_columns\n }\n return dataset_metadata.DatasetMetadata.from_feature_spec(\n new_feature_spec, new_domains)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of all columns that should be used in prediction (i.e. all features that are in the dataframe but are not in the features.get_non_feature_columns() list).
def get_feature_columns(all_cols): return [col for col in all_cols if col not in get_non_feature_columns()]
[ "def only_feature_columns(self) -> list:\n return self._tf_feature_cols", "def get_cols(df):\n meta = get_metafeatures(df)\n categorical_columns = meta.loc[meta['type'] == 'object', 'column'].tolist()\n cols_to_drop = meta.loc[meta['missing'] > 0.5, 'column'].tolist()\n logging.debug('%s categorical columns found', len(categorical_columns))\n logging.debug('%s columns will be dropped', len(cols_to_drop))\n return categorical_columns, cols_to_drop", "def _selected_columns(self):\n selected_columns = set()\n for feature in self.features:\n columns = feature[0]\n if isinstance(columns, list):\n selected_columns = selected_columns.union(set(columns))\n else:\n selected_columns.add(columns)\n return selected_columns", "def no_export_feature_columns(self) -> list:\n return self._tf_no_export_cols", "def get_features(self, df):\n return df.drop(df.columns[self.target_col], axis=1)", "def get_df_cols(self, data: pd.DataFrame):\n return [col for col in self.columns if col in data.columns]", "def all_feature_columns(self) -> list:\n return (\n self._tf_feature_cols\n + self._label_tf_feature_cols\n + self._id_tf_feature_cols\n )", "def get_non_feature_columns():\n return ['teamid', 'op_teamid', 'matchid', 'competitionid', 'seasonid',\n 'goals', 'op_goals', 'points', 'timestamp', 'team_name', \n 'op_team_name']", "def label_feature_columns(self) -> list:\n return self._label_tf_feature_cols", "def get_all_numerical_features_list(input_df):\n features_list = [x for x in input_df.columns.values if x != 'email_address' and x != 'poi']\n features_list.insert(0,'poi') \n return features_list", "def get_valid_columns(self):\r\n return self.target_columns", "def missing_columns(self):\r\n _missing_columns = set(self.reqd_columns).difference(set(self.all_columns))\r\n return list(_missing_columns)", "def _unselected_columns(self, X):\n X_columns = list(X.columns)\n return [column for column in X_columns if\n column not in self._selected_columns]", "def get_columns_after_apply_mapping(self) -> List[str]:\n return self.get_dyf_and_apply_mapping().toDF().columns", "def get_features(df: pd.DataFrame) -> List[str]:\n return list(set([col.split('_')[1] for col in df.columns]))", "def get_feature_names(self):\n\n return self.columns", "def get_dense_feature_columns(self) -> List[FeatureColumn]:\n\n return self._get_numeric_feature_columns(\n ) + self._get_embedding_feature_columns()", "def select_columns(self, solution_vec):\n return self.x_train.columns[solution_vec >= 0.5].tolist()", "def export_feature_columns(self) -> list:\n return self._tf_export_cols" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set up cache object for wallet
def setup_cache(self): if self.walletname not in cache: cache[self.walletname] = { "raw_transactions": {}, "transactions": [], "tx_count": None, "tx_changed": True, "last_block": None, "raw_tx_block_update": {}, "addresses": [], "change_addresses": [], "scan_addresses": True }
[ "def __init_cache__(self) -> None:\n try:\n self.cache = caches[CACHE_NAME]\n logging.info(\"GeoIP2 - successfully initialised cache\")\n except InvalidCacheBackendError as ex:\n raise MiddlewareNotUsed(f\"GeoIP2 - cache configuration error: {ex}\") from ex", "def setup_memcache (self):\n cached_items = xbmcgui.Window(xbmcgui.getCurrentWindowId()).getProperty('memcache')\n # no cache setup yet, create one\n if len(cached_items) < 1:\n xbmcgui.Window(xbmcgui.getCurrentWindowId()).setProperty('memcache', pickle.dumps({}))", "def __init__(self,mirror=\"warehouse.primekinetics.org\",cache=\"cache\"):\n\t\tself.mirrorLocation=mirror\n\t\tself.cacheLocation=cache\n\t\tself.cas2primeids=dict()\n\t\tself.primeid2cas=dict()\n\t\n\t\tself.cacheItems=['cas2primeids','primeid2cas'] # these are the items we wish to save/load in the cache\n\t\ttry: \n\t\t\tself.loadCache() # try to load the cache\n\t\texcept:\n\t\t\tprint \"Couldn't load cache.\"\n\t\t\tself.readCAS()\n\t\t\tself.saveCache()", "def __initCacheSection(self):\n m = hashlib.md5()\n for i in [self.AUTHZ_ENDPOINT, self.CLIENT_ID]:\n m.update(bytes(self.conf[i], \"utf-8\"))\n self.cacheSection = str(m.hexdigest())", "def __init__(self):\n self.cache = {}\n logger.debug(\"Initializing ontology cache\")", "def __init__(self, cache_location=None):\n CacheManager.cache_location = None # The dir that holds the whole cache.\n CacheManager.cache_file_location = None # The JSON file that contains posts, etc.\n\n if cache_location is not None:\n CacheManager.create_cache(cache_location)", "def __init__(self, proxy, use_cache):\n super(NetworkAccessManager, self).__init__()\n self.setProxy(proxy)\n self.sslErrors.connect(self.sslErrorHandler)\n # the requests that are still active\n self.active_requests = [] \n self.cache = pdict.PersistentDict(settings.cache_file) if use_cache else None", "def cache():\n \n global __cache_instance\n if __cache_instance is None:\n cache_key = __configuration[CACHE_KEY]\n if CACHE_STORE in __configuration:\n store = __configuration[CACHE_STORE]\n else:\n store = __configuration[STORE]\n __cache_instance = __configuration[CACHE](store(cache_key))\n \n return __cache_instance", "def create_cache_object(self):\n # make it a dict\n cache_object = {}\n\n # create a key for chapters\n cache_object['title'] = None\n cache_object['chapters'] = {}\n\n return cache_object", "def _cached_init(self):\n original = _SymbolCache[self.__class__]\n self.__dict__ = original().__dict__", "def setup_cache(self):\n train_cache_path = self.cache.get_cache_path_and_check(TRAIN_STR, self.task_name)\n dev_cache_path = self.cache.get_cache_path_and_check(DEV_STR, self.task_name)\n test_cache_path = self.cache.get_cache_path_and_check(TEST_STR, self.task_name)\n\n self.train_cache_writer = None\n self.dev_cache_writer = None\n self.test_cache_writer = None\n\n if os.path.exists(train_cache_path):\n f = h5py.File(train_cache_path, 'r')\n self.train_cache = (torch.tensor(f[str(i)][()]) for i in range(len(f.keys())))\n else:\n self.train_cache_writer = h5py.File(train_cache_path, 'w')\n if os.path.exists(dev_cache_path):\n f2 = h5py.File(dev_cache_path, 'r')\n self.dev_cache = (torch.tensor(f2[str(i)][()]) for i in range(len(f2.keys())))\n else:\n self.dev_cache_writer = h5py.File(dev_cache_path, 'w')\n if os.path.exists(test_cache_path):\n f3 = h5py.File(test_cache_path, 'r')\n self.test_cache = (torch.tensor(f3[str(i)][()]) for i in range(len(f3.keys())))\n else:\n self.test_cache_writer = h5py.File(test_cache_path, 'w')", "def 
_init_cache(self):\n\n # initialise or clear depth cache\n self._depth_cache = DepthCache(self._symbol)\n\n # set a time to refresh the depth cache\n if self._refresh_interval:\n self._refresh_time = int(time.time()) + self._refresh_interval", "def create_cache():\n global _dl\n err = _dl.geopm_topo_create_cache()\n if err < 0:\n raise RuntimeError(\"geopm_topo_create_cache() failed: {}\".format(\n error.message(err)))", "def _init_bulk_add_cache(self) -> Mapping[str, Any]:\n return {}", "def setup_redis_cache_connection():\n\tglobal cache\n\n\tif not cache:\n\t\tfrom frappe.utils.redis_wrapper import RedisWrapper\n\n\t\tcache = RedisWrapper.from_url(conf.get(\"redis_cache\"))", "def cache_dict():\n return {}", "def __init__(self, config, cache_filename, path):\n self.config = config\n self.cache_path = os.path.join(path, cache_filename)\n self._cache = None", "def __init__(self, contract=None, hash_value=None):\n if contract is not None:\n self.contract = contract\n elif hash_value is not None:\n try:\n file_path = HashMap().get_file(hash_value)\n if file_path is None:\n file_path = DATA_FOLDER + \"cache/\" + hexlify(hash_value)\n with open(file_path, 'r') as filename:\n self.contract = json.load(filename, object_pairs_hook=OrderedDict)\n except Exception:\n self.contract = {}\n else:\n self.contract = {}", "async def create_currency_cache(self):\n for user_id, money in await self.ex.sql.s_currency.fetch_currency():\n user = await self.ex.get_user(user_id)\n user.balance = int(money)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cache `raw_transactions` (with full data on all the inputs and outputs of each tx)
def cache_raw_txs(self, cli_txs): # Get list of all tx ids txids = list(dict.fromkeys(cli_txs.keys())) tx_count = len(txids) # If there are new transactions (if the transations count changed) if tx_count != self.cache["tx_count"]: for txid in txids: # Cache each tx, if not already cached. # Data is immutable (unless reorg occurs) and can be saved in a file for permanent caching if txid not in self.cache["raw_transactions"]: # Call Bitcoin Core to get the "raw" transaction - allows to read detailed inputs and outputs raw_tx_hex = self.cli.gettransaction(txid)["hex"] raw_tx = self.cli.decoderawtransaction(raw_tx_hex) # Some data (like fee and category, and when unconfirmed also time) available from the `listtransactions` # command is not available in the `getrawtransacion` - so add it "manually" here. if "fee" in cli_txs[txid]: raw_tx["fee"] = cli_txs[txid]["fee"] if "category" in cli_txs[txid]: raw_tx["category"] = cli_txs[txid]["category"] if "time" in cli_txs[txid]: raw_tx["time"] = cli_txs[txid]["time"] if "blockhash" in cli_txs[txid]: raw_tx["block_height"] = self.cli.getblockheader(cli_txs[txid]["blockhash"])["height"] else: raw_tx["block_height"] = -1 # Loop on the transaction's inputs # If not a coinbase transaction: # Get the the output data corresponding to the input (that is: input_txid[output_index]) tx_ins = [] for vin in raw_tx["vin"]: # If the tx is a coinbase tx - set `coinbase` to True if "coinbase" in vin: raw_tx["coinbase"] = True break # If the tx is a coinbase tx - set `coinbase` to True vin_txid = vin["txid"] vin_vout = vin["vout"] try: raw_tx_hex = self.cli.gettransaction(vin_txid)["hex"] tx_in = self.cli.decoderawtransaction(raw_tx_hex)["vout"][vin_vout] tx_in["txid"] = vin["txid"] tx_ins.append(tx_in) except: pass # For each output in the tx_ins list (the tx inputs in their output "format") # Create object with the address, amount, and whatever the address belongs to the wallet (`internal=True` if it is). raw_tx["from"] = [{ "address": out["scriptPubKey"]["addresses"][0], "amount": out["value"], "internal": out["scriptPubKey"]["addresses"][0] in self.wallet_addresses } for out in tx_ins] # For each output in the tx (`vout`) # Create object with the address, amount, and whatever the address belongs to the wallet (`internal=True` if it is). raw_tx["to"] = [({ "address": out["scriptPubKey"]["addresses"][0], "amount": out["value"], "internal": out["scriptPubKey"]["addresses"][0] in self.wallet_addresses }) for out in raw_tx["vout"] if "addresses" in out["scriptPubKey"]] # Save the raw_transaction to the cache cache[self.walletname]["raw_transactions"][txid] = raw_tx # Set the tx count to avoid unnecessary indexing cache[self.walletname]["tx_count"] = tx_count # Set the tx changed to indicate the there are new transactions to cache cache[self.walletname]["tx_changed"] = True else: # Set the tx changed to False to avoid unnecessary indexing cache[self.walletname]["tx_changed"] = False # If unconfirmed transactions were mined, assign them their block height blocks = self.cli.getblockcount() if blocks != self.cache["last_block"]: for txid in self.cache["raw_transactions"]: if self.cache["raw_transactions"][txid]["block_height"] == -1 and "blockhash" in cli_txs[txid]: height = self.cli.getblockheader(cli_txs[txid]["blockhash"])["height"] cache[self.walletname]["raw_transactions"][txid]["block_height"] = height cache[self.walletname]["raw_tx_block_update"][txid] = height cache[self.walletname]["last_block"] = blocks return self.cache["raw_transactions"]
[ "def cache_txs(self, raw_txs):\n # Get the cached `raw_transactions` dict (txid -> tx) as a list of txs\n transactions = list(sorted(raw_txs.values(), key = lambda tx: tx['time'], reverse=True))\n result = []\n\n # If unconfirmed transactions were mined, assign them their block height\n if len(self.cache[\"raw_tx_block_update\"]) > 0:\n for i in range(0, len(self.cache[\"transactions\"])):\n if self.cache[\"transactions\"][i][\"txid\"] in cache[self.walletname][\"raw_tx_block_update\"]:\n cache[self.walletname][\"transactions\"][i][\"block_height\"] = cache[self.walletname][\"raw_tx_block_update\"][cache[self.walletname][\"transactions\"][i][\"txid\"]]\n cache[self.walletname][\"raw_tx_block_update\"] = {}\n\n # If the `raw_transactions` did not change - exit here.\n if not self.cache[\"tx_changed\"]:\n return self.cache[\"transactions\"]\n\n # Loop through the raw_transactions list\n for i, tx in enumerate(transactions):\n # If tx is a user generated one (categories: `send`/ `receive`) and not coinbase (categories: `generated`/ `immature`)\n if tx[\"category\"] == \"send\" or tx[\"category\"] == \"receive\":\n is_send = True\n is_self = True\n\n # Check if the transaction is a `send` or not (if all inputs belong to the wallet)\n if len(tx[\"from\"]) == 0:\n is_send = False\n\n for fromdata in tx[\"from\"]:\n if not fromdata[\"internal\"]:\n is_send = False\n\n # Check if the transaction is a `self-transfer` (if all inputs and all outputs belong to the wallet)\n for to in tx[\"to\"]:\n if not is_send or not to[\"internal\"]:\n is_self = False\n break\n\n tx[\"is_self\"] = is_self\n\n if not is_send or is_self:\n for to in tx[\"to\"]:\n if to[\"internal\"]:\n # Cache received outputs\n result.append(self.prepare_tx(tx, to, \"receive\", destination=None, is_change=(to[\"address\"] in self.change_addresses)))\n\n if is_send or is_self:\n destination = None\n for to in tx[\"to\"]:\n if to[\"address\"] in self.change_addresses and not is_self:\n # Cache change output\n result.append(self.prepare_tx(tx, to, \"receive\", destination=destination, is_change=True))\n elif not to[\"internal\"] or (is_self and to[\"address\"] not in self.change_addresses):\n destination = to\n for fromdata in tx[\"from\"]:\n # Cache sent inputs\n result.append(self.prepare_tx(tx, fromdata, \"send\", destination=destination))\n else:\n tx[\"is_self\"] = False\n # Cache coinbase output\n result.append(self.prepare_tx(tx, tx[\"to\"][0], tx[\"category\"]))\n\n # Save the result to the cache\n cache[self.walletname][\"transactions\"] = result\n return self.cache[\"transactions\"]", "def fetch_all_tx(self):\n transactions = []\n for block in self.chain:\n transactions.append(block.data)\n return transactions", "def load_transactions(self, address, update=True, verbose=False, **kwargs):\n if self.apikey is None:\n update = False\n if verbose:\n print('load_transactions', address)\n fn = os.path.join(self.cache_dir, address + '.json')\n startblock = None\n transactions = []\n if os.path.exists(fn):\n with open(fn) as f:\n try:\n transactions = json.load(f)\n except json.decoder.JSONDecodeError:\n if verbose:\n print('ignoring error while loading', fn)\n pass\n if not update:\n return transactions\n if len(transactions):\n startblock = max([int(e['blockNumber']) for e in transactions])\n if verbose:\n print('starting from cache at', startblock, 'with', len(transactions))\n # add new transactions\n new_transactions = self.fetch_transactions(address, startblock=startblock, verbose=verbose, **kwargs)\n # dedupe\n if 
len(new_transactions) > 0:\n transactions.extend(new_transactions)\n transactions = list({e['hash']:e for e in transactions}.values())\n safe_dump(fn, transactions)\n return transactions", "def all_transactions(self):\n self._update()\n with self.all_tx_lock:\n all_tx_copy = copy.deepcopy(self._all_transactions)\n return all_tx_copy", "def setup_cache(self):\n if self.walletname not in cache: \n cache[self.walletname] = {\n \"raw_transactions\": {},\n \"transactions\": [],\n \"tx_count\": None,\n \"tx_changed\": True,\n \"last_block\": None,\n \"raw_tx_block_update\": {},\n \"addresses\": [],\n \"change_addresses\": [],\n \"scan_addresses\": True\n }", "def _get_all_transactions(self) -> Iterator[BaseTransaction]:\n raise NotImplementedError", "def request_transactions(self, blockchain):\n excludes_list, balance_dict = list(), dict()\n print(\"Requesting transactions to %s...\" % self.url)\n while len(self.transactions) < Miner.TX_PER_BLOCK:\n transaction = self.get_transaction(excludes_list)\n if transaction:\n verif = transaction.verify_signature()\n print(\"Verifying signature of TX %s: %s\"\n % (transaction.hash, verif))\n if verif:\n balance_keys = balance_dict.keys()\n sender = Address.generate_address(transaction.sender_public_key)\n receiver, amount = transaction.receiver, transaction.amount\n if not (sender in balance_keys):\n balance_dict[sender] = blockchain.get_balance(sender)\n if not (receiver in balance_keys):\n balance_dict[receiver] = blockchain.get_balance(receiver)\n hasEnoughBalance = self.sender_has_enough_balance(sender, amount, balance_dict)\n print(\"In TX %s sender has enough balance: %s\" % (transaction.hash, hasEnoughBalance))\n if hasEnoughBalance:\n balance_dict[sender] -= transaction.amount\n balance_dict[receiver] += transaction.amount\n self.add_transaction(transaction)\n\n print(\"Excluding TX: %s\" % transaction.hash)\n excludes_list.append(transaction.hash)\n print(\"Received %s transactions\" % Miner.TX_PER_BLOCK)", "def transactions(self):\n return copy.deepcopy(self._transactions)", "def transaction_gen(block):\n for tx_hash in block['tx']:\n # The second argument represents the verbosity\n yield get_transaction(tx_hash)", "def get_all_tx(self) -> List[Transaction]:\n return [\n Transaction(\n nonce=int(ethereum_transaction.nonce),\n gasprice=int(ethereum_transaction.gasprice),\n startgas=int(ethereum_transaction.startgas),\n value=int(ethereum_transaction.value),\n v=ethereum_transaction.v,\n r=int(ethereum_transaction.r),\n s=int(ethereum_transaction.s),\n data=ethereum_transaction.data.tobytes(),\n to=HexBytes(ethereum_transaction.to.tobytes()).hex(),\n )\n for ethereum_transaction in PendingEthereumTransaction.objects.all()\n ]", "def calculate_input_unspent_transactions(self):\n pass", "def get_pending_trust_transactions():\n with django.db.transaction.atomic():\n transactions = list(\n Transaction.objects.filter(\n kind=Transaction.KIND.deposit,\n status=Transaction.STATUS.pending_trust,\n pending_execution_attempt=False,\n )\n .select_related(\"asset\")\n .select_for_update()\n )\n Transaction.objects.filter(id__in=[t.id for t in transactions]).update(\n pending_execution_attempt=True\n )\n return transactions", "def get_transaction_data():\n data = parse_json()\n income_instances = create_transactions(data['incomes'])\n expense_instances = create_transactions(data['expenses'])\n for expense in expense_instances:\n expense.amount = -(expense.amount)\n transactions = income_instances + expense_instances\n return transactions", "def 
get_latest_transactions(self):\n first_run = False\n if not self._transactions:\n first_run = True\n transactions = []\n for account in self.accounts:\n self._logger.debug('Getting transactions for account \"%s\"', account.ynab_account.name)\n for transaction in account.get_latest_transactions():\n if not self._filter_transaction(transaction):\n transactions.append(transaction)\n self._logger.debug('Caching %s transactions', len(transactions))\n self._transactions.extend(transactions)\n if first_run:\n self._logger.info('First run detected, discarding transactions until now')\n return []\n return transactions", "def _update_UTXO(self): # TODO\n for transaction in self.waiting_transactions:\n for tx_in in transaction.tx_ins:\n for i in range(len(self.UTXO_pool)):\n # Delete UTXOs that have been spent\n self.UTXO_pool = [\n x for x in self.UTXO_pool if x.tx_id != tx_in.tx_out_id]\n for idx, tx_out in enumerate(transaction.tx_outs):\n new_utxo = tx.UTXO(transaction.id, idx,\n tx_out.address, tx_out.amount)\n self.UTXO_pool.append(new_utxo)\n self.waiting_transactions = []", "def read_trans(self) -> None:\n self.trans = {}\n for t in range(self.nTransactions):\n\n # Make transaction objects (and table later?)\n trans = Trans(self.mmap, self.cursor,\n verb=self.verb,\n **self.trans_kwargs)\n\n # Read the transaction\n trans.get_transaction()\n\n # Validate, if on\n if self.validateTrans:\n trans.api_verify()\n\n # Update cursor\n self.cursor = trans.cursor\n\n # Save\n self.trans[t] = trans", "def process_transactions(self, data):\n\n self.transactions = []\n \n transaction_nums = range(6)\n for transaction_num in transaction_nums:\n transdict = IPN.Transaction.slicedict(data, ('transaction[%s].'\n % transaction_num))\n if len(transdict) > 0:\n self.transactions.append(IPN.Transaction(**transdict))", "def __init__(self, inputs=None, outputs=None, locktime=0, version=None,\n network=DEFAULT_NETWORK, fee=None, fee_per_kb=None, size=None, txid='', txhash='', date=None,\n confirmations=None, block_height=None, block_hash=None, input_total=0, output_total=0, rawtx=b'',\n status='new', coinbase=False, verified=False, witness_type='legacy', flag=None):\n\n self.coinbase = coinbase\n self.inputs = []\n if inputs is not None:\n for inp in inputs:\n self.inputs.append(inp)\n if not input_total:\n input_total = sum([i.value for i in inputs])\n id_list = [i.index_n for i in self.inputs]\n if list(set(id_list)) != id_list:\n _logger.info(\"Identical transaction indexes (tid) found in inputs, please specify unique index. 
\"\n \"Indexes will be automatically recreated\")\n index_n = 0\n for inp in self.inputs:\n inp.index_n = index_n\n index_n += 1\n if outputs is None:\n self.outputs = []\n else:\n self.outputs = outputs\n if not output_total:\n output_total = sum([o.value for o in outputs])\n if fee is None and output_total and input_total:\n fee = input_total - output_total\n if fee < 0 or fee == 0 and not self.coinbase:\n raise TransactionError(\"Transaction inputs total value must be greater then total value of \"\n \"transaction outputs\")\n if not version:\n version = b'\\x00\\x00\\x00\\x01'\n if isinstance(version, int):\n self.version = version.to_bytes(4, 'big')\n self.version_int = version\n else:\n self.version = version\n self.version_int = int.from_bytes(version, 'big')\n self.locktime = locktime\n self.network = network\n if not isinstance(network, Network):\n self.network = Network(network)\n self.flag = flag\n self.fee = fee\n self.fee_per_kb = fee_per_kb\n self.size = size\n self.vsize = size\n self.txid = txid\n self.txhash = txhash\n self.date = date\n self.confirmations = confirmations\n self.block_height = block_height\n self.block_hash = block_hash\n self.input_total = input_total\n self.output_total = output_total\n self.rawtx = rawtx\n self.status = status\n self.verified = verified\n self.witness_type = witness_type\n self.change = 0\n self.calc_weight_units()\n if self.witness_type not in ['legacy', 'segwit']:\n raise TransactionError(\"Please specify a valid witness type: legacy or segwit\")\n if not self.txid:\n self.txid = self.signature_hash()[::-1].hex()", "def merge_transaction(self, transaction):\n self.inputs += transaction.inputs\n self.outputs += transaction.outputs\n self.shuffle()\n self.update_totals()\n self.sign_and_update()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Caches the transactions list. Caches the inputs and outputs that belong to the user's wallet for each `raw_transaction`
def cache_txs(self, raw_txs): # Get the cached `raw_transactions` dict (txid -> tx) as a list of txs transactions = list(sorted(raw_txs.values(), key = lambda tx: tx['time'], reverse=True)) result = [] # If unconfirmed transactions were mined, assign them their block height if len(self.cache["raw_tx_block_update"]) > 0: for i in range(0, len(self.cache["transactions"])): if self.cache["transactions"][i]["txid"] in cache[self.walletname]["raw_tx_block_update"]: cache[self.walletname]["transactions"][i]["block_height"] = cache[self.walletname]["raw_tx_block_update"][cache[self.walletname]["transactions"][i]["txid"]] cache[self.walletname]["raw_tx_block_update"] = {} # If the `raw_transactions` did not change - exit here. if not self.cache["tx_changed"]: return self.cache["transactions"] # Loop through the raw_transactions list for i, tx in enumerate(transactions): # If tx is a user generated one (categories: `send`/ `receive`) and not coinbase (categories: `generated`/ `immature`) if tx["category"] == "send" or tx["category"] == "receive": is_send = True is_self = True # Check if the transaction is a `send` or not (if all inputs belong to the wallet) if len(tx["from"]) == 0: is_send = False for fromdata in tx["from"]: if not fromdata["internal"]: is_send = False # Check if the transaction is a `self-transfer` (if all inputs and all outputs belong to the wallet) for to in tx["to"]: if not is_send or not to["internal"]: is_self = False break tx["is_self"] = is_self if not is_send or is_self: for to in tx["to"]: if to["internal"]: # Cache received outputs result.append(self.prepare_tx(tx, to, "receive", destination=None, is_change=(to["address"] in self.change_addresses))) if is_send or is_self: destination = None for to in tx["to"]: if to["address"] in self.change_addresses and not is_self: # Cache change output result.append(self.prepare_tx(tx, to, "receive", destination=destination, is_change=True)) elif not to["internal"] or (is_self and to["address"] not in self.change_addresses): destination = to for fromdata in tx["from"]: # Cache sent inputs result.append(self.prepare_tx(tx, fromdata, "send", destination=destination)) else: tx["is_self"] = False # Cache coinbase output result.append(self.prepare_tx(tx, tx["to"][0], tx["category"])) # Save the result to the cache cache[self.walletname]["transactions"] = result return self.cache["transactions"]
[ "def cache_raw_txs(self, cli_txs): \n # Get list of all tx ids\n txids = list(dict.fromkeys(cli_txs.keys()))\n tx_count = len(txids)\n\n # If there are new transactions (if the transations count changed)\n if tx_count != self.cache[\"tx_count\"]:\n for txid in txids:\n # Cache each tx, if not already cached.\n # Data is immutable (unless reorg occurs) and can be saved in a file for permanent caching\n if txid not in self.cache[\"raw_transactions\"]:\n # Call Bitcoin Core to get the \"raw\" transaction - allows to read detailed inputs and outputs\n raw_tx_hex = self.cli.gettransaction(txid)[\"hex\"]\n raw_tx = self.cli.decoderawtransaction(raw_tx_hex)\n # Some data (like fee and category, and when unconfirmed also time) available from the `listtransactions`\n # command is not available in the `getrawtransacion` - so add it \"manually\" here.\n if \"fee\" in cli_txs[txid]:\n raw_tx[\"fee\"] = cli_txs[txid][\"fee\"]\n if \"category\" in cli_txs[txid]:\n raw_tx[\"category\"] = cli_txs[txid][\"category\"]\n if \"time\" in cli_txs[txid]:\n raw_tx[\"time\"] = cli_txs[txid][\"time\"]\n\n if \"blockhash\" in cli_txs[txid]:\n raw_tx[\"block_height\"] = self.cli.getblockheader(cli_txs[txid][\"blockhash\"])[\"height\"]\n else:\n raw_tx[\"block_height\"] = -1\n\n # Loop on the transaction's inputs\n # If not a coinbase transaction:\n # Get the the output data corresponding to the input (that is: input_txid[output_index])\n tx_ins = []\n for vin in raw_tx[\"vin\"]:\n # If the tx is a coinbase tx - set `coinbase` to True\n if \"coinbase\" in vin:\n raw_tx[\"coinbase\"] = True\n break\n # If the tx is a coinbase tx - set `coinbase` to True\n vin_txid = vin[\"txid\"]\n vin_vout = vin[\"vout\"]\n try:\n raw_tx_hex = self.cli.gettransaction(vin_txid)[\"hex\"]\n tx_in = self.cli.decoderawtransaction(raw_tx_hex)[\"vout\"][vin_vout]\n tx_in[\"txid\"] = vin[\"txid\"]\n tx_ins.append(tx_in)\n except:\n pass\n # For each output in the tx_ins list (the tx inputs in their output \"format\")\n # Create object with the address, amount, and whatever the address belongs to the wallet (`internal=True` if it is).\n raw_tx[\"from\"] = [{\n \"address\": out[\"scriptPubKey\"][\"addresses\"][0],\n \"amount\": out[\"value\"],\n \"internal\": out[\"scriptPubKey\"][\"addresses\"][0] in self.wallet_addresses\n } for out in tx_ins]\n # For each output in the tx (`vout`)\n # Create object with the address, amount, and whatever the address belongs to the wallet (`internal=True` if it is).\n raw_tx[\"to\"] = [({\n \"address\": out[\"scriptPubKey\"][\"addresses\"][0],\n \"amount\": out[\"value\"],\n \"internal\": out[\"scriptPubKey\"][\"addresses\"][0] in self.wallet_addresses\n }) for out in raw_tx[\"vout\"] if \"addresses\" in out[\"scriptPubKey\"]]\n # Save the raw_transaction to the cache\n cache[self.walletname][\"raw_transactions\"][txid] = raw_tx\n # Set the tx count to avoid unnecessary indexing\n cache[self.walletname][\"tx_count\"] = tx_count\n # Set the tx changed to indicate the there are new transactions to cache\n cache[self.walletname][\"tx_changed\"] = True\n else:\n # Set the tx changed to False to avoid unnecessary indexing\n cache[self.walletname][\"tx_changed\"] = False\n\n # If unconfirmed transactions were mined, assign them their block height\n blocks = self.cli.getblockcount()\n if blocks != self.cache[\"last_block\"]:\n for txid in self.cache[\"raw_transactions\"]:\n if self.cache[\"raw_transactions\"][txid][\"block_height\"] == -1 and \"blockhash\" in cli_txs[txid]:\n height = 
self.cli.getblockheader(cli_txs[txid][\"blockhash\"])[\"height\"]\n cache[self.walletname][\"raw_transactions\"][txid][\"block_height\"] = height\n cache[self.walletname][\"raw_tx_block_update\"][txid] = height\n cache[self.walletname][\"last_block\"] = blocks\n\n return self.cache[\"raw_transactions\"]", "def setup_cache(self):\n if self.walletname not in cache: \n cache[self.walletname] = {\n \"raw_transactions\": {},\n \"transactions\": [],\n \"tx_count\": None,\n \"tx_changed\": True,\n \"last_block\": None,\n \"raw_tx_block_update\": {},\n \"addresses\": [],\n \"change_addresses\": [],\n \"scan_addresses\": True\n }", "def request_transactions(self, blockchain):\n excludes_list, balance_dict = list(), dict()\n print(\"Requesting transactions to %s...\" % self.url)\n while len(self.transactions) < Miner.TX_PER_BLOCK:\n transaction = self.get_transaction(excludes_list)\n if transaction:\n verif = transaction.verify_signature()\n print(\"Verifying signature of TX %s: %s\"\n % (transaction.hash, verif))\n if verif:\n balance_keys = balance_dict.keys()\n sender = Address.generate_address(transaction.sender_public_key)\n receiver, amount = transaction.receiver, transaction.amount\n if not (sender in balance_keys):\n balance_dict[sender] = blockchain.get_balance(sender)\n if not (receiver in balance_keys):\n balance_dict[receiver] = blockchain.get_balance(receiver)\n hasEnoughBalance = self.sender_has_enough_balance(sender, amount, balance_dict)\n print(\"In TX %s sender has enough balance: %s\" % (transaction.hash, hasEnoughBalance))\n if hasEnoughBalance:\n balance_dict[sender] -= transaction.amount\n balance_dict[receiver] += transaction.amount\n self.add_transaction(transaction)\n\n print(\"Excluding TX: %s\" % transaction.hash)\n excludes_list.append(transaction.hash)\n print(\"Received %s transactions\" % Miner.TX_PER_BLOCK)", "def load_transactions(self, address, update=True, verbose=False, **kwargs):\n if self.apikey is None:\n update = False\n if verbose:\n print('load_transactions', address)\n fn = os.path.join(self.cache_dir, address + '.json')\n startblock = None\n transactions = []\n if os.path.exists(fn):\n with open(fn) as f:\n try:\n transactions = json.load(f)\n except json.decoder.JSONDecodeError:\n if verbose:\n print('ignoring error while loading', fn)\n pass\n if not update:\n return transactions\n if len(transactions):\n startblock = max([int(e['blockNumber']) for e in transactions])\n if verbose:\n print('starting from cache at', startblock, 'with', len(transactions))\n # add new transactions\n new_transactions = self.fetch_transactions(address, startblock=startblock, verbose=verbose, **kwargs)\n # dedupe\n if len(new_transactions) > 0:\n transactions.extend(new_transactions)\n transactions = list({e['hash']:e for e in transactions}.values())\n safe_dump(fn, transactions)\n return transactions", "def merge_transaction(self, transaction):\n self.inputs += transaction.inputs\n self.outputs += transaction.outputs\n self.shuffle()\n self.update_totals()\n self.sign_and_update()", "def _update_UTXO(self): # TODO\n for transaction in self.waiting_transactions:\n for tx_in in transaction.tx_ins:\n for i in range(len(self.UTXO_pool)):\n # Delete UTXOs that have been spent\n self.UTXO_pool = [\n x for x in self.UTXO_pool if x.tx_id != tx_in.tx_out_id]\n for idx, tx_out in enumerate(transaction.tx_outs):\n new_utxo = tx.UTXO(transaction.id, idx,\n tx_out.address, tx_out.amount)\n self.UTXO_pool.append(new_utxo)\n self.waiting_transactions = []", "def 
calculate_input_unspent_transactions(self):\n pass", "def transactions(self):\n return copy.deepcopy(self._transactions)", "def __init__(self, inputs=None, outputs=None, locktime=0, version=None,\n network=DEFAULT_NETWORK, fee=None, fee_per_kb=None, size=None, txid='', txhash='', date=None,\n confirmations=None, block_height=None, block_hash=None, input_total=0, output_total=0, rawtx=b'',\n status='new', coinbase=False, verified=False, witness_type='legacy', flag=None):\n\n self.coinbase = coinbase\n self.inputs = []\n if inputs is not None:\n for inp in inputs:\n self.inputs.append(inp)\n if not input_total:\n input_total = sum([i.value for i in inputs])\n id_list = [i.index_n for i in self.inputs]\n if list(set(id_list)) != id_list:\n _logger.info(\"Identical transaction indexes (tid) found in inputs, please specify unique index. \"\n \"Indexes will be automatically recreated\")\n index_n = 0\n for inp in self.inputs:\n inp.index_n = index_n\n index_n += 1\n if outputs is None:\n self.outputs = []\n else:\n self.outputs = outputs\n if not output_total:\n output_total = sum([o.value for o in outputs])\n if fee is None and output_total and input_total:\n fee = input_total - output_total\n if fee < 0 or fee == 0 and not self.coinbase:\n raise TransactionError(\"Transaction inputs total value must be greater then total value of \"\n \"transaction outputs\")\n if not version:\n version = b'\\x00\\x00\\x00\\x01'\n if isinstance(version, int):\n self.version = version.to_bytes(4, 'big')\n self.version_int = version\n else:\n self.version = version\n self.version_int = int.from_bytes(version, 'big')\n self.locktime = locktime\n self.network = network\n if not isinstance(network, Network):\n self.network = Network(network)\n self.flag = flag\n self.fee = fee\n self.fee_per_kb = fee_per_kb\n self.size = size\n self.vsize = size\n self.txid = txid\n self.txhash = txhash\n self.date = date\n self.confirmations = confirmations\n self.block_height = block_height\n self.block_hash = block_hash\n self.input_total = input_total\n self.output_total = output_total\n self.rawtx = rawtx\n self.status = status\n self.verified = verified\n self.witness_type = witness_type\n self.change = 0\n self.calc_weight_units()\n if self.witness_type not in ['legacy', 'segwit']:\n raise TransactionError(\"Please specify a valid witness type: legacy or segwit\")\n if not self.txid:\n self.txid = self.signature_hash()[::-1].hex()", "def all_transactions(self):\n self._update()\n with self.all_tx_lock:\n all_tx_copy = copy.deepcopy(self._all_transactions)\n return all_tx_copy", "def process_transactions(self, data):\n\n self.transactions = []\n \n transaction_nums = range(6)\n for transaction_num in transaction_nums:\n transdict = IPN.Transaction.slicedict(data, ('transaction[%s].'\n % transaction_num))\n if len(transdict) > 0:\n self.transactions.append(IPN.Transaction(**transdict))", "def _build_received_transactions(self):\n transactions = (\n Transaction.select(\n Transaction.recipient_id, fn.SUM(Transaction.amount).alias(\"amount\")\n )\n .where(Transaction.type == TRANSACTION_TYPE_TRANSFER)\n .group_by(Transaction.recipient_id)\n )\n for transaction in transactions:\n # TODO: make this nicer. 
It feels like a hack to do it this way\n wallet = self.find_by_address(transaction.recipient_id)\n wallet.balance = int(transaction.amount)\n self.save_wallet(wallet)", "def test_wallets_get_transaction_list(self):\n pass", "def read_trans(self) -> None:\n self.trans = {}\n for t in range(self.nTransactions):\n\n # Make transaction objects (and table later?)\n trans = Trans(self.mmap, self.cursor,\n verb=self.verb,\n **self.trans_kwargs)\n\n # Read the transaction\n trans.get_transaction()\n\n # Validate, if on\n if self.validateTrans:\n trans.api_verify()\n\n # Update cursor\n self.cursor = trans.cursor\n\n # Save\n self.trans[t] = trans", "def _save_transactions(self):\r\n\t\tlogger.debug(\"Enter\")\r\n\t\t\r\n\t\twith open(self._state_file, 'wb') as tmp:\r\n\t\t\tlogger.debug(\"Dumping transactions: %r\" % self.transactions)\r\n\t\t\tpickle.dump(self.transactions, tmp)\r\n\t\t\r\n\t\tlogger.debug(\"Exit\")", "def get_transactions():\n email = os.environ['EMAIL']\n password = os.environ['PASSWORD']\n mint = mintapi.Mint(email, password)\n mint.initiate_account_refresh()\n transactions = mint.get_transactions()\n transactions.drop_duplicates(['date', 'original_description', 'amount'], inplace=True)\n return transactions", "def prepare_tx_set(self, redeem_script, funding_tx, out_address, amount):\n\n # Prepare reals\n self.reals = [] # hashes\n self.real_txs = [] # tx's in serial form\n for i in range(self.m):\n tx, sighash = get_unsigned_tx(funding_tx, redeem_script,\n out_address, amount, n_sequence=i)\n self.reals.append(sighash)\n self.real_txs.append(tx)\n\n # print(\"TX # %d Hash: %s\" % (i, hexlify(sighash)))\n\n # Prepare fakes\n self.fakes = []\n self.fake_blinds = []\n for i in range(self.n):\n r = self.compute_rand(256)\n self.fakes.append(hash256(self.FAKE_FORMAT + r))\n self.fake_blinds.append(r)\n\n # Create Shuffled puzzle set\n self.tx_set = self.fakes[:]\n self.tx_set += self.reals\n random.shuffle(self.tx_set)\n\n # Record indices\n self.R = [self.tx_set.index(x) for x in self.reals]\n self.F = [self.tx_set.index(x) for x in self.fakes]\n\n self.salt = self.compute_rand(256)\n\n # Serialize lists\n R = self.serialize_int_list(self.R)\n F = self.serialize_int_list(self.F)\n\n # HMAC with salt as key\n self.R_hash = hmac_sha256(self.salt, R)\n self.F_hash = hmac_sha256(self.salt, F)\n\n return (self.tx_set, self.R_hash, self.F_hash)", "def update_utxo_pool(self, tx):\n if not tx['coinbase']:\n for i in tx['ins']:\n self.utxo_pool.remove(i)\n for ix, o in enumerate(tx['outs']):\n self.utxo_pool.append({'tx_hash':tx['hash'], 'output_index':ix, 'amount':o['amount'], 'addr': o['addr']})\n return True", "def __init__(self):\n self.transaction_index = {}\n self.transaction_list = []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced.
def hide_fields_in_newer_versions(obj): if not api_utils.allow_start_end_audit_time(): obj.start_time = wtypes.Unset obj.end_time = wtypes.Unset if not api_utils.allow_force(): obj.force = wtypes.Unset
[ "def hide_fields_in_newer_versions(obj):\n pass", "def remove_read_only_fields(self):\n self.fields = XML_List(Elements.FIELDS, [field for field in self.fields if\n not field.read_only or not str_to_bool(field.read_only)])", "def remove_access_request_field(self):\n self.fields = XML_List(Elements.FIELDS, [field for field in self.fields if\n field.FIELD_CONTENT_ATTRIBUTES != Elements.ACCESS_REQUESTS])", "def non_version_fields(self):\n fields = [{'alias': d.get('alias'), 'pipeline': d.get('pipeline')} for\n d in self._field_dic if\n d.get('versioned') == 0 and d.get('calculatedAtRunTime') == 0 and d.get('alias') not in self._exclude]\n return fields", "def extra_fields(self):\n return {}", "def get_only_fields(self, request):\n return self._get_only_items(request, 'only-fields', 'only_fields')", "def hide_ar_add_fields(portal):\n logger.info(\"Hiding default fields from AR Add ...\")\n storage = get_manage_add_storage(portal)\n visibility = storage.get('visibility', {}).copy()\n ordered = storage.get('order', [])\n fields = list(set(visibility.keys() + ADD_AR_FIELDS_TO_HIDE + ordered))\n for field_name in fields:\n visibility[field_name] = field_name not in ADD_AR_FIELDS_TO_HIDE\n storage.update({\"visibility\": visibility})\n update_manage_add_storage(portal, storage)\n logger.info(\"Hiding default fields from AR Add [DONE]\")", "def non_removable_attrs(cls):\n if cls._non_removable_attrs is None:\n cls._non_removable_attrs = cls._extra_non_removable_attrs.copy()\n if cls._api_base:\n fields = inspect.getmembers(cls._api_base,\n lambda a: not inspect.isroutine(a))\n for name, field in fields:\n if getattr(field, 'mandatory', False):\n cls._non_removable_attrs.add('/%s' % name)\n return cls._non_removable_attrs", "def get_fields(self, exclude=('id',)):\n fields = {}\n for field in self._meta.fields:\n if not field.name in exclude and getattr(self, field.name):\n fields[field.name] = getattr(self, field.name)\n return fields", "def hide_confidential_fields(record, fields=_CONFIDENTIAL_FIELDS):\n if not(isinstance(record, dict) and fields):\n return record\n\n keys = list(record.keys())\n keys = (k for k in keys for f in fields if k == f or k.endswith('.'+f))\n\n return merge_dicts(record, {k: '********' for k in keys if record[k]})", "def test_missing_superclass_field(self):\n class TestSerializer(serializers.ModelSerializer):\n missing = serializers.ReadOnlyField()\n\n class Meta:\n model = RegularFieldsModel\n fields = '__all__'\n\n class ChildSerializer(TestSerializer):\n missing = serializers.ReadOnlyField()\n\n class Meta:\n model = RegularFieldsModel\n fields = ('auto_field',)\n\n ChildSerializer().fields", "def get_schema_fields(self, view):\n super().get_schema_fields(view)\n return getattr(view, \"filter_fields_schema\", [])", "def non_editable_metadata_fields(self):\r\n # We are not allowing editing of xblock tag and name fields at this time (for any component).\r\n return [XBlock.tags, XBlock.name]", "def _extra(self, request, node_dict):\n return {}", "def test_missing_field(self):\n class TestSerializer(serializers.ModelSerializer):\n missing = serializers.ReadOnlyField()\n\n class Meta:\n model = RegularFieldsModel\n fields = ('auto_field',)\n\n with self.assertRaises(AssertionError) as excinfo:\n TestSerializer().fields\n expected = (\n \"The field 'missing' was declared on serializer TestSerializer, \"\n \"but has not been included in the 'fields' option.\"\n )\n assert str(excinfo.exception) == expected", "def get_field_names(self, declared_fields, info):\n return 
super(BaseProductSerializer, self).get_field_names({}, info)", "def test_extra_field_when_not_requested(self):\n self.client.login(username=self.admin_user.username, password='test')\n response = self.verify_response(params={\n 'all_blocks': True,\n 'requested_fields': ['course_visibility'],\n })\n self.verify_response_block_dict(response)\n for block_data in response.data['blocks'].values():\n assert 'other_course_settings' not in block_data\n\n self.assert_in_iff(\n 'course_visibility',\n block_data,\n block_data['type'] == 'course'\n )", "def _excluded_properties(self):\n return ['tcex', 'kwargs', 'api_endpoint']", "def json_ignore_attrs():\n return ['metadata']" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve information about the given audit.
def get_one(self, audit): if self.from_audits: raise exception.OperationNotPermitted context = pecan.request.context rpc_audit = api_utils.get_resource('Audit', audit) policy.enforce(context, 'audit:get', rpc_audit, action='audit:get') return Audit.convert_with_links(rpc_audit)
[ "def get(self, audit_uuid):\n audit = AuditResource.get_by_id(audit_uuid=audit_uuid, withContacts=True, withScans=True)\n return audit", "def get_audit(self, session, query, auth_context):\n entity_name = query.get('entity')\n if not entity_name:\n raise exceptions.NotFound('entity')\n ent = self.schema.get_entity(entity_name)\n if not ent:\n raise exceptions.NotFound(entity_name)\n\n self.schema.acl_check_entity(ent, auth_context, 'r', None)\n return self.db.get_audit(query, session)", "async def getAudit(self, auditid) -> GetAuditResponse:\n\n print(\"get audit 1\" + auditid)\n res = await self.stub.GetAudit(\n GetAuditRequest(_id=auditid\n ))\n print(res.status, res.message, res.audit)\n return res", "def get_audit(self, query, session):\n raise NotImplementedError()", "def get_auditlog_entry_report_status(session):\n\n url = session.get_url('audit', 'main')\n\n req = re.Request('GET', url)\n\n return session.send_recv(req, 'Obtained audit log entry report status.')", "def get(self, audit_uuid):\n\n schema = AuditDownloadInputSchema()\n params, errors = schema.load(request.args)\n if errors:\n abort(400, errors)\n\n audit_query = AuditTable.select().where(AuditTable.uuid == audit_uuid)\n\n audit = audit_query.dicts()[0]\n output = audit[\"name\"] + \"\\n\" + audit[\"description\"] + \"\\n\\n\"\n\n scan_ids = []\n for scan in audit_query[0].scans.dicts():\n if scan[\"processed\"] is True:\n scan_ids.append(scan[\"id\"])\n\n results = (\n ResultTable.select(ResultTable, ScanTable, VulnTable)\n .join(ScanTable)\n .join(VulnTable, on=(ResultTable.oid == VulnTable.oid))\n .where(ResultTable.scan_id.in_(scan_ids))\n .order_by(ResultTable.scan_id)\n )\n\n with tempfile.TemporaryFile(\"r+\") as f:\n writer = csv.DictWriter(f, AuditDownload.AUDIT_CSV_COLUMNS, extrasaction=\"ignore\")\n writer.writeheader()\n for result in results.dicts():\n result[\"started_at\"] = result[\"started_at\"] + timedelta(minutes=params[\"tz_offset\"])\n result[\"ended_at\"] = result[\"ended_at\"] + timedelta(minutes=params[\"tz_offset\"])\n result[\"description\"] = Utils.format_openvas_description(result[\"description\"])\n writer.writerow(result)\n f.flush()\n f.seek(0)\n output += f.read()\n\n headers = {\"Content-Type\": \"text/csv\", \"Content-Disposition\": \"attachment\"}\n return Response(response=output, status=200, headers=headers)", "def describe_audit_finding(findingId=None):\n pass", "def describe_account_audit_configuration():\n pass", "def test_cons_get_audlog(self):\n\n # the function to be tested:\n resp = self.urihandler.get(\n self.hmc, '/api/console/operations/get-audit-log', True)\n\n assert resp == []", "def get(self):\n path = 'auditlogEntryReport'\n # status complete\n # download\n return self._session.get(path)", "def get_one(self, audit_template):\n if self.from_audit_templates:\n raise exception.OperationNotPermitted\n\n context = pecan.request.context\n rpc_audit_template = api_utils.get_resource('AuditTemplate',\n audit_template)\n policy.enforce(context, 'audit_template:get', rpc_audit_template,\n action='audit_template:get')\n\n return AuditTemplate.convert_with_links(rpc_audit_template)", "def netapi32_NetAuditRead(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"server\", \"service\", \"auditloghandle\", \"offset\", \"reserved1\", \"reserved2\", \"offsetflag\", \"bufptr\", \"prefmaxlen\", \"bytesread\", \"totalavailable\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def describe_scheduled_audit(scheduledAuditName=None):\n pass", 
"def audit(self):\n return Session.query(ConfigAudit).limit(100).all()", "def source_audit(self) -> SourceAudit:\n return self._source_audit", "def audit(self, auditors, guardInfo):\n\n report = self.getReport(auditors, guardInfo)\n # if self.newReportRate.observe(report is None):\n if report is None:\n report = self.createReport(auditors, guardInfo)\n self.putReport(auditors, guardInfo, report)\n return report", "def get_order_audit_trail(order_guid):\n return linnapi.orders.get_processed_order_audit_trail(order_guid)", "def getIncidentDetail(self, id):\n if self.verbose is True: print \"Getting incident details for id: %s\" % id \n return self.request(\"%s/incidents/%s/log_entries\" % (self.baseurl, id))", "def test_audit_log_view(self):\n initial_datetime = now()\n with reversion.create_revision():\n company = CompanyFactory(\n description='Initial desc',\n )\n\n reversion.set_comment('Initial')\n reversion.set_date_created(initial_datetime)\n reversion.set_user(self.user)\n\n changed_datetime = now()\n with reversion.create_revision():\n company.description = 'New desc'\n company.save()\n\n reversion.set_comment('Changed')\n reversion.set_date_created(changed_datetime)\n reversion.set_user(self.user)\n\n versions = Version.objects.get_for_object(company)\n version_id = versions[0].id\n url = reverse('api-v4:company:audit-item', kwargs={'pk': company.pk})\n\n response = self.api_client.get(url)\n response_data = response.json()['results']\n\n # No need to test the whole response\n assert len(response_data) == 1\n entry = response_data[0]\n\n assert entry['id'] == version_id\n assert entry['user']['name'] == self.user.name\n assert entry['comment'] == 'Changed'\n assert entry['timestamp'] == format_date_or_datetime(changed_datetime)\n assert entry['changes']['description'] == ['Initial desc', 'New desc']\n assert not set(EXCLUDED_BASE_MODEL_FIELDS) & entry['changes'].keys()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the request is authenticated. If API_AUTH_SECRET_HEADER_NAME is not in the request headers, return 401. If API_AUTH_SECRET_HEADER_NAME is in the request headers but incorrect, return 403. Otherwise, return None.
def is_authenticated_request(req: Request) -> Optional[Response]: if API_AUTH_SECRET_HEADER_NAME not in req.headers: return make_error_response(HTTP_STATUS_CODE.UNAUTHORIZED) if req.headers[API_AUTH_SECRET_HEADER_NAME] != API_AUTH_SECRET: return make_error_response(HTTP_STATUS_CODE.FORBIDDEN) return None
[ "def check_auth_header_secret():\n return True\n # bearer_header = request.headers.get('Authorization')\n # return 'bearer ' + app.config.get('SECRET_KEY') == bearer_header", "def test_authorization_header_not_present(self, _get_key_secret):\n request = Request(self.environ)\n request.body = self.get_request_body()\n response = self.xmodule.grade_handler(request, '')\n real_response = self.get_response_values(response)\n expected_response = {\n 'action': None,\n 'code_major': 'failure',\n 'description': 'OAuth verification error: Malformed authorization header',\n 'messageIdentifier': self.defaults['messageIdentifier'],\n }\n\n assert response.status_code == 200\n self.assertDictEqual(expected_response, real_response)", "def _auth_check():\n try:\n if not _is_authenticated():\n raise web.unauthorized()\n except:\n raise web.unauthorized()", "def check_secret(method_to_decorate):\n @wraps(method_to_decorate)\n def wrapper(self, request, *args, **kwargs):\n if 'Secret' not in request.headers or request.headers['Secret'] == API_SECRET:\n return method_to_decorate(self, request, *args, **kwargs)\n return HttpResponse(status=403)\n return wrapper", "def unauthorized():\n return HttpError(401)", "def requires_auth(fn):\n #@wraps(fn)\n def _wrap(*args, **kwargs):\n if 'Authorization' not in request.headers:\n # Unauthorized\n logger.warn(\"No token in header\")\n abort(401)\n return None\n\n\n logger.debug(\"Checking token...\")\n userid = validate_token(request.headers['Authorization'])\n if userid is None:\n logger.warn(\"Check returned FAIL!\")\n # Unauthorized\n abort(401)\n return None\n\n return fn(userid=userid, *args, **kwargs)\n return _wrap", "def check_authentication(self):\n try:\n cookies = os.environ['HTTP_COOKIE'].split('; ')\n except KeyError:\n cookies = []\n for c in cookies:\n prefix = Auth.AUTH_COOKIE_NAME + '='\n if (c.startswith(prefix) and\n self.is_authentication_token(c[len(prefix):])):\n return True\n print 'Status: 403 Forbidden'\n print 'Content-Type: application/json'\n print self.logout_headers()\n print json.JSONEncoder().encode({'error': 'Not authenticated.'})\n sys.exit(1)", "def test_authorization_header_empty(self, get_key_secret):\r\n request = Request(self.environ)\r\n request.authorization = \"bad authorization header\"\r\n request.body = self.get_request_body()\r\n response = self.xmodule.grade_handler(request, '')\r\n real_response = self.get_response_values(response)\r\n expected_response = {\r\n 'action': None,\r\n 'code_major': 'failure',\r\n 'description': 'OAuth verification error: Malformed authorization header',\r\n 'messageIdentifier': self.DEFAULTS['messageIdentifier'],\r\n }\r\n self.assertEqual(response.status_code, 200)\r\n self.assertDictEqual(expected_response, real_response)", "def check_unauthorized_response(response: HTTPResponse) -> bool:\n return response.status_code == 403", "def test_authorization_header_empty(self, _get_key_secret):\n request = Request(self.environ)\n request.authorization = \"bad authorization header\"\n request.body = self.get_request_body()\n response = self.xmodule.grade_handler(request, '')\n real_response = self.get_response_values(response)\n expected_response = {\n 'action': None,\n 'code_major': 'failure',\n 'description': 'OAuth verification error: Malformed authorization header',\n 'messageIdentifier': self.defaults['messageIdentifier'],\n }\n assert response.status_code == 200\n self.assertDictEqual(expected_response, real_response)", "def test_security_headers_on_apis(flask_app):\n rv = 
flask_app.get('api/v1/')\n headers = rv.headers\n assert headers.get('X-Frame-Options') == 'DENY'\n assert headers.get('X-Content-Type-Options') == 'nosniff'", "def should_skip_auth(flask_request):\n return flask_request.method in ['HEAD', 'OPTIONS']", "def stub_challenge_decider(environ, status, headers):\n return status.split(None, 1)[0] in (\"401\", \"403\")", "def _http_unauthorized(start_response):\n start_response(falcon.HTTP_401, [('Content-Length', '0')])\n return []", "async def handle_unauthorized(self, request: web.Request) -> web.Response:\n raise HTTPUnauthorized()", "def test_bad_credentials_mean_401_returned(self):\n sender = hawk_auth_sender()\n response = APIClient().get(\n test_url,\n content_type=\"\",\n HTTP_AUTHORIZATION=sender.request_header,\n HTTP_X_FORWARDED_FOR=\"1.2.3.4, 123.123.123.123\",\n )\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n error = {\"detail\": \"Incorrect authentication credentials.\"}\n assert response.json() == error", "def test_if_authentication_reused_401_returned(self, api_client, settings):\n settings.HAWK_CREDENTIALS = HAWK_CREDENTIALS\n auth = _auth_sender().request_header\n response_1 = api_client.get(\n _url(),\n content_type=\"\",\n HTTP_AUTHORIZATION=auth,\n )\n assert response_1.status_code == status.HTTP_200_OK\n\n response_2 = api_client.get(\n _url(),\n content_type=\"\",\n HTTP_AUTHORIZATION=auth,\n )\n assert response_2.status_code == status.HTTP_401_UNAUTHORIZED\n assert response_2.json() == {\"detail\": \"Incorrect authentication credentials.\"}", "def authenticate():\n return Response('Not Authorized', 401, {'WWW-Authenticate': 'Basic realm=\"api\"'})", "def test_get_401_with_bad_key(self):\n\n result = testing_app.get(f'{CURRENT_API_VER}/user/?API_KEY=dgdhfkj&user_id=1')\n assert result.status_code == 401" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the number of minutes represented by min to the currentDate input and return that new date timestamp
def addMinutes(self, currentDate:str, dateFormat:str, mins:int) -> str: inputDateTime = datetime.strptime(currentDate, dateFormat) nextTime = inputDateTime + timedelta(minutes=mins) return nextTime.strftime(dateFormat)
[ "def now_plus_minutes(num_minutes):\n now_seconds = time.time()\n now_date = datetime.datetime.fromtimestamp(now_seconds)\n now_plus = now_date + datetime.timedelta(minutes=num_minutes)\n return now_plus.isoformat(' ')", "def dt_min_ago(obj):\n return round(dt_sec_ago(obj=obj) / 60)", "def get_datetime_before_given_minutes(minutes):\n from datetime import datetime\n import datetime as dt\n date_obj_before_3min = datetime.now()- dt.timedelta(minutes=minutes)\n return date_obj_before_3min", "def next_run_date(self):\n return (\n datetime.combine(self.start_date, datetime.min.time(), tzinfo=pytz.UTC)\n if self.start_date and self.start_date > date.today()\n else None\n )", "def date_minute(date):\n return date.minute", "def get_min_time(date):\n if isinstance(date, datetime.date):\n date = datetime.datetime(date.year, date.month, date.day)\n\n min_date = date.replace(hour=0, minute=0, second=0, microsecond=0)\n return min_date", "def setMinute(self, *args):\n return _libsbml.Date_setMinute(self, *args)", "def adjust_to_start_date(self, execute_date, start_hours, start_min, interval):\n\n if start_hours is None and start_min is None:\n adjusted = execute_date\n else:\n if start_hours is None:\n start_hours = 0\n if start_min is None:\n start_min = 0\n\n execute_day = dt.datetime.combine(execute_date.utcnow().date(), dt.datetime.min.time())\n scheduled = (execute_day + dt.timedelta(hours=start_hours) + dt.timedelta(minutes=start_min))\n if scheduled > execute_date:\n scheduled = scheduled - dt.timedelta(days=1)\n\n interval = freq_to_timedelta(interval)\n periods = (execute_date - scheduled) // interval\n adjusted = scheduled + periods * interval\n\n return adjusted", "def getMinute(self):\n return _libsbml.Date_getMinute(self)", "def min_to_datetime(minutes: int) -> datetime:\n return datetime.utcfromtimestamp(minutes * 60)", "def date_earliest_date_time(date: datetime.date) -> datetime.datetime:\n\n return datetime.datetime.combine(\n date, datetime.datetime.min.time()\n )", "def _get_interval_start_time(self):\n current_time = timezone.now()\n minutes = self._get_time_interval_in_minutes()\n time_delta = datetime.timedelta(minutes=minutes)\n return current_time - time_delta", "def _calculateStartAndEndTimes(self, current_date):\n current_datetime = date_to_datetime(current_date)\n self.start_time = current_datetime + timedelta(minutes=self.start_min)\n self.end_time = current_datetime + timedelta(minutes=self.end_min)", "def multMinuteAlign(ts, min):\n\tintv = secInMinute * min\n\treturn int((ts / intv)) * intv", "def _compute_next_update(self):\n self.next_update = datetime.now() + timedelta(seconds=self.min_interval)", "def min_time(self, min_time: str):\n\n self._min_time = min_time", "def set_Minute(self, value):\n super(GetTimestampFromDateParametersInputSet, self)._set_input('Minute', value)", "def min_current(self):\n return Metric.amps(self._min_current or 0, 'min_current').to_unit(\n MILLIAMP)", "def seconds_til_next_minute(self):\n\n now = datetime.datetime.utcnow().second\n delay = 60 - now\n return delay" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints out ">>" to make the prompt look nice.
def prompt(): sys.stdout.write('>> ') sys.stdout.flush()
[ "def user_prompt():\n sys.stdout.write('\\nYou > ')\n sys.stdout.flush()", "def showPrompt(self):\r\n self.terminal.nextLine()\r\n self.terminal.write(self.ps[self.pn])", "def do_prompt(self, line):\n self.prompt = line + ': '", "def prompt(self, question):\n self.output(' ')\n self.output(question)\n self.output(self.parse_response(str(self.ui())))", "def prompt():\n if os.name == 'nt' and sys.modules.has_key('idlelib'):\n prompt = colored(\"Press ENTER to continue, or Q to quit...\", 'green')\n sys.stdout.write(prompt)\n sys.stdout.flush()\n c = getch()\n # Win IDLE doesn't do carriage return for '\\r'.\n sys.stdout.write('\\r')\n sys.stdout.flush()\n else:\n prompt = colored(\"Press Q to quit, any other key to continue...\", 'green')\n sys.stdout.write(prompt)\n sys.stdout.flush()\n c = getch()\n sys.stdout.write('\\r' + ' '*len(prompt) + '\\r')\n sys.stdout.flush()\n\n if 'q' in c or 'Q' in c:\n return True\n else:\n return False", "def print_cmd(cmd):\n padding = \" \" * 80\n sys.stdout.write(\"\\r\"+padding)\n sys.stdout.write(\"\\r\"+prompt+cmd)\n sys.stdout.flush()", "def show_prompt():\n msg = 'Please enter your selections.'\n print('*' * (len(msg) + 4))\n print(f'* {msg} *')\n print('*' * (len(msg) + 4))\n print()", "def prompt() -> None:\n\n username = click.prompt(\n text=\"Please enter a username\",\n type=click.STRING\n )\n password = click.prompt(\n text=\"Please enter a new password\",\n hide_input=True,\n confirmation_prompt=True\n )\n newsletter_subscription = click.prompt(\n text=\"Would you like to subscribe to our newsletter?\",\n default=False,\n type=click.BOOL\n )\n favorite_color=click.prompt(\n text=\"What is your favorite color?\",\n type=click.Choice([\"blue\", \"green\", \"yellow\"], case_sensitive=False)\n )\n\n click.echo(\n f\"Username: {username} | Password: {'*' * len(password)} | \"\n + f\"Newsletter: {newsletter_subscription} | Favorite color: \"\n + click.style(favorite_color, fg=favorite_color)\n )", "def display_prompt(self):\n if self.continue_session:\n\n print(self.cli_prompt)\n return self.continue_session\n\n else:\n return self.continue_session", "def main_doPrompt(self):\n # Don't have to worry about the rank, because we only ever run a single kernel.\n sys.stdout.write(self.command_prompt)\n sys.stdout.flush()\n return", "def write_output_prompt(self):\n # Use write, not print which adds an extra space.\n IPython.utils.io.Term.cout.write(self.output_sep)\n outprompt = str(self.prompt_out)\n if self.do_full_cache:\n IPython.utils.io.Term.cout.write(outprompt)", "def getpass(prompt):\n print(prompt, end='', flush=True)\n buf = ''\n while True:\n ch = getch()\n if 'Windows' == platform.system():\n if ch in [b'\\r']:\n print()\n break\n elif ch in [b'\\x03', b'\\x06']:\n print()\n raise KeyboardInterrupt\n else:\n buf += ch.decode('utf-8')\n print('*', end='', flush=True)\n else:\n if ch in ['\\r', '\\n']:\n print()\n break\n elif ch in ['\\x03', '\\x06']:\n print()\n raise KeyboardInterrupt\n else:\n buf += ch\n print('*', end='', flush=True)\n return buf", "def create_prompt(name):\r\n return '\\n[' + Fore.LIGHTBLACK_EX + name.upper() + Fore.WHITE + '] '", "def waitprompt(c):\n c.expect('\\n> ')\n time.sleep(0.1)", "def format_prompt(prompt: str) -> str:\n prompt_format = '\\n' * 2\n return (prompt_format + prompt)", "def auto_rewrite_input(self, cmd):\n if not self.show_rewritten_input:\n return\n\n # This is overridden in TerminalInteractiveShell to use fancy prompts\n print(\"------> \" + cmd)", "def main_menu_for_testing():\n 
print(PROMPT_TEXT)", "def print_if_interactive(self, msg):\n if self.interactive:\n stdout.write(\"\".join([\"\\r\", msg, \" \" * 20]))", "def help_shell(self):\n help_str = \"\"\"Execute a command as if at the OS prompt.\n\n Usage: shell cmd\"\"\"\n self.stdout.write(\"{}\\n\".format(help_str))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a video file path and a transcode profile, transcodes the file, and returns the transcoded file in bytes, along with ffmpeg's stderr output.
def transcode_segment(self, in_path: str, profile: TranscodeProfile ) -> Tuple[bytes, str, str]: out_filepath = f"/tmp/{uuid4()}.ts" transcode_command = [ "ffmpeg", "-i", in_path, "-vf", f"scale={profile.video_width}:-1", *profile.get_video_transcode_parameters(), "-bsf:v", "h264_mp4toannexb", *profile.get_audio_transcode_parameters(), "-copyts", "-muxdelay", "0", "-preset", profile.video_preset, out_filepath ] process = subprocess.Popen(transcode_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) process.wait() stderr = process.stderr.read().decode("utf-8") # Read new file back in and delete try: with open(out_filepath, "rb") as f: file_out_bytes = f.read() os.remove(out_filepath) except FileNotFoundError: raise TranscodeError("FFmpeg returned a non-zero code.\n" + stderr) return file_out_bytes, stderr, transcode_command
[ "def reencode(filepath, loglevel='panic'):\n try:\n import ffmpeg\n except ImportError:\n logger.error(\n 'Import Error! Cant import ffmpeg. '\n 'Annotations operations will be limited. import manually and fix errors')\n raise\n if not os.path.isfile(filepath):\n raise IOError('File doesnt exists: {}'.format(filepath))\n # re encode video without b frame and as mp4\n basename, ext = os.path.splitext(filepath)\n output_filepath = os.path.join(basename, os.path.basename(filepath).replace(ext, '.mp4'))\n if not os.path.isdir(os.path.dirname(output_filepath)):\n os.makedirs(os.path.dirname(output_filepath))\n try:\n stream = ffmpeg.input(filepath, **{'loglevel': loglevel}).output(output_filepath,\n **{'x264opts': 'bframes=0',\n 'f': 'mp4'})\n ffmpeg.overwrite_output(stream).run()\n except Exception as e:\n logger.exception('ffmpeg error in disassemble:')\n raise\n\n output_probe = Videos.get_info(output_filepath)\n start_time = eval(output_probe['streams'][0]['start_time'])\n fps = eval(output_probe['streams'][0]['avg_frame_rate'])\n has_b_frames = output_probe['streams'][0]['has_b_frames']\n start_frame = fps * start_time\n if start_time != 0:\n logger.warning('Video start_time is not 0!')\n if has_b_frames != 0:\n logger.warning('Video still has b frames!')\n return output_filepath", "def decode(src, dst):\n cmd = f\"\"\"\nffmpeg -y -hide_banner -nostats -v warning\n -i {src}\n -c:v rawvideo -an\n {dst}\n\"\"\"\n try:\n subprocess.check_call(shlex.split(cmd))\n except subprocess.CalledProcessError as err:\n raise DecodeFailed(f\"Failed to decode '{src}' - {err}\")", "def transcodetomp4(file_in, logger):\n\n import subprocess\n\n file_out = file_in.replace('.mkv', '.mp4')\n\n if os.path.isfile('/usr/bin/avconv'):\n\n convert_command = 'su securityspy -c \\\"/usr/bin/avconv -i \"{}\" -f mp4 -vcodec copy -acodec '.format(file_in) + \\\n 'libfaac -b:a 112k -ac 2 -y \"{}\"'.format(file_out) + \"\\\"\"\n\n try:\n subprocess.check_call(convert_command, shell=True)\n except subprocess.CalledProcessError:\n logger.error(\"The command to transcode: {} --- failed...\".format(convert_command))\n return file_in\n\n return file_out\n else:\n return file_in\n # fin", "def transcode(self) -> None:\n # Get source mediainfo to use in validation\n source_media_info = self.get_media_info(self.source)\n\n # Common ffmpeg flags\n ff = FFMPEG(overwrite=True, loglevel='repeat+level+info')\n # Init source file\n ff < SourceFile(self.source)\n # Scaling\n fc = ff.init_filter_complex()\n fc.video | Scale(**TRANSCODING_OPTIONS[SCALE]) | fc.get_video_dest(0)\n\n # set group of pixels length to segment size\n gop = math.floor(source_media_info[VIDEO_FRAME_RATE] * GOP_DURATION)\n # preserve source audio sampling rate\n arate = source_media_info[AUDIO_SAMPLING_RATE]\n # preserve original video FPS\n vrate = source_media_info[VIDEO_FRAME_RATE]\n # codecs, muxer and output path\n\n cv0 = VideoCodec(\n gop=gop,\n vrate=vrate,\n **TRANSCODING_OPTIONS[VIDEO_CODEC])\n ca0 = AudioCodec(\n arate=arate,\n **TRANSCODING_OPTIONS[AUDIO_CODEC])\n out0 = Muxer(self.destination, format='mp4')\n\n # Add output file to ffmpeg\n ff.add_output(out0, cv0, ca0)\n\n # Run ffmpeg\n self.run(ff)\n\n # Get result mediainfo\n dest_media_info = self.get_media_info(self.destination)\n\n # Validate ffmpeg result\n self.validate(source_media_info, dest_media_info)", "def extract_sequence(video_file, ss_from, ss_to, output_file):\n # reencoding is required to cut precisely because not all frames are keyframes!\n command = ['ffmpeg', '-i', video_file, 
'-ss', ss_from, '-c:v', 'libx264', \\\n '-pix_fmt', 'yuv420p', output_file]\n if ss_to:\n command[5:5] = ['-to', ss_to]\n subprocess.call(command)\n return output_file", "def convert(self, infile, outfile, opts, timeout=10, preopts=None, postopts=None):\n if os.name == 'nt':\n timeout = 0\n\n if not os.path.exists(infile):\n raise FFMpegError(\"Input file doesn't exist: \" + infile)\n\n cmds = [self.ffmpeg_path]\n if preopts:\n cmds.extend(preopts)\n cmds.extend(['-i', infile])\n\n # Move additional inputs to the front of the line\n for ind, command in enumerate(opts):\n if command == '-i':\n cmds.extend(['-i', opts[ind + 1]])\n del opts[ind]\n del opts[ind]\n\n cmds.extend(opts)\n if postopts:\n cmds.extend(postopts)\n cmds.extend(['-y', outfile])\n\n if timeout:\n def on_sigalrm(*_):\n signal.signal(signal.SIGALRM, signal.SIG_DFL)\n raise Exception('timed out while waiting for ffmpeg')\n\n signal.signal(signal.SIGALRM, on_sigalrm)\n\n try:\n p = self._spawn(cmds)\n except OSError:\n raise FFMpegError('Error while calling ffmpeg binary')\n\n yielded = False\n buf = ''\n total_output = ''\n pat = re.compile(r'time=([0-9.:]+) ')\n\n while True:\n if timeout:\n signal.alarm(timeout)\n\n ret = p.stderr.read(10)\n\n if timeout:\n signal.alarm(0)\n\n if not ret:\n # For small or very fast jobs, ffmpeg may never output a '\\r'. When EOF is reached, yield if we haven't yet.\n if not yielded:\n yielded = True\n yield 10\n break\n\n try:\n ret = ret.decode(console_encoding)\n except UnicodeDecodeError:\n try:\n ret = ret.decode(console_encoding, errors=\"ignore\")\n except:\n pass\n\n total_output += ret\n buf += ret\n if '\\r' in buf:\n line, buf = buf.split('\\r', 1)\n\n tmp = pat.findall(line)\n if len(tmp) == 1:\n timespec = tmp[0]\n if ':' in timespec:\n timecode = 0\n for part in timespec.split(':'):\n timecode = 60 * timecode + float(part)\n else:\n timecode = float(tmp[0])\n yielded = True\n yield timecode\n\n if timeout:\n signal.signal(signal.SIGALRM, signal.SIG_DFL)\n\n p.communicate() # wait for process to exit\n\n if total_output == '':\n raise FFMpegError('Error while calling ffmpeg binary')\n\n cmd = ' '.join(cmds)\n if '\\n' in total_output:\n line = total_output.split('\\n')[-2]\n\n if line.startswith('Received signal'):\n # Received signal 15: terminating.\n raise FFMpegConvertError(line.split(':')[0], cmd, total_output, pid=p.pid)\n if line.startswith(infile + ': '):\n err = line[len(infile) + 2:]\n raise FFMpegConvertError('Encoding error', cmd, total_output,\n err, pid=p.pid)\n if line.startswith('Error while '):\n raise FFMpegConvertError('Encoding error', cmd, total_output,\n line, pid=p.pid)\n if not yielded:\n raise FFMpegConvertError('Unknown ffmpeg error', cmd,\n total_output, line, pid=p.pid)\n if p.returncode != 0:\n raise FFMpegConvertError('Exited with code %d' % p.returncode, cmd,\n total_output, pid=p.pid)\n\n return outfile", "def process_video(input_file, output_file):\n\n exists = os.path.isfile(output_file)\n if exists:\n # If the files already exists.\n # it is not necessary to perform\n # processing again\n return 0, None, None\n\n logging.info(f'Start processing {input_file} to {output_file}')\n\n # Take time of video processing step\n \n command = f'ffmpeg -i {input_file} {output_file}'\n \n process = subprocess.run(\n args=command,\n stdout=subprocess.PIPE, \n stderr=subprocess.PIPE, \n shell=True, \n universal_newlines=True,\n )\n\n success = 0\n code = process.returncode\n out = process.stdout\n err = process.stderr\n\n if code == success:\n # Video 
processing was converted correctly\n logging.info(f'End processing file {input_file}')\n else:\n # An erro has ocurred during videos processing\n logging.error(f'{out} {err} {input_file}')\n\n return process.returncode, process.stdout, process.stderr", "def get_transcoder(*, path=None):\n\n\ttranscoders = ['ffmpeg', 'avconv']\n\ttranscoder_details = {}\n\n\tfor transcoder in transcoders:\n\t\tcommand_path = shutil.which(transcoder, path=path)\n\t\tif command_path is None:\n\t\t\ttranscoder_details[transcoder] = 'Not installed.'\n\t\t\tcontinue\n\n\t\tstdout = subprocess.run(\n\t\t\t[command_path, '-codecs'],\n\t\t\tstdout=subprocess.PIPE,\n\t\t\tstderr=subprocess.DEVNULL,\n\t\t\tuniversal_newlines=True,\n\t\t).stdout\n\n\t\tmp3_encoding_support = (\n\t\t\t'libmp3lame' in stdout\n\t\t\tand 'disable-libmp3lame' not in stdout\n\t\t)\n\n\t\tif mp3_encoding_support:\n\t\t\tbreak\n\t\telse:\n\t\t\ttranscoder_details[transcoder] = \"No MP3 encoding support.\"\n\telse:\n\t\traise ValueError(\n\t\t\tf\"ffmpeg or avconv must be in the path and support mp3 encoding.\"\n\t\t\tf\"\\nDetails: {transcoder_details}\"\n\t\t)\n\n\treturn command_path", "def __run(srcfile):\n\n # Test out with:\n # probe() {\n # ffprobe -v quiet -hide_banner -of json -print_format json -show_format -show_streams -i \"$1\"\n # }\n\n cp = subprocess.run([BIN_FFPROBE, \"-v\", \"quiet\", \"-hide_banner\", \"-of\",\n \"json\", \"-print_format\", \"json\", \"-show_format\", \"-show_streams\", \"-i\", srcfile],\n check=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n return cp.stdout.decode('utf-8')", "def compress_video(video_file_path):\n \"\"\"\n :param video_file_path:\n :return:\n \"\"\"\n log.info(\"Compression video...\")\n command = [\"ffmpeg\", \"-i\", video_file_path, \"-vcodec\", \"libx264\", \"-crf\", \"20\", video_file_path + \".out.mp4\"]\n execute(command, True)\n os.rename(video_file_path + \".out.mp4\", video_file_path)", "def _decode(item):\n tivo_filename = item.filename()\n logger.info(\"Decoding %s\" % tivo_filename)\n\n mpeg_filename = item.filename(ext=\"mpg\")\n videos_dir = item.vdir()\n\n p = subprocess.Popen([\"/usr/local/bin/tivodecode\", \"--mak\", os.environ[\"MAK\"], \n \"--out\", mpeg_filename, tivo_filename], cwd=videos_dir,\n stderr=subprocess.STDOUT, stdout=subprocess.PIPE)\n rc = p.wait()\n\n logger.info(\"tivodecode returned %d\" % rc)\n logger.info(\"tivodecode output: '%s'\" % p.stdout.read())\n if rc == 0:\n # success!\n item.decoded = True\n item.save()\n else:\n raise Exception(\"Tivodecode failed on file '%s' with rc %d\" %\n (tivo_filename, rc))", "def process_video(proc_state):\n entry = proc_state.entry\n workbench = proc_state.workbench\n video_config = mgg.global_config['media_type:mediagoblin.media_types.video']\n\n queued_filepath = entry.queued_media_file\n queued_filename = proc_state.get_queued_filename()\n name_builder = FilenameBuilder(queued_filename)\n\n medium_filepath = create_pub_filepath(\n entry, name_builder.fill('{basename}-640p.webm'))\n\n thumbnail_filepath = create_pub_filepath(\n entry, name_builder.fill('{basename}.thumbnail.jpg'))\n\n # Create a temporary file for the video destination (cleaned up with workbench)\n tmp_dst = NamedTemporaryFile(dir=workbench.dir, delete=False)\n with tmp_dst:\n # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square\n progress_callback = ProgressCallback(entry)\n\n dimensions = (\n mgg.global_config['media:medium']['max_width'],\n mgg.global_config['media:medium']['max_height'])\n\n # Extract metadata 
and keep a record of it\n metadata = transcoders.VideoTranscoder().discover(queued_filename)\n store_metadata(entry, metadata)\n\n # Figure out whether or not we need to transcode this video or\n # if we can skip it\n if skip_transcode(metadata):\n _log.debug('Skipping transcoding')\n\n dst_dimensions = metadata['videowidth'], metadata['videoheight']\n\n # Push original file to public storage\n _log.debug('Saving original...')\n proc_state.copy_original(queued_filepath[-1])\n\n did_transcode = False\n else:\n transcoder = transcoders.VideoTranscoder()\n\n transcoder.transcode(queued_filename, tmp_dst.name,\n vp8_quality=video_config['vp8_quality'],\n vp8_threads=video_config['vp8_threads'],\n vorbis_quality=video_config['vorbis_quality'],\n progress_callback=progress_callback,\n dimensions=dimensions)\n\n dst_dimensions = transcoder.dst_data.videowidth,\\\n transcoder.dst_data.videoheight\n\n # Push transcoded video to public storage\n _log.debug('Saving medium...')\n mgg.public_store.copy_local_to_storage(tmp_dst.name, medium_filepath)\n _log.debug('Saved medium')\n\n entry.media_files['webm_640'] = medium_filepath\n\n did_transcode = True\n\n # Save the width and height of the transcoded video\n entry.media_data_init(\n width=dst_dimensions[0],\n height=dst_dimensions[1])\n\n # Temporary file for the video thumbnail (cleaned up with workbench)\n tmp_thumb = NamedTemporaryFile(dir=workbench.dir, suffix='.jpg', delete=False)\n\n with tmp_thumb:\n # Create a thumbnail.jpg that fits in a 180x180 square\n transcoders.VideoThumbnailerMarkII(\n queued_filename,\n tmp_thumb.name,\n 180)\n\n # Push the thumbnail to public storage\n _log.debug('Saving thumbnail...')\n mgg.public_store.copy_local_to_storage(tmp_thumb.name, thumbnail_filepath)\n entry.media_files['thumb'] = thumbnail_filepath\n\n # save the original... but only if we did a transcoding\n # (if we skipped transcoding and just kept the original anyway as the main\n # media, then why would we save the original twice?)\n if video_config['keep_original'] and did_transcode:\n # Push original file to public storage\n _log.debug('Saving original...')\n proc_state.copy_original(queued_filepath[-1])\n\n # Remove queued media file from storage and database\n proc_state.delete_queue_file()", "def ffmpeg_run(\n args: List[str],\n stdin: Optional[bytes] = None,\n timeout: Optional[int] = None,\n) -> bytes:\n ffmpeg_path = 'ffmpeg'\n try:\n cmd_args = [ffmpeg_path] + args\n return subprocess.run(\n cmd_args,\n check=True,\n input=stdin,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n timeout=timeout,\n ).stdout\n except subprocess.CalledProcessError as e:\n raise ValueError(\n f'Command {e.cmd} returned error code {e.returncode}:\\n'\n f'stdout={e.stdout.decode(\"utf-8\")}\\n'\n f'stderr={e.stderr.decode(\"utf-8\")}\\n'\n ) from e\n except FileNotFoundError as e:\n raise FileNotFoundError(\n 'It seems that ffmpeg is not installed on the system. Please follow '\n 'the instrutions at https://ffmpeg.org/. 
'\n f'Original exception: {e}'\n ) from e", "def run_ffmpeg(self, task):\n ffmpeg_file = task['file'].replace(\".ts\", \".mp4\")\n\n cmd = [\n ['ffmpeg', '-nostats', '-loglevel', 'quiet', '-y', '-i',\n task['file']],\n self.config['ffmpeg-flags'].split(),\n [ffmpeg_file]\n ]\n cmd = [item for sublist in cmd for item in sublist]\n\n ffmpeg_process = subprocess.Popen(cmd,\n shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n self.tasks.append({\n 'id': ffmpeg_process.pid,\n 'model': task['model'],\n 'process': ffmpeg_process,\n 'type': 'ffmpeg',\n 'file': task['file'],\n 'ffmpeg_file': ffmpeg_file\n })\n\n log(\"Encode START: \", self, 20, \"{}:{}\".format(ffmpeg_process.pid,\n task['model']))", "def frame_dump(filename, frametime, output_filename='out.png', \n meth='ffmpeg fast', subseek_cushion=20., verbose=False, dry_run=False,\n very_verbose=False):\n \n if meth == 'mplayer':\n raise ValueError(\"mplayer not supported\")\n elif meth == 'ffmpeg best':\n # Break the seek into a coarse and a fine\n coarse = np.max([0, frametime - subseek_cushion])\n fine = frametime - coarse\n syscall = 'ffmpeg -y -ss %r -i %s -ss %r -vframes 1 %s' % (\n coarse, filename, fine, output_filename)\n elif meth == 'ffmpeg accurate':\n syscall = 'ffmpeg -y -i %s -ss %r -vframes 1 %s' % (\n filename, frametime, output_filename)\n elif meth == 'ffmpeg fast':\n syscall = 'ffmpeg -y -ss %r -i %s -vframes 1 %s' % (\n frametime, filename, output_filename)\n \n if verbose:\n print(syscall)\n if not dry_run:\n #os.system(syscall)\n syscall_l = syscall.split(' ')\n syscall_result = subprocess.check_output(syscall_l, \n stderr=subprocess.STDOUT)\n if very_verbose:\n print(syscall_result)", "def check_video_format(movie_file, desired_format='.mp4', original_format='.avi'):\n\n if not os.path.isfile(movie_file+original_format):\n print 'Error. avi file does not exist:'+movie_file+'.avi'\n if not os.path.isfile(movie_file+desired_format):\n cmd = ['ffmpeg']\n cmd += ['-i', movie_file+original_format]\n cmd += [movie_file+desired_format]\n cmd_string = ''.join([\"%s \" % el for el in cmd])\n #print '-->Running: ', cmd_string\n p = subprocess.Popen(cmd, shell=False)\n p.wait()", "def demux_audio(media_file, output_file):\n subprocess.call(['ffmpeg', '-i', media_file, '-vn', '-acodec', 'copy', output_file])\n return output_file", "def from_video_to_text(video_path, source='youtube') -> text.Text:\r\n\r\n try:\r\n if source == 'youtube':\r\n ydl_opts = {\r\n 'format': 'bestaudio/best',\r\n 'postprocessors': [{\r\n 'key': 'FFmpegExtractAudio',\r\n 'preferredcodec': 'mp3',\r\n 'preferredquality': '192',\r\n }]\r\n }\r\n\r\n output_file_name = None\r\n with yt.YoutubeDL(ydl_opts) as ydl:\r\n extracted_info = ydl.extract_info(url=video_path, download=False)\r\n output_file_name = ydl.prepare_filename(extracted_info)\r\n ydl.download([video_path])\r\n\r\n output_file_name = output_file_name.split('.', 1)[0] + \".mp3\"\r\n\r\n return convert_sound_to_text(output_file_name)\r\n\r\n elif source == 'file':\r\n return\r\n except:\r\n print('This functionality needs a ffmpeg decoder, please install it.')\r\n print('Ubuntu/debian -> sudo apt-get install ffmpeg')\r\n print('MacOS -> brew install ffmpeg')\r\n print('Windows using chocolatey -> choco install ffmpeg')", "def _handle_decode(args):\n try:\n print(protool.decode(args.profile))\n except Exception as ex:\n print(f\"Could not decode: {ex}\", file=sys.stderr)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get duration from an ffmpeg stderr dump.
def parse_duration_from_stderr(self, stderr: str) -> float: pattern = "Duration: (\\d\\d):(\\d\\d):(\\d\\d\\.\\d\\d)" pattern = re.compile(pattern) result = pattern.search(stderr) if result is None: return None # Parse result hours = float(result.group(1)) minutes = float(result.group(2)) seconds = float(result.group(3)) duration = ( (hours * 60 * 60) + (minutes * 60) + seconds) return duration
[ "def get_movie_length(file):\n\n len = 0\n p = subprocess.Popen(\"ffmpeg -i '\" + file + \"'\", shell=True,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n for line in p.stderr:\n if \"Duration:\" in line:\n h,m,s = (float(x) for x in re.findall(\"Duration: (\\d+):(\\d+):(\\d+\\.\\d+)\", line)[0])\n return h*3600+m*60+s\n break\n return 0", "def get_video_duration(video_file):\n return float(FFProbe(video_file).video[0].duration)", "def get_length(self):\r\n if self.path:\r\n #calls a process that reads video data\r\n process = SUB.Popen(['ffmpeg', '-i', self.path], stdout=SUB.PIPE, stderr=SUB.STDOUT)\r\n #gets process output\r\n stdout, stderr = process.communicate()\r\n #gets needed data\r\n matches = re.search(r\"Duration:\\s{1}(?P<hours>\\d+?):(?P<minutes>\\d+?):(?P<seconds>\\d+\\.\\d+?),\", stdout, re.DOTALL).groupdict()\r\n\r\n return (int(matches['hours']) *3600 + int(matches['minutes']) * 60 + int(float(matches['seconds'])))", "def parse_stderr(stderr):\n data = {\n 'audio_bitrate': None,\n 'video_bitrate': None,\n 'file_bitrate': None,\n 'video_size': None,\n 'duration': None,\n }\n regexp = r'Stream.*(?P<stream_type>Audio|Video):.*?(?P<bitrate>\\d+) kb/s'\n for match in re.finditer(regexp, stderr):\n stream_type = match.group('stream_type').lower()\n key = '%s_bitrate' % stream_type\n if not data[key]:\n data[key] = int(match.group('bitrate')) * 1000\n\n regexp = '\\n\\s+Duration:.*?, bitrate: (?P<bitrate>\\d+) kb/s\\n'\n match = re.search(regexp, stderr)\n if match:\n data['file_bitrate'] = int(match.group('bitrate')) * 1000\n\n regexp = r'Stream.*Video:.*?\\s(?P<size>\\d+x\\d+)(?:,|\\n)'\n match = re.search(regexp, stderr)\n if match:\n data['video_size'] = match.group('size')\n\n regexp = r'Duration: (?P<duration>\\d+:\\d+:\\d+\\.\\d+)'\n match = re.search(regexp, stderr)\n if match:\n try:\n d = datetime.strptime(match.group('duration'), '%H:%M:%S.%f')\n duration = timedelta(\n hours=d.hour, minutes=d.minute,\n seconds=d.second, microseconds=d.microsecond)\n data['duration'] = duration.total_seconds()\n except:\n pass\n return data", "def get_media_file_duration(self, filename):\n\n if filename.startswith('file://'):\n filename = filename[7:]\n\n result = subprocess.Popen(\n [\"ffprobe\", filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT\n )\n\n return functools.reduce(\n lambda t, t_i: t + t_i,\n [\n float(t) * pow(60, i)\n for (i, t) in enumerate(\n re.search(\n r'^Duration:\\s*([^,]+)',\n [\n x.decode()\n for x in result.stdout.readlines()\n if \"Duration\" in x.decode()\n ]\n .pop()\n .strip(),\n )\n .group(1)\n .split(':')[::-1]\n )\n ],\n )", "def getDuration(video_path):\n return moviepy.editor.VideoFileClip(video_path).duration", "def get_duration(video_file):\n info = get_info(video_file)\n return float(info['ID_LENGTH'])", "def parse_video_duration(duration):\n return parse_duration(duration)", "def get_duration(vid_file, mpeg=False, fps=30):\n try:\n cmd = 'ffprobe -i {} -show_entries format=duration -v quiet -of csv=\"p=0\"'.format(vid_file)\n output = subprocess.check_output(\n cmd,\n shell=True, # Let this run in the shell\n stderr=subprocess.STDOUT\n )\n length = float(output)\n mpeg = True\n except:\n cap = cv2.VideoCapture(vid_file)\n (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')\n if int(major_ver) < 3:\n fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)\n frame_count = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))\n else:\n fps = cap.get(cv2.CAP_PROP_FPS)\n frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n length = 
frame_count/fps\n mpeg = False\n return length, mpeg, fps", "def duration_seconds(self):\n duration = 0.0\n if self.is_video() or self.is_audio():\n if self.__dict__['duration']:\n try:\n duration = float(self.__dict__['duration'])\n except ValueError:\n raise FFProbeError('None numeric duration')\n return duration", "def get_duration(line):\n duration = re.search(' in (\\d+)\\.\\d+s', line)\n if duration:\n return int(duration.group(1))", "def duration():\r\n elapsed_time, duration = video_time()\r\n return duration", "def __get_duration_from_string(cls, dstr):\n mtch = re.search(r'^(\\d+)$', dstr)\n if mtch is not None:\n return int(mtch.group(1))\n mtch = re.search(r'^(\\d+)s(?:ec(?:s)?)?$', dstr)\n if mtch is not None:\n return int(mtch.group(1))\n mtch = re.search(r'^(\\d+)m(?:in(?:s)?)?$', dstr)\n if mtch is not None:\n return int(mtch.group(1)) * 60\n mtch = re.search(r'^(\\d+)h(?:r(?:s)?)?$', dstr)\n if mtch is not None:\n return int(mtch.group(1)) * 3600\n mtch = re.search(r'^(\\d+)d(?:ay(?:s)?)?$', dstr)\n if mtch is not None:\n return int(mtch.group(1)) * 86400\n raise FlashFileException(('String \"%s\" is not a known duration'\n ' format. Try 30sec, 10min, 2days etc.') %\n str(dstr))", "def find_duration(data):\n t = [i[0] for i in data]\n duration = t[len(t) - 1] - t[0]\n logging.info('Calculated duration: %s', duration)\n return duration", "def get_data_duration(meta_file_name):\n try:\n with open(meta_file_name) as meta_file:\n info = kaa_metadata.parse(meta_file)\n except IOError:\n config_pytomo.LOG.error('Unable to open tempfile for kaa_metadata')\n\n if (info and 'length' in info):\n data_duration = info.length\n return data_duration", "def _duration_to_secs(duration):\n secs = int(duration[:-1])\n if duration[-1] == 's':\n pass\n elif duration[-1] == 'm':\n secs *= 60\n elif duration[-1] == 'h':\n secs *= 60 * 60\n elif duration[-1] == 'd':\n secs *= 60 * 60 * 24\n else:\n raise ValueError('Invalid duration: %r' % duration)\n\n return secs", "def getSoundFileDuration(fn):\n audiofile = wave.open(fn, \"r\")\n\n params = audiofile.getparams()\n framerate = params[2]\n nframes = params[3]\n\n duration = float(nframes) / framerate\n return duration", "def parse_duration(duration):\n match = re.match(r'P\\d+Y\\d+M\\d+DT(\\d+)H(\\d+)M(\\d+.?\\d*)S', duration)\n hms = match.groups()\n return int(hms[0]) * 3600 + int(hms[1]) * 60 + float(hms[2])", "def findDuration(self, nextTimestamp):\n elapsedMs = nextTimestamp.totalms - self.totalms\n return abs(elapsedMs)\n # durationTimestamp = self.stringifyTimestamp(hour, min, sec, ms)\n # return durationTimestamp" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save this entry in the SugarCRM server. If the 'id' field is blank, it creates a new entry and sets the 'id' value.
def save(self): # If 'id' wasn't blank, it's added to the list of dirty fields; this # way the entry will be updated in the SugarCRM connection. if self['id'] != '': self._dirty_fields.append('id') # nvl is the name_value_list, which has the list of attributes. nvl = [] for field in set(self._dirty_fields): # Define an individual name_value record. nv = {} nv['name'] = field nv['value'] = self[field] nvl.append(nv) # Use the API's set_entry to update the entry in SugarCRM. result = self._module._connection.set_entry(self._module._name, nvl) self._fields['id'] = result['id'] self._dirty_fields = [] return True
[ "def save(self):\n if self.id is None:\n self._insert()\n else:\n self._update()", "def save(self):\n if not self.id:\n self.id = uuid4()\n DataStore.add_instance(self)", "def save(self):\n session = _get_session()\n session.add(self)\n session.commit()", "def save(self):\n try:\n edit_id = redis_api.store_edit(\n self.content_id, self.edit_text, self.edit_rationale,\n self.content_part, self.part_id, self.timestamp,\n self.start_timestamp, self.author_type,\n self.author.user_id if self.author else None)\n except:\n raise\n else:\n self.validation_status = \"validating\"\n self.edit_id = edit_id", "def save(self):\n # Get information.\n name = self.ids['name'].text.strip()\n phone = self.ids['phone'].text.strip()\n\n # Check if a name was specified.\n if not name:\n ErrorPopup('É necessário especificar um novo nome para o cliente!').open()\n return\n\n # Update the client.\n self.client.name = name\n self.client.phone = phone\n app = App.get_running_app()\n app.session.commit()\n\n # Update information of the current screen.\n app.consult.sm.client = self.client\n app.consult.sm.current_screen.on_pre_enter()\n\n # Show that the operation succeeded.\n SuccessPopup(\"Cliente editado com sucesso!\").open()\n\n # Close the popup.\n self.dismiss()", "def save(self) -> None:\n try:\n db.session.add(self)\n db.session.flush()\n except IntegrityError as ite:\n db.session.rollback()\n logger.error(\n \"Error occurred when saving Tracker to session: id {}\".format(\n self.id),\n ite.with_traceback(ite.__traceback__))", "def save(self):\n if self.uid != Calendar.EMPTY_UID:\n self.put()", "def save(self):\n self.save_to_db()\n if hasattr(self, 'id'):\n self.status_code = 201\n return True\n else:\n self.errors['messages'].append(\"DataBase Error, Please Try again\")\n self.status_code = 500\n return False", "def update(self):\n self.validate_id('Sorry unable to update this contact as no ID value'\n ' has been defined.')\n response = connection.put(\n '{}/{}'.format(self.end_point, self.id),\n self.param_dict()\n )\n self._update_values(response)", "async def monsave(self, ctx, *, entry):\r\n\r\n self.connect()\r\n discord_id = str(ctx.message.author.id)\r\n\r\n self.database.entries.insert_one({\r\n \"discord_id\": discord_id,\r\n \"entry\": entry\r\n })\r\n\r\n await ctx.send('You have successfully saved this entry in the Viking database.')", "def save(self):\n if self._deleted:\n raise DBObjectSaveError, \"Cannot save a previously deleted object.\"\n\n def _save(isValid):\n if self.id is None and isValid:\n return self._create()\n elif isValid:\n return self._update()\n return self\n return self.isValid().addCallback(_save)", "def save(self):\r\n\t\tif not self._modified:\r\n\t\t\treturn\r\n\r\n\t\tupdated_fields = []\r\n\t\tfor k,v in self.__fields__.items():\r\n\t\t\tif self.__dict__[k].modified and self.__dict__[k] != self.primary_key:\r\n\t\t\t\tupdated_fields.append((k,self.__dict__[k]))\r\n\r\n\t\tdatabases.database.modify(self.__class__.__name__,self.primary_key,updated_fields)", "def save(self) -> str:\n datagrid_json = self.__as_json()\n if self.id_:\n response = GsSession.current._put(f'{API}/{self.id_}', datagrid_json, request_headers=DATAGRID_HEADERS)\n else:\n response = GsSession.current._post(f'{API}', datagrid_json, request_headers=DATAGRID_HEADERS)\n self.id_ = response['id']\n return DataGrid.from_dict(response).id_", "def save(id):\n entry = FEED_DATA[id]\n return db.save_news(entry)", "def save(self, *args, **kwargs):\n\n if self.id:\n firstcreation = False\n else:\n 
firstcreation = True\n\n #common save functionality for all models\n self._save_base()\n self.save_default(firstcreation)\n super(ComicSiteModel,self).save()", "def save_data(self):\n db.session.add(self)\n db.session.commit( )", "def _save(self):\n collection = JSONClientValidated('assessment',\n collection='AssessmentSection',\n runtime=self._runtime)\n if '_id' in self._my_map: # This is the first time:\n collection.save(self._my_map)\n else:\n insert_result = collection.insert_one(self._my_map)\n self._my_map = collection.find_one({'_id': insert_result.inserted_id}) # To get the _id", "def save(self):\n Log.debug('Saving object to database')\n cursor = Database.db_connection.cursor()\n # test if object is already in database\n try:\n old_object = self.__class__.get_exactly(self.c_id)\n if self.c_pk != old_object.c_pk:\n raise InstanceAlreadyExists('That object already exists in the'\n ' database.',\n old_object)\n except InstanceNotFoundError:\n # just go on, nothing to do\n pass\n try:\n if not self.c_pk == -1:\n raise sqlite3.IntegrityError()\n sql, values = self._insert_query()\n Log.debug(\"Database.save(): %s as insert with %s as dict\" %\n (sql, values))\n cursor.execute(sql, values)\n Database.db_connection.commit()\n self.c_pk = cursor.lastrowid\n except sqlite3.IntegrityError:\n sql, values = self._update_query()\n# Log.debug(\"Database.save(): %s is old object\" %\n# self.__class__.get_exactly(self.c_pk, 'c_pk'))\n Log.debug(\"Database.save(): %s as update query with %s as dict\" %\n (sql, values))\n cursor.execute(sql, values)\n Database.db_connection.commit()\n except sqlite3.OperationalError:\n raise NoSuchTable()", "def save(self):\r\n\r\n if self.id:\r\n super().save()\r\n else:\r\n none_fields = [\r\n name\r\n for name in [\"genes\", \"source\", \"species\"]\r\n if getattr(self, name, None) is None\r\n ]\r\n\r\n if none_fields:\r\n msg = \"Fields {} must not be none\".format(\", \".join(none_fields))\r\n raise ValueError(msg)\r\n\r\n data = {\r\n \"process\": {\"slug\": \"create-geneset\"},\r\n \"input\": {\r\n \"genes\": list(self.genes),\r\n \"source\": self.source,\r\n \"species\": self.species,\r\n },\r\n }\r\n if self.name:\r\n data[\"name\"] = self.name\r\n if self.collection:\r\n data[\"collection\"] = {\"id\": get_collection_id(self.collection)}\r\n\r\n model_data = self.api.post(data)\r\n tmp_genes, tmp_source, tmp_species = self.genes, self.source, self.species\r\n self._update_fields(model_data)\r\n # Since there is no output values in model_data\r\n # the original genes, source and species values gets overwritten\r\n # so we set them back here\r\n self._genes, self._source, self._species = (\r\n tmp_genes,\r\n tmp_source,\r\n tmp_species,\r\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the related entries in another module.
def get_related(self, module): connection = self._module._connection result = connection.get_relationships(self._module._name, self['id'], module._name.lower(), '', ['id']) entries = [] for elem in result['entry_list']: entry = SugarEntry(module) entry._fields['id'] = elem['id'] entries.append(entry) return entries
[ "def get_modules_and_revisions(self, ctx):", "def related_items(self):\r\n return self._related_items", "def get_related(page):\n related = []\n entry = Entry.get_for_model(page)\n\n if entry:\n related = entry.related\n\n return related", "def relationships(self):", "def related_intenions(self):\n return self._related_intenions", "def get_related_objects(self):\n result = []\n if self['name'] != None:\n tmp = ObjectDefinition.objects.filter(use__has_field=self['name'], object_type=self['object_type'])\n for i in tmp: result.append(i)\n return result", "def associated_objects(self):\n return self._associated_objects", "def all(self):\n return [entry_rel.rel for entry_rel in iter(self)]", "def _parts(self):\n return [part for part in Package.__walkparts(self.__relationships)]", "def MODULES(self):\n pass", "def related_data_sets(self):\n return self._related_data_sets", "def get_related_with_scores(page):\n related = []\n entry = Entry.get_for_model(page)\n\n if entry:\n related = entry.related_with_scores\n\n return related", "def read_questions(mod_nm):\n recs = None\n if mod_nm is None:\n recs = Question.objects.values()\n else:\n recs = Question.objects.filter(module=mod_nm).values()\n return recs", "def getModuleList():\n try:\n return (Module.objects.all().values_list('moduleName',flat=True)\n .order_by('moduleName'))\n except:\n return None", "def get_relations() -> List[Dict[str, str]]:\n return configuration.relations", "def get_relationships(self):\n raise NotImplementedError(\n 'operation get_relationships(...) not yet implemented')", "def getAncestors():", "def relations(cls):\n return {}", "def get_related_indicators(self):\n # imported here to prevent circular deps\n from fn_threatq.threatqsdk.indicator import Indicator\n return self.get_related_objects(Indicator)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Partition list ``l`` in ``K`` partitions. Examples >>> l = [0, 1, 2] >>> list(clusters(l, K=3)) [[[0], [1], [2]], [[], [0, 1], [2]], [[], [1], [0, 2]], [[0], [], [1, 2]], [[], [0], [1, 2]], [[], [], [0, 1, 2]]] >>> list(clusters(l, K=2)) [[[0, 1], [2]], [[1], [0, 2]], [[0], [1, 2]], [[], [0, 1, 2]]] >>> list(clusters(l, K=1)) [[[0, 1, 2]]]
def clusters(l, K): # noqa if l: prev = None for t in clusters(l[1:], K): tup = sorted(t) if tup != prev: prev = tup for i in range(K): yield tup[:i] + [ [l[0]] + tup[i], ] + tup[i + 1 :] else: yield [[] for _ in range(K)]
[ "def partition_to_k(layerlist, k, order=False):\n for each_partition_candidate in partition(layerlist):\n if len(each_partition_candidate) == k:\n if not order:\n yield each_partition_candidate\n else:\n for enum_item in permutations(each_partition_candidate):\n yield enum_item", "def split_list(l, k):\n n = len(l)\n\n d = n // k\n r = n % k\n\n offset = 0\n for i in range(k):\n if i < r:\n size = d + 1\n else:\n size = d\n\n yield l[offset:offset+size]\n offset += size", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n # position initial clusters at the location of clusters with largest populations\n cluster_list_copy = sorted(cluster_list,\n reverse = True,\n key=lambda cluster: cluster.total_population())\n cluster_list_copy = cluster_list_copy[: num_clusters]\n cluster_cent = [(cluster.horiz_center(), cluster.vert_center()) for cluster in cluster_list_copy]\n result = []\n #clustering to k initial centers adjusting the centers after each iteration\n for dummy_q in range(num_iterations):\n #Initialize k empty sets C1,...,Ck\n k_clusters = []\n for dummy_k in range(num_clusters):\n k_clusters.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n for idx_j in range(len(cluster_list)):\n # defining the closest k center and add the cluster to it\n dist_list = []\n for idx_k in range(num_clusters):\n center_x, center_y = cluster_cent[idx_k]\n dist = cluster_list[idx_j].distance(\n alg_cluster.Cluster(set(), center_x, center_y, 0, 0))\n dist_list.append((dist, idx_k))\n dummy_k, idx = min(dist_list)\n k_clusters[idx].merge_clusters(cluster_list[idx_j])\n result = k_clusters\n #update the new center of k clusters\n cluster_cent = [(k_clusters[idx_f].horiz_center(), k_clusters[idx_f].vert_center()) for idx_f in range(num_clusters)]\n return result", "def split_list(l, k):\n\n\tn = len(l)\n\tsublists = []\n\tnsubs = n / k\n\tnrems = n % k\n\n\t# little algo to split lists.\n\n\ti = int(0)\n\twhile i < n:\n\t\tsublists.append(l[i:i+k])\n\t\ti += k\n\n\treturn sublists", "def chunks(l, k):\n n = len(l)\n return [l[i * (n // k) + min(i, n % k):(i+1) * (n // k) + min(i+1, n % k)] for i in range(k)]", "def hierarchical_clustering(cluster_list, num_clusters):\n # print \"\\n\\ncluster_list:\\n\", cluster_list, \"\\n\\n\"\n # n <-- |P|\n len_cluster_list = len(cluster_list)\n \n # Initialize n clusters C = {C1, ... Cn} such that Ci = {pi};\n new_cluster_list = []\n\n for index in range(len_cluster_list):\n new_cluster_list.append(alg_cluster.Cluster(cluster_list[index].fips_codes(), cluster_list[index].horiz_center(), cluster_list[index].vert_center(), cluster_list[index].total_population(), cluster_list[index].averaged_risk()))\n\n # while |C| > k do\n while len(new_cluster_list) > num_clusters:\n # (Ci,Cj) <-- argminCi,Cj Element C, i != j^dCi,Cj;\n # C <-- C Union {Ci Union Cj}; // line 5\n # C <-- C \\ {Ci, Cj}; // line 6\n fc_pair = fast_closest_pair(new_cluster_list)\n # print \"\\nfc_pair:\", fc_pair, \"\\n\"\n new_cluster_list[fc_pair[1]].merge_clusters(new_cluster_list[fc_pair[2]])\n del new_cluster_list[fc_pair[2]]\n # new_cluster_list.append(cluster_list[fc_pair[1]])\n # del cluster_list[fc_pair[1]]\n\n # print \"k = %r \\n\\nnew_cluster_list: \\n%r\" % (num_clusters, new_cluster_list)\n return new_cluster_list", "def hierarchical_clustering(cluster_list, num_clusters):\n # n <-- |P|\n len_cluster_list = len(cluster_list)\n \n # Initialize n clusters C = {C1, ... 
Cn} such that Ci = {pi};\n new_cluster_list = []\n\n for index in range(len_cluster_list):\n new_cluster_list.append(alg_cluster.Cluster(cluster_list[index].fips_codes(), cluster_list[index].horiz_center(), cluster_list[index].vert_center(), cluster_list[index].total_population(), cluster_list[index].averaged_risk()))\n\n # while |C| > k do\n while len(new_cluster_list) > num_clusters:\n # (Ci,Cj) <-- argminCi,Cj Element C, i != j^dCi,Cj;\n # C <-- C Union {Ci Union Cj}; // line 5\n # C <-- C \\ {Ci, Cj}; // line 6\n fc_pair = fast_closest_pair(new_cluster_list)\n # print \"\\nfc_pair:\", fc_pair, \"\\n\"\n new_cluster_list[fc_pair[1]].merge_clusters(new_cluster_list[fc_pair[2]])\n del new_cluster_list[fc_pair[2]]\n\n return new_cluster_list", "def neclusters(l, K): # noqa\n for c in clusters(l, K):\n if all(x for x in c):\n yield c", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n\n # position initial clusters at the location of clusters with largest populations\n \n cluster_n = len(cluster_list)\n\n miu_k = sorted(cluster_list,\n key=lambda c: c.total_population())[-num_clusters:]\n miu_k = [c.copy() for c in miu_k]\n\n # n: cluster_n\n # q: num_iterations\n for _ in xrange(num_iterations):\n cluster_result = [alg_cluster.Cluster(set([]), 0, 0, 0, 0) for _ in range(num_clusters)]\n # put the node into closet center node\n\n for jjj in xrange(cluster_n):\n min_num_k = 0\n min_dist_k = float('inf')\n for num_k in xrange(len(miu_k)):\n dist = cluster_list[jjj].distance(miu_k[num_k])\n if dist < min_dist_k:\n min_dist_k = dist\n min_num_k = num_k\n\n cluster_result[min_num_k].merge_clusters(cluster_list[jjj])\n\n # re-computer its center node\n for kkk in xrange(len(miu_k)):\n miu_k[kkk] = cluster_result[kkk]\n\n return cluster_result", "def cluster_by_partitioning(active_sites,klist):\n score_keeper = dict()\n n_trials = 100\n for k in klist:\n score_keeper[k] = 0\n # compute the average silhouette score for k and store in score_keeper\n for i in range(n_trials):\n s = silhouette_score(do_partitioning_cluster(active_sites,k))\n score_keeper[k] = score_keeper.get(k) + (s/n_trials)\n maximum = -np.inf\n n_clusters = 0\n # find the maximum average silhouette score and use the corresponding k\n for k in score_keeper.keys():\n if score_keeper.get(k) > maximum:\n maximum = score_keeper.get(k)\n n_clusters = k\n print(score_keeper)\n return do_partitioning_cluster(active_sites,n_clusters)", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n points = cluster_list[:]\n \n # n <-- |p|;\n len_points_list = len(points)\n\n # position initial clusters at the location of clusters with largest populations (i.e., cluster[3] which is population) \n cluster_centers = []\n temp_cl = points[:]\n \n temp_cl.sort(key=lambda cluster: cluster.total_population())\n for cluster in reversed(temp_cl):\n if len(cluster_centers) < num_clusters:\n cluster_centers.append(alg_cluster.Cluster(set([]), cluster.horiz_center(), cluster.vert_center(), 0, 0))\n\n # For number of iterations\n for dummy_var in range(num_iterations):\n # initialize k (num_clusters) empty sets C1, ... 
Ck;\n cluster_groupings = []\n for index in range(len(cluster_centers)):\n cluster_groupings.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n # # For each county\n # for j = 0 to n - 1 do\n for index in range(len_points_list):\n # Find the old cluster center that is closest \n # L <-- argminsub(1<=f<=k) (dsub(psubj), musubf); \n min_dist = float('inf')\n nearest_cluster_index = None\n\n for idx, cluster in enumerate(cluster_centers):\n if points[index].distance(cluster) < min_dist:\n min_dist = points[index].distance(cluster)\n nearest_cluster_index = idx\n\n # Add the county to the corresponding new cluster\n # Handled with Cluster class merge_clusters method, which will automatically update the cluster centers to correct locations.\n cluster_groupings[nearest_cluster_index].merge_clusters(points[index])\n # Set old clusters equal to new clusters \n # for f = 1 to k do\n for index in range(len(cluster_centers)):\n # muf = center (Cf) // handled with Cluster class built-in method(s)\n cluster_centers[index] = cluster_groupings[index].copy()\n\n # return {C1, C2, ..., Ck}; \n return cluster_groupings", "def get_subsets(l, k):\n if k == 0:\n return [[]]\n else:\n res = []\n for i in range(len(l)):\n rest_subsets = get_subsets(l[i + 1:], k - 1)\n for subset in rest_subsets:\n subset.insert(0, l[i])\n res += rest_subsets\n return res", "def create_k_clusters(self, X, k, n_jobs=-1):\r\n\r\n kmeans = KMeans(n_clusters=k, random_state=self.RANDOM_STATE, n_jobs=n_jobs).fit(X)\r\n return kmeans", "def kmeans_clustering(cluster_list, num_clusters, num_iterations): \n # initialize k-means clusters to be initial clusters with largest populations\n return set([()])", "def partitions(n, k):\n if k == 1:\n yield (n,)\n return\n for i in range(1, n):\n for p in partitions(n-i, k-1):\n yield (i,) + p", "def cluster(positions, num_iters, k):\n cluster_center = initalise_cluster(positions, k)\n n = 0\n while n < num_iters:\n closest_cluster = get_distances(positions, cluster_center)\n cluster_center, cluster_size = updated_centres(positions, k, closest_cluster)\n n += 1\n for i in range(k):\n print(\"Cluster \" + str(i)\n + \" is centred at \" + str(cluster_center[i])\n + \" and has \" + str(cluster_size[i]) + \" points.\")", "def __clustering(self, k, min_instances_per_cluster, algorithm='MiniBatchKMeans'):\r\n if algorithm == 'MiniBatchKMeans':\r\n self.__print_msg('Applying MiniBatchKMeans...')\r\n if k is None:\r\n k = self.__find_best_k(max_k_allowed=0.1*len(self.data))\r\n self.__print_msg('Clustering...')\r\n model = MiniBatchKMeans(n_clusters=k, init=\"k-means++\", max_iter=100, n_init=1, init_size=1000, batch_size=1000)\r\n model.fit(self.text_repr)\r\n self.__add_clusters_to_data(model)\r\n clusters_pruned = self.__prune_clusters(model, min_instances_per_cluster)\r\n return model, clusters_pruned\r\n else:\r\n raise Exception('Unsupported algorithm')", "def k_gaussian_clusters(K):\n centres = [np.random.uniform(low= -5.0, high=5.0, size=(1, 2)) for k in xrange(K)]\n #Change up the sizes of the clusters\n clust_sizes = [np.random.randint(2, 10) for k in xrange(K)]\n # clust_sizes = [5 for k in xrange(K)]\n clusters = [0.5*np.random.randn(cs, 2) + c for cs, c in zip(clust_sizes, centres)]\n n = sum(clust_sizes)\n global_opt = _compute_global_opt(clusters, centres, 2, n)\n return clusters, centres, global_opt", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n # print \"\\n\\ncluster_list:\", cluster_list\n # print \"\\n\\nnum_clusters:\", num_clusters\n # print 
\"num_iterations:\", num_iterations, \"\\n\"\n points = cluster_list[:]\n \n # n <-- |p|;\n len_points_list = len(points)\n\n # position initial clusters at the location of clusters with largest populations (i.e., cluster[3] which is population) \n # Cluster centers computed in lines 2 and 8-9 should stay fixed as lines 5-7 are executed during one iteration of the outer loop. To avoid modifying these values during execution of lines 5-7, you should consider storing these cluster centers in a separate data structure.\n cluster_centers = []\n temp_cl = points[:]\n \n temp_cl.sort(key=lambda cluster: cluster.total_population())\n # print \"temp_cl:\", temp_cl\n for cluster in reversed(temp_cl):\n if len(cluster_centers) < num_clusters:\n cluster_centers.append(alg_cluster.Cluster(set([]), cluster.horiz_center(), cluster.vert_center(), 0, 0))\n # print \"\\n\\ncluster_centers:\", cluster_centers, \"\\n\\n\"\n\n # For number of iterations\n # for i <-- 1 to q do\n for dummy_var in range(num_iterations):\n # initialize the new clusters to be empty, i.e., represent an empty cluster as a Cluster object whose set of counties is empty and whose total population is zero\n # initialize k (num_clusters) empty sets C1, ... Ck;\n cluster_groupings = []\n for index in range(len(cluster_centers)):\n # print \"\\n\\ncluster_centers[index]:\", cluster_centers[index], \"\\n\\n\"\n cluster_groupings.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n # # For each county\n # for j = 0 to n - 1 do\n for index in range(len_points_list):\n # Find the old cluster center that is closest \n # L <-- argminsub(1<=f<=k) (dsub(psubj), musubf); \n min_dist = float('inf')\n nearest_cluster_index = None\n\n for idx, cluster in enumerate(cluster_centers):\n if points[index].distance(cluster) < min_dist:\n min_dist = points[index].distance(cluster)\n nearest_cluster_index = idx\n \n # print \"\\n\\n\\n\\npoints[index]:\", points[index]\n # print \"\\nmin_dist:\", min_dist\n # print \"\\ncluster_groupings[nearest_cluster_index]:\", cluster_groupings[nearest_cluster_index]\n\n # Add the county to the corresponding new cluster\n # C sub L <-- C sub L Union {psub j}; // handled with Cluster class merge_clusters method, which will automatically update the cluster centers to correct locations.\n cluster_groupings[nearest_cluster_index].merge_clusters(points[index])\n # print \"\\n\\n\\n\\ncluster_groupings:\", cluster_groupings\n # Set old clusters equal to new clusters \n # for f = 1 to k do\n for index in range(len(cluster_centers)):\n # muf = center (Cf) // handled with Cluster class built-in method(s)\n cluster_centers[index] = cluster_groupings[index].copy()\n # print \"\\n\\nupdated cluster_centers:\", cluster_centers\n\n # return {C1, C2, ..., Ck};\n # Return the new clusters \n return cluster_groupings" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Partition list ``l`` in ``K`` partitions, without empty parts.

>>> l = [0, 1, 2]
>>> list(neclusters(l, 2))
[[[0, 1], [2]], [[1], [0, 2]], [[0], [1, 2]]]
>>> list(neclusters(l, 1))
[[[0, 1, 2]]]
def neclusters(l, K): # noqa
    for c in clusters(l, K):
        if all(x for x in c):
            yield c
[ "def clusters(l, K): # noqa\n if l:\n prev = None\n for t in clusters(l[1:], K):\n tup = sorted(t)\n if tup != prev:\n prev = tup\n for i in range(K):\n yield tup[:i] + [\n [l[0]] + tup[i],\n ] + tup[i + 1 :]\n else:\n yield [[] for _ in range(K)]", "def all_segmentations(l):\n for K in range(1, len(l) + 1):\n gen = neclusters(l, K)\n yield from gen", "def kmeans_clustering(cluster_list, num_clusters, num_iterations): \n # initialize k-means clusters to be initial clusters with largest populations\n return set([()])", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n # position initial clusters at the location of clusters with largest populations\n cluster_list_copy = sorted(cluster_list,\n reverse = True,\n key=lambda cluster: cluster.total_population())\n cluster_list_copy = cluster_list_copy[: num_clusters]\n cluster_cent = [(cluster.horiz_center(), cluster.vert_center()) for cluster in cluster_list_copy]\n result = []\n #clustering to k initial centers adjusting the centers after each iteration\n for dummy_q in range(num_iterations):\n #Initialize k empty sets C1,...,Ck\n k_clusters = []\n for dummy_k in range(num_clusters):\n k_clusters.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n for idx_j in range(len(cluster_list)):\n # defining the closest k center and add the cluster to it\n dist_list = []\n for idx_k in range(num_clusters):\n center_x, center_y = cluster_cent[idx_k]\n dist = cluster_list[idx_j].distance(\n alg_cluster.Cluster(set(), center_x, center_y, 0, 0))\n dist_list.append((dist, idx_k))\n dummy_k, idx = min(dist_list)\n k_clusters[idx].merge_clusters(cluster_list[idx_j])\n result = k_clusters\n #update the new center of k clusters\n cluster_cent = [(k_clusters[idx_f].horiz_center(), k_clusters[idx_f].vert_center()) for idx_f in range(num_clusters)]\n return result", "def hierarchical_clustering(cluster_list, num_clusters):\n # print \"\\n\\ncluster_list:\\n\", cluster_list, \"\\n\\n\"\n # n <-- |P|\n len_cluster_list = len(cluster_list)\n \n # Initialize n clusters C = {C1, ... 
Cn} such that Ci = {pi};\n new_cluster_list = []\n\n for index in range(len_cluster_list):\n new_cluster_list.append(alg_cluster.Cluster(cluster_list[index].fips_codes(), cluster_list[index].horiz_center(), cluster_list[index].vert_center(), cluster_list[index].total_population(), cluster_list[index].averaged_risk()))\n\n # while |C| > k do\n while len(new_cluster_list) > num_clusters:\n # (Ci,Cj) <-- argminCi,Cj Element C, i != j^dCi,Cj;\n # C <-- C Union {Ci Union Cj}; // line 5\n # C <-- C \\ {Ci, Cj}; // line 6\n fc_pair = fast_closest_pair(new_cluster_list)\n # print \"\\nfc_pair:\", fc_pair, \"\\n\"\n new_cluster_list[fc_pair[1]].merge_clusters(new_cluster_list[fc_pair[2]])\n del new_cluster_list[fc_pair[2]]\n # new_cluster_list.append(cluster_list[fc_pair[1]])\n # del cluster_list[fc_pair[1]]\n\n # print \"k = %r \\n\\nnew_cluster_list: \\n%r\" % (num_clusters, new_cluster_list)\n return new_cluster_list", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n\n # position initial clusters at the location of clusters with largest populations\n \n cluster_n = len(cluster_list)\n\n miu_k = sorted(cluster_list,\n key=lambda c: c.total_population())[-num_clusters:]\n miu_k = [c.copy() for c in miu_k]\n\n # n: cluster_n\n # q: num_iterations\n for _ in xrange(num_iterations):\n cluster_result = [alg_cluster.Cluster(set([]), 0, 0, 0, 0) for _ in range(num_clusters)]\n # put the node into closet center node\n\n for jjj in xrange(cluster_n):\n min_num_k = 0\n min_dist_k = float('inf')\n for num_k in xrange(len(miu_k)):\n dist = cluster_list[jjj].distance(miu_k[num_k])\n if dist < min_dist_k:\n min_dist_k = dist\n min_num_k = num_k\n\n cluster_result[min_num_k].merge_clusters(cluster_list[jjj])\n\n # re-computer its center node\n for kkk in xrange(len(miu_k)):\n miu_k[kkk] = cluster_result[kkk]\n\n return cluster_result", "def partition_to_k(layerlist, k, order=False):\n for each_partition_candidate in partition(layerlist):\n if len(each_partition_candidate) == k:\n if not order:\n yield each_partition_candidate\n else:\n for enum_item in permutations(each_partition_candidate):\n yield enum_item", "def hierarchical_clustering(cluster_list, num_clusters):\n # n <-- |P|\n len_cluster_list = len(cluster_list)\n \n # Initialize n clusters C = {C1, ... 
Cn} such that Ci = {pi};\n new_cluster_list = []\n\n for index in range(len_cluster_list):\n new_cluster_list.append(alg_cluster.Cluster(cluster_list[index].fips_codes(), cluster_list[index].horiz_center(), cluster_list[index].vert_center(), cluster_list[index].total_population(), cluster_list[index].averaged_risk()))\n\n # while |C| > k do\n while len(new_cluster_list) > num_clusters:\n # (Ci,Cj) <-- argminCi,Cj Element C, i != j^dCi,Cj;\n # C <-- C Union {Ci Union Cj}; // line 5\n # C <-- C \\ {Ci, Cj}; // line 6\n fc_pair = fast_closest_pair(new_cluster_list)\n # print \"\\nfc_pair:\", fc_pair, \"\\n\"\n new_cluster_list[fc_pair[1]].merge_clusters(new_cluster_list[fc_pair[2]])\n del new_cluster_list[fc_pair[2]]\n\n return new_cluster_list", "def __clustering(self, k, min_instances_per_cluster, algorithm='MiniBatchKMeans'):\r\n if algorithm == 'MiniBatchKMeans':\r\n self.__print_msg('Applying MiniBatchKMeans...')\r\n if k is None:\r\n k = self.__find_best_k(max_k_allowed=0.1*len(self.data))\r\n self.__print_msg('Clustering...')\r\n model = MiniBatchKMeans(n_clusters=k, init=\"k-means++\", max_iter=100, n_init=1, init_size=1000, batch_size=1000)\r\n model.fit(self.text_repr)\r\n self.__add_clusters_to_data(model)\r\n clusters_pruned = self.__prune_clusters(model, min_instances_per_cluster)\r\n return model, clusters_pruned\r\n else:\r\n raise Exception('Unsupported algorithm')", "def KNN(train, test, k):\r\n xord = []\r\n for i in range(len(train)):\r\n xord.append((euclidDistance(test,train[i]), i))\r\n xord.sort()\r\n KNN = xord[0:k]\r\n return KNN", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n points = cluster_list[:]\n \n # n <-- |p|;\n len_points_list = len(points)\n\n # position initial clusters at the location of clusters with largest populations (i.e., cluster[3] which is population) \n cluster_centers = []\n temp_cl = points[:]\n \n temp_cl.sort(key=lambda cluster: cluster.total_population())\n for cluster in reversed(temp_cl):\n if len(cluster_centers) < num_clusters:\n cluster_centers.append(alg_cluster.Cluster(set([]), cluster.horiz_center(), cluster.vert_center(), 0, 0))\n\n # For number of iterations\n for dummy_var in range(num_iterations):\n # initialize k (num_clusters) empty sets C1, ... 
Ck;\n cluster_groupings = []\n for index in range(len(cluster_centers)):\n cluster_groupings.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n # # For each county\n # for j = 0 to n - 1 do\n for index in range(len_points_list):\n # Find the old cluster center that is closest \n # L <-- argminsub(1<=f<=k) (dsub(psubj), musubf); \n min_dist = float('inf')\n nearest_cluster_index = None\n\n for idx, cluster in enumerate(cluster_centers):\n if points[index].distance(cluster) < min_dist:\n min_dist = points[index].distance(cluster)\n nearest_cluster_index = idx\n\n # Add the county to the corresponding new cluster\n # Handled with Cluster class merge_clusters method, which will automatically update the cluster centers to correct locations.\n cluster_groupings[nearest_cluster_index].merge_clusters(points[index])\n # Set old clusters equal to new clusters \n # for f = 1 to k do\n for index in range(len(cluster_centers)):\n # muf = center (Cf) // handled with Cluster class built-in method(s)\n cluster_centers[index] = cluster_groupings[index].copy()\n\n # return {C1, C2, ..., Ck}; \n return cluster_groupings", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n # print \"\\n\\ncluster_list:\", cluster_list\n # print \"\\n\\nnum_clusters:\", num_clusters\n # print \"num_iterations:\", num_iterations, \"\\n\"\n points = cluster_list[:]\n \n # n <-- |p|;\n len_points_list = len(points)\n\n # position initial clusters at the location of clusters with largest populations (i.e., cluster[3] which is population) \n # Cluster centers computed in lines 2 and 8-9 should stay fixed as lines 5-7 are executed during one iteration of the outer loop. To avoid modifying these values during execution of lines 5-7, you should consider storing these cluster centers in a separate data structure.\n cluster_centers = []\n temp_cl = points[:]\n \n temp_cl.sort(key=lambda cluster: cluster.total_population())\n # print \"temp_cl:\", temp_cl\n for cluster in reversed(temp_cl):\n if len(cluster_centers) < num_clusters:\n cluster_centers.append(alg_cluster.Cluster(set([]), cluster.horiz_center(), cluster.vert_center(), 0, 0))\n # print \"\\n\\ncluster_centers:\", cluster_centers, \"\\n\\n\"\n\n # For number of iterations\n # for i <-- 1 to q do\n for dummy_var in range(num_iterations):\n # initialize the new clusters to be empty, i.e., represent an empty cluster as a Cluster object whose set of counties is empty and whose total population is zero\n # initialize k (num_clusters) empty sets C1, ... 
Ck;\n cluster_groupings = []\n for index in range(len(cluster_centers)):\n # print \"\\n\\ncluster_centers[index]:\", cluster_centers[index], \"\\n\\n\"\n cluster_groupings.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n # # For each county\n # for j = 0 to n - 1 do\n for index in range(len_points_list):\n # Find the old cluster center that is closest \n # L <-- argminsub(1<=f<=k) (dsub(psubj), musubf); \n min_dist = float('inf')\n nearest_cluster_index = None\n\n for idx, cluster in enumerate(cluster_centers):\n if points[index].distance(cluster) < min_dist:\n min_dist = points[index].distance(cluster)\n nearest_cluster_index = idx\n \n # print \"\\n\\n\\n\\npoints[index]:\", points[index]\n # print \"\\nmin_dist:\", min_dist\n # print \"\\ncluster_groupings[nearest_cluster_index]:\", cluster_groupings[nearest_cluster_index]\n\n # Add the county to the corresponding new cluster\n # C sub L <-- C sub L Union {psub j}; // handled with Cluster class merge_clusters method, which will automatically update the cluster centers to correct locations.\n cluster_groupings[nearest_cluster_index].merge_clusters(points[index])\n # print \"\\n\\n\\n\\ncluster_groupings:\", cluster_groupings\n # Set old clusters equal to new clusters \n # for f = 1 to k do\n for index in range(len(cluster_centers)):\n # muf = center (Cf) // handled with Cluster class built-in method(s)\n cluster_centers[index] = cluster_groupings[index].copy()\n # print \"\\n\\nupdated cluster_centers:\", cluster_centers\n\n # return {C1, C2, ..., Ck};\n # Return the new clusters \n return cluster_groupings", "def partition(n, k=None, zeros=False):\n if not zeros or k is None:\n for i in ordered_partitions(n, k):\n yield tuple(i)\n else:\n for m in range(1, k + 1):\n for i in ordered_partitions(n, m):\n i = tuple(i)\n yield (0,)*(k - len(i)) + i", "def all_clusters(game_map, k_fighters = 60, k_miners = 30):\n clusters = []\n for player in game_map.all_players():\n fighters = []\n miners = []\n for ship in player.all_ships():\n if ship.docking_status == ship.DockingStatus.UNDOCKED:\n fighters.append(ship)\n else:\n miners.append(ship)\n pc = {\"fighters\": get_clusters(fighters, k_fighters), \"miners\": get_clusters(miners, k_miners)}\n clusters.append(pc)\n return clusters", "def chunks(l, k):\n n = len(l)\n return [l[i * (n // k) + min(i, n % k):(i+1) * (n // k) + min(i+1, n % k)] for i in range(k)]", "def create_k_clusters(self, X, k, n_jobs=-1):\r\n\r\n kmeans = KMeans(n_clusters=k, random_state=self.RANDOM_STATE, n_jobs=n_jobs).fit(X)\r\n return kmeans", "def chooseCentroids(LeafBucket, k):\n lbcentroids = []\n density = []\n # Calculate densities and mean values for each leafbucket\n for lb in LeafBucket:\n density.append(calDensity(lb))\n lbcentroids.append(np.mean(lb, axis=0))\n\n # Init and choose first centroids\n centroids = []\n first = np.argmax(density)\n\n centroids.append(lbcentroids[first])\n # Remove chosen lbcentroids\n del density[first]\n del lbcentroids[first]\n #density.remove(density[first])\n #lbcentroids.remove(lbcentroids[first])\n\n for i in xrange(1,k):\n next = -1\n maxg = 0; # g = mindist*density\n # Choose index of next centroids\n for index, lbc in enumerate(lbcentroids):\n # Calculate min distance from centroids\n mindist = min([distance(c,lbc) for c in centroids])\n if (mindist*density[index] > maxg):\n next = index\n maxg = mindist*density[index]\n centroids.append(lbcentroids[next])\n #print lbcentroids[next]\n del density[next]\n del lbcentroids[next]\n #density.remove(density[next])\n 
#lbcentroids.remove(lbcentroids[next])\n return centroids", "def gen_clusters_from_class_list(group_list,skip_list=[]):\n dic_clusters = {}\n for i in range(len(group_list)):\n if not group_list[i] in skip_list:\n if group_list[i] in dic_clusters.keys():\n dic_clusters[group_list[i]].append(i)\n else:\n dic_clusters[group_list[i]] = [i]\n clusters = []\n for k in dic_clusters.keys():\n clusters.append(Cluster(dic_clusters[k][0],dic_clusters[k]))\n return clusters", "def split_list(l, k):\n n = len(l)\n\n d = n // k\n r = n % k\n\n offset = 0\n for i in range(k):\n if i < r:\n size = d + 1\n else:\n size = d\n\n yield l[offset:offset+size]\n offset += size" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all segmentations of a list ``l``.
def all_segmentations(l):
    for K in range(1, len(l) + 1):
        gen = neclusters(l, K)
        yield from gen
[ "def Chunks(l):\n return_list = [[]]\n counter = 0\n index = 0\n for i in l:\n # Size is split in half due to the max size being a sum of src and dst.\n if counter > (self._ADDRESS_LENGTH_LIMIT/2):\n counter = 0\n index += 1\n return_list.append([])\n if i.version == 6:\n counter += self._IPV6_SIZE\n else:\n counter += 1\n return_list[index].append(i)\n return return_list", "def get_all(self):\n return self._segments", "def segment_token_list(self, token_list):\r\n token_seg_list = []\r\n for token in token_list:\r\n token_seg_list.append(self.segment_token(token))\r\n return token_seg_list", "def getSegments(self):\n segments=[]\n l=len(self.points)\n for j in range(l):\n for i in range(j):\n if self.network[i][j]:\n segment=Segment(self.points[i],self.points[j])\n segments.append(segment)\n return segments", "def get_segmentations(self, aids):\n sseg_list = []\n for aid in aids:\n ann = self.dset.anns[aid]\n coco_sseg = ann.get('segmentation', None)\n if coco_sseg is None:\n sseg = None\n else:\n sseg = kwimage.MultiPolygon.coerce(coco_sseg)\n sseg_list.append(sseg)\n return sseg_list", "def splitList(self, l):\n\t\tlength = self.maxNodesPerWay\n\t\t#l = self._cutBeginning(l)\n\t\tif len(l) < 2:\n\t\t\treturn [], 0, 0\n\t\tif length == 0 or len(l) <= length:\n\t\t\ttmpList = [l, ]\n\t\telse:\n\t\t\t\"\"\"\n\t\t\tif len(l)%(length-1) == 1:\n\t\t\t\t# the last piece of a path should contain at least 2 nodes\n\t\t\t\tl, endPiece = l[:-1], l[-2:]\n\t\t\telse:\n\t\t\t\tendPiece = None\n\t\t\ttmpList = [l[i:i+length] for i in range(0, len(l), length-1)]\n\t\t\tif endPiece != None:\n\t\t\t\ttmpList.append(endPiece)\n\t\t\t\"\"\"\n\t\t\t# we don't need to do the stuff with the end piece if we stop the list\n\t\t\t# comprehension at the second-last element of the list (i being at maximum\n\t\t\t# len(l)-2. 
This works because <length> is at least two, so we are sure\n\t\t\t# to always include the last two elements.\n\t\t\ttmpList = [l[i:i+length] for i in range(0, len(l)-1, length-1)]\n\t\tpathList = []\n\t\tnumOfClosedPaths = 0\n\t\tfor path in tmpList:\n\t\t\t#path = self._cutBeginning(path)\n\t\t\tif len(path) == 0:\n\t\t\t\t# self._cutBeginning() returned an empty list for this path\n\t\t\t\tcontinue\n\t\t\tif numpy.all(path[0]==path[-1]):\n\t\t\t\t# a closed path with at least 3 nodes\n\t\t\t\tnumOfClosedPaths += 1\n\t\t\tpathList.append(path)\n\t\tnumOfPaths = len(pathList)\n\t\tnumOfNodes = sum([len(p) for p in pathList])-numOfClosedPaths\n\t\treturn pathList, numOfNodes, numOfPaths", "def intersect_list(self, lis):\n intersection = []\n for i in lis:\n if self.in_interval(i):\n intersection.append(i)\n return intersection", "def lists_and_segments(self):\n response = self._get(self.uri_for(\"listsandsegments\"))\n return json_to_py(response)", "def getSegments(self) -> List[int]:\n ...", "def get_segment_objects(self):\n segments = []\n for form in self.formset:\n segments.append(form.get_segment_object())\n\n return segments", "def calculate_segment_lengths(self):\n self.segment_lengths = []\n for sec in self.sec_list:\n for seg in sec:\n area = seg.area()\n r = seg.diam/2\n l = (area - 2 *np.pi * (r**2))/(2*np.pi*r)\n self.segment_lengths.append(l)", "def _split_into_segments(input_list):\n max_n = 100 # as of 23.02.2021\n\n # n cuts divide a list into n+1 segments (math.floor(len(song_uris) / max_n) =: number_of_cuts)\n n_of_segments = math.floor(len(input_list) / max_n) + 1\n list_of_lists = []\n\n # i is element of interval [0, n_of_segments) => i always < n_of_segments\n for i in range(n_of_segments):\n if i < n_of_segments - 1:\n list_of_lists.append(input_list[i * max_n: (i + 1) * max_n])\n\n # the last segment can contain < max_n songs\n else:\n list_of_lists.append(input_list[i * max_n: (i * max_n) + len(input_list) % max_n])\n\n return list_of_lists", "def segments(seg_type=None):\n\n for index in xrange(idaapi.get_segm_qty()):\n seg = Segment(index=index)\n if (seg_type is None) or (seg.type == seg_type):\n yield Segment(index=index)", "def segment(self, seq, indexlist):\n out = []\n for i in range(len(indexlist)):\n if i==len(indexlist)-1:\n out.append(seq[indexlist[i]:])\n else:\n out.append(seq[indexlist[i]:indexlist[i+1]])\n return out", "def divide_list(ld, division):\n buckets = []\n current = []\n for obj in ld:\n if len(current) < division:\n current.append(obj)\n else:\n buckets.append(current)\n current = [obj]\n if len(current) > 0:\n buckets.append(current)\n return buckets", "def getData(SegmentsList, attrib):\n\n s = set()\n r = []\n for SegmentObj in SegmentsList:\n value = getattr(SegmentObj, attrib)\n if type(value) == music21.contour.contour.Contour:\n value = tuple(value)\n s.add(value)\n r = [music21.contour.contour.Contour(cseg) for cseg in sorted(s)]\n elif type(value) == list:\n for el in value:\n s.add(el)\n r = sorted(s)\n else:\n s.add(value)\n r = sorted(s)\n return r", "def get_segments_for_cat(pred_lst, cat_id):\n int_seg_dist = 60 # 2 for 8*i frames\n segments = []\n beg , end = -1, -1\n seg_flag = False\n for i,pr in enumerate(pred_lst):\n if pr==cat_id and not seg_flag:\n beg = i\n seg_flag = True\n elif pr!=cat_id and seg_flag:\n end = i\n segments.append((beg, end))\n seg_flag = False\n beg, end = -1, -1\n if seg_flag:\n segments.append((beg, i+1))\n \n seg_flag = True\n new_segments = []\n if len(segments)==0:\n return []\n (bPrev, ePrev) 
= segments[0]\n for i,(bCurr,eCurr) in enumerate(segments):\n if i==0:\n continue\n if (bCurr-ePrev)<=int_seg_dist :\n ePrev = eCurr\n else:\n new_segments.append((bPrev, ePrev))\n bPrev = bCurr\n ePrev = eCurr\n new_segments.append((bPrev, ePrev))\n \n return new_segments", "def convert_list2range(self, l):\n ranges = []\n sl = sorted(set(l))\n for k,g in groupby(enumerate(sl), lambda (i,x): i-x):\n group = list(map(itemgetter(1), g))\n if len(group) == 1:\n ranges.append(str(group[0]))\n else:\n ranges.append(str(group[0])+'-'+str(group[-1]))\n return ranges", "def get_all_segments(edfFiles):\n\n segments = []\n preprocessor = Preprocessor(config_startShift,\n config_endShift,\n config_powerLineFreq,\n config_bandLowCut,\n config_bandHighCut)\n for edf in edfFiles:\n print(\"getting the labeled segments from the recording \", str(edf.filename))\n segments.extend(get_segments_from_edf(edf, preprocessor))\n if edfFiles.index(edf) == 20: break\n return segments" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test if ``s1`` and ``s2`` are in the same symbol, given the ``segmentation``.
def q(segmentation, s1, s2):
    index1 = find_index(segmentation, s1)
    index2 = find_index(segmentation, s2)
    return index1 == index2
[ "def __eq__(self, other: Segment) -> bool:\n return any(\n (\n self.start == other.start and self.end == other.end,\n self.start == other.end and self.end == other.start,\n )\n )", "def sequence_in(s1, s2):\n return bool(re.search(\".*\".join(s1), s2))", "def segment_segment(s1, s2):\n l1=s1.line()\n l2=s2.line()\n i = line_line(l1, l2)\n if isinstance(i, bool): return False\n k = s1.affine(i)\n return k >= 0 and k <= 1 and i", "def seg_x_in_y(self, x: str, y: str) -> bool:\n return len(set(x + y)) == len(y)", "def are_segments_compatible(self, seg0, seg1):\n if seg0.h1 != seg1.h0:\n print(\"Warning: seg0 h1 and seg1 h0 are not the same\")\n print(seg0.h1)\n print(seg1.h0)\n return self.check_tolerance(seg0.h0, seg0.h1, seg1.h1)", "def identical_cds(sc1,sc2):\n # Input 2 identical segment chains, return True if cds the same\n if sc1.covers(sc2) and sc2.covers(sc1):\n return True\n else:\n return False", "def intersects(segment_a,segment_b):\n try:\n # if isinstance(segment_a, Snake):\n # segment_a = segment_a.segments\n # elif isinstance(segment_b, Snake):\n # segment_b = segment_b.segments\n if isinstance(segment_a, Segment) and isinstance(segment_b, Segment):\n seg_a, seg_a_heading = Snake._extract(segment_a)\n seg_b, seg_b_heading = Snake._extract(segment_b)\n return Snake._compare_segments(seg_a_heading, seg_a, seg_b_heading, seg_b)\n else:\n if isinstance(segment_a, Snake):\n segment_a = segment_a.segments\n else:\n segment_a = [segment_a]\n if isinstance(segment_b, Snake):\n segment_b = segment_b.segments\n else:\n segment_b = [segment_b]\n result = True\n if len(segment_a) == 0 or len(segment_b) == 0:\n result = False\n elif len(segment_a) == 1:\n seg_a, seg_a_heading = Snake._extract(segment_a[0])\n for sb in segment_b:\n seg_b, seg_b_heading = Snake._extract(sb)\n result = (result and Snake._compare_segments(seg_a_heading, seg_a, seg_b_heading, seg_b))\n elif len(segment_b) == 1:\n seg_b, seg_b_heading = Snake._extract(segment_b[0])\n for sa in segment_a:\n seg_a, seg_a_heading = Snake._extract(sa)\n result = (result and Snake._compare_segments(seg_a_heading, seg_a, seg_b_heading, seg_b))\n else:\n for sg_a in segment_a:\n seg_a, seg_a_heading = Snake._extract(sg_a)\n for sg_b in segment_b:\n seg_b, seg_b_heading = Snake._extract(sg_b)\n result = (result and Snake._compare_segments(seg_a_heading, seg_a, seg_b_heading, seg_b))\n return result\n except Exception as err:\n raise err\n return False", "def match_sp_sep(first, second):\n if isinstance(first, list):\n one = [set(v.split(\" \")) for v in first]\n else:\n one = [{v} for v in first.split(\" \")]\n\n if isinstance(second, list):\n other = [set(v.split(\" \")) for v in second]\n else:\n other = [{v} for v in second.split(\" \")]\n\n # all values in one must appear in other\n if any(rt not in other for rt in one):\n return False\n return True", "def doIntersect(seg1, seg2):\n (p1, p2), (p3, p4) = seg1, seg2\n num1 = (p4.x - p3.x) * (p1.y - p3.y) - (p4.y - p3.y) * (p1.x - p3.x)\n num2 = (p2.x - p1.x) * (p1.y - p3.y) - (p2.y - p1.y) * (p1.x - p3.x)\n den = (p4.y - p3.y) * (p2.x - p1.x) - (p4.x - p3.x) * (p2.y - p1.y)\n if acmp(den, 0) == 0:\n if acmp(num1, 0) == 0 and acmp(num2, 0) == 0:\n return True # segments are coincident\n else:\n return False # segments are parallel\n else:\n ua = num1/den\n ub = num2/den\n if (0 <= ua <= 1) and (0 <= ub <= 1):\n return True\n else:\n return False", "def conflateable(seg1, seg2, segment_pairs):\n for segment_pair in segment_pairs:\n seg_set = set(segment_pair)\n if seg1 in seg_set and 
seg2 in seg_set:\n return True\n return False", "def are_equal(self, sp1, sp2):\n for s1 in sp1.keys():\n spin1 = getattr(s1, \"spin\", 0)\n oxi1 = getattr(s1, \"oxi_state\", 0)\n for s2 in sp2.keys():\n spin2 = getattr(s2, \"spin\", 0)\n oxi2 = getattr(s2, \"oxi_state\", 0)\n if (s1.symbol == s2.symbol and oxi1 == oxi2 and\n spin2 == -spin1):\n break\n else:\n return False\n return True", "def __is_contained_in(first_symbol, second_symbol):\n\n first_symbol_top_left = first_symbol.top_left_corner\n first_symbol_top_right = first_symbol.top_right_corner\n first_symbol_bottom_left = first_symbol.bottom_left_corner\n first_symbol_bottom_right = first_symbol.bottom_right_corner\n\n second_symbol_top_left = second_symbol.top_left_corner\n second_symbol_top_right = second_symbol.top_right_corner\n second_symbol_bottom_left = second_symbol.bottom_left_corner\n second_symbol_bottom_right = second_symbol.bottom_right_corner\n\n if (\n second_symbol_top_left[0] <= first_symbol_top_left[0] and\n first_symbol_top_right[0] <= second_symbol_top_right[0] and\n second_symbol_bottom_left[0] <= first_symbol_bottom_left[0] and\n first_symbol_bottom_right[0] <= second_symbol_bottom_right[0] and\n\n second_symbol_top_left[1] <= first_symbol_top_left[1] and\n first_symbol_bottom_left[1] <= second_symbol_bottom_left[1] and\n second_symbol_top_right[1] <= first_symbol_top_right[1] and\n first_symbol_bottom_right[1] <= second_symbol_bottom_right[1]\n ):\n return True\n else:\n return False", "def contain(text1: str, text2: str, symbol: str)->bool:\n text1 = clean(text1)\n return ((symbol+text2+' ') in (text1+' '))", "def are_equal(self, sp1, sp2):\n for s1, amt2 in sp1.items():\n spin1 = getattr(s1, \"spin\", 0)\n oxi1 = getattr(s1, \"oxi_state\", 0)\n found = False\n for s2, amt2 in sp2.items():\n spin2 = getattr(s2, \"spin\", 0)\n oxi2 = getattr(s2, \"oxi_state\", 0)\n if (s1.symbol == s2.symbol and oxi1 == oxi2\n and spin2 == -spin1):\n found = True\n break\n if not found:\n return False\n return True", "def twoStrings(s1, s2):\n\n set1 = set(s1)\n set2 = set(s2)\n\n for char in set1:\n if char in set2:\n return True\n\n return False", "def problem_06():\n s1, s2 = \"paraparaparadise\", \"paragraph\"\n\n s1 = get_n_gram(s1, 2)\n s2 = get_n_gram(s2, 2)\n\n s1 = set(s1)\n s2 = set(s2)\n\n wa_set = s1 | s2\n seki_set = s1 & s2\n\n print(wa_set)\n print(seki_set)\n\n # 差集合\n diff1, diff2 = s1-s2, s2-s1\n\n print(diff1!=diff2) # true\n print(diff1)\n print(diff2)\n\n if 'se' in s1:\n print('se in s1')\n if 'se' in s2:\n print('se in s2')", "def isSimilar(bin1, bin2, s):\n assert len(bin1) == len(bin2)\n for i in range(len(bin1)):\n if abs(bin1[i] - bin2[i]) > s:\n return False\n return True", "def eq_tokens(token_sequence1: str, token_sequence2: str) -> bool:\n return set(token_sequence1.split(' ')) - {''} == set(token_sequence2.split(' ')) - {''}", "def intersects_segment(A, B, X):\n \n AX = np.array([X.x - A.x, X.y - A.y])\n XB = np.array([B.x - X.x, B.y - X.y])\n equal_signs = np.array_equal(np.sign(AX), np.sign(XB))\n\n return equal_signs" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }