Columns:
query: string, lengths 9 to 9.05k
document: string, lengths 10 to 222k
negatives: list, lengths 19 to 20
metadata: dict
QCoreApplication.winEventFilter(MSG) -> (bool, int)
def winEventFilter(self, MSG): # real signature unknown; restored from __doc__
    pass
[ "def user32_ChangeWindowMessageFilter(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"message\", \"dwFlag\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def user32_ChangeWindowMessageFilterEx(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hWnd\", \"message\", \"action\", \"pChangeFilterStruct\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def dwm_evt_listener_register():\n return -1", "def monitor_cb(ud, msg):\r\n return False", "def eventFilter(self, widget, event):\r\n if event.type() in [QtCore.QEvent.MouseButtonPress]:\r\n if not self.geometry().contains(event.globalPos()):\r\n # Clicked outside the widget\r\n self.close()\r\n return False\r\n\r\n if event.button() == QtCore.Qt.LeftButton:\r\n # Clicked on one of the opaque areas of the widget\r\n self.start_drawing(event.pos())\r\n return True\r\n\r\n # Any other click we bail\r\n self.close()\r\n return False\r\n\r\n elif event.type() in [QtCore.QEvent.MouseButtonRelease]:\r\n if event.button() == QtCore.Qt.LeftButton:\r\n self.stop_drawing()\r\n return True\r\n\r\n elif event.type() in [QtCore.QEvent.MouseMove]:\r\n # Mouse moved, if we had a handle grabbed, resize nodes.\r\n if self.drawing:\r\n self.draw_segment(event.pos())\r\n return True\r\n\r\n elif event.type() == QtCore.QEvent.KeyRelease:\r\n if event.isAutoRepeat():\r\n return True\r\n self.close()\r\n return True\r\n\r\n elif event.type() == QtCore.QEvent.KeyPress:\r\n if event.isAutoRepeat():\r\n return True\r\n\r\n return False # Swallow everything\r", "def handle_watch_event(self, event):", "def showWindow(self, sender):", "def eventFilter(self, obj, event):\n\n # If a NonClientAreaMouseMove (173) event immediately follows a Move event...\n if self.lastEvent == QtCore.QEvent.Move and event.type() == 173:\n\n # Determine the position of the mouse cursor and emit it with the\n # onDropSignal\n mouseCursor = QtGui.QCursor()\n dropPos = mouseCursor.pos()\n self.onDropSignal.emit(dropPos)\n self.lastEvent = event.type()\n return True\n\n else:\n self.lastEvent = event.type()\n return False", "def in_window(self):\n if self.actions == -1:\n return True\n else:\n return False", "def event(self, some_event):\n if some_event.type() == QEvent.ToolTip:\n lines = self.fltr.getInfo()\n self.setToolTip(lines) \n self.tooltip_display_sig.emit(QCursor.pos()) \n return True\n elif some_event.type() == QEvent.Resize:\n self.filterframe_resized_sig.emit()\n return True\n else:\n return False", "def is_visible() -> bool:\n return win.winfo_ismapped()", "def sendEvent(self, QObject, QEvent): # real signature unknown; restored from __doc__\r\n return False", "def __searchHwnds(name: str) -> list:\n hwnds = []\n def foreach_window(hwnd, lParam):\n if name in win32gui.GetWindowText(hwnd):\n hwnds.append(hwnd)\n win32gui.EnumWindows(foreach_window, None)\n return hwnds", "def processEventQueue(self) -> \"SbBool\":\n return _coin.ScXMLEventTarget_processEventQueue(self)", "def setEventFilter(self, filter):\n self.__eventFilter = filter", "def buttons(self):\n self.__poll()\n return (not (self.buffer[5] & 2 == 2), not (self.buffer[5] & 1 == 1))", "def eventFilter(self, object, event):\n# # starts the scrolling\n# if event.type() == event.HoverEnter:\n# \n# if object == self._scrollDownLabel:\n# ydelta = -5\n# elif object == self._scrollUpLabel:\n# ydelta = 5\n# else:\n# ydelta = 0\n# \n# if ydelta != 0:\n# self._scrollingDelta = ydelta\n# self.startScrolling()\n# else:\n# 
self.stopScrolling()\n# \n# # cancel the scrolling\n# elif event.type() == event.HoverLeave:\n# self.stopScrolling()\n \n return False", "def _need_to_listen_to_events(self):\n return any([self.scratch, self.con_mark, self.target_workspace])", "def namedWindow(winname, flags=...) -> None:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
adds nb_out exits (1 or 2)
def set_out(self, nb_out):
    if nb_out > 0:
        random_out1 = random.randrange(1, self.size - 1)
        if self.direction_in == UP:
            self.coord_sorties.append([self.size - 1, random_out1])
        if self.direction_in == DOWN:
            self.coord_sorties.append([0, random_out1])
        if self.direction_in == RIGHT:
            self.coord_sorties.append([random_out1, 0])
        if self.direction_in == LEFT:
            self.coord_sorties.append([random_out1, self.size - 1])
    if nb_out > 1:
        random_out2 = random.randrange(1, self.size - 1)
        if self.direction_in == RIGHT:
            self.coord_sorties.append([self.size - 1, random_out2])
        if self.direction_in == LEFT:
            self.coord_sorties.append([0, random_out2])
        if self.direction_in == DOWN:
            self.coord_sorties.append([random_out2, 0])
        if self.direction_in == UP:
            self.coord_sorties.append([random_out2, self.size - 1])
    # print(self.coord_sorties)
[ "def sort(self):", "def norders(self):\n return None", "def countIncomparable(self, verbose=False):\n\t\ti=0\n\t\tn=len(self.partialOrder.nodes())\n\t\tlistOutcomes = list(self.partialOrder.nodes())\n\t\tcount=0\n\t\tfor i in range(n):\n\t\t\tfor j in range(i+1,n):\n\t\t\t\tif self.compareOutcomes(listOutcomes[i],listOutcomes[j])==-1: \n\t\t\t\t\tif verbose: print(listOutcomes[i]+ \" \" +listOutcomes[j])\n\t\t\t\t\tcount+=1\n\t\t\t\n\t\treturn count;", "def reset_counts_merge_sort_three_way():\r\n # reset comparison and exchange counts for next run\r\n global M3X_COMP\r\n global M3X_EX\r\n M3X_COMP = 0\r\n M3X_EX = 0", "def topAlgs(self,sort=False):\n self._printDict('TopAlg',sort)", "def sort_by_numelems(node):\n return len(node.elems)", "def testSorting(self):\n target = [100,0,0,0,0,0,0,0,0,0]\n organisms = []\n code = \"\"\n for i in range(1,90,1):\n code+=\"+\"\n organisms.append(organism.Organism(code))\n for org in organisms:\n org.evaluate(target)\n organisms.sort()\n #print organisms[::-1][0], len(organisms[::-1][0].code)\n self.assertEqual(89, len(organisms[::-1][0].code))", "def sort_by_numnodes(node):\n return len(node.nodes)", "def sorting(data, count_b, count_c, count_e, count_m, count_p, pref, exam):\n if exam == 2:\n data.sort(key=lambda x: (min(-(x[2] + x[4]) / 2, -x[6]), x[0], x[1]))\n if exam == 3:\n data.sort(key=lambda x: (min(-x[3], -x[6]), x[0], x[1]))\n if exam == 4:\n data.sort(key=lambda x: (min(-x[4], -x[6]), x[0], x[1]))\n if exam == 5:\n data.sort(key=lambda x: (min(-(x[5] + x[4]) / 2, -x[6]), x[0], x[1]))\n if exam == 6:\n data.sort(key=lambda x: (min(-(x[2] + x[3]) / 2, -x[6]), x[0], x[1]))\n for d in data:\n if 'Bio' in d[6 + pref] and exam == 6:\n if count_b < N:\n bio.append(d)\n count_b = count_b + 1\n elif 'Chem' in d[6 + pref] and exam == 3:\n if count_c < N:\n chem.append(d)\n count_c = count_c + 1\n elif 'Eng' in d[6 + pref] and exam == 5:\n if count_e < N:\n eng.append(d)\n count_e = count_e + 1\n elif 'Math' in d[6 + pref] and exam == 4:\n if count_m < N:\n math.append(d)\n count_m = count_m + 1\n elif 'Phy' in d[6 + pref] and exam == 2:\n if count_p < N:\n phy.append(d)\n count_p = count_p + 1\n for s in bio:\n if s in data:\n data.remove(s)\n for s in chem:\n if s in data:\n data.remove(s)\n for s in eng:\n if s in data:\n data.remove(s)\n for s in math:\n if s in data:\n data.remove(s)\n for s in phy:\n if s in data:\n data.remove(s)\n return data, count_b, count_c, count_e, count_m, count_p", "def analyze_sorties(self):\n print('Analyzing Mission %d' % self.mission_number)\n print('=============================')\n responses = self.call_sortie_function('analyze',[])\n self.get_sortie_times()", "def sort_nms_ir(data, valid_count, output, axis, is_ascend):\n\n size = 1\n axis_mul_before = 1\n axis_mul_after = 1\n shape = data.shape\n if axis < 0:\n axis = len(shape) + axis\n for i, value in enumerate(shape, 0):\n size *= value\n if i < axis:\n axis_mul_before *= value\n elif i > axis:\n axis_mul_after *= value\n max_threads = int(tvm.target.current_target(allow_none=False).max_num_threads)\n ib = tvm.ir_builder.create()\n data = ib.buffer_ptr(data)\n valid_count = ib.buffer_ptr(valid_count)\n output = ib.buffer_ptr(output)\n nthread_tx = max_threads\n nthread_bx = size // max_threads + 1\n tx = tvm.thread_axis(\"threadIdx.x\")\n bx = tvm.thread_axis(\"vthread\")\n ib.scope_attr(tx, \"thread_extent\", nthread_tx)\n ib.scope_attr(bx, \"virtual_thread\", nthread_bx)\n tid = bx * nthread_tx + tx\n temp_data = ib.allocate(\"float32\", (1,), 
name=\"temp_data\", scope=\"local\")\n temp_index = ib.allocate(\"int32\", (1,), name=\"temp_index\", scope=\"local\")\n is_ascend = tvm.make.node(\"IntImm\", dtype=\"int32\", value=is_ascend)\n\n idxd = tvm.indexdiv\n idxm = tvm.indexmod\n\n with ib.for_range(0, axis_mul_before) as i:\n with ib.for_range(0, axis_mul_after) as j:\n current_sort_num = valid_count[i * axis_mul_after + j]\n base_idx = i * shape[axis] * axis_mul_after + j\n with ib.if_scope(tid < shape[axis]):\n output[base_idx + tid * axis_mul_after] = tid\n # OddEvenTransposeSort\n with ib.for_range(0, current_sort_num) as k:\n with ib.if_scope(tid < idxd(current_sort_num + 1, 2)):\n offset = base_idx + (2 * tid + idxm(k, 2)) * axis_mul_after\n with ib.if_scope(tvm.all(is_ascend == 1, \\\n 2 * tid + idxm(k, 2) + 1 < current_sort_num, \\\n data[offset] > data[offset + axis_mul_after])):\n temp_data[0] = data[offset]\n data[offset] = data[offset + axis_mul_after]\n data[offset + axis_mul_after] = temp_data[0]\n temp_index[0] = output[offset]\n output[offset] = output[offset + axis_mul_after]\n output[offset + axis_mul_after] = temp_index[0]\n with ib.if_scope(tvm.all(is_ascend == 0, \\\n 2 * tid + idxm(k, 2) + 1 < current_sort_num, \\\n data[offset] < data[offset + axis_mul_after])):\n temp_data[0] = data[offset]\n data[offset] = data[offset + axis_mul_after]\n data[offset + axis_mul_after] = temp_data[0]\n temp_index[0] = output[offset]\n output[offset] = output[offset + axis_mul_after]\n output[offset + axis_mul_after] = temp_index[0]\n ib.emit(tvm.make.Call(None, 'tvm_storage_sync',\n tvm.convert(['shared']),\n tvm.expr.Call.Intrinsic, None, 0))\n\n return ib.get()", "def test_sort(self):\n data = self.df.groupby('Age',sort=True).size()\n self.assertEqual(len(data.index), 89)\n self.assertEqual(data[0.42], 1)\n self.assertEqual(data.iloc[0], 1)\n data = self.df.groupby('Age',sort=False).size()\n self.assertEqual(len(data.index), 89)\n self.assertEqual(data[0.42], 1)\n self.assertEqual(data.iloc[0], 39)", "def topo_sort(self):", "def GenerateSortIndices(self, p_int, void, p_int_1, p_int_2, p_int_3, *int):\n ...", "def _sort_by(self, criteria):\n log.info('Sorting kernels by {}')\n assert self._select_drop_down('sort', criteria)", "def sort_outbox_by_trust(self) :\n new_outbox = []\n for (trust, fact, neighbor) in self.outbox:\n new_outbox.append( (self.trust[neighbor].trust, fact, neighbor) )\n new_outbox.sort(reverse = True)\n self.outbox = new_outbox", "def wiggleSort(self, nums: List[int]) -> None:\n m, n = median(nums), len(nums)\n # x(i) mapping such that if nums[x(i)] is sorted or at least three partitioned (<median, =median, >median)\n # such as in dutch national flag problem, then nums[i] is wiggle sorted as required.\n x = lambda i: (((n - 1) // 2 - i) * 2) if i <= (n - 1) // 2 else (((n - 1) - i) * 2 + 1)\n # print('virtual index mapping x(i):')\n # for i in range(n):\n # print(f\"{i=}, {x(i)=}\")\n # numxs[x[i]]\n i, j, k = 0, 0, n - 1\n while j <= k:\n if nums[x(j)] < m:\n # if i < j:\n # nums[x(i)], nums[x(j)] = nums[x(j)], nums[x(i)]\n nums[x(i)], nums[x(j)] = nums[x(j)], nums[x(i)]\n i += 1\n j += 1\n elif nums[x(j)] > m:\n nums[x(k)], nums[x(j)] = nums[x(j)], nums[x(k)]\n k -= 1\n else:\n # nums[x(j)] == median:\n j += 1\n # print(f'{i=}, {j=}, {k=}, {[nums[x(z) for z in range(n)]]=}')\n return None", "def InitializeSortIndices(self, p_int):\n ...", "def nb_coups(tableau : list) -> int:\r\n nb_colonnes = len(lignes)\r\n for lignes in tableau :\r\n for nb in range (nb_colonnes) :\r\n if nb == 1 :\r\n if" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
`{cmd}` 50/50 Heads or tails. Or... at least in theory. Some say there's a 1/6000 possibility of seeing the coin land on its side, though I've personally never seen it happen.
async def _cmdf_coin(self, substr, msg, privilege_level):
    if random.randint(0, 1) == 1:
        buf = "Heads"
    else:
        buf = "Tails"
    if random.randint(1, 600) == 1:
        buf = "The coin landed on its side."
        buf += "\nThis happens every approx. 1/6000 times!"
        buf += "\nhttp://journals.aps.org/pre/abstract/10.1103/PhysRevE.48.2547"
        buf += "\n(Disclaimer: Actually, this RNG does it every 600th flip"
        buf += " to give this event a slight probability boost.)"
    elif random.randint(1, 80) == 1:
        buf = "You accidentally tear a hole in the fabric of spacetime. Good job. Idiot."
    await self._client.send_msg(msg, buf)
    return
[ "def cute_head():\n print(part_hair_flat())\n print(part_eyes_winking())\n print(part_nose_bowtie())\n print(part_mouth_surprised())\n print(part_chin_squiggle())", "def handle_coin(command: Command):\n if command.has_arg() and command.arg.isnumeric():\n flips = min(max(int(command.arg), 1), 500)\n else:\n flips = 1\n\n response = []\n emoji = (':heads:', ':tails:')\n for i in range(flips):\n response.append(choice(emoji))\n\n bot.post_message(command.channel_id, \"\".join(response))", "async def hotdogs(ctx, amount : int):\r\n await ctx.send( \":hotdog: \" * amount)", "def flip():\n coin=random.random()\n if coin>.5: #Possibility of getting heads is 50%.\n return 1\n return 0", "def coinflip(self, number):\r\n for i in range(0,number):\r\n coinz = np.random.choice(headortail)\r\n if coinz == \"heads\":\r\n self.checkheads = self.checkheads + 1\r\n return self.checkheads \r\n \"\"\"checkheads returns number of heads\"\"\"", "async def trump(self, ctx):\n quote = trump.getRandomQuote()\n await ctx.send(\"**\\\"{}**\\\"\\n{} - {}\".format(quote[1], quote[2], quote[3]))", "def print_cpu_hand_mask_first_card(hand):\n print(\"Dealer's hand:\")\n print(\"??\", end=' ')\n print_hand(hand[2:])", "def six_heads():\n flips=0\n heads=0\n while heads<6: #Repeats until six consecutive heads are achieved.\n if flip()==1:\n heads+=1\n else:\n heads=0\n flips+=1\n return flips", "def bird_head():\n print(part_hair_curly())\n print(part_eyes_basic())\n print(part_nose_down())\n print(part_mouth_mustache())\n print(part_chin_basic())", "async def wow(self, ctx):\n await ctx.send(\"( ' O ')\")", "def _good_calls_cmd(std_bed, out_bed):\n cmds = [\n \"\"\"bedtools closest -k 10 -a {out_bed} -b {std_bed}\"\"\".format(out_bed=out_bed, std_bed=std_bed),\n \"\"\"awk '($4==$12) { print $0; }'\"\"\",\n \"\"\"awk '{ n=($5>$13)?$13:$5; x=($5>$13)?$5:$13; if ((x-n)/n <= 0.25) { print $0; } }'\"\"\",\n \"\"\"awk '{ l=($2>$10)?($2-$10):($10-$2); r=($3>$11)?($3-$11):($11-$3); if(l<=500 && r<=500) { print $0; } }'\"\"\",\n \"\"\"cut -f 1-8 | uniq | wc -l # good calls\"\"\"\n ]\n return ' | '.join(cmds)", "def commands(number: int) -> list:\n results: list = list()\n n: int = decimal_to_binary(number)\n # 10000 = Reverse the order of the\n # operations in the secret handshake.\n reverse = True if n >= 10000 else False\n for key in sorted(HANDSHAKE.keys(), reverse=True):\n if key <= n:\n if key != 10000:\n results.append(HANDSHAKE[key])\n n -= key\n\n return sort_results(results, reverse)", "def test_HeadingCMD(self):\n self.testController.HeadingCMD(500, 250)\n self.assertEqual(self.TestStreamBuffer.getvalue(), '!G 1 500\\n!G 2 250\\n')", "async def ping(self, ctx, count : int=1):\n\t\tif count < 1:\n\t\t\tawait self.bot.say(\"thats not enough pings. stahp trying to break me.๐Ÿ˜ \")\n\t\t\treturn\n\t\tif count > 20:\n\t\t\tawait self.bot.say(\"thats too many pings. stahp trying to break me.๐Ÿ˜ \")\n\t\t\treturn\n\n\t\tping_string = \"\"\n\t\tfor i in range(0, count):\n\t\t\tping_string += \"pong \"\n\t\tawait self.bot.say(ping_string)", "def get_mtHeadCommand(self):\n\n HdCtrl1 = int(self.SerialParam['HdCtrl1'], 2)\n HdCtrl2 = int(self.SerialParam['HdCtrl2'], 2)\n llim = (self.LLim * 6400/360.) % 6400 # convert from degrees to 1/16th Gradians, make sure value is below 6400\n rlim = (self.RLim * 6400/360.) 
% 6400\n # check the Step size is within the standard settings\n #if not (self.Step in [4, 8, 16, 32]):\n #rospy.logwarn(\"Setting sonar step size to non-standard value \" + str(self.Step))\n\n mtHeadCommand = (\n # Hdr, Hex Length, Bin Length {1| 2, 3, 4, 5 | 6 7}\n [0x40] + int_to_hex_length_uint8_array(60, 4) + [60, 0] +\n # Tx Nde - serial ID, Rx Nde - device ID { 8 | 9}\n [self.SerialParam['SID'], self.SerialParam['DID']] +\n # No. Byte, mtHeadCommand ID 19, Message Sequence Bitset=End {10 | 11 | 12}\n [55] + [0x13] + [0x80] +\n # repeat of Tx Nde, V3B Params {13 | 14}\n [self.SerialParam['SID'], self.SerialParam['HeadType']] +\n # HdCtrl, HdType {15, 16 | 17}\n [HdCtrl1, HdCtrl2, self.SerialParam['DstHead']] +\n # TXN Ch1,Ch2, RXN Ch1, Ch2, Tx PulseLen ignored by DST {18 to 21|22 to 25|26 to 29|30 to 33|34,35}\n [0] * 18 +\n # range (ignored), LLim, RLim {36, 37| 38, 39| 40, 41}\n number_to_uint8(self.Range, 2) + number_to_uint8(llim, 2) + number_to_uint8(rlim, 2) +\n # ADSpan, ADLow, {42 | 43}\n number_to_uint8(self.ADSpan, 1) + number_to_uint8(self.ADLow, 1) +\n # Igain Ch1, Igain Ch2 {44 | 45}\n number_to_uint8(self.IGainB1, 1) + number_to_uint8(self.IGainB2, 1) +\n # Slope Ch1, Ch2 ignored by DST {46, 47| 48, 49}\n [0] * 4 +\n # MoTime, Step Angle Size {50 | 51}\n number_to_uint8(self.MoTime, 1) + number_to_uint8(self.Step, 1) +\n # ADInterval, NBins {52, 53| 54, 55}\n number_to_uint8(self.ADInterval, 2) + number_to_uint8(self.NBins, 2) +\n # MaxADBuf, Lockout {56, 57| 58, 59}\n number_to_uint8(self.SerialParam['MaxADBuf'], 2) + number_to_uint8(self.SerialParam['Lockout'], 2) +\n # Minor Axis Direction, Major Axis Pan {60, 61| 62}\n number_to_uint8(self.SerialParam['MinorAxis'], 2) + number_to_uint8(self.SerialParam['MajorAxis'], 1) +\n # Ctl2, ScanZ, LF {63| 64, 65| 66}\n [0x00] + [0x00, 0x00] + [0x0A]\n )\n #rospy.logdebug(\"Assembled new mtHeadCommand: \" +\n # str(mtHeadCommand[0:14]) + \"\\n\" +\n # str(mtHeadCommand[14:28]) + \"\\n\" +\n # str(mtHeadCommand[28:42]) + \"\\n\" +\n # str(mtHeadCommand[42:56]) + \"\\n\" +\n # str(mtHeadCommand[56:]) + \"\\n\")\n\n return mtHeadCommand", "def quests():\n\n kill_the_rats()\n goblin_extermination()\n find_the_castle()", "def get_instructions(self) -> str:\n return \"Players take turns claiming cells. When a player captures\" \\\n \" at least half of the cells in a ley-line, then player\" \\\n \" captures the ley-line. Player wins when captures at\" \\\n \" least half of the lay-lines.\"", "def trick_or_treat():\n return 'trick' if random.random() < .5 else 'treat'", "def test_random_alphanumeric512(self):\n payload = ''.join(choice(ascii_letters) for x in range(512))\n rawsend(payload)\n self.assertTrue(puck())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
`{cmd}` Chooses a random member from this server. This command does not mention users.
async def _cmdf_colour(self, substr, msg, privilege_level):
    random_member = random.choice(list(msg.server.members))
    buf = "**Your random user is:** {0} (UID: {1})\n".format(random_member.name, random_member.id)
    buf += "(Chosen out of {} users.)".format(str(len(msg.server.members)))
    # TODO: Fix concurrent access?
    await self._client.send_msg(msg, buf)
    return
[ "def srandmember(self, key):\r\n return self.execute_command(\"SRANDMEMBER\", key)", "def kick_random(client, author_id, message_object, thread_id, thread_type):\n gc_thread = Client.fetchThreadInfo(client, thread_id)[thread_id]\n persons_list = Client.fetchAllUsersFromThreads(self=client, threads=[gc_thread])\n num = random.randint(0, len(persons_list)-1) #random number within range\n person = persons_list[num]\n log.info(\"{} removed {} from {}\".format(author_id, \"random\", thread_id))\n action_queue.put(Action(client, 'removeuser', thread_id, thread_type, pid=person.uid))\n return\n log.info(\"Unable to remove: person not found.\")", "def exec_random(msg):\r\n try:\r\n a = int(msg.args[0])\r\n b = int(msg.args[1])\r\n return str(randint(min(a, b), max(a, b)))\r\n except (IndexError, TypeError):\r\n return \"The format is: !random {number} {number}\"", "def get_leader(group):\n return random.choice(group)", "async def kick(self, ctx, member : discord.Member):\n\t\tperms = await util.check_perms(self, ctx)\n\t\tprint(perms)\n\t\tif (perms):\n\t\t\ttry:\n\t\t\t\tawait self.bot.kick(member)\n\t\t\t\tawait self.bot.say(\"{} has been kicked from the server.\".format(member.name))\n\t\t\texcept:\n\t\t\t\tawait self.bot.say(\"Could not kick member. This might be a permissions issue.\")", "def quote_random(self, msg, status, args):\n quotes = self.getAll()\n\n # Send it\n msg.Chat.SendMessage(\"%s\" % (random.choice(quotes)['value']['text']))", "def choice(hosts):\n return random.choice(hosts)", "def discord_toggle_members():", "def get_random_server(servers):\n\n return 'https://{}.omegle.com'.format(random.choice(servers))", "def random_node(self):\n\n rnd = random.randint(0, len(self.clients_lst)-1) if not self.fixed_node else self.fixed_node\n pod_ip, pod_name = self.clients_lst[rnd]['pod_ip'], self.clients_lst[rnd]['name']\n if not self.fixed_node:\n print(\"randomly \", end=\"\")\n\n print(f\"selected pod: ip = {pod_ip}, name = {pod_name}\")\n return pod_ip, pod_name", "def get_random_server(self,domain):\n # print(\"get server randomly\")\n js= self.collection.find_one({\"domain\":self.domain})\n # print(type(js))\n server_list = js[\"mx_server\"]\n num = random.randint(0,len(server_list)-1)\n return server_list[num]", "async def membership(ctx, arg):\n\n target = msg_to_member(ctx.message)\n reply = membership_duration(target)\n await ctx.send(reply)", "async def kick(self, ctx: customContext, member: MemberConvert):\r\n await member.kick()\r\n await ctx.send(\"kicked **\" + member.display_name + \"**\")", "async def ismuted(ctx, member : discord.Member = None):\r\n\t\t\r\n\tif member == None:\r\n\t\tmsg = 'Usage: `ismuted [member]`'\r\n\t\tawait bot.send_message(ctx.message.channel, msg)\r\n\t\treturn\r\n\r\n\tif type(member) is str:\r\n\t\ttry:\r\n\t\t\tmember = discord.utils.get(message.server.members, name=member)\r\n\t\texcept:\r\n\t\t\tprint(\"That member does not exist\")\r\n\t\t\treturn\r\n\r\n\t# Initialize User\r\n\tglobals.serverList = checkUser(member, ctx.message.server, globals.serverList)\r\n\t\t\t\r\n\tisMute = getUserStat(member, ctx.message.server, globals.serverList, \"Muted\")\r\n\tif isMute.lower() == \"yes\":\r\n\t\tmsg = '{} is *Muted*.'.format(member)\t\r\n\telse:\r\n\t\tmsg = '{} is *Unmuted*.'.format(member)\r\n\t\t\r\n\tawait bot.send_message(ctx.message.channel, msg)", "def set_new_user(self):\n self.current_user = random.choice(self.hosts)", "async def pick(ctx: commands.Context, choice):\n await pick_or_ban(ctx, \"picks\", choice)", "async def vr(self, 
ctx, amount: int):\n # First getting the voice channel object\n voice_channel = ctx.message.author.voice.channel\n if not voice_channel:\n return await ctx.message.channel.send(\"That is not a valid voice channel.\")\n members = voice_channel.members\n if len(members) < amount:\n return await self.say(\"Sample larger than population.\")\n member_names = [x.display_name for x in members]\n msg = random.sample(member_names, int(amount))\n\n embed = make_embed(\n \"{} random users from {}\".format(str(amount), voice_channel.name),\n msg\n )\n return await ctx.message.channel.send(embed=embed)", "def show_random_once(message):\n chat_id = message.from_user.id\n if chat_id not in threads.keys():\n threads[chat_id] = MyThread(chat_id, users, bot)\n\n bot.send_message(chat_id, random.choice(users.select(chat_id, 'reminders')))", "def randomkey(self):\r\n return self.execute_command(\"RANDOMKEY\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a specified number of ZeroPadding and Convolution layers to the model, and a MaxPooling layer at the very end.
def ConvBlock(model, layers, filters):
    for i in range(layers):
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(filters, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
[ "def add_new_last_layer(base_model, nb_classes):\r\n x = base_model.output\r\n x = GlobalAveragePooling2D()(x)\r\n x = Dense(FC_SIZE, activation='relu')(x) #new FC layer, random init\r\n predictions = Dense(nb_classes, activation='softmax')(x) #new softmax layer\r\n model = Model(input=base_model.input, output=predictions)\r\n return model", "def __init__(self, max_pool_layer_index=1, dropout_ratio=0.5, last_layer_num_params=2):\n\n assert 1 <= max_pool_layer_index <= 3, \\\n f\"Invalid max_pool_layer_index {max_pool_layer_index}\"\n\n super().__init__()\n\n # extract the first a few layers, then combine it with two extra layers\n # (1) a flattening layer that returns an 1d array\n # (2) a fully connected linear layer\n layers_to_stack = self.MAX_POOL_LAYERS[max_pool_layer_index - 1]\n maxpool_out_shape = self.MAX_POOL_OUT_SHAPE[max_pool_layer_index - 1]\n\n num_params = reduce(mul, maxpool_out_shape)\n\n alexnet = models.alexnet(pretrained = True)\n # freeze the parameters of all layers\n for param in alexnet.parameters():\n param.requires_grad = False # fix weights\n\n self.conv_pool = nn.Sequential(*(\n list(alexnet.features.children())[:layers_to_stack] +\n [\n Flatten(num_params),\n nn.Dropout(p=dropout_ratio),\n nn.Linear(num_params, last_layer_num_params)\n ]\n ))", "def SqueezeNet(nb_classes, inputs=(128,128,3)):\n\n input_img = Input(shape=inputs)\n conv1 = Conv3D(\n 32, (3,3,3), activation='relu', kernel_initializer='glorot_uniform',\n strides=(2,2,1), padding='same', name='conv1')(input_img)\n maxpool1 = MaxPooling3D(\n pool_size=(3,3,1), strides=(2,2,1), name='maxpool1')(conv1)\n\n fire2_squeeze = Conv3D(\n 8, (1,1,1), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire2_squeeze')(maxpool1)\n fire2_expand1 = Conv3D(\n 16, (1,1,1), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire2_expand1')(fire2_squeeze)\n fire2_expand2 = Conv3D(\n 16, (3,3,3), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire2_expand2')(fire2_squeeze)\n merge2 = Concatenate(axis=-1)([fire2_expand1, fire2_expand2])\n\n fire3_squeeze = Conv3D(\n 8, (1,1,1), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire3_squeeze')(merge2)\n fire3_expand1 = Conv3D(\n 16, (1,1,1), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire3_expand1')(fire3_squeeze)\n fire3_expand2 = Conv3D(\n 16, (3,3,3), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire3_expand2')(fire3_squeeze)\n merge3 = Concatenate(axis=-1)([fire3_expand1, fire3_expand2])\n\n residual32 = add([merge2, merge3])\n\n fire4_squeeze = Conv3D(\n 16, (1,1,1), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire4_squeeze')(residual32)\n fire4_expand1 = Conv3D(\n 32, (1,1,1), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire4_expand1')(fire4_squeeze)\n fire4_expand2 = Conv3D(\n 32, (3,3,3), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire4_expand2')(fire4_squeeze)\n merge4 = Concatenate(axis=-1)([fire4_expand1, fire4_expand2])\n #maxpool4 = MaxPooling3D(\n # pool_size=(3,3,1), strides=(2,2,2), name='maxpool4')(merge4)\n\n fire5_squeeze = Conv3D(\n 16, (1,1,1), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire5_squeeze')(merge4)\n fire5_expand1 = Conv3D(\n 32, (1,1,1), activation='relu', kernel_initializer='glorot_uniform',\n 
padding='same', name='fire5_expand1')(fire5_squeeze)\n fire5_expand2 = Conv3D(\n 32, (3,3,3), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire5_expand2')(fire5_squeeze)\n merge5 = Concatenate(axis=-1)([fire5_expand1, fire5_expand2])\n\n residual45 = add([merge4, merge5])\n\n fire6_squeeze = Conv3D(\n 24, (1,1,1), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire6_squeeze')(merge5)\n fire6_expand1 = Conv3D(\n 64, (1,1,1), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire6_expand1')(fire6_squeeze)\n fire6_expand2 = Conv3D(\n 64, (3,3,3), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire6_expand2')(fire6_squeeze)\n merge6 = Concatenate(axis=-1)([fire6_expand1, fire6_expand2])\n\n fire6_dropout = Dropout(0.5, name='fire6_dropout')(merge6)\n conv10 = Conv3D(\n nb_classes, (1,1,1), kernel_initializer='glorot_uniform',\n padding='valid', name='conv10')(fire6_dropout)\n\n # The size should match the output of conv10\n avgpool10 = AveragePooling3D(\n (2, 2, 3), name='avgpool10')(conv10)\n\n flatten = Flatten(name='flatten')(avgpool10)\n softmax = Activation(\"softmax\", name='softmax')(flatten)\n\n return Model(inputs=input_img, outputs=softmax)", "def add_layer(self, layer):\n\n if layer == 0:\n # print(self.input_shape)\n if not self.input_shape:\n self.model.add(tf.keras.layers.Conv2D(int(self.ConvolutionLineEdit.text()),\n eval(self.ConvKernelBox.currentText()),\n activation=self.ConvActivationBox.currentText(),\n input_shape=(self.target_size[0], self.target_size[1], 3)))\n self.input_shape = True\n else:\n self.model.add(tf.keras.layers.Conv2D(int(self.ConvolutionLineEdit.text()),\n eval(self.ConvKernelBox.currentText()),\n activation=self.ConvActivationBox.currentText()\n ))\n\n elif layer == 1:\n self.model.add(tf.keras.layers.MaxPool2D(eval(self.PoolingKernelBox.currentText())))\n\n elif layer == 2:\n # print(self.flatten)\n if not self.flatten:\n self.model.add(tf.keras.layers.Flatten())\n self.model.add(tf.keras.layers.Dense(int(self.DenseLineEdit.text()),\n activation=self.DenseAcitvationBox.currentText()))\n self.ConvolutionAddButton.setEnabled(False)\n self.PoolingAddButton.setEnabled(False)\n self.CompileButton.setEnabled(True)\n self.flatten = True\n else:\n self.model.add(tf.keras.layers.Dense(int(self.DenseLineEdit.text()),\n activation=self.DenseAcitvationBox.currentText()))\n\n self.model_summary()", "def add_inception_6(self, bottom, counter = 'inception5',\n outs_1x1 = None,\n outs_3x3_reduce = None, outs_3x3 = None,\n outs_double_3x3_reduce = None, outs_double_3x3 = None,\n outs_pool_proj = None, outs_pool_proj_type = params.Pooling.MAX, pool_pad = 1,\n reduction_stride = None, reduction_inception = False, prepend_batch_norm = False):\n # Values that are common for conv layers\n common_params = [dict(lr_mult = 1, decay_mult = 1), dict(lr_mult = 2, decay_mult = 0)]\n fill_xavier = dict(type='xavier')\n fill_const = dict(type='constant', value = 0.2)\n \n pool_stride = reduction_stride if reduction_inception else 1\n \n # reduction branch\n conv_1x1 = None\n conv_1x1_relu = None\n if not(reduction_inception):\n conv_1x1 = self.add_convolution(bottom, counter = counter + '/1x1',\n num_output = outs_1x1, kernel_size = 1,\n param = common_params,\n weight_filler = fill_xavier,\n bias_filler = fill_const,\n )\n conv_1x1_relu = self.add_relu(conv_1x1, counter = counter + '/relu_1x1', prepend_batch_norm = prepend_batch_norm)\n \n # 3x3 branch : 
reduce -> relu -> conv -> relu\n reduce_3x3 = self.add_convolution(bottom, counter = counter + '/3x3_reduce',\n num_output = outs_3x3_reduce, kernel_size = 1,\n param = common_params,\n weight_filler = fill_xavier,\n bias_filler = fill_const,\n )\n reduce_3x3_relu = self.add_relu(reduce_3x3, counter = counter + '/relu_3x3_reduce', prepend_batch_norm = prepend_batch_norm)\n conv_3x3 = self.add_convolution(reduce_3x3_relu, counter = counter + '/3x3',\n num_output = outs_3x3, kernel_size = 3, pad = 1, stride = pool_stride,\n param = common_params,\n weight_filler = fill_xavier,\n bias_filler = fill_const,\n )\n conv_3x3_relu = self.add_relu(conv_3x3, counter = counter + '/relu_3x3', prepend_batch_norm = prepend_batch_norm)\n \n # double 3x3 branch : reduce -> relu -> conv -> relu -> conv -> relu\n double_3x3_reduce = self.add_convolution(bottom, counter = counter + '/double_3x3_reduce',\n num_output = outs_double_3x3_reduce, kernel_size = 1,\n param = common_params,\n weight_filler = fill_xavier,\n bias_filler = fill_const,\n )\n relu_double_3x3_reduce = self.add_relu(double_3x3_reduce, counter = counter + '/relu_double_3x3_reduce', prepend_batch_norm = prepend_batch_norm)\n\n double_3x3_1 = self.add_convolution(relu_double_3x3_reduce, counter = counter + '/double_3x3_1',\n num_output = outs_double_3x3, kernel_size = 5, pad = 2,\n param = common_params,\n weight_filler = fill_xavier,\n bias_filler = fill_const,\n )\n relu_double_3x3_1 = self.add_relu(double_3x3_1, counter = counter + '/relu_double_3x3_1', prepend_batch_norm = prepend_batch_norm)\n \n double_3x3_2 = self.add_convolution(relu_double_3x3_1, counter = counter + '/double_3x3_2',\n num_output = outs_double_3x3, kernel_size = 5, pad = 2, stride = pool_stride,\n param = common_params,\n weight_filler = fill_xavier,\n bias_filler = fill_const,\n )\n relu_double_3x3_2 = self.add_relu(double_3x3_2, counter = counter + '/relu_double_3x3_2', prepend_batch_norm = prepend_batch_norm)\n \n # pool branch: pool-> projection -> relu\n pool_max_3x3 = self.add_pooling(bottom, counter = counter + '/pool',\n pool = outs_pool_proj_type,\n kernel_size = 3, stride = pool_stride, pad = pool_pad,\n )\n pool_max_3x3_proj = None\n pool_max_3x3_relu = None\n if not(reduction_inception):\n pool_max_3x3_proj = self.add_convolution(pool_max_3x3, counter = counter + '/pool_proj',\n num_output = outs_pool_proj, kernel_size = 1,\n param = common_params,\n weight_filler = fill_xavier,\n bias_filler = fill_const,\n )\n pool_max_3x3_relu = self.add_relu(pool_max_3x3_proj, counter = counter + '/relu_pool_proj', prepend_batch_norm = prepend_batch_norm)\n \n concatentation = None\n if not(reduction_inception):\n concatentation = [conv_1x1_relu, conv_3x3_relu, relu_double_3x3_2, pool_max_3x3_relu]\n else:\n concatentation = [conv_3x3_relu, relu_double_3x3_2, pool_max_3x3]\n concat = self.add_concat(concatentation, counter = counter + '/output')\n return concat", "def add_maxpool(net, bottom, name, k, ks, pad):\n net[name] = L.Pooling(bottom, kernel_size=k, stride=ks, pad=pad, \n pool=P.Pooling.MAX)", "def build_CNN(input_shape, filters= [3], kernels = [4], activation = 'relu', optimizer= 'Adam'):\n model = Sequential()\n model.add(BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05, input_shape = input_shape))\n model.add( Conv1D(filters=3, kernel_size= 4, strides =1, activation = activation))\n for (f, k) in zip(filters, kernels):\n \n model.add(BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05))\n model.add(Dropout(0.1))\n model.add(Conv1D(filters=f, 
kernel_size= k, strides =1, activation = activation))\n model.add(MaxPooling1D())\n\n model.add(Flatten())\n model.add(BatchNormalization(axis=1, momentum=0.1, epsilon=1e-05))\n model.add(Dropout(0.1))\n model.add(Dense(units=1, activation='sigmoid')) \n\n\n\n model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n\n\n return model", "def add_layer(self, model, incr):\r\n \r\n # Set previous layers not trainable\r\n for layer in model.layers[:-1]:\r\n layer.trainable = False\r\n \r\n # Output of previous layer\r\n out = model.layers[-2].output\r\n \r\n # Add the new layer\r\n layer_new = Dense(units = self.units_nbr, activation = self.activation,\r\n name = 'hidden' + str(incr))(out)\r\n \r\n decoded = model.layers[-1](layer_new)\r\n \r\n return Model(model.layers[0].input, decoded)", "def create_layers(self):\n # Inputs\n X = tf.placeholder(tf.float32, shape=(None, 784), name=\"data\")\n Y = tf.placeholder(tf.float32, shape=(None, 10), name=\"labels\") \n self.inputs = [X,Y]\n\n # inference\n layer1 = Layer(256,X,'layer1')\n layer2 = Layer(256,layer1.output,'layer2')\n layer3 = Layer(256,layer2.output,'layer3')\n logits = Layer(10,layer3.output,'layer4',out=True)\n self.layers = [layer1,layer2,layer3,logits]\n\n # sg layers\n synth_layer1 = Layer(256, [layer1.output,Y], 'sg2', sg=True)\n synth_layer2 = Layer(256, [layer2.output,Y], 'sg3', sg=True)\n synth_layer3 = Layer(256, [layer3.output,Y], 'sg4', sg=True)\n self.synth_layers = [synth_layer1,synth_layer2,synth_layer3]", "def _addOutputLayers(self):\n layerName = self.layerLayout[-1]\n layerDict = self.initOptionDict.pop(layerName)\n layerType = layerDict.pop('type').lower()\n layerSize = layerDict.pop('dim_out',None)\n if layerSize is not None and layerSize != self.numClasses:\n self.raiseAWarning('The \"dim_out\" of last output layer: ', layerName, 'will be resetted to values provided in \"num_classes\", i.e.', self.numClasses)\n if layerType not in ['dense']:\n self.raiseAnError(IOError,'The last layer should always be Dense layer, but',layerType,'is provided!')\n layerInstant = self.__class__.availLayer[layerType]\n self._ROM.add(self._getLastLayer(layerInstant, layerDict))", "def nasnet_maxpool():\n return nn.MaxPool2D(\n pool_size=3,\n strides=2,\n padding=1)", "def add_inception_7(self, bottom, counter = 'inception5',\n outs_1x1 = None,\n outs_3x3_reduce = None, outs_3x3 = None,\n outs_double_3x3_reduce = None, outs_double_3x3 = None,\n outs_pool_proj = None, outs_pool_proj_type = params.Pooling.MAX, pool_pad = 1,\n reduction_stride = None, reduction_inception = False, prepend_batch_norm = False):\n # Values that are common for conv layers\n common_params = [dict(lr_mult = 1, decay_mult = 1), dict(lr_mult = 2, decay_mult = 0)]\n fill_xavier = dict(type='xavier')\n fill_const = dict(type='constant', value = 0.2)\n \n pool_stride = reduction_stride if reduction_inception else 1\n \n # reduction branch\n conv_1x1 = None\n conv_1x1_relu = None\n if not(reduction_inception):\n conv_1x1 = self.add_convolution(bottom, counter = counter + '/1x1',\n num_output = outs_1x1, kernel_size = 1,\n param = common_params,\n weight_filler = fill_xavier,\n bias_filler = fill_const,\n )\n conv_1x1_relu = self.add_relu(conv_1x1, counter = counter + '/relu_1x1', prepend_batch_norm = prepend_batch_norm)\n \n # 3x3 branch : reduce -> relu -> conv -> relu\n reduce_3x3 = self.add_convolution(bottom, counter = counter + '/3x3_reduce',\n num_output = outs_3x3_reduce, kernel_size = 1,\n param = common_params,\n weight_filler = 
fill_xavier,\n bias_filler = fill_const,\n )\n reduce_3x3_relu = self.add_relu(reduce_3x3, counter = counter + '/relu_3x3_reduce', prepend_batch_norm = prepend_batch_norm)\n conv_3x3 = self.add_separated_3x3_convolution(reduce_3x3_relu, counter = counter + '/3x3',\n prepend_batch_norm = prepend_batch_norm,\n num_output = outs_3x3, pad = 1,\n param = common_params,\n weight_filler = fill_xavier,\n bias_filler = fill_const,\n )\n \n # double 3x3 branch : reduce -> relu -> conv -> relu -> conv -> relu\n double_3x3_reduce = self.add_convolution(bottom, counter = counter + '/double_3x3_reduce',\n num_output = outs_double_3x3_reduce, kernel_size = 1,\n param = common_params,\n weight_filler = fill_xavier,\n bias_filler = fill_const,\n )\n relu_double_3x3_reduce = self.add_relu(double_3x3_reduce, counter = counter + '/relu_double_3x3_reduce')\n\n double_3x3_1 = self.add_separated_3x3_convolution(\n relu_double_3x3_reduce, counter = counter + '/double_3x3_1',\n prepend_batch_norm = prepend_batch_norm,\n num_output = outs_double_3x3, pad = 2,\n param = common_params,\n weight_filler = fill_xavier,\n bias_filler = fill_const,\n )\n \n double_3x3_2 = self.add_separated_3x3_convolution(\n double_3x3_1, counter = counter + '/double_3x3_2',\n prepend_batch_norm = prepend_batch_norm,\n num_output = outs_double_3x3, pad = 2, stride = pool_stride,\n param = common_params,\n weight_filler = fill_xavier,\n bias_filler = fill_const,\n )\n \n # pool branch: pool-> projection -> relu\n pool_max_3x3 = self.add_pooling(bottom, counter = counter + '/pool',\n pool = outs_pool_proj_type,\n kernel_size = 3, stride = pool_stride, pad = pool_pad,\n )\n pool_max_3x3_proj = None\n pool_max_3x3_relu = None\n if not(reduction_inception):\n pool_max_3x3_proj = self.add_convolution(pool_max_3x3, counter = counter + '/pool_proj',\n num_output = outs_pool_proj, kernel_size = 1,\n param = common_params,\n weight_filler = fill_xavier,\n bias_filler = fill_const,\n )\n pool_max_3x3_relu = self.add_relu(pool_max_3x3_proj, counter = counter + '/relu_pool_proj', prepend_batch_norm = prepend_batch_norm)\n \n concatentation = None\n if not(reduction_inception):\n concatentation = [conv_1x1_relu, conv_3x3, double_3x3_2, pool_max_3x3_relu]\n else:\n concatentation = [conv_3x3, double_3x3_2, pool_max_3x3]\n concat = self.add_concat(concatentation, counter = counter + '/output')\n return concat", "def vgg_block(layer_in, num_filters, num_conv):\n\n\tfor _ in range(num_conv):\n\t\tlayer_in = Conv2D(num_filters, (3,3), padding=\"same\", activation=\"relu\")(layer_in)\n\n\tlayer_in = MaxPooling2D((2,2), strides=(2,2))(layer_in)\n\treturn layer_in", "def add_inception_5(self, bottom, counter = 'inception5',\n outs_1x1 = None,\n outs_3x3_reduce = None, outs_3x3 = None,\n outs_5x5_reduce = None, outs_5x5 = None,\n outs_pool_proj = None):\n # Values that are common for conv layers\n common_params = [dict(lr_mult = 1, decay_mult = 1), dict(lr_mult = 2, decay_mult = 0)]\n fill_xavier = dict(type='xavier')\n fill_const = dict(type='constant', value = 0.2)\n \n #\n conv_1x1 = self.add_convolution(bottom, counter = counter + '/1x1',\n num_output = outs_1x1, kernel_size = 1,\n param = common_params,\n weight_filler = fill_xavier,\n bias_filler = fill_const,\n )\n conv_1x1_relu = self.add_relu(conv_1x1, counter = counter + '/relu_1x1')\n \n # 3x3 branch : reduce -> relu -> conv -> relu\n reduce_3x3 = self.add_convolution(bottom, counter = counter + '/3x3_reduce',\n num_output = outs_3x3_reduce, kernel_size = 1,\n param = common_params,\n weight_filler 
= fill_xavier,\n bias_filler = fill_const,\n )\n reduce_3x3_relu = self.add_relu(reduce_3x3, counter = counter + '/relu_3x3_reduce')\n conv_3x3 = self.add_convolution(reduce_3x3_relu, counter = counter + '/3x3',\n num_output = outs_3x3, kernel_size = 3, pad = 1,\n param = common_params,\n weight_filler = fill_xavier,\n bias_filler = fill_const,\n )\n conv_3x3_relu = self.add_relu(conv_3x3, counter = counter + '/relu_3x3')\n \n # 5x5 branch : reduce -> relu -> conv -> relu\n reduce_5x5 = self.add_convolution(bottom, counter = counter + '/5x5_reduce',\n num_output = outs_5x5_reduce, kernel_size = 1,\n param = common_params,\n weight_filler = fill_xavier,\n bias_filler = fill_const,\n )\n reduce_5x5_relu = self.add_relu(reduce_5x5, counter = counter + '/relu_5x5_reduce')\n conv_5x5 = self.add_convolution(reduce_5x5_relu, counter = counter + '/5x5',\n num_output = outs_5x5, kernel_size = 5, pad = 2,\n param = common_params,\n weight_filler = fill_xavier,\n bias_filler = fill_const,\n )\n conv_5x5_relu = self.add_relu(conv_5x5, counter = counter + '/relu_5x5')\n \n # pool branch: pool-> projection -> relu\n pool_max_3x3 = self.add_pooling(bottom, counter = counter + '/pool',\n pool = params.Pooling.MAX,\n kernel_size = 3, stride = 1, pad = 1,\n )\n pool_max_3x3_proj = self.add_convolution(pool_max_3x3, counter = counter + '/pool_proj',\n num_output = outs_pool_proj, kernel_size = 1,\n param = common_params,\n weight_filler = fill_xavier,\n bias_filler = fill_const,\n )\n pool_max_3x3_relu = self.add_relu(pool_max_3x3_proj, counter = counter + '/relu_pool_proj')\n \n concatentation = [conv_1x1_relu,conv_3x3_relu,conv_5x5_relu,pool_max_3x3_relu]\n concat = self.add_concat(concatentation, counter = counter + '/output')\n return concat", "def build_model():\n model_weights = np.load('models/sound8.npy').item()\n\n filter_parameters = [{'name': 'conv1', 'num_filters': 16, 'padding': 32,\n 'kernel_size': 64, 'conv_strides': 2,\n 'pool_size': 8, 'pool_strides': 8},\n\n {'name': 'conv2', 'num_filters': 32, 'padding': 16,\n 'kernel_size': 32, 'conv_strides': 2,\n 'pool_size': 8, 'pool_strides': 8},\n\n {'name': 'conv3', 'num_filters': 64, 'padding': 8,\n 'kernel_size': 16, 'conv_strides': 2},\n\n {'name': 'conv4', 'num_filters': 128, 'padding': 4,\n 'kernel_size': 8, 'conv_strides': 2},\n\n {'name': 'conv5', 'num_filters': 256, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2,\n 'pool_size': 4, 'pool_strides': 4},\n\n {'name': 'conv6', 'num_filters': 512, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2},\n\n {'name': 'conv7', 'num_filters': 1024, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2},\n\n {'name': 'conv8', 'num_filters': 1000, 'padding': 0,\n 'kernel_size': 8, 'conv_strides': 2},\n\n {'name': 'conv8_2', 'num_filters': 401, 'padding': 0,\n 'kernel_size': 8, 'conv_strides': 2},\n ]\n\n inputs = Input(shape=(None, 1)) # define inputs\n\n x = inputs\n for layer in filter_parameters:\n if 'conv8' not in layer['name']:\n x = ZeroPadding1D(padding=layer['padding'])(x)\n else:\n x = ZeroPadding1D(padding=layer['padding'])(conv7_layer_output)\n\n conv_layer = Conv1D(layer['num_filters'],\n kernel_size=layer['kernel_size'],\n strides=layer['conv_strides'],\n padding='valid', name=layer['name'])\n\n weights = model_weights[layer['name']]['weights'].reshape(conv_layer.get_weights()[0].shape)\n biases = model_weights[layer['name']]['biases']\n conv_layer.set_weights([weights, biases])\n\n x = conv_layer(x)\n\n if 'conv8' not in layer['name']: # except the last layers\n gamma = 
model_weights[layer['name']]['gamma']\n beta = model_weights[layer['name']]['beta']\n mean = model_weights[layer['name']]['mean']\n var = model_weights[layer['name']]['var']\n\n batch_norm = BatchNormalization()\n batch_norm.set_weights([gamma, beta, mean, var])\n x = batch_norm(x)\n x = Activation('relu')(x)\n if 'pool_size' in layer:\n x = MaxPooling1D(pool_size=layer['pool_size'],\n strides=layer['pool_strides'],\n padding='valid')(x)\n if layer['name'] == 'conv7':\n conv7_layer_output = x\n elif layer['name'] == 'conv8':\n imagenet_output = x\n elif layer['name'] == 'conv8_2':\n places_output = x\n\n model = Model(inputs=inputs,outputs=[imagenet_output, places_output])\n return model", "def test_disable_last_layer_softmax():\n batch_size = 6\n\n input1 = tf.random.uniform((batch_size,) + RESNET_DEFAULT_INPUT_SHAPE, minval=0.0, maxval=1.0)\n\n model11 = keras.applications.ResNet50(classifier_activation=None) # Somehow the `classifier_activation` function is undocumented.\n output11 = model11(input1)\n print(f\"Output 11: min: {tf.reduce_min(output11).numpy()}, max: {tf.reduce_max(output11).numpy()}, mean: {tf.reduce_mean(output11).numpy()}, sum: {tf.reduce_sum(output11).numpy()}\") # Sum is no longer `batch_size`.\n\n model12 = keras.Sequential([\n keras.applications.ResNet50(include_top=False),\n keras.layers.GlobalAveragePooling2D(name=\"avg_pool\"),\n keras.layers.Dense(1000, name=\"predictions\"),\n ])\n model12.set_weights(model11.get_weights())\n output12 = model12(input1)\n print(f\"Output 12: min: {tf.reduce_min(output12).numpy()}, max: {tf.reduce_max(output12).numpy()}, mean: {tf.reduce_mean(output12).numpy()}, sum: {tf.reduce_sum(output12).numpy()}\")\n\n input2 = tf.random.uniform((batch_size,) + RESNET_DEFAULT_INPUT_SHAPE, minval=-1.0, maxval=1.0)\n model21 = keras.applications.ResNet50V2(classifier_activation=None)\n output21 = model21(input2)\n print(f\"Output 21: min: {tf.reduce_min(output21).numpy()}, max: {tf.reduce_max(output21).numpy()}, mean: {tf.reduce_mean(output21).numpy()}, sum: {tf.reduce_sum(output21).numpy()}\") # Sum is no longer `batch_size`.", "def unet_model_3d(n_labels,shape,W,lr=1e-5, pool_size=(2, 2, 2), initial_learning_rate=0.00001, deconvolution=False,\n depth=3, n_base_filters=16, include_label_wise_dice_coefficients=False, metrics=dice_coefficient,\n batch_normalization=False, activation_name=\"sigmoid\"):\n inputs = Input(shape)\n print('Input shape:',shape)\n current_layer = inputs\n levels = list()\n\n # add levels with max pooling\n for layer_depth in range(depth):\n layer1 = create_convolution_block(input_layer=current_layer, n_filters=n_base_filters*(2**layer_depth),\n batch_normalization=batch_normalization)\n layer2 = create_convolution_block(input_layer=layer1, n_filters=n_base_filters*(2**layer_depth)*2,\n batch_normalization=batch_normalization)\n if layer_depth < depth - 1:\n current_layer = MaxPooling3D(pool_size=pool_size)(layer2)\n levels.append([layer1, layer2, current_layer])\n else:\n current_layer = layer2\n levels.append([layer1, layer2])\n\n # add levels with up-convolution or up-sampling\n for layer_depth in range(depth-2, -1, -1):\n up_convolution = get_up_convolution(pool_size=pool_size, deconvolution=deconvolution,\n n_filters=current_layer._keras_shape[4])(current_layer)\n concat = concatenate([up_convolution, levels[layer_depth][1]], axis=4)\n current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],\n input_layer=concat, batch_normalization=batch_normalization)\n current_layer = 
create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],\n input_layer=current_layer,\n batch_normalization=batch_normalization)\n\n\n if n_labels>1:\n final_convolution = Conv3D(n_labels, 1)(current_layer)\n o = Reshape((shape[0] * shape[1]* shape[2],n_labels), input_shape=(shape[0], shape[1], shape[2],n_labels))(final_convolution)\n activation_name=\"softmax\"\n# o = (Permute((2, 1)))(o)\n if n_labels==1:\n o = Conv3D(n_labels, (1, 1, 1))(current_layer)\n activation_name=\"sigmoid\"\n act = Activation(activation_name)(o)\n model = Model(inputs=inputs, outputs=act)\n\n if not isinstance(metrics, list):\n metrics = [metrics]\n\n if include_label_wise_dice_coefficients and n_labels > 1:\n label_wise_dice_metrics = [get_label_dice_coefficient_function(index) for index in range(n_labels)]\n if metrics:\n metrics = metrics + label_wise_dice_metrics\n else:\n metrics = label_wise_dice_metrics\n if W !='':\n model.load_weights(W)\n if n_labels>1:\n# model.compile(loss=weighted_dice_coefficient_loss, optimizer = Adam(lr = initial_learning_rate) , metrics=metrics )\n model.compile(loss=\"categorical_crossentropy\", optimizer=Adam(lr = lr) , metrics=['accuracy'] )\n if n_labels==1:\n model.compile(loss=\"binary_crossentropy\", optimizer = Adam(lr = lr) , metrics=['accuracy'] )\n# model.compile(optimizer=Adam(lr=initial_learning_rate), loss=dice_coefficient_loss, metrics=metrics)\n model.summary()\n return model", "def add_pooling(model, method, psize):\n if method == 'max':\n model.add(MaxPooling2D(pool_size=psize))\n if method == 'average':\n model.add(AveragePooling2D(pool_size=psize))", "def optimized_inception_module(layer_in, f1, f2_in, f2_out, f3_in, f3_out, f4_out):\n\n\tconv1 = Conv2D(f1, (1,1), padding=\"same\", activation=\"relu\")(layer_in)\n\n\tconv3 = Conv2D(f2_in, (1,1), padding=\"same\", activation=\"relu\")(layer_in)\n\tconv3 = Conv2D(f2_out, (3,3), padding=\"same\", activation=\"relu\")(conv3)\n\n\tconv5 = Conv2D(f3_in, (1,1), padding=\"same\", activation=\"relu\")(layer_in)\n\tconv5 = Conv2D(f3_out, (5,5), padding=\"same\", activation=\"relu\")(conv5)\n\n\tpool = MaxPooling2D((3,3), strides=(1,1), padding=\"same\")(layer_in)\n\tpool = Conv2D(f4_out, (1,1), padding=\"same\", activation=\"relu\")(pool)\n\n\tlayer_out = concatenate([conv1, conv3, conv5, pool], axis=-1)\n\n\treturn layer_out" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a fully connected layer of 4096 neurons to the model with a Dropout of 0.5
def FCBlock(model):
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
[ "def FCBlock(self, dropoutP=0.5):\n model = self.model\n model.add(Dense(4096, activation='relu'))\n model.add(Dropout(dropoutP))", "def __init__(self, max_pool_layer_index=1, dropout_ratio=0.5, last_layer_num_params=2):\n\n assert 1 <= max_pool_layer_index <= 3, \\\n f\"Invalid max_pool_layer_index {max_pool_layer_index}\"\n\n super().__init__()\n\n # extract the first a few layers, then combine it with two extra layers\n # (1) a flattening layer that returns an 1d array\n # (2) a fully connected linear layer\n layers_to_stack = self.MAX_POOL_LAYERS[max_pool_layer_index - 1]\n maxpool_out_shape = self.MAX_POOL_OUT_SHAPE[max_pool_layer_index - 1]\n\n num_params = reduce(mul, maxpool_out_shape)\n\n alexnet = models.alexnet(pretrained = True)\n # freeze the parameters of all layers\n for param in alexnet.parameters():\n param.requires_grad = False # fix weights\n\n self.conv_pool = nn.Sequential(*(\n list(alexnet.features.children())[:layers_to_stack] +\n [\n Flatten(num_params),\n nn.Dropout(p=dropout_ratio),\n nn.Linear(num_params, last_layer_num_params)\n ]\n ))", "def generate_layer(self, in_channels_of_layer, out_channels_of_layer, number_of_blocks,\r\n dropout_rate, stride_of_first_block):\r\n\r\n strides_of_each_block = [stride_of_first_block] + [1] * (number_of_blocks - 1)\r\n blocks = []\r\n # generate a layer with number_of_blocks blocks\r\n for i in range(0, number_of_blocks):\r\n # generate the first basic block in this layer\r\n if i == 0:\r\n blocks.append(BasicBlock(in_channels_of_basic_block = in_channels_of_layer, out_channels_of_basic_block = out_channels_of_layer,\r\n dropout_rate = dropout_rate, stride = strides_of_each_block[i]))\r\n # generate other basic blocks\r\n else:\r\n blocks.append(BasicBlock(in_channels_of_basic_block = out_channels_of_layer, out_channels_of_basic_block = out_channels_of_layer,\r\n dropout_rate = dropout_rate, stride = strides_of_each_block[i]))\r\n # generate the whole layer using blocks \r\n layer = nn.Sequential(*blocks)\r\n return layer", "def dropout_create_layer(prev, n, activation, keep_prob):\n i = tf.contrib.layers.variance_scaling_initializer(mode=\"FAN_AVG\")\n new_layer = tf.layers.Dense(units=n, activation=activation,\n kernel_initializer=i)(prev)\n dropout = tf.layers.Dropout(rate=keep_prob)(new_layer)\n return dropout", "def cnn_oned_60(self,BINS,WIN_LEN):\n\n print(\"Attention: The current TF Version requirs weights to be saved seperatly in sparsly connected Nets\")\n ceil_bins=joblib.load(\"ceil_bins3.pkl\")\n ceil_bins=list(ceil_bins)\n #when using customLoss squeeze axis 3:\n noise_in = keras.Input((BINS,WIN_LEN,1))\n noise_fft=tf.squeeze(noise_in,3)\n\n \"\"\"Split up Subbands from STFT\"\"\"\n group=[1]*60\n sum_of_bins=0\n ceil_bins[59]=9\n\n for k in range(0,len(ceil_bins)):\n print(k)\n ## FFT Bins getting split for processing with specific neurons\n sum_of_bins=sum_of_bins+ceil_bins[k]\n if k==0:\n group[k]= Lambda(lambda x: x[:,0:2,:], output_shape=((2,WIN_LEN)))(noise_fft)\n print(group[k])\n\n if k==59:\n print( Lambda(lambda x: x[248:,:], output_shape=((9,16)))(noise_fft))\n group[k]= Lambda(lambda x: x[:,248:,:], output_shape=((9,16)))(noise_fft)\n\n else:\n print(int(sum_of_bins+ceil_bins[k]))\n group[k]=Lambda(lambda x: x[:,int(sum_of_bins):int(sum_of_bins+ceil_bins[k]),:], output_shape=((int(ceil_bins[k]),WIN_LEN)))(noise_fft)\n print(group[k])\n\n\n for e in range(0,len(ceil_bins)):\n group[e]=tf.keras.layers.Conv1D(64, 4, strides=1, padding='same',dilation_rate=1, activation='relu')(group[e])\n 
group[e]=tf.keras.layers.MaxPooling1D(pool_size=2, strides=None, padding='same', data_format=None)(group[e])\n group[e]=tf.keras.layers.Conv1D(64, 8, strides=1, padding='same',dilation_rate=2, activation='relu')(group[e])\n group[e]=tf.keras.layers.MaxPooling1D(pool_size=2, strides=None, padding='same', data_format=None)(group[e])\n group[e]=tf.keras.layers.Conv1D(64, 16, strides=1, padding='same',dilation_rate=4, activation='relu')(group[e])\n group[e]=tf.keras.layers.MaxPooling1D(pool_size=2, strides=None, padding='same', data_format=None)(group[e])\n\n for j in range(0,len(ceil_bins)):\n group[j]=tf.keras.layers.GlobalAveragePooling1D()(group[j])\n\n for b in range(0,len(ceil_bins)):\n group[b]=tf.expand_dims(group[b],1)\n\n for i in range(0,len(ceil_bins)):\n group[i]=tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32))(group[i])\n\n \"\"\"\n Concatenate Feature Vectors, init x_Tensor as first element\n \"\"\"\n x_Tensor = group[0]\n for g in range(1,60):\n x_Tensor = Concatenate(axis=1)([x_Tensor,group[g]])\n\n\n x_Tensor = Dense(60*64, activation='relu')(x_Tensor)\n x = tf.keras.layers.Dropout(0.05)(x_Tensor)\n outputs = tf.keras.layers.Dense(257, activation='sigmoid')(x)\n model = tf.keras.Model(noise_in, outputs)\n\n\n return model", "def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(UnetGenerator, self).__init__()\n # construct unet structure\n # REW: ๅ…ˆๆฑ‚ๆœ€ๅบ•ๅฑ‚็š„่ทจๅฑ‚่ฟžๆŽฅ๏ผŒๅ†้€ๆธๅพ€ไธŠๅฑ‚ๅŽป ๅคง็ฅž็บงไปฃ็ ๅ•Š๏ผ\n unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer\n for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters\n unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)\n # gradually reduce the number of filters from ngf * 8 to ngf\n unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)\n self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer", "def all_cnn_module():\n layers = [\n nn.Dropout(p=0.2),\n nn.Conv2d(3, 96, 3, stride=1, padding=1),\n nn.ReLU(),\n nn.Conv2d(96, 96, 3, stride=1, padding=1),\n nn.ReLU(),\n nn.Conv2d(96, 96, 3, stride=2, padding=1),\n nn.ReLU(),\n nn.Dropout(p=0.5),\n nn.Conv2d(96, 192, 3, stride=1, padding=1),\n nn.ReLU(),\n nn.Conv2d(192, 192, 3, stride=1, padding=1),\n nn.ReLU(),\n nn.Conv2d(192, 192, 3, stride=2, padding=1),\n nn.ReLU(),\n nn.Dropout(p=0.5),\n nn.Conv2d(192, 192, 3, stride=1, padding=0),\n nn.ReLU(),\n nn.Conv2d(192, 192, 1, stride=1, padding=0),\n nn.ReLU(),\n nn.Conv2d(192, 10, 1, stride=1, padding=0),\n nn.ReLU(),\n nn.AvgPool2d(6),\n Flatten()\n ]\n return Sequential(*layers)", "def apply_custom_network(self, hidden_counts):\n network = FeedForwardNetwork()\n\n bias = BiasUnit(name='bias')\n if self.bias:\n network.addModule(bias)\n\n in_layer = LinearLayer(self.inp_cnt, 'in')\n network.addInputModule(in_layer)\n\n out_layer = LinearLayer(self.out_cnt, 'out')\n network.addOutputModule(out_layer)\n\n hidden_layer = SigmoidLayer(hidden_counts[0], 
'hidden0')\n network.addModule(hidden_layer)\n\n in_to_hidden = FullConnection(in_layer, hidden_layer)\n network.addConnection(in_to_hidden)\n\n if self.bias:\n bias_to_hiden = FullConnection(bias, hidden_layer)\n network.addConnection(bias_to_hiden)\n\n for i in range(1, len(hidden_counts)):\n last_hidden_layer = hidden_layer\n hidden_layer = SigmoidLayer(hidden_counts[i])\n network.addModule(hidden_layer)\n hidden_to_hidden = FullConnection(last_hidden_layer, hidden_layer)\n network.addConnection(hidden_to_hidden)\n if self.bias:\n bias_to_hiden = FullConnection(bias, hidden_layer)\n network.addConnection(bias_to_hiden)\n\n hidden_to_out = FullConnection(hidden_layer, out_layer)\n network.addConnection(hidden_to_out)\n if self.bias:\n bias_to_out = FullConnection(bias, out_layer)\n network.addConnection(bias_to_out)\n\n network.sortModules()\n self.network = network\n return self", "def add_new_last_layer(base_model, nb_classes):\r\n x = base_model.output\r\n x = GlobalAveragePooling2D()(x)\r\n x = Dense(FC_SIZE, activation='relu')(x) #new FC layer, random init\r\n predictions = Dense(nb_classes, activation='softmax')(x) #new softmax layer\r\n model = Model(input=base_model.input, output=predictions)\r\n return model", "def gen_layers(n_in):\n nonlocal hidden, dropouts\n assert len(dropouts) <= len(hidden)\n \n for n_out, rate in zip_longest(hidden, dropouts):\n yield torch.nn.Linear(n_in, n_out)\n yield torch.nn.ReLU()\n if rate is not None and rate > 0.:\n yield torch.nn.Dropout(rate)\n n_in = n_out", "def buildDropModel(self, img_size, dropout):\n base_model = DenseNet121(weights=None, include_top=False, \n input_shape = (img_size,img_size,3))\n x = base_model.output\n x = layers.GlobalAveragePooling2D()(x)\n x = layers.Dropout(dropout)(x)\n predictions = layers.Dense(1, activation='sigmoid', name='last')(x)\n model = Model(inputs=base_model.input, outputs=predictions)\n model.load_weights(self.weights)\n return model", "def build_NN(input_var=None):\n \n \"\"\"\n Here we set up input layer l_in and shape of input will be 1*28*28 and link the \n input to input var\n \"\"\"\n l_in=lasagne.layers.InputLayer(shape=(None,1,28,28),input_var=input_var)\n \n \"\"\"\n Since we are using CNN we add a 20% dropout where randomly 20% of the edges\n between the inputs and the next layer will be dropped-done to avoid \n overfitting\n \"\"\"\n \n l_in_drop=lasagne.layers.DropoutLayer(l_in,p=0.2)\n \n \n \"\"\"\n Addd a layer(hidden layer) with 800 nodes ,initially it is dense or fully\n connected.It takes the previous layer(l_in_drop) as input.\n Glorot helps to initialize the layer with some weights,this is done \n so that training becomes faster\n \"\"\"\n l_hid1=lasagne.layers.DenseLayer(l_in_drop,num_units=800,\n nonlinearity=lasagne.nonlinearities.rectify,\n W=lasagne.init.GlorotUniform())\n \"\"\"\n We will now add a dropout of 50% to first hidden layer\n \"\"\"\n \n l_hid1_drop=lasagne.layers.DropoutLayer(l_hid1,p=0.5)\n \n \"\"\"\n Add another hidden layer which takes as its input the first hidden layyer\n \"\"\"\n l_hid2=lasagne.layers.DenseLayer(l_hid1_drop,num_units=800,\n nonlinearity=lasagne.nonlinearities.rectify,\n W=lasagne.init.GlorotUniform())\n \n l_hid2_drop=lasagne.layers.DropoutLayer(l_hid2,p=0.5)\n \n \"\"\"\n Add the final output layer,has 10 nodes,each one for each digit\n \"\"\"\n \n l_out=lasagne.layers.DenseLayer(l_hid2_drop,num_units=10,\n nonlinearity=lasagne.nonlinearities.softmax)\n \"\"\"\n Output is a softmax where we get outputs between 0-1,max of those\n is the 
final output\n \"\"\"\n return l_out", "def Resnet50_transfer():\r\n \r\n # load ResNet50 model with ImageNet weights without classifier layers\r\n model = ResNet50(include_top=False,\r\n weights = 'resnet50_weights_notop.h5',\r\n input_tensor=Input(shape=(32,32, 3)))\r\n \r\n # Freeze all weights of model, we will not train it\r\n for layer in model.layers:\r\n layer.trainable = False\r\n \r\n # define new model\r\n model = Model(inputs=model.inputs, outputs=model.layers[-1].output)\r\n return model", "def __init__(self, model_dimension, dropout_percentage, number_of_heads, feedforward_dimension, number_of_layers):\n super(Decoder, self).__init__()\n self.dec_layers = clone(DecoderLayer(model_dimension, dropout_percentage, number_of_heads, feedforward_dimension), number_of_layers)", "def __init__(self, model_dimension, dropout_percentage, number_of_heads, feedforward_dimension):\n\n super(DecoderLayer, self).__init__()\n self.res_layers = clone(ResidualConnection(model_dimension, dropout_percentage), 3)\n self.self_att = MultiheadedAttention(model_dimension, number_of_heads)\n self.enc_att = MultiheadedAttention(model_dimension, number_of_heads)\n self.feed_forward = PositionwiseFeedForward(model_dimension, feedforward_dimension)", "def SqueezeNet(nb_classes, inputs=(128,128,3)):\n\n input_img = Input(shape=inputs)\n conv1 = Conv3D(\n 32, (3,3,3), activation='relu', kernel_initializer='glorot_uniform',\n strides=(2,2,1), padding='same', name='conv1')(input_img)\n maxpool1 = MaxPooling3D(\n pool_size=(3,3,1), strides=(2,2,1), name='maxpool1')(conv1)\n\n fire2_squeeze = Conv3D(\n 8, (1,1,1), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire2_squeeze')(maxpool1)\n fire2_expand1 = Conv3D(\n 16, (1,1,1), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire2_expand1')(fire2_squeeze)\n fire2_expand2 = Conv3D(\n 16, (3,3,3), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire2_expand2')(fire2_squeeze)\n merge2 = Concatenate(axis=-1)([fire2_expand1, fire2_expand2])\n\n fire3_squeeze = Conv3D(\n 8, (1,1,1), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire3_squeeze')(merge2)\n fire3_expand1 = Conv3D(\n 16, (1,1,1), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire3_expand1')(fire3_squeeze)\n fire3_expand2 = Conv3D(\n 16, (3,3,3), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire3_expand2')(fire3_squeeze)\n merge3 = Concatenate(axis=-1)([fire3_expand1, fire3_expand2])\n\n residual32 = add([merge2, merge3])\n\n fire4_squeeze = Conv3D(\n 16, (1,1,1), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire4_squeeze')(residual32)\n fire4_expand1 = Conv3D(\n 32, (1,1,1), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire4_expand1')(fire4_squeeze)\n fire4_expand2 = Conv3D(\n 32, (3,3,3), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire4_expand2')(fire4_squeeze)\n merge4 = Concatenate(axis=-1)([fire4_expand1, fire4_expand2])\n #maxpool4 = MaxPooling3D(\n # pool_size=(3,3,1), strides=(2,2,2), name='maxpool4')(merge4)\n\n fire5_squeeze = Conv3D(\n 16, (1,1,1), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire5_squeeze')(merge4)\n fire5_expand1 = Conv3D(\n 32, (1,1,1), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', 
name='fire5_expand1')(fire5_squeeze)\n fire5_expand2 = Conv3D(\n 32, (3,3,3), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire5_expand2')(fire5_squeeze)\n merge5 = Concatenate(axis=-1)([fire5_expand1, fire5_expand2])\n\n residual45 = add([merge4, merge5])\n\n fire6_squeeze = Conv3D(\n 24, (1,1,1), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire6_squeeze')(merge5)\n fire6_expand1 = Conv3D(\n 64, (1,1,1), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire6_expand1')(fire6_squeeze)\n fire6_expand2 = Conv3D(\n 64, (3,3,3), activation='relu', kernel_initializer='glorot_uniform',\n padding='same', name='fire6_expand2')(fire6_squeeze)\n merge6 = Concatenate(axis=-1)([fire6_expand1, fire6_expand2])\n\n fire6_dropout = Dropout(0.5, name='fire6_dropout')(merge6)\n conv10 = Conv3D(\n nb_classes, (1,1,1), kernel_initializer='glorot_uniform',\n padding='valid', name='conv10')(fire6_dropout)\n\n # The size should match the output of conv10\n avgpool10 = AveragePooling3D(\n (2, 2, 3), name='avgpool10')(conv10)\n\n flatten = Flatten(name='flatten')(avgpool10)\n softmax = Activation(\"softmax\", name='softmax')(flatten)\n\n return Model(inputs=input_img, outputs=softmax)", "def nasnet_maxpool():\n return nn.MaxPool2D(\n pool_size=3,\n strides=2,\n padding=1)", "def freeze_bottom_efficientnet(model, freeze_ratio=.75):\n \n n_layers = int(np.round(\n len(model._blocks) * freeze_ratio))\n\n for i in range(n_layers):\n for param in model._blocks[i].parameters():\n param.requires_grad = False", "def add_layer(self, model, incr):\r\n \r\n # Set previous layers not trainable\r\n for layer in model.layers[:-1]:\r\n layer.trainable = False\r\n \r\n # Output of previous layer\r\n out = model.layers[-2].output\r\n \r\n # Add the new layer\r\n layer_new = Dense(units = self.units_nbr, activation = self.activation,\r\n name = 'hidden' + str(incr))(out)\r\n \r\n decoded = model.layers[-1](layer_new)\r\n \r\n return Model(model.layers[0].input, decoded)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Take an array of unsorted items and replace the contents of this priority queue by them.
def heapify(self, arg_items): # cleaning the present PQ self._array.clear() #fill the array for it in arg_items: self._array.append(it) #heapifying the unsorted input n = len(self._array) idx = n-1 parent_idx = self._parent(idx) while ( parent_idx >= 0 ): self._sift_down(parent_idx) parent_idx -= 1 return
[ "def replace(self, newInds, pop):\n\n pop = pop[np.argsort(pop)]\n pop[0:newInds.size] = newInds\n return pop", "def ageQueue(self):\n [t.setPriority(t.getPriority() + 1) for t in self._queue]", "def update_batch_priorities(self, priorities):\n if not self._sampled_unique: # Must remove duplicates\n self.prev_tree_idxs, unique_idxs = np.unique(self.prev_tree_idxs,\n return_index=True)\n priorities = priorities[unique_idxs]\n self.reconstruct(self.prev_tree_idxs, priorities)", "def queue_reposition(self, queue):\n bisect.insort(queue, queue.pop(queue.index(self)))", "def update_batch_priorities(self, priorities):\n priorities = numpify_buffer(priorities)\n self.priority_tree.update_batch_priorities(priorities ** self.alpha)", "def update(self, indices, priorities):\n for i, priority in zip(indices, priorities):\n self.priorities[i] = priority", "def fill_queue(queue_fill, any_list):\n for elem in any_list:\n queue_fill.put(elem)", "def update_priorities(self, priorities: np.ndarray):\n # add a small number to ensure every transition can be sampled\n tempered_priorities = priorities + self.priority_eps\n for idx, priority in zip(self.sampled_indices, tempered_priorities):\n assert priority > 0\n assert 0 <= idx < self.num_stored\n\n self.sum_tree[idx] = priority ** self.alpha\n self.min_tree[idx] = priority ** self.alpha\n\n self.max_priority = max(self.max_priority, priority)\n self.sampled_indices = None", "def heapify(self, input_elements: List[Tuple[T, Priority]]):\n entry_list = list()\n for item in input_elements:\n i, priority = item\n entry_list.append(self.entry_handler(i, priority=priority))\n heapq.heapify(entry_list)\n self._queue = entry_list", "def shrink(self):\n half = int(len(self._items) / 2)\n halfArray = Array(half)\n if half > ArraySortedBag.DEFAULT_CAPACITY:\n for i in range(len(self)):\n halfArray[i] = self._items[i]\n self._items = halfArray\n else:\n pass", "def tree_sort(items):\n sorted_items = BinarySearchTree(items)\n items[:] = sorted_items.items_in_order()", "def extend(self, iterable):\r\n map(lambda i: self.rpush(i), iterable)", "def replaceElements(self, arr):\r\n \r\n # initial max = -1\r\n # reverse iteration\r\n # newmax = max( oldmax, arr[i] )\r\n maxval = -1\r\n for i in range( len(arr)-1, -1, -1 ):\r\n newmax = max( maxval, arr[i] ) # update the new max\r\n arr[i] = maxval # replace in place the arr value with the maxval\r\n maxval = newmax # update the new maxval\r\n \r\n return arr", "def insort(array, item, key):\n ci = key(item)\n s = 0\n e = len(array)\n p = (s+e)//2\n while s < e:\n cp = key(array[p])\n if ci > cp:\n s = p+1\n elif ci < cp:\n e = p\n else:\n break\n p = (s+e)//2\n array.insert(p, item)", "def heapify(self, values):\n return map(self.push, values)", "def set(self, *value):\n del self[0:len(self)]\n for item in value:\n self.append(item)", "def modify(self, index, value):\n oldValue = self._list[index]\n self._list[index] = value\n if self._cmp(value, oldValue) > 0:\n self._top_heapify(index)\n elif self._cmp(value, oldValue) < 0:\n self._down_heapify(index)", "def replacer(arr, str):\n for item in arr:\n str = str.replace(item[0], item[1])\n return str", "def update (self, value, priority, take_max = False):\n \n if value in self.indices:\n old_priority = self.data[self.indices[value]][1]\n if take_max and priority <= old_priority:\n return\n \n self.data[self.indices[value]] = (self.data[self.indices[value]][0], priority)\n \n if priority > old_priority:\n self.float_up(self.indices[value])\n \n else:\n 
self.float_down(self.indices[value])\n \n else:\n if self.max_entries > 0 and len(self.data) >= self.max_entries:\n del self.indices[self.data[len(self.data) - 1][0]]\n self.data[len(self.data) - 1] = (value, priority)\n \n else:\n self.data.append((value, priority))\n \n self.indices[value] = len(self.data) - 1\n \n self.float_up(len(self.data) - 1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert from geocentric spherical to geodetic coordinates.
def spherical_to_geodetic(geocentric_latitude, radius): ellipsoid = get_ellipsoid() k, big_d, big_z = _spherical_to_geodetic_parameters(geocentric_latitude, radius) latitude = np.degrees( 2 * np.arctan(big_z / (big_d + np.sqrt(big_d ** 2 + big_z ** 2))) ) height = ( (k + ellipsoid.first_eccentricity ** 2 - 1) / k * np.sqrt(big_d ** 2 + big_z ** 2) ) return latitude, height
[ "def geocentricToGeodetic(Latitude):\n return np.arctan((np.tan(Latitude)) / 0.99330562)", "def geom2geog(lat, long):\n lat = np.deg2rad(lat)\n long = np.deg2rad(long)\n\n # Pole coordinates for 2015\n pole_lat = np.deg2rad(80.37)\n pole_long = np.deg2rad(-72.62)\n\n pole_lat_s = np.sin(pole_lat)\n pole_lat_c = np.cos(pole_lat)\n pole_long_s = np.sin(pole_long)\n pole_long_c = np.cos(pole_long)\n\n # Rotation matrix\n matrix = np.array([\n [pole_lat_s * pole_long_c, pole_lat_s * pole_long_s, -pole_lat_c],\n [-pole_long_s, pole_long_c, 0],\n [pole_lat_c * pole_long_c, pole_lat_c * pole_long_s, pole_lat_s]\n ])\n matrix = np.linalg.inv(matrix)\n\n x = earth_radii * np.cos(lat) * np.cos(long)\n y = earth_radii * np.cos(lat) * np.sin(long)\n z = earth_radii * np.sin(lat)\n vect_geom = np.array([x, y, z])\n vect_geog = np.dot(matrix, vect_geom)\n norm = np.linalg.norm(vect_geog)\n\n lat_geog = np.arcsin(vect_geog[2] / norm)\n long_geog = np.arctan2(vect_geog[1], vect_geog[0])\n\n lat_geog = np.rad2deg(lat_geog)\n long_geog = np.rad2deg(long_geog)\n return lat_geog, long_geog", "def toSpherical(threeVec):\n\trho = math.sqrt(threeVec[0]**2+threeVec[1]**2)\n\tlong = math.atan2(threeVec[1], threeVec[0])\n\tlat = math.atan2(threeVec[2], rho)\n\treturn long, lat", "def geocentric_lat(lat_gd, sm_axis=earth.A, ecc2=earth.ECC2):\n # return math.atan((1.0 - ecc2) * math.tan(lat_gd))\n return math.atan2(math.tan(lat_gd), 1.0 / (1.0 - ecc2))", "def get_galactic_coords(self):\n ra=self.column_by_name('raJ2000')\n dec=self.column_by_name('decJ2000')\n\n glon, glat = _galacticFromEquatorial(ra,dec)\n\n return numpy.array([glon,glat])", "def g_to_spherical(self, cartesian):\n xxxx, yyyy, zzzz, xxxy, xxxz, yyyx, yyyz, zzzx, zzzy, xxyy, xxzz, yyzz, xxyz, yyxz, zzxy = cartesian\n\n xyr2 = xxxy + yyyx + zzxy\n xzr2 = xxxz + yyxz + zzzx\n yzr2 = xxyz + yyyz + zzzy\n x2r2 = xxxx + xxyy + xxzz\n y2r2 = xxyy + yyyy + yyzz\n z2r2 = xxzz + yyzz + zzzz\n r4 = x2r2 + y2r2 + z2r2\n\n zero = (35.0 * zzzz - 30.0 * z2r2 + 3.0 * r4) / 8.0\n plus_1 = sqrt(10) * (7.0 * zzzx - 3.0 * xzr2) / 4.0\n minus_1 = sqrt(10) * (7.0 * zzzy - 3.0 * yzr2) / 4.0\n plus_2 = sqrt(5) * (7.0 * (xxzz - yyzz) - (x2r2 - y2r2)) / 4.0\n minus_2 = sqrt(5) * (7.0 * zzxy - xyr2) / 2.0\n plus_3 = sqrt(70) * (xxxz - 3.0 * yyxz) / 4.0\n minus_3 = sqrt(70) * (3.0 * xxyz - yyyz) / 4.0\n plus_4 = sqrt(35) * (xxxx - 6.0 * xxyy + yyyy) / 8.0\n minus_4 = sqrt(35) * (xxxy - yyyx) / 2.0\n return zero, plus_1, minus_1, plus_2, minus_2, plus_3, minus_3, plus_4, minus_4", "def geodetic_lat(lat_gc, sm_axis=earth.A, ecc2=earth.ECC2):\n return math.atan2(math.tan(lat_gc), (1.0 - ecc2))", "def cartesian_to_spherical(x, y, z):\n\n r = (x**2 + y**2 + z**2)**0.5\n phi = math.atan(y/x)\n theta = math.asin(z/r)\n\n return r, phi, theta", "def geodetic_from_ecef(x, y, z):\n #http://code.google.com/p/pysatel/source/browse/trunk/coord.py?r=22\n\n # load wgs constants\n wgs = wgs_constants()\n a = wgs.a\n b = wgs.b\n esq = wgs.esq\n e1sq = wgs.e1sq\n \n r = sqrt(x * x + y * y)\n Esq = a * a - b * b\n F = 54 * b * b * z * z\n G = r * r + (1 - esq) * z * z - esq * Esq\n C = (esq * esq * F * r * r) / (pow(G, 3))\n S = cbrt(1 + C + sqrt(C * C + 2 * C))\n P = F / (3 * pow((S + 1 / S + 1), 2) * G * G)\n Q = sqrt(1 + 2 * esq * esq * P)\n r_0 = -(P * esq * r) / (1 + Q) + sqrt(0.5 * a * a*(1 + 1.0 / Q) - \\\n P * (1 - esq) * z * z / (Q * (1 + Q)) - 0.5 * P * r * r)\n #U = sqrt(pow((r - esq * r_0), 2) + z * z)\n V = sqrt(pow((r - esq * r_0), 2) + (1 - esq) * z * z)\n Z_0 = b * b * z / (a * 
V)\n #h = U * (1 - b * b / (a * V))\n lat = arctan((z + e1sq * Z_0) / r)\n lon = arctan2(y, x)\n return lat, lon\n #return degrees(lat), degrees(lon)", "def _geographic_to_geocentric(lons, lats, compute=True):\n # Ensure dask array\n lons = dask.array.asarray(lons)\n lats = dask.array.asarray(lats)\n # Define geocentric cartesian and geographic projection\n geocentric_proj = pyproj.Proj(proj=\"geocent\")\n geographic_proj = pyproj.Proj(proj=\"latlong\")\n\n # Conversion from geographic coordinate system to geocentric cartesian\n res = map_blocks(\n _do_transform,\n geographic_proj,\n geocentric_proj,\n lons,\n lats,\n dask.array.zeros_like(lons), # altitude\n new_axis=[2],\n chunks=(lons.chunks[0], lons.chunks[1], 3),\n )\n if compute:\n res = res.compute()\n x = res[:, :, 0]\n y = res[:, :, 1]\n z = res[:, :, 2]\n return x, y, z", "def cartesian_to_spherical(data):\n x = data[:, 0]\n y = data[:, 1]\n z = data[:, 2]\n\n # distances to origin\n r = np.sqrt(x**2 + y**2 + z**2)\n\n # angle between x-y plane and z\n theta = np.arccos(z/r)/np.pi\n\n # angle on x-y plane\n phi = np.arctan2(y, x)/np.pi\n\n # spherical_coord = np.vstack([r, theta, phi])\n # spherical_coord = np.transpose(spherical_coord)\n return r, theta, phi", "def compute_spherical_coord(data):\n # center the data around origin\n center = np.mean(data, axis=0)\n shifted_data = data - center\n\n r, theta, phi = cartesian_to_spherical(shifted_data)\n return r, theta, phi", "def spherical_coord_to_cartesian_coord(self, point):\n \n theta_1, theta_2, r = point\n \n # Cartesian coordinates\n x = np.sin(theta_1) * np.cos(theta_2)\n y = np.sin(theta_1) * np.sin(theta_2)\n z = np.cos(theta_1)\n \n return r * np.array([x, y, z])", "def to_spherical(self, **kwargs):\n t, r, theta, phi = self.convert_spherical()\n\n return Spherical(t * u.s, r * u.m, theta * u.rad, phi * u.rad)", "def cartesian2spherical(x,y,z):\n\tr = np.sqrt(x**2+y**2+z**2)\n\ttheta_rad = arctan2(np.sqrt(x**2+y**2)/(1.*z))\n\tbeta_rad = arctan2(y/(1.*x))\n\treturn r, theta_rad, beta_rad", "def cartposlos2geocentric(x, y, z, dx, dy, dz, ppc=None,\n lat0=None, lon0=None, za0=None, aa0=None):\n # Here be dragons!\n\n # Broadcast all input variables to the same shape. 
Atleast (1)\n if(ppc is not None and za0 is not None and lat0 is not None and\n aa0 is not None and lon0 is not None):\n x, y, z, dx, dy, dz, ppc, lat0, lon0, za0, aa0 = _broadcast(\n x, y, z, dx, dy, dz, ppc, lat0, lon0, za0, aa0)\n elif ppc is not None:\n x, y, z, dx, dy, dz, ppc = _broadcast(x, y, z, dx, dy, dz, ppc)\n else:\n x, y, z, dx, dy, dz = _broadcast(x, y, z, dx, dy, dz)\n\n r, lat, lon = cart2geocentric(x, y, z, lat0, lon0, za0, aa0)\n\n # Renormalize for length of the variables (not in atmlab)\n norm_r = np.sqrt(dx**2 + dy**2 + dz**2)\n dx = dx / norm_r\n dy = dy / norm_r\n dz = dz / norm_r\n\n coslat = np.cos(np.deg2rad(lat))\n sinlat = np.sin(np.deg2rad(lat))\n coslon = np.cos(np.deg2rad(lon))\n sinlon = np.sin(np.deg2rad(lon))\n dr = np.clip(coslat * coslon * dx + sinlat * dz + coslat * sinlon * dy,\n -1., 1.)\n\n # Get LOS angle\n if ppc is None:\n za = np.rad2deg(np.arccos(dr))\n else:\n za = np.rad2deg(np.arcsin(ppc / r))\n aa = np.zeros(za.shape)\n\n # Fix zenith and azimuth angle with optional input only when all exists\n if(za0 is not None and lat0 is not None and\n aa0 is not None and lon0 is not None):\n\n # Determine the type for zenith\n noz = np.logical_or(za0 < 1e-06, za0 > 180 - 1e-06)\n nan = np.isnan(za)\n pre = np.logical_and(~noz, nan)\n\n # Either set or do not\n za[noz] = za0[noz]\n za[pre] = 90.\n # NB: removed check for dr<0 since by putting dr==1 is more sensible\n\n # Determine the type for azimuth\n cir1 = abs(aa0) < 1e-06\n cir2 = np.logical_or(cir1, abs(aa0 - 180) < 1e-06)\n same = np.equal(lon, lon0)\n circ = np.logical_and(cir2, same)\n left = np.logical_and(cir1, ~same)\n right = np.logical_and(~cir1, ~same)\n\n # This should set all cases\n aa[circ] = aa0[circ]\n aa[left] = 180.\n aa[right] = 0.\n else:\n\n # Determine the type of calculations to be carried out\n noz = np.logical_or(za < 1e-06, za > 180 - 1e-06)\n pol = abs(lat) > 90 - 1e-08\n pre = np.logical_and(~noz, pol)\n non = np.logical_and(~noz, ~pol)\n aa[noz] = 0.\n aa[pre] = np.rad2deg(np.arctan2(dy[pre], dx[pre]))\n\n dlat = (- sinlat[non] * coslon[non] / r[non] * dx[non] + coslat[non] /\n r[non] * dz[non] - sinlat[non] * sinlon[non] / r[non] * dy[non]\n )\n dlon = (- sinlon[non] / coslat[non] / r[non] * dx[non] + coslon[non] /\n coslat[non] / r[non] * dy[non])\n aa[non] = (np.rad2deg(np.arccos(r[non] *\n dlat / np.sin(np.deg2rad(za[non])))))\n\n fix = np.logical_or(np.isnan(aa), ~np.isreal(aa))\n\n aa[np.logical_and(fix, dlat >= 0)] = 0\n aa[np.logical_and(fix, dlat < 0)] = 180\n\n aa[np.logical_and(~fix, dlon < 0)] *= -1\n\n return r, lat, lon, za, aa", "def cart2geodetic(x, y, z, ellipsoid=None):\n if ellipsoid is None:\n ellipsoid = ellipsoidmodels()['WGS84']\n\n errtext = 'Invalid excentricity value in ellipsoid model.'\n inrange(ellipsoid[1], 0, 1, exclude='upper', text=errtext)\n\n lon = np.rad2deg(np.arctan2(y, x))\n B0 = np.arctan2(z, np.hypot(x, y))\n B = np.ones(B0.shape)\n e2 = ellipsoid[1]**2\n if e2 == 0.0:\n h, lat, lon = cart2geocentric(x, y, z)\n h -= ellipsoid[0]\n else:\n while (np.any(np.abs(B - B0) > 1e-10)):\n N = ellipsoid[0] / np.sqrt(1 - e2 * np.sin(B0)**2)\n h = np.hypot(x, y) / np.cos(B0) - N\n B = B0.copy()\n B0 = np.arctan(z/np.hypot(x, y) * ((1-e2*N/(N+h))**(-1)))\n\n lat = np.rad2deg(B)\n\n return h, lat, lon", "def convert_coords(self, stz):\n return np.array(\n [stz[0], np.mod(stz[1], 2.0 * np.pi), np.mod(stz[2], 2.0 * np.pi)],\n dtype=np.float64,\n )", "def geocentric2geodetic(r, lat, lon, ellipsoid=None):\n if ellipsoid is None:\n ellipsoid = 
ellipsoidmodels()['WGS84']\n\n errtext = 'Invalid excentricity value in ellipsoid model.'\n inrange(ellipsoid[1], 0, 1, exclude='upper', text=errtext)\n\n cart = geocentric2cart(r, lat, lon)\n return cart2geodetic(*cart, ellipsoid)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
receive payload from master and respond
def _receive(self): payloadFromMaster = [] pos = 0 payloadFromMaster += [self.serial.read()] while payloadFromMaster[pos] != chr(ETX): payloadFromMaster += [self.serial.read()] pos += 1 # read FCC payloadFromMaster += [self.serial.read()] payloadFromMaster = filter(None, payloadFromMaster) print "payload from master:{}".format(repr(payloadFromMaster)) return payloadFromMaster
[ "def recv(self):", "def dataReceived(data):", "def receive(self, api_spec, response):\n pass", "def _processPayload(self):\n self.stringReceived(self._payload.getvalue()[:-1])", "def receive(self):\n self.respone = None\n try:\n def callback(ch, method, properties, body):\n # print(\" [x] Received data\")\n self.response = body\n\n # stops listening\n self.channel.stop_consuming()\n\n self.channel.basic_consume(callback, queue=QUEUENAME, no_ack=True)\n\n print(\" [*] Waiting for messages. To exit press CTRL+C\")\n self.channel.start_consuming()\n return self.response\n # must exit program if user interruption\n except(KeyboardInterrupt, SystemExit):\n raise\n except Exception as e:\n print(e)", "def __on_request_response__(self, ch, method, props, body):\r\n\t\ttry:\r\n\t\t\tself.last_message = json.loads(body)\r\n\t\texcept ValueError:\r\n\t\t\tprint 'encountered an error while decoding the message'\r\n\t\t\tself.last_message = body\r\n\r\n\t\tself.response = 'received'", "def _recv(self, expected_length=-1):\n \n response = \"\"\n\n##################### Modified by yaoming.lin on 2013-07-09 ####################\n\n is_ok = True\n\n #read the 5 bytes of the pdu message\n while (len(response) < 5) and is_ok: \n new_byte = self._sock.recv(1)\n if len(new_byte) == 0:\n is_ok = False\n else:\n response += new_byte\n if is_ok:\n #read the rest of the request\n #length = self._get_request_length(request)\n if ord(response[1]) < 7: # Modified by yaoming.lin on 2015-08-17\n length = ord(response[2]) + 5\n elif ord(response[1]) < 17:\n length = 8\n else:\n length = 5\n \n while (len(response) < length) and is_ok:\n new_byte = self._sock.recv(1)\n if len(new_byte) == 0:\n is_ok = False\n else:\n response += new_byte\n\n################################################################################\n\n retval = call_hooks(\"modbus_rtu_over_tcp.RtuOverTcpMaster.after_recv\", (self, response))\n if retval <> None:\n return response\n return response", "def handle_client_data(self, data, client_sock):\n prot = data[0].lower()\n if prot == \"n\":\n # Sent by the central server when a new node joins\n address = json.loads(data[1:])\n # print(f\"{self._worker.name} added a node to their network.\")\n self._worker.add_peer(address)\n client_sock.close()\n elif prot == \"h\":\n # Receive new block header\n block_header = json.loads(data[1:])\n client_sock.close()\n self._worker.add_block_header(block_header)\n elif prot == \"t\":\n # Receive new transaction\n tx_json = json.loads(data[1:])[\"tx_json\"]\n client_sock.close()\n self._worker.add_transaction(tx_json)\n elif prot in \"rx\":\n # Receive request for transaction proof or balance\n # Send \"spv\" back so client can exclude this reply\n client_sock.sendall(\"spv\".encode())\n client_sock.close()\n else:\n client_sock.close()", "def receive_data(self):\n while 1:\n client, address = self.sock.accept()\n print('Client connection recieved from:', address[0])\n data = client.recv(self.buffer_size)\n if data:\n print(' Response recieved:', data.decode())\n client.send(data)\n client.close()", "def dataReceived(self, returned_data):\n server_queue.put(returned_data)", "def handle_data(self):\n self.client.settimeout(5) # should really be enough\n loops = 0\n payload = None\n self.buffer = bytearray()\n\n # only allowed mirrors, plus localhost for 10059 since that's where admin interfaces live\n if self.port == 10059:\n if self.ip != \"127.0.0.1\":\n self.ls.log.warning(\"Outside IP %s tried connection to remote admin API\" % self.ip)\n self.end()\n 
return\n elif self.port == 10056:\n if self.ip not in all_mirrors() or self.ip == \"127.0.0.1\" or self.ip == self.ls.ip:\n self.ls.log.warning(\"Unauthorized ServerNet connection from %s:%s\" % (self.ip, self.port))\n self.end()\n return\n query(\"UPDATE mirrors SET lifesign = ? WHERE address = ?\", (int(time.time()), self.ip))\n\n # receive API call\n while True:\n try:\n self.buffer.extend(self.client.recv(2048))\n loops += 1\n except (socket.timeout, TimeoutError):\n self.ls.log.error(\"ServerNet connection from %s timed out while receiving data\" % self.key)\n break\n\n try:\n payload = json.loads(self.buffer.decode(\"ascii\", \"ignore\"))\n break\n except ValueError: # older python3s don't support json.JSONDecodeError\n pass\n\n if loops > 12: # even our patience knows its limits\n break\n\n # if API call not received or readable for whatever reason, give up\n if not payload:\n if len(self.buffer) > 0:\n self.ls.log.error(\"ServerNet update received from %s, but could not acquire valid payload (got %s)\" % (\n self.ip, self.buffer.decode(\"ascii\", \"ignore\")))\n self.end()\n return\n\n # same for incomplete call\n if \"action\" not in payload or \"data\" not in payload or \"origin\" not in payload:\n self.ls.log.error(\"ServerNet update received from %s, but JSON was incomplete\" % self.ip)\n self.end()\n return\n\n # this shouldn't happen, but just in case...\n if payload[\"origin\"] == self.ls.address:\n self.end()\n return\n\n # payload data should be a list, though usually with 0 or 1 items\n try:\n pass_on = []\n for item in payload[\"data\"]:\n if self.process_data(payload[\"action\"], item):\n pass_on.append(item)\n except TypeError:\n self.ls.log.error(\"ServerNet update received from %s, but data was not iterable\" % self.ip)\n self.end()\n return\n\n # ok, payload is valid, process it\n self.ls.log.info(\"Received ServerNet update from %s: %s\" % (self.ip, payload[\"action\"]))\n\n # switch on the engine, pass it on\n no_broadcast = [\"hello\", \"request\", \"delist\", \"request-log\", \"send-log\", \"request-log-from\"]\n if self.port == 10059 and len(pass_on) > 0 and payload[\"action\"] not in no_broadcast and \\\n payload[\"action\"][0:4] != \"get-\" and payload[\"origin\"] == \"web\":\n self.ls.broadcast(action=payload[\"action\"], data=pass_on, ignore=[self.ip])\n\n self.end()\n\n # was a reload command given?\n if self.reload_mode is not None:\n self.ls.reload(mode=self.reload_mode)\n\n return", "def handle(self) -> None:\n while True:\n raw_command = self.request.recv(1024)\n if not raw_command:\n break\n result = dispatch(self.state, raw_command)\n self.request.send(result)", "def receive(self, data, addr):\n if data.startswith(self.secret):\n eventData = data[len(self.secret):].decode(\"UTF-8\")\n event = None\n\n if self.settings.heartbeat.use_encryption:\n try:\n encryptor = Encryptor(self.settings.heartbeat.enc_password)\n event = Event.from_json(encryptor.decrypt(eventData))\n except:\n pass\n\n if event is None and self.settings.accept_plaintext:\n try:\n event = Event.from_json(eventData)\n except Exception:\n pass\n\n if (event is not None and event.type in self.topics):\n try:\n # Sanitize the source to an IP. 
Using a hostname is\n # sketchy because in some network environments or VPS\n # setups, gethostbyaddr to get a hostname doesn't\n # provide stable information (sometimes IP, sometimes hostname)\n # This should also prevent the exception below.\n event.host = str(socket.gethostbyname(addr[0])) + \"-\" + str(event.host)\n except socket.herror:\n event.host = str(addr[0]) + \"-\" + str(event.host)\n\n if (self._bcastIsOwn(event.host)):\n return\n\n event.payload['histamine_rxtime'] = time()\n self.callback(event)\n\n if (self.acking and 'histamine_acking' not in event.payload):\n ack_e = Event('ACKing ' + event.id + \"/\" + event.host, 'ACK', type=Topics.ACK)\n ack_e.payload['histamine_acking'] = event.id\n ack_e.payload['dest'] = event.host\n self.callback(ack_e)", "def handle(self):\n request_data = parse_request_json(self.request)\n response = None\n if request_data[SC.MSG_TITLE] == SC.MESSAGE_GET_ROLE:\n response = self.handle_get_role(request_data)\n elif request_data[SC.MSG_TITLE] == SC.MESSAGE_BROADCAST_ROLES:\n response = self.handle_get_network_information(request_data)\n elif request_data[SC.MSG_TITLE] == SC.MESSAGE_PRODUCE_VOTES:\n response = self.handle_produce_votes(request_data)\n elif request_data[SC.MSG_TITLE] == SC.MESSAGE_DISTRIBUTE_VOTES:\n response = self.handle_distribute_votes(request_data)\n else:\n response = self.handle_unexpected_request()\n send_response_json(self.request, response, request_data[SC.MSG_ORIGIN])", "def receive_data(self):\n data = b''\n part = self.Socket.recv(4096)\n payload_len = unpack_from('<H', part, 2)[0]\n data += part\n\n while len(data)-24 < payload_len:\n part = self.Socket.recv(4096)\n data += part\n\n return data", "def receive(self):\n \n\n try:\n if not self.session:\n return\n \n t = time.time()\n str_types = [str, unicode]\n for name, topic in self.topics.items():\n\n # if no user has requested data for this topic in a while\n # close the topic\n if topic.inactive(t):\n print \"Closing topic %s for inactivity, %s\" % (name, topic.config)\n topic.close()\n del self.topics[name]\n continue\n\n while topic.available() > 0:\n msg = topic.receive()\n data = msg.content\n \n if type(data) == str:\n data = json.loads(data)\n if type(data) == unicode:\n data = json.loads(data)\n\n for callback in topic.callbacks:\n callback(msg, data)\n \n self.store(msg.subject, data)\n #print \"Got data for %s: %s\" % (msg.subject, data)\n\n self.session.acknowledge()\n except qmsg.exceptions.ConnectionError, inst:\n self.dbg(traceback.format_exc())\n self.disconnect()\n except qmsg.exceptions.SessionClosed, inst:\n self.dbg(traceback.format_exc())\n self.disconnect()", "def Recv_LoginAck(data):\n global g_actorId\n global g_actorDict\n (g_actorId,) = struct.unpack(\"<B\", data[0])\n print g_programName+'Login ok, local actor id is %s' % (g_actorId,)\n print g_inputPrompt\n g_actorDict[g_actorId] = actor.Actor()\n return data[1:]", "def response( self, msg ):\n\t\tPROTOCOL.info( 'Sending UMCP RESPONSE %s' % msg.id )\n\t\tdata = str( msg )\n\t\tself.__queue += str(msg)\n\n\t\tif self._do_send( self.__comm ):\n\t\t\tnotifier.socket_add( self.__comm, self._do_send, notifier.IO_WRITE )", "def received_callback(self, data):\n if data['plType'] is 'videoRequest':\n req = data['payload']\n #response = {'idVideo': req['idVideo'], 'duration': 60, 'size': 2048, 'bitrate': 2048/60}\n #resp_data = {'sender': self.id, 'payload': response, 'plSize': response['size'], 'plType': 'video'}\n response = self.__db[req['idVideo']]\n resp_data = self._pack_data(response, 
response['size'], \n 'video', data['packetId'])\n self.connection.send(resp_data)\n req = data['payload']" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract the payload data part from the slave's response.
def _extractPayload(response): # extract bytecount and check it print "response:{}".format(repr(response)) pos = 2 bytecount = ord(response[pos]) pos += 1 if bytecount < 6: raise ValueError(bytecount) subframe = response[2:3+bytecount] # extract DA if ord(subframe[pos]) == DLE: pos += 1 da = ord(subframe[pos]) pos += 1 # extract CW if ord(subframe[pos]) == DLE: pos += 1 cw = ord(subframe[pos]) pos += 1 # extract SAX if ord(subframe[pos]) == DLE: pos += 1 sax = ord(subframe[pos]) pos += 1 # extract SA if ord(subframe[pos]) == DLE: pos += 1 sa = ord(subframe[pos]) pos += 1 # extract cmd cmd = ord(subframe[pos]) return subframe
[ "def _extractPayload(self):\n if self._payloadComplete():\n remainingPayloadSize = (self._expectedPayloadSize -\n self._currentPayloadSize)\n self._payload.write(self._remainingData[:remainingPayloadSize])\n self._remainingData = self._remainingData[remainingPayloadSize:]\n self._currentPayloadSize = self._expectedPayloadSize\n else:\n self._payload.write(self._remainingData)\n self._currentPayloadSize += len(self._remainingData)\n self._remainingData = b\"\"", "def receive_data(self):\n data = b''\n part = self.Socket.recv(4096)\n payload_len = unpack_from('<H', part, 2)[0]\n data += part\n\n while len(data)-24 < payload_len:\n part = self.Socket.recv(4096)\n data += part\n\n return data", "def get_payload(self) -> bytes:\n return self.payload", "def extract_body(self):\n for part in self.msg.walk():\n if part.get_content_type() == 'text/plain':\n return part.get_payload()", "def _tear_down_response(data):\n response_header = data[2:17]\n # Below is actually not used\n response_payload_size = data[18]\n response_payload = data[19:-2]\n response_end = data[-2:]\n return response_header, response_payload, response_end", "def _receive(self):\n\n payloadFromMaster = []\n pos = 0\n\n payloadFromMaster += [self.serial.read()]\n\n while payloadFromMaster[pos] != chr(ETX):\n payloadFromMaster += [self.serial.read()]\n pos += 1\n\n # read FCC\n payloadFromMaster += [self.serial.read()]\n\n payloadFromMaster = filter(None, payloadFromMaster)\n print \"payload from master:{}\".format(repr(payloadFromMaster))\n\n return payloadFromMaster", "def parsed_data(self):\n try:\n return parse_data(\n data=self.get_data(),\n mimetype=self.mimetype,\n charset=self.mimetype_params.get('charset'),\n )\n except (ValueError, LookupError):\n abort(httplib.BAD_REQUEST, 'The payload could not be parsed')", "def _processPayload(self):\n self.stringReceived(self._payload.getvalue()[:-1])", "def extract_body(payload):\n if isinstance(payload, str):\n return payload\n else:\n return '\\n'.join([extract_body(part.get_payload()) for part in payload])", "def get_iot_payload(self):\n\n pass", "def read_LIN(session):\n\n slave_frame, = input_session.frames.read(frame_type=types.LinFrame)\n print('Received SlaveResp Frame :{}'.format(slave_frame))\n data =[int(struct.unpack('B',val)[0]) for val in slave_frame.payload]\n print('SlaveResp Payload = {}'.format([hex(val) for val in data]))\n return data", "def parsePayload(self):\n\n # Checking if the payload has the correct size\n if self.expectedPayloadSize != len(self.payload) - len(self.eop):\n self.sendType6()\n self.log(f\"[ERROR] Received payload size is wrong. 
Please send it again.\", \"server\")\n return\n\n self.findEOP()\n\n self.checkCrc()\n self.payload = self.payload[:-2]\n \n self.removeStuffedBytes()", "def decode_payload(cls, payload: bytes) -> MsgGenericPayload:\n pass", "def get_bt_payload(self):\n\n pass", "def readMessage(self):\n message_type_raw = self.server_socket.recv(1)\n message_len_raw = self.server_socket.recv(1)\n message_type = struct.unpack('>B', message_type_raw)[0]\n message_len = struct.unpack('>B', message_len_raw)[0]\n\n if message_len == 0:\n message_data = bytearray()\n message_payload = None\n else:\n message_data = self.server_socket.recv(message_len)\n logging.debug(\"*** {}\".format(message_data))\n message_payload = json.loads(message_data.decode('utf-8'))\n\n logging.debug('Turned message {} into type {} payload {}'.format(\n binascii.hexlify(message_data),\n self.message_types.to_string(message_type),\n message_payload))\n return message_type, message_payload", "def extract_content(self, response):\n content = response.content\n return content", "def read_response(self, read_packet):\n pass", "def interpret_response(cls, response):\t\t\r\n\t\tif len(response.data) < 1:\r\n\t\t\traise InvalidResponseException(response, \"Response data must be at least 1 bytes\")\r\n\r\n\t\tresponse.service_data = cls.ResponseData()\r\n\t\tresponse.service_data.sequence_number_echo = response.data[0]\r\n\t\tresponse.service_data.parameter_records = response.data[1:] if len(response.data) > 1 else b''", "def deserialize_payload(self):\n if len(self.ibuffer) > 0:\n self.forwarding_func(cPickle.loads(self.ibuffer))\n self.ibuffer = b''" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print the inputstring. To make it compatible with Python2 and Python3.
def _print_out(inputstring): _checkString(inputstring, description='string to print') sys.stdout.write(inputstring + '\n')
[ "def printout(string):\r\n print(string)", "def format_string(console):\n\n pass", "def prettyPrint(string):\n print('*'*75)\n print(string)\n print('*'*75)", "def op_print(self):\n zstr_address = self._opdecoder.get_zstring()\n self._ui.screen.write(self._string.get(zstr_address))", "def ansiprint(self, *args: str, **kwargs):\n\n new_args = (str(i) if not isinstance(i, str) else i for i in args)\n parts = self.parse(*new_args, aslist=True)\n builtins.print(*parts, **kwargs)", "def formatPrintString(self, value):\n pass", "def printNow(output):\n print(output)", "def print_bytes(byte_str):\n if isinstance(byte_str, str):\n self.print(byte_str)\n else:\n self.print(str(byte_str, encoding='utf8'))", "def printStr(self, string):\n x, y = self._cursor\n for char in string:\n if char == '\\n': # line break\n x = 0\n y += 1\n continue\n if char == '\\r': # return\n x = 0\n continue\n x, y = self._normalizeCursor(x, y)\n self.drawChar(x, y, char, self._fgcolor, self._bgcolor)\n x += 1\n self._cursor = (x, y)", "def get_string(self):\n self.text = input(\"Please enter string: \")", "def output_shapestring_console(shapeString):\n print(\"Assignment Number one (Shape List) output:\\n\\n\")\n print(shapeString)", "def check_and_print(string):\n detoks, preds = check_string(string)\n print(f'Checking string: {string}')\n print('Label key: 0: unimportant token, 1: correct, 2: error detected.')\n print('Token | Label')\n for d, p in zip(detoks, preds):\n print(f'{d:<8}| {p}')", "def printString(self):\r\n self.findPath(self.getStart())\r\n for i in range(0, len(self.stringPath)):\r\n if i == 0:\r\n self.sequence += self.stringPath[i]\r\n else:\r\n self.sequence += self.stringPath[i][-1]\r\n print(self.sequence)", "def printInfo(self, aString):\n prefix = \"REFTEST INFO | \"\n print(prefix + aString.replace(\"\\n\", \"\\n\" + prefix))", "def print_lsstring(arg):\n print \"LSString (.p, .n, .l, .s available). Value:\"\n print arg", "def print_file(string, file):\n print(string)\n print(string, file=file)", "def output(self, string):\n self.output_stream.write(f'{string}\\n')", "def snip_print(str,width = 75,print_full = 0,header = ''):\n\n if print_full == 1:\n page(header+str)\n return 0\n\n print header,\n if len(str) < width:\n print str\n snip = 0\n else:\n whalf = int((width -5)/2)\n print str[:whalf] + ' <...> ' + str[-whalf:]\n snip = 1\n if snip and print_full == 2:\n if raw_input(header+' Snipped. View (y/n)? [N]').lower() == 'y':\n page(str)\n return snip", "def prints_thisiscs50ap(self):\n expected = \"[Tt]his is CS50AP!?\\n\"\n actual = self.spawn(\"./syntax\").stdout()\n if not re.match(expected, actual):\n err = Error(Mismatch(\"This is CS50AP!\\n\", actual))\n if re.match(expected[:-1], actual):\n err.helpers = \"Did you forget a newline (\\\"\\\\n\\\") at the end of your printf string?\"\n raise err", "def print_text(txt):\n print(txt)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a diagnostic string, showing the module version, the platform, current directory etc.
def _getDiagnosticString(): text = '\n## Diagnostic output from tacos2 ## \n\n' text += 'Tacos2 version: ' + __version__ + '\n' text += 'Tacos2 status: ' + __status__ + '\n' text += 'File name (with relative path): ' + __file__ + '\n' text += 'Full file path: ' + os.path.abspath(__file__) + '\n\n' text += 'pySerial version: ' + serial.VERSION + '\n' text += 'pySerial full file path: ' + os.path.abspath(serial.__file__) + '\n\n' text += 'Platform: ' + sys.platform + '\n' text += 'Filesystem encoding: ' + repr(sys.getfilesystemencoding()) + '\n' text += 'Byteorder: ' + sys.byteorder + '\n' text += 'Python version: ' + sys.version + '\n' text += 'Python version info: ' + repr(sys.version_info) + '\n' text += 'Python flags: ' + repr(sys.flags) + '\n' text += 'Python argv: ' + repr(sys.argv) + '\n' text += 'Python prefix: ' + repr(sys.prefix) + '\n' text += 'Python exec prefix: ' + repr(sys.exec_prefix) + '\n' text += 'Python executable: ' + repr(sys.executable) + '\n' try: text += 'Long info: ' + repr(sys.long_info) + '\n' except: text += 'Long info: (none)\n' # For Python3 compatibility try: text += 'Float repr style: ' + repr(sys.float_repr_style) + '\n\n' except: text += 'Float repr style: (none) \n\n' # For Python 2.6 compatibility text += 'Variable __name__: ' + __name__ + '\n' text += 'Current directory: ' + os.getcwd() + '\n\n' text += 'Python path: \n' text += '\n'.join(sys.path) + '\n' text += '\n## End of diagnostic output ## \n' return text
[ "def get_system_version_info() -> str:\n output_template = '{:<12} {}'\n line_separator = '-' * 60\n not_found_str = '[Not Found]'\n out_lines = []\n\n # System (Python, OS)\n out_lines += ['System Version Info', line_separator]\n out_lines += [\n output_template.format(name, version) for name, version in (\n ('OS', '%s' % platform.platform()),\n ('Python', '%d.%d.%d' % sys.version_info[0:3]),\n )\n ]\n\n # Third-Party Packages\n out_lines += ['', 'Package Version Info', line_separator]\n backend_modules = (\n 'appdirs',\n 'av',\n 'click',\n 'cv2',\n 'moviepy',\n 'numpy',\n 'tqdm',\n )\n for module_name in backend_modules:\n try:\n module = importlib.import_module(module_name)\n out_lines.append(output_template.format(module_name, module.__version__))\n except ModuleNotFoundError:\n out_lines.append(output_template.format(module_name, not_found_str))\n\n # External Tools\n out_lines += ['', 'Tool Version Info', line_separator]\n\n tool_version_info = (\n ('ffmpeg', get_ffmpeg_version()),\n ('mkvmerge', get_mkvmerge_version()),\n )\n\n for (tool_name, tool_version) in tool_version_info:\n out_lines.append(\n output_template.format(tool_name, tool_version if tool_version else not_found_str))\n\n return '\\n'.join(out_lines)", "def _get_version_string() -> str:\n return \" GDM Version: {}. Registered extension packages: {}.\".format(\n _version.version, extensions.get_registered_package_info())", "def pprint():\n\n ret = \"\"\"----\nBohrium API version: %s\nInstalled through PyPI: %s\nConfig file: %s\nHeader dir: %s\nBackend stack:\n%s----\n\"\"\" % (__version__, installed_through_pypi(), config_file_path(), header_dir(), messaging.runtime_info())\n if not (is_opencl_in_stack() or is_cuda_in_stack()):\n ret += \"Note: in order to activate and retrieve GPU info, set the `BH_STACK=opencl` \" \\\n \"or `BH_STACK=cuda` environment variable.\\n\"\n return ret", "def info(self):\n s = ''\n s = s + '%-16s = %s\\n'%('pdbfile', self.pdbfile)\n s = s + '%-16s = %s\\n'%('seqfile', self.seqfile)\n s = s + '%-16s = %s\\n'%('salt', self.salt)\n s = s + '%-16s = %s\\n'%('saltconc', self.saltconc)\n s = s + '%-16s = %s\\n'%('pH', self.pH)\n s = s + '%-16s = %s\\n'%('boxProtocol', self.boxProtocol)\n s = s + '%-16s = %s\\n'%('modelPDB_out', self.modelPDBout)\n s = s + '%-16s = %s\\n'%('mccePDBout', self.mccePDBout)\n return s", "def printable_location(self):\n return '\"{0}\" ({1})'.format(\n concise_path(self.base_dir), self.pyver)", "def version(self):\n# import subprocess\n# p = subprocess.Popen('ecl --version', shell=True, stdin=subprocess.PIPE,\n# stdout = subprocess.PIPE, stderr=subprocess.PIPE)\n# return AsciiArtString(p.stdout.read())\n return \"Version information is given by lisp.console().\"", "def get_version_str():\n return pkg_resources.get_distribution(\"lando_messaging\").version", "def get_distribution():\n\ts = platform.system() + ' ' + platform.version()\n\treturn s", "def FrameworkDescription(self) -> str:", "def print_version():\n try:\n print('Build date: %s (%#x)' % (build_date, hex_version()))\n print('LibVLC version: %s (%#x)' % (bytes_to_str(libvlc_get_version()), libvlc_hex_version()))\n print('LibVLC compiler: %s' % bytes_to_str(libvlc_get_compiler()))\n if plugin_path:\n print('Plugin path: %s' % plugin_path)\n except:\n print('Error: %s' % sys.exc_info()[1])", "def debug(context):\n\n from pprint import pformat\n output = [pformat(val) for val in context]\n output.append('\\n\\n')\n output.append(pformat(sys.modules))\n return ''.join(output)", "def OSDescription(self) -> 
str:", "def get_version() -> str:\n return VERSION", "def show(ctx, module_name, version):\n module_tree = ctx.obj.check_module_tree()\n loader = ctx.obj.check_module(module_tree, module_name, version)\n click.echo(\"\".join(open(loader.moduledotfile_path()).readlines()))", "def create_installation_doc_version_text(version):\n os_text = \"installed within a Windows or Linux environment.\"\n if version == PythonPackageConfigSection.UNIVERSAL_LANGUAGE_VERSION:\n version_text = \\\n PythonPackageConfigSection.MINIMUM_PYTHON_2_LANGUAGE_VERSION + \\\n \" or higher in the Python 2.x series or \" + \\\n PythonPackageConfigSection.MINIMUM_PYTHON_3_LANGUAGE_VERSION + \\\n \" or higher in the Python 3.x series \" + \\\n os_text\n elif version == \"3\":\n version_text = \\\n PythonPackageConfigSection.MINIMUM_PYTHON_3_LANGUAGE_VERSION + \\\n \" or higher \" + os_text\n elif version == \"2\":\n version_text = \\\n PythonPackageConfigSection.MINIMUM_PYTHON_2_LANGUAGE_VERSION + \\\n \" or higher \" + os_text + \\\n \" (Python 3 is not supported at this time)\"\n else:\n raise Exception(\"Unexpected version passed into \" +\n \"create_installation_doc_version_text function\")\n return version_text", "def get_app_info_string():\n app_info_list = _get_formatted_thread_stack_traces()\n return '\\n'.join(app_info_list)", "def get_product_identifier_string():\n ver = None\n try:\n ver = get_product_version()\n except:\n ver = '(unknown version)'\n\n return '%s %s' % (constants.PRODUCT_NAME, ver)", "def shortname(self):\n return '%s-%s' % (self.product, self.version)", "def _construct_msg(self) -> str:\n return '\\n'.join([\n self._formatted_filename(), self._err_description()])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check das is equal to or less than dae.
def _checkAddress(das, dae): if not(das <= dae): raise ValueError('The DAS{0} must be equal to or less than DAE{0}'.format(das, dae))
[ "def validate(ddtable):\n margin_upp = ddtable.sum(axis=1).transpose()\n count_upp = count_vec(margin_upp)\n remainder_upp = np.remainder(margin_upp, count_upp)\n\n margin_low = ddtable.sum(axis=0)\n count_low = count_vec(margin_low)\n remainder_low = np.remainder(margin_low, count_low)\n\n if not ((remainder_low == 0).all() and (remainder_upp == 0).all()):\n return False\n\n # e_ij <= d^u_i * d^l_j\n div_upp = np.divide(margin_upp, count_upp)\n div_low = np.divide(margin_low, count_low)\n for i in xrange(0,div_upp.size):\n for j in xrange(0,div_low.size):\n if ddtable[i,j] > div_upp.A1[i] * div_low.A1[j]: # is this the right way to access this?\n print (i, j, ddtable[i,j], div_upp.A1[i] * div_low.A1[j])\n return False\n return True", "def test_pSmad2(self):\n self.assertGreater(self.df.loc['T', 'pSmad2'], self.df.loc['D', 'pSmad2'], 1)", "def dust_detect(self):\n self.df[\"dust\"] = (self.df[\"aod_1020nm\"] > 0.3) & (\n self.df[\"440-870_angstrom_exponent\"] < 0.6\n )", "def __ge__(self, rhs_Die):\n return self._currentRoll > rhs_Die._currentRoll", "def eval_dose(self):\n if self.juice.amount < self.dose.amount_defined:\n # the amount of juice left is not enough\n return False\n else:\n # the dose is valid so the fruit juice amount can be decreased\n return True", "def validstep(self,ene,ds):\n return True", "def are_gsds_valid(self, cam_num, gsds):\n if self.bool_k_gsd:\n if self.min_gsd == -1 and self.max_gsd == -1:\n if gsds[2]/gsds[0] <= self.k_gsd:\n self.min_gsd = gsds[0]\n self.max_gsd = gsds[2]\n return True\n else:\n return False\n\n if ((self.max_gsd/gsds[0] >= self.k_gsd\n or gsds[2]/self.min_gsd >= self.k_gsd)):\n if self.max_gsd/gsds[0] >= self.k_gsd:\n print(\"GSD problem at cam {}: gsds[0]({:.2f}), max_gsd(\"\n \"{:.2f}), ratio {}\".upper().format(\n cam_num, gsds[0], self.max_gsd,\n self.max_gsd/gsds[0]))\n\n elif gsds[2]/self.min_gsd >= self.k_gsd:\n print(\"GSD problem at cam {}: gsds[2]({:.2f}), min_gsd(\"\n \"{:.2f}), ratio {}\".upper().format(\n cam_num, gsds[2], self.min_gsd,\n gsds[2]/self.min_gsd))\n else:\n print(\"UNKNOWN GSD ERROR\")\n return False\n return True\n else:\n return set(\n [self.min_gsd <= i <= self.max_gsd for i in gsds]) == set(\n len(gsds)*[True])", "def __gt__(self, rhs_Die):\n return self._currentRoll > rhs_Die._currentRoll", "def cfcheck(**das):\n return True", "def __lt__(self, rhs_Die):\n return self._currentRoll < rhs_Die._currentRoll", "def test_comp_CDO(self):\n A = 8\n CL = 1\n e = 0.8\n CD = 0.4\n # Got value from a hand computation\n self.assertAlmostEqual(Aircraft.comp_CD0(CL, A, e, CD), 0.3503, places=4)", "def check_doy(doy):\n\n if 0 <= doy <= 359:\n return True\n else:\n print('Error: day of year out of bounds')\n print(doy)\n return False", "def test_less_than(self):\n self.assertFalse(self.atom1 < self.atom2) # Because the sorting keys should be identical\n self.assertLess(self.atom2, self.atom3)\n self.assertLess(self.atom4, self.atom1)", "def compare(self,est,trueval):\n relerror = abs((est - trueval)/trueval )\n print relerror\n return relerror < TOL", "def test_x1(self):\n self.assertEqual(sd.e_distance((0, 0), (1, 0)), 1)", "def test_field_id_lt(self):\n outname=self.prefix+self.postfix\n field = '<6'\n self.res=sdmath(field=field,expr=self.expr,outfile=outname)\n tbsel = {'FIELDNAME': ['M100__5']}\n self.assertEqual(self.res,None,\n msg='Any error occurred during calibration')\n self._comparecal_with_selection(outname, tbsel)", "def constraint_not_satisfied(self, var, val, v, dom_val):\n if val == dom_val:\n return 
True\n col1 = int(var[1:]) - 1\n col2 = int(v[1:]) - 1\n if val == dom_val - abs(col2 - col1) or val == dom_val + abs(col2 - col1):\n return True\n return False", "def test_less_than(self):\n self.assertFalse(self.mol1 < self.mol2) # Because the sorting keys should be identical\n self.assertLess(self.mol1, self.mol3)", "def mayorEdad(self,edad):\n\t\t\t\n\t\tif edad>=18:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Momentum as a function of angle for an isotropic wind
def isotropic_momentum(theta): return 1.0
[ "def get_heeling_moment(angle_to_wind):\n a = angle_to_wind % tau\n if a > pi:\n a -= tau\n if closest_starboard < a < closest_port:\n return 0\n return sin(0.5 * a) - 0.25 * sin(1.5 * a)", "def anisotropic_momentum(theta):\n if theta <= np.pi/2:\n return np.cos(theta)**MOMENTUM_K\n else:\n return 0.0", "def moon_illuminated_fraction(self):\n return (1 + np.cos(np.deg2rad(self.moon_phase_angle))) / 2.0", "def moon_phase_angle(self):\n # Trigger calculation if necessary.\n _ = self.alt_az_frame\n elongation = self._sun_radec.separation(self._moon_radec)\n return np.arctan2(\n self._sun_radec.distance*np.sin(elongation),\n self._moon_radec.distance -\n self._sun_radec.distance * np.cos(elongation)).to(u.deg).value", "def get_circular_intensity(self):\n return np.linalg.norm(self.angular_momentum)*2", "def moment_stall_angle(self):\n data = self.data\n dcm = data['cm'].values[1:] - data['cm'].values[:-1]\n aoa = (data['aoa'].values[1:] + data['aoa'].values[:-1]) * 0.5\n dcm = dcm[np.where(aoa > 5)]\n aoa = aoa[np.where(aoa > 5)]\n try:\n if (np.min(dcm) < 0):\n stall_idx = np.where( dcm > 0)[0][0]-1\n return aoa[stall_idx] - dcm[stall_idx]/(dcm[stall_idx+1] - dcm[stall_idx])\n else:\n data['dsqcm'] = np.gradient(np.gradient(data['cm']))\n t_data = data.loc[data['aoa'] < 10]\n return t_data.iloc[t_data['dsqcm'].argmax()]['aoa']\n except:\n t_data = data.loc[data['aoa'] < 10]\n return t_data.iloc[t_data['cm'].argmin()]['aoa']", "def angular(self):\n return 2.0 * np.pi * self.center", "def declination_angle(self):\n\t\tinside_sin = math.radians((360 * (284 + int(self.n)))/(float(365)))\n\t\t#return float(23.45 * math.sin (( inside_sin) )) #returns a number with units of Degrees\n\t\treturn float(23.45 * math.sin (( inside_sin) )) #returns a number with units of Degrees", "def angle_to_wind(self):\n wd = self.wind_direction\n if wd > 180:\n wd -= 360\n return -wd", "def moment_of_inertia(self):\n return (2 * self.mass() * self.radius ** 2) / 5", "def illuminated_fraction_of_moon(y, m, d):\n\n a0, d0, r0 = lunar_position(y, m, d)\n a, d, r = solar_position(y, m, d)\n\n a0 *= DEG_TO_RAD\n d0 *= DEG_TO_RAD\n a *= DEG_TO_RAD\n d *= DEG_TO_RAD\n r *= AU_TO_M\n\n phi = acos(sin(d0)*sin(d) + cos(d0)*cos(d)*cos(a0 - a))\n i = atan2(r*sin(phi), (r0 - r*cos(phi)))\n return 0.5 * (1 + cos(i))", "def angular_momentum(snap: SnapLike) -> ndarray:\n mass: ndarray = snap['mass']\n pos: ndarray = snap['position']\n vel: ndarray = snap['velocity']\n return (mass[:, np.newaxis] * np.cross(pos, vel)).sum(axis=0)", "def proplyd_momentum(theta):\n return DIFFUSE_BETA + (1.0 - DIFFUSE_BETA)*np.sqrt(max(0.0,np.cos(theta)))", "def azimuth_angle(self):\n\t\tdiv = math.cos(math.radians(self.declination_angle())) * (math.sin(math.radians(self.hour_angle())) / math.cos(math.radians(self.altitude_angle())))\n\t\treturn math.degrees(math.asin(div))", "def hour_angle(self):\n\n\t\t#turn the solar time into total seconds (since midnight)\n\t\tseconds_solartime = self.solar_time().hour*3600 + self.solar_time().minute*60 + self.solar_time().second\n\t\tseconds_from_solar_noon = abs(seconds_solartime - 12*3600)#noon in seconds\t\t\n\t\treturn (float(seconds_from_solar_noon)/60)/4 #units = degrees", "def minAngNum(ang):\n while ang < 0: ang += math.radians(360)\n ang %= math.radians(360)\n return ang", "def unit_direction(angle):\n return complex(math.cos(angle), math.sin(angle))", "def _phase_lag(self):\n return np.angle(self.unnorm_power)", "def moment_arm(self, x):\n return x - self.center_of_mass_" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Momentum as a function of angle for a proplyd wind. Proportional to sqrt(cos(theta)) in the head (theta < pi/2). The tail value is set via the module-level variable DIFFUSE_BETA.
def proplyd_momentum(theta):
    return DIFFUSE_BETA + (1.0 - DIFFUSE_BETA)*np.sqrt(max(0.0,np.cos(theta)))
[ "def isotropic_momentum(theta):\n return 1.0", "def anisotropic_momentum(theta):\n if theta <= np.pi/2:\n return np.cos(theta)**MOMENTUM_K\n else:\n return 0.0", "def get_heeling_moment(angle_to_wind):\n a = angle_to_wind % tau\n if a > pi:\n a -= tau\n if closest_starboard < a < closest_port:\n return 0\n return sin(0.5 * a) - 0.25 * sin(1.5 * a)", "def temp_alpha(self,deltaT,theta, omega):\n return -(9.81/self.length) * np.sin(theta)", "def angular(self):\n return 2.0 * np.pi * self.center", "def moon_illuminated_fraction(self):\n return (1 + np.cos(np.deg2rad(self.moon_phase_angle))) / 2.0", "def heading_theta(self):\n return (pgdrive_heading(self.origin.getH()) - 90) / 180 * math.pi", "def declination_angle(self):\n\t\tinside_sin = math.radians((360 * (284 + int(self.n)))/(float(365)))\n\t\t#return float(23.45 * math.sin (( inside_sin) )) #returns a number with units of Degrees\n\t\treturn float(23.45 * math.sin (( inside_sin) )) #returns a number with units of Degrees", "def angular_momentum(snap: SnapLike) -> ndarray:\n mass: ndarray = snap['mass']\n pos: ndarray = snap['position']\n vel: ndarray = snap['velocity']\n return (mass[:, np.newaxis] * np.cross(pos, vel)).sum(axis=0)", "def moment_stall_angle(self):\n data = self.data\n dcm = data['cm'].values[1:] - data['cm'].values[:-1]\n aoa = (data['aoa'].values[1:] + data['aoa'].values[:-1]) * 0.5\n dcm = dcm[np.where(aoa > 5)]\n aoa = aoa[np.where(aoa > 5)]\n try:\n if (np.min(dcm) < 0):\n stall_idx = np.where( dcm > 0)[0][0]-1\n return aoa[stall_idx] - dcm[stall_idx]/(dcm[stall_idx+1] - dcm[stall_idx])\n else:\n data['dsqcm'] = np.gradient(np.gradient(data['cm']))\n t_data = data.loc[data['aoa'] < 10]\n return t_data.iloc[t_data['dsqcm'].argmax()]['aoa']\n except:\n t_data = data.loc[data['aoa'] < 10]\n return t_data.iloc[t_data['cm'].argmin()]['aoa']", "def pendulumPeriod(self):\n return 2.0*math.pi*math.sqrt(self.l/self.g)*(1.0 + self.theta0**2*(1.0/16.0))", "def moon_phase_angle(self):\n # Trigger calculation if necessary.\n _ = self.alt_az_frame\n elongation = self._sun_radec.separation(self._moon_radec)\n return np.arctan2(\n self._sun_radec.distance*np.sin(elongation),\n self._moon_radec.distance -\n self._sun_radec.distance * np.cos(elongation)).to(u.deg).value", "def momentum_resolution(p) :\n return 0.005", "def angular_velocity(self):\r\n\r\n self.omega += self.angular_acceleration*self.dt\r\n return self.omega", "def angular_speed_set_point(self):\n return self.radians(self._motor.speed_sp / self._gear_ratio)", "def adjust_heading_degrees(alpha):\n return mod(alpha + 180, 360) - 180", "def theta_max(self, phi):\n\n # The polar angle is fixed, so return zero.\n return 0.0", "def get_circular_intensity(self):\n return np.linalg.norm(self.angular_momentum)*2", "def pendulumEnergy(self):\n return abs(0.5*self.m*(self.l)**2*(self.omega0)**2 - self.m*self.g*self.l*(math.cos(self.theta0)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Momentum as a power law in cos(theta) on the forward hemisphere only
def anisotropic_momentum(theta):
    if theta <= np.pi/2:
        return np.cos(theta)**MOMENTUM_K
    else:
        return 0.0
[ "def isotropic_momentum(theta):\n return 1.0", "def proplyd_momentum(theta):\n return DIFFUSE_BETA + (1.0 - DIFFUSE_BETA)*np.sqrt(max(0.0,np.cos(theta)))", "def moment_of_inertia(self):\n return (2 * self.mass() * self.radius ** 2) / 5", "def _psi(self, m):\n return -self._twosigma2 * numpy.exp(-(m**2.0) / self._twosigma2)", "def get_circular_equivalent_fwhm(self):\n return np.sqrt(self.x_fwhm * self.y_fwhm).to(\n self.x_fwhm.unit)", "def cosmo_Omega_gw(self):\n ## Cosmological Spectrum\n # cosmo_omega_gw = self.omega_gw ## units = Independent of frequency\n cosmo_omega_gw = self.omega_gw * (self.frequency / 10)**0\n return cosmo_omega_gw", "def setup_phi_eff(self):\n momentum = self.p0\n time = 0.\n phi_eff = 0.\n for turn in range(self.n_turns+5):\n # evolve through one full revolution\n time += self.tof(momentum)\n self.phase_list[0].append(momentum)\n # phi_eff is the phase that a particle on the synchronous phase \n # passes through the reference surface\n phi_eff = time*self.omega + 2.*math.pi*self.phi_s\n self.phase_list[1].append(phi_eff)\n # increment the energy\n energy = (momentum**2+self.mass**2)**0.5\n delta_energy = self.v_eff*math.sin(2.*math.pi*self.phi_s)\n energy += delta_energy\n momentum = (energy**2-self.mass**2)**0.5", "def f(self,t,y):\n return -self.lambd*y + 2*scipy.ones_like(y)*scipy.exp(-t)*scipy.cos(2*t)", "def cos_nu(self):\n return np.clip((self.p - self.r) / (self.e * self.r), -1.0, 1.0)", "def frame_central_moment ( frame , order , expression , cuts = '' ) :\n node = as_rnode ( frame )\n return SV.data_central_moment ( node , order = order , expression = expression , cuts = cuts )", "def f_molGas_dyn(self):\n# print self.M_gas, self.M_dyn\n return self.M_gas / self.M_dyn", "def moment(self, order, Rg, volume, maxiter=100):\n amp = self.amplitude # we want to convolve the normalized power spectrum\n kSwitch = 2*np.pi/Rg\n s1 = integrate(self.moment_integrant, 0, kSwitch, \\\n args = (order, Rg), maxiter=maxiter)[0]\n s2 = integrate(self.moment_integrantInv, 1e-30, 1/kSwitch, \\\n args = (order, Rg), maxiter=maxiter)[0]\n return np.sqrt( amp * (s1+s2) * volume / (2*np.pi)**3 )", "def calc_mag(self, time):\n\n return (self.f0 + (self.df * math.sin((self.w * time) + self.theta)))", "def driving_force(self, t):\n return self.gamma_ext * np.cos(self.omega_ext*t)", "def ApproxHeel(self, Fs, gammas, Fk, gammak, deltaFs, deltagammas):\n tanheel = (Fs * self.hs * np.sin(gammas) + Fk * self.hk * np.sin(gammak)) / (self.hb * self.wb)\n heel = np.arctan(tanheel)\n dheel = self.hs * (deltaFs * np.sin(gammas) + Fs * np.cos(gammas) * deltagammas) \\\n / ((1.0 + tanheel ** 2) * self.hb * self.wb)\n return heel, dheel", "def first_moment_inertia(self):\r\n if self.int_method =='OpenFAST':\r\n dr = checkRegularNode(self.s_span)\r\n s_span = self.s_span[1:-1] # NOTE: temporary, m shouldn't me used with this method\r\n m = self.m[1:-1] *dr # Important HACK \r\n s_G = self.s_G[:,1:-1]\r\n #np.sum(yy) \r\n #p['FirstMom'] = sum(p['BElmntMass']*p['RNodes']) + p['TipMass']*p['BldFlexL'] # wrt blade root \r\n S1x = np.sum(s_G[0,:]*m)\r\n S1y = np.sum(s_G[1,:]*m)\r\n S1z = np.sum(s_G[2,:]*m)\r\n return S1x, S1y, S1z\r\n\r\n else:\r\n raise NotImplementedError()", "def _fiber_length_explicit_musculotendon_dynamics(self):\n self._l_M_tilde = dynamicsymbols(f'l_M_tilde_{self.name}')\n self._l_MT = self.pathway.length\n self._v_MT = self.pathway.extension_velocity\n self._l_M = self._l_M_tilde*self._l_M_opt\n self._l_T = self._l_MT - sqrt(self._l_M**2 - 
(self._l_M_opt*sin(self._alpha_opt))**2)\n self._l_T_tilde = self._l_T/self._l_T_slack\n self._cos_alpha = (self._l_MT - self._l_T)/self._l_M\n self._fl_T = TendonForceLengthDeGroote2016.with_defaults(self._l_T_tilde)\n self._fl_M_pas = FiberForceLengthPassiveDeGroote2016.with_defaults(self._l_M_tilde)\n self._fl_M_act = FiberForceLengthActiveDeGroote2016.with_defaults(self._l_M_tilde)\n self._F_T_tilde = self._fl_T\n self._F_T = self._F_T_tilde*self._F_M_max\n self._F_M = self._F_T/self._cos_alpha\n self._F_M_tilde = self._F_M/self._F_M_max\n self._fv_M = (self._F_M_tilde - self._fl_M_pas)/(self.a*self._fl_M_act)\n self._v_M_tilde = FiberForceVelocityDeGroote2016.with_defaults(self._fv_M)\n self._dl_M_tilde_dt = (self._v_M_max/self._l_M_opt)*self._v_M_tilde\n\n self._state_vars = Matrix([self._l_M_tilde])\n self._input_vars = zeros(0, 1)\n self._state_eqns = Matrix([self._dl_M_tilde_dt])", "def mass(self):\n return 4 / 3 * np.pi * self.radius ** 3 * self.rho", "def get_circular_intensity(self):\n return np.linalg.norm(self.angular_momentum)*2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The arguments w and w1 should be instances of the class Wind(). The inner wind, w, should have origin=True, while the outer wind, w1, should have origin=False. See the Shell() class for an easier-to-use wrapper around this class.
def __init__(self, w, w1):
    self.w = w    # "inner" wind
    self.w1 = w1  # "outer" wind

    # We save the values of theta and theta1, so we can use them
    # to find an initial estimate of theta1 for the next angle
    # theta
    self.th1_save = None
    self.th_save = None

    # Pre-calculate the on-axis radius of the shell
    self.beta = self.w.axial_momentum_flux / self.w1.axial_momentum_flux
    self.R0 = np.sqrt(self.beta)/(1.0 + np.sqrt(self.beta))
[ "def wind(self) -> WindData:\n pass", "def wind(self) -> ObservationsSummaryWind:\n return ObservationsSummaryWind(self.summary[\"wind\"])", "def generate_wind():\n# Taken by converting UTM Zone 11 coordinates on\n# https://www.engineeringtoolbox.com/utm-latitude-longitude-d_1370.html\n# These values specific to files called yosemite_landscape_12-03-2019_0900_120m\n west_lon = -120.006255\n east_lon = -119.4736\n south_lat = 37.464649\n north_lat = 37.822073\n\n# Open .shp and .dbf files with rb\n myshp = open(\"SHAPEFILES/HOUR1/yosemite_landscape_12-03-2019_0900_120m.shp\", \"rb\")\n mydbf = open(\"SHAPEFILES/HOUR1/yosemite_landscape_12-03-2019_0900_120m.dbf\", \"rb\")\n wind = Wind(myshp, mydbf, west_lon, east_lon, south_lat, north_lat)\n\n# Regrid the base data onto a 30mx30m grid and bounded at the coordinates described\n# Our model focuses on the area between -120W to -119.5W, and 37.5N to 37.8N\n new_wind = wind.regrid(30, -120, -119.5, 37.5, 37.8)\n return new_wind", "def create_windows(self):\n\n # implemented in sub classes", "def windpower(dataManager: DataManager, config: Config) -> None:\n st.title('Wind power experiments')\n\n PLOTS = dict(\n variable='Climate Model windspeeds',\n upscale='Provisioning windpower for Krummhรถrn', \n ternary='Ternary surface plot for Krummhรถrn',\n management='Uncertainty analysis for windpower provisioning'\n )\n \n # add the expert Mode\n expert_mode = st.sidebar.checkbox('Unlock Expert mode', value=False)\n\n # add the plot controller\n n_plots = int(st.sidebar.number_input('Number of Charts', value=1, min_value=1, max_value=5))\n\n for i in range(n_plots):\n with st.expander(f'Detail Chart #{i + 1}', expanded=i == n_plots - 1):\n plt_type = st.selectbox('Chart Type', options=list(PLOTS.keys()), format_func=lambda k: PLOTS.get(k), key=f'plot_select_{i}')\n\n # switch the plots\n if plt_type == 'variable':\n windspeed_rcp_plots(dataManager, config, key=f'windspeed_{i + 1}')\n \n elif plt_type == 'upscale':\n upscale_plots(dataManager, config, expert_mode=expert_mode, key=f'upscale_{i + 1}')\n\n elif plt_type == 'ternary':\n upscale_ternary_plot(dataManager, config, expert_mode=expert_mode, key=f'ternary_{i + 1}')\n\n elif plt_type == 'management':\n management_plot(dataManager, config, expert_mode=expert_mode, key=f'management_{i + 1}')", "def swap_windows(self):\n w1 = w2 = None\n others = []\n for extractor in self.extractors_:\n if extractor.name == 'w1':\n w1 = extractor\n elif extractor.name == 'w2':\n w2 = extractor\n else:\n others.append(extractor)\n new_w1 = AggregatedFeatureExtractor(w2.extractors_, 'w1')\n new_w2 = AggregatedFeatureExtractor(w1.extractors_, 'w2')\n extractors = [new_w1, new_w2] + others\n return AggregatedFeatureExtractor(extractors, self.name)", "def _update_w(self):\n pass", "def Test(T,W,TrueWC):\r\n \r\n WC = WindChill(T,W)\r\n print 'WindChill returns %4d Actual = %5.1f Input = (%s,%s) ' % (WindChill(T,W),TrueWC,T,W)", "def test_init_WRSM(self):\n self.test_obj = MachineWRSM(type_machine=9)\n\n self.test_obj.stator = LamSlotWind()\n self.test_obj.stator.slot = SlotW22(Zs=36, H0=0.001, H2=0.01, W0=0.1, W2=0.2)\n self.test_obj.stator.winding = WindingDW2L(p=8, qs=4)\n\n self.test_obj.rotor = LamSlotWind()\n self.test_obj.rotor.slot = SlotW22(Zs=36, H0=0.001, H2=0.01, W0=0.1, W2=0.2)\n self.test_obj.rotor.winding = Winding(p=8, qs=4)\n\n self.widget = SWindPat(machine=self.test_obj, matlib=[], is_stator=True)\n self.widget2 = SWindPat(machine=self.test_obj, matlib=[], is_stator=False)\n\n # Check result stator\n 
assert type(self.test_obj.stator.winding) == WindingDW2L\n assert self.test_obj.stator.winding.p == 8\n assert self.test_obj.stator.winding.qs == 4\n assert self.widget.si_qs.isEnabled() == True\n assert self.widget.si_coil_pitch.isHidden() == False\n assert self.widget.si_Nslot.value() == 0\n assert self.widget.c_wind_type.currentIndex() == 2\n assert self.widget.c_wind_type.currentText() == \"Double Layer Distributed\"\n assert self.widget.is_reverse.checkState() == Qt.Unchecked\n assert self.widget.out_shape.text() == \"Winding Matrix shape: [2, 1, 36, 4]\"\n # check result rotor\n assert type(self.test_obj.rotor.winding) == WindingCW2LT\n assert self.test_obj.rotor.winding.p == 8\n assert self.test_obj.rotor.winding.qs == 1\n assert self.widget2.si_qs.value() == 1\n assert self.widget2.si_qs.isEnabled() == False\n assert self.widget2.si_coil_pitch.isHidden() == True\n assert self.widget2.si_Nslot.value() == 0\n assert self.widget2.c_wind_type.currentIndex() == 0\n assert (\n self.widget2.c_wind_type.currentText()\n == \"DC wound winding for salient pole\"\n )\n assert self.widget2.is_reverse.checkState() == Qt.Unchecked\n assert self.widget2.out_shape.text() == \"Winding Matrix shape: [1, 2, 36, 1]\"", "def land_widget_mover_new(LandWidget *parent, char const *text, int x, int y, int w, int h) -> LandWidget *:\n LandWidgetMover *self\n\n land_widget_mover_interface_initialize()\n\n # FIXME: inhibit layout changes until our own vt is set\n\n land_alloc(self)\n LandWidget *base = (LandWidget *)self\n land_widget_button_initialize(base, parent, text, None, false, None, x, y, w, h)\n base->vt = land_widget_mover_interface\n \n land_widget_layout_set_shrinking(base, 0, 1)\n land_widget_theme_initialize(base)\n if parent: land_widget_layout(parent)\n\n # by default, move the parent. \n self.target = parent\n self.dragged = 0\n return base", "def workspaceControl(name, dockToPanel=\"string\", defineTemplate=\"string\", requiredControl=\"string\", useTemplate=\"string\", label=\"string\", heightProperty=\"string\", tabToControl=\"string\", widthProperty=\"string\", width=bool, collapse=bool, checksPlugins=bool, height=bool, loadImmediately=bool, requiredPlugin=\"string\", exists=bool, restore=bool, initialWidth=int, visibleChangeCommand=\"string\", visible=bool, closeCommand=\"string\", dockToMainWindow=\"string\", minimumWidth=bool, dockToControl=\"string\", r=bool, uiScript=\"string\", retain=bool, initialHeight=int, close=bool, floating=bool):\n pass", "def moveWest(self):\n pass", "def window(w, func=hanning, axis=0): ###\n if isnumpyarray(w):\n y = w\n elif hasattr(w, \"y\"):\n w = w.copy()\n y = w.y\n else:\n raise TypeError(\"don't know how to handle this kind of carrier object\")\n envelope = func(y.shape[0])\n envelope.shape = [\n {True: envelope.size, False: 1}[dim == axis] for dim in range(y.ndim)\n ]\n y = y * envelope\n if isnumpyarray(w):\n w = y\n else:\n w.y = y\n return w", "def __toggleWindow(self, w):\n if w.isHidden():\n w.show()\n else:\n w.hide()", "def winding(self, winding: int):\n\n self._winding = winding", "def hpc_window(dx,dy,dz,hwall):\n\thwindow = hwall-(hwall/4.) 
\n\tdimborder = hwindow/2.\n\n\tc = CUBOID([dx,dy,dz])\n\tc1 = CUBOID([(dx-(3*dimborder))/2.,dy,(dz-(3*dimborder))/2.])\n\tc2 = STRUCT([T([1,3])([dimborder,dimborder]),c1,T([1])([((dx-(3*dimborder))/2.)+dimborder]),c1])\n\tc3 = STRUCT([c2, T(3)([((dz-(3*dimborder))/2.)+dimborder]),c2])\n\n\n\tborders = STRUCT([DIFFERENCE([c,c3])])\n\tglasses = STRUCT([T(2)(3*(dy/8.)),CUBOID([dx,dy/4.,dz])])\n\n\tborders = TEXTURE(\"src/texture/texturefloor.jpg\")(borders)\n\tglasses = TEXTURE(\"src/texture/textureglass.jpg\")(glasses)\n\n\n\twindow = STRUCT([borders,glasses])\n\treturn window", "def adj_west(self):\n \n entries_GWsky = self.load_entries(\"GWsky_entries\")\n fov_center_ra, fov_center_dec = entries_GWsky[0::2], entries_GWsky[1::2]\n\n for ra_start, dec_start in zip (fov_center_ra, fov_center_dec):\n ra_start, dec_start = float(ra_start), float(dec_start)\n\n aladin.select(\"P:\"+str(ra_start) + ',' + str(dec_start))\n \n ra_distance = self.ra0ra1((0 - self.SHIFT_CORRECTION + self.shift_right),\n float(dec_start), float(dec_start))\n \n aladin.select(\"P:\"+str(ra_start) + ',' + str(dec_start))\n \n west_adj = [(float(ra_start) - ra_distance), (float(dec_start) + 0)]\n ra, dec = west_adj[0], west_adj[1]\n\n aladin.set_target(ra, dec)\n aladin.set_plane_id(\"P:\"+str(ra) + ',' + str(dec)) \n\n new_sky_pos = [ra,dec] # cycle variables\n self.entries_GWsky_new.extend(new_sky_pos)\n\n #aladin.remove(\"Q:\"+str(ra_start)+\"/\"+str(dec_start))\n Utils.delete_pointing(infile=\"GWsky_pointings.txt\",\n ra=str(ra_start), dec=str(dec_start))\n\n #aladin.remove(\"C_\" + str(ra_start) + \"/\" + str(dec_start))\n\n with open('GWsky_entries', 'wb') as data:\n pickle.dump(self.entries_GWsky_new, data)", "def _mac_unsetwin(self, arg):\n if type(arg) == type(()):\n apply(Qdoffs.SetGWorld, arg)\n else:\n Qd.SetPort(arg)", "def wind_dir_decider(self, where_low_r: ndarray, wdir_cube: Cube) -> None:\n if self.backup_method == \"neighbourhood\":\n # Performs smoothing over a 6km square neighbourhood.\n # Then calculates the mean wind direction.\n child_class = WindDirection(backup_method=\"first_realization\")\n child_class.wdir_complex = self.nbhood(\n wdir_cube.copy(data=self.wdir_complex)\n ).data\n child_class.realization_axis = self.realization_axis\n child_class.wdir_slice_mean = self.wdir_slice_mean.copy()\n child_class.calc_wind_dir_mean()\n improved_values = child_class.wdir_slice_mean.data\n else:\n # Takes realization zero (control member).\n improved_values = wdir_cube.extract(iris.Constraint(realization=0)).data\n\n # If the r-value is low - substitute average wind direction value for\n # the wind direction taken from the first ensemble realization.\n self.wdir_slice_mean.data = np.where(\n where_low_r, improved_values, self.wdir_slice_mean.data\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the spherical radius of the shell as a function of angle. Should work with scalar or vector argument `theta`. Returns `radius`, but if positional argument `full` is `True`, then the corresponding `theta1` is returned as well.
def radius(self, theta, method='brent', full=False):
    def _radius(theta):
        """Helper function to find the shell radius for a single angle, theta"""
        if theta == 0.0:
            # special treatment for the axis
            return self.R0
        elif theta >= self.th_infty:
            # Indicate that we have gone too far
            return -1.0
        else:
            if method == 'fsolve':
                if self.th1_save is None:
                    # For the first off-axis angle, we use the fact
                    # that R0 tan(theta) ~= (1 - R0) tan(theta1) for
                    # small theta
                    th1_guess = theta*self.R0 / (1.0 - self.R0)
                else:
                    # For subsequent angles, we do geometric extrapolation
                    th1_guess = self.th1_save*theta/self.th_save
                # The tricky bit here is getting th1_guess to be close
                # enough to the true solution. If it is not, then the
                # solver will fail
                theta1 = _solve_for_th1(self.w, self.w1, theta,
                                        th1_guess, method=method)
            else:
                # Assume other methods require root to be bracketed
                # Must be between 0 and th1_infty
                if self.th1_save is None:
                    a, b = 1e-10, self.th1_infty
                else:
                    a, b = self.th1_save, self.th1_infty

                theta1 = _solve_for_th1(self.w, self.w1, theta,
                                        bounds=[a, b], method=method)
            if DEBUG_LEVEL > 0:
                print('+++', self.th_infty - theta, self.th1_infty - theta1)
            self.th_save = theta
            self.th1_save = theta1
            return _radius_eq23(theta, theta1)

    try:
        # case where theta is iterable
        rslt = np.empty_like(theta)
        th1_rslt = np.empty_like(theta)
        for i, t in enumerate(theta):
            r = _radius(t)
            if r > 0.0:
                rslt[i] = r
                th1_rslt[i] = self.th1_save
            else:
                # assume we have got to th_max
                # so fill the remainder with NaNs
                rslt[i:] = np.nan
                th1_rslt[i:] = np.nan
                break
        if full:
            return rslt, th1_rslt
        else:
            return rslt
    except TypeError:
        # fall-over case where theta is scalar
        if full:
            return _radius(theta), self.th1_save
        else:
            return _radius(theta)
[ "def sphere(radius):\n if not isinstance(radius, float) or radius <= 0:\n raise ValueError(f\"Incorrect value ({radius}) for radius\")\n substrate = _Substrate(\"sphere\", radius=radius)\n return substrate", "def sphere(radius):\n M = np.diag([1., 1., 1., -(radius ** 2)])\n if radius < 0:\n M *= -1\n return Quadric(M)", "def sphere(r):\n S = 4 * pi * (r**2)\n return S", "def _topo_to_sphere(theta, radius):\n sph_phi = (0.5 - radius) * 180\n sph_theta = -theta\n return sph_phi, sph_theta", "def sphere_area(r):\n return 4*pi*r**2", "def sphere_radius(self) -> float:\n return self.GetSphereRadius()", "def spherical(self):\n radius = abs(self)\n theta = Vector.k_hat.angle(self)\n xy_projection = Vector(self.i, self.j, 0) or Vector.i_hat\n phi = Vector.i_hat.angle(xy_projection)\n return radius, theta, phi", "def circlesector_area(angle, radius):\n return 2*numpy.pi*radius*sectorangle/360", "def radius(s, n):\n return s*0.5/np.sin(np.pi/n)", "def sphere_simple(radius, precision=0.0001):\n center = np.ceil(radius)\n extent = center*2 + 1\n rr, cc, zz = np.mgrid[0:extent, 0:extent, 0:extent]\n\n return sphere(rr, cc, zz, (center, center, center), radius, precision)", "def circumference_of_circle(radius: float) -> float:\n return 2 * pi * radius", "def circle_area(radius):\n return numpy.pi*radius**2", "def radius(self):\n return self.get_planet_radius(unit='Rjup')", "def _is_full_circle_rad(thetamin, thetamax):\n return abs(abs(thetamax - thetamin) - 2 * np.pi) < 1.74e-14", "def R(theta, pkg=np):\n theta *= conversion_factor\n return self.major_radius + self.minor_radius * pkg.cos(\n theta + self.triangularity * pkg.sin(theta)\n )", "def _radius_at_fraction_of_total_cas(self, fraction):\n image = self._tonemapped\n center = (self._xc_asym, self._yc_asym)\n r_upper = self._petro_extent_cas * self._rpetro_circ_centroid#rpetro_circ\n\n r, flag = _radius_at_fraction_of_total_circ(image, center, r_upper, fraction)\n self.flag = max(self.flag, flag)\n\n if np.isnan(r) or (r <= 0.0):\n print('[CAS] Invalid radius_at_fraction_of_total.')\n self.flag = 1\n r = -99.0 # invalid\n\n return r", "def area_sphere(r):\n check_positive(r)\n area = 4 * np.pi * r**2\n return area", "def spherical_theta(v: 'Vector') -> FLOAT:\n\treturn np.arccos(np.clip(v.z, -1., 1.))", "def stereographic_polar(point):\n\n theta,phi = point\n\n if phi%(2.0*np.pi) == 0:\n print('Stereographic projection of the pole (0,0,1) is undefined. Returning 0 vector.')\n return np.zeros(3)\n else:\n r = np.sin(phi)/(1-np.cos(phi))\n return np.array([r,theta])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Literal implementation of CRW96 Eq 6 for two winds w, w1. Returns the radius for a given pair of angles th and th1 in terms of the momentum rates injected by the two winds.
def _radius_eq6(w, w1, th, th1):
    numerator = w.Jdot(th) + w1.Jdot(th1)
    denominator = (w.Pidot_r(th) + w1.Pidot_r(th1))*np.cos(th) \
        - (w.Pidot_z(th) + w1.Pidot_z(th1))*np.sin(th)
    return numerator/denominator
[ "def _radius_eq23(th, th1):\n return np.sin(th1)/np.sin(th+th1)", "def __init__(self, w, w1):\n self.w = w # \"inner\" wind\n self.w1 = w1 # \"outer\" wind\n\n # We save the values of theta and theta1, so we can use them\n # to find an initial estimate of theta1 for the next angle\n # theta\n self.th1_save = None\n self.th_save = None\n\n # Pre-calculate the on-axis radius of the shell\n self.beta = self.w.axial_momentum_flux / self.w1.axial_momentum_flux\n self.R0 = np.sqrt(self.beta)/(1.0 + np.sqrt(self.beta))", "def trapezoid_conv(w0, w1):\n\n (x00, y00), (x01, y01) = w0\n (x10, y10), (x11, y11) = w1\n\n c = intersection((x00, x01), (x10, x11))\n if not len(c):\n return 0\n x0, x1 = c\n\n if x00 == x01 or x10 == x11:\n return 0\n\n z00 = ((x0 - x00)*y01 + (x01 - x0)*y00) / (x01 - x00)\n z01 = ((x1 - x00)*y01 + (x01 - x1)*y00) / (x01 - x00)\n z10 = ((x0 - x10)*y11 + (x11 - x0)*y10) / (x11 - x10)\n z11 = ((x1 - x10)*y11 + (x11 - x1)*y10) / (x11 - x10)\n\n return aligned_trapezoid_conv((z00, z01), (z10, z11)) * (x1 - x0)", "def get_winkeldiff(w1, w2):\n\n x = get_winkel(w1 - w2)\n if x > 180:\n x = 360 - x\n return x", "def derivative(init_conditions: vector, t,\n l1: float, l2: float, m1: float,\n m2: float, g: float = 9.81) -> vector:\n theta_1, theta_2, w1, w2 = init_conditions\n\n sine = np.sin(theta_1 - theta_2)\n cosine = np.cos(theta_1 - theta_2)\n\n denominator1 = l1 * (m1 + m2 * sine * sine)\n denominator2 = l2 * (m1 + m2 * sine * sine)\n\n numerator1 = (m2 * g * np.sin(theta_2) * cosine\n - m2 * sine * (l1 * w1 * w1 * cosine + l2 * w2 * w2)\n - (m1 + m2) * g * np.sin(theta_1))\n\n numerator2 = ((m1 + m2) * (l1 * w1 * w1 * sine\n - g * np.sin(theta_2) + g * np.sin(theta_1) * cosine)\n + m2 * l2 * w2 * w2 * sine * cosine)\n\n w1_dot = numerator1 / denominator1\n w2_dot = numerator2 / denominator2\n\n return np.array([w1, w2, w1_dot, w2_dot])", "def wavelength(state_1, state_2):\n return 1e9 * h * c / interval(state_1, state_2)", "def calc_sample_thicknesses(parameters):\n sample_radius = parameters['thichness_sample'] / 2.0 # [mm]\n\n# a = (parameters['sample_distance']**2.0 - sample_radius**2.0) / \\\n# (1.0 + np.tan(parameters['thetas'])**2.0) # [mm^2]\n# b = (parameters['sample_distance'] * np.tan(parameters['thetas'])) / \\\n# (1.0 + np.tan(parameters['thetas'])**2.0) # [mm]\n#\n# x_1 = b - np.sqrt(b**2 - a) # [mm]\n# x_2 = b + np.sqrt(b**2 - a) # [mm]\n\n a = (parameters['sample_distance'] * np.tan(parameters['thetas'])) / \\\n (1.0 + np.tan(parameters['thetas'])**2.0) # [mm]\n\n b = (sample_radius**2 - parameters['sample_distance']**2 +\n (parameters['sample_distance']**2 /\n (1.0 + np.tan(parameters['thetas'])**2.0))) * \\\n ((np.tan(parameters['thetas'])**2.0) /\n ((1.0 + np.tan(parameters['thetas'])**2.0))) # [mm^2]\n\n x_1 = a - np.sqrt(b) # [mm]\n x_2 = a + np.sqrt(b) # [mm]\n\n # Set nans (complex) and negative (?) 
to 0\n x_1 = np.nan_to_num(x_1)\n x_1[x_1 < 0] = 0.0\n x_2 = np.nan_to_num(x_2)\n x_2[x_2 < 0] = 0.0\n\n # Calc sample cross section\n y_1 = x_1 / np.tan(parameters['thetas']) # Element wise, [mm]\n y_2 = x_2 / np.tan(parameters['thetas']) # Element wise, [mm]\n\n dx = x_2 - x_1 # [mm]\n dy = y_2 - y_1 # [mm]\n sample_thicknesses = np.sqrt(dx**2.0 + dy**2.0) # [mm]\n\n # Set all angles that do not pass throught the sample (thickness==0) to 0\n thetas = parameters['thetas'].copy()\n thetas[sample_thicknesses == 0] = 0\n return sample_thicknesses, thetas", "def ccw(p1, p2, p3):\n return (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])", "def wavenumbers(state_1, state_2):\n return 1e-2 * interval(state_1, state_2) / (h * c)", "def component_Sersic_r2(ns, weights, hlrs):\n t = np.array(weights).sum()\n ws = [w / t for w in weights]\n r2s = [Sersic_r2_over_hlr(n) * hlr for n, hlr in zip(ns, hlrs)]\n return np.sqrt(reduce(lambda x,y:x+y, [r2**2 * w for r2, w in zip(r2s, ws)]))", "def comp_radius_mid_wind(self):\n\n Rbo = self.get_Rbo()\n Hslot = self.comp_height()\n Hwind = self.comp_height_wind()\n if self.is_outwards():\n return Rbo + Hslot - Hwind / 2\n else:\n return Rbo - Hslot + Hwind / 2", "def brewster(n1, n2):\n\n na, nb = __setup_medium_indexes(n1, n2)\n\n # calculate brewster angle\n if na[2] == nb[2]:\n thb = None\n else:\n thb = float(np.rad2deg(\n np.arctan((na[2] * nb[2] / na[0] ** 2) * np.sqrt((na[0] ** 2 - nb[0] ** 2) / (na[2] ** 2 - nb[2] ** 2)))))\n\n # calculate critical angle for TM\n if na[2] > nb[2]:\n thc_tm = float(np.rad2deg(\n np.arcsin(na[2] * nb[2] / np.sqrt(na[2] ** 2 * nb[2] ** 2 + na[0] ** 2 * (na[2] ** 2 - nb[2] ** 2)))))\n else:\n thc_tm = float(np.rad2deg(\n np.arcsin(na[2] * nb[2] / np.sqrt(na[2] ** 2 * nb[2] ** 2 + nb[0] ** 2 * (nb[2] ** 2 - na[2] ** 2)))))\n\n # calculate critical angle for TE\n if na[1] > nb[1]:\n thc_te = float(np.rad2deg(np.arcsin(nb[1] / na[1])))\n else:\n thc_te = float(np.rad2deg(np.arcsin(na[1] / nb[1])))\n\n # return results\n return thb, thc_te, thc_tm", "def points2radius(p1, p2, p3):\n a = np.linalg.norm(p3 - p2)\n b = np.linalg.norm(p3 - p1)\n c = np.linalg.norm(p2 - p1)\n s = (a + b + c) / 2\n return a*b*c / 4 / np.sqrt(s * (s - a) * (s - b) * (s - c))", "def getbeamparameters(w1, w2, x, wl, option=0, plot=0):\n # from Alessandros python script\n # In case the two waists are equal the beam waist is in the center of\n # the two.\n if w2 == w1:\n z = x/2.\n else:\n # define these to clean up the notation\n delta = w2**2-w1**2\n lp = wl/pi\n # define the coefficients of z in the quadratic equation in standard form\n a = delta+lp**2*4*x**2/delta\n b = (lp**2*4*x**3)/delta-(2*x*w1**2)\n c = (lp**2*x**4)/delta-(x*w1)**2\n\n # Solve the quadratic formula\n # This root corresponds to a waist between the measurements\n z1 = (- b-np.sqrt(b**2-4*a*c))/(2*a)\n\n # This root corresponts to a waist outside the measurements\n z2 = (- b+np.sqrt(b**2-4*a*c))/(2*a)\n if (b**2-4*a*c) < 0:\n z1 = 0\n z2 = 0\n print('No solution exists for this combination of measurements.')\n\n if option == 1:\n z = z1\n else:\n z = z2\n\n # Calculate zR\n rayleigh = wl/pi*(2*x*z+x**2)/(w2**2-w1**2)\n\n # turn zR into some other useful forms\n q0 = 1j*rayleigh\n waist_0 = np.sqrt(wl*rayleigh/pi)\n\n # decide which side the beam waist is on\n # if (w1 > w2):\n # origin = z\n # else:\n # origin = -z\n origin = z\n if option == 1:\n origin = -z\n # print(\n #'Guesses for curve fit \\n Beam waist: \\t {0:.3f} micro m\\nPositioned at \\t {1:.2f} mm 
from first waist measurement'.format(waist_0*1000,\n # origin))\n if option == 1:\n zrange = np.linspace(-origin*1.05, (x-origin)*1.05, 100)\n #plotbeam(waist_0, wl, zrange)\n #plt.vlines(-origin, 0, w1, color='r')\n #plt.vlines((x-origin), 0, w2, color='r')\n else:\n if w1 > w2:\n origin = z\n zrange = np.linspace(0, origin*1.05, 100)\n else:\n origin = z\n zrange = np.linspace(0, (x+origin)*1.05, 100)\n #plotbeam(waist_0, wl, zrange)\n #plt.vlines(origin, 0, w1, color='r')\n #plt.vlines((origin+x), 0, w2, color='r')\n if plot != 0:\n plt.show()\n\n return q0, waist_0, rayleigh, origin", "def pysurvey_distance(r1,r2):\n ra1=r1[0]\n dec1=r1[1]\n z1=r1[2]\n\n ra2=r2[:,0]\n dec2=r2[:,1]\n z2=r2[:,2]\n\n loc1 = location.convert2distance(ra1,dec1,z1,[0.,0.])\n loc2 = location.convert2distance(ra2,dec2,z2,[0.,0.])\n\n dist = mag([loc1[0]-loc2[0],loc1[1]-loc2[1],loc1[2]-loc2[2]])\n\n fake_vals = np.zeros(len(dist))\n\n return dist,fake_vals", "def distance(self, wd1, wd2):\n try:\n wdidx1 = self.vocabulary.index(wd1)\n wdidx2 = self.vocabulary.index(wd2)\n w1_vector = self.U[wdidx1]\n w2_vector = self.U[wdidx2]\n return min(cosine(w1_vector, w2_vector), 1.0) # nicer distance\n except Exception as e:\n print(e)", "def intRoexpwt(g1, g2, p, w, t):\n\n w = 10.0 ** (w / 10.0)\n uplimit = -(1 - w) * np.exp(-p * g2) * (2 + p * g2) / p\n - w * np.exp(-p * g2 / t) * (2 + p * g2 / t) / (p / t)\n\n lowlimit = -(1 - w) * np.exp(-p * g1) * (2 + p * g1) / p\n - w * np.exp(-p * g1 / t) * (2 + p * g1 / t) / (p / t)\n\n I = uplimit - lowlimit\n\n return I", "def fixed_radii_for_Nweights():\n\n # 1. D < 1.0 micron\n # CLASSIC dry radii [microns] - Bellouin et al 2011\n rn_pmlt1p0_microns = {'(NH4)2SO4': 9.5e-02, # accumulation mode\n 'NH4NO3': 9.5e-02, # accumulation mode\n 'NaCl': 1.0e-01, # generic sea salt (fine mode)\n 'CORG': 1.2e-01, # aged fosil fuel organic carbon\n 'CBLK': 3.0e-02} # soot\n\n rn_pmlt1p0_m={}\n for key, r in rn_pmlt1p0_microns.iteritems():\n rn_pmlt1p0_m[key] = r * 1e-06\n\n # 2. D < 10 micron\n\n # pm1 to pm10 median volume mean radius calculated from clearflo winter data (calculated volume mean diameter / 2.0)\n rn_pm10_microns = 0.07478 / 2.0\n # turn units to meters and place an entry for each aerosol\n rn_pm10_m = {}\n for key in rn_pmlt1p0_m.iterkeys():\n rn_pm10_m[key] = rn_pm10_microns * 1.0e-6\n\n # # old 2. D < 10 micron\n # # pm1 to pm10 median volume mean radius calculated from clearflo winter data (calculated volume mean diameter / 2.0)\n # pm1t10_rv_microns = 1.9848902137534531 / 2.0\n # # turn units to meters and place an entry for each aerosol\n # pm1t10_rv_m = {}\n # for key in rn_pmlt1p0_m.iterkeys():\n # pm1t10_rv_m[key] = pm1t10_rv_microns * 1.0e-6\n\n\n # 3. D < 2.5 microns\n # calculated from Chilbolton data (SMPS + GRIMM 2016)\n rn_pmlt2p5_microns = 0.06752 / 2.0\n\n rn_pmlt2p5_m = {}\n for key in rn_pmlt1p0_m.iterkeys():\n rn_pmlt2p5_m[key] = rn_pmlt2p5_microns * 1.0e-6\n\n # 4. 
2.5 < D < 10 microns\n # calculated from Chilbolton data (SMPS + GRIMM 2016)\n rn_2p5_10_microns = 2.820 / 2.0\n\n rn_2p5_10_m = {}\n for key in rn_pmlt1p0_m.iterkeys():\n rn_2p5_10_m[key] = rn_2p5_10_microns * 1.0e-6\n\n\n return \\\n rn_pmlt1p0_microns, rn_pmlt1p0_m, \\\n rn_pm10_microns, rn_pm10_m, \\\n rn_pmlt2p5_microns, rn_pmlt2p5_m, \\\n rn_2p5_10_microns, rn_2p5_10_m", "def line_of_sight(r1, r2, R):\n r1_norm = norm(r1)\n r2_norm = norm(r2)\n\n theta = np.arccos((r1 @ r2) / r1_norm / r2_norm)\n theta_1 = np.arccos(R / r1_norm)\n theta_2 = np.arccos(R / r2_norm)\n\n return (theta_1 + theta_2) - theta" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Literal implementation of CRW Eq 23. Gives the radius in terms of the two angles th and th1.
def _radius_eq23(th, th1):
    return np.sin(th1)/np.sin(th+th1)
[ "def _radius_eq6(w, w1, th, th1):\n numerator = w.Jdot(th) + w1.Jdot(th1)\n denominator = (w.Pidot_r(th) + w1.Pidot_r(th1))*np.cos(th) \\\n - (w.Pidot_z(th) + w1.Pidot_z(th1))*np.sin(th)\n return numerator/denominator", "def right_circular_cone(r,h):\n s = sqrt((r**2) + (h**2))\n return s", "def Wilkin_R90(i, t, R):\n\n def tangent_phi(x, y):\n \"\"\"\n Azimutal angle tangent to LOS\n \"\"\"\n tan_alpha = np.diff(y)/np.diff(x)\n return np.tan(np.radians(i))*tan_alpha\n x, y = R*np.cos(t), R*np.sin(t)\n RR, tt = R[:-1], t[:-1]\n xt = RR*(np.cos(tt)*np.cos(np.radians(i)) - np.sin(tt)*tangent_phi(x, y)*np.sin(np.radians(i)))\n yt = RR*np.sqrt(1- tangent_phi(x, y)**2)\n mask = np.isfinite(yt)\n Rt = np.hypot(xt[mask], yt[mask])\n tht = np.arctan2(yt[mask], xt[mask])\n Rt_int = interp1d(tht, Rt)\n return Rt_int(0.5*np.pi)", "def _get_radius(self) -> \"double\" :\n return _core.Arc2D__get_radius(self)", "def _get_radius(self) -> \"double\" :\n return _core.Cone__get_radius(self)", "def calculate_r3(self):\n logger.debug('Calculating O(r**3) terms')\n \n # Shorthand\n sign_psi = self.spsi\n sign_G = self.sG\n G2 = self.G2\n N_helicity = self.iota - self.iotaN\n B0 = self.B0\n G0 = self.G0\n I2 = self.I2\n X1c = self.X1c\n Y1c = self.Y1c\n Y1s = self.Y1s\n X20 = self.X20\n X2s = self.X2s\n X2c = self.X2c\n Y20 = self.Y20\n Y2s = self.Y2s\n Y2c = self.Y2c\n Z20 = self.Z20\n Z2s = self.Z2s\n Z2c = self.Z2c\n B20 = self.B20\n B1c = self.etabar * B0\n torsion = self.torsion\n curvature = self.curvature\n abs_G0_over_B0 = self.abs_G0_over_B0\n d_X1c_d_varphi = self.d_X1c_d_varphi\n d_Y1c_d_varphi = self.d_Y1c_d_varphi\n d_Z20_d_varphi = self.d_Z20_d_varphi\n\n # The expression below is computed in \"20190305-01 GarrenBoozer r2 corrected radius.nb\" in the section \"Approach of adding r**3 terms, assuming quasisymmetry\"\n # 20190714: To account for QH cases, changed iota -> iota_N where it occurs 3 lines below:\n flux_constraint_coefficient = (-4*B0**2*G0*X20**2*Y1c**2 + 8*B0**2*G0*X20*X2c*Y1c**2 - 4*B0**2*G0*X2c**2*Y1c**2 - \\\n 4*B0**2*G0*X2s**2*Y1c**2 + 8*B0*G0*B1c*X1c*X2s*Y1c*Y1s + 16*B0**2*G0*X20*X2s*Y1c*Y1s + \\\n 2*B0**2*I2*self.iotaN*X1c**2*Y1s**2 - G0*B1c**2*X1c**2*Y1s**2 - 4*B0*G0*B20*X1c**2*Y1s**2 - \\\n 8*B0*G0*B1c*X1c*X20*Y1s**2 - 4*B0**2*G0*X20**2*Y1s**2 - 8*B0*G0*B1c*X1c*X2c*Y1s**2 - \\\n 8*B0**2*G0*X20*X2c*Y1s**2 - 4*B0**2*G0*X2c**2*Y1s**2 - 4*B0**2*G0*X2s**2*Y1s**2 + \\\n 8*B0**2*G0*X1c*X20*Y1c*Y20 - 8*B0**2*G0*X1c*X2c*Y1c*Y20 - 8*B0**2*G0*X1c*X2s*Y1s*Y20 - \\\n 4*B0**2*G0*X1c**2*Y20**2 - 8*B0**2*G0*X1c*X20*Y1c*Y2c + 8*B0**2*G0*X1c*X2c*Y1c*Y2c + \\\n 24*B0**2*G0*X1c*X2s*Y1s*Y2c + 8*B0**2*G0*X1c**2*Y20*Y2c - 4*B0**2*G0*X1c**2*Y2c**2 + \\\n 8*B0**2*G0*X1c*X2s*Y1c*Y2s - 8*B0*G0*B1c*X1c**2*Y1s*Y2s - 8*B0**2*G0*X1c*X20*Y1s*Y2s - \\\n 24*B0**2*G0*X1c*X2c*Y1s*Y2s - 4*B0**2*G0*X1c**2*Y2s**2 - 4*B0**2*G0*X1c**2*Z20**2 - \\\n 4*B0**2*G0*Y1c**2*Z20**2 - 4*B0**2*G0*Y1s**2*Z20**2 - 4*B0**2*abs_G0_over_B0*I2*Y1c*Y1s*Z2c + \\\n 8*B0**2*G0*X1c**2*Z20*Z2c + 8*B0**2*G0*Y1c**2*Z20*Z2c - 8*B0**2*G0*Y1s**2*Z20*Z2c - \\\n 4*B0**2*G0*X1c**2*Z2c**2 - 4*B0**2*G0*Y1c**2*Z2c**2 - 4*B0**2*G0*Y1s**2*Z2c**2 + \\\n 2*B0**2*abs_G0_over_B0*I2*X1c**2*Z2s + 2*B0**2*abs_G0_over_B0*I2*Y1c**2*Z2s - 2*B0**2*abs_G0_over_B0*I2*Y1s**2*Z2s + \\\n 16*B0**2*G0*Y1c*Y1s*Z20*Z2s - 4*B0**2*G0*X1c**2*Z2s**2 - 4*B0**2*G0*Y1c**2*Z2s**2 - \\\n 4*B0**2*G0*Y1s**2*Z2s**2 + B0**2*abs_G0_over_B0*I2*X1c**3*Y1s*torsion + B0**2*abs_G0_over_B0*I2*X1c*Y1c**2*Y1s*torsion + \\\n B0**2*abs_G0_over_B0*I2*X1c*Y1s**3*torsion - 
B0**2*I2*X1c*Y1c*Y1s*d_X1c_d_varphi + \\\n B0**2*I2*X1c**2*Y1s*d_Y1c_d_varphi)/(16*B0**2*G0*X1c**2*Y1s**2)\n\n self.X3c1 = self.X1c * flux_constraint_coefficient\n self.Y3c1 = self.Y1c * flux_constraint_coefficient\n self.Y3s1 = self.Y1s * flux_constraint_coefficient\n self.X3s1 = self.X1s * flux_constraint_coefficient\n self.Z3c1 = 0\n self.Z3s1 = 0\n\n self.X3c3 = 0\n self.X3s3 = 0\n self.Y3c3 = 0\n self.Y3s3 = 0\n self.Z3c3 = 0\n self.Z3s3 = 0\n\n self.d_X3c1_d_varphi = self.d_d_varphi @ self.X3c1\n self.d_Y3c1_d_varphi = self.d_d_varphi @ self.Y3c1\n self.d_Y3s1_d_varphi = self.d_d_varphi @ self.Y3s1\n\n # The expression below is derived in the O(r**2) paper, and in \"20190318-01 Wrick's streamlined Garren-Boozer method, MHD.nb\" in the section \"Not assuming quasisymmetry\".\n # Note Q = (1/2) * (XYEquation0 without X3 and Y3 terms) where XYEquation0 is the quantity in the above notebook.\n Q = -sign_psi * B0 * abs_G0_over_B0 / (2*G0*G0) * (self.iotaN * I2 + mu0 * self.p2 * G0 / (B0 * B0)) + 2 * (X2c * Y2s - X2s * Y2c) \\\n + sign_psi * B0 / (2*G0) * (abs_G0_over_B0 * X20 * curvature - d_Z20_d_varphi) \\\n + I2 / (4 * G0) * (-abs_G0_over_B0 * torsion * (X1c*X1c + Y1s*Y1s + Y1c*Y1c) + Y1c * d_X1c_d_varphi - X1c * d_Y1c_d_varphi)\n predicted_flux_constraint_coefficient = - Q / (2 * sign_G * sign_psi)\n\n B0_order_a_squared_to_cancel = -sign_G * B0 * B0 * (G2 + I2 * N_helicity) * abs_G0_over_B0 / (2*G0*G0) \\\n -sign_G * sign_psi * B0 * 2 * (X2c * Y2s - X2s * Y2c) \\\n -sign_G * B0 * B0 / (2*G0) * (abs_G0_over_B0 * X20 * curvature - d_Z20_d_varphi) \\\n -sign_G * sign_psi * B0 * I2 / (4*G0) * (-abs_G0_over_B0 * torsion * (X1c*X1c + Y1c*Y1c + Y1s*Y1s) + Y1c * d_X1c_d_varphi - X1c * d_Y1c_d_varphi)\n\n logger.debug('max|flux_constraint_coefficient - predicted_flux_constraint_coefficient|: '\n f'{np.max(abs(flux_constraint_coefficient - predicted_flux_constraint_coefficient))}')\n logger.debug('max|flux_constraint_coefficient - B0_order_a_squared_to_cancel/(2*B0)|: '\n f'{np.max(abs(flux_constraint_coefficient - B0_order_a_squared_to_cancel/(2*B0)))}')\n\n if np.max(abs(flux_constraint_coefficient - predicted_flux_constraint_coefficient)) > 1e-7 \\\n or np.max(abs(flux_constraint_coefficient - B0_order_a_squared_to_cancel/(2*B0))) > 1e-7:\n logger.warning(\"Methods of computing lambda disagree!! 
Higher nphi resolution might be needed.\")\n\n self.flux_constraint_coefficient = flux_constraint_coefficient\n self.B0_order_a_squared_to_cancel = B0_order_a_squared_to_cancel\n\n if self.helicity == 0:\n self.X3c1_untwisted = self.X3c1\n self.Y3c1_untwisted = self.Y3c1\n self.Y3s1_untwisted = self.Y3s1\n self.X3s1_untwisted = self.X3s1\n self.X3s3_untwisted = self.X3s3\n self.X3c3_untwisted = self.X3c3\n self.Y3c3_untwisted = self.Y3c3\n self.Y3s3_untwisted = self.Y3s3\n self.Z3s1_untwisted = self.Z3s1\n self.Z3s3_untwisted = self.Z3s3\n self.Z3c1_untwisted = self.Z3c1\n self.Z3c3_untwisted = self.Z3c3\n else:\n angle = -self.helicity * self.nfp * self.varphi\n sinangle = np.sin(angle)\n cosangle = np.cos(angle)\n self.X3s1_untwisted = self.X3s1 * cosangle + self.X3c1 * sinangle\n self.X3c1_untwisted = self.X3s1 * (-sinangle) + self.X3c1 * cosangle\n self.Y3s1_untwisted = self.Y3s1 * cosangle + self.Y3c1 * sinangle\n self.Y3c1_untwisted = self.Y3s1 * (-sinangle) + self.Y3c1 * cosangle\n self.Z3s1_untwisted = self.Z3s1 * cosangle + self.Z3c1 * sinangle\n self.Z3c1_untwisted = self.Z3s1 * (-sinangle) + self.Z3c1 * cosangle\n sinangle = np.sin(3*angle)\n cosangle = np.cos(3*angle)\n self.X3s3_untwisted = self.X3s3 * cosangle + self.X3c3 * sinangle\n self.X3c3_untwisted = self.X3s3 * (-sinangle) + self.X3c3 * cosangle\n self.Y3s3_untwisted = self.Y3s3 * cosangle + self.Y3c3 * sinangle\n self.Y3c3_untwisted = self.Y3s3 * (-sinangle) + self.Y3c3 * cosangle\n self.Z3s3_untwisted = self.Z3s3 * cosangle + self.Z3c3 * sinangle\n self.Z3c3_untwisted = self.Z3s3 * (-sinangle) + self.Z3c3 * cosangle", "def circumCircle(p1, p2, p3):\n\n p1sm = squaredNorm(p1)\n x1 = p1[0]\n y1 = p1[1]\n p2sm = squaredNorm(p2)\n x2 = p2[0]\n y2 = p2[1]\n p3sm = squaredNorm(p3)\n x3 = p3[0]\n y3 = p3[1]\n a = numpy.linalg.det(\n numpy.array([[x1, y1, 1],\n [x2, y2, 1],\n [x3, y3, 1]]))\n d = numpy.linalg.det(\n -numpy.array([[p1sm, y1, 1],\n [p2sm, y2, 1],\n [p3sm, y3, 1]]))\n e = numpy.linalg.det(\n numpy.array([[p1sm, x1, 1],\n [p2sm, x2, 1],\n [p3sm, x3, 1]]))\n f = numpy.linalg.det(\n -numpy.array([[p1sm, x1, y1],\n [p2sm, x2, y2],\n [p3sm, x3, y3]]))\n circumCenter = Vector2(-d/(2*a), -e/(2*a))\n\n denom = 4*math.sq(a) - f/a\n\n circumRadius2 = (math.sq(d) + math.sq(e)) / (4*math.sq(a)) - f/a\n\n if circumRadius2 > 0:\n circumRadius = math.sqrt(circumRadius2)\n else:\n lengths = [(p2-p1).magnitude(),\n (p3-p2).magnitude(),\n (p1-p3).magnitude()]\n lengths.sort()\n circumRadius = (lengths[1] + lengths[2]) / 4.0\n sys.stderr.write(\"circumcircle: side lengths^2 are %s -> improvised radius = %s\\n\"\n % (lengths, circumRadius))\n\n return circumCenter, circumRadius", "def R(theta, pkg=np):\n theta *= conversion_factor\n return self.major_radius + self.minor_radius * pkg.cos(\n theta + self.triangularity * pkg.sin(theta)\n )", "def bottom_circumference(self):\n\t\treturn 2 * PI * self.r_1", "def circle_corner_arclen(h1, h2, R):\n return R*(np.arccos(h2 / R) - np.arcsin(h1 / R))", "def _get_radius(self) -> \"double\" :\n return _core.Cylinder__get_radius(self)", "def test_triangle_get_circumscribed_radius(self):\n triangle = RegularTriangle(0, 5)\n self.assertEqual(triangle.get_circumscribed_radius(), 2.8867513459481287)", "def circumference(self):\n\t\treturn 2 * PI * self.r", "def gRD(RD):\r\n q = 0.0057565\r\n pi = math.pi\r\n return 1 / math.sqrt(1 + 3 * q**2 * (RD**2)/(pi**2))", "def radius(self, theta, method='brent', full=False):\n def _radius(theta):\n \"\"\"Helper function to find the shell radius for a single 
angle, theta\"\"\"\n if theta == 0.0:\n # special treatment for the axis\n return self.R0\n elif theta >= self.th_infty:\n # Indicate that we have gone too far\n return -1.0\n else:\n if method == 'fsolve':\n if self.th1_save is None:\n # For the first off-axis angle, we use the fact\n # that R0 tan(theta) ~= (1 - R0) tan(theta1) for\n # small theta\n th1_guess = theta*self.R0 / (1.0 - self.R0)\n else:\n # For subsequent angles, we do geometric extrapolation\n th1_guess = self.th1_save*theta/self.th_save \n # The tricky bit here is getting th1_guess to be close\n # enough to the true solution. If it is not, then the\n # solver will fail\n theta1 = _solve_for_th1(self.w, self.w1, theta,\n th1_guess, method=method)\n else:\n # Assume other methods require root to be bracketed\n # Must be between 0 and th1_infty\n if self.th1_save is None:\n a, b = 1e-10, self.th1_infty\n else:\n a, b = self.th1_save, self.th1_infty\n\n theta1 = _solve_for_th1(self.w, self.w1, theta,\n bounds=[a, b], method=method)\n if DEBUG_LEVEL > 0:\n print('+++', self.th_infty - theta, self.th1_infty - theta1)\n self.th_save = theta\n self.th1_save = theta1\n return _radius_eq23(theta, theta1)\n\n try:\n # case where theta is iterable\n rslt = np.empty_like(theta)\n th1_rslt = np.empty_like(theta)\n for i, t in enumerate(theta):\n r = _radius(t)\n if r > 0.0:\n rslt[i] = r\n th1_rslt[i] = self.th1_save\n else:\n # assume we have got to th_max\n # so fill the remainder with NaNs\n rslt[i:] = np.nan\n th1_rslt[i:] = np.nan\n break\n if full:\n return rslt, th1_rslt\n else:\n return rslt\n except TypeError:\n # fall-over case where theta is scalar\n if full:\n return _radius(theta), self.th1_save\n else:\n return _radius(theta)", "def rad_chord(l, h):\n\n return (pow(l, 2) + 4 * pow(h, 2)) / (8 * h)", "def get_front_wheel_radius():\n\t# I ran the following code\n\t# cozmo_drive_straight(robot, 3.14 * 2 * 50, 30)\n\t# and I counted 13 rotations of 120 deg (the wheels have three radial marks)\n\t# Thus, 13/3 rotations takes you pi * 2 * r * (13/3) = pi * 2 * 50 mm\n\t# so r = 50 * (3/13)\n\treturn (50 * 3) / 13", "def points2radius(p1, p2, p3):\n a = np.linalg.norm(p3 - p2)\n b = np.linalg.norm(p3 - p1)\n c = np.linalg.norm(p2 - p1)\n s = (a + b + c) / 2\n return a*b*c / 4 / np.sqrt(s * (s - a) * (s - b) * (s - c))", "def test_tan_wcs(self):\n\n xPixList = []\n yPixList = []\n\n tanWcs = self.wcs.getTanWcs()\n wcsRa = []\n wcsDec = []\n for xx in np.arange(0.0, 4001.0, 1000.0):\n for yy in np.arange(0.0, 4001.0, 1000.0):\n xPixList.append(xx)\n yPixList.append(yy)\n\n pt = afwGeom.Point2D(xx ,yy)\n skyPt = tanWcs.pixelToSky(pt).getPosition()\n wcsRa.append(skyPt.getX())\n wcsDec.append(skyPt.getY())\n\n wcsRa = np.radians(np.array(wcsRa))\n wcsDec = np.radians(np.array(wcsDec))\n\n xPixList = np.array(xPixList)\n yPixList = np.array(yPixList)\n\n raTest, decTest = \\\n self.wcs._camera.raDecFromTanPixelCoords(xPixList, yPixList,\n [self.wcs._chip_name]*len(xPixList))\n\n for rr1, dd1, rr2, dd2 in zip(raTest, decTest, wcsRa, wcsDec):\n pp = CelestialCoord(rr1*galsim.radians, dd1*galsim.radians)\n\n dist = \\\n pp.distanceTo(CelestialCoord(rr2*galsim.radians, dd2*galsim.radians))/galsim.arcsec\n\n msg = 'error in tanWcs was %e arcsec' % dist\n self.assertLess(dist, 0.001, msg=msg)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the index of the point from allpoints closest to the passed point. Returns None if two points are equidistant.
def get_closest(point, allpoints):
    best_index = None
    best_distance = 999999999
    is_dupe = False
    for index, p in enumerate(allpoints):
        # if p == point:
        #     continue
        dist = getdist(point, p)
        if dist <= best_distance:
            if dist == best_distance:
                is_dupe = True
            else:
                is_dupe = False
            best_distance = dist
            best_index = index
    if is_dupe:
        return None
    return best_index
[ "def get_closest(self, point):\n distance = (self.dpath[:, 1] - point[1]) ** 2 + (self.dpath[:, 0] - point[0]) ** 2\n i = np.where(distance == distance.min())\n return i[0][0]", "def nearest_point(point, points):\n\n # Note this uses euculidean distances -- so beware possible inaccuracy\n # using it on geographic coordinates at high latitudes. (Not sure how\n # extreme the situation has to be for it to matter -- does it ever?)\n dist_2 = numpy.sum((points.transpose() - point) ** 2, axis=1)\n nearest_point_index = numpy.argmin(dist_2)\n return points.transpose()[nearest_point_index], nearest_point_index", "def find_closest_point_index(self, x, y, return_distance=False):\n assert len(self.exterior) > 0, (\n \"Cannot find the closest point on a polygon which's exterior \"\n \"contains no points.\")\n distances = []\n for x2, y2 in self.exterior:\n d = (x2 - x) ** 2 + (y2 - y) ** 2\n distances.append(d)\n distances = np.sqrt(distances)\n closest_idx = np.argmin(distances)\n if return_distance:\n return closest_idx, distances[closest_idx]\n return closest_idx", "def brute_force_closest(point, pointlist):\n import sys\n pid, d = -1, sys.maxint\n for i, p in enumerate(pointlist):\n nd = norm(point-p) \n if nd < d:\n d = nd\n pid = i\n return pointlist[pid]", "def find_closest_keypoint(point, keypoints):\n \n dx = point[0] - keypoints[0]\n dy = point[1] - keypoints[1]\n \n ds = np.hypot(dx, dy)\n \n minind = np.argmin(ds)\n \n mindx = dx[minind]\n mindy = dy[minind]\n mindist = ds[minind]\n closest_point = keypoints[:, minind]\n \n return minind, mindx, mindy, mindist, closest_point", "def closest_point(pt, xy):\n xy = np.asarray(xy)\n dist_2 = np.sum((xy - pt) ** 2, axis=1)\n return np.argmin(dist_2)", "def closest_point_finder( point, pointsArray ):\n\n mindex = 0\n mindist = 99999999999999999\n for i in range(len(pointsArray)):\n dist = np.linalg.norm( point - pointsArray[i] )\n if dist < mindist:\n mindist = dist\n mindex = i\n return mindex", "def closest_point(self,graph, current_point):\n closest_point = None\n dist = 100000\n for p in graph.nodes:\n d = LA.norm(np.array(p) - np.array(current_point))\n if d < dist:\n closest_point = p\n dist = d\n return closest_point", "def getClosestPoint(self, point: 'SbVec2f') -> \"SbVec2f\":\n return _coin.SbBox2f_getClosestPoint(self, point)", "def find_nearest_set_point(self, p):\n #print \"I'm in permutations_by_transpositions.py in find_nearest_set_point\"\n # converting point\n c = [-2 * x for x in p]\n return self.find_min_of_linear_function(c)\n #qres.put_nowait(self.find_min_of_linear_function(c))", "def closest(reference,points):\n min_dis = float('inf')\n for point in points:\n dis = distance(reference,point)\n if dis < min_dis:\n min_dis = dis\n closest_point = point\n return closest_point, min_dis", "def get_closest_waypoint_idx(self):\n x = self.pose.pose.position.x\n y = self.pose.pose.position.y\n # The first 1 is for closest. The second 1 is for the index element.\n closest_idx = self.waypoints_tree.query([x, y], 1)[1]\n\n # Check if the closest waypoint is ahead or behind the ego car.\n closest_2d = self.waypoints_2d[closest_idx]\n prev_2d = self.waypoints_2d[closest_idx - 1]\n closest_vect = np.array(closest_2d)\n prev_vector = np.array(prev_2d)\n curr_vector = np.array([x, y])\n if np.dot(closest_vect - prev_vector, curr_vector - closest_vect) > 0:\n # The closest waypoint is behind. 
Pick the next index.\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n\n return closest_idx", "def getClosestPoint(self, point: 'SbVec3f') -> \"SbVec3f\":\n return _coin.SbLine_getClosestPoint(self, point)", "def on_point(pos, points, limit):\n for ind, point in enumerate(points):\n try:\n if np.linalg.norm(pos - point.xy) <limit:\n return ind\n except AttributeError:\n if np.linalg.norm(pos - point) < limit:\n return ind\n return -1", "def getClosestPoint(self, point: 'SbVec3f') -> \"SbVec3f\":\n return _coin.SbBox3f_getClosestPoint(self, point)", "def _get_closest_point_in_point_cloud(self, pixel):\n # Select only points that are in front.\n fwd_points = self.points[np.where(self.points[:, 2] > 0.0)]\n # Select x and y.\n pc_xy = fwd_points[:, 0:2]\n # Select z\n pc_z = fwd_points[:, 2]\n # Divize x, y by z\n normalized_pc = pc_xy / pc_z[:, None]\n xy = np.array([pixel.x, pixel.y]).transpose()\n # Compute distance\n dist = np.sum((normalized_pc - xy)**2, axis=1)\n # Select index of the closest point.\n closest_index = np.argmin(dist)\n # Return the closest point.\n return Location(fwd_points[closest_index][0],\n fwd_points[closest_index][1],\n fwd_points[closest_index][2])", "def get_closest_point(central_point, options):\n distances = [np.linalg.norm(central_point - option) for option in options]\n minimum = np.argmin(distances)\n return options[minimum]", "def find_closest_vertex(self,point):\r\n t, verts = self.t, self.verts \r\n dist = np.zeros(len(t))\r\n for i in range(len(t)):\r\n dist[i] = (point[0] - verts[i,0])**2 + (point[1] - verts[i,1])**2\r\n ind = np.argmin(dist)\r\n t0 = t[ind]\r\n return t0", "def getClosestPoint(self, point: 'SbVec3d') -> \"SbVec3d\":\n return _coin.SbDPLine_getClosestPoint(self, point)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists the Amazon DynamoDB tables for the current account.
def list_tables(self):
    try:
        tables = []
        for table in self.dyn_resource.tables.all():
            print(table.name)
            tables.append(table)
    except ClientError as err:
        logger.error(
            "Couldn't list tables. Here's why: %s: %s",
            err.response['Error']['Code'], err.response['Error']['Message'])
        raise
    else:
        return tables
[ "def list_tables():\r\n tables = []\r\n\r\n try:\r\n table_list = DYNAMODB_CONNECTION.list_tables()\r\n while True:\r\n for table_name in table_list[u'TableNames']:\r\n tables.append(get_table(table_name))\r\n\r\n if u'LastEvaluatedTableName' in table_list:\r\n table_list = DYNAMODB_CONNECTION.list_tables(\r\n table_list[u'LastEvaluatedTableName'])\r\n else:\r\n break\r\n\r\n except DynamoDBResponseError as error:\r\n dynamodb_error = error.body['__type'].rsplit('#', 1)[1]\r\n\r\n if dynamodb_error == 'ResourceNotFoundException':\r\n logger.error('No tables found')\r\n elif dynamodb_error == 'AccessDeniedException':\r\n logger.debug(\r\n 'Your AWS API keys lack access to listing tables. '\r\n 'That is an issue if you are trying to use regular '\r\n 'expressions in your table configuration.')\r\n elif dynamodb_error == 'UnrecognizedClientException':\r\n logger.error(\r\n 'Invalid security token. Are your AWS API keys correct?')\r\n else:\r\n logger.error(\r\n (\r\n 'Unhandled exception: {0}: {1}. '\r\n 'Please file a bug report at '\r\n 'https://github.com/sebdah/dynamic-dynamodb/issues'\r\n ).format(\r\n dynamodb_error,\r\n error.body['message']))\r\n\r\n except JSONResponseError as error:\r\n logger.error('Communication error: {0}'.format(error))\r\n sys.exit(1)\r\n\r\n return tables", "def list_tables(self, exclusive_start_table_name=None, limit=None):\n params = {}\n if exclusive_start_table_name is not None:\n params['ExclusiveStartTableName'] = exclusive_start_table_name\n if limit is not None:\n params['Limit'] = limit\n return self.make_request(action='ListTables',\n body=json.dumps(params))", "def list_tables(self):\n inspector = inspect(self.engine)\n print(inspector.get_table_names(self.schema))", "def getAllTables (self):\n\n return self.tables", "def listTables(self, instance):\n raise NotImplementedException()", "def tables(self):\n names = self.client.getTableNames()\n return names", "def showTables(self):\n self.tablesList.clear()\n self.dbMan.putNameDatabase(\n self.databasesList.currentItem().text())\n self.tablesList.addItems(self.dbMan.getListNamesTables())", "def show_tables(cls):\n for tbl in cls.engine.table_names():\n print(tbl)", "def table_list():\n db = CrawlDBI.DBI(dbtype='hpss', dbname='sub')\n db._dbobj.tbl_prefix = 'syscat.'\n rows = db.select(table='tables',\n fields=[\"substr(tabname, 1, 30) as \\\"Table\\\"\",\n \"substr(tabschema, 1, 30) as \\\"Schema\\\"\",\n \"type\"],\n where=\"tabschema = 'HPSS'\")\n return rows", "def show_tables(self): \n sql_command = (\"SELECT name FROM sqlite_master \"\n \"WHERE type='table' \"\n \"ORDER BY name;\") \n all_tables = self.fetch_data(sql_command) \n all_tables = [tb_name[0] for tb_name in all_tables]\n return all_tables", "def alltables(self):\n # Export all the tables information to the json files\n query = 'SELECT name FROM sqlite_master WHERE type = \"table\";'\n df_data = self.execute_sql(query)\n tablenames = df_data['name'].values.tolist()\n\n for i in tablenames:\n print(i)\n query = 'SELECT * FROM ' + i + ';'\n df_data = self.execute_sql(query)\n self.jsonwriter(df_data, i)", "def get_tables(self):\n query = self.osqlqry.get_tables()\n logger.info(u'Tables query: {0}'.format(query))\n for tabular_result in self.execute_query(query):\n for row in tabular_result[0]:\n yield (row[0], row[1])", "def tables():\n cur.execute(\n \"SELECT name FROM sqlite_master WHERE type='table'\")\n tables = [t for [t] in cur.fetchall()]\n print('\\n'.join(tables))", "def _get_table_list(self, con, objects):\n # basic sql\n 
tables_sql = (\"SELECT table_schema as s, table_name as t \"\n \"FROM tables WHERE is_system_table=false AND is_temp_table=false\")\n # extra where clause to find only specific tables\n where = []\n if len(objects) == 0:\n # Means all. We are happy with the default sql\n pass\n else:\n for o in objects:\n (schema, dot, table) = o.partition('.')\n if table == '':\n # we have a schema only\n where.append(\"table_schema='{s}'\".format(s=schema))\n else:\n # we have a table\n where.append(\n \"table_schema='{s}' AND table_name='{t}'\".format(\n t=table, s=schema))\n\n if len(where) > 0:\n tables_sql += ' AND ((' + ') OR ('.join(where) + '))'\n\n tret = con.execute(tables_sql).fetchall()\n return tret", "def getTables(self):\n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n c = self.conn.execute(\"select * from sqlite_master where type='table'\")\n return c.fetchall()", "def db_tables():\n\n from . import db\n print(' '.join(db.Base.metadata.tables))", "def show_tablespaces(self):\n sql = \"SELECT tablespace_name FROM dba_tablespaces ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#TABLESPACE}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print ( json.dumps({'data': lst}) )", "def db_tables(self):\r\n with self.__conn.cursor() as cursor:\r\n cursor.execute(\"SELECT table_name FROM user_tables\")\r\n\r\n table_names = [x[0] for x in cursor.fetchall()]\r\n return table_names", "def list_tables(\n self,\n ) -> Callable[[metastore.ListTablesRequest], metastore.ListTablesResponse]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"list_tables\" not in self._stubs:\n self._stubs[\"list_tables\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.bigquery.biglake.v1.MetastoreService/ListTables\",\n request_serializer=metastore.ListTablesRequest.serialize,\n response_deserializer=metastore.ListTablesResponse.deserialize,\n )\n return self._stubs[\"list_tables\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a movie to the table.
def add_movie(self, title, year, plot, rating):
    try:
        self.table.put_item(
            Item={
                'year': year,
                'title': title,
                'info': {'plot': plot, 'rating': Decimal(str(rating))}})
    except ClientError as err:
        logger.error(
            "Couldn't add movie %s to table %s. Here's why: %s: %s",
            title, self.table.name,
            err.response['Error']['Code'], err.response['Error']['Message'])
        raise
[ "def add_movie(self, new_movie):\r\n self.movies.append(Movie(new_movie[0], new_movie[1], new_movie[2], new_movie[3]))", "def add_movie(self, movie: Movie):\r\n raise NotImplementedError", "def add_movie():\n movies.append(create_movie())\n print(\"\\nYour movie was successfully added!\")\n print(f\"Movies currently on your database: {len(movies)}\")", "def add_movie(movies):\n new_title = get_valid_selection(\"Title\")\n new_year = get_valid_year()\n new_category = get_valid_selection(\"Category\")\n movies.add_movie(Movie(new_title, new_year, new_category, False))\n print(\"{} ({} from {}) added to movie list\".format(new_title, new_category, new_year))\n movies.sort_movies(SORT_CONDITION)", "def add_movie(conn, *, id_parse=ACTOR_ID_PARSE, info_cap=MAX_INFO_SIZE):\n print('adding new movie')\n printc('b',\n '** Note ** : if release time is left blank, current date will be assumed. '\n 'To enter actors, provide each actor\\'s id #, space-separated. Actor ids are '\n 'not required, but a director id is. If the actor is a main actor, '\n 'enter the actor id with a * at its end (without space), e.g. 12345*.'\n )\n title, genre, url, rating, budget, gross_income, director_id, studio, actors, info = menu_selections(\n 'title', 'genre', 'url (at most 100 chars)', 'rating (e.g. G, PG-13)',\n 'budget ($)', 'gross revenue($)', 'director id', 'studio (at most 20 chars)',\n 'actor ids\\0', f'additional info/summary [{info_cap} chars max]\\0'\n )\n info = truncate(info, info_cap)\n # just take the date as today\n# date = custom_select(\n# \"Enter release date (empty field sets date to today)\", get_date)[1]\n# if not date:\n# date = dt.date.today()\n \n actors, is_main = zip(*(\n actor_id.groups() for actor_id in id_parse.finditer(actors)\n ))\n is_main = tuple('t' if m else 'f' for m in is_main)\n roles = tuple(truncate(input(f'enter role for actor {a} (at most 50 chars): '),50) for a in actors)\n \n\n conn.autocommit = False\n with conn.cursor() as cur:\n # IMPORTANT -- make this a transaction that succeeds only if both parts\n # (adding movie and actors) succeeds\n try:\n cur.execute(\n \"\"\"\n INSERT INTO movie\n (title, genre, url, rating, budget, gross_income, director_id, studio, summary, date_released)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, CURRENT_DATE) RETURNING id;\"\"\",\n (title, genre, url, rating, budget, gross_income, director_id, studio, info)\n )\n movie_id = cur.fetchone()[0]\n \n execute_batch(cur,\n \"\"\"\n INSERT INTO act\n (actor_id, movie_id, if_main, role)\n VALUES (%s, %s, %s, %s);\"\"\",\n list(zip(actors, [movie_id]*len(actors), is_main, roles))\n )\n\n printc('g', f'movie {title} inserted with id {movie_id}')\n conn.commit()\n except Exception as e:\n print('add_movie: error:', repr(e))\n conn.rollback()\n \n conn.autocommit = True", "def insert_movie(addname: str, addyear: int, addposter: str, nomatchresult: int) -> None:\n with UseDatabase(dbconfig) as cursor:\n _SQL = \"\"\"insert into moviedata\n (name, year, poster, moviematch, nomatch, disliked)\n values\n (%s, %s, %s, %s, %s, %s)\"\"\"\n cursor.execute(_SQL, (addname, addyear, addposter, 0, nomatchresult, 0))", "def __insert_movie(self, session, imdb_id, movie_title, movie_year):\n print(f\"start {self.__insert_movie.__name__}: {movie_title} {movie_year}\")\n\n # Create a new movie row with value of has_role of passed in param and insert it into Movies table\n new_movie = movie.Movie(\n movie_id=imdb_id,\n movie_title=movie_title,\n movie_year=movie_year,\n inserted_dtm=datetime.now()\n )\n\n # Add the 
new movie to database\n session.add(new_movie)\n session.commit()\n\n msg = f\"end {self.__insert_movie.__name__}: inserted movie {movie_title} ({movie_year})\"\n print_and_log(msg)\n return new_movie", "def _add_movie(movie):\r\n movie_name = movie['MovieName']\r\n movie_code = movie['MovieCode']\r\n\r\n # Try to extract the season numbering (it might be a season result).\r\n movie_name = cls._extract_season_number(movie_name)\r\n # Remove the year.\r\n movie_name = cls._remove_year(movie_name)\r\n # And convert to global format.\r\n movie_name = Utils.FormatMovieName(movie_name, False)\r\n\r\n stage = MovieSubStage(\r\n cls.PROVIDER_NAME, \r\n movie_name, \r\n movie_code, \r\n default_versum)\r\n\r\n # There might be duplication in the results.\r\n if stage not in movie_sub_stages:\r\n movie_sub_stages.append(stage)", "def addFrameToMovie(frame, movie):\n # frame = None\n # movie = None\n # if a.__class__ == Movie:\n # movie = a\n # frame = b\n # else:\n # movie = b\n # frame = a\n\n if not (isinstance(movie,Movie) and isinstance(frame, str)):\n # if movie.__class__ != Movie or frame.__class__ != String:\n repValError(\"addFrameToMovie(frame, movie): frame is not a string or movie is not a Movie objectd\")\n\n movie.addFrame(frame)", "def add_movie(self, title, alt_title, year, video_id, build_url):\n title = re.sub(r'[?|$|!|:|#]', r'', title)\n movie_meta = '%s (%d)' % (title, year)\n folder = re.sub(r'[?|$|!|:|#]', r'', alt_title)\n dirname = self.nx_common.check_folder_path(\n path=os.path.join(self.movie_path, folder))\n filename = os.path.join(dirname, movie_meta + '.strm')\n progress = xbmcgui.DialogProgress()\n progress.create(self.kodi_helper.get_local_string(650), movie_meta)\n if xbmcvfs.exists(filename):\n return\n if not xbmcvfs.exists(dirname):\n xbmcvfs.mkdirs(dirname)\n if self.movie_exists(title=title, year=year) is False:\n progress.update(50)\n time.sleep(0.5)\n self.db[self.movies_label][movie_meta] = {'alt_title': alt_title}\n self._update_local_db(filename=self.db_filepath, db=self.db)\n url = build_url({'action': 'play_video', 'video_id': video_id})\n self.write_strm_file(path=filename, url=url, title_player=movie_meta)\n progress.update(100)\n time.sleep(1)\n progress.close()", "def save_movie_activity():\n### FROM random_movies_search.html\n\n movie_info = literal_eval(request.args.get(\"movie\"))\n (movie_url, movie_image, movie_name, movie_id) = movie_info\n\n movie_entry = Movie.query.filter_by(movie_id=movie_id).first()\n\n # add entry to movies table if movie does not already exist\n if not movie_entry:\n new_movie_entry = Movie(movie_image=movie_image, movie_id=movie_id,\n movie_name=movie_name, movie_url=movie_url)\n\n db.session.add(new_movie_entry)\n db.session.commit()\n\n return ('', 204)", "def create_movie(self, name=\"Test Movie\", year=2000, genre_id=50):\n\t\tgenre = Genre.objects.filter(pk=genre_id).first()\n\t\treturn Movie.objects.create(name=name, year=year, genre=genre)", "def enterMoviePushButtonClicked(self):\n\n # Read the movie title from the GUI. This is UNSAFE data. 
Never trust a USER!\n movieTitle = self.centralWidget.enterMovieLineEdit.text()\n print(\"Movie Title {}\".format(movieTitle))\n\n # Query the database for all movies with this title\n try:\n movieTitleQuery = ORM.session.query(\n ORM.Movies).filter(ORM.Movies.title == movieTitle).one()\n except sqlalchemy.orm.exc.NoResultFound:\n logging.error(\"Movie Not in Database {}\".format(movieTitle))\n return\n\n #movieTitleSQL = \"\"\"select * from public.\"Movies\" where title = '{}';\"\"\".format(movieTitle)\n movieTitleSQL = \"\"\"select * from public.\"Movies\" where release_date>'2010-01-01' and release_date <'2011-01-01';\"\"\"\n movieDataFrame = pd.read_sql(movieTitleSQL, ORM.db.raw_connection())\n print(type(movieDataFrame))\n print(movieDataFrame)\n \n # There must be at least 1 movie with this title, look up the credits for this title.\n movieCreditsQuery = ORM.session.query(\n ORM.Credits).filter(ORM.Credits.title == movieTitle)\n\n # Try to get the cast and crew informatioon\n try:\n cast = json.loads(movieCreditsQuery[0].cast)\n crew = json.loads(movieCreditsQuery[0].crew)\n except:\n logging.error(\n \"enterMoviePushButtonClicked: Failed to retrieve movie or credits\"\n )\n return\n\n director = \"NONE\"\n for x in crew:\n if x['job'] == 'Director':\n director = x['name']\n\n # for x in movieTitleQuery:\n # print(\"FILM: {:20} TAGLINE: {:40} STARING {:15} DIRECTOR {:15} \".format(x.title, x.tagline, cast[0]['name'], director ))\n\n self.centralWidget.directorInformation.infoLabel.setText(director)\n self.centralWidget.actorInformation.infoLabel.setText(cast[0]['name'])\n self.centralWidget.releaseDateInformation.infoLabel.setText(\n movieTitleQuery.release_date)\n self.centralWidget.budgetInformation.infoLabel.setText(\n \"{:,}\".format(movieTitleQuery.budget))\n self.centralWidget.revenueInformation.infoLabel.setText(\n \"{:,}\".format(movieTitleQuery.revenue))\n self.centralWidget.runTimeInformation.infoLabel.setNum(\n movieTitleQuery.runtime)\n self.centralWidget.voteCountInformation.infoLabel.setText(\n \"{:,}\".format(movieTitleQuery.vote_count))\n self.centralWidget.voteAverageInformation.infoLabel.setText(\n \"{:,}\".format(movieTitleQuery.vote_average))\n self.centralWidget.statusInformation.infoLabel.setText(\n movieTitleQuery.status)\n\n openMovie = OpenMovie.OpenMovie(title=movieTitle)\n\n if (openMovie.getPoster() is False):\n return\n self.centralWidget.updatePoster(openMovie.posterFileName)\n return", "def add_video(self, video):\n self._videos[video.video_id] = video", "def AddFile(moviefile_uncpath):\n\n global num_errors, verbose_mode\n global movies_conn\n\n utils.Msg(\"Adding file: \\\"\"+moviefile_uncpath+\"\\\" ...\", verbose_mode)\n\n movie = GetMovieInfo(moviefile_uncpath)\n if movie == None:\n return\n\n c = \"\" # column list for INSERT\n v = \"\" # value list for INSERT\n c += \"idMovie, \"\n v += \"NULL, \"\n c += \"idCabinet, \"\n v += \"%(idCabinet)s, \"\n c += \"idMediumType, \"\n v += \"'FILE', \"\n c += \"idStatus, \"\n v += \"%(status)s, \"\n c += \"Uncut, \"\n v += \"%(fn_uncut)s, \"\n c += \"Language, \"\n v += \"%(fn_language)s, \"\n c += \"SubtitleLanguage, \"\n v += \"%(fn_subtitle_language)s, \"\n c += \"Duration, \"\n v += \"%(duration_min)s, \"\n c += \"idQuality, \"\n v += \"%(idVideoQuality)s, \"\n c += \"DesiredDisplayAspectRatioWidth, \"\n v += \"%(fn_dar_width)s, \"\n c += \"DesiredDisplayAspectRatioHeight, \"\n v += \"%(fn_dar_height)s, \"\n c += \"DisplayAspectRatio, \"\n v += \"%(video_dar)s, \"\n c += \"OriginalDisplayAspectRatio, 
\"\n v += \"%(video_dar_org)s, \"\n c += \"idContainerFormat, \"\n v += \"%(idContainerFormat)s, \"\n c += \"idVideoFormat, \"\n v += \"%(idVideoFormat)s, \"\n c += \"VideoFormatProfile, \"\n v += \"%(video_format_profile)s, \"\n c += \"VideoSamplingWidth, \"\n v += \"%(video_width)s, \"\n c += \"VideoSamplingHeight, \"\n v += \"%(video_height)s, \"\n c += \"VideoBitrate, \"\n v += \"%(video_bitrate_kbps)s, \"\n c += \"VideoFramerate, \"\n v += \"%(video_framerate_fps)s, \"\n c += \"idVideoFramerateMode, \"\n v += \"%(idVideoFramerateMode)s, \"\n # c += \"VideoQualityFactor, \"\n # v += # TBD: Get this value from MediaInfo\n c += \"idAudioFormat, \"\n v += \"%(idAudioFormat)s, \"\n c += \"AudioFormatProfile, \"\n v += \"%(audio_format_profile)s, \"\n # c += \"idAudioChannelType, \"\n # v += # TBD: Get this value from MediaInfo\n c += \"TechnicalFlaws, \"\n v += \"%(fn_techcomm)s, \"\n c += \"AudioBitrate, \"\n v += \"%(audio_bitrate_kbps)s, \"\n c += \"idAudioBitrateMode, \"\n v += \"%(idAudioBitrateMode)s, \"\n c += \"AudioSamplingRate, \"\n v += \"%(audio_samplingrate_hz)s, \"\n c += \"FilePath, \"\n v += \"%(file_path)s, \"\n c += \"FolderPath, \"\n v += \"%(folder_path)s, \"\n now = str(datetime.datetime.now())[0:19]\n c += \"TSUpdated, \"\n v += \"'\"+now+\"', \"\n c += \"TSVerified, \"\n v += \"'\"+now+\"', \"\n c += \"Title, \"\n v += \"%(title)s, \"\n c += \"ReleaseYear, \"\n v += \"%(year)s, \"\n c += \"SeriesTitle, \"\n v += \"%(series_title)s, \"\n c += \"EpisodeTitle, \"\n v += \"%(episode_title)s, \"\n c += \"EpisodeId\"\n v += \"%(episode_id)s\"\n\n # movie[\"fn_threed\"]+\", \"\n # movie[\"fn_partial\"]+\", \"\n\n sql = \"INSERT INTO Medium (\"+c+\") VALUES ( \"+v+\")\"\n\n medium_cursor = movies_conn.cursor(MySQLdb.cursors.Cursor)\n\n medium_cursor.execute(sql,movie)\n\n medium_cursor.close()\n\n movies_conn.commit()", "def add_actor(new_actor):\n session.add(new_actor)\n session.commit()", "def add_to_watch(request):\n data = get_data(request)\n username = data.get('username')\n film_id = data.get('film_id')\n film = Film.objects.get(imdb_id=film_id)\n user = User.objects.get(username=username)\n user.watch_films.add(film)\n user.save()\n return JsonResponse({'msg': 'success'})", "def add_movies():\n try:\n data = json.loads(request.data)\n\n if not data:\n raise MissingFields\n\n popularity, director, genre_list, imdb_score, name = Validator.parse_json(data)\n\n # Add a validation for popularity and imdb_score\n Validator.validate_param(popularity, imdb_score)\n\n if not all([popularity, director, genre_list, imdb_score, name]):\n raise MissingFields\n\n with terminating_sn() as session:\n if MoviesDao.movie_exists(session, name):\n return ResponseMaker(ResponseMaker.RESPONSE_400,\n ResponseMaker.RESPONSE_400_MESSAGE,\n ResponseMaker.RESPONSE_400_ERROR_ENTRY_PRESENT\n ).return_response()\n\n MoviesDao.add_movie(session, popularity, director, genre_list, imdb_score, name)\n return ResponseMaker(ResponseMaker.RESPONSE_200).return_response(\n ResponseMaker.RESPONSE_200_MESSAGE)\n\n except (json.decoder.JSONDecodeError, MissingFields):\n return ResponseMaker(ResponseMaker.RESPONSE_400, ResponseMaker.RESPONSE_400_MESSAGE,\n ResponseMaker.RESPONSE_400_ERROR_MISSING_FIELDS).return_response()\n except InputOutOfBounds:\n return ResponseMaker(ResponseMaker.RESPONSE_400, ResponseMaker.RESPONSE_400_MESSAGE,\n ResponseMaker.RESPONSE_400_ERROR_OUT_OF_BOUNDS).return_response()\n except Exception:\n session.rollback()\n LOG.exception(\"Exception occurred while writting movie {} to 
db\".format(name))\n return ResponseMaker(ResponseMaker.RESPONSE_500).return_response(\n ResponseMaker.RESPONSE_500_MESSAGE)", "def add_movies_to_csv_file(movies):\n\ttry:\n\t\tf = open('movies.csv')\n\t\tf.close()\n\t\twith open('movies.csv', 'a') as file:\n\t\t\tfor movie in movies:\n\t\t\t\tif not 'imdbRating' in movie:\n\t\t\t\t\tmovie['imdbRating'] = 'N/A'\n\n\t\t\t\tif not 'BoxOffice' in movie:\n\t\t\t\t\tmovie['BoxOffice'] = 'N/A'\n\n\t\t\t\tfile.write(f\"\\n{movie['Title']};{movie['imdbRating']};{movie['BoxOffice']}\")\n\n\texcept OSError:\n\t\twith open('movies.csv', 'w') as file:\n\t\t\tfile.write('Title;imdbRating;BoxOffice')\n\n\t\t\tfor movie in movies:\n\t\t\t\tif not 'imdbRating' in movie:\n\t\t\t\t\tmovie['imdbRating'] = 'N/A'\n\n\t\t\t\tif not 'BoxOffice' in movie:\n\t\t\t\t\tmovie['BoxOffice'] = 'N/A'\n\n\t\t\t\tfile.write(f\"\\n{movie['Title']};{movie['imdbRating']};{movie['BoxOffice']}\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets movie data from the table for a specific movie.
def get_movie(self, title, year):
    try:
        response = self.table.get_item(Key={'year': year, 'title': title})
    except ClientError as err:
        logger.error(
            "Couldn't get movie %s from table %s. Here's why: %s: %s",
            title, self.table.name,
            err.response['Error']['Code'], err.response['Error']['Message'])
        raise
    else:
        return response['Item']
[ "def find_movie(self, movie_id):\n return self.__repo.find(movie_id)", "def get_movie_details(movie_id):\r\n\r\n url = \"https://api.themoviedb.org/3/movie/\" + \\\r\n str(movie_id) + \"?api_key=\" + config.FLASK_TMDB_API_KEY + \"&language=en-US\"\r\n\r\n # results = DB.Movie.find({\"tmdb_id\": str(movie_id)})\r\n\r\n # Cache Data in our Database\r\n # if results.count() != 1:\r\n #\r\n # results = requests.get(url).json()\r\n #\r\n #\r\n # movie = Movie()\r\n # movie.tmdb_id = results[\"id\"]\r\n # movie.original_title = results[\"original_title\"]\r\n # movie.popularity = results[\"popularity\"]\r\n #\r\n # DB.Movie.post(json.dumps(movie))\r\n\r\n results = requests.get(url)\r\n\r\n return results.json(), results.status_code", "def get_movie(title):\n params = {\n 't': title,\n 'apikey': settings.OMDB_API_KEY\n }\n\n r = requests.get(settings.OMDB_URL, params=params)\n response = r.json()\n\n if not r.ok:\n raise requests.exceptions(r.status_code, 'OMDB API error')\n\n else:\n response = r.json()\n if response['Response'] == 'False':\n \"\"\" When OMDB API can't find a movie status code is 200 \"\"\"\n raise (requests.exceptions.HTTPError(404, response['Error']))\n else:\n return response", "def get_movie(id):\n return get_object_or_404(Movie, id=id)", "def movie():\n return app.session.query(Movie)", "def find_movie(self, title):\n\n result = self.movie_db.get_movie(title)\n\n if result is None:\n data = self.download_movie(title)\n\n if data is not None:\n self.movie_db.add_movie_data(data)\n result = self.movie_db.get_movie(title)\n\n return result", "def load_movie_from_api(self, movie_title):\n columns = ['Title', 'Year', 'Runtime', 'Genre', 'Director', 'Actors', 'Writer', 'Language', 'Country', 'Awards',\n 'imdbRating', 'imdbVotes', 'BoxOffice']\n movie_data = Api.get(f'http://www.omdbapi.com/?t={movie_title}&apikey=39f41e43')\n data = {}\n for column in columns:\n data.update({column: movie_data[column]} if column in movie_data else {column: 'N/A'})\n self.data.append(data)", "def download_movie(self, title):\n\n # Call the client method to get a dictionary with the movies' data.\n data = get_movie_data(title)\n\n # Check to see if there is a key in the dictionary called 'Error'\n if not \"Error\" in data:\n\n # Add the values to the database.\n self.movie_db.add_movie_data(data)\n return data\n\n else:\n print(\"Error getting: {0}. 
Please check the title.\".format(title))\n return None", "def get_movie_data(self): \n raw_release_date = self._get_movie_value('Release Date')\n release_date = self._to_date(raw_release_date)\n raw_domestic_total_gross = self._get_movie_value('Domestic Total')\n domestic_total_gross = self._money_to_int(raw_domestic_total_gross)\n raw_runtime = self._get_movie_value('Runtime')\n runtime = self._runtime_to_minutes(raw_runtime)\n title = self._get_title()\n rating = self._get_movie_value('MPAA Rating')\n raw_budget = self._get_movie_value('Production Budget:')\n budget = self._money_to_int(raw_budget)\n genre = self._get_movie_value('Genre:')\n raw_opening_income_wend = self._get_opening_income()\n opening_income_wend = self._money_to_int(raw_opening_income_wend)\n distributor = self._get_movie_value('Distributor:')\n opening_theaters = self._get_opening_theaters()\n director = self._get_people('Director')\n actors = self._get_people('Actor')\n headers = ['BOM_id',\n 'movie_title',\n 'domestic_total_gross',\n 'release_date',\n 'runtime_mins',\n 'rating',\n 'budget',\n 'genre',\n 'opening_income_wend',\n 'distributor',\n 'opening_theaters',\n 'director',\n 'actors']\n movie_dict = dict(zip(headers, [self.BOM_id,\n title,\n domestic_total_gross,\n release_date,\n runtime,\n rating,\n budget,\n genre,\n opening_income_wend,\n distributor,\n opening_theaters,\n director,\n actors]))\n return movie_dict", "def get_movie_details(payload, movie_id):\n movie = Movie.query.filter(Movie.id == movie_id).one_or_none()\n if not movie:\n abort(404)\n return jsonify({\n 'success': True,\n 'movie': movie.format()\n })", "def search_movie_by_id(id):\n\n return movie_dict[id].movie_title", "def retrieve_movie_from_id(movie_id):\n logging.info('Retrieving %s', movie_id)\n\n url = BASE_URL_MYAPIFILMS + 'imdb?idIMDB=' + movie_id + '&format=JSON&aka=1&business=0&seasons=0&seasonYear=0&technical=0&filter=N&exactFilter=0&limit=1&lang=en-us&actors=S&biography=0&trailer=1&uniqueName=0&filmography=0&bornDied=0&starSign=0&actorActress=0&actorTrivia=0&movieTrivia=0&awards=0&token=307cccfe-d20b-4b69-b976-d6a024538864'\n\n json_page = get(url).encode('utf-8')\n json_data = json.loads(json_page)\n\n movie = Movie(id=json_data['idIMDB'],\n plot=json_data['plot'],\n poster=clear_url(json_data['urlPoster']) if ('urlPoster' in json_data and json_data['urlPoster'] != \"\") else None,\n rated=json_data['rated'],\n simple_plot=json_data['simplePlot'],\n genres=json_data['genres'])\n\n try:\n trailer_url = json_data['trailer']['videoURL']\n movie.trailer = trailer_url\n except KeyError:\n movie.trailer = None\n\n movie.original_title = json_data['title']\n\n akas = json_data['akas']\n for aka in akas:\n if aka['country'] == 'Italy':\n movie.title = aka['title']\n\n run_times = json_data['runtime']\n if len(run_times) == 0:\n movie.run_times = None\n else:\n movie.run_times = run_times[0]\n\n year = json_data['year']\n if len(year) > 4:\n year = year[-4:]\n\n movie.year = year\n key = movie.put()\n actors_list = json_data['actors']\n directors_list = json_data['directors']\n writers_list = json_data['writers']\n\n retrieve_artists(movie, actors_list, directors_list, writers_list)\n\n logging.info('Retrieved %s', movie_id)\n return key", "def getMovieData(field, movie=None, index=None):\n if movie is not None:\n if movie not in imdb_movie_data:\n print(\"Invalid Movie Name\")\n return -1 \n if field not in imdb_movie_data[movie]:\n print(\"Invalid Field Name\")\n return -1\n return imdb_movie_data[movie][field]\n\n if index is not 
None:\n if index not in imdb_movie_data:\n print(index)\n print(\"Invalid index\")\n return -1 \n if field not in imdb_movie_data[index]:\n print(\"Invalid Field Name\")\n return -1\n return imdb_movie_data[index][field]\n\n print(\"Please specify either the Movie Name or Rank\")", "def movie_data(film_id):\n data = dummy_movie_data(film_id) # Get all of the info for a single movie\n return render_template(\"doc_data_page.html\", data=data)", "def get_movie_info(movie):\n # Query TheMovie DB\n connection = urllib.urlopen(\"http://api.themoviedb.org/3/search/movie?query=\"+movie+\"&api_key=\"+str(api_key))\n movieDB_json = json.loads(connection.read())\n\n # Our Movie ID\n movie_id = movieDB_json['results'][0]['id']\n print(\"Movie ID is: \"+str(movie_id))\n connection.close()\n\n # Grab extended movie info\n extended_connection = urllib.urlopen(\"http://api.themoviedb.org/3/movie/\"+str(movie_id)+\"?api_key=\"+str(api_key)+\"&append_to_response=videos\")\n # Movie 'dict' object\n mdict = json.loads(extended_connection.read())\n print(mdict)\n extended_connection.close()\n\n print(\"VIDEO INFO\")\n print mdict['videos']['results']\n videos = mdict['videos']['results']\n mvideo = \"\"\n for video in videos:\n print video['name']\n if video['name'] == \"Official Trailer\":\n mvideo = video\n break\n\n # In the case there is no 'Official Trailer' named, grab the 1st video associated\n if mvideo == \"\":\n mvideo = videos[0]\n\n print (\"MVIDEO is: \")\n print mvideo\n print mvideo['key']\n\n # Our formatted movie info output\n movie_info = media.Movie(str(mdict['original_title']),\n str(mdict['runtime']),\n str(mdict['overview'].encode('utf8')),\n str(config['images']['base_url']+POSTER_SIZE+str(mdict['poster_path'])),\n \"https://www.youtube.com/watch?v=\"+str(mvideo['key']))\n\n print(movie_info)\n return movie_info", "def _get_movie(self, movie_id):\n # movie_id is always int (based on handler regexp), but dispatcher sends as str\n movie = Movie.get_by_id(int(movie_id))\n if not movie:\n logging.debug(\"MovieDetailsHandler. Movie not found. movie_id: {}. auth: {}\".format(\n movie_id, self.request.headers.get(\"Authorization\")))\n\n json_response(self, {\"message\": \"No such movie\"}, status=404)\n return False, None\n return True, movie", "def _find_movie_by_title(self, title):\n return Movie.objects.filter(title=title).first()", "def get_movies_by_actor(self, actor):\r\n raise NotImplementedError", "def related(self, movie_id):\n url = \"https://yts.ag/api/v2/movie_suggestions.json?movie_id=%s\" % movie_id\n res = requests.get(url)\n dic = res.json()\n return dic['data']['movies']" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates rating and plot data for a movie in the table.
def update_movie(self, title, year, rating, plot):
    try:
        response = self.table.update_item(
            Key={'year': year, 'title': title},
            UpdateExpression="set info.rating=:r, info.plot=:p",
            ExpressionAttributeValues={
                ':r': Decimal(str(rating)), ':p': plot},
            ReturnValues="UPDATED_NEW")
    except ClientError as err:
        logger.error(
            "Couldn't update movie %s in table %s. Here's why: %s: %s",
            title, self.table.name,
            err.response['Error']['Code'], err.response['Error']['Message'])
        raise
    else:
        return response['Attributes']
[ "def update_rating(user_id, movie_id, rating):\n usermovie_rating = UserMovie.query.filter(UserMovie.user_id == user_id,\n UserMovie.movie_id == movie_id).first()\n if usermovie_rating:\n usermovie_rating.rating = rating\n db.session.commit()", "def add_movie(self, title, year, plot, rating):\n try:\n self.table.put_item(\n Item={\n 'year': year,\n 'title': title,\n 'info': {'plot': plot, 'rating': Decimal(str(rating))}})\n except ClientError as err:\n logger.error(\n \"Couldn't add movie %s to table %s. Here's why: %s: %s\",\n title, self.table.name,\n err.response['Error']['Code'], err.response['Error']['Message'])\n raise", "def movie_rated(movie_id):\n\n\n added_rating = request.form.get(\"rate_score\")\n user_id = User.query.filter_by(email=session[\"login\"]).first().user_id\n\n\n all_movies_rated_by_user = db.session.query(Rating.movie_id, Rating.score).filter_by(user_id=user_id).all()\n \n for movie_tuple in all_movies_rated_by_user:\n \n if int(movie_id) == movie_tuple[0]:\n print \"yes\"\n rating_object = Rating.query.filter_by(movie_id=movie_id,user_id=user_id).first()\n print rating_object\n\n rating_object.score = added_rating\n\n db.session.commit()\n \n return redirect(\"/movies\")\n \n new_rating = Rating(movie_id=movie_id, user_id=user_id, score=added_rating)\n\n db.session.add(new_rating)\n\n db.session.commit()\n\n flash(\"Your rating has been updated.\")\n\n return redirect(\"/movies\")", "def update_movie(self, movie_id, title, description, genre, rating):\n movie = Movie(movie_id, title, description, genre, rating)\n self.__repo.update(movie_id, movie)", "def analyze_and_plot_data(ratings):\n \n num_users = ratings['user_id'].nunique()\n num_items = ratings['movie_id'].nunique()\n print(\"Number of unique users is \" + str(num_users))\n print(\"Number of unique movies is \" + str(num_items))\n print(\"The number of ratings in the dataset set is \" + str(ratings.shape[0]))\n\n #Determine ratings distribution and plot results\n count = ratings['rating'].value_counts()\n count = count.to_frame('count')\n count.index.name = 'Rating'\n count = count.sort_values(by='Rating', ascending=1)\n count.plot(kind='bar')\n plt.ylabel('Number of ratings')\n plt.title('Distribution of Ratings')\n plt.savefig('ratings_distribution.png')\n\n #Pie plot\n count.plot(kind='pie', subplots=True, figsize=(5, 5), autopct='%1.0f%%')\n plt.title('Distribution of Ratings')\n plt.savefig('ratings_distribution_pie.png')\n plt.show()\n\n #Determine number of ratings per movie and plot data \n count_movies_rated = ratings['movie_id'].value_counts()\n buckets = [250, 150, 50, 25, 5, 1]\n ratings_dist = np.zeros(6)\n prior_count = 0\n for i in range(6):\n ratings_dist[i] = count_movies_rated[count_movies_rated >= buckets[i]].count()\n ratings_dist[i] -= prior_count\n prior_count += ratings_dist[i]\n\n plt.title('Ratings per Movie')\n plt.xlabel('Number of ratings')\n plt.ylabel('Number of movies')\n label = ['>250','150-250', '50-150','50-25', '25-5', '1-5']\n index = np.arange(len(label))\n plt.bar(index, ratings_dist)\n plt.xticks(index, label)\n plt.savefig('movies_distribution.png')\n\n plt.show()\n\n #Determine how the number of ratings per user and plot data\n count_users = ratings['user_id'].value_counts()\n buckets = [250, 150, 50, 25, 5, 1]\n users_dist = np.zeros(6)\n prior_count = 0\n for i in range(6):\n users_dist[i] = count_users[count_users >= buckets[i]].count()\n users_dist[i] -= prior_count\n prior_count += users_dist[i]\n\n plt.title('Ratings per User')\n plt.xlabel('Number of 
ratings')\n plt.ylabel('Number of users')\n plt.bar(index, users_dist)\n plt.xticks(index, label)\n plt.savefig('users_distribution.png')\n\n plt.show()", "def update_plot_annotations(self):\n self.match = self.match_selector.value\n self.match_data = self.data[self.match]\n self.teams = self.match_data.blue + self.match_data.red\n if self.title_div is not None:\n self.title_div.text = self.get_page_title()\n if self.video_row is not None:\n self.update_videos()\n if self.team_div is not None:\n self.team_div.text = self.get_team_links()\n if self.figure is not None:\n # Update plot title\n self.figure.title.text = self.get_plot_title()\n # Update Legend labels with new team numbers\n for idx, item in enumerate(self.figure.legend.items):\n self.figure.legend.items[idx] = models.LegendItem(\n label=self.teams[idx],\n renderers = item.renderers,\n index=idx)", "def update_or_add_rating():\n\n rating = request.args.get(\"rating\")\n movie_id = request.args.get(\"movie_id\")\n\n email = session[\"current_user\"]\n user = User.query.filter(email=email)\n\n # to check if user has previously rated the movie\n # if rating exists, update the rating.\n # otherwise, add new rating.\n # if user.rating.movie_id == movie_id:\n\n\n return redirect(\"/movies\")", "def update():\n df_active = select_reviews()\n source.data = ColumnDataSource(data=df_active).data", "def updateTable(self):\r\n self.dataTable = Table(self.frame, dataframe = self.data)\r\n self.dataTable.show()", "def viewData(self):\n keys = ('Title', 'Year', 'imdbRating', 'Runtime', 'Plot', 'Genre', 'Poster', 'Director', 'Actors', 'Awards')\n\n # Search for user selection in database and API\n try:\n movie_title = self.L.get(self.L.curselection())\n self.cur.execute(\"SELECT title, year, rating, runtime, plot, genre_id, posterLink, director, actor, award FROM Movies WHERE Title = ?\", (movie_title,))\n movie_values = self.cur.fetchone()\n\n # check if selection is in the local database\n if movie_values is not None:\n movie = dict(zip(keys, tuple(movie_values)))\n movie['Runtime'] = str(movie['Runtime'])\n\n # fetch all genres from the db\n genres = []\n for genre_id in [int(x) for x in movie['Genre'].split(',')]:\n self.cur.execute('''SELECT genre FROM GENRES WHERE id = ?''', (genre_id,))\n genres.append(self.cur.fetchone()[0])\n movie['Genre'] = ', '.join(genres)\n\n # fetch data from API if not in database\n else:\n movie = requests.get(FETCH_FROM_URL.replace('<imdb id>', self.fetched_movies[movie_title])).json()\n movie = {key: movie[key] for key in keys}\n MovieDataWin(self, movie)\n except tk.TclError:\n print(\"Nothing was selected\")", "def enterMoviePushButtonClicked(self):\n\n # Read the movie title from the GUI. This is UNSAFE data. 
Never trust a USER!\n movieTitle = self.centralWidget.enterMovieLineEdit.text()\n print(\"Movie Title {}\".format(movieTitle))\n\n # Query the database for all movies with this title\n try:\n movieTitleQuery = ORM.session.query(\n ORM.Movies).filter(ORM.Movies.title == movieTitle).one()\n except sqlalchemy.orm.exc.NoResultFound:\n logging.error(\"Movie Not in Database {}\".format(movieTitle))\n return\n\n #movieTitleSQL = \"\"\"select * from public.\"Movies\" where title = '{}';\"\"\".format(movieTitle)\n movieTitleSQL = \"\"\"select * from public.\"Movies\" where release_date>'2010-01-01' and release_date <'2011-01-01';\"\"\"\n movieDataFrame = pd.read_sql(movieTitleSQL, ORM.db.raw_connection())\n print(type(movieDataFrame))\n print(movieDataFrame)\n \n # There must be at least 1 movie with this title, look up the credits for this title.\n movieCreditsQuery = ORM.session.query(\n ORM.Credits).filter(ORM.Credits.title == movieTitle)\n\n # Try to get the cast and crew informatioon\n try:\n cast = json.loads(movieCreditsQuery[0].cast)\n crew = json.loads(movieCreditsQuery[0].crew)\n except:\n logging.error(\n \"enterMoviePushButtonClicked: Failed to retrieve movie or credits\"\n )\n return\n\n director = \"NONE\"\n for x in crew:\n if x['job'] == 'Director':\n director = x['name']\n\n # for x in movieTitleQuery:\n # print(\"FILM: {:20} TAGLINE: {:40} STARING {:15} DIRECTOR {:15} \".format(x.title, x.tagline, cast[0]['name'], director ))\n\n self.centralWidget.directorInformation.infoLabel.setText(director)\n self.centralWidget.actorInformation.infoLabel.setText(cast[0]['name'])\n self.centralWidget.releaseDateInformation.infoLabel.setText(\n movieTitleQuery.release_date)\n self.centralWidget.budgetInformation.infoLabel.setText(\n \"{:,}\".format(movieTitleQuery.budget))\n self.centralWidget.revenueInformation.infoLabel.setText(\n \"{:,}\".format(movieTitleQuery.revenue))\n self.centralWidget.runTimeInformation.infoLabel.setNum(\n movieTitleQuery.runtime)\n self.centralWidget.voteCountInformation.infoLabel.setText(\n \"{:,}\".format(movieTitleQuery.vote_count))\n self.centralWidget.voteAverageInformation.infoLabel.setText(\n \"{:,}\".format(movieTitleQuery.vote_average))\n self.centralWidget.statusInformation.infoLabel.setText(\n movieTitleQuery.status)\n\n openMovie = OpenMovie.OpenMovie(title=movieTitle)\n\n if (openMovie.getPoster() is False):\n return\n self.centralWidget.updatePoster(openMovie.posterFileName)\n return", "def analysis2(actor):\n #Get the list of movies and movie ids of the actor\n actormovies,movieids=movielist(actor)\n #The performance metric of the actor here is the profitability of the movies he/she has worked in\n #Get the time series data of the profitability of the actors movies over time\n actor_performance=movie_popularity(movieids,actor)\n #plot the vizualization using bokeh\n show_viz(actor_performance,actor)", "def update_features(self):\r\n logging.info(\"Loading scores into the database.\")\r\n videos = self.video_queryset\r\n\r\n # todo: reimplement so that there's no double loop\r\n # in a transaction, reset all ratings, and then load the new ones\r\n\r\n # saving per-user scores\r\n for user in tqdmem(self.users, desc=\"user_scores_write\"):\r\n user_pref = UserPreferences.objects.get(id=user)\r\n\r\n # intermediate results are not visible to site visitors\r\n with transaction.atomic():\r\n # deleting old ratings\r\n VideoRating.objects.filter(user=user_pref).delete()\r\n\r\n # selecting rated videos by this person\r\n rated_videos = 
Video.objects.filter(\r\n Q(expertrating_video_1__user=user_pref)\r\n | Q(expertrating_video_2__user=user_pref)\r\n ).distinct()\r\n\r\n # only selecting \"pre-registered\" videos\r\n rated_videos = [x for x in rated_videos if x.video_id in self.videos_set]\r\n\r\n if rated_videos:\r\n result_user = self.predict_user(user=user_pref, videos=rated_videos)\r\n for i, video in enumerate(rated_videos):\r\n result = result_user[i]\r\n param_dict = dict(user=user_pref, video=video)\r\n\r\n if result is not None:\r\n rating_record = VideoRating.objects.get_or_create(\r\n **param_dict\r\n )[0]\r\n for j, attribute in enumerate(self.features):\r\n setattr(rating_record, attribute, result[j])\r\n rating_record.save()\r\n\r\n # saving overall scores\r\n # intermediate results are not visible to site visitors\r\n with transaction.atomic():\r\n\r\n # only selecting \"pre-registered\" videos\r\n videos = [x for x in videos if x.video_id in self.videos_set]\r\n\r\n results = self.predict_aggregated(videos=videos)\r\n for i, video in enumerate(tqdmem(videos, desc=\"agg_scores_write\")):\r\n result = results[i]\r\n\r\n # no raters -> score is 0 (-> not shown in search)\r\n if not video.rating_n_experts:\r\n result = [0.0 for _ in result]\r\n\r\n for i, attribute in enumerate(self.features):\r\n setattr(video, attribute, result[i])\r\n video.save()", "def rating_form(movie_id):\n\n\n return render_template(\"add_rating.html\")", "def show_rating_page(movie_id):\n movie = crud.get_movie_by_id(movie_id)\n session['movie_id'] = movie_id\n\n return render_template('rate_movie.html', movie=movie)", "async def updateratings(self, ctx):\n await ctx.channel.send(embed=self.embed(\"Updating ratings... Please wait.\"))\n await self.update_ratings(ctx)", "def plot_raw_data(ratings):\n # do statistics.\n num_items_per_user = np.array((ratings != 0).sum(axis=0)).flatten()\n num_users_per_item = np.array((ratings != 0).sum(axis=1).T).flatten()\n sorted_num_movies_per_user = np.sort(num_items_per_user)[::-1]\n sorted_num_users_per_movie = np.sort(num_users_per_item)[::-1]\n\n # plot\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.plot(sorted_num_movies_per_user, color='blue')\n ax1.set_xlabel(\"users\")\n ax1.set_ylabel(\"number of ratings (sorted)\")\n ax1.grid()\n\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.plot(sorted_num_users_per_movie)\n ax2.set_xlabel(\"items\")\n ax2.set_ylabel(\"number of ratings (sorted)\")\n # ax2.set_xticks(np.arange(0, 2000, 300))\n ax2.grid()\n\n plt.tight_layout()\n plt.savefig(\"stat_ratings\")\n plt.show()\n # plt.close()\n return num_items_per_user, num_users_per_item", "def plot_raw_data(ratings):\n # do statistics.\n num_items_per_user = np.array((ratings != 0).sum(axis=0)).flatten()\n num_users_per_item = np.array((ratings != 0).sum(axis=1).T).flatten()\n sorted_num_movies_per_user = np.sort(num_items_per_user)[::-1]\n sorted_num_users_per_movie = np.sort(num_users_per_item)[::-1]\n\n # plot\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.plot(sorted_num_movies_per_user, color='blue')\n ax1.set_xlabel(\"users\")\n ax1.set_ylabel(\"number of ratings (sorted)\")\n ax1.grid()\n\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.plot(sorted_num_users_per_movie, color='blue')\n ax2.set_xlabel(\"items\")\n ax2.set_ylabel(\"number of ratings (sorted)\")\n #ax2.set_xticks(np.arange(0, 2000, 300))\n ax2.grid()\n\n plt.tight_layout()\n plt.savefig(\"../plots/stat_ratings\")\n plt.show()\n # plt.close()\n return num_items_per_user, num_users_per_item", "def load_movie_data(ratings_data: 
str = \"ratings.csv\", movies_data: str = \"movies.csv\", tags_data: str = \"tags.csv\") -> DataFrame:\n \n\n #load different movie datasets\n \n ratings: DataFrame = pd.read_csv(ratings_data)\n ratings.drop(['timestamp'], 1, inplace = True)\n \n titles: DataFrame = pd.read_csv(movies_data)\n\n tags: DataFrame = pd.read_csv(tags_data)\n tags.drop(['timestamp'], 1, inplace = True)\n\n \n #combine ratings with titles\n \n ratings_with_titles: DataFrame = pd.merge(ratings, titles, on = \"movieId\")\n\n \n #combine genres and tags into metadata\n \n full_movie_dataset: DataFrame = pd.merge(ratings_with_titles, tags, on = [\"userId\", \"movieId\"], how = \"left\")\n full_movie_dataset.fillna(\"\", inplace = True)\n full_movie_dataset = full_movie_dataset.groupby('movieId')['tag'].apply(lambda x: \"%s\" % ' '.join(x))\n full_movie_dataset = pd.merge(ratings_with_titles, full_movie_dataset, on = \"movieId\", how = \"left\")\n full_movie_dataset['metadata'] = full_movie_dataset[[\"tag\", \"genres\"]].apply(lambda x: ' '.join(x), axis = 1)\n\n \n #clean dataset\n \n full_movie_dataset.drop([\"tag\", \"genres\"], 1, inplace = True)\n full_movie_dataset.to_csv(r'/Users/jzymet/Desktop/recommender/full_movie_dataset.csv', index = False)\n\n \n return full_movie_dataset" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Queries for movies that were released in the specified year.
def query_movies(self, year):
    try:
        response = self.table.query(KeyConditionExpression=Key('year').eq(year))
    except ClientError as err:
        logger.error(
            "Couldn't query for movies released in %s. Here's why: %s: %s", year,
            err.response['Error']['Code'], err.response['Error']['Message'])
        raise
    else:
        return response['Items']
[ "def get_movies_by_year(self, year):\r\n raise NotImplementedError", "def _selectMovieByReleaseYear(entities):\n entities = map(lambda e: (e, _getYearFromDesc(e.description)), entities)\n entities.sort(key=lambda x: x[1], reverse=True)\n return entities[0][0]", "def scan_movies(self, year_range):\n movies = []\n scan_kwargs = {\n 'FilterExpression': Key('year').between(year_range['first'], year_range['second']),\n 'ProjectionExpression': \"#yr, title, info.rating\",\n 'ExpressionAttributeNames': {\"#yr\": \"year\"}}\n try:\n done = False\n start_key = None\n while not done:\n if start_key:\n scan_kwargs['ExclusiveStartKey'] = start_key\n response = self.table.scan(**scan_kwargs)\n movies.extend(response.get('Items', []))\n start_key = response.get('LastEvaluatedKey', None)\n done = start_key is None\n except ClientError as err:\n logger.error(\n \"Couldn't scan for movies. Here's why: %s: %s\",\n err.response['Error']['Code'], err.response['Error']['Message'])\n raise\n\n return movies", "def query_omdb(movie, year):\n # example URL: http://www.omdbapi.com/?t=city+of+god&y=&plot=short&r=json\n #ย you can also use omdb (pip install omdb)\n params = urllib.urlencode({ 't' : movie, 'y': year, 'plot' : \"short\", 'r': \"json\"})\n url = \"%s?%s\" % (omdb_url, params)\n f = urllib.urlopen(url)\n return json.loads(f.read())", "async def find_by_year(self, ctx: commands.Context, year: int) -> None:\n async with aiohttp.ClientSession() as session:\n page_size, page_max = 0, 0\n uri = URL + f'games?released={year}&_bulk=True'\n games = []\n while page_size == page_max:\n resp = await fetch(session, uri)\n if len(resp['data']) == 0:\n await ctx.send(f'There are no records for the year \"{year}\"')\n return\n games += resp['data']\n pagination = resp['pagination']\n uri = pagination['links'][len(pagination['links']) - 1]['uri']\n page_size, page_max = pagination['size'], pagination['max']\n chosen_game = choice(games)\n embed = await format_embed_async(session, chosen_game)\n if embed is None:\n await ctx.send(\"There are no speedrun records for the selected game, please try the command again\")\n else:\n await ctx.send(embed=embed)", "def years_movies_released():\n reader = initialize_reader()\n years_list = [row[23] for row in reader]\n years_dicts = [{\"year\": i, \"movies_released\": years_list.count(i)} for i in years_list]\n new_list = sorted(years_dicts, key=lambda i: i['movies_released'])\n year_less_movies = new_list[:1]\n print(f\"The year {year_less_movies[0].get('year')} had less movies released with {year_less_movies[0].get('movies_released')}\")\n new_list = sorted(years_dicts, key=lambda i: i['movies_released'], reverse=True)\n year_more_movies = new_list[:1]\n print(f\"The year {year_more_movies[0].get('year')} had more movies released with {year_more_movies[0].get('movies_released')}\")", "def movie_exists(self, title, year):\n title = re.sub(r'[?|$|!|:|#]', r'', title)\n movie_meta = '%s (%d)' % (title, year)\n return movie_meta in self.db[self.movies_label]", "def moviesFromTo(start, end):\n data = movies.find({\"year\": {\"$gte\": start, \"$lte\": end}})\n for movie in data:\n for key, value in movie.items():\n if key == \"title\":\n print(\"{title: %s}\" % value)", "def get_genres_year(year) -> list:\n sql_request = sql_request_genres_year(year)\n\n sql_data = get_data_from_db(sql_request)\n genres = create_data_of_year(sql_data)\n return genres", "def group_by_year(self, year):\r\n self.if_list_empty(Library.books)\r\n self.validate_data_int(year)\r\n for book in 
Library.books:\r\n if year == book.year:\r\n print(book)", "def select_year(data: pd.DataFrame, year: str, date_column='date'):\n data = data[data[date_column] >= str(year)]\n data = data[data[date_column] < str(int(year)+1)]\n data['date'] = data[date_column]\n return data", "def get_models_between(start_year, end_year):\n\n # I tried to accomplish the below with a dictionary, similar to\n # line 89 for search_brands function, but I couldn't get the formatting right.\n # Does this still santize the inputs?\n start = start_year\n end = end_year\n\n released_mods = db.session.query(Model).filter((Model.year >= start) & (Model.year < end)).all()\n\n return released_mods", "def title_year_count():\n\n query_parameters = request.args\n\n result = db.engine.execute(\n \"SELECT release_year, count(*) AS released FROM netflix GROUP BY release_year ORDER BY release_year\")\n\n release_year_count_list = [\n {\"release_year\": row[0], \"count\": row[1]} for row in result]\n\n return jsonify(release_year_count_list)", "def get_movie_genre(monthb,monthl,genrenum):\n data = requests.get('https://api.themoviedb.org/3/discover/movie?api_key='+ TMDB_KEY +\n '&primary_release_date.gte='+ monthb + '&primary_release_date.lte=' \n + monthl +'&with_genres='+ str(genrenum)).json()['total_results']\n return data", "def ytd(self, year=None):\n if year is None:\n year = date.today().year\n return self.filter(time__year=year)", "def get_movie(self, title, year):\n try:\n response = self.table.get_item(Key={'year': year, 'title': title})\n except ClientError as err:\n logger.error(\n \"Couldn't get movie %s from table %s. Here's why: %s: %s\",\n title, self.table.name,\n err.response['Error']['Code'], err.response['Error']['Message'])\n raise\n else:\n return response['Item']", "def get_films_by_year(year: str, df=create_df()):\n df1 = df.loc[df.air_year.str.contains(year, regex=False)]\n if df1.shape[0] < 10:\n return df\n return df1", "def read_cves_by_year(year):\n client = MongoClient(DB_HOST, DB_PORT)\n db = client[DB_NAME]\n collection = db[CVES_COLLECTION]\n import re\n regex = re.compile('CVE-' + str(year) + '-\\d*')\n res = collection.find({'name': regex})\n client.close()\n return res", "def get_count_year(year):\n count_year = rdd_review_data\\\n .filter(lambda x: year in x[\"date\"])\\\n .count()\n\n results[\"B\"] = count_year" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Scans for movies that were released in a range of years. Uses a projection expression to return a subset of data for each movie.
def scan_movies(self, year_range):
    movies = []
    scan_kwargs = {
        'FilterExpression': Key('year').between(year_range['first'], year_range['second']),
        'ProjectionExpression': "#yr, title, info.rating",
        'ExpressionAttributeNames': {"#yr": "year"}}
    try:
        done = False
        start_key = None
        while not done:
            if start_key:
                scan_kwargs['ExclusiveStartKey'] = start_key
            response = self.table.scan(**scan_kwargs)
            movies.extend(response.get('Items', []))
            start_key = response.get('LastEvaluatedKey', None)
            done = start_key is None
    except ClientError as err:
        logger.error(
            "Couldn't scan for movies. Here's why: %s: %s",
            err.response['Error']['Code'], err.response['Error']['Message'])
        raise

    return movies
[ "def get_movies_by_year(self, year):\r\n raise NotImplementedError", "def query_movies(self, year):\n try:\n response = self.table.query(KeyConditionExpression=Key('year').eq(year))\n except ClientError as err:\n logger.error(\n \"Couldn't query for movies released in %s. Here's why: %s: %s\", year,\n err.response['Error']['Code'], err.response['Error']['Message'])\n raise\n else:\n return response['Items']", "def _selectMovieByReleaseYear(entities):\n entities = map(lambda e: (e, _getYearFromDesc(e.description)), entities)\n entities.sort(key=lambda x: x[1], reverse=True)\n return entities[0][0]", "def get_models_between(start_year, end_year):\n\n # I tried to accomplish the below with a dictionary, similar to\n # line 89 for search_brands function, but I couldn't get the formatting right.\n # Does this still santize the inputs?\n start = start_year\n end = end_year\n\n released_mods = db.session.query(Model).filter((Model.year >= start) & (Model.year < end)).all()\n\n return released_mods", "def moviesFromTo(start, end):\n data = movies.find({\"year\": {\"$gte\": start, \"$lte\": end}})\n for movie in data:\n for key, value in movie.items():\n if key == \"title\":\n print(\"{title: %s}\" % value)", "def filter_by_year(data: dict, year: int) -> dict:\n filtered_data = data | {\"places\": []}\n\n for place in data[\"places\"]:\n dataframes = []\n\n for dataframe in place[\"data\"]:\n if dataframe[\"startYear\"] <= year <= dataframe[\"endYear\"]:\n dataframes.append(dataframe)\n\n if dataframes:\n filtered_data[\"places\"].append(\n place | {\"data\": dataframes}\n )\n\n return filtered_data", "def filter_raster_filenames_by_year(\n self, filenames: list,\n start_year: int,\n end_year: int\n ):\n new_list = []\n years = [str(year) for year in range(start_year, end_year+1)]\n for f in filenames:\n date_match = re.search(\n r'(?P<year>\\d{4})(?P<month>\\d{2})(?P<day>\\d{2})', f)\n if date_match['year'] in years:\n new_list.append(f)\n return sorted(new_list)", "def get_models_between(start_year, end_year):\n\n if int(end_year) >= int(start_year):\n return Model.query.filter( (Model.year >= start_year), (Model.year < end_year)).all()\n else:\n return []", "def years_movies_released():\n reader = initialize_reader()\n years_list = [row[23] for row in reader]\n years_dicts = [{\"year\": i, \"movies_released\": years_list.count(i)} for i in years_list]\n new_list = sorted(years_dicts, key=lambda i: i['movies_released'])\n year_less_movies = new_list[:1]\n print(f\"The year {year_less_movies[0].get('year')} had less movies released with {year_less_movies[0].get('movies_released')}\")\n new_list = sorted(years_dicts, key=lambda i: i['movies_released'], reverse=True)\n year_more_movies = new_list[:1]\n print(f\"The year {year_more_movies[0].get('year')} had more movies released with {year_more_movies[0].get('movies_released')}\")", "def get_films_by_year(year: str, df=create_df()):\n df1 = df.loc[df.air_year.str.contains(year, regex=False)]\n if df1.shape[0] < 10:\n return df\n return df1", "def select_year(data: pd.DataFrame, year: str, date_column='date'):\n data = data[data[date_column] >= str(year)]\n data = data[data[date_column] < str(int(year)+1)]\n data['date'] = data[date_column]\n return data", "def get_genres_year(year) -> list:\n sql_request = sql_request_genres_year(year)\n\n sql_data = get_data_from_db(sql_request)\n genres = create_data_of_year(sql_data)\n return genres", "def __get_years_(search_year, start, step) -> list:\n sql_request = _sql_request_search_years(search_year)\n 
years = get_ids_by_request(sql_request, start, step)\n return years", "def filter_years():\n years = sys.argv[1:]\n for year in years:\n infile = os.path.join(BASE_DIR, CLASSIFICATION_EXP % year, FILENAME1)\n outfile1 = os.path.join(BASE_DIR, CLASSIFICATION_EXP % year, FILENAME2)\n outfile2 = os.path.join(BASE_DIR, CLASSIFICATION_EXP % year, FILENAME3)\n print year\n filter_terms(infile, outfile1, outfile2)\n print", "def retrieve_all_in_year(self, term, year):\n results_year = list()\n batch_start = 0\n\n search_results = self.search_by_term(term, start=batch_start, date=year)\n expected_num_of_ent = int(search_results[\"opensearch:totalResults\"])\n if self.status_code is not 200 or expected_num_of_ent is 0:\n logging.info(\" %s in year %d contains no results\" % (term, year))\n pass\n\n if 0 < expected_num_of_ent < 5000:\n num_batches = self.get_num_batches(expected_num_of_ent)\n for batch in trange(num_batches, ascii=True, desc=str(year)):\n batch_start = self.batch_size * batch\n try:\n search_results = self.search_by_term(term,\n start=batch_start,\n date=year)\n for entry in search_results['entry']:\n results_year.append(entry)\n except EOFError:\n logging.error(\n \"failed to retrieve %s in year %d\" % (term, year))\n break\n elif expected_num_of_ent >= 5000:\n logging.error(\n \"more than 5000 entries expected for %s in year %d\" % (\n term, year))\n list_of_subjects = get_classifications()\n for subject in list_of_subjects:\n batch_start = 0\n search_results = self.search_by_term(term, start=batch_start,\n date=year, subject=subject)\n expected_num_of_ent = int(\n search_results[\"opensearch:totalResults\"])\n if self.status_code is not 200 or expected_num_of_ent is 0:\n logging.info(\n \" %s in year %d contains no results\" % (term, year))\n pass\n\n num_batches = self.get_num_batches(expected_num_of_ent)\n for batch in trange(num_batches, ascii=True,\n desc=str(year)+str(subject)):\n batch_start = self.batch_size * batch\n search_results = self.search_by_term(term,\n start=batch_start,\n date=year,\n subject=subject)\n try:\n for entry in search_results['entry']:\n results_year.append(entry)\n except:\n logging.error(\n \"failed to retrieve %s in year %d\" % (term, year))\n break\n\n return results_year", "def get_available_years(self):\n \n items = [] \n query = {} \n portal_catalog = getToolByName(self, \"portal_catalog\")\n \n query[\"portal_type\"] = \"RepositoryItem\"\n query[\"path\"] = {\"query\" : \"/\".join(self.context.getPhysicalPath()),\n \"depth\" : 2 }\n \n brains = portal_catalog.searchResults(query)\n \n for item in brains:\n year = str(item[\"item_publication_year\"]).strip()\n if year not in items:\n items.append( year )\n \n # Sort the years in the least\n items = sorted(items, reverse=True)\n \n return items", "def year_subset(self,year_pl):\n index_list=[year_pl+str(i) for i in range(1,53,2)]\n index_list.extend(self.taxonomic_levels)\n df=self.df.loc[:,index_list]\n self.df=df.loc[df.max(axis=1)>100]", "def of_year(cls, year):\n start = datetime(year, 1, 1)\n start_quarter = list(\n rrule(MONTHLY, interval=3, dtstart=start, count=4)\n )\n end_quarter = [\n date + relativedelta(months=3, days=-1) for date in start_quarter\n ]\n return [cls(*item) for item in list(zip(start_quarter, end_quarter))]", "def retrieve_all_in_year(self, term, year) -> list:\n results_year = list()\n batch_start = 0\n\n # test the connection to the servers and determine the number of\n # entities to expect for the given search parameters.\n search_results = 
self.search_by_term(term, start=batch_start, date=year)\n expected_num_of_ent = int(search_results[\"opensearch:totalResults\"])\n if self.status_code is not 200 or expected_num_of_ent is 0:\n logging.info(\" %s in year %d contains no results\" % (term, year))\n pass\n\n # If there are entities in the search. Download the entities in\n # batches. The API limits the number of entities that can be\n # downloaded through the API to 5000. If the number of entities is less\n # than 5000, a general search will provide all the entities.\n if 0 < expected_num_of_ent < 5000:\n num_batches = self.get_num_batches(expected_num_of_ent)\n for batch in trange(num_batches, ascii=True, desc=str(year)):\n batch_start = self.batch_size * batch\n try:\n search_results = self.search_by_term(term,\n start=batch_start,\n date=year)\n for entry in search_results['entry']:\n results_year.append(entry)\n except EOFError:\n logging.error(\n \"failed to retrieve %s in year %d\" % (term, year))\n break\n except KeyError:\n logging.warning(\n \"no data to retrieve %s in year %d\" % (term, year))\n break\n # if the number of entities exceeds 5000 the search must be further\n # refined to searches by subject field, such as medical or\n # engineering. This extended search iterates over each subject field\n # within the general search parameters to work around the 5000 word\n # limit.\n elif expected_num_of_ent >= 5000:\n logging.warning(\n \"more than 5000 entries expected for %s in year %d\"\n % (term, year))\n # get a list of classifications for the search parameters.\n list_of_subjects = get_classifications()\n for subject in list_of_subjects:\n batch_start = 0\n search_results = self.search_by_term(term, start=batch_start,\n date=year, subject=subject)\n expected_num_of_ent = int(\n search_results[\"opensearch:totalResults\"])\n if self.status_code is not 200 or expected_num_of_ent is 0:\n logging.info(\n \" %s in year %d contains no results\" % (term, year))\n pass\n\n num_batches = self.get_num_batches(expected_num_of_ent)\n for batch in trange(num_batches, ascii=True,\n desc=str(year)+str(subject)):\n batch_start = self.batch_size * batch\n search_results = self.search_by_term(term,\n start=batch_start,\n date=year,\n subject=subject)\n try:\n for entry in search_results['entry']:\n results_year.append(entry)\n except KeyError:\n logging.warning(\n \"no data to retrieve %s in year %d\"\n % (term, year))\n break\n\n return results_year" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes a movie from the table.
def delete_movie(self, title, year):
    try:
        self.table.delete_item(Key={'year': year, 'title': title})
    except ClientError as err:
        logger.error(
            "Couldn't delete movie %s. Here's why: %s: %s", title,
            err.response['Error']['Code'], err.response['Error']['Message'])
        raise
[ "def delete_movie(self, movieid):\n query = 'DELETE FROM video_lib_movies WHERE MovieID = ?'\n self._execute_query(query, (movieid,))", "def remove_movie(self, movie_id):\n self.__repo.delete(movie_id)", "def delete_movies():\n movie_id = int(request.args.get('id', 0))\n\n if not movie_id:\n return ResponseMaker(ResponseMaker.RESPONSE_400, ResponseMaker.RESPONSE_400_MESSAGE,\n ResponseMaker.RESPONSE_400_ERROR_MISSING_FIELDS).return_response()\n\n try:\n with terminating_sn() as session:\n MoviesDao.delete_movie_from_db(session, movie_id)\n\n return ResponseMaker(ResponseMaker.RESPONSE_200).return_response(\n ResponseMaker.RESPONSE_200_MESSAGE)\n\n except Exception:\n session.rollback()\n LOG.exception(\"Exception occurred while deleting movie id {} from db\".format(movie_id))\n return ResponseMaker(ResponseMaker.RESPONSE_500).return_response(\n ResponseMaker.RESPONSE_500_MESSAGE)", "def delete_movies(request, movie_id, *args, **kwargs):\n try:\n movie = Movie.objects.filter(id=movie_id)\n if movie.exists():\n movie.delete()\n return Response({'msg': 'Movie deleted.'}, status=status.HTTP_200_OK)\n else:\n return Response({'msg': 'Movie does not exist.'}, status=status.HTTP_400_BAD_REQUEST)\n except Exception as e:\n return Response({'msg': 'Some Execption Occured.', 'Execption': e}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def handle_movie_delete_request(name):\n name = name.replace(\"_\", \" \")\n if name in MOVIES:\n del MOVIES[name]\n return make_response(jsonify(\"Deleted Successfully\"), 201)\n else:\n return make_response(jsonify(\"Movie not in database.\"), 400)", "def delete(self, request, movie_uuid):\n if not permission_check(request, role='SuperUser'):\n return access_denied()\n response = MovieHandlers().remove_movie(\n movie_uuid)\n return JsonResponse(response, safe=False)", "def delete_video_by_id(video):\n db.session.delete(video)\n db.session.commit()\n return jsonify(video.to_dict()), 200", "def test_delete_movies(self):\n response = self.client.delete('/movies/1')\n body = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(body['message'], 'Movie Successfully deleted.')", "def test_delete_movie(self):\n response = self.client.delete('/movies/0')\n body = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertEqual(body['message'], \"resource not found\")", "def delete_file(session,title):\r\n session.query(File).filter(File.title == title).delete()\r\n session.commit()", "def delete_monster(cls, monster_id, database=db_constants.DATABASE_PATH):\n\t\tconn = sqlite3.connect(database) # connect to that database (will create if it doesn't already exist)\n\t\tc = conn.cursor() # make cursor into database (allows us to execute commands)\n\t\t# Delete monster from monster table with given id\n\t\tc.execute('''DELETE FROM monster_table WHERE id = ?;''',(monster_id,))\n\t\tconn.commit() # commit commands\n\t\tconn.close() # close connection to database", "def delete(cls, id_):\n try:\n title = cls.query.filter_by(id=id_).one()\n db.session.delete(title)\n db.session.commit()\n except sqlalchemy.exc.SQLAlchemyError:\n db.session.rollback()\n raise", "def delete_record():", "def remove_movie(self, title, year):\n title = re.sub(r'[?|$|!|:|#]', r'', title)\n movie_meta = '%s (%d)' % (title, year)\n folder = re.sub(\n pattern=r'[?|$|!|:|#]',\n repl=r'',\n string=self.db[self.movies_label][movie_meta]['alt_title'])\n progress = xbmcgui.DialogProgress()\n 
progress.create(self.kodi_helper.get_local_string(1210), movie_meta)\n progress.update(50)\n time.sleep(0.5)\n del self.db[self.movies_label][movie_meta]\n self._update_local_db(filename=self.db_filepath, db=self.db)\n dirname = self.nx_common.check_folder_path(\n path=os.path.join(self.movie_path, folder))\n filename = os.path.join(self.movie_path, folder, movie_meta + '.strm')\n if xbmcvfs.exists(dirname):\n xbmcvfs.delete(filename)\n xbmcvfs.rmdir(dirname)\n return True\n return False\n time.sleep(1)\n progress.close()", "def deletegamers():\n connection = connect()\n cursor = connection.cursor()\n sqlquery = \"DELETE FROM gamer\"\n cursor.execute(sqlquery)\n connection.commit()\n connection.close()", "def delete_record(self, id):\n sql = 'DELETE FROM %s WHERE id=%s' % (self.table, id)\n print(sql)\n self.curs.execute(sql)\n self.conn.commit()", "def delete(self, sql):", "def delete_player(cls, player_id, database=db_constants.DATABASE_PATH):\n\t\tconn = sqlite3.connect(database) # connect to that database (will create if it doesn't already exist)\n\t\tc = conn.cursor() # make cursor into database (allows us to execute commands)\n\t\t# Delete player from player table with given id\n\t\tc.execute('''DELETE FROM player_table WHERE id = ?;''',(player_id,))\n\t\tconn.commit() # commit commands\n\t\tconn.close() # close connection to database", "async def delete(self, key):\n _LOGGER.debug(_(\"Deleting %s from sqlite\"), key)\n\n cur = await self.client.cursor()\n await cur.execute(\"DELETE FROM {} WHERE key=?\".format(self.table), (key,))\n await self.client.commit()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets sample movie data, either from a local file or by first downloading it from the Amazon DynamoDB developer guide.
def get_sample_movie_data(movie_file_name):
    if not os.path.isfile(movie_file_name):
        print(f"Downloading {movie_file_name}...")
        movie_content = requests.get(
            'https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/samples/moviedata.zip')
        movie_zip = ZipFile(BytesIO(movie_content.content))
        movie_zip.extractall()
    try:
        with open(movie_file_name) as movie_file:
            movie_data = json.load(movie_file, parse_float=Decimal)
    except FileNotFoundError:
        print(f"File {movie_file_name} not found. You must first download the file to "
              "run this demo. See the README for instructions.")
        raise
    else:
        # The sample file lists over 4000 movies, return only the first 250.
        return movie_data[:250]
[ "def download_movie(self, title):\n\n # Call the client method to get a dictionary with the movies' data.\n data = get_movie_data(title)\n\n # Check to see if there is a key in the dictionary called 'Error'\n if not \"Error\" in data:\n\n # Add the values to the database.\n self.movie_db.add_movie_data(data)\n return data\n\n else:\n print(\"Error getting: {0}. Please check the title.\".format(title))\n return None", "def get_movie_details(movie_id):\r\n\r\n url = \"https://api.themoviedb.org/3/movie/\" + \\\r\n str(movie_id) + \"?api_key=\" + config.FLASK_TMDB_API_KEY + \"&language=en-US\"\r\n\r\n # results = DB.Movie.find({\"tmdb_id\": str(movie_id)})\r\n\r\n # Cache Data in our Database\r\n # if results.count() != 1:\r\n #\r\n # results = requests.get(url).json()\r\n #\r\n #\r\n # movie = Movie()\r\n # movie.tmdb_id = results[\"id\"]\r\n # movie.original_title = results[\"original_title\"]\r\n # movie.popularity = results[\"popularity\"]\r\n #\r\n # DB.Movie.post(json.dumps(movie))\r\n\r\n results = requests.get(url)\r\n\r\n return results.json(), results.status_code", "def read_movielens_data(url):\n\n data = urllib.request.urlopen(url).read()\n downloaded_zip = zipfile.ZipFile(six.BytesIO(data))\n logging.info('Downloaded zip file containing: %s', downloaded_zip.namelist())\n movies_df = pd.read_csv(\n downloaded_zip.open('ml-1m/movies.dat', 'r'),\n sep='::',\n names=['movieId', 'title', 'genres'],\n encoding='iso-8859-1')\n\n users_df = pd.read_csv(\n downloaded_zip.open('ml-1m/users.dat', 'r'),\n sep='::',\n names=['userId', 'sex', 'age', 'occupation', 'zip_code'],\n encoding='iso-8859-1')\n\n ratings_df = pd.read_csv(\n downloaded_zip.open('ml-1m/ratings.dat', 'r'),\n sep='::',\n names=['userId', 'movieId', 'rating', 'timestamp'],\n encoding='iso-8859-1')\n return movies_df, users_df, ratings_df", "def retrieve_movie_from_id(movie_id):\n logging.info('Retrieving %s', movie_id)\n\n url = BASE_URL_MYAPIFILMS + 'imdb?idIMDB=' + movie_id + '&format=JSON&aka=1&business=0&seasons=0&seasonYear=0&technical=0&filter=N&exactFilter=0&limit=1&lang=en-us&actors=S&biography=0&trailer=1&uniqueName=0&filmography=0&bornDied=0&starSign=0&actorActress=0&actorTrivia=0&movieTrivia=0&awards=0&token=307cccfe-d20b-4b69-b976-d6a024538864'\n\n json_page = get(url).encode('utf-8')\n json_data = json.loads(json_page)\n\n movie = Movie(id=json_data['idIMDB'],\n plot=json_data['plot'],\n poster=clear_url(json_data['urlPoster']) if ('urlPoster' in json_data and json_data['urlPoster'] != \"\") else None,\n rated=json_data['rated'],\n simple_plot=json_data['simplePlot'],\n genres=json_data['genres'])\n\n try:\n trailer_url = json_data['trailer']['videoURL']\n movie.trailer = trailer_url\n except KeyError:\n movie.trailer = None\n\n movie.original_title = json_data['title']\n\n akas = json_data['akas']\n for aka in akas:\n if aka['country'] == 'Italy':\n movie.title = aka['title']\n\n run_times = json_data['runtime']\n if len(run_times) == 0:\n movie.run_times = None\n else:\n movie.run_times = run_times[0]\n\n year = json_data['year']\n if len(year) > 4:\n year = year[-4:]\n\n movie.year = year\n key = movie.put()\n actors_list = json_data['actors']\n directors_list = json_data['directors']\n writers_list = json_data['writers']\n\n retrieve_artists(movie, actors_list, directors_list, writers_list)\n\n logging.info('Retrieved %s', movie_id)\n return key", "def get_movie_data(doc_id):\n\n s=read_data(\"doc-data.json\")\n\n movie=s.get(doc_id)\n title = movie.get(\"Title\")[0]\n director = \", 
\".join(movie.get(\"Director\"))\n starring = \", \".join(movie.get(\"Starring\"))\n country = \", \".join(movie.get(\"Country\"))\n location = \", \".join(movie.get(\"Location\"))\n text = movie.get(\"Text\")\n\n\n\n\n movie_object = {\"title\": title,\n \"director\": director,\n \"starring\": starring,\n \"country\": country,\n \"location\": location,\n \"text\": text\n }\n\n\n return movie_object", "def api_request(movie: str) -> dict:\n url = \"https://movie-database-imdb-alternative.p.rapidapi.com/\"\n querystring = {\"s\":movie,\"page\":\"1\",\"r\":\"json\"}\n headers = headerrequest()\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n return response.text", "def get_movie(title):\n params = {\n 't': title,\n 'apikey': settings.OMDB_API_KEY\n }\n\n r = requests.get(settings.OMDB_URL, params=params)\n response = r.json()\n\n if not r.ok:\n raise requests.exceptions(r.status_code, 'OMDB API error')\n\n else:\n response = r.json()\n if response['Response'] == 'False':\n \"\"\" When OMDB API can't find a movie status code is 200 \"\"\"\n raise (requests.exceptions.HTTPError(404, response['Error']))\n else:\n return response", "def get_random_movie():\n movies = pd.read_table(os.path.join(data_dir, 'movies.dat'), sep='::', header=None, names=mname, engine='python')\n movies.title = movies.title.apply(replace_the)\n ran_movie = movies.sample()\n movie_name = str(ran_movie['title'].values[0])\n movie_info = get_movie_details(movie_name)\n movie_info['movie_id'] = int(ran_movie['movie_id'])\n movie_info['movie_genres'] = ran_movie['genres'].values[0].split('|')\n\n return movie_info", "def load_movie_from_api(self, movie_title):\n columns = ['Title', 'Year', 'Runtime', 'Genre', 'Director', 'Actors', 'Writer', 'Language', 'Country', 'Awards',\n 'imdbRating', 'imdbVotes', 'BoxOffice']\n movie_data = Api.get(f'http://www.omdbapi.com/?t={movie_title}&apikey=39f41e43')\n data = {}\n for column in columns:\n data.update({column: movie_data[column]} if column in movie_data else {column: 'N/A'})\n self.data.append(data)", "def read_data(filename):\n with open(filename) as fin:\n movies = [json.loads(l) for l in fin]\n\n return movies", "def get_movie(self, title, year):\n try:\n response = self.table.get_item(Key={'year': year, 'title': title})\n except ClientError as err:\n logger.error(\n \"Couldn't get movie %s from table %s. 
Here's why: %s: %s\",\n title, self.table.name,\n err.response['Error']['Code'], err.response['Error']['Message'])\n raise\n else:\n return response['Item']", "def get_movie_info(movie):\n # Query TheMovie DB\n connection = urllib.urlopen(\"http://api.themoviedb.org/3/search/movie?query=\"+movie+\"&api_key=\"+str(api_key))\n movieDB_json = json.loads(connection.read())\n\n # Our Movie ID\n movie_id = movieDB_json['results'][0]['id']\n print(\"Movie ID is: \"+str(movie_id))\n connection.close()\n\n # Grab extended movie info\n extended_connection = urllib.urlopen(\"http://api.themoviedb.org/3/movie/\"+str(movie_id)+\"?api_key=\"+str(api_key)+\"&append_to_response=videos\")\n # Movie 'dict' object\n mdict = json.loads(extended_connection.read())\n print(mdict)\n extended_connection.close()\n\n print(\"VIDEO INFO\")\n print mdict['videos']['results']\n videos = mdict['videos']['results']\n mvideo = \"\"\n for video in videos:\n print video['name']\n if video['name'] == \"Official Trailer\":\n mvideo = video\n break\n\n # In the case there is no 'Official Trailer' named, grab the 1st video associated\n if mvideo == \"\":\n mvideo = videos[0]\n\n print (\"MVIDEO is: \")\n print mvideo\n print mvideo['key']\n\n # Our formatted movie info output\n movie_info = media.Movie(str(mdict['original_title']),\n str(mdict['runtime']),\n str(mdict['overview'].encode('utf8')),\n str(config['images']['base_url']+POSTER_SIZE+str(mdict['poster_path'])),\n \"https://www.youtube.com/watch?v=\"+str(mvideo['key']))\n\n print(movie_info)\n return movie_info", "def get_omdb_data(films):\n\n omdb_key = config.omdb_key\n films_list = []\n missed = []\n bad_response = 0\n\n # Perform a query for each entry from TMDb.\n for film in tqdm(films['imdb_id']):\n entry = requests.get('http://omdbapi.com/?i=' + film +\n '&apikey=' + omdb_key)\n\n if entry.status_code==200:\n f = entry.json()\n films_list += [f]\n else:\n bad_response +=1\n print('Couldn\\'t get ' + 'http://omdbapi.com/?i=' + film + '&apikey=' + omdb_key)\n\n for i,a in enumerate(films_list):\n a['RT_score']=a['Metacritic_score']=a['IMdb_score']='NaN'\n# print(a)\n try:\n if len(a['Ratings'])==0:\n pass\n\n# Iterate through the Ratings element, stored as a list of dictionaries #\n for b in a['Ratings']:\n if b['Source'] == 'Internet Movie Database':\n a['IMdb_score']= float(b['Value'][:3])*10\n elif b['Source'] == 'Rotten Tomatoes':\n a['RT_score']= float(b['Value'].split('%')[0])\n elif b['Source'] == 'Metacritic':\n a['Metacritic_score'] = float(b['Value'].split('/')[0])\n except:\n continue\n\n return films_list", "def create_movies():\n movies = []\n try:\n with open('movies.json') as data_file:\n data = json.load(data_file)\n for info in data.values():\n movies.append(media.Movie(info[\"title\"], info[\"poster\"],\\\n info[\"trailer\"], info[\"overview\"]))\n except IOError, err:\n print \"Can't open file: \", err\n return movies", "def read_movies(filename):\n movies = []\n with open(filename, 'r') as movie_file:\n for movie_record in csv.DictReader(movie_file):\n movies.append(\n Movie(movie_record['Title'], int(movie_record['Year'])))\n return movies", "def read_movie_data(data_file):\n fdata = open(data_file, \"r\")\n movies = []\n for line in fdata:\n data = [x.strip() for x in line.rstrip().split(\",\")]\n movies.append(media.Movie(data))\n fdata.close()\n return movies", "def load_movies():\n movie_file = open(\"seed_data/u.item\")\n # start = time.time()\n\n for line in movie_file:\n movie_info = line.rstrip().split(\"|\")\n if movie_info[2]: \n 
release_date = datetime.strptime(movie_info[2], \"%d-%b-%Y\")\n movie = Movie(movie_name=movie_info[1][:-7], release_date=release_date, imdb_url=movie_info[4])\n db.session.add(movie)\n\n # print \"The load_movies for loop took\", time.time() - start, \"ms to run\" \n\n db.session.commit()", "def get_movie_data(self): \n raw_release_date = self._get_movie_value('Release Date')\n release_date = self._to_date(raw_release_date)\n raw_domestic_total_gross = self._get_movie_value('Domestic Total')\n domestic_total_gross = self._money_to_int(raw_domestic_total_gross)\n raw_runtime = self._get_movie_value('Runtime')\n runtime = self._runtime_to_minutes(raw_runtime)\n title = self._get_title()\n rating = self._get_movie_value('MPAA Rating')\n raw_budget = self._get_movie_value('Production Budget:')\n budget = self._money_to_int(raw_budget)\n genre = self._get_movie_value('Genre:')\n raw_opening_income_wend = self._get_opening_income()\n opening_income_wend = self._money_to_int(raw_opening_income_wend)\n distributor = self._get_movie_value('Distributor:')\n opening_theaters = self._get_opening_theaters()\n director = self._get_people('Director')\n actors = self._get_people('Actor')\n headers = ['BOM_id',\n 'movie_title',\n 'domestic_total_gross',\n 'release_date',\n 'runtime_mins',\n 'rating',\n 'budget',\n 'genre',\n 'opening_income_wend',\n 'distributor',\n 'opening_theaters',\n 'director',\n 'actors']\n movie_dict = dict(zip(headers, [self.BOM_id,\n title,\n domestic_total_gross,\n release_date,\n runtime,\n rating,\n budget,\n genre,\n opening_income_wend,\n distributor,\n opening_theaters,\n director,\n actors]))\n return movie_dict", "def download_data():\n url_base = \"https://datasets.imdbws.com\"\n fnames = [\n \"name.basics.tsv.gz\", \"title.akas.tsv.gz\", \"title.basics.tsv.gz\",\n \"title.crew.tsv.gz\", \"title.episode.tsv.gz\",\n \"title.principals.tsv.gz\", \"title.ratings.tsv.gz\"]\n\n for fname in fnames:\n url = os.path.join(url_base, fname)\n resp = requests.get(url)\n\n data_dir = os.path.join(home_dir, \"data\", fname)\n open(data_dir, \"wb\").write(resp.content)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search for flights using an API.
def search_for_flights(apikey, **kwargs):
    flight_search_paramaters = kwargs
    flight_search_paramaters['apikey'] = apikey
    flight_search_paramaters['currency'] = "USD"  # since US Dollars is the most popular currency
    flight_search_response = requests.get(flight_booking_search, params=flight_search_paramaters).json()
    return flight_search_response
[ "def find_flight(payload):\r\n\r\n flight_search = request_server_response(requests.get, \"https://api.skypicker.com/flights?\", params=payload)\r\n\r\n flight_search_results = parse_json(flight_search)\r\n\r\n # if no results returned tell the user to fix the parameters\r\n if flight_search_results[\"_results\"] == 0:\r\n print(\"No flights found. Check spelling of parameters. Use -h or --help for more information.\")\r\n sys.exit(1)\r\n\r\n # book flight. Note that the results are already sorted by the desired property.\r\n # return booking token\r\n return flight_search_results[\"data\"][0][\"booking_token\"]", "def test_get_all_flights(self):\n\n res = self.client().get(\n '/api/v1/flight',\n headers={\n 'content-type': 'application/json'\n }\n )\n self.assertEqual(res.status_code, 200)", "def test_get_flights_by_location(self):\n\n res = self.client().get(\n '/api/v1/flight/location/Lagos/Los',\n headers={\n 'content-type': 'application/json'\n }\n )\n self.assertEqual(res.status_code, 200)", "def getFlight(date, f, t, ret, cheapest, fastest, oneway):\n\n\tparams = {\n\t\t'dateFrom': date,\n\t\t'daysInDestinationFrom': (ret if ret else None),\n\t\t'flyFrom': f,\n\t\t'to': t,\n\t\t'typeFlight': ('oneway' if oneway else 'round'),\n\t\t'sort': ('price' if cheapest else 'duration')\n\t}\n\tdata = requests.get('https://api.skypicker.com/flights', params=params).json()\n\treturn data['data'][0] if data.get('data') else None", "def search(api_key, term, location):\n\n url_params = {\n 'name': term.replace(' ', '+'),\n 'address1': location.replace(' ', '+'),\n 'city': 'Long Beach',\n 'state': 'US',\n 'country': 'US',\n 'limit': SEARCH_LIMIT\n }\n return request(API_HOST, MATCH_PATH, api_key, url_params=url_params)", "def search_for_hotels(apikey, **kwargs):\n hotel_search_parameters = kwargs\n hotel_search_parameters['apikey'] = apikey\n hotel_search_parameters['currency'] = \"USD\" # since US Dollars is the most popular currency\n hotel_api_response = requests.get(hotel_booking_search, params=hotel_search_parameters).json()\n return hotel_api_response", "def searchRestaurantsWith(keyword):\n # construct proper URL\n entity_id = findLocation()\n apiUrl = (strings.ZOMATO_SEARCH_URL).format(config.zomato_api_key, entity_id, urllib.urlencode({'q':keyword}))\n print(apiUrl)\n\n # call zomato api\n json_data = requests.get(apiUrl).json()\n print(json.dumps(json_data))\n\n # add each restaurant name to the list of restaurants\n restaurants = []\n size = json_data['results_found']\n for i in range(size):\n r_name = json_data['restaurants'][i]['restaurant']['name']\n r_address = json_data['restaurants'][i]['restaurant']['location']['address']\n r_avgCost = json_data['restaurants'][i]['restaurant']['average_cost_for_two']\n r_rating = str(json_data['restaurants'][i]['restaurant']['user_rating']['aggregate_rating'])\n # create new restaurant object\n restaurants[i] = buildRestaurant(r_name,r_address,r_avgCost,r_rating)\n \n return restaurants", "def request_swapi(url, s_string):\n payload = {'search': s_string}\n req = requests.get(url, params=payload).json()\n num = req.get('count')\n print('Number of results: {}'.format(num))\n if num > 0:\n search_res = req.get('results')\n for indiv in search_res:\n print(indiv.get('name'))", "def get_flights_service_handler(self, req):\n # Build array of flight names\n flights = [flight[0] for flight in self._available_flights]\n\n # Return flights\n return GetFlightsResponse(flights)", "def search(api_key, term, location, category, url_params):\n return 
request(API_HOST, SEARCH_PATH, api_key, url_params)", "def foodtrucks():\n try:\n longitude = float(request.args.get('longitude'))\n latitude = float(request.args.get('latitude'))\n distance = float(request.args.get('distance'))\n except (TypeError, ValueError):\n abort(400)\n\n foodtrucks = [x for x in get_db().gen_within_distance(distance, latitude, longitude)]\n\n return make_response(\n json.dumps(foodtrucks),\n 201\n )", "def airports():\n\n queryType = \"SQL++ query - scoped to inventory: \"\n partialAirportName = request.args['search']\n\n queryPrep = \"SELECT airportname FROM `travel-sample`.inventory.airport WHERE \"\n sameCase = partialAirportName == partialAirportName.lower() or partialAirportName == partialAirportName.upper() #bool\n\n # The code does some guesswork to determine what the user is typing in.\n # This is based on string length and capitalization. If it believes the\n # string is an FAA or ICAO code, it queries for a match in the 'faa' or\n # 'icao' field. Otherwise, the code assumes a partial airport name, and\n # queries for a substring match at the start of the 'airportname' field\n\n if sameCase and len(partialAirportName) == 3:\n queryPrep += \"faa=$1\"\n queryArgs = [partialAirportName.upper()]\n elif sameCase and len(partialAirportName) == 4:\n queryPrep += \"icao=$1\"\n queryArgs = [partialAirportName.upper()]\n else:\n queryPrep += \"POSITION(LOWER(airportname), $1) = 0\"\n queryArgs = [partialAirportName.lower()]\n\n results = cluster.query(queryPrep, *queryArgs)\n airports = [x for x in results]\n\n # 'context' is returned to the frontend to be shown in the Query Log\n\n context = [queryType + queryPrep]\n\n response = make_response(jsonify({\"data\": airports, \"context\": context}))\n return response", "def search_rooftop():\n street = request.json.get(\"street\")\n city = request.json.get(\"city\")\n state = request.json.get(\"state\")\n radius = request.json.get(\"radius\")\n\n # Convert user's input to meters\n radius = int(radius)*1607\n\n # Define the API Key, endpoing, and header\n endpoint = 'https://api.yelp.com/v3/businesses/search'\n headers = {'Authorization': 'Bearer %s' % YELP_API_KEY}\n\n # Define the parameters\n parameters = { 'term': 'rooftop bar',\n 'limit': 10,\n 'location': f\"{street}, {city}, {state}\",\n 'radius': radius}\n \n # Make a request to the Yelp API\n response = requests.get(url=endpoint, params=parameters, headers=headers)\n print(response.status_code)\n # Translate the returned JSON string to a dict\n rooftop_data = response.json()\n\n user_id = User.query.first().user_id\n \n businesses = rooftop_data[\"businesses\"]\n for business in businesses:\n yelp_id = business[\"id\"]\n favorite = crud.get_favorite(user_id, yelp_id)\n business[\"favorited\"] = True if favorite else False\n return jsonify(businesses)", "def get_lights():\n\n\treturn requests.get(LIGHTS_API).json()", "def openflights_post_request(data):\n link = 'https://openflights.org/php/apsearch.php'\n response = requests.post(link, data=data)\n if response.status_code not in range(200,299):\n raise IOError('error: %s' % response.text)\n return response.json()", "def all_restaurants(api_key, params):\n\n data1 = yelp_search(api_key, params)\n\n records_num = data1[\"total\"]\n requests_num = records_num // 20 + 1\n offset = 0\n result = []\n\n for i in range(requests_num):\n # 20 restaurants per request\n curr_offset = offset + i * 20\n params[\"offset\"] = curr_offset\n data = yelp_search(api_key, params)\n result += data[\"businesses\"]\n # pause slightly 
between requests\n time.sleep(.300)\n\n return result", "def flights():\n cursor = get_cursor()\n cursor.execute(\"SELECT name FROM airport\")\n airports = cursor.fetchall()\n if request.method == \"POST\":\n start_date = request.form['start_date']\n end_date = request.form['end_date']\n dept_airport = request.form['dept_airport']\n arrv_airport = request.form['arrv_airport']\n cursor.execute('SELECT dept_airport, arrv_airport, DATE_FORMAT(dept_time, \"%%Y %%M %%D %%T\"), DATE_FORMAT(dept_time, \"%%Y %%M %%D %%T\"), flight_status, base_price, flight_id FROM flight WHERE airline = %s AND DATE(dept_time) BETWEEN DATE(%s) AND DATE(%s) AND dept_airport = %s AND arrv_airport = %s',\n (g.user[5], start_date, end_date, dept_airport, arrv_airport))\n flights = cursor.fetchall()\n n_flights = []\n for flight in flights:\n flight = list(flight)\n if flight[4] == 0:\n flight[4] = \"On Time\"\n elif flight[4] == 1:\n flight[4] = \"Delayed\"\n n_flights.append(flight)\n else:\n # get flights in the following 30 days\n cursor.execute(\n 'SELECT dept_airport, arrv_airport, DATE_FORMAT(dept_time, \"%%Y %%M %%D %%T\"), DATE_FORMAT(dept_time, \"%%Y %%M %%D %%T\"), flight_status, base_price, flight_id FROM flight WHERE airline = %s AND DATE(dept_time) BETWEEN CURDATE() AND DATE_ADD(CURDATE(), INTERVAL 30 DAY)', (g.user[5]))\n flights = cursor.fetchall()\n n_flights = []\n for flight in flights:\n flight = list(flight)\n if flight[4] == 0:\n flight[4] = \"On Time\"\n elif flight[4] == 1:\n flight[4] = \"Delayed\"\n n_flights.append(flight)\n return render_template('a/flights.html', airports = airports, flights=n_flights)", "def test_hotel_searching():\n request_data = {'destination': 'e', 'checkin': '02-05-2018', 'checkout': '03-05-2018', 'is_bathroom': False,\n 'is_tv': False, 'is_wifi': False, 'is_bathhub': False, 'is_airconditioniring': False,\n 'sleeps': 1, 'price_from': 0, 'price_to': 0, 'quantity': 1}\n with allure.step('Hotel searching'):\n search_url = f'{url}/'\n req = client.post(search_url, data=request_data)\n assert req.status_code == FOUND\n assert 'search-hotel' in req.location\n\n with allure.step('Search hotel'):\n search_url = f'{url}/search-hotel'\n hotel_id_data = {'hotel_id': '1'}\n req = client.post(search_url, data=hotel_id_data)\n assert req.status_code == FOUND\n assert 'more-info' in req.location\n\n with allure.step('Search hotel'):\n req = client.get(search_url)\n assert req.status_code == OK\n\n with allure.step('[NEGATIVE]: Empty data'):\n req = owner.post(search_url, data={})\n assert req.status_code == ERROR", "def get_venue_foursquare_near(place,food='food',price=[1,2]):\n api_user=os.getenv('CLIENT_ID')\n api_key=os.getenv('CLIENT_SECRET') \n today = date.today().strftime(\"%Y%m%d\")\n\n \n params={\n 'near':place,\n 'radius':500,\n 'query':food,\n 'limit':5,\n 'sortByDistance':1,\n 'price':price,\n 'client_id':api_user,\n 'client_secret':api_key,\n 'v':today\n }\n\n \n baseUrl=\"https://api.foursquare.com\"\n endpoint='/v2/venues/explore/'\n url = f\"{baseUrl}{endpoint}\"\n \n\n res = requests.get(url,params=params)\n \n if res.json()['meta']['code'] != 200:\n \n print('ยฟSeguro que has escrito bien tu ubicaciรณn y lo que te apetece comer?')\n \n else:\n print(f\"Requested data to {baseUrl}; status_code:{res.status_code}\")\n \n return res" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Search for a hotel using an API.
def search_for_hotels(apikey, **kwargs):
    hotel_search_parameters = kwargs
    hotel_search_parameters['apikey'] = apikey
    hotel_search_parameters['currency'] = "USD"  # since US Dollars is the most popular currency
    hotel_api_response = requests.get(hotel_booking_search, params=hotel_search_parameters).json()
    return hotel_api_response
[ "def test_hotel_searching():\n request_data = {'destination': 'e', 'checkin': '02-05-2018', 'checkout': '03-05-2018', 'is_bathroom': False,\n 'is_tv': False, 'is_wifi': False, 'is_bathhub': False, 'is_airconditioniring': False,\n 'sleeps': 1, 'price_from': 0, 'price_to': 0, 'quantity': 1}\n with allure.step('Hotel searching'):\n search_url = f'{url}/'\n req = client.post(search_url, data=request_data)\n assert req.status_code == FOUND\n assert 'search-hotel' in req.location\n\n with allure.step('Search hotel'):\n search_url = f'{url}/search-hotel'\n hotel_id_data = {'hotel_id': '1'}\n req = client.post(search_url, data=hotel_id_data)\n assert req.status_code == FOUND\n assert 'more-info' in req.location\n\n with allure.step('Search hotel'):\n req = client.get(search_url)\n assert req.status_code == OK\n\n with allure.step('[NEGATIVE]: Empty data'):\n req = owner.post(search_url, data={})\n assert req.status_code == ERROR", "def single_search(self, hotel, session_id, *args, **kwargs):\n params = {'hotel': hotel, 'sessionID': session_id}\n params.update(self._underscore_to_camelcase(kwargs))\n return self._perform_request(url='hotel', params=params)", "def search(api_key, term, location, category, url_params):\n return request(API_HOST, SEARCH_PATH, api_key, url_params)", "def search(api_key, term, location):\n\n url_params = {\n 'name': term.replace(' ', '+'),\n 'address1': location.replace(' ', '+'),\n 'city': 'Long Beach',\n 'state': 'US',\n 'country': 'US',\n 'limit': SEARCH_LIMIT\n }\n return request(API_HOST, MATCH_PATH, api_key, url_params=url_params)", "def search_rooftop():\n street = request.json.get(\"street\")\n city = request.json.get(\"city\")\n state = request.json.get(\"state\")\n radius = request.json.get(\"radius\")\n\n # Convert user's input to meters\n radius = int(radius)*1607\n\n # Define the API Key, endpoing, and header\n endpoint = 'https://api.yelp.com/v3/businesses/search'\n headers = {'Authorization': 'Bearer %s' % YELP_API_KEY}\n\n # Define the parameters\n parameters = { 'term': 'rooftop bar',\n 'limit': 10,\n 'location': f\"{street}, {city}, {state}\",\n 'radius': radius}\n \n # Make a request to the Yelp API\n response = requests.get(url=endpoint, params=parameters, headers=headers)\n print(response.status_code)\n # Translate the returned JSON string to a dict\n rooftop_data = response.json()\n\n user_id = User.query.first().user_id\n \n businesses = rooftop_data[\"businesses\"]\n for business in businesses:\n yelp_id = business[\"id\"]\n favorite = crud.get_favorite(user_id, yelp_id)\n business[\"favorited\"] = True if favorite else False\n return jsonify(businesses)", "def searchRestaurantsWith(keyword):\n # construct proper URL\n entity_id = findLocation()\n apiUrl = (strings.ZOMATO_SEARCH_URL).format(config.zomato_api_key, entity_id, urllib.urlencode({'q':keyword}))\n print(apiUrl)\n\n # call zomato api\n json_data = requests.get(apiUrl).json()\n print(json.dumps(json_data))\n\n # add each restaurant name to the list of restaurants\n restaurants = []\n size = json_data['results_found']\n for i in range(size):\n r_name = json_data['restaurants'][i]['restaurant']['name']\n r_address = json_data['restaurants'][i]['restaurant']['location']['address']\n r_avgCost = json_data['restaurants'][i]['restaurant']['average_cost_for_two']\n r_rating = str(json_data['restaurants'][i]['restaurant']['user_rating']['aggregate_rating'])\n # create new restaurant object\n restaurants[i] = buildRestaurant(r_name,r_address,r_avgCost,r_rating)\n \n return restaurants", "def 
get_hotels_list(self, requirements):\n try:\n dest_ids = [self.get_dest_id(city) for city in requirements.cities]\n url = \"https://apidojo-booking-v1.p.rapidapi.com/properties/list\"\n querystring = {\n \"price_filter_currencycode\":\"USD\",\n \"travel_purpose\": \"leisure\",\n \"categories_filter\": \"price::9-40,free_cancellation::1,class::1,class::0,class::2\",\n \"search_id\": \"none\",\n \"order_by\": \"popularity\",\n \"languagecode\": \"en-us\",\n \"children_qty\": str(requirements.number_of_children),\n \"children_age\": \"5,7\",\n \"search_type\": \"city\",\n \"offset\": \"0\",\n \"dest_ids\": dest_ids,\n \"guest_qty\": str(requirements.number_of_guests),\n \"arrival_date\": requirements.start_date,\n \"departure_date\": requirements.end_date,\n \"room_qty\": str(requirements.number_of_rooms)\n }\n\n response = requests.request(\"GET\", url, headers=self.__headers, params=querystring)\n # return response.json().get('result', [])\n return response.json()\n except Exception as e:\n print(e)\n return []", "def search_for_flights(apikey, **kwargs):\n\n flight_search_paramaters = kwargs\n flight_search_paramaters['apikey'] = apikey\n flight_search_paramaters['currency'] = \"USD\" # since US Dollars is the most popular currency\n flight_search_response = requests.get(flight_booking_search, params=flight_search_paramaters).json()\n return flight_search_response", "def get(self, request, *args, **kwargs):\r\n query = request.GET.get('q')\r\n if not query:\r\n query = \"\"\r\n hotels = Hotel.objects.filter(\r\n city__name__icontains=query\r\n )\r\n return render(request, self.template_name, {'hotels':hotels, 'query':query})", "def all_restaurants(api_key, params):\n\n data1 = yelp_search(api_key, params)\n\n records_num = data1[\"total\"]\n requests_num = records_num // 20 + 1\n offset = 0\n result = []\n\n for i in range(requests_num):\n # 20 restaurants per request\n curr_offset = offset + i * 20\n params[\"offset\"] = curr_offset\n data = yelp_search(api_key, params)\n result += data[\"businesses\"]\n # pause slightly between requests\n time.sleep(.300)\n\n return result", "def findAVenue(location):\n\n latitude, longitude = getGeocodeLocation(location)\n # Dog-friendly places, according to Foursquare API Docs = 13.\n features = 13\n # Only return one match.\n matches = 1\n\n url = f\"https://api.foursquare.com/v2/venues/search?=client_id={forsquare_client_id}&client_secret={forsquare_client_secret}&v=20190521&ll={latitude},{longitude}&features={features}&limit={matches}\"\"\" \n\n r = requests.get(url).json()\n\n if r[\"response\"][\"venues\"]:\n venue = {}\n venue_id = r[\"response\"][\"venues\"][0][\"id\"]\n venue[\"name\"] = r[\"response\"][\"venues\"][0][\"name\"]\n venue_address = r[\"response\"][\"venues\"][0][\"location\"][\"formattedAddress\"]\n\n # Format venue address in one string.\n address = \"\"\n\n for i in venue_address:\n address += i + \" \"\n\n venue[\"address\"] = address\n\n # Get venue photo via another request.\n url = f\"https://api.foursquare.com/v2/venues/{venue_id}/photos?client_id={forsquare_client_id}&v=20190521&client_secret={forsquare_client_secret}\"\"\"\n\n r = requests.get(url).json()\n\n if r[\"response\"][\"photos\"][\"items\"]:\n firstpic = r[\"response\"][\"photos\"][\"items\"][0]\n prefix = firstpic[\"prefix\"]\n suffix = firstpic[\"suffix\"]\n img_url = f\"{prefix}300x300{suffix}\"\n else:\n img_url = 
\"https://img.evbuc.com/https%3A%2F%2Fcdn.evbuc.com%2Fimages%2F38670528%2F108919755319%2F1%2Foriginal.jpg?auto=compress&s=32c728ebfab7bb7cab9cf42307962b37\"\n\n venue[\"img_url\"] = img_url\n\n return venue\n else:\n\n return \"No matching venues.\"", "def search(self, params={}):\n params['limit'] = self.single_page_limit\n h_url = self.query_url.format(query=urlencode(params))\n #print h_url\n json = requests.get(h_url).json()\n return json", "def search_suggest():\n user_input = request.args.get('text')\n latitude = request.args.get('latitude', DEFAULT_LATITUDE)\n longitude = request.args.get('longitude', DEFAULT_LONGITUDE)\n\n if not user_input:\n return json.dumps({})\n\n yelp_session_obj = YelpAPI(api_key=YELP_API_KEY)\n autocomplete_suggestions = yelp_session_obj.autocomplete_query(\n text=user_input,\n latitude=latitude,\n longitude=longitude,\n )\n\n response = {\n 'businesses': autocomplete_suggestions['businesses'],\n 'categories': autocomplete_suggestions['categories'],\n }\n return json.dumps(response)", "def test_api_search_get(self):\n pass", "def search(foodName):\n #foodName = input(\"What did you eat today? \")\n url = \"https://api.nal.usda.gov/ndb/search/?format=json&q={}&max=50&sort=r&ds=Standard+Reference&offset=0&api_key={}\".format(ndbno,api_key)\n page = urllib.request.urlopen(url)\n data_bytes = page.read()\n data_string = data_bytes.decode('utf-8')\n page.close()\n data_dict = json.loads(data_string)\n # In format: dict: ( dict: list: ( dict: value ) )\n # [\"item\"][0] will return first search item\n ndbno = data_dict[\"list\"][\"item\"][0][\"ndbno\"]\n return ndbno", "def restaurant_search(\n self, query=\"\", latitude=\"\", longitude=\"\", cuisines=\"\", limit=30\n ):\n cuisines = \"%2C\".join(cuisines.split(\",\"))\n if str(limit).isalpha() == True:\n raise ValueError(\"LimitNotInteger\")\n headers = {\"Accept\": \"application/json\", \"user-key\": self.user_key}\n r = (\n requests.get(\n base_url\n + \"search?q=\"\n + str(query)\n + \"&count=\"\n + str(limit)\n + \"&lat=\"\n + str(latitude)\n + \"&lon=\"\n + str(longitude)\n + \"&cuisines=\"\n + str(cuisines),\n headers=headers,\n ).content\n ).decode(\"utf-8\")\n return r # a = ast.literal_eval(r)", "def get_venue_foursquare_near(place,food='food',price=[1,2]):\n api_user=os.getenv('CLIENT_ID')\n api_key=os.getenv('CLIENT_SECRET') \n today = date.today().strftime(\"%Y%m%d\")\n\n \n params={\n 'near':place,\n 'radius':500,\n 'query':food,\n 'limit':5,\n 'sortByDistance':1,\n 'price':price,\n 'client_id':api_user,\n 'client_secret':api_key,\n 'v':today\n }\n\n \n baseUrl=\"https://api.foursquare.com\"\n endpoint='/v2/venues/explore/'\n url = f\"{baseUrl}{endpoint}\"\n \n\n res = requests.get(url,params=params)\n \n if res.json()['meta']['code'] != 200:\n \n print('ยฟSeguro que has escrito bien tu ubicaciรณn y lo que te apetece comer?')\n \n else:\n print(f\"Requested data to {baseUrl}; status_code:{res.status_code}\")\n \n return res", "def test_api_v3_search_get(self):\n pass", "def fetch_yelp_data(yelp_baseurl, search_term):\n headers = {\"Authorization\": f\"Bearer {yelp_secrets.yelp_api_key}\"}\n params = {\"categories\": category, \"location\": search_term, \"locale\": \"en_US\", \"limit\": 50}\n response = requests.get(yelp_baseurl, params=params, headers=headers)\n text_response = response.text\n yelp_data = json.loads(text_response)\n return yelp_data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Bundle up all amenities into one string.
def serialize_hotel_amenities(old_amenities):
    new_amentities = ""
    if old_amenities:
        for old_amenity in old_amenities:
            new_amentities = new_amentities + ', ' + old_amenity['description']
        return new_amentities + "."
    else:
        return new_amentities
[ "def gen_ambush_text(encounter):\n ls = [(x.name, x.nameplural) for x in encounter]\n ls.sort()\n ls2 = []\n num = 0\n name = \"\"\n for value in ls:\n if value[0] == name:\n num += 1\n else:\n if num == 1:\n ls2.append(\"a \" + FMT_ENEMY.format(value[0]))\n elif num > 1:\n ls2.append(\"{} {}\".format(num, FMT_ENEMY.format(value[1])))\n name = value[0]\n num = 1\n if num == 1:\n ls2.append(\"a \" + FMT_ENEMY.format(value[0]))\n elif num > 1:\n ls2.append(\"{} {}\".format(num, FMT_ENEMY.format(value[1])))\n return join_list_pretty(ls2)", "def assets(self) -> str:\n return '\\n\\n\\n'.join('\\n'.join(item[1].assets) for item in self.items() if len(item[1].assets) > 0)", "def __str__(self) -> str:\n aux_str = \"\"\n for i in self.__agents:\n aux_str += str(i) + \"\\n\"\n return aux_str", "def ALL_Amenities():\n retval = []\n all_amenities = storage.all('Amenity')\n for amenities in all_amenities.values():\n retval.append(amenities.to_dict())\n return jsonify(retval)", "def get_items(self):\r\n options = \"\"\r\n for item in self.menu:\r\n options += f\"{item.name} ${item.cost:.2f} | \"\r\n return options", "def get_apartment_amenities(self, soup, apartment_dict):\n amenities_list_container = soup.find('div', id='amenities')\n amenities_list = []\n\n # Creates a comma seperated list of amenities\n for litag in amenities_list_container.find_all('li'):\n amenities_list.append(litag.text.strip())\n apartment_dict['amenities'] = amenities_list", "def generate_accessories(accessories, is_male):\n\n sentence = \"He\" if is_male else \"She\"\n sentence += \" is wearing\"\n\n def necktie_and_hat(attribute):\n \"\"\"\n Returns a grammatically correct sentence based on the attribute\n \"\"\"\n\n if attribute == \"necktie\" or attribute == \"hat\" or attribute == \"necklace\":\n return \"a \" + attribute\n return attribute\n\n if len(accessories) == 1:\n attribute = (\n accessories[0].lower()\n if accessories[0] == \"Eyeglasses\"\n else necktie_and_hat(accessories[0].lower().split(\"_\")[1])\n )\n return sentence + \" \" + attribute + \".\"\n\n for i, attribute in enumerate(accessories):\n attribute = (\n attribute.lower()\n if attribute == \"Eyeglasses\"\n else necktie_and_hat(attribute.lower().split(\"_\")[1])\n )\n\n if i == len(accessories) - 1:\n sentence = sentence[:-1]\n sentence += \" and \" + attribute + \".\"\n else:\n sentence += \" \" + attribute + \",\"\n\n return sentence", "def get_meal_str_a(meals_list, area_str):\r\n\r\n found = False\r\n areas = get_areas()\r\n\r\n # validate categories\r\n for i in range(len(areas)):\r\n area = areas[i]\r\n if area.get_area().lower() == area_str.lower():\r\n found = True\r\n break\r\n\r\n if found:\r\n meals = requests.get_meals_by_area(area_str.title())\r\n meals_list += area_str.upper() + \" MEALS\" + \"\\n\\n\"\r\n for i in range(len(meals)):\r\n meal = meals[i]\r\n meals_list += meal.get_meal() + \"\\n\"\r\n else:\r\n meals_list = \"Invalid Area, please try again\"\r\n\r\n return meals_list", "def amenities():\n # GET\n if request.method == \"GET\":\n return jsonify([amenity.to_dict()\n for amenity in storage.all(\"Amenity\").values()])\n\n # POST\n doc = request.get_json(silent=True)\n if doc is None:\n return \"Not a JSON\", 400\n if doc.get(\"name\") is None:\n return \"Missing name\", 400\n amenity = Amenity(**doc)\n amenity.save()\n return jsonify(amenity.to_dict()), 201", "def join(items):\n return ''.join(items)", "def accordance(self):\n user_bible = \"\"\n \n # Loop over all transcriptions and put them in a dictionary based on the bible 
verse\n bible_verse_to_transcriptions_dict = defaultdict(list)\n for transcription in self.transcriptions():\n transcription_clean = transcription.remove_markup()\n if transcription.verse.bible_verse:\n bible_verse_to_transcriptions_dict[transcription.verse.bible_verse].append(transcription_clean)\n\n # Loop over the distinct Bible verses in order\n bible_verses = sorted(bible_verse_to_transcriptions_dict.keys(), key=lambda bible_verse: bible_verse.id) \n for bible_verse in bible_verses:\n transcriptions_txt = \" | \".join(bible_verse_to_transcriptions_dict[bible_verse])\n user_bible += f\"{bible_verse} <color=black></color>{transcriptions_txt}<br>\\n\"\n\n return user_bible", "def display_attr(self):\n # Ensure that only stars granting skills have its self.name displayed. Display source name for the rest.\n if self.find_constellation().members[-1] == self and 'Third' not in self.name and 'Fourth' not in self.name \\\n and 'Fifth' not in self.name and 'Sixth' not in self.name:\n name_to_display = self.name\n else:\n name_to_display = self.source.replace('_', ' ')\n attributes = [x for x in self.__dict__ if x in attributes_dict]\n result = []\n replaced = None\n unwanted = None\n replaced_two = None\n unwanted_two = None\n specific_done = False\n\n for i in attributes:\n # The below block of code arranges the attributes of skills which involve summoning pets into strings which\n # then will be sent to the app frontend.\n if type(self.__dict__[i]) == dict:\n # The dictionary as a star attribute means that its stored values refer to the in-game pet bonuses or\n # special skills.\n pet_list = list(self.__dict__[i].values())\n result.extend(pet_list)\n # Strings 'specific' and 'particular' will enable frontend code to put the attributes into\n # the right order.\n if 'Attributes' not in i and 'Abilities' not in i and specific_done is False:\n result.append(i + ' specific')\n specific_done = True\n elif 'Attributes' not in i and 'Abilities' not in i and specific_done:\n result.append(i + ' particular')\n else:\n result.append(i)\n # Some rare bonuses required hard coding to reflect the in-game display. These are usually attributes with\n # two values.\n elif type(self.__dict__[i]) != list:\n if 'Knockdown' in i:\n if self == trample:\n result.append('Knockdown target for 0.8 - 1.5 Seconds'.format(a=str(self.__dict__[i])))\n else:\n result.append('Knockdown target for {a} Seconds'.format(a=str(self.__dict__[i])))\n elif 'Chance_to_Stun' in i:\n result.append('Stun target for {a} Seconds'.format(a=self.__dict__[i]))\n elif 'Chance_to_Confuse' in i:\n result.append('Confuse target for {a} Seconds'.format(a=self.__dict__[i]))\n elif 'Lives' in i:\n result.append('Lives for {a} Seconds'.format(a=str(self.__dict__[i])))\n elif 'Energy_Leech_Chance' in i:\n result.append('Chance for {a} Energy Leech'.format(a=str(self.__dict__[i])))\n elif 'Invincible' in i:\n result.append('Invincible')\n elif self == hungering_void and '308 Active Health Cost per Second' not in result:\n result.append('308 Active Health Cost per Second')\n elif 'Chance_to_Petrify' in i:\n result.append('Petrify target for {a} Seconds'.format(a=self.__dict__[i]))\n elif 'Affected_Targets' in i:\n result.append('Affects up to {a} targets'.format(a=self.__dict__[i]))\n # Value as a float requires special treatment as well. 
Some must be displayed as '%', some as float.\n elif type(self.__dict__[i]) == float and 'per' not in i and 'Seconds_Skill_Recharge' not in i and \\\n 'Radius' not in i and 'Area' not in i and 'Knockdown' not in i and 'Lives' not in i and 'Stun' \\\n not in i and 'Confuse' not in i and 'Petrify' not in i and 'Affected' not in i:\n self.__dict__[i] = \"{:.0%}\".format(self.__dict__[i])\n else:\n if 'Duration' in i and 'Reduced' in i and 'Knockdown' not in i and type(self.__dict__[i]) != str:\n self.__dict__[i] = \"{:.0%}\".format(self.__dict__[i])\n result.append(str(self.__dict__[i]) + ' ' + i)\n elif 'Knockdown' not in i and 'Stun' not in i and 'Confuse' not in i and 'Affected' not in i:\n result.append(str(self.__dict__[i]) + ' ' + i)\n # The list occurs when there are two values but the display is not meant for a pet bonus nor skill.\n elif type(self.__dict__[i]) == list:\n if 'Chance_to_Stun' in i:\n a = \"{:.0%}\".format(self.__dict__[i][0])\n if self == hungering_void:\n result.append('pets {a} Chance to Stun target for {b} Seconds'.\n format(a=a, b=self.__dict__[i][1]))\n else:\n result.append('{a} Chance to Stun target for {b} Seconds'.format(a=a, b=self.__dict__[i][1]))\n elif 'Chance_to_Confuse' in i:\n a = \"{:.0%}\".format(self.__dict__[i][0])\n result.append('{a} Chance to Confuse target for {b} Seconds'.format(a=a, b=self.__dict__[i][1]))\n elif 'Restored' in i:\n a = \"{:.0%}\".format(self.__dict__[i][0])\n result.append('{a} + {b} Health Restored'.format(a=a, b=self.__dict__[i][1]))\n elif 'Restored' in i:\n a = \"{:.0%}\".format(self.__dict__[i][0])\n result.append('{a} + {b} Health Restored'.format(a=a, b=self.__dict__[i][1]))\n elif 'Chance_to_Petrify' in i:\n a = \"{:.0%}\".format(self.__dict__[i][0])\n result.append('{a} Chance to Petrify target for {b} Seconds'.format(a=a, b=self.__dict__[i][1]))\n elif 'Slow_Target' in i:\n if len(self.__dict__[i]) == 3:\n a = \"{:.0%}\".format(self.__dict__[i][0])\n b = \"{:.0%}\".format(self.__dict__[i][1])\n result.append('pets {a} Chance of {b} slow target for {c} Seconds'.\n format(a=a, b=b, c=self.__dict__[i][2]))\n else:\n a = \"{:.0%}\".format(self.__dict__[i][0])\n result.append('{a} Slow target for {b} Seconds'.format(a=a, b=self.__dict__[i][1]))\n else:\n if type(self.__dict__[i][1]) == float and 'per' not in i and 'Seconds_Skill_Recharge' not in i and \\\n 'Radius' not in i:\n second_string = \"{:.0%}\".format(self.__dict__[i][1]) + ' ' + i\n result.append(second_string)\n if type(self.__dict__[i][1]) != float:\n second_string = str(self.__dict__[i][1]) + ' ' + i\n result.append(second_string)\n if type(self.__dict__[i][0]) == float and 'per' not in i and 'Seconds_Skill_Recharge' not in i and \\\n 'Radius' not in i:\n first_string = \"{:.0%}\".format(self.__dict__[i][0]) + ' ' + i\n result.append(first_string)\n if type(self.__dict__[i][0]) != float:\n first_string = str(self.__dict__[i][0]) + ' ' + i\n result.append(first_string)\n result.append(name_to_display)\n # Turn python range object into 'number-number' format.\n for i in result:\n if 'range(' in i:\n if replaced:\n unwanted_two = result.index(i)\n replaced_two = i.replace(\"range(\", \"\")\n else:\n unwanted = result.index(i)\n replaced = i.replace(\"range(\", \"\")\n for i in result:\n if i in self.__dict__ and type(self.__dict__[i]) == float:\n self.__dict__[i] = \"{:.1%}\".format(self.__dict__[i])\n break\n if replaced:\n replaced = replaced.replace(',', ' -')\n replaced = replaced.replace(')', '')\n result[unwanted] = replaced\n if replaced_two:\n 
replaced_two = replaced_two.replace(',', ' -')\n replaced_two = replaced_two.replace(')', '')\n result[unwanted_two] = replaced_two\n # Get rid of the underscore character required to create a class attribute.\n for i in result:\n if '_' in i:\n result[result.index(i)] = i.replace('_', ' ')\n for i in result:\n if 'target' in i and 'Petrify' not in i and 'Knockdown' not in i and 'Stun' not in i and 'Slow' not in i \\\n and 'Affects' not in i:\n result[result.index(i)] = i.replace('target', \"target's\")\n for i in result:\n if 'True pets Taunt Target' in i:\n result[result.index(i)] = i.replace('True pets Taunt Target', \"pets Taunt target\")\n if 'of 250' in i:\n result[result.index(i)] = i.replace('of 250', \"of 250%\")\n return result", "def give_character(menu, brand):\n characters = ''\n for item in menu:\n if item[\"Brand\"] == brand:\n characters += ('{}\\n'.format(item[\"Character\"]))\n return characters", "def __str__(self):\n return \", \".join(a for a in self.args)", "def amenities(self):\n if os.getenv('HBNB_TYPE_STORAGE') == 'db':\n return self.__amenities\n else:\n lst = []\n for k, v in models.storage.all(Amenity).items():\n if v.place_id == self.id:\n lst += [v]\n return lst", "def give_brand(menu, brand):\n characters = ''\n for item in menu:\n if item[\"Brand\"] == brand and item[\"Stock\"] >= 1:\n characters += ('\\tCharacter: {}, Deposit: {}, Price: {:.2f}\\n'.format(item[\"Character\"], (item[\"Value\"] * .10), item[\"Price\"]))\n return characters", "def saveString(self):\n outputString = \"@\" + self.uniqueName\n outputString += \"#gameType,\" + self.gameType\n outputString += \"#hitpoints,\" + str(self.hitpoints)\n outputString += \"#inventory,\"\n for inventoryItem in self.inventory:\n outputString += inventoryItem + \",\"\n outputString += \"#equippedItems,\"\n for inventoryItem in self.equippedItems:\n outputString += inventoryItem + \",\"\n return outputString", "def menuFormat(self):\n \n pass", "def amenities(self, obj):\n if type(obj) == Amenity:\n self.append(obj)\n\n def append(self, obj):\n \"\"\"Method that appends\n \"\"\"\n self.amenity_ids.append(obj)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the number of samples in the directory.
def find_num_samples(data_dir):
    path, dirs, files = os.walk(data_dir).next()
    assert path == data_dir
    samples = [x for x in files if x.endswith('.jpg')]
    numsample = len(samples)
    for subdir in dirs:
        numsample += find_num_samples(data_dir + '/' + subdir)
    return numsample
[ "def count_samples(samples_path):\n samples = np.load(os.path.join(samples_path, 'samples.npy'))\n return len(samples)", "def get_num_samples(self, split_name):", "def count():\n\n return len(directory)", "def get_sample_nr(path):\n path1 = Path(path)\n parent_path = str(path1.parent)\n sample_nr = int(parent_path.split('/')[-1])\n return sample_nr", "def count_data(self):\n num_data = 0\n for cur_file_name in self.file_names:\n cur_file_features, cur_file_labels = self.load_data(cur_file_name)\n num_data += self.get_num_samples( cur_file_features )\n return num_data", "def num_samples(self):\r\n return self.snapshots[0].num_samples", "def count_samples(\n self,\n samples: List,\n ) -> int:\n num_samples = len(samples)\n with utils.format_text(\"yellow\", [\"underline\"]) as fmt:\n self.log.info(fmt(f\"number of data: {num_samples}\"))\n\n return num_samples", "def count_samples(ctx):\n print(\"loading data...\")\n images, labels = load_data(ctx.obj[\"data_folder\"], shuffle_seed=ctx.obj[\"seed\"])\n\n print(\"\")\n print(\"enumerated sample counts:\")\n for key, arr in list(zip(label_mapping, numpy.transpose(keras.utils.to_categorical(labels)))):\n print(f\" - {key}: {int(sum(arr))}\")\n print(\"total: \", len(images))", "def get_num_data_items(dataset_directory):\n num_data_items = 0\n for filename in os.listdir(os.path.join(dataset_directory, \"non-shellcode\")):\n name, extension = os.path.splitext(filename)\n if extension == \".bin\":\n num_data_items += 1\n for filename in os.listdir(os.path.join(dataset_directory, \"shellcode\")):\n name, extension = os.path.splitext(filename)\n if extension == \".bin\":\n num_data_items += 1\n return num_data_items", "def getNumSamples(sound):\n return getLength(sound)", "def __len__(self):\n if self.mode == 'train':\n return self.n_blocks()\n else:\n return len(self.noisy_wav_files)", "def n_samples(self):\n return len(self.sampler)", "def file_count():\n\n corpus = Corpus.from_env()\n click.echo(corpus.file_count)", "def count_data(self):\n num_data = 0\n for in_file_name in self.file_names:\n h5_file = h5py.File( in_file_name, 'r' )\n X = h5_file[self.features_name]\n if hasattr(X, 'keys'):\n num_data += len(X[ list(X.keys())[0] ])\n else:\n num_data += len(X)\n h5_file.close()\n return num_data", "def num_samples(self, split: str) -> int:\n raise NotImplementedError", "def number_of_sub_file_entries(self):\n if self._directory is None:\n self._directory = self._GetDirectory()\n\n if self._directory is None:\n return 0\n\n # We cannot use len(self._directory.entries) since entries is a generator.\n return sum(1 for path_spec in self._directory.entries)", "def count_observation(data_name):\n #filename = str(data_name)\n with open(data_name) as file: \n num_lines = 0\n for line in file: \n num_lines = num_lines + 1\n num_obs = num_lines/3\n return(int(num_obs))", "def get_total_examples(list_files):\n total_lines = 0\n for file in list_files:\n with open(file) as f:\n for i, l in enumerate(f):\n pass\n total_lines += i\n return total_lines", "def get_num_pbfs(src_dir):\n\n return len([f for f in absolute_file_paths(src_dir) if f.endswith(\"pbf\")])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an adjacency matrix for a 2D cubic lattice with the number of nodes specified by lattice_shape. If a directed network is requested with no bias, the default configuration is all bonds going from left to right and top to bottom (recalling that we index nodes across rows, then columns). The xbias and ybias give the probability that a bond goes from left to right versus right to left, and from top to bottom versus bottom to top, respectively.
def create_adj_cubic_2d(lattice_shape, undirected=True, xbias=1, ybias=1):
    num_ynodes, num_xnodes = lattice_shape
    num_nodes = num_xnodes * num_ynodes
    A = sparse.lil_matrix((num_nodes, num_nodes))

    # Form bond arrays to fill in row bonds and column bonds of the lattice
    x_bonds = np.ones(num_xnodes-1)
    y_bonds = np.ones(num_ynodes-1)

    # connect each row node to its neighbor to the right
    for first_row_node in range(0, num_nodes, num_xnodes):
        A[range(first_row_node, first_row_node + num_xnodes - 1),\
          range(first_row_node + 1, first_row_node + num_xnodes)] = x_bonds

    # connect each column node to its neighbor below
    for first_col_node in range(0, num_xnodes):
        A[range(first_col_node, num_nodes - num_xnodes, num_xnodes),\
          range(first_col_node + num_xnodes, num_nodes, num_xnodes)] = y_bonds

    # If we want an undirected network, just return the symmetrized form
    if undirected:
        A = A.tocsr()
        return A + A.T
    else:
        # If we want to toggle the direction of the elements (default direction is right and down)
        if (xbias != 1) or (ybias != 1):
            rows, cols = A.nonzero()
            for i, j in zip(rows, cols):
                if np.abs(i-j) == 1:  # row bond
                    if np.random.rand() > xbias:  # overcome the bias with probability 1-xbias
                        A[i, j] = 0
                        A[j, i] = 1
                else:  # column bond
                    if np.random.rand() > ybias:
                        A[i, j] = 0
                        A[j, i] = 1
        return A.tocsr()
[ "def cellular_automaton2d(rows, cols, r=1, neighbourhood='Moore', boundary=\"periodic\"):\n n = rows * cols\n if n < 9:\n raise Exception(\"There must be at least 9 cells\")\n adjacency_matrix = [[0. for j in range(n)] for i in range(n)]\n if boundary == \"periodic\":\n if neighbourhood == 'von Neumann':\n criteria = lambda a_i, b_i, a_o, b_o, radius, rownum: np.abs(a_i - a_o) + np.abs(b_i - b_o) <= radius\n elif neighbourhood == 'Moore':\n criteria = lambda a_i, b_i, a_o, b_o, radius, rownum: np.abs(a_i - a_o) <= radius and np.abs(b_i - b_o) <= radius\n elif neighbourhood == 'Hex':\n def hex_crit(a_i, b_i, a_o, b_o, radius, rownum):\n vn = np.abs(a_i - a_o) + np.abs(b_i - b_o) <= radius\n if rownum % 2 == 0:\n ex = (b_i - b_o) < radius\n else:\n ex = (b_o - b_i) < radius\n return vn or ex\n criteria = hex_crit\n else:\n raise Exception(\"neighbourhood type not supported: %s\" % neighbourhood)\n\n lattice = np.array(range(n)).reshape((rows, cols)).tolist()\n rownum = 0\n for a, row in enumerate(lattice):\n rownum += 1\n for b, _ in enumerate(row):\n adjacency_row_num = lattice[a][b]\n neighbourhood_points = _get_neighbourhood_points2d(a, b, r, criteria, rownum)\n for point in neighbourhood_points:\n x = point[0] if point[0] == -1 else point[0] % len(lattice)\n y = point[1] if point[1] == -1 else point[1] % len(lattice[a])\n adjacency_matrix[adjacency_row_num][lattice[x][y]] = 1.\n\n else:\n raise Exception(\"unsupported boundary condition: %s\" % boundary)\n return adjacency_matrix", "def periodic_lattice(node_number, neighbors):\n import numpy as num\n from kreveik import *\n from kreveik.classes import TopologicalNetwork \n adjacency_matrix = num.zeros((node_number,node_number))\n for i in range(node_number):\n for j in range(neighbors):\n adjacency_matrix[i][i-j-1]=1\n adjacency_matrix=adjacency_matrix + adjacency_matrix.transpose()\n new_network=TopologicalNetwork(adjacency_matrix)\n return new_network", "def get_adjacency_matrix(self) -> lil_matrix:\n n_atoms = self.structure.get_atoms().size()\n adjacency_matrix = lil_matrix((n_atoms, n_atoms), dtype=bool)\n\n # Loop over bonds\n for component_idx, graph in enumerate(self.graphs):\n for bond in graph.bonds():\n s_idx1 = self._get_structure_idx(component_idx, bond[0])\n s_idx2 = self._get_structure_idx(component_idx, bond[1])\n adjacency_matrix[s_idx1, s_idx2] = True\n\n # Make symmetric\n rows, cols = adjacency_matrix.nonzero()\n adjacency_matrix[cols, rows] = adjacency_matrix[rows, cols]\n return adjacency_matrix", "def create_lattice(self):\n G = nx.Graph()\n nodes = list(range(self.n))\n G.add_nodes_from(nodes)\n h = ((self.n - 1) // self.k) # the number of the lowest row\n for node in nodes:\n row = node // self.k\n column = node % self.k\n # lower\n if node + self.k < self.n:\n G.add_edge(node, node + self.k)\n else:\n G.add_edge(node, column)\n # right\n if column == (self.k - 1): # rightmost column\n G.add_edge(node, node - self.k + 1)\n elif node + 1 < self.n:\n G.add_edge(node, node + 1)\n else:\n G.add_edge(node, h * self.k)\n # lower-right\n if column == (self.k - 1): # rightmost column\n if node + 1 == self.n: # last point\n G.add_edge(node, 0)\n else:\n G.add_edge(node, node + 1)\n else:\n if (node + self.k + 1) < self.n:\n G.add_edge(node, node + self.k + 1)\n else:\n G.add_edge(node, column + 1)\n # lower-left\n if column == 0: # leftmost column\n if row == h:\n G.add_edge(node, self.k)\n elif row == h - 1:\n G.add_edge(node, self.n - 1)\n else:\n G.add_edge(node, node + 2 * self.k - 1)\n elif (node + self.k - 1) < 
self.n:\n G.add_edge(node, node + self.k - 1)\n else:\n G.add_edge(node, (column - 1) % self.k)\n \"\"\"\n if node + self.k in nodes:\n G.add_edge(node, node + self.k)\n if node % self.k != (self.k - 1) and node + 1 in nodes:\n G.add_edge(node, node + 1)\n \"\"\"\n return G", "def sample_coupling_matrix(\n dim: int = 3, connectivity: float = 0.5\n) -> Tuple[np.ndarray, np.ndarray, np.ndarray, int]:\n max_trial = 10\n check = False\n for trial in range(max_trial):\n # random topology for a given connectivity / edge density\n Coupl = np.zeros((dim, dim))\n n_edges = 0\n for gp in range(dim):\n for g in range(dim):\n if gp == g:\n continue\n # need to have the factor 0.5, otherwise\n # connectivity=1 would lead to dim*(dim-1) edges\n if np.random.rand() < 0.5 * connectivity:\n Coupl[gp, g] = 0.7\n n_edges += 1\n # obtain adjacancy matrix\n Adj_signed = np.zeros((dim, dim), dtype='int_')\n Adj_signed = np.sign(Coupl)\n Adj = np.abs(Adj_signed)\n # check for cycles and whether there is at least one edge\n if check_nocycles(Adj) and n_edges > 0:\n check = True\n break\n if not check:\n raise ValueError(\n 'did not find graph without cycles after' f'{max_trial} trials'\n )\n return Coupl, Adj, Adj_signed, n_edges", "def generate_lattice(n_points):\n grid = np.zeros(tuple(4 * [4] + [4, 3, 3]), dtype=np.complex64)\n for t in range(n_points):\n for x in range(n_points):\n for y in range(n_points):\n for z in range(n_points):\n grid[t, x, y, z, :, :, :] = generate_site()\n return grid", "def cellular_automaton(n, r=1, boundary=\"periodic\"):\n if n < 3:\n raise Exception(\"There must be at least 3 cells\")\n adjacency_matrix = [[0. for j in range(n)] for i in range(n)]\n if boundary == \"periodic\":\n for w, row in enumerate(adjacency_matrix):\n for c, _ in enumerate(row):\n if w == c:\n adjacency_matrix[w][c] = 1.\n for i in range(r):\n adjacency_matrix[w][c - (1 + i)] = 1.\n adjacency_matrix[w][(c + (1 + i)) % len(adjacency_matrix[w])] = 1.\n else:\n raise Exception(\"unsupported boundary condition: %s\" % boundary)\n return adjacency_matrix", "def generate_circle_lattice(N):\n theta = np.linspace(0, 2. * np.pi, N + 1)\n theta = theta[:-1]\n # The radius, given the length of a side is:\n # radius = s/(2 * sin(2 pi/ n)), where n is number of sides, s is length of each side\n # We set the length of a side to be 1 (the rest length of each bond)\n R = 1. / (2. 
* np.sin(np.pi / float(N)))\n # print '(2.* np.sin(2.*np.pi/float(N))) = ', (2.* np.sin(np.pi/float(N)))\n # print 'R = ', R\n xtmp = R * np.cos(theta)\n ytmp = R * np.sin(theta)\n xy = np.dstack([xtmp, ytmp])[0]\n if N == 1:\n BL = np.array([[]])\n elif N == 2:\n BL = np.array([[0, 1]])\n else:\n BL = np.array([[i, (i + 1) % N] for i in np.arange(N)])\n # print 'BL = ', BL\n # print 'NP = ', len(xtmp)\n NL, KL = le.BL2NLandKL(BL, NP=len(xtmp), NN=2)\n LV = np.array([[0, 0], [0, 0]], dtype=int)\n UC = xy\n ztmp = np.zeros(len(xy), dtype=int)\n LVUC = np.dstack((ztmp, ztmp, np.arange(len(xy), dtype=int)))[0]\n print LVUC\n lattice_exten = 'circlebonds'\n return xy, NL, KL, BL, LV, UC, LVUC, lattice_exten", "def make_adjacency_matrix(self):\n normal_arr = self.coord_mat\n adj_mat = np.full((len_mesh, len_mesh), None)\n # create an adjacenty matrix for the mesh object\n n = 0\n for i in range(self.num_triangles):\n facet_a = self.coord_mat[i]\n for coord1 in facet_a:\n for j in range(self.num_triangles):\n facet_b = self.coord_mat[j]\n for coord2 in facet_b:\n if [coord1[0], coord1[1], coord1[2]] == [coord2[0], coord2[1], coord2[2]]:\n adj_mat[i][j] = uf.magnitude(\n uf.cross(uf.find_normal(normal_arr[i][0], normal_arr[i][1], normal_arr[i][2]),\n uf.find_normal(normal_arr[j][0], normal_arr[j][1], normal_arr[j][2])))\n self.adj_mat = adj_mat", "def connections(self):\n if self._connections is None:\n # get connection pairs\n w = 10 if self.width == 24 else 11\n conn = [(anode, cathode) for cathode in range(12) for anode in [a for a in range(12) if a!= cathode][:w]]\n # arrange connection pairs in coordinate grid\n col_height, cols = (5, 24) if self.width == 24 else (11, 12)\n self._connections = [conn[col_height*i:col_height*i+col_height] for i in range(cols)]\n return self._connections", "def network_laplacian_matrix(network, rtype='array', normalize=False):\n k_i = dict((key, index) for index, key in network.vertices_enum())\n edges = [(k_i[u], k_i[v]) for u, v in network.edges_iter()]\n return laplacian_matrix(edges, normalize=normalize, rtype=rtype)", "def index_cube(nodes, grid_shape, n=None):\n if not isinstance(nodes, str):\n raise TypeError(\"Nodes must be a str variable: e.g. 
'ABCD'\")\n nodes = nodes.upper()\n try:\n dim = len(grid_shape)\n if n is None:\n n = tuple(x - 1 for x in grid_shape)\n except TypeError:\n return TypeError(\"grid_shape must be iterable\")\n # Make sure that we choose from the possible nodes.\n possibleNodes = \"ABCD\" if dim == 2 else \"ABCDEFGH\"\n for node in nodes:\n if node not in possibleNodes:\n raise ValueError(\"Nodes must be chosen from: '{0!s}'\".format(possibleNodes))\n\n if dim == 2:\n ij = ndgrid(np.arange(n[0]), np.arange(n[1]))\n i, j = ij[:, 0], ij[:, 1]\n elif dim == 3:\n ijk = ndgrid(np.arange(n[0]), np.arange(n[1]), np.arange(n[2]))\n i, j, k = ijk[:, 0], ijk[:, 1], ijk[:, 2]\n else:\n raise Exception(\"Only 2 and 3 dimensions supported.\")\n\n nodeMap = {\n \"A\": [0, 0, 0],\n \"B\": [0, 1, 0],\n \"C\": [1, 1, 0],\n \"D\": [1, 0, 0],\n \"E\": [0, 0, 1],\n \"F\": [0, 1, 1],\n \"G\": [1, 1, 1],\n \"H\": [1, 0, 1],\n }\n out = ()\n for node in nodes:\n shift = nodeMap[node]\n if dim == 2:\n out += (sub2ind(grid_shape, np.c_[i + shift[0], j + shift[1]]).flatten(),)\n elif dim == 3:\n out += (\n sub2ind(\n grid_shape, np.c_[i + shift[0], j + shift[1], k + shift[2]]\n ).flatten(),\n )\n\n return out", "def lattice_builder(edges):\n\n\n topEdge, bottomEdge, leftEdge, rightEdge = edges \n # initializes the lattice\n latticeList = Lattice(np.zeros((containerSize, containerSize, 6), np.int8))\n\n # top left corner and top right corner positions are set, they won't vary\n # if the container size is odd or even.\n latticeList.array[0][0] = (0, 2, 2, 2, 2, 0) # topLeft\n latticeList.array[containerSize-1][0] = (2, 2, 2, 0, 0, 2) # topRight\n\n\n # the following if/else statement sets the walls for the bottom corners, which vary\n # based on whether the container size is odd or even. If even, the final row is short,\n # if odd, the final row is the same as the top row.\n if containerSize % 2 == 0: \n latticeList.array[containerSize-2][containerSize-1] = (2, 0, 0, 0, 2, 2) # bottomRight\n latticeList.array[0][containerSize-1] = (0, 0, 0, 2, 2, 2) # bottomLeft\n \n else:\n latticeList.array[containerSize-1][containerSize-1] = (2, 2, 0, 0, 2, 2) # bottomRight \n latticeList.array[0][containerSize-1] = (0, 0, 2, 2, 2, 2) # bottomLeft\n\n\n # the following for loops declare the edges based on either the lists provided by the\n # user, or automatically produced by auto_square_edges().\n for i in range(0,len(topEdge)):\n column, row = topEdge[i]\n latticeList.array[column][row] = (0, 2, 2, 0, 0, 0)\n \n \n for i in range(0,len(bottomEdge)):\n column, row = bottomEdge[i]\n latticeList.array[column][row] = (0, 0, 0, 0, 2, 2) \n \n \n for i in range(0,len(leftEdge)):\n column, row = leftEdge[i]\n \n if i % 2 == 1:\n latticeList.array[column][row] = (0, 0, 2, 2, 2, 0)\n else:\n latticeList.array[column][row] = (0, 0, 0, 2, 0, 0)\n \n \n for i in range(0,len(rightEdge)):\n column, row = rightEdge[i]\n \n if i % 2 == 1:\n latticeList.array[column][row] = (2, 2, 0, 0, 0, 2)\n else:\n latticeList.array[column][row] = (2, 0, 0, 0, 0, 0)\n latticeList.array[column+1][row] = (2, 2, 2, 2, 2, 2)\n\n\n return latticeList", "def __init__(self,\n block_shape,\n block_rows,\n include_diagonal=True,\n include_off_diagonal=True,\n upper=False,\n name='block_triangular_matrix'):\n super(BlockTriangularMatrix, self).__init__(name=name)\n if not include_diagonal and not include_off_diagonal:\n raise ValueError('Arguments include_diagonal and include_off_diagonal '\n 'cannot both be False.')\n\n self._block_shape = tuple(block_shape)\n self._block_rows = 
block_rows\n self._include_diagonal = include_diagonal\n self._include_off_diagonal = include_off_diagonal\n self._upper = upper\n self._num_blocks = sum(\n self._content_blocks(r) for r in xrange(self._block_rows))", "def __init__(self, matrix, neighbor_function, weight_function):\n self.lattice = matrix\n self.row_dim = len(self.lattice)\n self.col_dim = len(self.lattice[0])\n self.neighbor_function = neighbor_function\n self.weight_function = weight_function\n self.consistency_check()\n self.build_adjacency_list()", "def matrix_adjacency_directed(graph):\r\n nodes = get_nodes(graph)\r\n matrix = []\r\n\r\n for i in nodes:\r\n row = []\r\n for j in nodes:\r\n if [i, j] in graph:\r\n row.append(1)\r\n else:\r\n row.append(0)\r\n matrix.append(row)\r\n\r\n return matrix", "def get_adjacency_matrix(self):\n \n #initialize an empty 2D list\n length = len(self.nodes)\n matrix = [x[:] for x in [[0]*length]*length]\n for edge in self.edges:\n fromIndex = self.nodes.index(edge.node_from)\n toIndex = self.nodes.index(edge.node_to)\n matrix[fromIndex][toIndex] = edge.value\n return matrix", "def connectivity_matrix_neigh_feats(n_ftrs,n_neighbours,n_testNs):\n \n node_set = []\n for i in range(n_testNs):\n node_set.append(node(n_ftrs,n_neighbours))\n\n W = np.random.normal(size=n_ftrs*4)\n #nonLin = lambda x: np.tanh(x)\n conn_mtx_n = np.zeros([n_testNs,n_testNs])\n\n for i1,pair1 in enumerate(node_set):\n for i2,pair2 in enumerate(node_set):\n conn_mtx_n[i1,i2] = (W.dot(np.concatenate([pair1.ftrs,pair1.agg,pair2.ftrs,pair2.agg])))\n return conn_mtx_n, W, node_set", "def set_all_neighbours(self) :\n\n\t\tN = self.size\n\n\t\tfor row in range(N) :\n\t\t\tfor col in range(N) :\n\n\t\t\t\tnext_row = (row + 1) % self.size\n\t\t\t\tnext_col = (col + 1) % self.size\n\t\t\t\tprev_row = (row - 1) % self.size\n\t\t\t\tprev_col = (col - 1) % self.size\n\t\t\t\t\n\t\t\t\tneighbours = [self.lattice_array[prev_row, col], self.lattice_array[next_row, col], self.lattice_array[row, prev_col], self.lattice_array[row, next_col]]\n\t\t\t\t\n\t\t\t\tself.lattice_array[row, col].set_neighbours(neighbours)\n\t\t\t\tself.lattice_array[row, col].set_location(row, col)\n\n\t\treturn self.lattice_array", "def nb_matrix(graph, aux=False, ordering='blocks', return_ordering=False):\n if aux:\n degrees = graph.degree()\n degrees = sparse.diags([degrees[n] for n in graph.nodes()])\n ident = sparse.eye(graph.order())\n adj = nx.adjacency_matrix(graph)\n pseudo = sparse.bmat([[None, degrees - ident], [-ident, adj]])\n return pseudo.asformat('csr')\n\n else:\n # Compute the NB-matrix in a single pass on the non-zero elements\n # of the intermediate matrix.\n sources, targets, ord_func = half_incidence(\n graph, ordering, return_ordering=True)\n inter = np.dot(sources.T, targets).asformat('coo')\n inter_coords = set(zip(inter.row, inter.col))\n\n # h_coords contains the (row, col) coordinates of non-zero elements\n # in the NB-matrix\n h_coords = [(r, c) for r, c in inter_coords if (c, r) not in inter_coords]\n data = np.ones(len(h_coords))\n nbm = sparse.coo_matrix((data, list(zip(*h_coords))),\n shape=(2*graph.size(), 2*graph.size()))\n\n # Return the correct format\n nbm = nbm.asformat('csr')\n return (nbm, ord_func) if return_ordering else nbm" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
check vuln toolbar actions; there must be 2 rows to perform the test
def check_vulns_multiactions(sclnt, dt_id):
    # there should be two rows in total
    dt_elem = dt_wait_processing(sclnt, dt_id)
    toolbar_elem = sclnt.find_element_by_id('%s_toolbar' % dt_id)
    assert len(dt_elem.find_elements_by_xpath('//tbody/tr[@role="row"]')) == 2

    # one cloud be be tagged
    dt_elem.find_element_by_xpath('(//tr[@role="row"]/td[contains(@class, "select-checkbox")])[1]').click()
    toolbar_elem.find_element_by_xpath('//a[contains(@class, "abutton_tag_multiid") and text()="Info"]').click()
    dt_elem = dt_wait_processing(sclnt, dt_id)
    assert Vuln.query.filter(Vuln.name == 'vuln 1', Vuln.tags.any('info')).one()

    # or the other one
    dt_elem.find_element_by_xpath('(//tr[@role="row"]/td[contains(@class, "select-checkbox")])[2]').click()
    toolbar_elem.find_element_by_xpath('//a[contains(@class, "abutton_tag_multiid") and text()="Report"]').click()
    dt_elem = dt_wait_processing(sclnt, dt_id)
    assert Vuln.query.filter(Vuln.name == 'vuln 2', Vuln.tags.any('report')).one()

    # both might be tagged at the same time
    toolbar_elem.find_element_by_xpath('//a[text()="All"]').click()
    toolbar_elem.find_element_by_xpath('//a[contains(@class, "abutton_tag_multiid") and text()="Todo"]').click()
    dt_elem = dt_wait_processing(sclnt, dt_id)
    assert Vuln.query.filter(Vuln.tags.any('todo')).count() == 2

    # or deleted
    toolbar_elem.find_element_by_xpath('//a[text()="All"]').click()
    toolbar_elem.find_element_by_xpath('//a[contains(@class, "abutton_delete_multiid")]').click()
    webdriver_waituntil(sclnt, EC.alert_is_present())
    sclnt.switch_to.alert.accept()
    dt_wait_processing(sclnt, dt_id)
    assert not Vuln.query.all()
[ "def test_has_action(self):\n self.assertIn('request_unsolvedcases_status', self.model_admim.actions)", "def test_menu_item(main_window):\n if SPYDER6:\n main_menu = main_window.get_plugin(Plugins.MainMenu)\n run_menu = main_menu.get_application_menu(ApplicationMenus.Run)\n actions = run_menu.get_actions()\n else:\n actions = main_window.run_menu_actions\n\n # Filter out seperators (indicated by action is None) and convert to text\n menu_items = [action.text() for action in actions if action]\n\n assert 'Run unit tests' in menu_items", "def test_button_access_mfc(self):\n # Read a first toolbar with buttons: \"File, View, Help\"\n self.assertEqual(self.menu_bar.button_count(), 3)\n self.assertEqual(self.toolbar.button_count(), 8)\n\n # Test if it's in writable properties\n props = set(self.menu_bar.get_properties().keys())\n self.assertEqual('button_count' in props, True)\n self.assertEqual(\"File\", self.menu_bar.button(0).window_text())\n self.assertEqual(\"View\", self.menu_bar.button(1).window_text())\n self.assertEqual(\"Help\", self.menu_bar.button(2).window_text())\n\n found_txt = self.menu_bar.button(\"File\", exact=True).window_text()\n self.assertEqual(\"File\", found_txt)\n\n found_txt = self.menu_bar.button(\"File\", exact=False).window_text()\n self.assertEqual(\"File\", found_txt)", "def check_action_name():", "def perform_checks(self) -> None:", "def test_actions(base_app):\n InvenioAccess(base_app, entry_point_actions=None)\n with base_app.app_context():\n current_access.register_action(ActionNeed('action_a'))\n assert len(current_access.actions) == 1\n current_access.register_action(ActionNeed('action_b'))\n assert len(current_access.actions) == 2", "def test_can_select_multiple_items(self):\n table = self.dlg.Table\n cells = table.cells()\n self.assertEqual(len(table.cells()), 0)\n self.dlg.menu_select('#0 -> #1 -> #1 -> #0 -> #0 -> #4 ->#0')\n cells = table.cells()\n self.assertEqual(len(cells), 1)\n self.assertEqual(len(cells[0]), 1)", "def test_detail_website_gui_actions(self):\n site = SiteConfigurationManager.get_blank_site()\n website = site.website\n control_agent = DetailSiteControlAgent(website)\n presentation_agent = control_agent.get_presentation_agent()\n refresh_gui()\n\n # Enables component\n presentation_agent['enabled'].set_active(True)\n refresh_gui()\n\n # Tests widgets sensitivity after enablement\n flags = {\n 'enabled': True,\n 'maintenance': True,\n 'template': True,\n 'access': True,\n }\n self.assert_widgets_sensitive_flag(presentation_agent, flags)\n\n # When a checkbox is enabled, the corresponding attribute should follow\n for name in ('enabled', 'maintenance'):\n presentation_agent[name].set_active(True)\n refresh_gui()\n self.assertTrue(getattr(website, name),\n 'site %s attribute is not enabled' % name)\n\n # Comboboxes value should be reported to abstraction\n template = SiteDefaultsManager.get_site_templates().keys()[0]\n access = SiteDefaultsManager.get_site_accesses().keys()[0]\n\n for name, value in {'template': template, 'access': access}.items():\n presentation_agent.set_value(name, value)\n refresh_gui()\n self.assertEquals(getattr(website, name), value,\n 'site %s attribute is wrong' % name)", "def test_edit_unit(self):\n self._check_verticals([self.vert_loc])", "def test_trunner_click_in_run_page(self, browser, login, logout):\n self.open_run_test_page_for_1st_test(browser)\n run_test_page = RunTestPage(browser)\n run_test_page.trunner_lnk_click()\n run_test_page.handling_alert()\n run_test_page.wait_new_page_load()\n suites_page 
= SuitesPage(browser)\n page_title = suites_page.get_title()\n assert page_title == 'Suites Info', \"Should be 'Suites Info' for suites page'\"", "def test_click_all_passed_buttons_case_status_passed(self, browser, login, logout):\n self.open_run_test_page_for_1st_test(browser)\n run_test_page = RunTestPage(browser)\n run_test_page.click_all_passed_btns()\n status = run_test_page.get_case_status()\n assert status == 'โœ… Passed', \"Status should be 'Passed' for test case\"\n run_test_page.back_to_suite_btn_click()", "def test_bulk_verify_option_for_verifier(\n self, second_creator, login_as_creator, audit_w_auditor,\n asmts_w_verifier, login_as_second_creator, page, soft_assert, selenium\n ):\n page = page(audit_w_auditor)\n webui_facade.soft_assert_bulk_verify_for_not_in_review_state(\n page, asmts_w_verifier, soft_assert)\n webui_facade.soft_assert_bulk_verify_for_in_review_state(\n page, asmts_w_verifier[-1], soft_assert, is_available=True)\n soft_assert.assert_expectations()", "def test_read_available_resource_actions(self):\n pass", "def test_content_analyst_view_all_tags_in_assessment_qa_view_14732(self):\n self.ps.test_updates['name'] = 't2.11.043' \\\n + inspect.currentframe().f_code.co_name[4:]\n self.ps.test_updates['tags'] = [\n 't2',\n 't2.11',\n 't2.11.043',\n '14732'\n ]\n self.ps.test_updates['passed'] = False\n\n # Test steps and verification assertions\n self.content.login()\n self.content.open_user_menu()\n self.content.find(By.PARTIAL_LINK_TEXT, \"QA Content\").click()\n self.content.sleep(8)\n\n # Go to a non introductory section\n self.content.find(\n By.XPATH,\n \"//ul[@class='section'][1]/li[3]/ul[@class='section']/li/a\"\n ).click()\n self.content.sleep(10)\n\n # Verify the book is physics\n title = self.content.find(By.XPATH, \"//li[@class='title']\").text\n if title.find('Physics') < 0:\n self.content.find(\n By.XPATH, \"//a[@id='available-books']/span[1]\").click()\n books = self.content.driver.find_elements_by_xpath(\n \"//a[@class='book']/div[@class='title-version']\")\n for book in books:\n if book.text.find('Physics') >= 0:\n book.click()\n self.content.sleep(10)\n self.content.find(\n By.XPATH,\n \"//ul[@class='section'][1]/li[3]/ul\" +\n \"[@class='section']/li/a\"\n ).click()\n break\n\n qa_tags = len(self.content.driver.find_elements_by_xpath(\n \"//span[@class='exercise-tag']\"))\n qa_tags += len(self.content.driver.find_elements_by_xpath(\n \"//span[@class='lo-tag']\"))\n self.content.sleep(3)\n\n assert(qa_tags > 0), \\\n 'No tags found'\n\n self.ps.test_updates['passed'] = True", "def test_right_feature_and_wrong_story():\n pass", "def test_bulk_verify_option_for_non_verifier(\n self, second_creator, login_as_creator, audit_w_auditor,\n asmts_w_verifier, page, soft_assert, selenium\n ):\n page = page(audit_w_auditor)\n webui_facade.soft_assert_bulk_verify_for_not_in_review_state(\n page, asmts_w_verifier, soft_assert)\n webui_facade.soft_assert_bulk_verify_for_in_review_state(\n page, asmts_w_verifier[-1], soft_assert, is_available=False)\n soft_assert.assert_expectations()", "def testAllowableActions(self):\n actions = self._testFolder.getAllowableActions()\n assert len(actions) > 0", "def test_overview_self_checked_out(self, browser):\n\n self.login(self.regular_user, browser)\n\n browser.open(self.document, view='tabbedview_view-overview')\n browser.find('Checkout and edit').click()\n\n # Tabbedview gets in the way of the redirect so we'll have to revisit\n browser.open(self.document, view='tabbedview_view-overview')\n\n document_metadata = 
browser.css('.documentMetadata tr').text\n\n self.assertIn(\n 'Author test_user_1_',\n document_metadata,\n )\n\n self.assertIn(\n 'creator Ziegler Robert (robert.ziegler)',\n document_metadata,\n )\n\n self.assertIn(\n u'Checked out B\\xe4rfuss K\\xe4thi (kathi.barfuss)',\n document_metadata,\n )\n\n file_actions = browser.css('.file-action-buttons a').text\n\n self.assertIn(\n 'Checkout and edit',\n file_actions,\n )\n\n self.assertIn(\n 'Checkin without comment',\n file_actions,\n )\n\n self.assertIn(\n 'Checkin with comment',\n file_actions,\n )\n\n self.assertIn(\n 'Download copy',\n file_actions,\n )\n\n self.assertIn(\n 'PDF Preview',\n file_actions,\n )", "def test_TC_Boards_BoardDetails_CardDetails_220819_3(self):\n self.log.info(\"*#\" * 20)\n self.log.info(\"test_TC_Boards_BoardDetails_CardDetails_220819_3 started\")\n self.log.info(\"*#\" * 20)\n self.bd.gotoBoards()\n self.bd.clickBoard(name=\"Sample\")\n self.bd.clickCard(listName=\"Sample List\", cardTitle=\"Sample Card 2\")\n self.bd.editCard(desc=\"Sample Card 2 Edited\")\n self.bd.clickCard(listName=\"Sample List\", cardTitle=\"Sample Card 2\")\n result = self.bd.verifyEditCard(desc=\"Sample Card 2 Edited\")\n self.ts.markFinal(\"test_TC_Boards_BoardDetails_CardDetails_220819_3\", result, \"Edit Card Verification\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update agent's velocity using the velocity function.
def update_velocity(self):
    self.velocity = self.vel_func() * (
        self.goal - self.current) / abs(self.goal - self.current)
[ "def update(self, **kwargs):\n self.apply_velocity()", "def velocity(self, t):\n pass", "def exec_velocity_cmd(self, cmd):\n self.set_joint_velocities(cmd, self._joint_ids)", "def _updateVelocity(self):\n\t\t# Find difference between two vectors\n\t\tdifferenceVector = [0, 0]\n\t\tdifferenceVector[0] = self.targetVelocity[0] - self.currentVelocity[0]\n\t\tdifferenceVector[1] = self.targetVelocity[1] - self.currentVelocity[1]\n\n\t\t# Exit if there's nothing to update to avoid extra calculations\n\t\tif(differenceVector[0] == 0 and differenceVector[1] == 0):\n\t\t\treturn\n\n\t\t# Find the hypotenuse of the difference vector\n\t\tdifferenceMagnitude = math.sqrt((differenceVector[0] ** 2) + (differenceVector[1] ** 2))\n\n\t\t# If hypotenuse <= maxAcceleration, set currentVelocity = targetVelocity\n\t\tif(differenceMagnitude <= self.maxAcceleration):\n\t\t\tself.currentVelocity[0] = self.targetVelocity[0]\n\t\t\tself.currentVelocity[1] = self.targetVelocity[1]\n\t\t\treturn\n\n\t\t# Else, divide the distance vector by the hypotenuse (to make unit vector), multiply by maxAcceleration, and add to currentVelocity\n\t\tdifferenceVector[0] = self.maxAcceleration * (differenceVector[0] / differenceMagnitude)\n\t\tdifferenceVector[1] = self.maxAcceleration * (differenceVector[1] / differenceMagnitude)\n\n\t\tself.currentVelocity[0] += differenceVector[0]\n\t\tself.currentVelocity[1] += differenceVector[1]\n\n\t\treturn", "def update_velocity(self, msg):\n\t\tself.ekf.vel = enu_to_ned(np.array([[msg.twist.linear.x], [msg.twist.linear.y], [msg.twist.linear.z]]))", "def setVelocity(self, velocity):\r\n self._velocity = velocity", "def v(self):\n return self.velocity + self.dv()", "def physics_update(self) -> None:\n if not self.stopped:\n self.velocity += helpers.V(magnitude=self.gravity_power, angle=180)", "def updateVelocities(self) -> None:\r\n for idx1 in range(self.size() - 1):\r\n for idx2 in range(idx1 + 1, self.size()):\r\n self.updateVelocity(idx1, idx2)", "def update_velocity_body(self, msg):\n\t\tself.ekf.vel_body = enu_to_ned(np.array([[msg.twist.linear.x], [msg.twist.linear.y], [msg.twist.linear.z]]))", "def setVel(self,cmd):\n if self.time == 0.0:\n self.time = time.time()\n # update the velocity, assume the velocity takes times to change (to avoid local minimum)\n self.curVel = self.inertia*array(cmd)+(1-self.inertia)*self.curVel\n self.pose[0:2] = self.pose[0:2]+array(self.curVel)*(time.time()-self.time)\n self.time = time.time()\n # the orintation is kept the same (rad)\n # TODO: allows more robot models", "def set_velocity(self, velocity):\n self.mover.set_velocity(velocity)", "def update(self):\r\n self.updateVelocities()\r\n self.updatePositions()", "def velocity(estimate, actual, times=60):\n return (estimate*times)/(actual*1.)", "def velocity_rescale():\n system.vel = v_res(system.vel, system.T, const.KB, system.mass)", "def apply_velocity(self, **kwargs):\n if self.position.get_distance(self._target) < 30:\n if self._target == self._start:\n self._target = self._end\n else:\n self._target = self._start\n\n direction = (self._target - self.position).normalized()\n self.velocity = direction * 2\n self.position += self.velocity\n self.generate_vertices()", "def apply_velocity(self):\n for moon in self.moons:\n for axis, vel in moon['vel'].items():\n moon['pos'][axis] += vel", "def velocity(obs0, obs1, r0, r1):\n\tsigma = G/(np.linalg.norm(r0)**3)\n\tv0 = (r1 - vel_f(obs1.JD, obs0.JD, sigma, 0)*r0)/vel_g(obs1.JD, obs0.JD, sigma)\n\tfor _ in range(4): # Iterate to get tau\n\t\ttau = 
r0.dot(v0)/r0.dot(r0)\n\t\tv0 = (r1 - vel_f(obs1.JD, obs0.JD, sigma, tau)*r0)/vel_g(obs1.JD, obs0.JD, sigma)\n\treturn v0", "def update(self, dt):\n super().update(dt)\n self.velocity.norm = min(self.velocity.norm, self.max_speed)", "def update(self, time_step):\r\n self.position.propagate(self.velocity, time_step)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove marker from retVal and plot
def clearMarker(self):
    self.retVal['x'] = None
    self.retVal['y'] = None
    self.retVal['subPlot'] = None
    for i in range(self.nSubPlots):
        subPlot = self.selectSubPlot(i)
        for marker in self.markers:
            if marker in subPlot.lines:
                subPlot.lines.remove(marker)
    self.markers = []
    self.fig.canvas.draw()
[ "def refresh_marker_display(self): \n if self.scalar_display:\n return\n self.removeMarkers()\n self.info_marker = None\n self.log_marker = None\n self.source_marker = None\n if self.is_combined_image:\n self.insert_marker_lines()\n# draw dividing lines for complex array, cross_sections, solver_offsets, etc\n self.insert_array_info()\n self.replot()\n if HAS_TIMBA:_dprint(3, 'called replot in refresh_marker_display ')\n #print 'called replot in refresh_marker_display '", "def remove_all_markers(self):\n self.marker_layer.clear_markers()", "def delMarker(self):\n for i in self.markers:\n self.rens[0].RemoveActor(i)\n for i in self.labels:\n self.rens[0].RemoveActor(i)\n self.markers = []\n self.labels = []", "def clearMarker(data,name):\n for i in range(len(data)):\n data[i][name] = np.array([np.nan,np.nan,np.nan])\n return data", "def removePointMarker(self, id):\n del self.pointmarkers[id]", "def removeLineMarker(self, id):\n del self.linemarkers[id]", "def _on_mark(self, evt):\r\n mark_color = 'k'\r\n if self.sub_plots.color.lower() == 'black':\r\n mark_color = 'white'\r\n if self.sub_plots.has_selection:\r\n #delete markers\r\n for sub_plot in self.sub_plots.sub_plots:\r\n for line in sub_plot.selection:\r\n sub_plot.axes.lines.remove(line)\r\n self.canvas.draw()\r\n else:\r\n for i, sub_plot in enumerate(self.sub_plots.sub_plots):\r\n x1, x2, y1, y2 = sub_plot.axes.axis()\r\n x = [x1, x2, x2, x1, x1]\r\n y = [y1, y1, y2, y2, y1]\r\n sub_plot.selection = self.redraw(x,y, hold = True,\r\n limits = (x1,x2,y1,y2),\r\n index = i,\r\n color = mark_color, linewidth = 2.0)\r\n self.sub_plots.has_selection = not self.sub_plots.has_selection", "def SoMarkerSet_removeMarker(idx: 'int') -> \"SbBool\":\n return _coin.SoMarkerSet_removeMarker(idx)", "def remove_points(self, keep_index):\n self.points = self.points[keep_index]\n self.remissions = self.remissions[keep_index]\n # TODO Merge classes to avoid this??\n self.label = self.label[keep_index]\n self.label_color = self.label_color[keep_index]\n\n # TODO add pinhole projection?", "def removeMarker(idx: 'int') -> \"SbBool\":\n return _coin.SoMarkerSet_removeMarker(idx)", "def unmark(self):\r\n self.setZ(0)\r\n self.lineWidth = 1\r\n\r\n if self.relationType == 0:\r\n self.setBrush(QBrush(QColor(200, 200, 255), Qt.SolidPattern))\r\n else:\r\n self.setBrush(QBrush(QColor(200, 255, 200), Qt.SolidPattern))\r\n self.relationIcon.unmark(False)\r\n self.collectionIcon.unmark(False)\r\n\r\n self.setCoords()", "def clear_fn(self):\n self.x, self.y = [], []", "def clear_points(self):\n self._points = []\n # clear points from the number line\n return", "def trace_off(self): #Funciona\n self._trace=False", "def _erase_marks(self, view):\r\n\r\n erase_lint_marks(view)", "def removeVerticalMarker(self, id):\n del self.verticalmarkers[id]", "def clear_figure(self):\n self._plot_status = PlotStatus.NO_DATA\n self._figure.data = []", "def clear_plot(self):\n self.plot_window.pg_plot_widget.clear()", "def removeIcon(self):\r\n # pylint: disable=E1103\r\n # It is ensured that self.__marked is not an integer\r\n\r\n if self.getMarkedIcon() != 0:\r\n self.getMarkedIcon().destroyIcon()\r\n self.contentsMousePressEvent(None)\r\n\r\n self.canvas().update()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the nr of the subplot that has been clicked
def getSubPlotNr(self, event):
    i = 0
    axisNr = None
    for axis in self.fig.axes:
        if axis == event.inaxes:
            axisNr = i
            break
        i += 1
    return axisNr
[ "def row_num(ax: mpl.axes.Axes) -> int:\n return ax.get_subplotspec().rowspan.start", "def onClick(self, event):\t\t\r\n\t\r\n\t\tsubPlotNr = self.getSubPlotNr(event)\t\t\r\n\t\tif subPlotNr == None:\r\n\t\t\treturn\r\n\t\t\r\n\t\tif event.button == 1:\t\t\t\t\r\n\t\t\r\n\t\t\tself.clearMarker()\r\n\t\t\tfor i in range(self.nSubPlots):\r\n\t\t\t\tsubPlot = self.selectSubPlot(i)\t\t\t\t\t\t\t\t\r\n\t\t\t\tmarker = plt.axvline(event.xdata, 0, 1, linestyle='--', \\\r\n\t\t\t\t\tlinewidth=2, color='gray')\r\n\t\t\t\tself.markers.append(marker)\r\n\r\n\t\t\tself.fig.canvas.draw()\r\n\t\t\tself.retVal['subPlot'] = subPlotNr\r\n\t\t\tself.retVal['x'] = event.xdata\r\n\t\t\tself.retVal['y'] = event.ydata\r\n\t\t\tprint self.retVal['x']\r\n\t\t\tprint self.retVal['y']\r\n\t\t\tbiglist.append([self.retVal['x'],self.retVal['y']])\r\n\t\telse:\t\t\t\r\n\t\t\t# Start a dragFrom\r\n\t\t\tself.dragFrom = event.xdata", "def get_row(master):\r\n\r\n try:\r\n row = int(master.grid_slaves()[0].grid_info()[\"row\"]) + 1\r\n except IndexError:\r\n # If master widget does not have any widgets plotted yet\r\n row = 0\r\n return row", "def __call__(self, index):\r\n try:\r\n return self.sub_plots[index]\r\n except IndexError:\r\n raise IndexError, \"No sub-plot exists at index:{0!s}\".format(index)", "def _nrows(self):\n # the ``int`` technically does not make a difference, but pycharm\n # thinks that ``ceil`` returns floats and therefore complains\n # otherwise\n return int(ceil(self._nsubplots / self._ncols))", "def subplot_index(nrow, ncol, k, kmin=1):\n i = 1 + (k - kmin) // ncol\n j = 1 + (k - kmin) % ncol\n if i > nrow:\n raise ValueError('k = %d exceeds number of rows' % k)\n return i, j", "def FindPlot(self, plt):\n if self.plots.count(plt)>0:\n return self.plots.index(plt)\n else:\n return -1", "def numSelected(self):\n\n return self._ensemble.numSelected()", "def col_num(ax: mpl.axes.Axes) -> int:\n return ax.get_subplotspec().colspan.start", "def on_click(event):\n if not event.xdata or not event.ydata: # if not clicked on a cell\n return\n # get closes integer points\n cx = math.floor(round(event.xdata))\n cy = math.floor(round(event.ydata))\n replot(cx, cy)", "def onScroll(self, event):\r\n\t\r\n\t\tfor i in range(self.nSubPlots):\r\n\t\t\tsubPlot = self.selectSubPlot(i)\t\t\r\n\t\t\txmin, xmax = subPlot.get_xlim()\r\n\t\t\tdx = xmax - xmin\r\n\t\t\tcx = (xmax+xmin)/2\r\n\t\t\tif event.button == 'down':\r\n\t\t\t\tdx *= 1.1\r\n\t\t\telse:\r\n\t\t\t\tdx /= 1.1\r\n\t\t\t_xmin = cx - dx/2\r\n\t\t\t_xmax = cx + dx/2\t\r\n\t\t\tsubPlot.set_xlim(_xmin, _xmax)\r\n\t\tevent.canvas.draw()", "def get_plot_idx(plotname):\n try:\n pdfname = plotname.split('/')[-1]\n return DESIRED_PLOT_ORDER.index(pdfname.replace('_v_costh.pdf', ''))\n except ValueError:\n pass\n return len(DESIRED_PLOT_ORDER)", "def numaxes(self):\n return len(self.axes)", "def get_window(self, event):\n if event.insaxes is None:\n return None\n return event.inaxes.figure.canvas.parent()", "def _get_count(self) -> \"size_t\" :\n return _core.ToolbarPanels__get_count(self)", "def index(self):\n if self.parent:\n return self.parent.children.index(self)\n else:\n return 0", "def on_press(event):\n\n\tglobal xclick, yclick, NumberOfLines, press, line\n\n\t#if event.inaxes!=line.axes: return\n\tpress = 1;\n\txclick.append(event.xdata);\n\tyclick.append(event.ydata);\n\tNumberOfLines += 1;\n\tprint('The number of lines: {}'.format(NumberOfLines))\n\tprint('Button clicked at:{}'.format((event.xdata,event.ydata)))", "def 
active_window_num_spikes(self):\n return self.active_windowed_df.shape[0]", "def _get_index(self) -> \"size_t\" :\n return _core.ToolbarPanel__get_index(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process a mouse click event. If a mouse is right clicked within a subplot, the return value is set to a (subPlotNr, xVal, yVal) tuple and the plot is closed. With rightclicking and dragging, the plot can be moved.
def onClick(self, event):
    subPlotNr = self.getSubPlotNr(event)
    if subPlotNr == None:
        return

    if event.button == 1:
        self.clearMarker()
        for i in range(self.nSubPlots):
            subPlot = self.selectSubPlot(i)
            marker = plt.axvline(event.xdata, 0, 1, linestyle='--', \
                linewidth=2, color='gray')
            self.markers.append(marker)

        self.fig.canvas.draw()
        self.retVal['subPlot'] = subPlotNr
        self.retVal['x'] = event.xdata
        self.retVal['y'] = event.ydata
        print self.retVal['x']
        print self.retVal['y']
        biglist.append([self.retVal['x'],self.retVal['y']])
    else:
        # Start a dragFrom
        self.dragFrom = event.xdata
[ "def on_click(event):\n if not event.xdata or not event.ydata: # if not clicked on a cell\n return\n # get closes integer points\n cx = math.floor(round(event.xdata))\n cy = math.floor(round(event.ydata))\n replot(cx, cy)", "def on_exc_click_release(self, event):\n\n if self.app.is_legacy is False:\n event_pos = event.pos\n # event_is_dragging = event.is_dragging\n right_button = 2\n else:\n event_pos = (event.xdata, event.ydata)\n # event_is_dragging = self.app.plotcanvas.is_dragging\n right_button = 3\n\n pos_canvas = self.canvas.translate_coords(event_pos)\n\n if self.app.grid_status():\n pos = self.app.geo_editor.snap(pos_canvas[0], pos_canvas[1])\n else:\n pos = (pos_canvas[0], pos_canvas[1])\n\n # if the released mouse button was RMB then test if it was a panning motion or not, if not it was a context\n # canvas menu\n try:\n if event.button == right_button: # right click\n if self.app.ui.popMenu.mouse_is_panning is False:\n try:\n QtGui.QGuiApplication.restoreOverrideCursor()\n except Exception:\n pass\n if self.active_tool.complete is False and not isinstance(self.active_tool, SelectEditorExc):\n self.active_tool.complete = True\n self.in_action = False\n self.delete_utility_geometry()\n self.app.inform.emit('[success] %s' % _(\"Done.\"))\n self.select_tool('drill_select')\n else:\n if isinstance(self.active_tool, DrillAdd):\n self.active_tool.complete = True\n self.in_action = False\n self.delete_utility_geometry()\n self.app.inform.emit('[success] %s' % _(\"Done.\"))\n self.select_tool('drill_select')\n\n self.app.cursor = QtGui.QCursor()\n self.app.populate_cmenu_grids()\n self.app.ui.popMenu.popup(self.app.cursor.pos())\n\n except Exception as e:\n log.warning(\"AppExcEditor.on_exc_click_release() RMB click --> Error: %s\" % str(e))\n raise\n\n # if the released mouse button was LMB then test if we had a right-to-left selection or a left-to-right\n # selection and then select a type of selection (\"enclosing\" or \"touching\")\n try:\n if event.button == 1: # left click\n if self.app.selection_type is not None:\n self.draw_selection_area_handler(self.pos, pos, self.app.selection_type)\n self.app.selection_type = None\n\n elif isinstance(self.active_tool, SelectEditorExc):\n self.active_tool.click_release((self.pos[0], self.pos[1]))\n\n # if there are selected objects then plot them\n if self.selected:\n self.replot()\n except Exception as e:\n log.warning(\"AppExcEditor.on_exc_click_release() LMB click --> Error: %s\" % str(e))\n raise", "def mouse_right_click(self):\n global selected_edge\n if (self.mouse_inside()) and not self.active:\n selected_edge=None\n self.remove_traces()\n self.update_shape()\n elif (self.mouse_inside()) and self.active and len(all_objects)>1:\n # self.remove_traces()\n if(self.is_neighbour()):\n if(self.same_position(self.other)):\n try:self.other.parent.remove_traces()\n except:pass\n else:\n self.remove_traces()\n else:\n if selected_edge==None:\n selected_edge=self\n else:\n self.set_link(selected_edge)\n selected_edge=None\n\n # print(self.is_neighbour(), selected_edge)\n # link stuff by pairs\n else:\n return False\n return True", "def OnRightDown(self, event):\n\n click_posn = event.GetPositionTuple() if WX3 else event.GetPosition()\n\n if event.ShiftDown():\n self.is_box_select = True\n self.SetCursor(wx.StockCursor(wx.CURSOR_CROSS))\n (self.sbox_w, self.sbox_h) = (0, 0)\n (self.sbox_1_x, self.sbox_1_y) = click_posn\n event.Skip()", "def handleClick(self, event):\n print(str(event.x) + ' ' + str(event.y))\n for listener in self.listeners:\n 
listener.handleClick(self.coordsToGrid(event.x, event.y))", "def mouse_released(self, pos_x, pos_y):\n if self.in_grid_drawing_state:\n QApplication.setOverrideCursor(QCursor(Qt.ArrowCursor))\n self.update_grid_motor_positions(self.graphics_grid_draw_item)\n self.graphics_grid_draw_item.set_draw_mode(False)\n self.wait_grid_drawing_click = False\n self.in_grid_drawing_state = False\n self.de_select_all()\n self.emit(\"shapeCreated\", self.graphics_grid_draw_item, \"Grid\")\n self.graphics_grid_draw_item.setSelected(True) \n self.shape_dict[self.graphics_grid_draw_item.get_display_name()] = \\\n self.graphics_grid_draw_item\n elif self.in_beam_define_state:\n self.stop_beam_define()\n elif self.in_select_items_state:\n self.graphics_select_tool_item.hide()\n self.in_select_items_state = False\n \"\"\"\n for point in self.get_points():\n if point.isSelected():\n self.emit(\"pointSelected\", point)\n \"\"\"\n self.select_lines_and_grids()", "def mouseDown_(self, event):\n global canvasView\n node = self.node\n\n m = NSEvent.mouseLocation()\n c = canvasView.convertPointToBase_(NSEvent.mouseLocation())\n node.canvas.mousex = m.x\n node.canvas.mousey = m.y\n node.mousedown = m", "def mouseReleaseEvent(self, event):\n\n if event.button() == Qt.LeftButton:\n if self.select_zooming:\n self.zooming_rect = QRect(self.zoom_point.x(), self.zoom_point.y(),\n event.x(), event.x() / self.vid_ratio)\n self.zoom_point = QPoint()\n self.select_zooming = False\n self.zooming = True\n self.load_current_frame()\n self.update()\n else:\n if self.current_line:\n self.lines.append(self.current_line)\n self.current_line = []\n self.drawing = False\n\n if event.button() == Qt.RightButton:\n self.playing_using_mouse = False", "def mouseDown(self, point, clickCount):\n if clickCount == 2:\n self.toleranceWindow.w.open()\n\n self.glyph.prepareUndo(\"Move handles\")\n\n # Get positions of mouse & bcps and do some math\n self.mouseDownPoint = (round(point.x), round(point.y))\n\n # Select segment when BCP connection is clicked first,\n # and then analyze selection for the dictionary\n # otherwise, everything will be deselected when user clicks\n # outside of the contours (eg. 
on the BCP connection)\n # and we will have no selections to analyze.\n self._selectSegmentWhenBCPConnectionIsClicked()\n self.delegate._analyzeSelection(self.glyph)\n\n # Only calculate slopes & intercepts when 1 segment is selected\n if len(self.delegate._selectedSegments) != 1:\n return\n\n for selected in self.delegate._selectedSegments:\n pt1, segment = selected\n self.h1, self.h2, pt2 = segment\n\n self.h1Pos = self.h1.position\n self.h2Pos = self.h2.position\n\n self.slope0, self.intercept0 = hf.getSlopeAndIntercept(pt1.position, self.h1Pos)\n self.slope1, self.intercept1 = hf.getSlopeAndIntercept(pt2.position, self.h2Pos)", "def mouseReleaseEvent(self, event):\r\n super().mouseReleaseEvent(event)\r\n\r\n # handle when grNode moved\r\n if self._was_moved:\r\n self._was_moved = False\r\n self.node.scene.history.storeHistory(\"Node moved\", setModified=True)\r\n\r\n self.node.scene.resetLastSelectedStates()\r\n self.doSelect() # also trigger itemSelected when node was moved\r\n\r\n # we need to store the last selected state, because moving does also select the nodes\r\n self.node.scene._last_selected_items = self.node.scene.getSelectedItems()\r\n\r\n # now we want to skip storing selection\r\n return\r\n\r\n # handle when grNode was clicked on\r\n if self._last_selected_state != self.isSelected() or self.node.scene._last_selected_items != self.node.scene.getSelectedItems():\r\n self.node.scene.resetLastSelectedStates()\r\n self._last_selected_state = self.isSelected()\r\n self.onSelected()", "def on_press(event):\n\n\tglobal xclick, yclick, NumberOfLines, press, line\n\n\t#if event.inaxes!=line.axes: return\n\tpress = 1;\n\txclick.append(event.xdata);\n\tyclick.append(event.ydata);\n\tNumberOfLines += 1;\n\tprint('The number of lines: {}'.format(NumberOfLines))\n\tprint('Button clicked at:{}'.format((event.xdata,event.ydata)))", "def handle_mouse(self, event, x, y, flags, param):\n mousey = MouseEvent(event, x, y, flags, param)\n window_commands.mouse_pub.publish(mousey)", "def on_mouse_click(self):\n base.graphicsEngine.render_frame()\n p=PNMImage(1, 1,4)\n base.graphicsEngine.extract_texture_data(self.mouse_tex, base.win.getGsg())\n self.mouse_tex.store(p)\n c=p.getXelA(0,0)\n id=self.color_to_id(c)\n if id != 0 and id == self.last_mouse_down_id:\n if id in self.click_commands:\n self.click_commands[id]()", "def mouse_handler(self,events):\n\n for event in events:\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.mousedown = True\n self.mousebutton = event.button\n elif event.type == pygame.MOUSEBUTTONUP:\n self.mousedown = False\n self.mousebutton = event.button\n self.mouseX, self.mouseY = pygame.mouse.get_pos()\n\n #manage tool events\n if self.draw_tool == \"Line\":\n self.draw_line_template()\n if self.draw_tool == \"Circle\":\n self.draw_circle_template()\n\n #show mouse state\n self.show_mousestate()", "def rect_zoom(self, eclick, erelease):\r\n msg = 'rect_zoom called. eclick={0} , erelease={1}'.format(str(eclick), str(erelease))\r\n logging.debug(msg)\r\n if eclick.xdata != erelease.xdata and eclick.ydata != erelease.ydata:\r\n x = sorted([eclick.xdata, erelease.xdata])\r\n y = sorted([eclick.ydata, erelease.ydata])\r\n paket = {'x':x,\r\n 'y':y,\r\n 'tip':self.konfig.TIP}\r\n msg = 'zoom value emit - data={0}'.format(str(paket))\r\n logging.debug(msg)\r\n self.emit(QtCore.SIGNAL('add_zoom_level(PyQt_PyObject)'), paket)\r\n #TODO! 
disable zoom\r\n self.zoomSelector.set_active(False)\r\n #enable spanSelector\r\n if self.spanSelector != None:\r\n self.spanSelector.visible = True", "def _mouseDown(self):\n # Execute pre interaction callback\n self._preCallback()", "def click(event):\n ROIsize = self.settings['ROI_size']\n Ly =self.img.shape[-1]\n Lx =self.img.shape[-2]\n \n if self.settings['selectROI'] and (Lx,Ly)!=(ROIsize,ROIsize):\n event.accept() \n pos = event.pos()\n x = int(pos.x()) #pyqtgraph is transposed\n y = int(pos.y())\n x = max(min(x, Lx-ROIsize//2 ),ROIsize//2 )\n y = max(min(y, Ly-ROIsize//2 ),ROIsize//2 )\n self.settings['roiX']= x\n self.settings['roiY']= y\n if hasattr(self, 'roi'):\n self.imv.removeItem(self.roi) \n self.roi = pg.RectROI([x-ROIsize//2,y-ROIsize//2], [ROIsize,ROIsize])\n self.imv.addItem(self.roi)\n \n self.settings['selectROI'] = False", "def on_right_click(self, event):\n\n element, (x, y) = event\n parent = self.tree_viewer.control\n self.context_menu.create_menu(parent).show(x, y)", "def event(mouse_event):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle a keypress event. The plot is closed without return value on enter. Other keys are used to add a comment.
def onKey(self, event):
    if event.key == 'enter':
        plt.close()
        return

    if event.key == 'escape':
        self.clearMarker()
        return

    if event.key == 'backspace':
        self.comment = self.comment[:-1]
    elif len(event.key) == 1:
        self.comment += event.key
    self.supTitle.set_text("comment: %s" % self.comment)
    event.canvas.draw()
[ "def on_key(event):\n if event.key == ' ':\n replot(0, 0, True)", "def handle_keypress(self, e):\n if e.char == '\\r': # log new weight when user hits <Enter>\n self.e_w.config(fg='grey')\n self.submit_handler()\n self.show_graph()\n else: # switch font back to black to indicate active editing\n self.e_w.config(fg='black') # when entry is changed, change text back to black", "def keypress(self, key):\n\n pass", "def process_key_press(self, keycode: int) -> None:", "def keyHandler(event:Event):\r\n if event.keysym == \"Return\": # Enter key\r\n addTaskClick()\r\n elif event.keysym == \"Delete\": # Delete Key\r\n removeSelectedClick()", "def key_released(self, event):\n pass", "def on_key_pressed(self, obj, event):\n if event.type == Gdk.EventType.KEY_PRESS:\n if event.keyval in (_RETURN, _KP_ENTER):\n self.on_booklist_ok_clicked(obj)\n #emit OK response on dialog to close it automatically\n self.top.response(-5)\n return True\n return False", "def handle_KeyPress(self, e):\r\n state = e.state & ~(self.qtile.numlockMask)\r\n keysym = self.qtile.conn.keycode_to_keysym(e.detail, state)\r\n if keysym == xkeysyms.keysyms['Tab']:\r\n self.userInput = self.completer.complete(self.userInput)\r\n else:\r\n actual_value = self.completer.actual()\r\n self.completer.reset()\r\n if keysym < 127 and chr(keysym) in string.printable:\r\n # No LookupString in XCB... oh,\r\n # the shame! Unicode users beware!\r\n self.userInput += chr(keysym)\r\n elif (keysym == xkeysyms.keysyms['BackSpace'] and\r\n len(self.userInput) > 0):\r\n self.userInput = self.userInput[:-1]\r\n elif keysym == xkeysyms.keysyms['Escape']:\r\n self.active = False\r\n self.bar.widget_ungrab_keyboard()\r\n elif keysym == xkeysyms.keysyms['Return']:\r\n self.active = False\r\n self.bar.widget_ungrab_keyboard()\r\n if self.strict_completer:\r\n self.callback(actual_value or self.userInput)\r\n else:\r\n self.callback(self.userInput)\r\n self._update()", "def handle(self, event):\n if event.type == QUIT:\n sys.exit()\n if event.type == KEYDOWN and event.key == K_ESCAPE:\n sys.exit()", "def on_key(self, event):\r\n self.key_event = event", "def OnKey(self, event):\n if self.alive.isSet(): \n code = event.GetKeyCode()\n if code < 256: #is it printable?\n if code == 13: #is it a newline? 
(check for CR which is the RETURN key)\n if self.settings.echo: #do echo if needed\n self.text_ctrl_output.AppendText('\\n')\n cmd = ''\n if self.settings.newline == NEWLINE_CR:\n cmd = '\\r'\n elif self.settings.newline == NEWLINE_LF:\n cmd = '\\n'\n elif self.settings.newline == NEWLINE_CRLF:\n cmd = '\\r\\n'\n self.parent.write_queue.put(cmd)\n else:\n char = chr(code)\n self.parent.write_queue.put(char)\n else:\n print \"Extra Key:\", code\n #else:\n #print \"Serial Terminal not open\"", "def key_pressed_dispatch(self, event):\r\n try:\r\n self.key_map[event.char]()\r\n except KeyError:\r\n try:\r\n self.key_map[event.keycode]()\r\n except KeyError:\r\n print(\"No handler for key \" + (\"enter\" if event.keycode == 13 else event.char) + \"(\" + str(\r\n event.keycode) + \")\")", "def on_key_press_event(self, window, event):\n if event.get_keyval()[1] == Gdk.KEY_Escape:\n self.popdown()\n return True\n return False", "def EnterKeyEvent(self, event):\n # Handle auto-complete first.\n if self.SCIAutoCActive():\n self.SCIAutoCComplete()\n self.SCIAutoCCancel()\n # Call the IDLE event.\n return self.bindings.fire(\"<<newline-and-indent>>\", event)", "def keyPressEvent(self, e):\n if e.key() == qt.QtCore.Qt.Key.Key_Escape:\n self.close()", "def _enter_key( self, event ) :\n w = event.widget\n self._run_command( w )", "def key_pressed(self, key_event):\n if key_event == \"Delete\":\n for item in self.graphics_view.graphics_scene.items():\n if item.isSelected():\n self.delete_shape(item)\n elif key_event == \"Escape\":\n self.stop_measure_distance()\n self.stop_measure_angle()\n self.stop_measure_area() \n if self.in_beam_define_state:\n self.stop_beam_define()\n if self.in_magnification_mode:\n self.set_magnification_mode(False)\n #elif key_event == \"Up\":\n # self.diffractometer_hwobj.move_to_beam(self.beam_position[0],\n # self.beam_position[1] - 50)\n #elif key_event == \"Down\":\n # self.diffractometer_hwobj.move_to_beam(self.beam_position[0], \n # self.beam_position[1] + 50)\n elif key_event == \"Plus\":\n self.diffractometer_hwobj.zoom_in()\n elif key_event == \"Minus\":\n self.diffractometer_hwobj.zoom_out()", "def __key_pressed (self, event):\n if event.GetKeyCode ( ) == wx.WXK_ESCAPE:\n self.__clear_search (None)", "def keyPressEvent(self, e: QtGui.QKeyEvent) -> None:\n if e.key() == QtCore.Qt.Key_Escape:\n self.reset_and_hide()\n else:\n super().keyPressEvent(e)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process scroll events. All subplots are scrolled simultaneously
def onScroll(self, event):
    for i in range(self.nSubPlots):
        subPlot = self.selectSubPlot(i)
        xmin, xmax = subPlot.get_xlim()
        dx = xmax - xmin
        cx = (xmax+xmin)/2
        if event.button == 'down':
            dx *= 1.1
        else:
            dx /= 1.1
        _xmin = cx - dx/2
        _xmax = cx + dx/2
        subPlot.set_xlim(_xmin, _xmax)
    event.canvas.draw()
[ "def mouse_scroll(event):\n fig = event.canvas.figure\n ax1 = fig.axes[0]\n ax2 = fig.axes[1]\n if event.button == 'down':\n previous_slice(ax1, ax2)\n elif event.button == 'up':\n next_slice(ax1, ax2)\n fig.canvas.draw()", "def on_mouse_scroll(self, evt):\n \n pass", "def on_scroll_window(self, event):\n sx,sy = self.GetScrollPixelsPerUnit()\n if event.GetOrientation() == wx.HORIZONTAL:\n dx = event.GetPosition()\n dy = self.GetScrollPos(wx.VERTICAL)\n else:\n dx = self.GetScrollPos(wx.HORIZONTAL)\n dy = event.GetPosition()\n \n pos = (dx ,dy)\n print(\"scrolling...\" + str(pos) + str(event.GetPosition()))\n # self.main.Scroll(dx, dy)\n # self.top.Scroll(dx, 0)\n # self.left.Scroll(0, dy)\n event.Skip()", "def scrolls(self , scroll):\n if(scroll.scroll_y <= MainWindow.distance):\n operations.load_more() \n scroll.scroll_to(content.ArticlesContainerCopy.articles_container_copy.children[content.Data.limit] , padding=0, animate=True)", "def mouse_scroll(self, event):\r\n\r\n if self.vsb.visible:\r\n if OS == \"Darwin\":\r\n new_delta = -1 * event.delta\r\n else:\r\n new_delta = -1 * int(event.delta / 120)\r\n self.canvas.yview_scroll(new_delta, \"units\")", "def adjust_cuts_scroll(self, plot, event):\n bm = self.fitsimage.get_bindings()\n pct = -self.scroll_pct\n if event.step > 0:\n pct = -pct\n bm.cut_pct(self.fitsimage, pct)", "def _infinite_scroll_kernels(self, n_scrolls=None, batch_size=10):\n # TODO: could change this to check for No more kernels message instead, might be cleaner\n if n_scrolls is not None and n_scrolls <= 0:\n raise ValueError(\"Must scroll at least once: %d\" % n_scrolls)\n curr = 0\n while n_scrolls is None or curr < n_scrolls:\n if curr % batch_size == 0:\n print(\"Scroll: %d\" % curr)\n current_height = self._get_height()\n self._scroll_pg_down()\n time.sleep(10)\n new_height = self._get_height()\n if current_height == new_height:\n log.info('Window height unchanged, done scrolling')\n return False\n curr += 1\n return True", "def updateScrollLabels(self):\n pass", "def _scroll(self, *args):\n for current_list in self.lists:\n current_list.yview(*args)\n return 'break'", "def glfw_mouse_scroll_callback(self, window, x_offset: float, y_offset: float):\r\n self._mouse_scroll_event_func(x_offset, y_offset)", "def on_mouse_wheel(self, event):\n if platform.system() == \"Windows\":\n self.canvas.yview_scroll(int(-1 * (event.delta / 120)), \"units\")\n elif platform.system() == \"Darwin\":\n self.canvas.yview_scroll(int(-1 * event.delta), \"units\")\n else:\n if event.num == 4:\n self.canvas.yview_scroll(-1, \"units\")\n elif event.num == 5:\n self.canvas.yview_scroll(1, \"units\")", "def updateScrollbar(self):\n self.horizontalScrollBar.blockSignals(True)\n\n self.horizontalScrollBar.setMinimum(0)\n self.horizontalScrollBar.setMaximum(self.signal.length - (self.mainCursor.max - self.mainCursor.min))\n self.horizontalScrollBar.setValue(self.mainCursor.min)\n self.horizontalScrollBar.setPageStep(self.mainCursor.max - self.mainCursor.min)\n self.horizontalScrollBar.setSingleStep((self.mainCursor.max - self.mainCursor.min) / self.SCROLL_BAR_STEP)\n\n # if zoom tool is selected update its limits here because\n # this is where all change range operations finish it's processing\n self.updateZoomRegionsLimits()\n\n self.horizontalScrollBar.blockSignals(False)", "def __set_subplots(self):\n self.logger.debug(\"running\")\n if len(self.__plot_names) < 1:\n return\n r = len(self.__plot_names)\n c = 1\n for i in range(0, r):\n self.__plots[self.__plot_names[i]] = [(r, c, i + 1), 
True]\n self.logger.debug(\"done\")", "def onMouseWheel(self, event):\r\n\r\n self.canvas.yview_scroll(int(-1 * (event.delta/4)), \"units\")", "def test_example_other_scroll_bar() -> None:\n scroll_bar.main(test=True)\n test_reset_surface()", "def _scrollupdate(self, old_x, old_y):\n new_x = self._barx.GetControlValue()\n new_y = self._bary.GetControlValue()\n Qd.SetPort(self._onscreen_wid)\n # See whether we can use scrollrect. Only possible if no updates pending.\n updrgn = Qd.NewRgn()\n self._onscreen_wid.GetWindowUpdateRgn(updrgn)\n## self._onscreen_wid.GetWindowRegion(Windows.kWindowUpdateRgn, updrgn)\n if Qd.EmptyRgn(updrgn):\n # Scroll, and get the new vacated region back\n Qd.ScrollRect(self.qdrect(), old_x-new_x, old_y-new_y, updrgn)\n else:\n # ok, update the whole window\n Qd.RectRgn(updrgn, self.qdrect())\n self._onscreen_wid.InvalWindowRgn(updrgn)\n Qd.DisposeRgn(updrgn)\n self._canvaspos = new_x, new_y", "def mouse_wheel(event):\n canvas.yview_scroll(-1*(event.delta/120), \"units\")", "def _on_cb_grid(self, evt):\r\n self.sub_plots.show_grid(self.cb_grid.IsChecked())\r\n #redraw plots\r\n self.canvas.draw()", "def OnPaint (self, event):\n scrollWindowOriginX, scrollWindowOriginY = self.CalcUnscrolledPosition (0, 0)\n\n paintDC = wxPaintDC (self)\n self.PrepareDC (paintDC)\n\n \"\"\"\n Calculate the rectangle that needs updating in scrolled coordinates\n \"\"\"\n updateRect = self.GetUpdateRegion().GetBox()\n bufferX = updateRect.GetLeft() + scrollWindowOriginX\n bufferY = updateRect.GetTop() + scrollWindowOriginY\n bufferWidth = updateRect.GetWidth()\n bufferHeight = updateRect.GetHeight()\n\n memoryDC = wxMemoryDC()\n offscreenBuffer = wxEmptyBitmap (bufferWidth, bufferHeight)\n memoryDC.SelectObject (offscreenBuffer)\n memoryDC.SetDeviceOrigin (-bufferX, -bufferY)\n\n \"\"\"\n Debugging code that makes it easy to see which areas are updating.\n \"\"\"\n if 0:\n success = paintDC.Blit (bufferX,\n bufferY,\n bufferWidth,\n bufferHeight,\n paintDC,\n bufferX,\n bufferY,\n wxSRC_INVERT)\n time.sleep(1)\n success = paintDC.Blit (bufferX,\n bufferY,\n bufferWidth,\n bufferHeight,\n paintDC,\n bufferX,\n bufferY,\n wxSRC_INVERT)\n\n\n memoryDC.BeginDrawing()\n\n self.DrawBackground (memoryDC)\n self.Draw (memoryDC)\n\n paintDC.Blit (bufferX,\n bufferY,\n bufferWidth,\n bufferHeight,\n memoryDC,\n bufferX,\n bufferY)\n\n memoryDC.EndDrawing()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that NearestMeanResponseImputer has fit and transform methods.
def test_class_methods(self):
    x = NearestMeanResponseImputer(
        response_column="c", use_median_if_no_nulls=False, columns=None
    )

    h.test_object_method(obj=x, expected_method="fit", msg="fit")

    h.test_object_method(obj=x, expected_method="transform", msg="transform")
[ "def test_learnt_values_not_modified(self):\n\n df = d.create_NearestMeanResponseImputer_test_df()\n\n x = NearestMeanResponseImputer(response_column=\"c\", columns=[\"a\", \"b\"])\n\n x.fit(df)\n\n x2 = NearestMeanResponseImputer(response_column=\"c\", columns=[\"a\", \"b\"])\n\n x2.fit(df)\n\n x2.transform(df)\n\n h.assert_equal_dispatch(\n expected=x.impute_values_,\n actual=x2.impute_values_,\n msg=\"Impute values not changed in transform\",\n )", "def test_inheritance(self):\n\n x = NearestMeanResponseImputer(\n response_column=\"c\", use_median_if_no_nulls=False, columns=None\n )\n\n h.assert_inheritance(x, tubular.imputers.BaseImputer)", "def test_arguments(self):\n\n h.test_function_arguments(\n func=NearestMeanResponseImputer.fit,\n expected_arguments=[\"self\", \"X\", \"y\"],\n expected_default_values=(None,),\n )", "def test_super_transform_called(self, mocker):\n\n df = d.create_NearestMeanResponseImputer_test_df()\n\n x = NearestMeanResponseImputer(response_column=\"c\", columns=[\"a\", \"b\"])\n\n x.fit(df)\n\n expected_call_args = {\n 0: {\"args\": (d.create_NearestMeanResponseImputer_test_df(),), \"kwargs\": {}}\n }\n\n with h.assert_function_call(\n mocker, tubular.base.BaseTransformer, \"transform\", expected_call_args\n ):\n\n x.transform(df)", "def test_use_median_if_no_nulls_false_and_columns_with_no_nulls_error(self):\n\n df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5], \"b\": [5, 4, 3, 2, 1], \"c\": [3, 2, 1, 4, 5]}\n )\n\n x = NearestMeanResponseImputer(response_column=\"c\", columns=[\"a\", \"b\"])\n\n with pytest.raises(\n ValueError,\n match=\"Column a has no missing values, cannot use this transformer.\",\n ):\n\n x.fit(df)", "def test_fit(self):\n self._fit()", "def test_apply_before_fit() -> None:\n X = np.empty((1, 1))\n # Supervised model\n with pytest.raises(NotFittedError):\n mod1 = NullModel(objective='regression')\n mod1.apply(X)\n # Unsupervised model\n with pytest.raises(NotFittedError):\n mod2 = KMeans()\n mod2.apply(X)", "def test_null_values_in_response_error(self):\n\n df = d.create_df_3()\n\n x = NearestMeanResponseImputer(response_column=\"c\", columns=[\"a\", \"b\"])\n\n with pytest.raises(ValueError, match=r\"Response column \\(c\\) has null values.\"):\n\n x.fit(df)", "def test_learned_normal_impute(make_missing_data):\n ab.set_hyperseed(100)\n _, m, X, _ = make_missing_data\n\n # This replicates the input layer behaviour\n def data_layer(**kwargs):\n return kwargs['X'], 0.0\n\n def mask_layer(**kwargs):\n return kwargs['M'], 0.0\n\n n, N, D = X.shape\n impute = ab.LearnedNormalImpute(data_layer, mask_layer)\n\n F, KL = impute(X=X, M=m)\n\n tc = tf.test.TestCase()\n with tc.test_session():\n tf.global_variables_initializer().run()\n X_imputed = F.eval()\n assert KL.eval() == 0.0 # Might want to change this in the future\n assert(X_imputed.shape == X.shape)", "def test_use_median_if_no_nulls_not_bool_error(self):\n\n with pytest.raises(TypeError, match=\"use_median_if_no_nulls must be a bool\"):\n\n NearestMeanResponseImputer(\n response_column=\"a\", use_median_if_no_nulls=\"abc\"\n )", "def test_imputer_kNN(setup_imputation_kNN, expected_imputation_kNN):\n for case, expected_outcome in expected_imputation_kNN.items():\n calc_imputation = setup_imputation_kNN[case][\"df\"]\n calc_imputation[setup_imputation_kNN[case][\"col_name\"]] = impute_kNN(\n **setup_imputation_kNN[case]\n )\n calc_imputation = calc_imputation.round(10)\n\n assert_frame_equal(calc_imputation, expected_outcome[\"df\"], check_dtype=False)", "def fit(self, dataset):\n if 
dataset.static_feature is not None: \n # MICE\n if self.imputation_model_name == 'mice': \n self.imputation_model = IterativeImputer() \n # MissForest\n elif self.imputation_model_name == 'missforest': \n self.imputation_model = MissForest() \n # KNN\n elif self.imputation_model_name == 'knn': \n self.imputation_model = KNNImputer()\n \n self.imputation_model.fit(dataset.static_feature)\n\n return", "def test_fit(self, mock_draw):\n X, y = make_classification(\n n_samples=400,\n n_features=20,\n n_informative=8,\n n_redundant=8,\n n_classes=2,\n n_clusters_per_class=4,\n random_state=1221,\n )\n\n visualizer = DiscriminationThreshold(BernoulliNB())\n assert not hasattr(visualizer, \"thresholds_\")\n assert not hasattr(visualizer, \"cv_scores_\")\n\n out = visualizer.fit(X, y)\n\n assert out is visualizer\n mock_draw.assert_called_once()\n assert hasattr(visualizer, \"thresholds_\")\n assert hasattr(visualizer, \"cv_scores_\")\n\n for metric in METRICS:\n assert metric in visualizer.cv_scores_\n assert \"{}_lower\".format(metric) in visualizer.cv_scores_\n assert \"{}_upper\".format(metric) in visualizer.cv_scores_", "def impute(X_train, X_test, strategy):\n imp = Imputer(missing_values=np.nan, strategy=strategy).fit(X_train)\n X_train_imputed = imp.transform(X_train)\n X_train_imputed = pd.DataFrame(\n X_train_imputed, columns=X_train.columns)\n X_test_imputed = imp.transform(X_test)\n X_test_imputed = pd.DataFrame(X_test_imputed, columns=X_test.columns)\n return X_train_imputed, X_test_imputed", "def test_assertSimilarMeans_one_obs_true(self):\n obs = [5]\n expected = [1,2,3,4,5,6,7,8,9,10,11]\n self.assertSimilarMeans(obs, expected)\n self.assertSimilarMeans(obs, expected, pvalue=0.25)\n self._set_suite_pvalue(0.10)\n self.assertSimilarMeans(obs, expected)", "def test_sklearn_compatible_estimator(estimator, check):\n check(estimator)", "def impute_dataset_train_test(imputation: str, train: pd.DataFrame, test: pd.DataFrame = None,\n dataset: pd.DataFrame = None) -> tuple:\n cols_to_impute = train.loc[:, train.isna().any()].select_dtypes(exclude=['string', 'object']).columns.tolist()\n if len(cols_to_impute) == 0:\n if dataset is not None:\n return dataset.copy(), train, test\n else:\n return None, train, test\n cols_to_add = [col for col in train.columns.tolist() if col not in cols_to_impute]\n if imputation == 'mean' or imputation == 'median':\n imputer = MissingValueImputation.get_simple_imputer(df=train.filter(cols_to_impute), strategy=imputation)\n elif imputation == 'iterative':\n imputer = MissingValueImputation.get_iter_imputer(df=train.filter(cols_to_impute))\n elif imputation == 'knn':\n imputer = MissingValueImputation.get_knn_imputer(df=train.filter(cols_to_impute))\n\n train_imp = pd.concat([pd.DataFrame(data=imputer.transform(X=train.filter(cols_to_impute)),\n columns=cols_to_impute, index=train.index), train[cols_to_add]],\n axis=1, sort=False)\n if test is None:\n test_imp = None\n else:\n test_imp = pd.concat([pd.DataFrame(data=imputer.transform(X=test.filter(cols_to_impute)),\n columns=cols_to_impute, index=test.index), test[cols_to_add]],\n axis=1, sort=False)\n if dataset is None:\n dataset_imp = None\n else:\n dataset_imp = pd.concat([pd.DataFrame(data=imputer.transform(X=dataset.filter(cols_to_impute)),\n columns=cols_to_impute, index=dataset.index), dataset[cols_to_add]],\n axis=1, sort=False)\n return dataset_imp, train_imp, test_imp", "def test_min_dist(self):\n self.plotter_structural_LOGS.umap(n_neighbors=15, random_state=None, min_dist=0.5)\n 
self.assertEqual(self.plotter_structural_LOGS.umap_fit.min_dist, 0.5)", "def test_pipeline_as_model_input_quickmethod(self):\n X, y = load_spam()\n\n model = Pipeline([\n ('imputer', SimpleImputer(missing_values=np.nan, strategy='mean')),\n ('lr', LogisticRegression(multi_class=\"auto\", solver=\"liblinear\"))\n ])\n\n oz = discrimination_threshold(model, X, y, random_state=42)\n self.assert_images_similar(oz, tol=15)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that NearestMeanResponseImputer inherits from BaseImputer.
def test_inheritance(self):

    x = NearestMeanResponseImputer(
        response_column="c", use_median_if_no_nulls=False, columns=None
    )

    h.assert_inheritance(x, tubular.imputers.BaseImputer)
[ "def test_class_methods(self):\n\n x = NearestMeanResponseImputer(\n response_column=\"c\", use_median_if_no_nulls=False, columns=None\n )\n\n h.test_object_method(obj=x, expected_method=\"fit\", msg=\"fit\")\n\n h.test_object_method(obj=x, expected_method=\"transform\", msg=\"transform\")", "def test_learnt_values_not_modified(self):\n\n df = d.create_NearestMeanResponseImputer_test_df()\n\n x = NearestMeanResponseImputer(response_column=\"c\", columns=[\"a\", \"b\"])\n\n x.fit(df)\n\n x2 = NearestMeanResponseImputer(response_column=\"c\", columns=[\"a\", \"b\"])\n\n x2.fit(df)\n\n x2.transform(df)\n\n h.assert_equal_dispatch(\n expected=x.impute_values_,\n actual=x2.impute_values_,\n msg=\"Impute values not changed in transform\",\n )", "def __init__(\n self,\n num_imputer=InterpolateImputer,\n cat_imputer=ModeImputer,\n num_kwgs={\"fill_strategy\": \"linear\"},\n cat_kwgs={\"fill_strategy\": \"random\"}\n ):\n DefaultBaseImputer.__init__(\n self,\n num_imputer=num_imputer,\n cat_imputer=cat_imputer,\n num_kwgs=num_kwgs,\n cat_kwgs=cat_kwgs\n )", "def test_arguments(self):\n\n h.test_function_arguments(\n func=NearestMeanResponseImputer.fit,\n expected_arguments=[\"self\", \"X\", \"y\"],\n expected_default_values=(None,),\n )", "def test_use_median_if_no_nulls_false_and_columns_with_no_nulls_error(self):\n\n df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5], \"b\": [5, 4, 3, 2, 1], \"c\": [3, 2, 1, 4, 5]}\n )\n\n x = NearestMeanResponseImputer(response_column=\"c\", columns=[\"a\", \"b\"])\n\n with pytest.raises(\n ValueError,\n match=\"Column a has no missing values, cannot use this transformer.\",\n ):\n\n x.fit(df)", "def __init__(\n self,\n num_imputer=PMMImputer,\n cat_imputer=MultinomialLogisticImputer,\n num_kwgs=None,\n cat_kwgs=None\n ):\n # delegate to DefaultBaseImputer\n DefaultBaseImputer.__init__(\n self,\n num_imputer=num_imputer,\n cat_imputer=cat_imputer,\n num_kwgs=num_kwgs,\n cat_kwgs=cat_kwgs\n )", "def test_super_transform_called(self, mocker):\n\n df = d.create_NearestMeanResponseImputer_test_df()\n\n x = NearestMeanResponseImputer(response_column=\"c\", columns=[\"a\", \"b\"])\n\n x.fit(df)\n\n expected_call_args = {\n 0: {\"args\": (d.create_NearestMeanResponseImputer_test_df(),), \"kwargs\": {}}\n }\n\n with h.assert_function_call(\n mocker, tubular.base.BaseTransformer, \"transform\", expected_call_args\n ):\n\n x.transform(df)", "def test_null_values_in_response_error(self):\n\n df = d.create_df_3()\n\n x = NearestMeanResponseImputer(response_column=\"c\", columns=[\"a\", \"b\"])\n\n with pytest.raises(ValueError, match=r\"Response column \\(c\\) has null values.\"):\n\n x.fit(df)", "def impute_by_regression(target, df, impute_method=\"mean\"):\n if target.name in df.columns:\n df = df[~target.name]\n reg_imp = MiceImputer(seed_strategy=impute_method, target=target.name, group=[])\n reg_imp.fit(pd.concat([df, target], axis=0))\n return reg_imp", "def test_use_median_if_no_nulls_not_bool_error(self):\n\n with pytest.raises(TypeError, match=\"use_median_if_no_nulls must be a bool\"):\n\n NearestMeanResponseImputer(\n response_column=\"a\", use_median_if_no_nulls=\"abc\"\n )", "def test_non_numeric_response_column_error(self):\n\n df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5], \"b\": [5, 4, 3, 2, 1], \"c\": [\"a\", \"b\", \"c\", \"d\", \"e\"]}\n )\n\n x = NearestMeanResponseImputer(response_column=\"c\", columns=[\"a\", \"b\"])\n\n with pytest.raises(\n ValueError, match=\"dtypes in response_column must be numeric.\"\n ):\n\n x.fit(df)", "def impute_apply(data, 
n_iter, to_nan=0.2, fast_impute=False):\n output = impute_optimizer(data, n_iter=n_iter, to_nan=0.2, fast_impute=False)\n imputer, param = output.iloc[0,:].name.split(\"__\")\n param = param.replace(\":\", \"\")\n\n if imputer == \"SimpleImputer\":\n ix = data.index.copy()\n data = (SimpleImputer(strategy=param)\n .fit_transform(np.asarray(data).reshape(-1, 1)))\n data = pd.Series(data.flatten(), index=ix)\n del ix\n\n elif imputer == \"KNNImputer\":\n ix = data.index.copy()\n data = (KNNImputer(weights=param)\n .fit_transform(np.asarray(data).reshape(-1, 1)))\n data = pd.Series(data.flatten(), index=ix)\n del ix\n\n elif imputer == \"Interpolate\":\n if param == \"time\":\n data.index = pd.to_datetime(pd.to_timedelta(data.index, unit=\"days\"))\n data = data.interpolate(method=param, limit_direction=\"both\")\n else:\n data = data.interpolate(method=param, limit_direction=\"both\")\n\n elif imputer == \"Interpolate_with_order\":\n # Order can be tweaked (default quadratic)\n data = data.interpolate(method=param, limit_direction=\"both\", order=2)\n \n elif imputer == \"TimeSeries_LOCF\":\n ix = data.index.copy()\n data = locf(np.asarray(data).reshape(1, -1))\n data = pd.Series(data.flatten(), index=ix)\n del ix\n \n elif imputer == \"Moving_Win_Imputer\":\n ix = data.index.copy()\n param = int(param)\n remainder = -(len(data) % param)\n data = np.asarray(list(zip(*[iter(data)] * param)))\n data = np.asarray(moving_window(data, wsize=param))\n if remainder != 0:\n data = pd.Series(data.flatten(),\n index=ix[:remainder])\n else:\n data = pd.Series(data.flatten(), index=ix)\n del ix\n else:\n raise Exception\n print(\"Imputer passed through \\\"impute_optimize\\\" cannot be applied\")\n print(f\"Value passed: {impter}\")\n \n return data, imputer, param", "def num_imputer(self):\n return self._num_imputer", "def test_learned_normal_impute(make_missing_data):\n ab.set_hyperseed(100)\n _, m, X, _ = make_missing_data\n\n # This replicates the input layer behaviour\n def data_layer(**kwargs):\n return kwargs['X'], 0.0\n\n def mask_layer(**kwargs):\n return kwargs['M'], 0.0\n\n n, N, D = X.shape\n impute = ab.LearnedNormalImpute(data_layer, mask_layer)\n\n F, KL = impute(X=X, M=m)\n\n tc = tf.test.TestCase()\n with tc.test_session():\n tf.global_variables_initializer().run()\n X_imputed = F.eval()\n assert KL.eval() == 0.0 # Might want to change this in the future\n assert(X_imputed.shape == X.shape)", "def impute(X_train, X_test, strategy):\n imp = Imputer(missing_values=np.nan, strategy=strategy).fit(X_train)\n X_train_imputed = imp.transform(X_train)\n X_train_imputed = pd.DataFrame(\n X_train_imputed, columns=X_train.columns)\n X_test_imputed = imp.transform(X_test)\n X_test_imputed = pd.DataFrame(X_test_imputed, columns=X_test.columns)\n return X_train_imputed, X_test_imputed", "def test_imputer_kNN(setup_imputation_kNN, expected_imputation_kNN):\n for case, expected_outcome in expected_imputation_kNN.items():\n calc_imputation = setup_imputation_kNN[case][\"df\"]\n calc_imputation[setup_imputation_kNN[case][\"col_name\"]] = impute_kNN(\n **setup_imputation_kNN[case]\n )\n calc_imputation = calc_imputation.round(10)\n\n assert_frame_equal(calc_imputation, expected_outcome[\"df\"], check_dtype=False)", "def imputeAge(train, test):\n for df in [train, test]:\n df['Age_Null_Flag'] = df['Age'].apply(lambda x: 1 if pd.isnull(x) else 0)\n train['mean'] = train.groupby(['Name_Title', 'Pclass'])['Age'].transform('mean')\n train['Age'] = train['Age'].fillna(train['mean'])\n merged_data = 
test.merge(train, on=['Name_Title', 'Pclass'], how='left').drop_duplicates(['PassengerId_x'])\n test['Age'] = np.where(test['Age'].isnull(), merged_data['mean'], test['Age'])\n test['Age'] = test['Age'].fillna(test['Age'].mean())\n del train['mean']\n return train, test", "def test_mean(self):\n x = np.random.random(10)\n self.assertAlmostEqual(m.mean(x), x.mean())\n self.assertAlmostEqual(m.mean(iter(x)), x.mean())", "def fit(self, dataset):\n if dataset.static_feature is not None: \n # MICE\n if self.imputation_model_name == 'mice': \n self.imputation_model = IterativeImputer() \n # MissForest\n elif self.imputation_model_name == 'missforest': \n self.imputation_model = MissForest() \n # KNN\n elif self.imputation_model_name == 'knn': \n self.imputation_model = KNNImputer()\n \n self.imputation_model.fit(dataset.static_feature)\n\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that an exception is raised if response_column is not a str.
def test_response_column_not_str_error(self):

    with pytest.raises(TypeError, match="response_column must be a str"):

        NearestMeanResponseImputer(response_column=0)
[ "def test_extract_column_8(self):\n with self.assertRaises(TypeError):\n querying.extract_column(self.column, check=str)", "def test_get_column_enforce_type_typeerror(self):\n row = {\"col1\": 1, \"col2\": 2}\n with self.assertRaises(TypeError):\n get_column(row, \"col1\", enforce_type=str)\n with self.assertRaises(TypeError):\n get_column(row, \"col2\", enforce_type=float)", "def test_handle_string_invalid_format():\n artifacts = types.ColumnArtifacts(\"string\", format=\"unsupported\")\n\n with pytest.raises(exceptions.FeatureNotImplementedError):\n column._handle_string(artifacts=artifacts)", "def test_handle_string_invalid(artifacts_kwargs):\n artifacts = types.ColumnArtifacts(\"string\", **artifacts_kwargs)\n\n with pytest.raises(exceptions.MalformedSchemaError):\n column._handle_string(artifacts=artifacts)", "def test_raise_if_exception_missing_str() -> None:\n programming_exc = ProgrammingError(\"select * from;\", Mock(), Mock())\n programming_exc.__cause__ = MockPyODBCProgrammingError(\n \"[42S11] [FreeTDS][SQL Server]The operation failed because an index or statistics with name 'ix_states_old_state_id' already exists on table 'states'. (1913) (SQLExecDirectW)\"\n )\n\n migration.raise_if_exception_missing_str(\n programming_exc, [\"already exists\", \"duplicate\"]\n )\n\n with pytest.raises(ProgrammingError):\n migration.raise_if_exception_missing_str(programming_exc, [\"not present\"])", "def test_raise_if_exception_missing_empty_cause_str() -> None:\n programming_exc = ProgrammingError(\"select * from;\", Mock(), Mock())\n programming_exc.__cause__ = MockPyODBCProgrammingError()\n\n with pytest.raises(ProgrammingError):\n migration.raise_if_exception_missing_str(\n programming_exc, [\"already exists\", \"duplicate\"]\n )\n\n with pytest.raises(ProgrammingError):\n migration.raise_if_exception_missing_str(programming_exc, [\"not present\"])", "def test_frame_invalid_column(self):\n with self.assertRaisesRegexp(Exception, \"Invalid column name\"):\n self.frame.take(100, columns=['not_in'])", "def test_non_numeric_response_column_error(self):\n\n df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5], \"b\": [5, 4, 3, 2, 1], \"c\": [\"a\", \"b\", \"c\", \"d\", \"e\"]}\n )\n\n x = NearestMeanResponseImputer(response_column=\"c\", columns=[\"a\", \"b\"])\n\n with pytest.raises(\n ValueError, match=\"dtypes in response_column must be numeric.\"\n ):\n\n x.fit(df)", "def test_adjust_columns_non_string_error(self):\n\n with pytest.raises(TypeError, match=\"adjust_column should be a string\"):\n\n CrossColumnAddTransformer(mappings={\"a\": {\"a\": 1}}, adjust_column=1)", "def test_get_column_transform_and_enforce_type(self):\n row = {\"col1\": \"1\", \"col2\": 2}\n\n self.assertEqual(get_column(row, \"col1\", transform=int, enforce_type=int), 1)\n\n with self.assertRaises(TypeError):\n get_column(row, \"col1\", transform=str, enforce_type=int)", "def test_column_name_validation_fail(self):\n\n schema = {\n 'decimal_1': float\n }\n df = pd.DataFrame(data=(1, 2, 3), columns=['err_col'])\n\n try:\n val = Validator().validate_column_names(df, schema)\n except Exception as e:\n assert \"decimal_1\" in str(e).lower()\n assert e.__class__ == AssertionError", "def __expectString(val):\n if type(val) != str:\n raise Exception('Expected string, received {}'.format(type(val)))", "def _check_column_valid(self, column):\n if (isinstance(column, (int, long) )):\n if (column<0 and column>=self.get_number_of_cols()):\n raise ValueError(\"ERROR! 
column number (\" + str(column) + \") not valid!\")\n \n if (isinstance(column, str )):\n if (column not in self._col_names):\n raise ValueError(\"ERROR! column name (\" + column + \") not valid!\")", "def test_get_str_nullable(self):\n row = {\"col1\": None}\n self.assertEqual(get_str(row, \"col1\"), None)\n self.assertEqual(get_str(row, \"col1\", nullable=True), None)\n with self.assertRaises(ValueError):\n get_str(row, \"col1\", nullable=False)", "def test_get_column_enforce_type(self):\n row = {\"col1\": 1, \"col2\": 2}\n self.assertEqual(get_column(row, \"col1\", enforce_type=int), 1)\n self.assertEqual(get_column(row, \"col2\", enforce_type=int), 2)", "def _verify_response(self, response, expected_status,\n expected_type='application/json'):\n actual_status = response.status_code\n actual_type = response.headers['Content-Type']\n\n if response.status_code != expected_status:\n try:\n response_content = response.json()\n error_message = response_content['message']\n\n raise RuntimeError('Failed to execute operation. Server returned ' +\n f'an error with status {actual_status}: {error_message}')\n except:\n # In some weird cases the server returns an error nobody will ever understand.\n # This catch-all fixes the problem and returns a somewhat useful error message.\n raise RuntimeError('Failed to execute operation. Server returned ' +\n f'an error with status: {actual_status}')\n\n # Sometimes the server does respond, but sends some weird piece of data that we can't parse.\n # This check makes sure that we don't try to ever read it.\n if actual_type != expected_type:\n raise RuntimeError(f'Failed to execute operation. ' +\n 'Received invalid response type: {actual_type}')", "def test_get_int_with_type_error(self):\n row = {\"col\": 1}\n with self.assertRaises(TypeError):\n get_int(row, \"col\", transform=str)", "def test_column_upper_type_error(self):\n\n with pytest.raises(TypeError, match=\"column_upper should be a str\"):\n\n DateDiffLeapYearTransformer(\n column_lower=\"dummy_1\",\n column_upper=123,\n new_column_name=\"dummy_3\",\n drop_cols=True,\n )", "def test_with_wrong_datatypes(self):\n party_data = self.create_party_data\n party_data[\"party_name\"] = 10\n token = self.get_token()\n response = self.client.post(\"/api/v2/party\", data=json.dumps(party_data), content_type=\"application/json\",\n query_string={\"db\": \"test\"}, headers={\"Authorization\": \"Bearer {}\".format(token)})\n \n self.assertEqual(response.status_code, 400)\n self.assertIn(b\"party_name has to be a <class 'str'>\", response.data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that an exception is raised if use_median_if_no_nulls is not a bool.
def test_use_median_if_no_nulls_not_bool_error(self):

    with pytest.raises(TypeError, match="use_median_if_no_nulls must be a bool"):

        NearestMeanResponseImputer(
            response_column="a", use_median_if_no_nulls="abc"
        )
[ "def test_use_median_if_no_nulls_false_and_columns_with_no_nulls_error(self):\n\n df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5], \"b\": [5, 4, 3, 2, 1], \"c\": [3, 2, 1, 4, 5]}\n )\n\n x = NearestMeanResponseImputer(response_column=\"c\", columns=[\"a\", \"b\"])\n\n with pytest.raises(\n ValueError,\n match=\"Column a has no missing values, cannot use this transformer.\",\n ):\n\n x.fit(df)", "def test_median_empty():\n\n assert median([]) == 0", "def nanmedian(x):\n try:\n return np.nanmedian(x)\n except:\n return np.median(x[np.isfinite(x)])", "def test_no_nan():\n\tdef test():\n\t\t@no_nan\n\t\tdef dummy(x):\n\t\t\tif x:\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\treturn float(\"nan\")\n\t\treturn dummy(1) == 1 and dummy(0) == 0\n\treturn [\"vice.core.callback.no_nan\", test]", "def test_median_real():\n\n assert median([2048, 4096, 49152]) == 4096", "def test_no_nans(self):\n self.assertTrue(read_dataframe().isnull().values.any(), \"There are NaNs!\")", "def nanmedian(*args, **kwargs):\n \n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return(np.nanmedian(*args, **kwargs))", "def test_nonfinite_04(self):\n\t\twith self.assertRaises(TypeError):\n\t\t\tresult = arrayfunc.takewhile('==', self.data, self.dataout, 100.0, maxlen=math.nan)", "def float_if_not_none(value):\n ...", "def test_nan_exists():\n with pytest.raises(BadInputError) as excinfo:\n some_fn(np.array([[1.]]))\n assert str(excinfo.value) == \"No NaN's in given data\"", "def test_no_missing_data(self):\n self.assertFalse(self.data_processor.agg_data_frame.isnull().\n values.any())", "def assert_never_inf(tensor):\n try:\n assert torch.isfinite(tensor).byte().any()\n except AssertionError:\n raise ValueError(\"There was an Inf value in tensor\")", "def test_noise_no_trend(self):\n self.assertFalse(self.data_item.is_noise(20))\n self.assertFalse(self.data_item.is_noise(20.1))\n self.assertFalse(self.data_item.is_noise(10))", "def make_nan_filter(rama=True, chi=True, profiles=False):\n def func(record):\n if rama and np.isnan(np.min(record.rama)):\n return False\n if chi and np.isnan(np.min(record.chi)):\n return False\n if profiles and np.isnan(np.min(record.profiles)):\n return False\n return True\n return func", "def test_nullset(self):\n x = pf.PercentileFinder([])\n t = x.getPercentile(25)\n self.assertEqual(t, None)", "def test_sesgo_not_nan(self, df):\n self.assertFalse(df.isnull().values.any(), note=\"Las mรฉtricas de sesgo e inequidad contienen nulos\")", "def test_median_modulo():\n\n assert median([2048, 4096, 49152, 64738]) == 26624.0", "def nan_test(something):\n# == DEBUG ==\n \n if something == \"nan\":\n sys.exit()", "def check_nan(tensor):\n\tassert(not(torch.isnan(tensor).any()))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that fit has expected arguments.
def test_arguments(self):

    h.test_function_arguments(
        func=NearestMeanResponseImputer.fit,
        expected_arguments=["self", "X", "y"],
        expected_default_values=(None,),
    )
[ "def test_fit(self):\n self._fit()", "def requires_fit(self) -> bool:\n pass", "def test_pipeline_fit_params():\n pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])\n pipe.fit(X=None, y=None, clf__should_succeed=True)\n # classifier should return True\n assert pipe.predict(None)\n # and transformer params should not be changed\n assert pipe.named_steps['transf'].a is None\n assert pipe.named_steps['transf'].b is None", "def test_apply_before_fit() -> None:\n X = np.empty((1, 1))\n # Supervised model\n with pytest.raises(NotFittedError):\n mod1 = NullModel(objective='regression')\n mod1.apply(X)\n # Unsupervised model\n with pytest.raises(NotFittedError):\n mod2 = KMeans()\n mod2.apply(X)", "def test_not_fitted_error():\n with pytest.raises(NotFittedError):\n SingleROClassifier().predict(create_linear_X(), create_y())", "def _check_fitted(aah_cluster):\n assert aah_cluster.fitted\n assert aah_cluster.n_clusters == n_clusters\n assert len(aah_cluster._cluster_names) == n_clusters\n assert len(aah_cluster._cluster_centers_) == n_clusters\n assert aah_cluster._fitted_data is not None\n assert aah_cluster._info is not None\n assert aah_cluster.GEV_ is not None\n assert aah_cluster._labels_ is not None", "def check_fitted(func):\n def wrapper(*args, **kwargs):\n if not hasattr(args[0], \"is_fit\"):\n raise AttributeError(\"Method using check_fitted has no is_fit attr\"\n \" to check if fitted!\")\n if not args[0].is_fit:\n raise NotFittedError(\"{} has not been fit!\"\n \"\".format(args[0].__class__.__name__))\n else:\n return func(*args, **kwargs)\n return wrapper", "def test_fit():\n X_train, X_test, y_train, y_test = get_testing_data()\n\n fs = ReliefF(n_neighbors=100, n_features_to_keep=5)\n fs.fit(X_train, y_train)\n\n with np.load(\"data/test_arrays.npz\") as arrays:\n correct_top_features = arrays['correct_top_features']\n correct_feature_scores = arrays['correct_feature_scores']\n\n assert np.all(np.equal(fs.top_features, correct_top_features))\n assert np.all(np.equal(fs.feature_scores, correct_feature_scores))", "def test_plot_fit_not_implemented():\n plot_fit(display=False, fittype='not implemented')", "def test_invalid_arguments():\n # n_clusters\n with pytest.raises(\n TypeError, match=\"'n_clusters' must be an instance of \"\n ):\n aahCluster_ = AAHCluster(n_clusters=\"4\")\n with pytest.raises(ValueError, match=\"The number of clusters must be a\"):\n aahCluster_ = AAHCluster(n_clusters=0)\n with pytest.raises(ValueError, match=\"The number of clusters must be a\"):\n aahCluster_ = AAHCluster(n_clusters=-101)\n\n # normalize_input\n with pytest.raises(\n TypeError, match=\"'normalize_input' must be an instance of bool\"\n ):\n aahCluster_ = AAHCluster(n_clusters=n_clusters, normalize_input=\"asdf\")\n with pytest.raises(\n TypeError, match=\"'normalize_input' must be an instance of bool\"\n ):\n aahCluster_ = AAHCluster(n_clusters=n_clusters, normalize_input=None)\n\n aahCluster_ = AAHCluster(\n n_clusters=n_clusters,\n # ignore_polarity=True,\n normalize_input=False,\n )\n # inst\n with pytest.raises(TypeError, match=\"'inst' must be an instance of \"):\n aahCluster_.fit(epochs_eeg.average())\n\n # tmin/tmax\n with pytest.raises(TypeError, match=\"'tmin' must be an instance of \"):\n aahCluster_.fit(raw_eeg, tmin=\"101\")\n with pytest.raises(TypeError, match=\"'tmax' must be an instance of \"):\n aahCluster_.fit(raw_eeg, tmax=\"101\")\n with pytest.raises(ValueError, match=\"Argument 'tmin' must be positive\"):\n aahCluster_.fit(raw_eeg, tmin=-101, tmax=None)\n 
with pytest.raises(ValueError, match=\"Argument 'tmax' must be positive\"):\n aahCluster_.fit(raw_eeg, tmin=None, tmax=-101)\n with pytest.raises(\n ValueError,\n match=\"Argument 'tmax' must be strictly larger than 'tmin'.\",\n ):\n aahCluster_.fit(raw_eeg, tmin=5, tmax=1)\n with pytest.raises(\n ValueError,\n match=\"Argument 'tmin' must be shorter than the instance length.\",\n ):\n aahCluster_.fit(raw_eeg, tmin=101, tmax=None)\n with pytest.raises(\n ValueError,\n match=\"Argument 'tmax' must be shorter than the instance length.\",\n ):\n aahCluster_.fit(raw_eeg, tmin=None, tmax=101)\n\n # reject_by_annotation\n with pytest.raises(\n TypeError, match=\"'reject_by_annotation' must be an instance of \"\n ):\n aahCluster_.fit(raw_eeg, reject_by_annotation=1)\n with pytest.raises(ValueError, match=\"only allows for\"):\n aahCluster_.fit(raw_eeg, reject_by_annotation=\"101\")", "def test_predict(self):\n assert 2 == 2", "def _check_is_fitted(self):\n if not self._is_fitted:\n raise ValueError('The model has not been fitted.')", "def test_parametrized_estimators_fitting_error(estimators, X, y, est_name):\n with pytest.raises(ValueError):\n _ParametrizedEstimatorsMixin(estimators, est_name).fit(X, y)", "def do_init_params_fit(self, args):\n # Parse input and handle bad cases\n parsed = parse(args)\n if len(parsed) < 2:\n print(\"init_params_fit: Not enough arguments given\")\n return False\n if len(parsed) % 2 == 1:\n print(\"init_params_fit: Parameter given without value\")\n return False\n try:\n for i in range(0, len(parsed), 2):\n self.parameters.add(parsed[i], value=float(parsed[i + 1]))\n except ValueError:\n print(\"init_params_fit: Non-numeric value supplied\")\n return False", "def test_shape_predictor(*args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def _fit_callback(self, data_inputs, expected_outputs):\n self.fit_callback_function((data_inputs, expected_outputs), *self.more_arguments)", "def test_simultaneous_fit(logging_mixin: Any, setup_simultaneous_fit_data: Any) -> None:\n # Setup\n h, h_shifted, _, _ = setup_simultaneous_fit_data\n cost_func1 = cost_function.ChiSquared(parabola, data=h)\n cost_func2 = cost_function.ChiSquared(parabola, data=h_shifted)\n minuit_args: Dict[str, Union[float, Tuple[float, float]]] = {\n \"scale\": 1.5,\n \"error_scale\": 0.15,\n \"limit_scale\": (-1000, 1000),\n }\n\n # Setup the probfit version\n probfit = pytest.importorskip(\"probfit\")\n s_probfit = probfit.SimultaneousFit(*[cost_func1, cost_func2])\n\n # Setup the comparison version\n s = cost_func1 + cost_func2\n\n # First, basic checks\n logger.debug(f\"func_code: {s.func_code}, co_varnames: {s.func_code.co_varnames}\")\n assert s.func_code == fit_base.FuncCode([\"scale\"])\n assert s.func_code.co_varnames == list(s_probfit.func_code.co_varnames)\n\n # Now perform the fits\n fit_result, _ = fit_integration.fit_with_minuit(cost_func=s, minuit_args=minuit_args, x=h.x)\n fit_result_probfit, _ = fit_integration.fit_with_minuit(cost_func=s_probfit, minuit_args=minuit_args, x=h.x)\n # And check that the fit results agree\n logger.debug(f\"scale: {fit_result.values_at_minimum['scale']} +/- {fit_result.errors_on_parameters['scale']}\")\n logger.info(f\"type: {type(fit_result)}, {type(fit_result_probfit)}\")\n assert fit_result == fit_result_probfit", "def test_fit(self, mock_draw):\n X, y = make_classification(\n n_samples=400,\n n_features=20,\n n_informative=8,\n n_redundant=8,\n n_classes=2,\n n_clusters_per_class=4,\n random_state=1221,\n )\n\n visualizer = 
DiscriminationThreshold(BernoulliNB())\n assert not hasattr(visualizer, \"thresholds_\")\n assert not hasattr(visualizer, \"cv_scores_\")\n\n out = visualizer.fit(X, y)\n\n assert out is visualizer\n mock_draw.assert_called_once()\n assert hasattr(visualizer, \"thresholds_\")\n assert hasattr(visualizer, \"cv_scores_\")\n\n for metric in METRICS:\n assert metric in visualizer.cv_scores_\n assert \"{}_lower\".format(metric) in visualizer.cv_scores_\n assert \"{}_upper\".format(metric) in visualizer.cv_scores_", "def test_mutable(data):\n (input_data, y, formula) = data\n model_prefit = gammy.BayesianGAM(formula)\n model_fitted = model_prefit.fit(input_data, y)\n assert_arrays_equal(\n model_prefit.mean_theta,\n model_fitted.mean_theta\n )\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test an error is raised if response_column is non-numeric.
def test_non_numeric_response_column_error(self):

    df = pd.DataFrame(
        {"a": [1, 2, 3, 4, 5], "b": [5, 4, 3, 2, 1], "c": ["a", "b", "c", "d", "e"]}
    )

    x = NearestMeanResponseImputer(response_column="c", columns=["a", "b"])

    with pytest.raises(
        ValueError, match="dtypes in response_column must be numeric."
    ):

        x.fit(df)
[ "def test_response_column_not_str_error(self):\n\n with pytest.raises(TypeError, match=\"response_column must be a str\"):\n\n NearestMeanResponseImputer(response_column=0)", "def test_check_valid_values_raises_valueerror_if_not_numeric(self):\n # Setup\n X = np.array([\n [1.0, 'A'],\n [0.0, 1.0]\n ])\n\n instance_mock = MagicMock()\n function_mock = MagicMock()\n\n # Run\n decorated_function = check_valid_values(function_mock)\n\n # Check:\n error_msg = 'There are non-numerical values in your data.'\n with pytest.raises(ValueError, match=error_msg):\n decorated_function(instance_mock, X)\n\n function_mock.assert_not_called()\n instance_mock.assert_not_called()", "def test_invalid_numeral(self):\n numeral = \"o\"\n response = validate(numeral)\n self.assertFalse(response['statusCode'] == 200, numeral + \" should not be valid numeral\")\n\n numeral = \"abc\"\n response = validate(numeral)\n self.assertFalse(response['statusCode'] == 200, numeral + \" should not be valid numeral\")", "async def value_not_digit(self, ctx, user):\r\n await ctx.send(f\"{user.mention} Please raise a numerical value.\")", "def _is_numeric(df, column):\n\n if str(df[column].dtypes) == 'int64' or \\\n str(df[column].dtypes) == 'float64':\n return True\n else:\n return False", "def test_mapping_values_not_numeric_error(self):\n\n with pytest.raises(TypeError, match=\"mapping values must be numeric\"):\n\n CrossColumnAddTransformer(mappings={\"a\": {\"a\": \"b\"}}, adjust_column=\"b\")", "def test_column_name_validation_fail(self):\n\n schema = {\n 'decimal_1': float\n }\n df = pd.DataFrame(data=(1, 2, 3), columns=['err_col'])\n\n try:\n val = Validator().validate_column_names(df, schema)\n except Exception as e:\n assert \"decimal_1\" in str(e).lower()\n assert e.__class__ == AssertionError", "def test_handle_number_invalid_format():\n artifacts = types.ColumnArtifacts(\"number\", format=\"unsupported\")\n\n with pytest.raises(exceptions.FeatureNotImplementedError):\n column._handle_number(artifacts=artifacts)", "def test_parse_charge_row_valueerror(self):\n charge_row = {\n \"PK\": 1,\n \"ChargeActualName\": \"test_charge_name\",\n \"ChargeAmount\": \"not_a_decimal\", # ValueError here\n \"UsageUnit\": \"kW\",\n \"ChargeUnitsUsed\": Decimal(200),\n \"ChargeRatePerUnit\": Decimal(10),\n \"ThirdPartyProvider\": \"test_provider\",\n \"IsAdjustmentCharge\": 0,\n \"IntervalStart\": date(2000, 2, 1),\n \"IntervalEnd\": date(2000, 3, 1),\n \"ChargeId\": None,\n }\n with self.assertRaises(ValueError):\n UrjanetPyMySqlDataSource.parse_charge_row(charge_row)", "def is_numeric(attribute):\n colType = attribute[1]\n return 'int' in colType or 'float' in colType", "def test_uda_bad_numeric_task(self):\n code, out, err = self.t.runError(\"add bad extra:bad_numeric\")\n self.assertNotIn(\"Created task\", out)\n self.assertIn(\"The value 'bad_numeric' is not a valid numeric value\", err)", "def test_handle_number_invalid(artifacts_kwargs):\n artifacts = types.ColumnArtifacts(\"number\", **artifacts_kwargs)\n\n with pytest.raises(exceptions.MalformedSchemaError):\n column._handle_number(artifacts=artifacts)", "def test_get_column_enforce_type_typeerror(self):\n row = {\"col1\": 1, \"col2\": 2}\n with self.assertRaises(TypeError):\n get_column(row, \"col1\", enforce_type=str)\n with self.assertRaises(TypeError):\n get_column(row, \"col2\", enforce_type=float)", "def test_parse_meter_row_valueerror(self):\n meter_row = {\n \"PK\": \"not_an_int\", # ValueError occurs here\n \"Tariff\": \"test_tariff\",\n \"ServiceType\": 
\"test_service\",\n \"PODid\": \"12345\",\n \"MeterNumber\": \"67890\",\n \"IntervalStart\": date(2000, 2, 1),\n \"IntervalEnd\": date(2000, 3, 1),\n }\n with self.assertRaises(ValueError):\n UrjanetPyMySqlDataSource.parse_meter_row(meter_row)", "def _check_column_valid(self, column):\n if (isinstance(column, (int, long) )):\n if (column<0 and column>=self.get_number_of_cols()):\n raise ValueError(\"ERROR! column number (\" + str(column) + \") not valid!\")\n \n if (isinstance(column, str )):\n if (column not in self._col_names):\n raise ValueError(\"ERROR! column name (\" + column + \") not valid!\")", "def test_extract_column_8(self):\n with self.assertRaises(TypeError):\n querying.extract_column(self.column, check=str)", "def test_unexpected_error_result(self):\n process_result = process_response(self.resp_unexpected_error)\n self.assertEqual(process_result[\"result\"], -1)", "def test_frame_invalid_column(self):\n with self.assertRaisesRegexp(Exception, \"Invalid column name\"):\n self.frame.take(100, columns=['not_in'])", "def test_read_invalid_float(self):\n self.data_validation_on_read_template(2.14, expect_invalid=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test an error is raised if the response column contains null entries.
def test_null_values_in_response_error(self):

    df = d.create_df_3()

    x = NearestMeanResponseImputer(response_column="c", columns=["a", "b"])

    with pytest.raises(ValueError, match=r"Response column \(c\) has null values."):

        x.fit(df)
[ "def test_no_missing_data(self):\n self.assertFalse(self.data_processor.agg_data_frame.isnull().\n values.any())", "def test_ref_data_validation_null_fail(self):\n df = pd.DataFrame(data=(1, 2, 3, None), columns=['test'])\n\n try:\n val = Validator().validate_val_in_list(df, 'test', [1, 2, 3, 4, 5])\n except Exception as e:\n assert \"nan\" in str(e)\n assert e.__class__ == ValueError", "def test_API9_empty_columns(self):\n\n response = self.app.post(\n constants.API9_URL,\n data = json.dumps(dict(table = \"RIDES\")),\n mimetype = 'application/json')\n\n self.assertEqual(response.status_code, 400)", "def test_empty_response(self):\n series = self.es_check.get_series()\n self.assertFalse(series['error'])\n self.assertEqual(series['raw'], get_json_file('es_empty_response.json'))\n data = series['data']\n self.assertEqual(len(data), 1)\n\n data = data[0]\n self.assertEqual(str(data['series']), 'no_data_fill_0')\n self.assertEqual(data['datapoints'], [[1491577200, 0]])\n\n result, tags = self.es_check._run()\n self.assertFalse(result.succeeded)\n self.assertEqual(result.error, 'CRITICAL no_data_fill_0: 0.0 not >= 3.0')\n self.assertEqual(tags, ['critical:no_data_fill_0', 'warning:no_data_fill_0'])", "def test_API8_empty_columns(self):\n\n response = self.app.post(\n constants.API8_URL,\n data = json.dumps(dict(table = \"RIDES\")),\n mimetype = 'application/json')\n\n self.assertEqual(response.status_code, 400)", "def test_not_found_error(self):\n process_result = process_response(self.resp_not_found)\n self.assertEqual(process_result[\"error\"], \"Not found\")", "def test_no_nans(self):\n self.assertTrue(read_dataframe().isnull().values.any(), \"There are NaNs!\")", "def test_get_column_nullable(self):\n row = {\"col1\": None}\n self.assertEqual(get_column(row, \"col1\"), None)\n self.assertEqual(get_column(row, \"col1\", nullable=True), None)\n with self.assertRaises(ValueError):\n get_column(row, \"col1\", nullable=False)", "def testFalseWhenEmpty(self):\n row = sqlresult.ResultRow([], [])\n self.assertFalse(row)", "def check_dataframe_nonemptiness(df, table_name):\n if df.count() == 0:\n raise Exception(\"DataFrame \" + table_name + \" has no records.\")", "def test_response_column_not_str_error(self):\n\n with pytest.raises(TypeError, match=\"response_column must be a str\"):\n\n NearestMeanResponseImputer(response_column=0)", "def is_error(response) -> bool:\n return hasattr(response, 'message') and hasattr(response, 'message_detail')", "def testFalseWhenEmpty(self):\n result = sqlresult.ResultSet(query='Lorem Ipsum dolor sit amet.',\n charset='latin1',\n result=(),\n fields=self.fields,\n affected=1,\n insertid=1)\n self.assertFalse(result)", "def data_mismatches_none(self, column) -> None:\r\n msg = '\\nNo data mismatches for {col}'\r\n self._ui.print_(msg.format(col=column), fore='green')", "def test_use_median_if_no_nulls_false_and_columns_with_no_nulls_error(self):\n\n df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5], \"b\": [5, 4, 3, 2, 1], \"c\": [3, 2, 1, 4, 5]}\n )\n\n x = NearestMeanResponseImputer(response_column=\"c\", columns=[\"a\", \"b\"])\n\n with pytest.raises(\n ValueError,\n match=\"Column a has no missing values, cannot use this transformer.\",\n ):\n\n x.fit(df)", "def check_for_null(df, columns=None):\n if not columns:\n columns = df.schema.names\n \n df_count = df.count()\n for col in columns:\n count_nulls = df.where(df[col].isNull()).count()\n if count_nulls > 0:\n print(ValueError(f'Data in {col} has {count_nulls} NULLs'))", "def test_get_decimal_nullable(self):\n 
row = {\"col1\": None}\n self.assertEqual(get_decimal(row, \"col1\"), None)\n self.assertEqual(get_decimal(row, \"col1\", nullable=True), None)\n with self.assertRaises(ValueError):\n get_decimal(row, \"col1\", nullable=False)", "def test_fatal_error_on_missing_required_column(self):\n\n file_name = \"test_panel_data_missing_required_column.csv\"\n fpath = os.path.join(self.base_dir, \"test_data\", file_name)\n with open(fpath, \"rb\") as infile:\n uploaded_file = SimpleUploadedFile(\n fpath, infile.read(), content_type=\"text/csv\"\n )\n clinical_sample_file = ClinicalSampleFile(\n file_name=file_name,\n file_contents=uploaded_file,\n user=self.user,\n gating_strategy=self.gating_strategy,\n )\n\n validation_report = clinical_sample_file.validate()\n length_of_validation_report = len(validation_report)\n # There should be an entry in validation errors .\n self.assertEquals(length_of_validation_report, 1)\n\n # The first entry should have key: required_columns_missing\n # type FATAL and value Clinical_sample\n validation_entry = validation_report[0]\n self.assertEquals(validation_entry.key, \"required_columns_missing\")\n self.assertEquals(validation_entry.entry_type, \"FATAL\")\n self.assertEquals(validation_entry.value, [\"Clinical_sample\"])", "def test_parse_charge_row_nil_pk(self):\n charge_row = {\n \"PK\": None,\n \"ChargeActualName\": \"test_charge_name\",\n \"ChargeAmount\": Decimal(100.00),\n \"UsageUnit\": \"kW\",\n \"ChargeUnitsUsed\": Decimal(200),\n \"ChargeRatePerUnit\": Decimal(10),\n \"ThirdPartyProvider\": \"test_provider\",\n \"IsAdjustmentCharge\": 0,\n \"IntervalStart\": date(2000, 2, 1),\n \"IntervalEnd\": date(2000, 3, 1),\n }\n with self.assertRaises(ValueError):\n UrjanetPyMySqlDataSource.parse_charge_row(charge_row)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test an error is raised if a non-response column contains no nulls and use_median_if_no_nulls is False.
def test_use_median_if_no_nulls_false_and_columns_with_no_nulls_error(self):

    df = pd.DataFrame(
        {"a": [1, 2, 3, 4, 5], "b": [5, 4, 3, 2, 1], "c": [3, 2, 1, 4, 5]}
    )

    x = NearestMeanResponseImputer(response_column="c", columns=["a", "b"])

    with pytest.raises(
        ValueError,
        match="Column a has no missing values, cannot use this transformer.",
    ):

        x.fit(df)
[ "def test_use_median_if_no_nulls_not_bool_error(self):\n\n with pytest.raises(TypeError, match=\"use_median_if_no_nulls must be a bool\"):\n\n NearestMeanResponseImputer(\n response_column=\"a\", use_median_if_no_nulls=\"abc\"\n )", "def test_null_values_in_response_error(self):\n\n df = d.create_df_3()\n\n x = NearestMeanResponseImputer(response_column=\"c\", columns=[\"a\", \"b\"])\n\n with pytest.raises(ValueError, match=r\"Response column \\(c\\) has null values.\"):\n\n x.fit(df)", "def test_no_nans(self):\n self.assertTrue(read_dataframe().isnull().values.any(), \"There are NaNs!\")", "def test_no_missing_data(self):\n self.assertFalse(self.data_processor.agg_data_frame.isnull().\n values.any())", "def test_sesgo_not_nan(self, df):\n self.assertFalse(df.isnull().values.any(), note=\"Las mรฉtricas de sesgo e inequidad contienen nulos\")", "def test_check_valid_values_raises_valuerror_if_nans(self):\n # Setup\n X = np.array([\n [1.0, np.nan],\n [0.0, 1.0]\n ])\n\n instance_mock = MagicMock()\n function_mock = MagicMock()\n\n # Run\n decorated_function = check_valid_values(function_mock)\n\n # Check:\n error_msg = 'There are nan values in your data.'\n with pytest.raises(ValueError, match=error_msg):\n decorated_function(instance_mock, X)\n\n function_mock.assert_not_called()\n instance_mock.assert_not_called()", "def test_no_nan():\n\tdef test():\n\t\t@no_nan\n\t\tdef dummy(x):\n\t\t\tif x:\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\treturn float(\"nan\")\n\t\treturn dummy(1) == 1 and dummy(0) == 0\n\treturn [\"vice.core.callback.no_nan\", test]", "def nanmedian(x):\n try:\n return np.nanmedian(x)\n except:\n return np.median(x[np.isfinite(x)])", "def test_median_empty():\n\n assert median([]) == 0", "def test_nan_exists():\n with pytest.raises(BadInputError) as excinfo:\n some_fn(np.array([[1.]]))\n assert str(excinfo.value) == \"No NaN's in given data\"", "def check_df_nan(df):\n # make sure no NaN as currently not supported. 
\n # TODO use utils mean impute function\n if np.any(np.isnan((df).values)):\n filt=np.isnan((df).mean(axis=1))\n print df.loc[filt,]\n raise ValueError(\"Error NaN in an input df.\")", "def test_non_numeric_response_column_error(self):\n\n df = pd.DataFrame(\n {\"a\": [1, 2, 3, 4, 5], \"b\": [5, 4, 3, 2, 1], \"c\": [\"a\", \"b\", \"c\", \"d\", \"e\"]}\n )\n\n x = NearestMeanResponseImputer(response_column=\"c\", columns=[\"a\", \"b\"])\n\n with pytest.raises(\n ValueError, match=\"dtypes in response_column must be numeric.\"\n ):\n\n x.fit(df)", "def test_response_column_not_str_error(self):\n\n with pytest.raises(TypeError, match=\"response_column must be a str\"):\n\n NearestMeanResponseImputer(response_column=0)", "def test_clean_data_is_not_empty(self):\n self.assertTrue(self.data_processor.clean_data_frame)", "def describe_null(outpath, rawpath, cleanpath, **kwargs):\n \n try:\n raw_data = pd.read_csv(rawpath).drop(columns=['Unnamed: 0'])\n except KeyError:\n raw_data = pd.read_csv(rawpath)\n clean_data = pd.read_csv(cleanpath)\n print('Generating null proportions.')\n raw_data.isna().mean().round(5).to_frame().reset_index().rename(columns={0:'Proportion of Null Values', 'index':'Column Name'}).to_csv(os.path.join(outpath, 'nulls_arrests_raw.csv'), index=False)\n clean_data.isna().mean().round(5).to_frame().reset_index().rename(columns={0:'Proportion of Null Values', 'index':'Column Name'}).to_csv(os.path.join(outpath, 'nulls_arrests_clean.csv'), index=False)\n print('Complete')", "def test_agg_data_is_not_empty(self):\n self.assertFalse(self.data_processor.agg_data_frame.empty)", "def test_r3p_extract_nwis_df_raises_HydroNoDataError(self):\n # alternative 1: class Fake(object): json = lambda: []\n # alternative 2: make a new response object from the requests lib.\n class FakeResponse(object):\n @staticmethod\n def json():\n my_json = {'value': {'timeSeries': []}}\n return my_json\n\n fake_response = FakeResponse\n\n with self.assertRaises(exceptions.HydroNoDataError):\n r3p.extract_nwis_df(fake_response)", "def check_dataframe_nonemptiness(df, table_name):\n if df.count() == 0:\n raise Exception(\"DataFrame \" + table_name + \" has no records.\")", "def test_same_verifs_valid_time_no_nan(hindcast_hist_obs_1d):\n skill = hindcast_hist_obs_1d.verify(\n metric=\"rmse\",\n comparison=\"e2o\",\n dim=[], # important\n alignment=\"same_verifs\",\n )\n assert not skill.coords[\"valid_time\"].isnull().any()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that BaseTransformer.transform is called.
def test_super_transform_called(self, mocker):

    df = d.create_NearestMeanResponseImputer_test_df()

    x = NearestMeanResponseImputer(response_column="c", columns=["a", "b"])

    x.fit(df)

    expected_call_args = {
        0: {"args": (d.create_NearestMeanResponseImputer_test_df(),), "kwargs": {}}
    }

    with h.assert_function_call(
        mocker, tubular.base.BaseTransformer, "transform", expected_call_args
    ):

        x.transform(df)
[ "def test_super_transform_call(self, mocker):\n\n df = d.create_df_1()\n\n mapping = {\"b\": {\"a\": 1.1, \"b\": 1.2, \"c\": 1.3, \"d\": 1.4, \"e\": 1.5, \"f\": 1.6}}\n\n x = CrossColumnAddTransformer(mappings=mapping, adjust_column=\"a\")\n\n expected_call_args = {0: {\"args\": (d.create_df_1(),), \"kwargs\": {}}}\n\n with h.assert_function_call(\n mocker,\n tubular.base.BaseTransformer,\n \"transform\",\n expected_call_args,\n return_value=d.create_df_1(),\n ):\n\n x.transform(df)", "def test_super_transform_called(self, mocker):\n\n df = d.create_date_test_df()\n\n x = DateDiffLeapYearTransformer(\n column_lower=\"a\", column_upper=\"b\", new_column_name=\"c\", drop_cols=True\n )\n\n expected_call_args = {0: {\"args\": (d.create_date_test_df(),), \"kwargs\": {}}}\n\n with h.assert_function_call(\n mocker,\n tubular.base.BaseTransformer,\n \"transform\",\n expected_call_args,\n return_value=d.create_date_test_df(),\n ):\n\n x.transform(df)", "def test_transform(pipeline):\n pl = pipeline(model=False)\n assert isinstance(pl.transform(X_bin), np.ndarray)\n assert isinstance(pl.transform(X_bin, y_bin), tuple)", "def test_fit_transform(pipeline):\n pl = pipeline(model=False)\n pl.steps[0] = (\"test\", \"passthrough\")\n assert isinstance(pl.fit_transform(X_bin), np.ndarray) # Returns X\n pl.steps[-1] = (\"test_final\", \"passthrough\")\n assert isinstance(pl.fit_transform(X_bin, y_bin), tuple) # Returns X, y", "def test_static_get_transform(self):\n\n cfg = OCIO.Config.CreateRaw()\n\n mat_fwd = OCIO.MatrixTransform()\n mat_fwd.setOffset(self.OFFSET_FWD)\n named_tr_fwd = OCIO.NamedTransform()\n named_tr_fwd.setTransform(mat_fwd, OCIO.TRANSFORM_DIR_FORWARD)\n\n mat_inv = OCIO.MatrixTransform()\n mat_inv.setOffset(self.OFFSET_INV)\n named_tr_inv = OCIO.NamedTransform()\n named_tr_inv.setTransform(mat_inv, OCIO.TRANSFORM_DIR_INVERSE);\n\n # Forward transform from forward-only named transform\n tf = OCIO.NamedTransform.GetTransform(named_tr_fwd, OCIO.TRANSFORM_DIR_FORWARD)\n self.assertIsNotNone(tf)\n proc = cfg.getProcessor(tf, OCIO.TRANSFORM_DIR_FORWARD)\n group = proc.createGroupTransform()\n self.assertEqual(len(group), 1)\n self.assertIsInstance(group[0], OCIO.MatrixTransform)\n self.assertEqual(group[0].getOffset(), self.OFFSET_FWD)\n\n # Inverse transform from forward-only named transform\n tf = OCIO.NamedTransform.GetTransform(named_tr_fwd, OCIO.TRANSFORM_DIR_INVERSE)\n self.assertIsNotNone(tf)\n proc = cfg.getProcessor(tf, OCIO.TRANSFORM_DIR_FORWARD)\n group = proc.createGroupTransform()\n self.assertEqual(len(group), 1)\n self.assertIsInstance(group[0], OCIO.MatrixTransform)\n self.assertEqual(group[0].getOffset(), self.OFFSET_FWD_INV)\n\n # Forward transform from inverse-only named transform\n tf = OCIO.NamedTransform.GetTransform(named_tr_inv, OCIO.TRANSFORM_DIR_FORWARD)\n self.assertIsNotNone(tf)\n proc = cfg.getProcessor(tf, OCIO.TRANSFORM_DIR_FORWARD)\n group = proc.createGroupTransform()\n self.assertEqual(len(group), 1)\n self.assertIsInstance(group[0], OCIO.MatrixTransform)\n self.assertEqual(group[0].getOffset(), self.OFFSET_INV_INV)\n\n # Inverse transform from inverse-only named transform\n tf = OCIO.NamedTransform.GetTransform(named_tr_inv, OCIO.TRANSFORM_DIR_INVERSE)\n self.assertIsNotNone(tf)\n proc = cfg.getProcessor(tf, OCIO.TRANSFORM_DIR_FORWARD)\n group = proc.createGroupTransform()\n self.assertEqual(len(group), 1)\n self.assertIsInstance(group[0], OCIO.MatrixTransform)\n self.assertEqual(group[0].getOffset(), self.OFFSET_INV)", "def test_class_methods(self):\n\n x 
= CrossColumnAddTransformer(mappings={\"a\": {\"a\": 1}}, adjust_column=\"b\")\n\n h.test_object_method(obj=x, expected_method=\"transform\", msg=\"transform\")", "def transform_test_data():\n return TransformTestData()", "def test_basic_transformer_operations() -> None:\n tf.register_transformer('dummy', DummyTransformer)\n transformer: TransformerBase = tf.get('dummy')\n assert transformer.transform('foo') == RoleId(__root__='my_string')", "def test_transform_not_added():\n rng = np.random.default_rng(0)\n A2B = pt.random_transform(rng)\n C2D = pt.random_transform(rng)\n\n tm = TransformManager()\n tm.add_transform(\"A\", \"B\", A2B)\n tm.add_transform(\"C\", \"D\", C2D)\n\n with pytest.raises(KeyError, match=\"Unknown frame\"):\n tm.get_transform(\"A\", \"G\")\n with pytest.raises(KeyError, match=\"Unknown frame\"):\n tm.get_transform(\"G\", \"D\")\n with pytest.raises(KeyError, match=\"Cannot compute path\"):\n tm.get_transform(\"A\", \"D\")", "def test_set_transform():\n w = wcs.WCS(forward_transform=pipe[:])\n w.set_transform('detector', 'focal', models.Identity(2))\n assert_allclose(w(1, 1), (2, -2))\n with pytest.raises(CoordinateFrameError):\n w.set_transform('detector1', 'focal', models.Identity(2))\n with pytest.raises(CoordinateFrameError):\n w.set_transform('detector', 'focal1', models.Identity(2))", "def do_generic_transform(self, *args, **kwargs):\n # Makes assumptions about order of nodes returned by supported_nodes\n # override to perform more complicated transformations\n supported_nodes = self.create_supported_nodes(*args, **kwargs)\n return self.create_transform_result(to_replace=supported_nodes[0:1],\n to_add=supported_nodes[1:])", "def test_get_transform():\n w = wcs.WCS(pipe[:])\n tr_forward = w.get_transform('detector', 'focal')\n tr_back = w.get_transform('icrs', 'detector')\n x, y = 1, 2\n fx, fy = tr_forward(1, 2)\n assert_allclose(w.pipeline[0].transform(x, y), (fx, fy))\n assert_allclose(w.pipeline[0].transform(x, y), (fx, fy))\n assert_allclose((x, y), tr_back(*w(x, y)))\n assert(w.get_transform('detector', 'detector') is None)", "def test_transform_simple(self, dataset, preprocessor, bert):\n (actual_processed_dataset, actual_encoded_mentions, actual_encoded_mentions_split_sizes,\n actual_targets, actual_targets_split_sizes) = \\\n preprocessor.transform(dataset, bert)\n\n # TODO 1 Example should include corefs\n expected_processed_dataset = {\n 'train': {\n 'WH_train_0': {\n 'mentions': [[]],\n 'query': \"participant_of juan rossell\",\n 'candidate_indices': {\n '1996 summer olympics': [],\n 'olympic games': [],\n 'sport': [],\n }\n },\n 'WH_train_1': {\n 'mentions': [\n [\n {'text': 'english', 'corefs': []},\n {'text': 'spanish', 'corefs': []},\n ],\n [\n {'text': 'nahuatl', 'corefs': []},\n {'text': 'spanish', 'corefs': []},\n ]\n ],\n 'query': \"languages_spoken_or_written john osteen\",\n 'candidate_indices': {\n 'english': [0],\n 'greek': [],\n 'koine greek': [],\n 'nahuatl': [2],\n 'spanish': [1, 3],\n }\n }\n }\n }\n expected_encoded_mentions_split_sizes = {'train': [0, 4]}\n expected_targets = torch.tensor([1, 0, 0, 1, 0, 0, 0, 0])\n expected_targets_split_sizes = {'train': [3, 5]}\n\n assert expected_processed_dataset == actual_processed_dataset\n # 4 because there are four mentions and 768 b/c it is the size of BERT encodings\n assert actual_encoded_mentions['train'].shape == (4, 768)\n assert expected_encoded_mentions_split_sizes == actual_encoded_mentions_split_sizes\n assert torch.equal(expected_targets, actual_targets['train'])\n assert 
expected_targets_split_sizes, actual_targets_split_sizes['train']", "def _transform(self, dataset):\n\n for t in self.transforms:\n method = getattr(dataset, t.name)\n dataset = method(*t.args, **t.kwargs)\n\n return dataset", "def transform(self, data):\n\t\t\n\t\tfor t in self.transformer_list:\n\t\t\tdata = t.transform(data)\n\t\t\t\n\t\treturn data", "def transform(self, dataset, params={}):\n raise NotImplementedError()", "def apply_transform(self, transform, include_scatter=False):\n self._transformed_events = self._transform(transform, include_scatter=include_scatter)\n self._include_scatter_option = include_scatter\n self.transform = transform", "def test_transform(self):\r\n self.assert_(self.object._transform([]) == {})\r\n records = self._get_records(5, keyspace=\"eggs\", column_family=\"bacon\")\r\n out = self.object._transform(records)\r\n self.assert_(len(out) == len(records))\r\n for record in records:\r\n self.assert_(record.key.key in out)\r\n self.assert_(out[record.key.key] is record)\r\n\r\n for key in out:\r\n self.assert_(key == out[key].key.key)", "def setTransformAlgorithm(self, transformer) -> None:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the impute_values_ from fit are not changed in transform.
def test_learnt_values_not_modified(self):

    df = d.create_NearestMeanResponseImputer_test_df()

    x = NearestMeanResponseImputer(response_column="c", columns=["a", "b"])

    x.fit(df)

    x2 = NearestMeanResponseImputer(response_column="c", columns=["a", "b"])

    x2.fit(df)

    x2.transform(df)

    h.assert_equal_dispatch(
        expected=x.impute_values_,
        actual=x2.impute_values_,
        msg="Impute values not changed in transform",
    )
[ "def impute(X_train, X_test, strategy):\n imp = Imputer(missing_values=np.nan, strategy=strategy).fit(X_train)\n X_train_imputed = imp.transform(X_train)\n X_train_imputed = pd.DataFrame(\n X_train_imputed, columns=X_train.columns)\n X_test_imputed = imp.transform(X_test)\n X_test_imputed = pd.DataFrame(X_test_imputed, columns=X_test.columns)\n return X_train_imputed, X_test_imputed", "def impute_values(X_train, X_test, col_names, missing_val, impute_strategy, impute_val=None):\n X_train_res = X_train.copy()\n X_test_res = X_test.copy()\n\n for col in col_names:\n imp = SimpleImputer(missing_values=missing_val, strategy=impute_strategy, fill_value=impute_val)\n imp.fit(X_train[[col]])\n X_train_res[col] = imp.transform(X_train[[col]])\n X_test_res[col] = imp.transform(X_test[[col]])\n\n return X_train_res, X_test_res", "def _impute(self, examples\n ):\n\n for feature, feature_values in examples.items():\n if schema_util.is_categorical_feature(\n schema_util.get_feature(self._schema, feature)):\n imputation_fill_value = CATEGORICAL_FEATURE_IMPUTATION_FILL_VALUE\n else:\n imputation_fill_value = max(\n value for value in feature_values if value is not None) * 10\n examples[feature] = [\n value if value is not None else imputation_fill_value\n for value in feature_values\n ]\n return examples", "def impute_missing_values(\n self,\n imputation_method,\n log_transform=True,\n min_observations_per_peptide=1,\n min_observations_per_allele=1):\n if isinstance(imputation_method, string_types):\n imputation_method = imputer_from_name(imputation_method)\n\n X_incomplete, peptide_list, allele_list = self.to_dense_pMHC_affinity_matrix(\n min_observations_per_peptide=min_observations_per_peptide,\n min_observations_per_allele=min_observations_per_allele)\n\n if imputation_method is None:\n logging.warn(\"No imputation method given\")\n # without an imputation method we should leave all the values\n # incomplete and return an empty dataset\n X_complete = np.ones_like(X_incomplete) * np.nan\n else:\n if log_transform:\n X_incomplete = np.log(X_incomplete)\n\n if np.isnan(X_incomplete).sum() == 0:\n # if all entries in the matrix are already filled in then don't\n # try using an imputation algorithm since it might raise an\n # exception.\n logging.warn(\"No missing values, using original data instead of imputation\")\n X_complete = X_incomplete\n else:\n X_complete = imputation_method.complete(X_incomplete)\n\n if log_transform:\n X_complete = np.exp(X_complete)\n\n allele_to_peptide_to_affinity_dict = dense_pMHC_matrix_to_nested_dict(\n X=X_complete,\n peptide_list=peptide_list,\n allele_list=allele_list)\n return self.from_nested_dictionary(allele_to_peptide_to_affinity_dict)", "def test_imputer_kNN(setup_imputation_kNN, expected_imputation_kNN):\n for case, expected_outcome in expected_imputation_kNN.items():\n calc_imputation = setup_imputation_kNN[case][\"df\"]\n calc_imputation[setup_imputation_kNN[case][\"col_name\"]] = impute_kNN(\n **setup_imputation_kNN[case]\n )\n calc_imputation = calc_imputation.round(10)\n\n assert_frame_equal(calc_imputation, expected_outcome[\"df\"], check_dtype=False)", "def imputeAge(train, test):\n for df in [train, test]:\n df['Age_Null_Flag'] = df['Age'].apply(lambda x: 1 if pd.isnull(x) else 0)\n train['mean'] = train.groupby(['Name_Title', 'Pclass'])['Age'].transform('mean')\n train['Age'] = train['Age'].fillna(train['mean'])\n merged_data = test.merge(train, on=['Name_Title', 'Pclass'], how='left').drop_duplicates(['PassengerId_x'])\n test['Age'] = 
np.where(test['Age'].isnull(), merged_data['mean'], test['Age'])\n test['Age'] = test['Age'].fillna(test['Age'].mean())\n del train['mean']\n return train, test", "def impute_by_regression(target, df, impute_method=\"mean\"):\n if target.name in df.columns:\n df = df[~target.name]\n reg_imp = MiceImputer(seed_strategy=impute_method, target=target.name, group=[])\n reg_imp.fit(pd.concat([df, target], axis=0))\n return reg_imp", "def impute_data(x_train):\n for i in range(x_train.shape[1]):\n # If NA values in column\n if na(x_train[:, i]):\n msk_train = (x_train[:, i] != -999.)\n # Replace NA values with most frequent value\n values, counts = np.unique(x_train[msk_train, i], return_counts=True)\n # If there are values different from NA\n if (len(values) > 1):\n x_train[~msk_train, i] = values[np.argmax(counts)]\n else:\n x_train[~msk_train, i] = 0\n\n return x_train", "def impute_apply(data, n_iter, to_nan=0.2, fast_impute=False):\n output = impute_optimizer(data, n_iter=n_iter, to_nan=0.2, fast_impute=False)\n imputer, param = output.iloc[0,:].name.split(\"__\")\n param = param.replace(\":\", \"\")\n\n if imputer == \"SimpleImputer\":\n ix = data.index.copy()\n data = (SimpleImputer(strategy=param)\n .fit_transform(np.asarray(data).reshape(-1, 1)))\n data = pd.Series(data.flatten(), index=ix)\n del ix\n\n elif imputer == \"KNNImputer\":\n ix = data.index.copy()\n data = (KNNImputer(weights=param)\n .fit_transform(np.asarray(data).reshape(-1, 1)))\n data = pd.Series(data.flatten(), index=ix)\n del ix\n\n elif imputer == \"Interpolate\":\n if param == \"time\":\n data.index = pd.to_datetime(pd.to_timedelta(data.index, unit=\"days\"))\n data = data.interpolate(method=param, limit_direction=\"both\")\n else:\n data = data.interpolate(method=param, limit_direction=\"both\")\n\n elif imputer == \"Interpolate_with_order\":\n # Order can be tweaked (default quadratic)\n data = data.interpolate(method=param, limit_direction=\"both\", order=2)\n \n elif imputer == \"TimeSeries_LOCF\":\n ix = data.index.copy()\n data = locf(np.asarray(data).reshape(1, -1))\n data = pd.Series(data.flatten(), index=ix)\n del ix\n \n elif imputer == \"Moving_Win_Imputer\":\n ix = data.index.copy()\n param = int(param)\n remainder = -(len(data) % param)\n data = np.asarray(list(zip(*[iter(data)] * param)))\n data = np.asarray(moving_window(data, wsize=param))\n if remainder != 0:\n data = pd.Series(data.flatten(),\n index=ix[:remainder])\n else:\n data = pd.Series(data.flatten(), index=ix)\n del ix\n else:\n raise Exception\n print(\"Imputer passed through \\\"impute_optimize\\\" cannot be applied\")\n print(f\"Value passed: {impter}\")\n \n return data, imputer, param", "def impute_dataset_train_test(imputation: str, train: pd.DataFrame, test: pd.DataFrame = None,\n dataset: pd.DataFrame = None) -> tuple:\n cols_to_impute = train.loc[:, train.isna().any()].select_dtypes(exclude=['string', 'object']).columns.tolist()\n if len(cols_to_impute) == 0:\n if dataset is not None:\n return dataset.copy(), train, test\n else:\n return None, train, test\n cols_to_add = [col for col in train.columns.tolist() if col not in cols_to_impute]\n if imputation == 'mean' or imputation == 'median':\n imputer = MissingValueImputation.get_simple_imputer(df=train.filter(cols_to_impute), strategy=imputation)\n elif imputation == 'iterative':\n imputer = MissingValueImputation.get_iter_imputer(df=train.filter(cols_to_impute))\n elif imputation == 'knn':\n imputer = MissingValueImputation.get_knn_imputer(df=train.filter(cols_to_impute))\n\n 
train_imp = pd.concat([pd.DataFrame(data=imputer.transform(X=train.filter(cols_to_impute)),\n columns=cols_to_impute, index=train.index), train[cols_to_add]],\n axis=1, sort=False)\n if test is None:\n test_imp = None\n else:\n test_imp = pd.concat([pd.DataFrame(data=imputer.transform(X=test.filter(cols_to_impute)),\n columns=cols_to_impute, index=test.index), test[cols_to_add]],\n axis=1, sort=False)\n if dataset is None:\n dataset_imp = None\n else:\n dataset_imp = pd.concat([pd.DataFrame(data=imputer.transform(X=dataset.filter(cols_to_impute)),\n columns=cols_to_impute, index=dataset.index), dataset[cols_to_add]],\n axis=1, sort=False)\n return dataset_imp, train_imp, test_imp", "def impute(data):\n\n for i in range(len(data)): # every person\n data[i] = data[i].fillna(0)\n\n return data", "def fill_empty_values(dataset):\n for f in dataset.get_feature_names():\n if dataset.feature_is_continuous(f):\n f_analysis = dataset.analyse_continuous_feature(f)\n if f_analysis is not None:\n mean = f_analysis[1]\n # Impute missing values with mean\n c = dataset.impute_feature_value(f, mean,\n lambda val, t: val==None)\n if c>0:\n print \"Imputed {0} values for feature {1}\".format(c, f)\n else:\n # Analyse categorical features\n f_analysis = dataset.analyse_categorical_feature(f)\n if f_analysis is not None:\n mode1 = f_analysis[2]\n # Impute missing values with mean\n c = dataset.impute_feature_value(f, mode1,\n lambda val, t: val==None)\n if c>0:\n print \"Imputed {0} values for feature {1}\".format(c, f)", "def test_fill_check(self):\n # Argument required\n input_tensor = torch.tensor(\n [\n [float(\"nan\"), 2.0],\n [3.0, 4.0],\n [5.0, 6.0],\n [7.0, 8.0],\n [9.0, 10.0],\n [11.0, 12.0],\n ]\n )\n with pytest.raises(\n AssertionError, match=re.escape(\"argument 'fill' must be provided\")\n ):\n forward_impute(input_tensor)\n # Argument not required\n input_tensor = torch.tensor(\n [\n [1.0, 2.0],\n [float(\"nan\"), 4.0],\n [5.0, 6.0],\n [7.0, 8.0],\n [9.0, 10.0],\n [11.0, 12.0],\n ]\n )\n test_tensor = forward_impute(input_tensor)\n expect_tensor = torch.tensor(\n [\n [1.0, 2.0],\n [1.0, 4.0],\n [5.0, 6.0],\n [7.0, 8.0],\n [9.0, 10.0],\n [11.0, 12.0],\n ]\n )\n assert torch.equal(test_tensor, expect_tensor)", "def test_learned_normal_impute(make_missing_data):\n ab.set_hyperseed(100)\n _, m, X, _ = make_missing_data\n\n # This replicates the input layer behaviour\n def data_layer(**kwargs):\n return kwargs['X'], 0.0\n\n def mask_layer(**kwargs):\n return kwargs['M'], 0.0\n\n n, N, D = X.shape\n impute = ab.LearnedNormalImpute(data_layer, mask_layer)\n\n F, KL = impute(X=X, M=m)\n\n tc = tf.test.TestCase()\n with tc.test_session():\n tf.global_variables_initializer().run()\n X_imputed = F.eval()\n assert KL.eval() == 0.0 # Might want to change this in the future\n assert(X_imputed.shape == X.shape)", "def imputation(self):\n return self._imputation", "def test_super_transform_called(self, mocker):\n\n df = d.create_NearestMeanResponseImputer_test_df()\n\n x = NearestMeanResponseImputer(response_column=\"c\", columns=[\"a\", \"b\"])\n\n x.fit(df)\n\n expected_call_args = {\n 0: {\"args\": (d.create_NearestMeanResponseImputer_test_df(),), \"kwargs\": {}}\n }\n\n with h.assert_function_call(\n mocker, tubular.base.BaseTransformer, \"transform\", expected_call_args\n ):\n\n x.transform(df)", "def transform(self, dataset):\n # Only after fitting\n assert self.median_imputation is not None\n \n if dataset.temporal_feature is not None:\n # Interpolate temporal data if at least one value is observed\n 
dataset.temporal_feature = interpolation(dataset.temporal_feature, dataset.time, self.interpolation_model_name) \n # Do median imputation for the sequence without any observed data\n dataset = self.median_imputation.transform(dataset)\n\n return dataset", "def imputeNaN(data, newValue):\n\tdata[np.isnan(data)] = newValue; # Se asigno este valor de manera arbitraria para que no marcara un error de validacion por valores muy grandes", "def test_imputer_msd(setup_imputation_msd, expected_imputation_msd):\n\n df_imputed = expected_imputation_msd[\"case1\"][\"df\"]\n df_imputed_obs = df_imputed[setup_imputation_msd[\"case1\"][\"df\"].isna()]\n\n df_to_test = []\n for i, j in zip(\n df_imputed_obs.items(), setup_imputation_msd[\"case1\"][\"df\"].items()\n ):\n col_to_test = (i[1] <= j[1].median() + 0.25 * j[1].std()) & (\n i[1] >= j[1].median() - 0.25 * j[1].std()\n )\n df_to_test.append(col_to_test)\n\n expected_outcome_msd = pd.DataFrame(df_to_test).T\n calc_imputation_msd = pd.DataFrame(df_imputed_obs.notnull())\n\n assert_frame_equal(calc_imputation_msd, expected_outcome_msd, check_dtype=False)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the filename of the match PDB to determine IDs and positions of match residues
def determine_matched_residue_positions(match_pdb_path):
    positions_block = os.path.basename(os.path.normpath(match_pdb_path)).split('_')[2]
    resnames = [a for a in re.split("[0-9]*", positions_block) if a]
    resnums = [int(a) for a in re.split("[a-zA-Z]*", positions_block) if a]
    return [(a, b) for a, b in zip(resnames, resnums)]
[ "def parse_pdb(pdb_file):\n\n dict_coord = {}\n # Id for residu because in some pdb files the num residu\n # dosen't start to 1\n resID = 0\n\n for line in pdb_file:\n resName = line[17:20].strip()\n resID_pdb = line[22:26]\n\n if (line[0:4] == \"ATOM\") or ((line[0:6] == \"HETATM\") and\n ( (resName == \"MET\") or resName == \"MSE\") ):\n if line[12:16].strip() == \"N\": # Suppose that 1st = \"N\"\n resID += 1\n resID_pdb = line[22:26].strip() # Needed for erasing\n\n if resID not in dict_coord.keys():\n dict_coord[resID] = [resID_pdb, line]\n else:\n dict_coord[resID][1] += line\n\n # The resID is now equivalent to the size of the protein given as argument\n return (resID, dict_coord)", "def parse_filename(filename):\n basename = os.path.basename(filename).replace('.simtel.gz', '')\n foutputs = re.split(\n '___', basename)\n prod_info = foutputs[1]\n soutputs = re.split(\n '_', foutputs[0])\n particle_type, ze, az, run_number = soutputs\n identifiers = [particle_type, ze, az, prod_info]\n \n identifiers[1] = int(re.sub('deg$', '', identifiers[1]))\n identifiers[2] = int(re.sub('deg$', '', identifiers[2]))\n run_number = int(re.sub('^run', '', run_number))\n\n return run_number, identifiers", "def _parse_molecule(lines, file_extension):\n if file_extension == '.pdb':\n #Extract residue information and assign column\n i = 0\n column_for_res = {}\n res_for_column = {}\n name_for_res = {}\n atoms_in_res = {}\n for line in lines:\n record_type = line[0:6]\n if record_type == \"ATOM \":\n atom_fullname = line[12:16]\n # get rid of whitespace in atom names\n split_list = atom_fullname.split()\n if len(split_list) != 1:\n # atom name has internal spaces, e.g. \" N B \", so\n # we do not strip spaces\n atom_name = atom_fullname\n else:\n # atom name is like \" CA \", so we can strip spaces\n atom_name = split_list[0]\n\n if atom_name in ['CA', 'CB', 'C', 'N', 'O']:\n altloc = line[16]\n chainid = line[21]\n resid = line[22:26].split()[0]\n res = str(resid) + \":\" + str(chainid)\n resname = line[17:20]\n if resname in list(CONVERT_RES_NAMES):\n resname = CONVERT_RES_NAMES[resname]\n if res not in list(column_for_res):\n column_for_res[res] = i\n res_for_column[i] = res\n name_for_res[res] = resname\n atoms_in_res[res] = set()\n i += 1\n atoms_in_res[res].add(atom_name)\n\n #Extract coordinates and atoms information\n alphas = [[0.0, 0.0, 0.0] for i in range(0, len(list(column_for_res)))]\n betas = [[0.0, 0.0, 0.0] for i in range(0, len(list(column_for_res)))]\n carbons = [[0.0, 0.0, 0.0] for i in range(0, len(list(column_for_res)))]\n nitrogens = [[0.0, 0.0, 0.0] for i in range(0, len(list(column_for_res)))]\n oxygens = [[0.0, 0.0, 0.0] for i in range(0, len(list(column_for_res)))]\n side_chains = []\n coords_array = [] #For calculate grid size\n\n for line in lines:\n record_type = line[0:6]\n if record_type == \"ATOM \":\n atom_fullname = line[12:16]\n # get rid of whitespace in atom names\n split_list = atom_fullname.split()\n if len(split_list) != 1:\n # atom name has internal spaces, e.g. 
\" N B \", so\n # we do not strip spaces\n atom_name = atom_fullname\n else:\n # atom name is like \" CA \", so we can strip spaces\n atom_name = split_list[0]\n\n chainid = line[21]\n resid = line[22:26].split()[0]\n res = str(resid) + \":\" + str(chainid)\n\n # atomic coordinates\n try:\n x = float(line[30:38])\n y = float(line[38:46])\n z = float(line[46:54])\n except Exception:\n raise Exception(\"Invalid or missing coordinate(s) at \\\n residue %s, atom %s\" % (res, name))\n coord = [x, y, z]\n\n if atom_name == \"CA\":\n # Coordinates for the grid\n coords_array.append(coord)\n # Coordinates for searching sites\n alphas[column_for_res[res]] = coord\n elif atom_name == \"CB\":\n # Coordinates for searching sites\n betas[column_for_res[res]] = coord\n elif atom_name == \"C\":\n # Coordinates for searching sites\n carbons[column_for_res[res]] = coord\n elif atom_name == \"N\":\n # Coordinates for searching sites\n nitrogens[column_for_res[res]] = coord\n elif atom_name == \"O\":\n # Coordinates for searching sites\n oxygens[column_for_res[res]] = coord\n else: # Atom belongs to a side-chain\n # Coordinates for discarding clashes\n side_chains.append(coord)\n\n coords_array = np.array(coords_array)\n centroid = np.mean(coords_array, axis=0)\n max_distance = np.max(np.linalg.norm(coords_array - centroid, axis=1)) \\\n + DIST_PROBE_ALPHA['ALL'][1]\n\n alphas = np.array(alphas)\n betas = np.array(betas)\n carbons = np.array(carbons)\n nitrogens = np.array(nitrogens)\n oxygens = np.array(oxygens)\n side_chains = np.array(side_chains)\n return centroid, max_distance, alphas, betas, carbons, nitrogens, \\\n oxygens, column_for_res, res_for_column, name_for_res, \\\n atoms_in_res, side_chains", "def parse_pdb(path):\n\n pdb_dict = defaultdict(lambda: defaultdict(list))\n res_dict = defaultdict(list)\n with open(path) as o:\n lines = o.readlines()\n for line in lines:\n if line[:4] == 'ATOM':\n atom_info = process_atom_info(line)\n identifier = '{}{}'.format(\n atom_info['res_name'],\n atom_info['res_no']\n )\n pdb_dict[atom_info['chain']][identifier].append(atom_info)\n if identifier not in res_dict[atom_info['chain']]:\n res_dict[atom_info['chain']].append(identifier)\n return pdb_dict,res_dict", "def parse_id(filename):\n match = re.search('B[0-9]{2}-[0-9]{3}', filename) \n if match:\n return match.group()\n return None", "def get_cellID_info(filePath):\n pat = '/(20\\d{6})/cell.?(\\d{1,2})/(\\d{2}[0-9ond]\\d{5}).*.abf'\n _date, _cell_id, _id = re.findall(pat, filePath)[0]\n return _id, _date, _cell_id", "def get_pdb_coords(pdbname):\n coords = []\n for line in open(pdbname,\"r\"):\n if line[:3] in ['TER','END']:\n break\n else:\n if line[:4] == \"ATOM\":\n coords.append([float(line[31:39]),float(line[39:47]),float(line[47:55])]) \n\n return np.array(coords)", "def parse_filename(self, fpath):\n fname = os.path.basename(fpath)\n if fname == \"\":\n return None\n parts = fname.split(\".\") #split on dot character\n print(\"parts are:\", parts)\n if len(parts) >= 3 and (parts[0] == \"pir\" or parts[0] == \"cap\"):\n ts = int(parts[1])\n if ts == 0: #conversion likely failed\n return None\n segno = None\n try:\n segno = int(parts[2])\n except:\n return None\n return (parts[0], ts, segno)", "def load_match_dat(fname, processor=None):\n processor = processor or tuple\n\n if isinstance(fname, six.string_types):\n with open(fname) as fp:\n nmatch = len([None for line in fp if line.strip()]) - 1\n else:\n pos = fname.tell()\n nmatch = len([None for line in fname if line.strip()]) - 1\n 
fname.seek(pos)\n\n def proc_buff(buff):\n src_idx, period, sigma, order, snr = (\n [(e,) * nmatch] for e in buff[:5])\n match_rank = [tuple(range(1, nmatch+1))]\n array = np.array(buff[5:])\n array.shape = (nmatch or 1, 5)\n matchs = np.concatenate(\n (src_idx, match_rank, period, sigma, order, snr, array.T))\n return matchs.T\n\n def gen(fp):\n buff, lineno = [], 0\n for line in fp:\n if line.strip():\n line = re.sub(r\"\\*{2,}\", \"nan\", line) # remove al ****...\n buff.extend(line.strip().rsplit(None, 4))\n lineno += 1\n if lineno >= nmatch + 1:\n for row in proc_buff(buff):\n yield tuple(row)\n buff = []\n lineno = 0\n if buff:\n for row in proc_buff(buff):\n yield tuple(row)\n\n if isinstance(fname, six.string_types):\n with open(fname) as fp:\n generator = gen(fp)\n return processor(generator)\n generator = gen(fname)\n return processor(generator)", "def load_res_file(filename, pfam_mapping=False,check_for_duplicates=False):\n\n EVALUE_THRESHOLD = 1\n protein_hits = dict()\n\n with open(filename, 'r') as f:\n for line in f:\n # strip header\n if line[0] == '#' or not line.strip():\n continue\n\n elements = line.split()\n protein_id = elements[0]\n align_start = int(elements[1])\n align_end = int(elements[2])\n hmm_acc = elements[5].split('.')[0] # strip hmm family name, e.g. PF00105.13 -> PF00105\n hmm_name = elements[6]\n domain_type = elements[7]\n e_value = float(elements[12])\n clan = elements[14]\n if pfam_mapping:\n pfam_mapping[hmm_acc] = hmm_name\n\n if float(e_value > EVALUE_THRESHOLD):\n continue\n data = (hmm_acc, align_start, align_end, domain_type, clan)\n if protein_id not in protein_hits:\n protein_hits[protein_id] = [data]\n else:\n protein_hits[protein_id].append(data)\n \n # sort domains inside a protein according to sequence\n for protein, domains in protein_hits.items(): # work on a copy\n protein_hits[protein] = sorted(protein_hits[protein], key=lambda rank: (rank[1], rank[2]))\n\n # check for overlaps in annotation\n # TODO: too much overlaps, filter them out!\n if check_for_duplicates:\n f = os.path.basename(filename).split('-')[0]\n for protein, domains in protein_hits.iteritems():\n last_start = -1\n last_stop = -1\n for domain in domains:\n start = domain[1]\n stop = domain[2]\n if start < last_stop:\n # overlap\n if stop < last_stop:\n print('{}:{}: inclusion of domain {}'.format(f, protein, domain[0]))\n else:\n print('{}:{}: overlap of domain {}, {} residues'.format(f, protein, domain[0], last_stop-start+1))\n last_start = start\n last_stop = stop\n\n return protein_hits", "def getChainIDsFromPDB(cls, filename, qparent=None):\n extension = filename.split('.')[-1].lower()\n if extension == 'pdb':\n linelist = []\n for line in open(filename, 'U'):\n if line[:6] == 'COMPND' and line[10:70].split(':')[0].strip() == 'CHAIN':\n linelist = line[17:].split(', ')\n linelist[0] = linelist[0].strip()\n if ';' in linelist[-1]:\n linelist[-1] = linelist[-1].split(';')[0]\t#removes the terminating semicolon and extra whitespace\n while True:\n try: linelist.remove('NULL')\n except: break\n return linelist\n if linelist == []:\n return []\n else:\n raise NotImplementedError, 'NYI'", "def get_PDB_info(dir):\r\n\r\n\t#the three vectors you are required to fill.\r\n\tDSSP_vector, TMHMM_vector, oracle = [],[],[]\r\n\r\n\tprint(\"There are\",len(os.listdir(dir)),\"PDB files to parse\")\r\n\r\n\r\n\t#Assemble a machine learning dataset incrementally, for each PDB file in the directory\r\n\tfor ind,PDB_file in enumerate(os.listdir(dir)):\r\n\t\tif 
ind%10==0:\r\n\t\t\tprint(\"Working on structure\",ind)\r\n\t\t\r\n\t\tif(str(PDB_file) == \".DS_Store\"): continue\r\n\t\t# if(str(PDB_file) == \"2dco.pdb\"): break\r\n\t\t#Step 1 : parse your PDB file with biopython to obtain a model object\r\n\t\tp = PDB.PDBParser()\r\n\t\tstructure = p.get_structure(PDB_file[:-4].upper(), dir + \"/\" + PDB_file)\r\n\t\tmodel = structure[0]\r\n\r\n\t\t#TODO : extract a list of residues from your model object\r\n\t\tresidues = extract_residues(model)\r\n\t\tprint(\"file\", PDB_file, len(residues))\r\n\t\t# print(\"residue_size\",len(residues))\r\n\t\t# if(len(residues) > 500): continue\r\n\r\n\t\t#TODO : compute a distance matrix of size len(sequence)*len(sequence) with the distance between each residue\r\n\t\tmatrix = compute_distance_matrix(residues)\r\n\t\t# print(\"here\")\r\n\r\n\r\n\t\t#TODO : contact map should be a boolean numpy array of the same size as the distance matrix.\r\n\t\t#if two amino acids are within 5 angstroms of each other in 3D, but distant of at least 10 in sequence, the table should have True, else False.\r\n\t\t\r\n\r\n\t\tcontact_map = removeConsecutives(matrix)\r\n\t\thas_contact = [True if True in contact_map[residue] else False for residue in contact_map]\r\n\r\n\t\t#TODO : contact info should return the proportion of residues that have an intramolecular contact in your object.\r\n\t\tcontact_info = get_contact_numbers(contact_map)\r\n\t\t# print(contact_info,\"contacts\")\r\n\r\n\t\t# TODO : obtain the secondary structure prediction of the PDB model with DSSP\r\n\t\tdssp_info = get_dssp_info(PDB_file,model,dir)\r\n\r\n\t\t#TODO : obtain the sequence of the PDB file in some way of your choice.\r\n\t\tsequence = \"\"\r\n\t\tppb = PDB.PPBuilder()\r\n\t\tfor pp in ppb.build_peptides(structure):\r\n\t\t\tsequence += pp.get_sequence()\r\n\r\n\t\tdssp_ss = \"\" #ss stands for secondary structure\r\n\t\tdssp_seq = \"\"\r\n\r\n\t\tdssp_keys = sorted(dssp_info.keys())\r\n\t\tfor key in dssp_keys:\r\n\t\t\tcurr_ss = dssp_info[key][2]\r\n\t\t\tdssp_ss += curr_ss\r\n\t\t\tdssp_seq += dssp_info[key][1]\r\n\r\n\t\tconverted = convert_info(dssp_ss)\r\n\t\t# print(dssp_ss)\r\n\t\t#TODO : write the sequence to a fasta file to call TMHMM with it, or to use the webserver\r\n\t\tfilename = write_fasta(sequence,PDB_file)\r\n\r\n\t\t#TODO : obtain secondary structure prediction for this FASTA file with TMHMM\r\n\t\t# run_tmhmm will now parse tmhmmm file\r\n\t\t\r\n\t\t# test_file = \"6j20\"\r\n\r\n\t\ttm_ss = run_tmhmm(filename,PDB_file)\r\n\r\n\t\t# if(len(sequence) != len(residues)): continue\r\n\t\tDSSP_vector, TMHMM_vector, oracle = generate_ML_dataset(sequence,converted,tm_ss,has_contact,DSSP_vector, TMHMM_vector, oracle)\r\n\t\t# DSSP_vector, TMHMM_vector, oracle = generate_ML_dataset(sequence,converted,has_contact,DSSP_vector, TMHMM_vector, oracle)\r\n\treturn DSSP_vector, TMHMM_vector, oracle", "def read_lookup_table(filename):\n\n pdb_id_file = open(filename, \"r\")\n uniprot_pdb_dict = {}\n for line in pdb_id_file:\n pdb_id = str(line[7:-1])\n uniprot_id_for_dict = str(line[:-6])\n uniprot_pdb_dict.setdefault(uniprot_id_for_dict,[]).append(pdb_id)\n\n return uniprot_pdb_dict", "def GetPdbCoordinates( filename,\n select_atom = (\"CA\",),\n select_chain = None,\n renumber = None,\n only_coordinates = None):\n\n if not os.path.exists(filename):\n raise \"pdb file %s does not exist\" % filename\n\n if filename[-3:] == \".gz\":\n lines = os.popen(\"gunzip < %s\" % filename).readlines()\n else:\n lines = open(filename,\"r\").readlines()\n\n 
result = []\n\n current_number = 1\n \n for line in lines:\n if line[:6] not in (\"ATOM \", \"HETATM\"): continue\n\n chain = line[21]\n number = line[22:26]\n aa = line[17:20]\n atom = string.strip(line[13:17])\n \n x,y,z = map(string.atof, (line[30:38], line[38:46], line[46:54]))\n\n if select_chain and chain not in select_chain: continue\n if select_atom and atom not in select_atom: continue\n \n if renumber:\n number = current_number\n current_number += 1\n\n if AMINOACIDS.has_key(aa):\n aminoacid = AMINOACIDS[aa]\n else:\n sys.stderr.write( \"# error in PdbCoordinates: aminoacid %s not known\\n\" % aa )\n continue\n\n if only_coordinates:\n result.append( (x, y, z) )\n else:\n result.append( (number, aminoacid, x, y, z) ) \n \n return result", "def get_resinum_to_resi_map(resiname_file, offset = 0, indexing = 1, aa_code = 3):\n resi_map = {}\n\n if resiname_file == None:\n print('Warning: No prmtop or PDB file given.\\n' + \\\n ' No residue number information will be presented.')\n for i in range(10000):\n resi_map[i] = str(i)\n return resi_map\n\n try:\n f = file(resiname_file)\n except IOError:\n print('Warning: Could not open ' + resiname_file + '.\\n' + \\\n ' No residue number information will be presented.')\n for i in range(10000):\n resi_map[i] = str(i)\n return resi_map\n\n # If the file is a prmtop file...\n\n if not resiname_file.endswith('.pdb'):\n resi_num = 1\n \n residue_section = False\n for line in f:\n if line.startswith('%FLAG RESIDUE_POINTER'):\n break\n if line.startswith('%FLAG RESIDUE_LABEL'):\n residue_section = True\n if not residue_section or line.startswith('%F'):\n continue\n else:\n residue_names = line.split()\n for resi_name in residue_names:\n if aa_code == 1:\n resi_name = ThrLett_to_OneLett(resi_name)\n resi_name = resi_name.capitalize() + str(resi_num + offset)\n resi_map[resi_num + indexing - 1] = resi_name\n resi_num += 1\n\n # If the file is a PDB file...\n\n else:\n for line in f:\n if not (line.startswith('ATOM') or line.startswith('HETATM')):\n continue\n resi_name = line[17:21].strip()\n resi_num = int(line[22:26].strip())\n if aa_code == 1:\n resi_name = ThrLett_to_OneLett(resi_name)\n resi_name = resi_name.capitalize() + str(resi_num + offset)\n resi_map[resi_num + indexing - 1] = resi_name\n \n f.close()\n\n if not resi_map:\n print(\"Warning: Could not extract residue information from prmtop or PDB file.\\n\")\n print(\" No residue number information will be presented.\")\n for i in range(10000):\n resi_map[i] = str(i)\n return resi_map\n \n return resi_map", "def parser_file(file_in, header=False):\n df = pd.read_csv(file_in, sep=SEPARATOR)\n try:\n df = df.sort_values(by=['score'], ascending=False)\n\n except Exception as e:\n\n print('cannot sort ', file_in)\n\n\n\n try:\n ids = df['node,layer'].values\n except:\n #print('WARNING: cannot select \\\"node,layer\\\" perform a replace operation if needed')\n ids = df['node'].values\n\n return ids", "def parseRefout(tmpDir, guideSeqs, pamLen):\n fnames = glob.glob(join(tmpDir, \"*.map\"))\n\n # while parsing, make sure we keep only the hit with the lowest number of mismatches\n # to the guide. 
Saves time when parsing.\n posToHit = {}\n hitBestMismCount = {}\n for fname in fnames:\n for line in open(fname):\n # s20+.17:A>G - chr8 26869044 CCAGCACGTGCAAGGCCGGCTTC IIIIIIIIIIIIIIIIIIIIIII 7 4:C>G,13:T>G,15:C>G\n guideIdWithMod, strand, chrom, start, tSeq, weird, someScore, alnModifStr = \\\n line.rstrip(\"\\n\").split(\"\\t\")\n\n guideId = guideIdWithMod.split(\".\")[0]\n modifParts = alnModifStr.split(\",\")\n if modifParts==['']:\n modifParts = []\n mismCount = len(modifParts)\n hitId = (guideId, chrom, start, strand)\n oldMismCount = hitBestMismCount.get(hitId, 9999)\n if mismCount < oldMismCount:\n hit = (mismCount, guideIdWithMod, strand, chrom, start, tSeq, modifParts)\n posToHit[hitId] = hit\n\n ret = []\n for guideId, hit in posToHit.iteritems():\n mismCount, guideIdWithMod, strand, chrom, start, tSeq, modifParts = hit\n if strand==\"-\":\n tSeq = revComp(tSeq)\n guideId = guideIdWithMod.split(\".\")[0]\n guideSeq = guideSeqs[guideId]\n genomeSeq = applyModifStr(tSeq, modifParts, strand)\n start = int(start)\n bedRow = (guideId, chrom, start, start+GUIDELEN+pamLen, strand, guideSeq, genomeSeq) \n ret.append( bedRow )\n\n return ret", "def process_file(file_path):\n file_of_matches=open(file_path, \"r\")\n #loop over every line to get process individual matches\n for match in file_of_matches:\n process_match(match[:-1])#drop the \\n from end of line \n file_of_matches.close()", "def parse_matchinfo(infile): \n f=open(infile,'r')\n txt=f.readlines()\n f.close()\n hero_name=[]\n player_name=[]\n steamid=[]\n game_team=[]\n infotags=[]\n for i,l in enumerate(txt):\n if \"player_info\" in l:\n infotags.append(i)\n\n for i in infotags:\n hero_name.append(txt[i+1].replace(\"hero_name:\",\"\").split('''\"''')[1])\n player_name.append(txt[i+2].replace(\"player_name:\",\"\").split('''\"''')[1])\n steamid.append(int(txt[i+4].replace(\"steamid:\",\"\")))\n game_team.append(int(txt[i+5].replace(\"game_team:\",\"\")))\n\n df=pd.DataFrame({'hero_name':hero_name,\n 'player_name':player_name,\n 'steamid':steamid,\n 'game_team':game_team})\n\n return df", "def findResIndexes(self):\n self.reactant['solvated']={}\n self.reactant['solvated']['pdb']=self.simdir+'/reactant/solvated/'+self.id+'.premin.pdb'\n nres=162 #protein residues + DHP + NPD + energy-sink\n pdbf=self.reactant['solvated']['pdb']\n ptin=open(pdbf,'r'); l=ptin.readline()\n resindexes=[]; currires=None; catalytic=False\n while l:\n if l[0:5]=='ATOM ':\n iat=int(l[6:11])-1; ires=l[22:26]; resname=l[17:20]\n if not currires:\n currires=ires #initialize residue index\n group=[] #initialize list of atom residues\n if ires!=currires:\n resindexes.append(group)\n currires=ires\n group=[]\n if resname=='Na+' and not catalytic:\n \"\"\"introduce the catalytic site residue\"\"\"\n for index in self.hot_spot['indexes']:\n group.append(index)\n resindexes.append(group); catalytic=True; group=[];\n if iat not in self.hot_spot['indexes']: group.append(iat)\n l=ptin.readline()\n resindexes.append(group) #enter last group\n self.resinfo={'indexes':resindexes,'nres':nres}\n pdb.set_trace()\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Logging the text in the log file
def Log(self, text):
    self.__file.write("\n" + str(datetime.now()) + ": " + text)
[ "def log(text: str) -> None:\n now = datetime.now()\n ts = now.timestamp()\n f = open(f\"Logs/{now.strftime('%d-%m-%Y')}.log\", \"a\")\n\n text = str(ts)+\"\\t\"+text+\"\\n\"\n print(text.strip(\"\\n\"))\n\n f.write(text)\n f.close()", "def log(self, message):\n timestamp = datetime.datetime.now().isoformat()\n try:\n with open('logfile.txt','a') as logfile:\n logfile.write(f\"{timestamp} - {self.logType} : {message}\\n\")\n except FileNotFoundError:\n with open('logfile.txt', 'w') as logfile:\n logfile.write(f\"{timestamp} - {self.logType} : {message}\\n\")", "def send_log_entry(self, text):\n self._post_data(\"log\", data={'log_entry':text})\n\n #Log to file if a logger variable is set on this class instance\n logger = getattr(self, 'logger', None)\n if logger:\n logger.debug(text)", "def log_txt(self, text, key, basepath=None, write_mode='w'):\r\n\t\tif basepath is None:\r\n\t\t\tbasepath = 'log'\r\n\t\tdirpath = self.get_log_dir(key)\r\n\t\twith open(os.path.join(dirpath, basepath), write_mode) as f:\r\n\t\t\tf.write(text)", "def running_log(log):\n print(log)\n with open('running_log.txt', 'a') as f:\n f.write(log + '\\n')", "def test_filelog():\n\tlgr = simplelog.make_logger (handler='test/out/test.log')\n\tlgr.info (\"what?\")", "def debug_log(text):\n\n with open('/tmp/wttr.in-debug.log', 'a') as f_debug:\n f_debug.write(text+'\\n')", "def _log(self, newText):\r\n self.gameLog.config(state=NORMAL)\r\n self.gameLog.insert(END, newText+\"\\n\")\r\n self.gameLog.config(state=DISABLED)\r\n self.gameLog.yview(END)", "def log(text):\n if LOG:\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n print(f\"[{current_time}] {text}\")", "def log(self, message: str):\r\n with open(self.log_file, 'a') as f:\r\n f.write(message)", "def log(self, msg):\n self.logger.write(msg)", "def log_sample(log_path, text, model_config, vocabulary_len):\n with open(log_path,'a') as log:\n line = str(model_config) + \",\" + str(vocabulary_len) + \",\" + str(text)\n log.write(line)", "def logger_info(self,text):\n logging.info(self.log_my_name()+' '+text)", "def _write_log(self,level,msg):\n with open(self.file_name,'a') as log_file:\n log_file.write(\"[{0}] {1}\\n\".format(level,msg))", "def logs():\n return send_file('app.log', mimetype='text/plain')", "def logMessage(log_path, log_message):\r\n with open(log_path, 'a') as log:\r\n log.write(log_message + '\\n')", "def log(self, **params):\n ltype=params['ltype']\n if ltype in self.logentries:\n tm,ts=divmod(params.get('tstamp',time.time()),60)\n th,tm=divmod(tm,60)\n for le in self.logentries[ltype]:\n if not le['filename'] in logger.logfiles:\n logf=open(le['filename'],'a' if 'append' in le else 'w')\n logger.logfiles[le['filename']]=logf\n else:\n logf=logger.logfiles[le['filename']]\n if 'format' in le:\n logf.write((le['format'] if 'noheader' in le else logger.logheader+le['format']).format(\n name=self.name,H=int(th), M=int(tm),S=ts, **params))\n elif 'asdict' in le:\n logf.write(params.__repr__())\n else:\n logf.write(logger.logheader.format(name=self.name,H=int(th), M=int(tm),S=ts)+'\\n'.join(['%s: %s' % (k,v) for k,v in params.items()]))\n if le['filename']=='stdout':\n logf.write('\\033[K\\n')\n else:\n logf.write('\\n')", "def __log_file(self):\n while True:\n line = self.fd.readline()\n if not line: break\n syslog.syslog(self.p, line)", "def log(complete_file_path, *contents):\n try:\n log_file = open(complete_file_path, 'a')\n log_file.write('\\n' + '-' * 5 + '\\n')\n log_file.write(str(datetime.datetime.now()) + '\\n')\n for 
content in contents:\n if content:\n log_file.write(content)\n log_file.write('\\n' + '-' * 5 + '\\n')\n except OSError:\n print('logging went wrong' + complete_file_path)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Chooses a valid journal location or returns a corresponding error.
def _check_journal_location(journal_location, stor, action):
    if journal_location:
        if not uuidutils.is_uuid_like(journal_location):
            raise exception.InvalidUUID(uuid=journal_location)

    # If a journal location is provided by the user.
    if journal_location:
        # Check that the journal location is that of an existing stor object.
        try:
            requested_journal_onistor = pecan.request.dbapi.istor_get(
                journal_location)
        except exception.ServerNotFound:
            raise wsme.exc.ClientSideError(_(
                "No journal stor with the provided uuid: %s" % journal_location))

        # Check that the provided stor is assigned to the same host as the OSD.
        if (requested_journal_onistor.forihostid != stor['forihostid']):
            raise wsme.exc.ClientSideError(_(
                "The provided stor belongs to another "
                "host."))

        # If the action is journal create, don't let the journal be
        # collocated.
        if action == constants.ACTION_CREATE_JOURNAL:
            if (requested_journal_onistor.function !=
                    constants.STOR_FUNCTION_JOURNAL):
                raise wsme.exc.ClientSideError(_(
                    "The provided uuid belongs to a stor "
                    "that is not of journal type."))

        # If the action is journal update:
        # - if the new journal location is not collocated, check that the
        #   location is of journal type.
        # - if the new journal location is collocated, allow it.
        if action == constants.ACTION_UPDATE_JOURNAL:
            if requested_journal_onistor.uuid != stor['uuid']:
                if (requested_journal_onistor.function !=
                        constants.STOR_FUNCTION_JOURNAL):
                    raise wsme.exc.ClientSideError(_(
                        "The provided uuid belongs to a stor "
                        "that is not of journal type."))

    # If no journal location is provided by the user.
    else:
        # Check if there is a journal storage designated for the present host.
        existing_journal_stors = pecan.request.dbapi.istor_get_by_ihost_function(
            stor['forihostid'], constants.STOR_FUNCTION_JOURNAL)

        # If more than one journal stor is assigned to the host, the user
        # should choose only one journal location.
        #
        # If there is only one journal stor assigned to the host, then that's
        # where the journal will reside.
        #
        # If there are no journal stors assigned to the host, then the journal
        # is collocated.
        if 'uuid' in stor:
            if len(existing_journal_stors) > 1:
                available_journals = ""
                for stor_obj in existing_journal_stors:
                    available_journals = (available_journals +
                                          stor_obj.uuid + "\n")
                raise wsme.exc.ClientSideError(_(
                    "Multiple journal stors are available. Choose from:\n%s"
                    % available_journals))
            elif len(existing_journal_stors) == 1:
                journal_location = existing_journal_stors[0].uuid
            elif len(existing_journal_stors) == 0:
                journal_location = stor['uuid']

    return journal_location
[ "def getLocation():\n location=input(\"please input the location you want to look at : \")\n if not location:\n location = LOCATION\n return location", "def lookup_location(record,format=None):\n location_list = locations = record.getVariableFields('994')\n for location in location_list:\n subfield_a = location.getSubfield('a')\n in_reference = REF_LOC_RE.search(subfield_a.getData())\n if in_reference is not None:\n ref_loc_code = in_reference.groups()[0]\n if ref_loc_code != 'tarfc':\n return \"Book\" # Classify everything as a book and not journal\n in_periodicals = PER_LOC_RE.search(subfield_a.getData())\n if in_periodicals is not None:\n return \"Journal\"\n return format", "def validateLocation(location: str) -> str:\n # check location\n validLocation = \"NULL\" # init to \"NULL\" incase no valid location is found\n locationTokens = location.split()\n for token in locationTokens:\n # token = re.compile(\"[^a-zA-Z]\") # removes all non-alpha chars\n token = str(token)\n # print(\"token = \", token)\n if token in stateInitials:\n # print(\"--found valid: \", stateInitials[token])\n validLocation = stateInitials[token]\n break\n elif token in states:\n # print(\"--found valid: \", token)\n validLocation = token\n break\n return validLocation", "def prompt_for_location():\n while True:\n print()\n print(\"Select a location:\")\n print()\n n = 1\n for code,description in datastorage.locations():\n print(\" {}. {} - {}\".format(n, code, description))\n n = n + 1\n\n s = input(\"> \").strip()\n if s == \"\": return None\n\n try:\n n = int(s)\n except ValueError:\n n = -1\n\n if n < 1 or n > len(datastorage.locations()):\n print(\"Invalid option: {}\".format(s))\n continue\n\n location_code = datastorage.locations()[n-1][0]\n return location_code", "def validate_location():\n location = request.args.get('location')\n\n try:\n possible_locations = bundle_location_data(location)\n except NoLocationResultsError as e:\n return jsonify({'error': e.message})\n\n return jsonify(possible_locations)", "def error_form(journalpath):\n entry_time = gen_iso_8601()\n program = input(\"What program did the error occur in?\")\n description = field_multiline(\"Describe the nature of the error.\")\n hypothesis = field_multiline(\"What do you think is causing the error?\")\n etc = field_etc(\"How long do you expect it to take to fix this error?\")", "def guess_journal_name(ln, journal_name=None):\n from invenio.webjournal_config import InvenioWebJournalNoJournalOnServerError\n from invenio.webjournal_config import InvenioWebJournalNoNameError\n\n journals_id_and_names = get_journals_ids_and_names()\n if len(journals_id_and_names) == 0:\n raise InvenioWebJournalNoJournalOnServerError(ln)\n\n elif not journal_name and \\\n journals_id_and_names[0].has_key('journal_name'):\n return journals_id_and_names[0]['journal_name']\n\n elif len(journals_id_and_names) > 0:\n possible_journal_names = [journal_id_and_name['journal_name'] for journal_id_and_name \\\n in journals_id_and_names \\\n if journal_id_and_name.get('journal_name', '').lower() == journal_name.lower()]\n if possible_journal_names:\n return possible_journal_names[0]\n else:\n raise InvenioWebJournalNoNameError(ln)\n\n else:\n raise InvenioWebJournalNoNameError(ln)", "def get_location_or_create(location_str, address=None):\n\t\t\n\t\tlocation_arr = LocationUtil.get_all_locations_array()\n\t\t\n\t\tfor location in location_arr:\n\t\t\tif(CrawlUtil.is_similar(location_str, location[1])):\n\t\t\t\treturn LocationUtil.get_location(location[0])\n\n\t\t#not 
found similar one in the database, therefore create\n\t\tnew_location = LocationUtil.create_location()\n\t\tnew_location.name = location_str\n\t\tif(address != None):\n\t\t\tnew_location.address = address\n\t\t\t\n\t\tLocationUtil.update_location(new_location)\n\t\t\n\t\treturn new_location", "def get_valid_locations(collection: list):\n unique_locations = []\n for number in range(1, 39):\n unique_locations.append(str(number))\n\n for book_dict in collection:\n if book_dict['Shelf'] not in unique_locations:\n unique_locations.append(book_dict['Shelf'])\n\n print(f'Here are the locations that you can move the book to: \\n\\tShelf numbers 1 to 38')\n for location in unique_locations[38:]:\n print(f'\\t{location}')\n\n new_location = input('Please enter a number between 1 to 38 or the location name.')\n if new_location.strip() in unique_locations:\n return new_location\n else:\n print('That is not a valid location. Returning to the main menu.\\n')", "def print_default_location_warning(_, args, request):\n if not (properties.VALUES.workflows.location.IsExplicitlySet() or\n args.IsSpecified(\"location\")):\n log.warning(\"The default location(us-central1) was used since the location \"\n \"flag was not specified.\")\n return request", "def test_default_validate_loc(loc):\n with pytest.raises(SitemapValidationError):\n get_validated(loc=loc)", "def extract_location(location):\n\tif len(location) < 1:\n\t\treturn None\n\t\t\n\t#If you don't remove leading and trailing spaces, the geocoding doesn't work\n\tlocation = location.strip()\n\tresult = strict_geocode_location(location)\n\n\tif result is not None:\n\t\tif result.no_consensus:\n\t\t\treturn None\n\t\telse:\n\t\t\treturn result\t\n\telse:\n\t\tresult = reverse_geocode_location(location)\n\t\tif result is not None:\n\t\t\treturn result\n\treturn None", "def handle_failure(query):\n return \"Sorry, we're having trouble finding {query}. Can you be more specific?\".format(query=query)", "def test_bad_store_scheme(self):\n bad_uri = 'unknown://user:pass@example.com:80/images/some-id'\n\n self.assertRaises(exception.UnknownScheme,\n location.get_location_from_uri,\n bad_uri)", "def location_from_code(self, loc_code):\n result = get_supply_point(self.domain.name, loc_code)\n if not result:\n raise SMSError('invalid location code \"%s\"' % loc_code)\n return result", "def error_noloc(message):\n location = noloc()\n error(message, location)", "def test_create_location_invalid(self):\n payload = {'name': '',\n 'code': 'Hub Jakarta SelatanHub Jakarta Selatan Selatan',\n 'type': 'AgentAgentAgentAgentAgent'}\n res = self.client.post(LOCATIONS_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def _err_archive_not_found():\n _err_msg(\"Archive doesn't exist, please make sure you typed it's name correctly!\")\n sys.exit(1)", "def newLocation():\n\tcity = input(\"What is the name of your new location?: \")\n\tzipcode = input(\"What is the 5 digit zipcode?: \")\n\tcountry = input(\"What is the country name?: \") \n\tlatitude = input(\"What is the latitude?: \") \n\tlongitude = input(\"What is the longitude?: \")\n\tnew_loc = data.Location(city, zipcode, country, latitude, longitude)\n\n\treturn new_loc" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Open a file, find the closing tag and insert the Google Analytics text there.
def insert_g_analytics(fname):
    try:
        ff = file(fname, 'r')
    except:
        ff = open(fname, 'r')
    # Get the text in there:
    ll = ff.readlines()
    ff.close()
    this_idx = None
    for idx, l in enumerate(ll):
        if '</head>' in l:
            this_idx = idx
    # Only if this is possible:
    if this_idx:
        ll = ll[:this_idx] + [google_analytics_txt] + ll[this_idx:]
        try:
            ff = file(fname, 'w')
        except:
            ff = open(fname, 'w')
        ff.writelines(ll)
        ff.close()
[ "def finish(self):\r\n with open(self.c_file, 'w') as f:\r\n # checking tag availability\r\n if self.file_opened:\r\n content_instance = self.file_str.format(content = doc.content)\r\n else:\r\n content_instance = self.file_str.format(\r\n lang = doc.lang,\r\n title = doc.title,\r\n char = doc.charset,\r\n author = doc.author,\r\n content = doc.content, \r\n bg_color = doc.bg_color )\r\n \r\n soup = BeautifulSoup(content_instance, 'html.parser')\r\n with open(self.c_file, 'w') as f:\r\n f.write(soup.prettify())", "def tag(file, id, title='', artist='', album='', comment='', apic='none'):\n t = stagger.default_tag()\n t._filename = file\n t[TIT2] = title\n t[TPE1] = artist\n t[TALB] = album\n t[USLT] = USLT(text=comment)\n if apic != 'none':\n getpic(file + '.jpg', id, apic)\n t[APIC] = APIC(file + '.jpg')\n os.remove(file + '.jpg')\n t.write()", "def google_analytics_tags (request):\n\n web_property_id = getattr(settings, 'GOOGLE_ANALYTICS_ID', False)\n domain = getattr(settings, 'GOOGLE_ANALYTICS_DOMAIN', False)\n\n if not web_property_id or not domain:\n return { 'google_analytics': '<!-- Set GOOGLE_ANALYTICS_ID and GOOGLE_ANALYTICS_DOMAIN to include Google Analytics tracking.' }\n\n #if getattr(settings, 'DEBUG', True):\n # return { 'google_analytics': '<!-- Set DEBUG to False to include Google Analytics tracking.' }\n\n if (request.META.has_key('HTTP_REFERER') and request.META['HTTP_REFERER'] != ''):\n http_referer = request.META['HTTP_REFERER']\n else:\n http_referer = '-'\n\n vars = {\n 'id': web_property_id,\n 'domain': domain,\n 'utmn': random.randint(1000000000, 9999999999),\n 'cookie': random.randint(10000000, 99999999),\n 'random': random.randint(1000000000, 2147483647),\n 'today': str(int(time.time())),\n 'referer': http_referer,\n 'uservar': '-',\n 'utmp': '/nojs' + request.path,\n }\n\n script_section = ('<script type=\"text/javascript\">var _gaq = _gaq || []; '\n '_gaq.push([\\'_setAccount\\', \\'%(id)s\\']); '\n '_gaq.push([\\'_setDomainName\\', \\'.%(domain)s\\']); '\n '_gaq.push([\\'_trackPageview\\']); '\n '(function() { var ga = document.createElement(\\'script\\'); '\n 'ga.type = \\'text/javascript\\'; ga.async = true; '\n 'ga.src = (\\'https:\\' == document.location.protocol ? 
'\n '\\'https://ssl\\' : \\'http://www\\') + \\'.google-analytics.com/ga.js\\'; '\n 'var s = document.getElementsByTagName(\\'script\\')[0]; '\n 's.parentNode.insertBefore(ga, s); })(); </script>') % vars\n\n noscript_section = ('<noscript>'\n '<img src=\"//www.google-analytics.com/__utm.gif?utmwv=3&utmn='\n '%(utmn)s&utme=&utmcs=-&utmsr=-&utmsc=-&utmul=-&utmje=0&utmfl=-&utmdt=-'\n '&utmhn=%(domain)s&utmhid=%(utmn)s&utmr=%(referer)s&utmp=%(utmp)s'\n '&utmac=%(id)s&utmcc=__utma%%3D%(cookie)s.%(random)s.%(today)s.'\n '%(today)s.%(today)s.2%%3B%%2B__utmz%%3D%(cookie)s.%(today)s.2.2.'\n 'utmcsr%%3D_SOURCE_%%7Cutmccn%%3D_CAMPAIGN_%%7Cutmcmd%%3D_MEDIUM_%%7'\n 'Cutmctr%%3D_KEYWORD_%%7Cutmcct%%3D_CONTENT_%%3B%%2B__utmv%%3D'\n '%(cookie)s.%(uservar)s%%3B;\" border=\"0\" /></noscript>') % vars\n\n tags = script_section + noscript_section\n return { 'google_analytics': mark_safe (tags) }", "def PlaceAtTag(self, tag, newText):\n \n index = self.text.find(\"<!--tag:{}-->\".format(tag))\n if index > -1:\n newStr = self.text[:index]\n newStr += newText\n newStr += self.text[index:]\n self.text = newStr\n logging.debug(\"Succesfully placed string in file.\")\n else:\n logging.debug(\"Could not find tag {0} in {1}\".format(tag, \n self.template))", "def google_analytics_code():\n if settings.DEBUG:\n return mark_safe(\"\"\"<script>function ga() {}</script>\"\"\")\n\n return mark_safe(\"\"\"\n<script>\n (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){\n (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),\n m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)\n })(window,document,'script','https://www.google-analytics.com/analytics.js','ga');\n\n ga('create', '%s', 'auto');\n ga('send', 'pageview');\n\n</script>\n \"\"\" % settings.GOOGLE_ANALYTICS_CODE)", "def createFileFooter(self):\n import_file_desc_h = open('xml_footer.txt', 'r')\n readlines = import_file_desc_h.read()\n self.fileDesXmlData.write(readlines)\n import_file_desc_h.close()", "def append_after(filename=\"\", search_string=\"\", new_string=\"\"):\n data = \"\"\n with open(filename, \"r+\") as f:\n for line in f:\n data += line\n if search_string in line:\n data += new_string\n f.seek(0)\n f.write(data)", "def finish_html_file(self):\n with self.file_path_html.open('a') as f:\n f.write('</div> \\n')\n f.write(\"\"\"\n <div class='right-panel' style='display: inline-block; float: left;'>\n <iframe class=\"learnosity-iframe\" name=\"learnosity-iframe\" srcdoc=\"\"></iframe>\n </div> \\n\n \"\"\")\n f.write('</body></html>')", "def add_after_in_file(filename, search, appendix):\n file = open(filename)\n content = file.read().split(\"\\n\")\n newcontent = []\n file.close()\n\n for line in content:\n newcontent.append(line)\n if search in line:\n newcontent.append(appendix)\n\n file2 = open(filename, 'w+')\n file2.write(\"\\n\".join(newcontent))\n file2.close()", "def append_after(filename=\"\", search_string=\"\", new_string=\"\"):\n with open(filename, 'r+') as iostream:\n lines = [\n line + new_string * (search_string in line)\n for line in iostream.readlines()\n ]\n iostream.seek(0)\n iostream.writelines(lines)", "def insert(self, output_file): \n ext = os.path.splitext(output_file)[1].lower()\n if ext not in self.__opener:\n return 1\n tags = self.__opener[ext](output_file)\n for tag, value in self.tags.items():\n if value is None or tag not in self.__tag_mapping[ext]:\n continue\n if tag == 'tracknumber' and \\\n (isinstance(value, list) or isinstance(value, 
tuple)) and\\\n len(value) == 2:\n value = '%d/%d' % (value[0], value[1])\n if ext == '.mp3':\n if tag == 'lyrics':\n tags[self.__tag_mapping[ext][tag]] = \\\n self.__id3_mapping[tag](encoding=3, \n lang='eng', \n desc='lyrics',\n text=u'%s' % value)\n else:\n tags[self.__tag_mapping[ext][tag]] = \\\n self.__id3_mapping[tag](encoding=3, \n text=[u'%s' % value])\n elif ext in self.exts and ext != '.mp3':\n if tag == 'tracknumber' and ext == '.m4a':\n try:\n trkn = [int(i) for i in str(value).split('/')]\n tags[self.__tag_mapping[ext][tag]] = \\\n [(trkn[0], trkn[1])]\n except IndexError:\n tags[self.__tag_mapping[ext][tag]] = [(trkn[0], 0)]\n else:\n tags[self.__tag_mapping[ext][tag]] = [u'%s' % value]\n tags.save()", "def append_after(filename=\"\", search_string=\"\", new_string=\"\"):\n tmp = \"\"\n with open(filename) as f:\n for line in f:\n tmp += line\n if search_string in line:\n tmp += new_string\n with open(filename, \"w\") as w:\n w.write(tmp)", "def insert_html(htmlOriginal, htmlAdd, insertionPoint):\n import fileinput\n\n with open(htmlAdd) as f:\n head = [x.strip('\\n') for x in f]\n head = [x.strip() for x in head] \n for line in fileinput.input(htmlOriginal, inplace=1):\n if line.startswith(insertionPoint):\n for lineHead in range(len(head)):\n print(head[lineHead])\n else:\n print(line)", "def __correct_tags(self, line):\n\n for tag in self.used_second_level_tags:\n line = self.__replace_nth_occurence(\n line, tag, self.__create_closing_html_tag(tag)\n )\n return line", "def html_update(image_id):\n new_href = \"https://drive.google.com/file/d/%s/view\"%str(image_id)\n soup = BeautifulSoup(open(html_file), \"html.parser\")\n\n jc_link = soup('a')[0]\n jc_link['href'] = new_href\n with open(html_file, \"w\") as file:\n file.write(str(soup))", "def identify_sentences(file):\n basename = os.path.basename(file)\n textname, ext = os.path.splitext(basename)\n #print(textname)\n\n with open(file, \"r\") as file:\n document = file.read()\n document = str(document)\n #print(document)\n\n ### Gets header as string\n result = re.search(r\"(<\\?xml[^$]*?</teiHeader>)\", document, re.DOTALL)\n if result:\n header = result.group(0)\n #print(header,\"\\n\")\n else:\n print(\"There is an error. No header was found.\")\n \n ### Gets text as string\n result = re.search(r\"(<text[^$]*?</TEI>)\", document, re.DOTALL)\n if result: \n text = result.group(0)\n #print(text,\"\\n\")\n else:\n print(\"There is an error. No text was found.\")\n\n ### Preprocessing\n text = re.sub(\"<hi rend=\\\"italic\\\">\",\"\",text)\n text = re.sub(\"<hi rend=\\\"bold\\\">\",\"\",text)\n text = re.sub(\"<hi>\",\"\",text)\n text = re.sub(\"</hi>\",\"\",text)\n\n\n\n ### Identify and mark up sentences boundaries\n ## At paragraph boundaries (pairs)\n text = re.sub(\"<p>\",\"<p><s>\",text)\n text = re.sub(\"</p>\",\"</s></p>\",text)\n text = re.sub(\"<said>\",\"<said><s>\",text)\n text = re.sub(\"</said>\",\"</s></said>\",text)\n ## Classical sentence boundaries\n text = re.sub(\"([a-z|รฉ]\\. 
)([A-Z])\",\"\\\\1</s><s>\\\\2\",text)\n \n newdocument = header + text\n outtext = str(newdocument)\n outfile = \"./outfolder/\" + textname + \"_s.xml\"\n with open(outfile,\"w\") as output:\n output.write(outtext)", "def close(self, tag):\n return \"</{}>\".format(self.tags[tag].split(\" \", 1)[0])", "def tag_file() -> str:\n return os.path.join(glob.application_folder, \"item_tags.txt\")", "def endFile(file) :\n log.run(\"addhis in=%s comment='Data reduction by CARMA pipeline version %s completed at %s'\" % (file,version.VERSION,time.ctime()),[],logit=False)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets property defaults for the instrument with the given Name
def __init__(self, instrumentName, web_var=None):
    self.iliad_prop = PropertyManager(instrumentName)
    # the variables which are set up from the main properties
    self._main_properties = []
    # the variables which are set up from the advanced properties.
    self._advanced_properties = []
    # The variables which are set up from web interface.
    self._web_var = web_var
[ "def _init_default_properties(self):\n for property_name in type(self).default_properties:\n if self.properties.get(property_name) is None:\n self.properties[property_name] = type(self).default_properties[property_name]", "def defaults(cls):\n msg = \"\"\"\\\n== Instrument class %(name)s ==\nradiation = %(radiation)s at %(L)s Angstrom with %(dL)s resolution\nslit distances = %(d_s1)s mm and %(d_s2)s mm\"\"\"\n L, d_s1, d_s2 = [\"%g\"%v if v is not None else 'unknown'\n for v in (cls.wavelength, cls.d_s1, cls.d_s2)]\n dL = \"%g%%\"%(cls.dLoL*100) if cls.dLoL else 'unknown'\n return msg % dict(name=cls.instrument, radiation=cls.radiation,\n L=L, dL=dL, d_s1=d_s1, d_s2=d_s2)", "def initialize_instruments(self):\n for inst in self.requiredInstruments:\n try:\n # inst_string = str(instrument).lower().replace('-', '')\n setattr(self, inst, self.instruments[inst])\n except KeyError:\n raise KeyError('Instrument {} not available.'.format(inst))\n for setting in self.requiredSettings:\n try:\n setattr(self, setting, self.settings[setting])\n except KeyError:\n raise KeyError('Setting {} not available.'.format(setting))", "def __constructDefaults(self, section):\n self.DEFAULTS[section] = {}\n self.DEFAULTS[section]= FontSetting.FONT_SIZE", "def set_default_config(self, name):\n self.local.defaults.config = name", "def setDefaults(self, defaults=()):\n for key in defaults:\n self._setattr(key, getattr(multiconfig.DefaultConfig, key))", "def set_default_values(self, defaults={'waist':90.0,'shoulder':150.0,'elbow':35.0,'wrist_roll':140.0,'wrist_pitch':85.0,'claw':None}):\n for joint in defaults:\n if defaults[joint] != None:\n self.joints[joint]['default_value'] = math.radians(defaults[joint])\n else:\n self.joints[joint]['default_value'] = None", "def create_default_values (self):\n\n self.default_values = {\"username\": '',\n \"password\": '',\n \"is_demo\": True,\n \"epic\": 'IX.D.DAX.IMF.IP',\n \"api_key\": '',\n \"proxies\": {\"https\": ''},\n \"account_nb\": '0'\n }\n for key in self.default_values:\n if key not in personal.__dict__:\n personal.__dict__[key] = self.default_values[key]", "def __defaults__(self): \n self.tag = 'weights'\n \n self.vehicle = Data()\n self.settings = Data()", "def setup_sim_properties(self):\n self.sim_model.set_sim_property(self.properties_info)", "def test_default_props():\n #==========================================================================\n # assert w_grid.active == True # timespan is as big as possible\n # assert w_grid.uncertain_duration == 3.0\n # assert w_grid.uncertain_time_delay == 0\n # assert w_grid.uncertain_speed_scale == 2\n # assert w_grid.uncertain_angle_scale == 0.4\n # assert w_grid.uncertain_angle_units == 'rad'\n #==========================================================================\n assert w_grid.wind_scale == 1\n assert w_grid.extrapolate == False\n assert w_grid.time_offset == 0\n\n _defaults(w_grid)", "def defaults(cls):\n\n msg = \"\"\"\\\n== Instrument class %(name)s ==\nradiation = %(radiation)s at %(L)g Angstrom with %(dLpercent)g%% resolution\nslit distances = %(d_s1)g mm and %(d_s2)g mm\n\"\"\" % dict(name=cls.instrument, L=cls.wavelength, dLpercent=cls.dLoL*100,\n d_s1=cls.d_s1, d_s2=cls.d_s2,\n radiation=cls.radiation,\n )\n return msg", "def create_default_measure(instrument_name, measure_name):\n measure = {\n \"measure_type\": MeasureType.other,\n \"measure_name\": measure_name,\n \"instrument_name\": instrument_name,\n \"measure_id\": f\"{instrument_name}.{measure_name}\",\n \"individuals\": None,\n 
\"default_filter\": None,\n }\n measure = Box(measure)\n return measure", "def __defaults__(self): \n self.tag = ' U.S. Standard Atmosphere (1976)'\n\n # break point data: \n self.fluid_properties = Air()\n self.planet = Earth()\n self.breaks = Data()\n self.breaks.altitude = np.array( [-2.00 , 0.00, 11.00, 20.00, 32.00, 47.00, 51.00, 71.00, 84.852]) * Units.km # m, geopotential altitude\n self.breaks.temperature = np.array( [301.15 , 288.15, 216.65, 216.65, 228.65, 270.65, 270.65, 214.65, 186.95]) # K\n self.breaks.pressure = np.array( [127774.0 , 101325.0, 22632.1, 5474.89, 868.019, 110.906, 66.9389, 3.95642, 0.3734]) # Pa\n self.breaks.density = np.array( [1.47808e0, 1.2250e0, 3.63918e-1, 8.80349e-2, 1.32250e-2, 1.42753e-3, 8.61606e-4, 6.42099e-5, 6.95792e-6]) # kg/m^3", "def set_default_values(self):\n self.vmin.set(0)\n self.vmax.set(0)\n self.dq_show.set('184')\n self.segment.set('A')\n self.N_degraded.set(0)\n self.extract.set('None')\n self.draw.set('Modal Gain')\n self.extract_offset.set(0)\n self.cmap.set('gist_yarg')\n self.grid_limits.set(1)", "def make_default_settings():\n default_settings = {\n 'height': 24, \n 'width': 24, \n 'max_box_height': 7,\n 'max_box_width': 7,\n 'max_container_height': 5,\n 'max_container_width': 9,\n 'default_num_samples': 20,\n 'fixed_floor': False,\n 'floor_height': 3,\n 'infinite_position_domain': False,\n 'frame': False, # indicates presence of PixelWorld frame\n 'frame_color': PURPLE,\n 'padding': 0, # padding around outside edge\n 'colors': COLORS.values(), \n 'check_overlap': True,\n 'allow_pushable': False, # Whether to allow objects the option of being pushable\n 'allow_targets': False, # Whether to allow use of the is_target attribute\n 'add_self': True,\n 'make_self_red_pixel': True,\n 'self_color_is_unique': False,\n 'objects_are_white': False,\n 'objects_are_small_blobs': False,\n 'self_grips': False, # True if the self can grip/ungrip other objects\n }\n return default_settings", "def restore_defaults(self):\n\n # Set default values for each of the pysat provided values. Set\n # all but the last parameter directly. Set last using __setitem__\n # to trigger a file write.\n keys = list(self.defaults.keys())\n for key in keys:\n self.data[key] = self.defaults[key]\n\n # Trigger a file write\n self.store()\n\n return", "def default(**kwargs):\n for name, default_val in kwargs.items():\n try:\n getattr(SETTINGS, name)\n except AttributeError:\n setattr(SETTINGS, name, default_val)", "def set_default_parameters(self, kind):\n\n self.kind_to_calculation_settings_mapping[kind] = self.name_to_param.copy()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Define properties which are considered to be the main properties changeable by the user. Should be overwritten by the special reduction and decorated with the decorator. Should return a dictionary whose keys are the property names and whose values are the default values these properties should have.
def def_main_properties(self):
    raise NotImplementedError('def_main_properties has to be implemented')
[ "def MainProperties(main_prop_definition): \n def main_prop_wrapper(*args):\n properties = main_prop_definition(*args)\n #print \"in decorator: \",properties\n host = args[0]\n host._main_properties=properties\n host.iliad_prop.set_input_parameters(**properties)\n return properties\n\n return main_prop_wrapper", "def get_extended_properties_dict(self):\n properties = {}\n for prop in self.extended_properties:\n if prop.delete is False:\n properties[prop.name] = prop.value\n return properties", "def _init_default_properties(self):\n for property_name in type(self).default_properties:\n if self.properties.get(property_name) is None:\n self.properties[property_name] = type(self).default_properties[property_name]", "def key_properties(self):\n # adding a 'building' qualifier to title because these props are often combined\n # with sensor properties that also have a title.\n props = {'building_title': self.title,\n 'latitude': self.latitude,\n 'longitude': self.longitude}\n try:\n props.update( yaml.load(self.other_properties, Loader=yaml.FullLoader) )\n except:\n # ignore errors\n pass\n \n return props", "def get_overrided_properties(self):\n items = {}\n for cl in self.admin_obj.__class__.mro():\n if cl is admin.ModelAdmin:\n break\n for k, v in cl.__dict__.items():\n if k not in items:\n items[k] = v\n return items", "def get_user_defined_properties(cls):\n return OrderedDict([\n ('descriptor',\n OutputProperty(description='a short and unique descriptor for this Slack integration '\n '(ie: channel, group, etc)')),\n ('url',\n OutputProperty(description='the full Slack webhook url, including the secret',\n mask_input=True,\n input_restrictions={' '},\n cred_requirement=True))\n ])", "def getPropertyMap(self, properties = None):\n if properties is None:\n properties = self.getProperties()\n\n rv = dict()\n for prefix in [\"omero\",\"Ice\"]:\n for k,v in properties.getPropertiesForPrefix(prefix).items():\n rv[k] = v\n return rv", "def AdvancedProperties(adv_prop_definition): \n def advanced_prop_wrapper(*args):\n properties = adv_prop_definition(*args)\n #print \"in decorator: \",properties\n host = args[0]\n host._advanced_properties=properties\n host.iliad_prop.set_input_parameters(**properties)\n return properties\n\n return advanced_prop_wrapper", "def _make_properties(prop_names):\n def property_factory(prop_name):\n \"\"\"\n local function which returns a getter and setter\n \"\"\"\n def prop_get(self):\n \"\"\"\n this defines a method for getting the property's value\n \"\"\"\n prop_val = getattr(self, \"_{}\".format(prop_name))\n return prop_val\n \n def prop_set(self, new_val):\n \"\"\"\n this defines a method for setting the property's value\n \"\"\"\n setattr(self, \"_{}\".format(prop_name), new_val)\n \n # return the methods for the named property\n return prop_get, prop_set\n\n def wrapper(cls):\n \"\"\"\n Enhances a class by setting the attributes (property_names) passed to \n the decorator function\n \n @param cls : class to be decorated\n @type cls : class\n \"\"\"\n for prop_name in prop_names:\n prop = property(*property_factory(prop_name))\n setattr(cls, prop_name, prop)\n return cls\n\n return wrapper", "def properties(self):\n\n properties = {}\n properties['nx'] = self.nx\n properties['ny'] = self.ny\n properties['x0'] = self.x0\n properties['y0'] = self.y0\n properties['dx'] = self.dx\n properties['dy'] = self.dy\n properties['rot'] = self.rot\n properties['dtype'] = self.dtype\n properties['filename'] = self.filename\n properties['gridtype'] = self.gridtype\n 
properties['decoration'] = self.decoration\n properties['cs'] = self.cs\n\n return properties", "def exec_properties(self) -> Dict[str, Any]:\n exec_dict = {}\n for key, val in self._execution.properties.items():\n exec_dict[key] = getattr(val, val.WhichOneof('value'))\n\n for key, val in self._execution.custom_properties.items():\n exec_dict[key] = getattr(val, val.WhichOneof('value'))\n\n return exec_dict", "def _update_defaults(self, new, base=None):\n base = base or self.__state\n # handle objects not already in instance state\n disjoint = set(new) - set(base)\n base.update({x: new[x] for x in disjoint})\n # handle overlaps\n overlap = set(base) & set(new)\n for item in overlap:\n obj1, obj2 = base[item], new[item]\n if inspect.isfunction(obj2):\n base[item] = obj2\n elif hasattr(obj2, \"__dict__\") and hasattr(obj1, \"__dict__\"):\n if obj1 is not obj2:\n self._update_defaults(obj2.__dict__, obj1.__dict__)\n else:\n base[item] = obj2", "def _buildProperties(cls):\n def _property(key):\n return property(lambda self: self._getVal(key), lambda self, x: self._setVal(key, x), lambda self: self._delVal(key))\n\n for k in PROPERTY_TYPES.keys():\n setattr(cls, k, _property(k))", "def make_default_settings():\n default_settings = {\n 'height': 24, \n 'width': 24, \n 'max_box_height': 7,\n 'max_box_width': 7,\n 'max_container_height': 5,\n 'max_container_width': 9,\n 'default_num_samples': 20,\n 'fixed_floor': False,\n 'floor_height': 3,\n 'infinite_position_domain': False,\n 'frame': False, # indicates presence of PixelWorld frame\n 'frame_color': PURPLE,\n 'padding': 0, # padding around outside edge\n 'colors': COLORS.values(), \n 'check_overlap': True,\n 'allow_pushable': False, # Whether to allow objects the option of being pushable\n 'allow_targets': False, # Whether to allow use of the is_target attribute\n 'add_self': True,\n 'make_self_red_pixel': True,\n 'self_color_is_unique': False,\n 'objects_are_white': False,\n 'objects_are_small_blobs': False,\n 'self_grips': False, # True if the self can grip/ungrip other objects\n }\n return default_settings", "def _populate_dict(self, params):\n \n output_dict = {}\n \n for prop in params:\n if getattr(self, prop) is not None:\n output_dict[prop] = getattr(self, prop)\n \n return output_dict", "def get_default_param_values(cls):\n return dict(\n rename_dict={},\n show_pins=False,\n debug=True,\n power_width_ntr=None,\n )", "def props_to_dict(properties):\n ret_val = collections.defaultdict()\n for prop in properties:\n ret_val[prop.qualified_name] = prop.prop_val.value\n return ret_val", "def getProperties(self):\n # type: () -> Dict[str]\n pass", "def create_default_values (self):\n\n self.default_values = {\"username\": '',\n \"password\": '',\n \"is_demo\": True,\n \"epic\": 'IX.D.DAX.IMF.IP',\n \"api_key\": '',\n \"proxies\": {\"https\": ''},\n \"account_nb\": '0'\n }\n for key in self.default_values:\n if key not in personal.__dict__:\n personal.__dict__[key] = self.default_values[key]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Define properties which are considered advanced but still changeable by an instrument scientist or advanced user. Should be overwritten by the special reduction and decorated with the decorator. Should return a dictionary whose keys are the property names and whose values are the default values these properties should have.
def def_advanced_properties(self):
    raise NotImplementedError('def_advanced_properties has to be implemented')
[ "def get_extended_properties_dict(self):\n properties = {}\n for prop in self.extended_properties:\n if prop.delete is False:\n properties[prop.name] = prop.value\n return properties", "def AdvancedProperties(adv_prop_definition): \n def advanced_prop_wrapper(*args):\n properties = adv_prop_definition(*args)\n #print \"in decorator: \",properties\n host = args[0]\n host._advanced_properties=properties\n host.iliad_prop.set_input_parameters(**properties)\n return properties\n\n return advanced_prop_wrapper", "def get_user_defined_properties(cls):\n return OrderedDict([\n ('descriptor',\n OutputProperty(description='a short and unique descriptor for this Slack integration '\n '(ie: channel, group, etc)')),\n ('url',\n OutputProperty(description='the full Slack webhook url, including the secret',\n mask_input=True,\n input_restrictions={' '},\n cred_requirement=True))\n ])", "def def_main_properties(self): \n raise NotImplementedError('def_main_properties has to be implemented')", "def get_overrided_properties(self):\n items = {}\n for cl in self.admin_obj.__class__.mro():\n if cl is admin.ModelAdmin:\n break\n for k, v in cl.__dict__.items():\n if k not in items:\n items[k] = v\n return items", "def key_properties(self):\n # adding a 'building' qualifier to title because these props are often combined\n # with sensor properties that also have a title.\n props = {'building_title': self.title,\n 'latitude': self.latitude,\n 'longitude': self.longitude}\n try:\n props.update( yaml.load(self.other_properties, Loader=yaml.FullLoader) )\n except:\n # ignore errors\n pass\n \n return props", "def get_experimental_design(self):\n\n return {\n 'minimumNumberOfParticipants': self._get_min_num_participants(),\n 'numberOfConditionsPerParticipant': self._get_num_conditions_per_participant(),\n 'arrangements': self._get_arrangement()\n }", "def getPropertyMap(self, properties = None):\n if properties is None:\n properties = self.getProperties()\n\n rv = dict()\n for prefix in [\"omero\",\"Ice\"]:\n for k,v in properties.getPropertiesForPrefix(prefix).items():\n rv[k] = v\n return rv", "def _optimal_camera_properties(self, with_distance=True):\n center = self.mesh._center * self._scale\n sc = 1.08 * self.mesh._camratio[-1]\n prop = {'center': center, 'scale_factor': sc, 'azimuth': 0.,\n 'elevation': 90, 'distance': 4 * sc}\n if with_distance:\n return prop\n else:\n del prop['distance']\n return prop", "def properties(self):\n\n properties = {}\n properties['nx'] = self.nx\n properties['ny'] = self.ny\n properties['x0'] = self.x0\n properties['y0'] = self.y0\n properties['dx'] = self.dx\n properties['dy'] = self.dy\n properties['rot'] = self.rot\n properties['dtype'] = self.dtype\n properties['filename'] = self.filename\n properties['gridtype'] = self.gridtype\n properties['decoration'] = self.decoration\n properties['cs'] = self.cs\n\n return properties", "def get_all_user_properties(user):\n user_properties_utility = getUtility(ICatalogFactory, name='user_properties')\n attributes = user_properties_utility.properties + METADATA_USER_ATTRS\n try:\n extender_name = api.portal.get_registry_record('genweb.controlpanel.core.IGenwebCoreControlPanelSettings.user_properties_extender')\n except:\n extender_name = ''\n\n if extender_name:\n if extender_name in [a[0] for a in getUtilitiesFor(ICatalogFactory)]:\n extended_user_properties_utility = getUtility(ICatalogFactory, name=extender_name)\n attributes = attributes + extended_user_properties_utility.properties\n\n mapping = {}\n for attr in attributes:\n 
value = user.getProperty(attr)\n if isinstance(value, str) or isinstance(value, unicode):\n mapping.update({attr: value})\n\n return mapping", "def get_registered_properties():\n return _metaschema_properties", "def getProperties(self):\n # type: () -> Dict[str]\n pass", "def get_options(self) -> Dict:\n\n center = max(self.center.get(), 1)\n linewidth= max(self.linewidth.get(), 1)\n power = max(self.power.get(), 1)\n\n out = {'power': power, 'linewidth': linewidth, 'center': center}\n return out", "def exec_properties(self) -> Dict[str, Any]:\n exec_dict = {}\n for key, val in self._execution.properties.items():\n exec_dict[key] = getattr(val, val.WhichOneof('value'))\n\n for key, val in self._execution.custom_properties.items():\n exec_dict[key] = getattr(val, val.WhichOneof('value'))\n\n return exec_dict", "def filter_properties(self, a_filter='all'):\n\t\t\n\t\tindex_key = 0\n\t\tindex_value = 0\n\t\tindex_dict = {}\n\n\t\tfor group in self.properties:\n\t\t\tif a_filter == 'no_full_sets' and len(group) >= group[0].full_size():\n\t\t\t\tindex_value += len(group)\n\t\t\t\tcontinue\n\n\t\t\tfor card in group:\n\t\t\t\tif a_filter == 'no_buildings' and card.name == \"House\" or card.name == \"Hotel\":\n\t\t\t\t\tindex_value += 1\n\t\t\t\t\tcontinue\n\t\t\t\tif a_filter == 'no_any_wilds' and card.name == \"Property Wild: Any\":\n\t\t\t\t\tindex_value += 1\n\t\t\t\t\tcontinue\n\n\t\t\t\tindex_dict[index_key] = index_value\n\t\t\t\tindex_key += 1\n\t\t\t\tindex_value += 1\n\n\t\treturn index_dict", "def _get_inferred_properties(self):\n if self._inferred_properties is None:\n patterns = [\n # namespaced\n \"^/apis/(?P<group>[^/]+)/(?P<apiversion>v[a-z0-9]+)/namespaces/(?P<namespace>[a-z0-9-]+)/(?P<resourcetype>[a-z-]+)/(?P<name>[a-z0-9-.]+)[/]?(?P<subresource>[a-z]*)$\",\n # cluster scoped\n \"^/apis/(?P<group>[^/]+)/(?P<apiversion>v[a-z0-9]+)/(?P<resourcetype>[a-z-]+)/(?P<name>[a-z0-9-.]+)[/]?(?P<subresource>[a-z]*)$\",\n # api core (namespaced)\n \"^/api[s]?/(?P<apiversion>v[a-z0-9]+)/namespaces/(?P<namespace>[a-z0-9-]+)/(?P<resourcetype>[a-z-]+)/(?P<name>[a-z0-9-.]+)[/]?(?P<subresource>[a-z]*)$\",\n # api core\n \"^/api[s]?/(?P<apiversion>v[a-z0-9]+)/(?P<resourcetype>[a-z-]+)/(?P<name>[a-z0-9-.]+)[/]?(?P<subresource>[a-z]*)$\",\n ]\n match = None\n for pattern in patterns:\n match = re.match(pattern, self._resource_path)\n if match is not None:\n break\n\n if match is None:\n raise ValueError(self._resource_path)\n dd = match.groupdict()\n name = dd.get(\"name\")\n group = dd.get(\"group\", \"\")\n apiversion = dd.get(\"apiversion\")\n namespace = dd.get(\"namespace\")\n resourcetype = dd.get(\"resourcetype\")\n subresource = dd.get(\"subresource\")\n\n props = {\n PropertyType.DEFAULT: {},\n PropertyType.EXTENDED: {}\n }\n #if group is not None:\n # props[PropertyType.EXTENDED][\"group\"] = group\n if len(group):\n apiversion = \"{}/{}\".format(group, apiversion)\n props[PropertyType.EXTENDED][\"apiversion\"] = apiversion\n if namespace is not None:\n props[PropertyType.EXTENDED][\"namespace\"] = namespace\n props[PropertyType.EXTENDED][\"resourcetype\"] = resourcetype\n if subresource is not None:\n props[PropertyType.EXTENDED][\"subresource\"] = subresource\n\n props[PropertyType.EXTENDED][\"name\"] = name\n props[PropertyType.EXTENDED][\"kind\"] = self._get_resource().get(\"kind\")\n\n self._inferred_properties = props\n\n return self._inferred_properties", "def build_purifier_dict(self, dev_dict: dict) -> None:\n self.enabled = dev_dict.get('enabled', False)\n if self.enabled:\n 
self.device_status = 'on'\n else:\n self.device_status = 'off'\n self.details['filter_life'] = dev_dict.get('filter_life', 0)\n self.mode = dev_dict.get('mode', 'manual')\n self.speed = dev_dict.get('level', 0)\n self.details['display'] = dev_dict.get('display', False)\n self.details['child_lock'] = dev_dict.get('child_lock', False)\n self.details['night_light'] = dev_dict.get('night_light', 'off')\n self.details['display'] = dev_dict.get('display', False)\n self.details['display_forever'] = dev_dict.get('display_forever',\n False)\n if self.air_quality_feature is True:\n self.details['air_quality_value'] = dev_dict.get(\n 'air_quality_value', 0)\n self.details['air_quality'] = dev_dict.get('air_quality', 0)", "def _init_default_properties(self):\n for property_name in type(self).default_properties:\n if self.properties.get(property_name) is None:\n self.properties[property_name] = type(self).default_properties[property_name]", "def create_default_values (self):\n\n self.default_values = {\"username\": '',\n \"password\": '',\n \"is_demo\": True,\n \"epic\": 'IX.D.DAX.IMF.IP',\n \"api_key\": '',\n \"proxies\": {\"https\": ''},\n \"account_nb\": '0'\n }\n for key in self.default_values:\n if key not in personal.__dict__:\n personal.__dict__[key] = self.default_values[key]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator that stores the properties designated as main and sets these properties as input to the reduction parameters.
def MainProperties(main_prop_definition):
    def main_prop_wrapper(*args):
        properties = main_prop_definition(*args)
        #print "in decorator: ",properties
        host = args[0]
        host._main_properties=properties
        host.iliad_prop.set_input_parameters(**properties)
        return properties

    return main_prop_wrapper
[ "def AdvancedProperties(adv_prop_definition): \n def advanced_prop_wrapper(*args):\n properties = adv_prop_definition(*args)\n #print \"in decorator: \",properties\n host = args[0]\n host._advanced_properties=properties\n host.iliad_prop.set_input_parameters(**properties)\n return properties\n\n return advanced_prop_wrapper", "def kwargs_decorator(deco):\n return update_wrapper(curry(deco), deco)", "def params(self, **params):\n for param in params:\n setattr(self.steps[self.current_step], param, params[param])\n return self.common_decorator", "def defer_properties(recipent_name, property_names):\n def decorator(cls):\n for property_name in property_names:\n prop = defer_property(recipent_name, property_name)\n\n setattr(cls, property_name, prop)\n\n return cls\n\n return decorator", "def _decorate_with_context(property_name: str, method_impl: Callable) -> Callable:\n\n @functools.wraps(method_impl)\n def decorated_function(self):\n if property_name in self._data:\n return self._data[property_name]\n\n ctx = WearyContext()\n\n result = method_impl(self, ctx)\n self._data[property_name] = result\n\n return result\n\n return decorated_function", "def kwargs_processor(self, fn):\r\n self._kwargs_processor = fn\r\n return fn", "def def_main_properties(self): \n raise NotImplementedError('def_main_properties has to be implemented')", "def data_source_factory(name=None, **properties):\n\n def _decorator(func):\n @functools.wraps(func)\n def _properties(settings):\n def _factory(environ):\n return func(settings, environ)\n d = dict(properties)\n d['name'] = name\n d['factory'] = _factory\n return d\n return _properties\n return _decorator", "def copyprops(original_fn, decorated_fn):\n if hasattr(original_fn, '_wsgiwapi_props'):\n decorated_fn._wsgiwapi_props = original_fn._wsgiwapi_props\n if hasattr(original_fn, '__doc__'):\n decorated_fn.__doc__ = original_fn.__doc__", "def pre_metrics_init(init_f):\n def wrapper(self, **kwargs):\n # Add params from base class\n self.append_params(UserMetric)\n self.assign_attributes(kwargs, 'init')\n\n # Call init\n init_f(self, **kwargs)\n\n return wrapper", "def reduce(*args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def puzzle_property(*prereqs):\n\n # Try to throw a useful error if we were mistakenly called on a\n # function rather than on a list of prerequisites.\n for prereq in prereqs:\n if not isinstance(prereq, type):\n assert '_prereqs' in prereq.__dict__, _dec_gen_error_msg\n assert '_infers' in prereq.__dict__, _dec_gen_error_msg\n\n # This is the actual decorator.\n def add_prereqs(f):\n \"\"\"A puzzle property decorator for a given list of prerequisites.\"\"\"\n\n # Bad stuff happens if we double-apply the decorator, so let's\n # check that the decorator hasn't already been applied and\n # that we're not (for some strange reason) overwriting\n # pre-existing data.\n assert '_prereqs' not in f.__dict__\n assert '_infers' not in f.__dict__\n\n f._prereqs = prereqs\n f._infers = []\n\n initial = True\n # Add a reference to this property to all of its prereqs, so\n # that they will be able to invoke the evaluation of this\n # property when they are found to be true.\n for prereq in prereqs:\n if not isinstance(prereq, type):\n prereq._infers.append(f)\n initial = False\n\n if initial:\n _initial_properties.append(f)\n\n return f\n\n return add_prereqs", "def _add_property_function(func_name):\n\n def property_func(self, *args, **kwargs):\n result = getattr(self._tensor, func_name)(*args, **kwargs)\n return result\n\n 
setattr(CUDALongTensor, func_name, property_func)", "def initialise_global_reduce_ops(self,):\n # NB axis must be set to zero because of the list-comprehension...\n self.reduced_mean_tensor_list = [\n tf.reduce_mean(\n input_tensor=placeholder,\n axis=0\n ) for placeholder in self.parameter_placeholder_list\n ]\n # Can insert an operation here in between reduce_mean and assign, EG\n # Bayesian inference for parameters by using previous value as a prior\n self.assign_reduced_mean_op_list = [\n var.assign(reduced_mean) for (var, reduced_mean) in zip(\n self.global_vars, self.reduced_mean_tensor_list\n )\n ]", "def streaming_step_properties(input, output, mapper, reducer=None, arguments=None):\n streaming_step = {\n 'mapper': mapper,\n 'reducer': '',\n 'input': input,\n 'output': output\n }\n if reducer is not None:\n streaming_step['reducer'] = reducer\n if arguments is not None:\n streaming_step['arguments'] = arguments\n return streaming_step", "def get_advice(**kwds):\n def decorate(f):\n for k in kwds:\n if k == 'pre_fn' or k == 'post_fn' or k == 'pre_process_fn' or k == 'post_processs_fn' or k == 'init_fn':\n setattr(f, k, kwds[k])\n return f\n return decorate", "def save_init_args(deepcopy_args=False):\n if deepcopy_args: # whether to deepcopy the input arguments\n def safe_copy(val):\n try:\n return copy.deepcopy(val)\n except:\n return copy.copy(val)\n else:\n def safe_copy(val): return val\n\n def decorator(fun):\n @functools.wraps(fun)\n def wrapper(self, *args, **kwargs):\n if hasattr(self, '_tfObject__args_saved'):\n if self._tfObject__args_saved: # make sure it's only called once\n return fun(self, *args, **kwargs)\n\n # save the input arguments\n self.__args, self.__kwargs = [], {}\n self.__args = [safe_copy(arg) for arg in args]\n self.__kwargs = {k: safe_copy(v) for k, v in kwargs.items()}\n self.__args_saved = True\n\n return fun(self, *args, **kwargs)\n return wrapper\n return decorator", "def _make_properties(prop_names):\n def property_factory(prop_name):\n \"\"\"\n local function which returns a getter and setter\n \"\"\"\n def prop_get(self):\n \"\"\"\n this defines a method for getting the property's value\n \"\"\"\n prop_val = getattr(self, \"_{}\".format(prop_name))\n return prop_val\n \n def prop_set(self, new_val):\n \"\"\"\n this defines a method for setting the property's value\n \"\"\"\n setattr(self, \"_{}\".format(prop_name), new_val)\n \n # return the methods for the named property\n return prop_get, prop_set\n\n def wrapper(cls):\n \"\"\"\n Enhances a class by setting the attributes (property_names) passed to \n the decorator function\n \n @param cls : class to be decorated\n @type cls : class\n \"\"\"\n for prop_name in prop_names:\n prop = property(*property_factory(prop_name))\n setattr(cls, prop_name, prop)\n return cls\n\n return wrapper", "def feature(func):\n\n @functools.wraps(func) # makes the decorated function behave as the original (name etc)\n def decorated_func(self, items, *args, **kwargs):\n\n parameters = list(inspect.signature(func).parameters.items())\n\n # treat all defaultless arguments, including var_positional (*cols), as column names:\n columns = []\n arg_index = 0\n for name, param in parameters[2:]:\n if arg_index >= len(args):\n break\n if param.kind == param.VAR_POSITIONAL:\n # if func takes any list of args, assume they all signify columns:\n columns.extend(args[arg_index:])\n break\n elif param.default is inspect.Parameter.empty:\n # the argument signifies a column:\n columns.append(args[arg_index])\n else:\n # the argument is 
in fact a keyword argument:\n kwargs[name] = args[arg_index]\n arg_index += 1\n\n # Add all defaults to the kwargs and sort them in order of signature:\n kwargs_complete = {**{k: v.default for k, v in parameters if v.default is not inspect.Parameter.empty}, **kwargs}\n kwargs_complete_sorted = sorted([(k, v) for k,v in kwargs_complete.items()], key=lambda x: [p[0] for p in parameters].index(x[0]))\n function_str = f\"{func.__name__}({','.join(columns)},{','.join(f'{str(k)}={str(v)}' for k,v in kwargs_complete_sorted)})\"\n\n if not columns: # no saving/loading of pre-computed results\n logging.info(f\" Extracting {function_str}: computing {len(items)} anew (no column arguments; results will not be saved).\")\n return func(self, items, *columns, **kwargs)\n\n stored_values = self.computed_features.setdefault(function_str, {})\n keys = items[columns].apply(lambda x: tuple(x), axis=1)\n\n # compute & store new items\n compute_indices = ~keys.isin(stored_values)\n num_new = sum(compute_indices)\n logging.info(f\" Extracting {function_str}: {len(compute_indices) - num_new} precomputed items available, computing {num_new} anew.\")\n\n if num_new > 0:\n new_values = dict(zip(keys[compute_indices],\n func(self, items[compute_indices], *columns, **kwargs)))\n stored_values.update(new_values)\n\n # return list of values\n all_values = [stored_values[key] for key in keys]\n return all_values\n\n registered_features.append((func.__name__, decorated_func))\n\n return decorated_func" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorator that stores the properties designated as advanced and sets these properties as input for the reduction parameters.
def AdvancedProperties(adv_prop_definition):
    def advanced_prop_wrapper(*args):
        properties = adv_prop_definition(*args)
        #print "in decorator: ",properties
        host = args[0]
        host._advanced_properties=properties
        host.iliad_prop.set_input_parameters(**properties)
        return properties

    return advanced_prop_wrapper
[ "def params(self, **params):\n for param in params:\n setattr(self.steps[self.current_step], param, params[param])\n return self.common_decorator", "def kwargs_decorator(deco):\n return update_wrapper(curry(deco), deco)", "def MainProperties(main_prop_definition): \n def main_prop_wrapper(*args):\n properties = main_prop_definition(*args)\n #print \"in decorator: \",properties\n host = args[0]\n host._main_properties=properties\n host.iliad_prop.set_input_parameters(**properties)\n return properties\n\n return main_prop_wrapper", "def defer_properties(recipent_name, property_names):\n def decorator(cls):\n for property_name in property_names:\n prop = defer_property(recipent_name, property_name)\n\n setattr(cls, property_name, prop)\n\n return cls\n\n return decorator", "def _decorate_with_context(property_name: str, method_impl: Callable) -> Callable:\n\n @functools.wraps(method_impl)\n def decorated_function(self):\n if property_name in self._data:\n return self._data[property_name]\n\n ctx = WearyContext()\n\n result = method_impl(self, ctx)\n self._data[property_name] = result\n\n return result\n\n return decorated_function", "def _proxy_container_op_props(cls: 'ContainerOp'):\n # properties mapping to proxy: ContainerOps.<prop> => Container.<prop>\n prop_map = dict(image='image', env_variables='env')\n # itera and create class props\n for op_prop, container_prop in prop_map.items():\n # create getter and setter\n _getter, _setter = _create_getter_setter(container_prop)\n # decorate with deprecation warning\n getter = deprecation_warning(_getter, op_prop, container_prop)\n setter = deprecation_warning(_setter, op_prop, container_prop)\n # update attribites with properties\n setattr(cls, op_prop, property(getter, setter))\n return cls", "def puzzle_property(*prereqs):\n\n # Try to throw a useful error if we were mistakenly called on a\n # function rather than on a list of prerequisites.\n for prereq in prereqs:\n if not isinstance(prereq, type):\n assert '_prereqs' in prereq.__dict__, _dec_gen_error_msg\n assert '_infers' in prereq.__dict__, _dec_gen_error_msg\n\n # This is the actual decorator.\n def add_prereqs(f):\n \"\"\"A puzzle property decorator for a given list of prerequisites.\"\"\"\n\n # Bad stuff happens if we double-apply the decorator, so let's\n # check that the decorator hasn't already been applied and\n # that we're not (for some strange reason) overwriting\n # pre-existing data.\n assert '_prereqs' not in f.__dict__\n assert '_infers' not in f.__dict__\n\n f._prereqs = prereqs\n f._infers = []\n\n initial = True\n # Add a reference to this property to all of its prereqs, so\n # that they will be able to invoke the evaluation of this\n # property when they are found to be true.\n for prereq in prereqs:\n if not isinstance(prereq, type):\n prereq._infers.append(f)\n initial = False\n\n if initial:\n _initial_properties.append(f)\n\n return f\n\n return add_prereqs", "def _make_properties(prop_names):\n def property_factory(prop_name):\n \"\"\"\n local function which returns a getter and setter\n \"\"\"\n def prop_get(self):\n \"\"\"\n this defines a method for getting the property's value\n \"\"\"\n prop_val = getattr(self, \"_{}\".format(prop_name))\n return prop_val\n \n def prop_set(self, new_val):\n \"\"\"\n this defines a method for setting the property's value\n \"\"\"\n setattr(self, \"_{}\".format(prop_name), new_val)\n \n # return the methods for the named property\n return prop_get, prop_set\n\n def wrapper(cls):\n \"\"\"\n Enhances a class by setting the 
attributes (property_names) passed to \n the decorator function\n \n @param cls : class to be decorated\n @type cls : class\n \"\"\"\n for prop_name in prop_names:\n prop = property(*property_factory(prop_name))\n setattr(cls, prop_name, prop)\n return cls\n\n return wrapper", "def with_attrs(**kwargs):\n def decorator(fun):\n for k, v in kwargs.items():\n setattr(fun, k, v)\n\n return fun\n\n return decorator", "def reduce(*args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def generic_expander_decorator(wrapped):\r\n @functools.wraps(wrapped)\r\n def generic_expander_wrapper(base_url, name, tokens):\r\n \"\"\"Wrap the expander.\"\"\"\r\n keyword = get_single_keyword(tokens)\r\n if keyword in ('inherit', 'initial'):\r\n results = dict.fromkeys(expanded_names, keyword)\r\n skip_validation = True\r\n else:\r\n skip_validation = False\r\n results = {}\r\n if wants_base_url:\r\n result = wrapped(name, tokens, base_url)\r\n else:\r\n result = wrapped(name, tokens)\r\n for new_name, new_token in result:\r\n assert new_name in expanded_names, new_name\r\n if new_name in results:\r\n raise InvalidValues(\r\n 'got multiple %s values in a %s shorthand'\r\n % (new_name.strip('-'), name))\r\n results[new_name] = new_token\r\n\r\n for new_name in expanded_names:\r\n if new_name.startswith('-'):\r\n # new_name is a suffix\r\n actual_new_name = name + new_name\r\n else:\r\n actual_new_name = new_name\r\n\r\n if new_name in results:\r\n value = results[new_name]\r\n if not skip_validation:\r\n # validate_non_shorthand returns [(name, value)]\r\n (actual_new_name, value), = validate_non_shorthand(\r\n base_url, actual_new_name, value, required=True)\r\n else:\r\n value = 'initial'\r\n\r\n yield actual_new_name, value\r\n return generic_expander_wrapper", "def get_advice(**kwds):\n def decorate(f):\n for k in kwds:\n if k == 'pre_fn' or k == 'post_fn' or k == 'pre_process_fn' or k == 'post_processs_fn' or k == 'init_fn':\n setattr(f, k, kwds[k])\n return f\n return decorate", "def get_optimizer_kwargs(self, states, internals, actions, terminal, reward, update):\n kwargs = dict()\n kwargs['time'] = self.timestep\n kwargs['variables'] = self.get_variables()\n kwargs['fn_loss'] = (\n lambda: self.fn_loss(states=states, internals=internals, actions=actions, terminal=terminal, reward=reward, update=update)\n )\n if self.global_model is not None:\n kwargs['global_variables'] = self.global_model.get_variables()\n return kwargs", "def copyprops(original_fn, decorated_fn):\n if hasattr(original_fn, '_wsgiwapi_props'):\n decorated_fn._wsgiwapi_props = original_fn._wsgiwapi_props\n if hasattr(original_fn, '__doc__'):\n decorated_fn.__doc__ = original_fn.__doc__", "def _create_getter(dct, lst, func):\n def _wrapper():\n return dct, lst, func\n return _wrapper", "def __init__(self, func=None, decorators=None, **params):\n params[\"decorators\"] = decorators\n super(ChainedDecorator, self).__init__(func, **params)", "def _add_property_function(func_name):\n\n def property_func(self, *args, **kwargs):\n result = getattr(self._tensor, func_name)(*args, **kwargs)\n return result\n\n setattr(CUDALongTensor, func_name, property_func)", "def thermoml_property(thermoml_string, supported_phases):\n\n def decorator(cls):\n register_thermoml_property(thermoml_string, supported_phases, cls)\n return cls\n\n return decorator", "def make_optimizer(self):\n raise NotImplementedError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the normalized sublattice site ratios, e.g. [[0.25, 0.25], [0.1666, 0.1666, 0.1666]].
def normalized_sublattice_site_ratios(self):
    subl_model = self.sublattice_model
    subl_names = self._sublattice_names
    comp_dict = self.composition.as_dict()
    site_ratios = [[comp_dict['X'+name+e+'0+']/self.num_sites for e in subl] for subl, name in zip(subl_model, subl_names)]
    return site_ratios
[ "def _site_ratio_normalization(self):\n site_ratio_normalization = S.Zero\n # Calculate normalization factor\n for idx, sublattice in enumerate(self.constituents):\n active = set(sublattice).intersection(self.components)\n subl_content = sum(spec.number_of_atoms * v.SiteFraction(self.phase_name, idx, spec) for spec in active)\n site_ratio_normalization += self.site_ratios[idx] * subl_content\n return site_ratio_normalization", "def get_downsample_ratio(self, wsi, level: int) -> float:\n return float(wsi.pages[0].imagelength) / float(wsi.pages[level].imagelength)", "def ratio_digit_sub_domain_scaled_calculation(self):\n domain = self.hostname\n psl = PublicSuffixList()\n psl.accept_unknown = False\n if domain is None:\n domain = \"\"\n else:\n try:\n domain = domain[:len(domain) - (len(psl.publicsuffix(domain)) + 1)]\n except TypeError:\n pass\n\n subdomains = domain.split(\".\")\n\n exclusiveDigit = 0\n\n for subdomain in subdomains:\n if sum(list(map(lambda x: 1 if x.isdigit() else 0, subdomain))) == len(subdomain):\n exclusiveDigit += 1\n\n self.ratioDigitSubDomainScaledWeight = exclusiveDigit / len(subdomains)", "def _calc_scales():\n raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]\n min_scale = min(np.floor(np.log2(np.max(clusters_w[normal_idx] / raw_w))),\n np.floor(np.log2(np.max(clusters_h[normal_idx] / raw_h))))\n max_scale = min(1.0, -np.log2(max(raw_h, raw_w) / MAX_INPUT_DIM))\n scales_down = pl.frange(min_scale, 0, 1.)\n scales_up = pl.frange(0.5, max_scale, 0.5)\n scales_pow = np.hstack((scales_down, scales_up))\n scales = np.power(2.0, scales_pow)\n return scales", "def ratio_digit_scaled_calculation(self):\n domain = self.hostname\n psl = PublicSuffixList()\n psl.accept_unknown = False\n if domain is None:\n domain = \"\"\n else:\n try:\n domain = domain[:len(domain) - (len(psl.publicsuffix(domain)) + 1)]\n except TypeError:\n pass\n\n domain.replace(\".\", \"\")\n\n self.ratioDigitScaledWeight = sum(list(map(lambda x: 1 if x.isdigit() else 0, domain))) / len(domain)", "def pixel_size_ratio(self):\n return 2**(self.levels[-1])", "def ratioRelSol(self,element1,element2):\n \n ratio = self.model[element1]/self.model[element2]*self.solar[element2].loc[0]/self.solar[element1].loc[0]\n return ratio", "def normalize(self): \n mag = self.distance( Point() )\n if mag > 0:\n self.scale(1/mag)", "def normalize(self, belief: Sequence[Sequence[float]]) -> Sequence[Sequence[float]]:\n s = sum(sum(r) for r in belief)\n if s:\n return [[c/s for c in r] for r in belief]\n p = 1./sum(len(_) for _ in range(len(belief)))\n return [[p]*len(_) for _ in range(len(belief))]", "def getAspectRatio(self) -> \"float\":\n return _coin.SbBox2s_getAspectRatio(self)", "def getAspectRatio(self) -> \"float\":\n return _coin.SbBox2f_getAspectRatio(self)", "def getViewportAspectRatio(self) -> \"float\":\n return _coin.SbViewportRegion_getViewportAspectRatio(self)", "def normalize(self) -> \"double\":\n return _coin.SbVec2d_normalize(self)", "def getAspectRatio(self) -> \"float\":\n return _coin.SbBox2i32_getAspectRatio(self)", "def _normalize_prototypes(prototypes: np.ndarray) -> None:\n np.divide(\n prototypes,\n np.linalg.norm(prototypes, axis=1, keepdims=True),\n out=prototypes,\n )", "def get_downsample_ratio(self, wsi, level: int) -> float:\n return float(wsi.resolutions[\"level_downsamples\"][level])", "def norm(self) -> float:\n return np.sqrt(self.inner_product(self).real)", "def norm(i,subvalues):\n return rightProd(i,subvalues) / leftProd(i)", "def getAspectRatio(self) -> \"double\":\n return 
_coin.SbBox2d_getAspectRatio(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a concrete SQS built from this abstract SQS using the given sublattice model.
def get_concrete_sqs(self, subl_model, scale_volume=True):
    def _subl_error():
        raise ValueError('Concrete sublattice model {} does not match size of abstract sublattice model {}'.format(subl_model, self.sublattice_model))
    if len(subl_model) != len(self.sublattice_model):
        _subl_error()
    # build the replacement dictionary and the site ratios
    # we have to look up the sublattice names to build the replacement species names
    replacement_dict = {}
    site_occupancies = []  # list of [{'FE': 0.3333, 'NI': 0.6666}, {'FE': 1}] for [['FE', 'NI'], ['FE]]
    for abstract_subl, concrete_subl, subl_name, subl_ratios in zip(self.sublattice_model, subl_model, self._sublattice_names, self.sublattice_site_ratios):
        if len(abstract_subl) != len(concrete_subl):
            _subl_error()
        sublattice_ratio_sum = sum(subl_ratios)
        sublattice_occupancy_dict = {}
        for abstract_specie, concrete_specie, site_ratio in zip(abstract_subl, concrete_subl, subl_ratios):
            specie = 'X' + subl_name + abstract_specie
            replacement_dict[specie] = concrete_specie
            sublattice_occupancy_dict[concrete_specie] = sublattice_occupancy_dict.get(concrete_specie, 0) + site_ratio/sublattice_ratio_sum
        site_occupancies.append(sublattice_occupancy_dict)
    # create a copy of myself to make the transformations and make them
    self_copy = copy.deepcopy(self)
    self_copy.replace_species(replacement_dict)
    if scale_volume:
        fractional_comp = dict(self_copy.composition.fractional_composition)
        estimated_density = 0
        for component in self_copy.composition.elements:
            temp = pmg.core.periodic_table.Element(component).data['Density of solid']
            density = float(temp.split(' ')[0])
            estimated_density += (fractional_comp[component] * density)/1000
        self_copy.scale_lattice(float((self_copy.volume/estimated_density)*self_copy.density))
    # finally we will construct the SQS object and set the values for the canonicalized
    # sublattice configuration, site ratios, and site occupancies
    # first, canonicalize the sublattice model, e.g. [['FE', 'FE'], ['NI']] => [['FE'], ['NI']]
    sublattice_configuration = [sorted(set(subl)) for subl in subl_model]
    # construct the sublattice occupancies for the model
    sublattice_occupancies = [[occupancies[specie] for specie in subl] for occupancies, subl in zip(site_occupancies, sublattice_configuration)]
    # sum up the individual sublattice site ratios to the total sublattice ratios.
    # e.g [[0.25, 0.25], [0.1666, 0.1666, 0.1666]] => [0.5, 0.5]
    site_ratios = [sum(ratios) for ratios in self.sublattice_site_ratios]
    # create the SQS and add all of these properties to our SQS
    concrete_sqs = PRLStructure.from_sites(self_copy.sites)
    concrete_sqs.sublattice_configuration = sublattice_configuration
    concrete_sqs.sublattice_occupancies = sublattice_occupancies
    concrete_sqs.sublattice_site_ratios = site_ratios
    return concrete_sqs
[ "def test_abstract_sqs_scales_volume_when_made_concrete():\r\n\r\n structure = lat_in_to_sqs(ATAT_FCC_L12_LATTICE_IN)\r\n concrete_structure = structure.get_concrete_sqs([['Fe', 'Ni'], ['Al']])\r\n assert np.isclose(concrete_structure.volume, 445.35213050176463)\r\n assert np.isclose(concrete_structure.density, 4.12275)\r\n\r\n structure = lat_in_to_sqs(ATAT_FCC_L12_LATTICE_IN)\r\n concrete_structure = structure.get_concrete_sqs([['Fe', 'Ni'], ['Al']], scale_volume=False)\r\n assert np.isclose(concrete_structure.volume, 8.0)", "def test_sqs_is_properly_enumerated_for_a_higher_order_sublattice_model():\r\n structure = lat_in_to_sqs(ATAT_FCC_L12_LATTICE_IN)\r\n structures = enumerate_sqs(structure, [['Al', 'Ni'], ['Fe', 'Cr']])\r\n assert len(structures) == 6\r\n\r\n structure = lat_in_to_sqs(ATAT_ROCKSALT_B1_LATTICE_IN)\r\n structures = enumerate_sqs(structure, [['Al', 'Ni', 'Fe'], ['Fe', 'Ni', 'Cr']])\r\n assert len(structures) == 36", "def test_sqs_is_properly_enumerated_for_a_multiple_solution_sublattice_model():\r\n structure = lat_in_to_sqs(ATAT_ROCKSALT_B1_LATTICE_IN)\r\n structures = enumerate_sqs(structure, [['Al', 'Ni'], ['Fe', 'Cr']])\r\n assert len(structures) == 9\r\n\r\n structure = lat_in_to_sqs(ATAT_ROCKSALT_B1_LATTICE_IN)\r\n structures = enumerate_sqs(structure, [['Al', 'Ni'], ['Fe', 'Cr']])\r\n assert len(structures) == 9\r\n assert all([isinstance(s, PRLStructure) for s in structures])", "def _update_scale(self,\n child: ConvBlockGene,\n new_scale: int,\n self_scale: int):\n child.set(spatial_scale=new_scale)\n\n # Update n kernels\n d_kernels = 2 ** (new_scale - self_scale)\n self_kernels = self.hyperparam('n_kernels')\n child.set(n_kernels=d_kernels * self_kernels)\n\n # Update name\n child.name = f'decode block {new_scale}'", "def feature_scale(self):\n \n #-------------------------------------------------------------------------\n # List of quantitative features to be standardized\n #-------------------------------------------------------------------------\n list_quant_feature = ['Quantity','UnitPrice']\n self._list_quant_feature = list_quant_feature.copy()\n\n #-------------------------------------------------------------------------\n # Standardization is applied over quantitative features in list.\n #-------------------------------------------------------------------------\n self.std_scale, X_quantitative_std = \\\n p5_util.df_features_standardize(self.df_invoice, list_quant_feature)\n\n\n df_quant_std = pd.DataFrame(X_quantitative_std\\\n , index=self.df_invoice.index)\n \n #-------------------------------------------------------------------------\n # Columns from standardized dataframe are renamed\n #-------------------------------------------------------------------------\n df_quant_std.rename(columns={0:'Quantity',1:'UnitPrice'},inplace=True)\n\n #-------------------------------------------------------------------------\n # Standardized values dataframe is aggregated to df_invoice\n #-------------------------------------------------------------------------\n list_col_drop = ['Quantity','UnitPrice']\n list_col_keep = \\\n [col for col in self.df_invoice.columns if col not in list_col_drop ]\n self.df_invoice = self.df_invoice[list_col_keep]\n\n self.df_invoice = pd.concat([self.df_invoice,df_quant_std], axis=1)\n \n return", "def setScale(self, *args) -> \"void\":\n return _coin.SbMatrix_setScale(self, *args)", "def test_equality_of_sqs_objects():\r\n config = [['A', 'B'], ['A']]\r\n occupancy = [[0.5, 0.5], [1]]\r\n site_ratios = [3, 1]\r\n # Use same 
sublattice for different underlying structures. Should be equal\r\n s1 = PRLStructure(Lattice.hexagonal(1, 2), ['Mg', 'Mg'], [[0, 0, 0], [0.3333, 0.66666, 0.5]], sublattice_configuration=config, sublattice_occupancies=occupancy, sublattice_site_ratios=site_ratios)\r\n s2 = PRLStructure(Lattice.cubic(1), ['Fe'], [[0, 0, 0]], sublattice_configuration=config, sublattice_occupancies=occupancy, sublattice_site_ratios=site_ratios)\r\n assert s1 == s2\r\n\r\n # Use same underlying crystal structures, but different sublattice configurations. Should be not equal\r\n s1.sublattice_site_ratios = [2, 1]\r\n assert s1 != s2\r\n s1.sublattice_site_ratios = site_ratios\r\n\r\n s1.sublattice_occupancies = [[0.25, 0.5], [1]]\r\n assert s1 != s2\r\n s1.sublattice_occupancies = occupancy\r\n\r\n s1.sublattice_configuration = [['A', 'A'], ['A']]\r\n assert s1 != s2\r\n s1.sublattice_configuration = config\r\n\r\n assert s1 == s2", "def secularize(self):\n \n if not self.is_secular:\n \n self.secular_basis_op = self._get_current_basis_op()\n \n if self.as_operators:\n self._set_population_rates_from_operators()\n self._set_dephasing_rates_from_operators() \n else:\n self._set_population_rates_from_tensor()\n self._set_dephasing_rates_from_tensor()\n \n self.is_secular = True", "def scaling(self, bonds, scaleparameters=None,scf_scaleparameters=None):\n if scaleparameters is None:\n scaleparameters=[0.0,0.0,0.8333333333]\n\n\n\n # Building connection matrix\n for k in range(len(bonds)):\n self.bound12[bonds[k][0]][bonds[k][1]] = 1.0\n self.bound12[bonds[k][1]][bonds[k][0]] = 1.0\n\n for i in range(len(self.bound12)):\n b12 = np.where(self.bound12[i] == 1.0)[0]\n for j in range(len(b12)):\n b12t = np.where(self.bound12[b12[j]] == 1.0)[0]\n for k in range(len(b12t)):\n if i != b12t[k]:\n self.bound13[b12t[k]][i] = 1.0\n self.bound13[i][b12t[k]] = 1.0\n\n for i in range(self.natoms):\n b13 = np.where(self.bound13[i] == 1.0)[0]\n for j in range(len(b13)):\n b13t = np.where(self.bound12[b13[j]] == 1.0)[0]\n for k in range(len(b13t)):\n if self.bound12[b13t[k]][i] == 0.0:\n self.bound14[b13t[k]][i] = 1.0\n self.bound14[i][b13t[k]] = 1.0\n\n for i in range(self.natoms):\n self.scale[i][i] = 0.0\n # find values in matrix with value 1.0\n b12 = np.array(np.where(self.bound12 == 1.0)).transpose()\n b13 = np.array(np.where(self.bound13 == 1.0)).transpose()\n b14 = np.array(np.where(self.bound14 == 1.0)).transpose()\n\n # Fill scaling matrix with values\n for i in range(len(b12)):\n self.scale[b12[i][0]][b12[i][1]] = scaleparameters[0] # Value for 1-2 interaction 0 means interactions are neglected\n for i in range(len(b13)):\n self.scale[b13[i][0]][b13[i][1]] = scaleparameters[1] # Value for 1-3 interaction 0 means interactions are neglected\n for i in range(len(b14)):\n self.scale[b14[i][0]][b14[i][1]] = scaleparameters[2] # Value for the 1-4 scaling\n\n # Different Scaling parameter for SCF\n if scf_scaleparameters !=None:\n self.scale_scf = np.ones((self.natoms, self.natoms))\n for i in range(len(b12)):\n self.scale_scf[b12[i][0]][b12[i][1]] = scf_scaleparameters[0] # Value for 1-2 interaction 0 means interactions are neglected\n for i in range(len(b13)):\n self.scale_scf[b13[i][0]][b13[i][1]] = scf_scaleparameters[1] # Value for 1-3 interaction 0 means interactions are neglected\n for i in range(len(b14)):\n self.scale_scf[b14[i][0]][b14[i][1]] = scf_scaleparameters[2] # Value for the 1-4 scaling\n\n\n\n if scaleparameters == 'onlyinter':\n #self.scale = np.ones((self.natoms, self.natoms))\n # Building connection 
matrix\n for k in range(1,len(bonds)):\n if np.all(self.bound12[:k].transpose()[k:] == 0):\n self.log('Multiple Molecules detected: No connection between atom {} and {}'.format(k,k+1))", "def q_scale(self): # real signature unknown; restored from __doc__\n return 0.0", "def __manipulatesystem(self):\n # Get ucell\n ucell = self.parent.system.ucell\n\n # Rotate to specified uvws\n rcell, transform = ucell.rotate(self.uvws, return_transform=True)\n\n # Scale atomshift by rcell vectors\n shift = np.dot(self.atomshift, rcell.box.vects)\n\n # Shift atoms\n rcell.atoms.pos += shift\n\n # Apply sizemults\n system = rcell.supersize(self.a_mults, self.b_mults, self.c_mults)\n system.wrap()\n\n # Update class attributes\n self.__transform = transform\n self.__system = system\n self.__rcell = rcell", "def initqp(self):\n\n self.qp = get_spherical_quad_points()\n sp = cartesian2spherical(self.qp.points)\n self.sqp = sp", "def setScale(self, *args):\n return _coin.SbMatrix_setScale(self, *args)", "def _rescale(self, samp, **kwargs):\n \"\"\"\n Here is where the subclass where overwrite rescale method\n \"\"\"\n return samp", "def scale(self, scale):\n self.tf_.scale = scale\n self.sdf.tf_.scale = scale\n if self.mesh_ is not None:\n self.mesh_.tf_.scale = scale", "def lat_in_to_sqs(atat_lattice_in, rename=True):\n # TODO: handle numeric species, e.g. 'g1'. Fixed\n # Problems: parser has trouble with matching next line and we have to rename it so pymatgen\n # doesn't think it's a charge.\n # parse the data\n parsed_data = _parse_atat_lattice(atat_lattice_in)\n atat_coord_system = parsed_data[0]\n atat_lattice = parsed_data[1]\n atat_atoms = parsed_data[2]\n # create the lattice\n if len(atat_coord_system) == 3:\n # we have a coordinate system matrix\n coord_system = Lattice(atat_coord_system.asList()).matrix\n else:\n # we have length and angles\n #coord_system = Lattice.from_lengths_and_angles(list(atat_coord_system[0]), list(atat_coord_system[1])).matrix\n (lat_a, lat_b, lat_c) = list(atat_coord_system[0])\n (lat_alpha, lat_beta, lat_gamma) = list(atat_coord_system[1])\n coord_system = Lattice.from_parameters(lat_a, lat_b, lat_c, lat_alpha, lat_beta, lat_gamma).matrix\n direct_lattice = Lattice(atat_lattice.asList())\n lattice = coord_system.dot(direct_lattice.matrix)\n # create the list of atoms, converted to the right coordinate system\n species_list = []\n species_positions = []\n subl_model = {} # format {'subl_name': 'atoms_found_in_subl, e.g. \"aaabbbb\"'}\n for position, atoms in atat_atoms:\n # atoms can be a list of atoms, e.g. 
for not abstract SQS\n if len(atoms) > 1:\n raise NotImplementedError('Cannot parse atom list {} because the sublattice is unclear.\\nParsed data: {}'.format(atoms, atat_atoms))\n atom = atoms[0]\n if rename:\n # change from `a_B` style to `Xab`\n\n atom = atom.lower().split('_')\n else:\n raise NotImplementedError('Cannot rename because the atom name and sublattice name may be ambigous.')\n # add the abstract atom to the sublattice model\n subl = atom[0]\n #Replace the digital by alphas, 1->a, 2->b, 3->c, ...\n rep_items = re.findall(r\"\\d+\", subl)\n for rep_item in rep_items:\n subl = subl.replace(rep_item, chr(96 + int(rep_item)))\n subl_atom = atom[1]\n subl_model[subl] = subl_model.get(subl, set()).union({subl_atom})\n # add the species and position to the lists\n species_list.append('X'+subl+subl_atom)\n species_positions.append(list(position))\n # create the structure\n sublattice_model = [[e for e in sorted(list(set(subl_model[s])))] for s in sorted(subl_model.keys())]\n sublattice_names = [s for s in sorted(subl_model.keys())]\n sqs = AbstractSQS(direct_lattice, species_list, species_positions, coords_are_cartesian=True,\n sublattice_model=sublattice_model,\n sublattice_names=sublattice_names)\n sqs.lattice = Lattice(lattice)\n #sqs.modify_lattice(Lattice(lattice)) #This will be deprecated in v2020\n\n return sqs", "def get_square_model(self):\n def square_model(v):\n f_body = force_calcs.get_body_force(\n self.param.water_density, \n self.param.sub_body_drag_coef, \n self.param.sub_body_diameter,\n v\n )\n f_mount = force_calcs.get_mount_force( \n self.param.water_density, \n self.param.sub_mount_drag_coef, \n self.param.sub_mount_chord, \n self.param.sub_mount_length, \n v\n )\n\n return f_body+f_mount\n\n return square_model", "def test_atat_bestsqs_is_correctly_parsed_to_sqs():\r\n structure = lat_in_to_sqs(ATAT_FCC_L12_LATTICE_IN)\r\n specie_types = {specie.symbol for specie in structure.types_of_specie}\r\n assert specie_types == {'Xaa', 'Xab', 'Xca'}\r\n assert np.all(structure.sublattice_model == [['a', 'b'], ['a']])\r\n assert structure.normalized_sublattice_site_ratios == [[0.125, 0.125], [0.75]]\r\n assert structure.sublattice_site_ratios == [[1, 1], [6]]\r\n assert np.all(structure._sublattice_names == ['a', 'c'])\r\n\r\n structure = lat_in_to_sqs(ATAT_ROCKSALT_B1_LATTICE_IN)\r\n specie_types = {specie.symbol for specie in structure.types_of_specie}\r\n assert specie_types == {'Xaa', 'Xab', 'Xba', 'Xbb'}\r\n assert np.all(structure.sublattice_model == [['a', 'b'], ['a', 'b']])\r\n assert structure.normalized_sublattice_site_ratios == [[0.25, 0.25], [0.25, 0.25]]\r\n assert structure.sublattice_site_ratios == [[1, 1], [1, 1]]\r\n assert np.all(structure._sublattice_names == ['a', 'b'])", "def SoModelMatrixElement_scaleBy(state: 'SoState', node: 'SoNode', scaleFactor: 'SbVec3f') -> \"void\":\n return _coin.SoModelMatrixElement_scaleBy(state, node, scaleFactor)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the endmember space group info.
def get_endmember_space_group_info(self, symprec=1e-2, angle_tolerance=5.0):
    endmember_subl = [['X' + subl_name for _ in subl] for subl, subl_name in zip(self.sublattice_model, self._sublattice_names)]
    # we need to replace the abstract names with real names of species.
    endmember_speices = {specie for subl in endmember_subl for specie in subl}
    real_species_dict = {abstract_specie: real_specie for abstract_specie, real_specie in zip(endmember_speices, pmg.core.periodic_table._pt_data.keys())}
    # replace them
    endmember_subl = [[real_species_dict[specie] for specie in subl] for subl in endmember_subl]
    # get the structure and spacegroup info
    endmember_struct = self.get_concrete_sqs(endmember_subl, scale_volume=False)
    endmember_space_group_info = endmember_struct.get_space_group_info(symprec=symprec, angle_tolerance=angle_tolerance)
    return endmember_space_group_info
[ "def get_group(self): # real signature unknown; restored from __doc__\n return \"\"", "def retr_spacegroup_number(struct,ini):\n ini[\"spacegroup\"] = struct.get_space_group_number()\n return ini", "def McGroupCountEnd(self):\n return self._McGroupCountEnd", "def _get_space_group(s: Structure) -> int:\n try:\n return int(np.round(s.get_space_group_info()[1]))\n except TypeError:\n # 0 should be fine as it is not taken\n return 0", "def _get_group(self):\n\n import re\n\n pattern = re.compile(\"Group\\sinfo:\\n(.*?)(?=^-)\", re.M|re.DOTALL)\n pattern = re.compile(\"Group\\sinfo:\\n(.*?)(?=(^Profile\\skey|^$))\", re.M|re.DOTALL)\n info = pattern.findall(self.msg)\n\n # No group info: return None\n if len(info) == 0: return None\n info = info[0][0]\n\n # Decoding group information\n res = msggroupinfo(info)\n\n return(res)", "def get_outgroup(self):\n if self.outgroup is not None:\n outgroup_taxonomy = ''\n for i in self.data.seq_records:\n if self.outgroup == i.voucher_code:\n outgroup_taxonomy = '{0}_{1}'.format(i.taxonomy['genus'],\n i.taxonomy['species'])\n break\n outgroup = '\\noutgroup {0}_{1};'.format(self.outgroup,\n outgroup_taxonomy)\n else:\n outgroup = ''\n return outgroup", "def get_next_group_name(self):\n\n return f'g.{len(self.groups)}'", "def get_root_gms_group(self) -> dict:\n return self._get(\"/gms/group/root\")", "def G2():\n\n return Group.schlafli(6)", "def check_if_group_has_members(group):\n memb = group.uniqueMember.values\n # It can't be an empty list so an empty group should\n # just be a single empty member.\n if memb == ['']:\n return \" (empty)\"\n return \"\"", "def _identify_member_groups(self):\n\n # dict node tag to width in z direction , and neighbouring node\n self.node_width_z_dict = dict()\n self.node_connect_z_dict = dict()\n for ele in self.long_ele:\n d1 = [] # d for distance\n d2 = []\n p1 = []\n p2 = []\n n1 = [\n trans_ele\n for trans_ele in self.trans_ele\n if trans_ele[1] == ele[1] or trans_ele[2] == ele[1]\n ]\n n2 = [\n trans_ele\n for trans_ele in self.trans_ele\n if trans_ele[1] == ele[2] or trans_ele[2] == ele[2]\n ]\n for item in n1:\n d1.append(\n [\n np.abs(a - b)\n for (a, b) in zip(\n self.node_spec[item[1]][\"coordinate\"],\n self.node_spec[item[2]][\"coordinate\"],\n )\n ]\n )\n if item[1] != ele[1] and item[1] != ele[2]:\n p1.append(item[1])\n if item[2] != ele[1] and item[2] != ele[2]:\n p1.append(item[2])\n\n for item in n2:\n d2.append(\n [\n np.abs(a - b)\n for (a, b) in zip(\n self.node_spec[item[1]][\"coordinate\"],\n self.node_spec[item[2]][\"coordinate\"],\n )\n ]\n )\n if item[1] != ele[1] and item[1] != ele[2]:\n p2.append(item[1])\n if item[2] != ele[1] and item[2] != ele[2]:\n p2.append(item[2])\n # list, [ele tag, ele width (left and right)]\n self.node_width_z_dict.setdefault(ele[1], d1)\n self.node_width_z_dict.setdefault(ele[2], d2)\n self.node_connect_z_dict.setdefault(ele[1], p1)\n self.node_connect_z_dict.setdefault(ele[2], p2)\n\n # dict z to long ele\n self.z_group_to_ele = dict()\n for count, node in enumerate(self.noz):\n self.z_group_to_ele[count] = [\n ele for ele in self.long_ele if ele[3] == count\n ]\n\n self.global_z_grid_count = max(self.z_group_to_ele.keys()) + 1\n # dict x to trans ele\n self.x_group_to_ele = dict()\n for count in range(0, self.global_x_grid_count):\n self.x_group_to_ele[count] = [\n ele for ele in self.trans_ele if ele[3] == count\n ]\n # dict edge counter to ele\n self.edge_group_to_ele = dict()\n for count in range(0, self.global_edge_count + 1):\n self.edge_group_to_ele[count] = 
[\n ele for ele in self.edge_span_ele if ele[3] == count\n ]\n # dict node tag to width in x direction\n self.node_width_x_dict = dict()\n self.node_connect_x_dict = dict()\n for ele in self.trans_ele:\n d1 = []\n d2 = []\n p1 = []\n p2 = []\n n1 = [\n long_ele\n for long_ele in self.long_ele\n if long_ele[1] == ele[1] or long_ele[2] == ele[1]\n ]\n n2 = [\n long_ele\n for long_ele in self.long_ele\n if long_ele[1] == ele[2] or long_ele[2] == ele[2]\n ]\n for item in n1:\n d1.append(\n [\n np.abs(a - b)\n for (a, b) in zip(\n self.node_spec[item[1]][\"coordinate\"],\n self.node_spec[item[2]][\"coordinate\"],\n )\n ]\n )\n if item[1] != ele[1] and item[1] != ele[2]:\n p1.append(item[1])\n if item[2] != ele[1] and item[2] != ele[2]:\n p1.append(item[2])\n for item in n2:\n d2.append(\n [\n np.abs(a - b)\n for (a, b) in zip(\n self.node_spec[item[1]][\"coordinate\"],\n self.node_spec[item[2]][\"coordinate\"],\n )\n ]\n )\n if item[1] != ele[1] and item[1] != ele[2]:\n p2.append(item[1])\n if item[2] != ele[1] and item[2] != ele[2]:\n p2.append(item[2])\n # list, [ele tag, ele width (left and right)]\n self.node_width_x_dict.setdefault(ele[1], d1)\n self.node_width_x_dict.setdefault(ele[2], d2)\n self.node_connect_x_dict.setdefault(ele[1], p1)\n self.node_connect_x_dict.setdefault(ele[2], p2)\n\n for ele in self.edge_span_ele:\n d1 = []\n d2 = []\n p1 = []\n p2 = []\n n1 = [\n long_ele\n for long_ele in self.long_ele\n if long_ele[1] == ele[1] or long_ele[2] == ele[1]\n ]\n n2 = [\n long_ele\n for long_ele in self.long_ele\n if long_ele[1] == ele[2] or long_ele[2] == ele[2]\n ]\n for item in n1:\n d1.append(\n [\n np.abs(a - b)\n for (a, b) in zip(\n self.node_spec[item[1]][\"coordinate\"],\n self.node_spec[item[2]][\"coordinate\"],\n )\n ]\n )\n if item[1] != ele[1] and item[1] != ele[2]:\n p1.append(item[1])\n if item[2] != ele[1] and item[2] != ele[2]:\n p1.append(item[2])\n for item in n2:\n d2.append(\n [\n np.abs(a - b)\n for (a, b) in zip(\n self.node_spec[item[1]][\"coordinate\"],\n self.node_spec[item[2]][\"coordinate\"],\n )\n ]\n )\n if item[1] != ele[1] and item[1] != ele[2]:\n p2.append(item[1])\n if item[2] != ele[1] and item[2] != ele[2]:\n p2.append(item[2])\n # list, [ele tag, ele width (left and right)]\n self.node_width_x_dict.setdefault(ele[1], d1)\n self.node_width_x_dict.setdefault(ele[2], d2)\n self.node_connect_x_dict.setdefault(ele[1], p1)\n self.node_connect_x_dict.setdefault(ele[2], p2)\n # create self.grid_number_dict, dict key = grid number, val = long and trans ele in grid\n self.grid_number_dict = dict()\n counter = 0\n for node_tag in self.node_spec.keys():\n # get the surrounding nodes\n x_vicinity_nodes = self.node_connect_x_dict.get(node_tag, [])\n z_vicinity_nodes = self.node_connect_z_dict.get(node_tag, [])\n for x_node in x_vicinity_nodes:\n xg = self.node_spec[x_node][\"x_group\"]\n for z_node in z_vicinity_nodes:\n zg = self.node_spec[z_node][\"z_group\"]\n # find the 3rd bounding node\n n3 = [\n n[\"tag\"]\n for n in self.node_spec.values()\n if n[\"x_group\"] == xg and n[\"z_group\"] == zg\n ]\n if n3:\n n3 = n3[0]\n if not any(\n [\n node_tag in d\n and x_node in d\n and z_node in d\n and n3 in d\n for d in self.grid_number_dict.values()\n ]\n ):\n self.grid_number_dict.setdefault(\n counter, [node_tag, x_node, n3, z_node]\n )\n counter += 1\n else: # list is empty\n if not any(\n [\n node_tag in d and x_node in d and z_node in d\n for d in self.grid_number_dict.values()\n ]\n ):\n self.grid_number_dict.setdefault(\n counter, [node_tag, x_node, n3, 
z_node]\n )\n counter += 1\n\n # dict of grid number return vicinity grid number in a subdict {'x-1': 'x+1', 'z-1' , 'z+1'}\n self.grid_vicinity_dict = dict()\n for k, grid in self.grid_number_dict.items():\n current_x_group = []\n current_z_group = []\n current_x = []\n current_z = []\n\n grid_number_record = []\n if [] in grid:\n grid.remove([])\n for node in grid:\n grid_number_record += [\n i\n for i, x in enumerate(\n [node in n for n in self.grid_number_dict.values()]\n )\n if x\n ]\n current_x_group.append(self.node_spec[node][\"x_group\"])\n current_z_group.append(self.node_spec[node][\"z_group\"])\n current_x.append(self.node_spec[node][\"coordinate\"][0])\n current_z.append(self.node_spec[node][\"coordinate\"][2])\n current_x_group = list(np.unique(current_x_group))\n current_z_group = list(np.unique(current_z_group))\n current_x = list(np.unique(current_x))\n current_z = list(np.unique(current_z))\n grid_number_record = np.unique(grid_number_record)\n # loop to characterize the grid for current\n subdict = {}\n for neighbour in grid_number_record:\n if neighbour == k: # identical , current grid\n continue\n x_group = [] # initialize variables\n x_coor = []\n z_group = []\n z_coor = []\n # loop each node in the vicintiy grids\n for nodes in self.grid_number_dict[neighbour]:\n if not nodes:\n continue\n x_group.append(self.node_spec[nodes][\"x_group\"])\n z_group.append(self.node_spec[nodes][\"z_group\"])\n x_coor.append(self.node_spec[nodes][\"coordinate\"][0])\n z_coor.append(self.node_spec[nodes][\"coordinate\"][2])\n x_group = list(np.unique(x_group))\n z_group = list(np.unique(z_group))\n x_coor = list(np.unique(x_coor))\n z_coor = list(np.unique(z_coor))\n # if x groups are identical, neighbour grid is either top or bottom of the element\n if all(a in current_x_group for a in x_group):\n # compare z max\n if max(z_coor) > max(current_z):\n subdict[\"top\"] = neighbour\n else:\n subdict[\"bottom\"] = neighbour\n # if x groups are identical, neighbour grid is either left or right of the element\n if all(a in current_z_group for a in z_group):\n if max(x_coor) > max(current_x):\n subdict[\"right\"] = neighbour\n else:\n subdict[\"left\"] = neighbour\n self.grid_vicinity_dict.setdefault(k, subdict)", "def get_group_bounds(self, group):\n\n if self.group_edges is None:\n msg = 'Unable to get energy group bounds for group \"{0}\" since ' \\\n 'the group edges have not yet been set'.format(group)\n raise ValueError(msg)\n\n cv.check_greater_than('group', group, 0)\n cv.check_less_than('group', group, self.num_groups, equality=True)\n\n lower = self.group_edges[self.num_groups-group]\n upper = self.group_edges[self.num_groups-group+1]\n return lower, upper", "def group(self) -> str: # TODO: Same as multiroom master?\n self._logger.info(\"Retrieving device group name...\")\n return self._device_info().get(\"GroupName\")", "def _get_group_dn(self):\n dn = ''\n if self.base_dn and self.group_dn:\n dn = \"{},{}\".format(self.group_dn, self.base_dn)\n elif self.base_dn:\n dn = self.base_dn\n elif self.user_dn:\n dn = self.group_dn\n return dn", "def objgrp(self, grpObj, newmembers, sr):\r\n\t\tgroup_type = grpObj[\"group_type\"]\r\n\t\tgroup_name = grpObj.name\r\n\t\tif group_type in (\"network\",):\r\n\t\t\tobject_type = \"network-object\"\r\n\t\t\tcandidate_type = \"host\"\r\n\t\telif group_type == \"service\":\r\n\t\t\tobject_type = \"port-object\"\r\n\t\t\tcandidate_type = \"eq\" # modify later for different\r\n\t\tl = \"\"\r\n\t\tif isinstance(newmembers, (tuple, list, 
set)):\r\n\t\t\tl += f\"object-group {group_type} {group_name}\\n\"\r\n\t\t\tif sr == \"del\":\r\n\t\t\t\tfor candidate in newmembers:\r\n\t\t\t\t\tl += self.objgrpadd(object_type, candidate_type, candidate)\r\n\t\t\telif sr == \"add\":\r\n\t\t\t\tfor candidate in newmembers:\r\n\t\t\t\t\tl += self.objgrprem(object_type, candidate_type, candidate)\r\n\t\telif isinstance(newmembers, str):\r\n\t\t\tif sr == \"del\":\r\n\t\t\t\tl += self.objgrpadd(object_type, candidate_type, candidate)\r\n\t\t\telif sr == \"add\":\r\n\t\t\t\tl += self.objgrprem(object_type, candidate_type, candidate)\r\n\t\tl += \"!\"\r\n\t\treturn l", "def _get_port_group(self):\n return self.__port_group", "def _find_name_in_group(self, group: str) -> str:\n\n # create group if required\n if group not in self.objects:\n self.objects[group] = {}\n\n # check group name itself\n if group not in self.objects[group]:\n return group\n\n # otherwise count up\n i = 1\n while True:\n name = '%s-%d' % (group, i)\n if name not in self.objects[group]:\n return name\n i += 1", "def test_get_end_roots(self):\n ends = self.family.get_end_roots()\n self.assertEquals(len(ends), 2)\n self.assertIn(self.family.groups.entries[\"Y_rad_out\"], ends)\n self.assertIn(self.family.groups.entries[\"XH_out\"], ends)", "def gid(self):\n ret = self._get_attr(\"GID\")\n return ret" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the charge at a coordinate influenced by atoms at the distances in dists carrying the charges in charges. Inputs dists and charges are 1D arrays of floats of the same length.
>>> from numpy import allclose
>>> dists = array(range(5)) + 0.1
>>> charges = array(range(5))
>>> vdws = array([1] * 5)
>>> c = charge_1(dists, charges)
>>> allclose(c, 0.95120)
True
>>> dists = array(range(10)) + 0.1
>>> charges = array(range(10))
>>> vdws = array([1] * 10)
>>> c = charge_1(dists, charges)
>>> allclose(c, 1.36864)
True
def charge_1(dists, charges):
    charge = charges / ( map(epsilon, dists) * dists )
    return sum(charge)
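Note that charge_1 depends on an epsilon function that is not shown here, and the expression map(epsilon, dists) * dists only multiplies cleanly under Python 2 / numpy semantics. The sketch below is a runnable reconstruction under stated assumptions: the piecewise distance-dependent dielectric is borrowed from charge_2 further down (an assumption about what epsilon looks like), and map() is materialised into an array; with those assumptions the doctest constants above are reproduced.

import numpy as np

def epsilon(d):
    # Assumed dielectric, mirroring the piecewise form hard-coded in charge_2:
    # 4 for d <= 6, 38*d - 224 for 6 < d <= 8, 80 beyond d = 8.
    if d <= 6.0:
        return 4.0
    if d <= 8.0:
        return 38.0 * d - 224.0
    return 80.0

def charge_1(dists, charges):
    # Python 3 variant: build an explicit epsilon array before multiplying.
    eps = np.array([epsilon(d) for d in dists])
    return np.sum(charges / (eps * dists))

dists, charges = np.arange(5) + 0.1, np.arange(5)
print(np.allclose(charge_1(dists, charges), 0.95120))   # True
dists, charges = np.arange(10) + 0.1, np.arange(10)
print(np.allclose(charge_1(dists, charges), 1.36864))   # True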
[ "def charge_2(dists, charges):\n d6 = dists <= 6.0\n d8 = dists <= 8.0\n d6_8 = logical_and(logical_not(d6), d8)\n epsilons = (d6*4.0) + \\\n d6_8*(38.0*dists-224.0) + \\\n logical_not(d8)*80.0\n charge = (charges / ( epsilons * dists ))\n return sum(charge)", "def charges(self, molecule):\n\n # TODO add option to use chargemol on onetep cube files.\n copy(f'../density/{molecule.name}.wfx', f'{molecule.name}.wfx')\n c_mol = Chargemol(molecule, self.all_configs)\n c_mol.generate_input()\n\n append_to_log(f'Chargemol analysis with DDEC{self.qm[\"ddec_version\"]} complete')\n\n return molecule", "def update_charge(self):\n for atom in self.atoms:\n if (len(atom.charge) == 1) and (len(atom.lone_pairs) == 1) and (len(atom.radical_electrons) == 1):\n # if the charge of the group is not labeled, then no charge update will be\n # performed. If there multiple charges are assigned, no update either.\n # Besides, this groupatom should have enough information to be updated\n atom_type = atom.atomtype[0]\n for element in allElements:\n if atom_type is ATOMTYPES[element] or atom_type in ATOMTYPES[element].specific:\n bond_order = 0\n valence_electron = elements.PeriodicSystem.valence_electrons[element]\n for _, bond in atom.bonds.items():\n bond_order += bond.order[0]\n lone_pairs = atom.lone_pairs[0]\n radical_electrons = atom.radical_electrons[0]\n atom.charge[0] = valence_electron - bond_order - 2 * lone_pairs - radical_electrons\n else:\n # if the group is not specified to specific element, charge will not be updated\n pass", "def give_resp_charges(old_atoms_list, new_charges):\n new_atoms_list = copy.copy(old_atoms_list) \n for index, atom in enumerate(new_atoms_list):\n atom.mm.charge = new_charges[index]\n \n old_charges_sum = new_charges_sum = 0 \n for no, charge in enumerate(new_charges):\n old_charges_sum += old_atoms_list[no].mm.charge\n new_charges_sum += new_atoms_list[no].mm.charge\n\n diff = new_charges_sum - old_charges_sum\n\n no_link_atoms = 0.0\n for atom in old_atoms_list:\n if atom.oniom.link_atom:\n no_link_atoms += 1.0\n \n for atom in new_atoms_list:\n if atom.oniom.link_atom:\n atom.mm.charge = atom.mm.charge - diff/no_link_atoms\n\n return new_atoms_list", "def charge(self, charge=None):\n\n if charge is None:\n return self._charge\n else:\n if not is_numeric(charge):\n raise TypeError(\"charge '{}' is not numeric\".format(charge))\n self._charge = charge", "def assign_charge(self, charge):\n\n self.charge = charge\n intcharge = np.floor(charge)\n deccharge = charge % 1\n self.orbitals = [Orbital(s) for s in fc.orbital_configuration(self.element_symbol, intcharge)]\n if deccharge > 0:\n self.orbitals[-1].remove_electron(deccharge)", "def charge(self):\n self._assertarrays_loaded()\n return self._check_nonempty_property('_charge')", "def charge(posJ,boss):\n d = math.sqrt((posJ[1] - boss.position[1])**2 + (posJ[2] - boss.position[2])**2)\n boss.directionCharge = [(posJ[1]-0.5 - boss.position[1])/d*1/16,(posJ[2]-0.5 - boss.position[2])/d*1/16] \n #definit la direction de la charge\n boss.aura = \"charge\"\n boss.auratimer = 0", "def charge_density(potential):\n result = np.zeros_like(potential)\n\n lengthx, lengthy = potential.shape\n\n for i in range(lengthx):\n for j in range(lengthy):\n v = 0\n if i > 0:\n v += potential[i - 1, j]\n v -= potential[i, j]\n if i < lengthx - 1:\n v += potential[i + 1, j]\n v -= potential[i, j]\n if j > 0:\n v += potential[i, j - 1]\n v -= potential[i, j]\n if j < lengthy - 1:\n v += potential[i, j + 1]\n v -= potential[i, j]\n\n result[i, j] = 
v\n \n return result", "def potential(charge, radius):\r\n e_knot=8.85418782*10**-12\r\n return charge/(4*np.pi*e_knot*radius)", "def gs_charge_dollars(self) -> RangeFilter:\n return self.__gs_charge_dollars", "def gate_drive_charge(self, V_d: float, V_gs: float) -> float:\n C_gd = self.C_rss\n C_gs = self.C_iss - self.C_rss\n C_equiv = C_gs + C_gd * (1 + V_d / V_gs)\n Q_gs = V_gs * C_equiv\n return Q_gs", "def reduce_charges(charges: List[BaseCharge],\n flows: List[bool],\n target_charges: np.ndarray,\n return_locations: Optional[bool] = False,\n strides: Optional[np.ndarray] = None) -> Any:\n\n tensor_dims = [len(c) for c in charges]\n\n if len(charges) == 1:\n # reduce single index\n if strides is None:\n strides = np.array([1], dtype=SIZE_T)\n return charges[0].dual(flows[0]).reduce(\n target_charges, return_locations=return_locations, strides=strides[0])\n\n # find size-balanced partition of charges\n partition = _find_best_partition(tensor_dims)\n\n # compute quantum numbers for each partition\n left_ind = fuse_charges(charges[:partition], flows[:partition])\n right_ind = fuse_charges(charges[partition:], flows[partition:])\n\n # compute combined qnums\n comb_qnums = fuse_ndarray_charges(left_ind.unique_charges,\n right_ind.unique_charges,\n charges[0].charge_types)\n #special case of empty charges\n #pylint: disable=unsubscriptable-object\n if (comb_qnums.shape[1] == 0) or (len(left_ind.charge_labels) == 0) or (len(\n right_ind.charge_labels) == 0):\n obj = charges[0].__new__(type(charges[0]))\n obj.__init__(\n np.empty((charges[0].num_symmetries, 0), dtype=charges[0].dtype),\n np.empty(0, dtype=charges[0].label_dtype), charges[0].charge_types)\n if return_locations:\n return obj, np.empty(0, dtype=SIZE_T)\n return obj\n\n unique_comb_qnums, comb_labels = np.unique(\n comb_qnums, return_inverse=True, axis=1)\n num_unique = unique_comb_qnums.shape[1]\n\n # intersect combined qnums and target_charges\n reduced_qnums, label_to_unique, _ = intersect(\n unique_comb_qnums, target_charges, axis=1, return_indices=True)\n map_to_kept = -np.ones(num_unique, dtype=charges[0].label_dtype)\n map_to_kept[label_to_unique] = np.arange(len(label_to_unique))\n #new_comb_labels is a matrix of shape (left_ind.num_unique, right_ind.num_unique)\n #each row new_comb_labels[n,:] contains integers values. 
Positions where values > 0\n #denote labels of right-charges that are kept.\n new_comb_labels = map_to_kept[comb_labels].reshape(\n [left_ind.num_unique, right_ind.num_unique])\n reduced_rows = [0] * left_ind.num_unique\n\n for n in range(left_ind.num_unique):\n temp_label = new_comb_labels[n, right_ind.charge_labels]\n reduced_rows[n] = temp_label[temp_label >= 0]\n\n reduced_labels = np.concatenate(\n [reduced_rows[n] for n in left_ind.charge_labels])\n obj = charges[0].__new__(type(charges[0]))\n obj.__init__(reduced_qnums, reduced_labels, charges[0].charge_types)\n\n if return_locations:\n row_locs = [0] * left_ind.num_unique\n if strides is not None:\n # computed locations based on non-trivial strides\n row_pos = fuse_stride_arrays(tensor_dims[:partition], strides[:partition])\n col_pos = fuse_stride_arrays(tensor_dims[partition:], strides[partition:])\n for n in range(left_ind.num_unique):\n temp_label = new_comb_labels[n, right_ind.charge_labels]\n temp_keep = temp_label >= 0\n if strides is not None:\n row_locs[n] = col_pos[temp_keep]\n else:\n row_locs[n] = np.where(temp_keep)[0]\n\n if strides is not None:\n reduced_locs = np.concatenate([\n row_pos[n] + row_locs[left_ind.charge_labels[n]]\n for n in range(left_ind.dim)\n ])\n else:\n reduced_locs = np.concatenate([\n n * right_ind.dim + row_locs[left_ind.charge_labels[n]]\n for n in range(left_ind.dim)\n ])\n return obj, reduced_locs\n\n return obj", "def force_partial_charge_computation(mol):\n rdkit_util.compute_charges(mol)", "def set_charge(self, charge: int):\n self[\"FORCE_EVAL\"][\"DFT\"][\"CHARGE\"] = Keyword(\"CHARGE\", int(charge))", "def compute_unique_fused_charges(charges: List[BaseCharge],\n flows: List[bool]) -> BaseCharge:\n if len(charges) == 1:\n return (charges[0] * flows[0]).unique(sort=False)\n\n accumulated_charges = (charges[0] * flows[0]).unique(sort=False)\n for n in range(1, len(charges)):\n leg_charges = charges[n].unique(sort=False)\n fused_charges = accumulated_charges + leg_charges * flows[n]\n accumulated_charges = fused_charges.unique(sort=False)\n return accumulated_charges", "def run_ddec_point_charges(self):\n charge_density = self.ctx.charge_density_calc['remote_folder']\n #options['prepend_text'] = \"export OMP_NUM_THREADS=12\"\n inputs = {\n 'code' : self.inputs.ddec_code,\n 'parameters' : self.inputs.ddec_parameters,\n 'charge_density_folder' : charge_density,\n '_options' : self.inputs.ddec_options.get_dict(),\n '_label' : \"run_pointcharges_ddec\",\n }\n\n # Create the calculation process and launch it\n running = submit(DdecCalculation.process(), **inputs)\n self.report(\"pk: {} | Running ddec to compute point charges based on the charge-density\")\n return ToContext(ddec_calc=Outputs(running))", "def continuous_cdf(s, dist, max_scale=1000):\n cdf = np.zeros_like(s, dtype=object)\n microburst_pdf = lambda x: dist.pdf(x)\n #r = diameter/2\n f = lambda r, s_i:A(r, s_i)*microburst_pdf(r) if ~np.isnan(A(r, s_i)) else 0\n\n for i, s_i in enumerate(s):\n result = scipy.integrate.dblquad(f, s_i, np.inf, lambda x:0, lambda x:max_scale)\n cdf[i] = result[0]\n cdf /= np.max(cdf)\n return cdf", "def gasteiger_charges(mol):\n AllChem.ComputeGasteigerCharges(mol)\n return [\n mol.GetAtomWithIdx(i).GetDoubleProp(\"_GasteigerCharge\") for i in range(mol.GetNumAtoms())\n ]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the charge at a coordinate influenced by atoms at the distances in dists carrying the charges in charges. Inputs dists and charges are 1D arrays of floats of the same length.
>>> from numpy import allclose
>>> dists = array(range(5)) + 0.1
>>> charges = array(range(5))
>>> vdws = array([1] * 5)
>>> c = charge_2(dists, charges)
>>> allclose(c, 0.95120)
True
>>> dists = array(range(10)) + 0.1
>>> charges = array(range(10))
>>> vdws = array([1] * 10)
>>> c = charge_2(dists, charges)
>>> allclose(c, 1.36864)
True
def charge_2(dists, charges):
    d6 = dists <= 6.0
    d8 = dists <= 8.0
    d6_8 = logical_and(logical_not(d6), d8)
    epsilons = (d6*4.0) + \
               d6_8*(38.0*dists-224.0) + \
               logical_not(d8)*80.0
    charge = (charges / ( epsilons * dists ))
    return sum(charge)
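For anyone checking the doctest constants, the second example splits across the three dielectric regimes used above. A small numpy-only sketch that prints the per-atom contributions and confirms the 1.36864 total:

import numpy as np

dists = np.arange(10) + 0.1
charges = np.arange(10, dtype=float)

# Same piecewise dielectric as charge_2, written with np.where for brevity.
eps = np.where(dists <= 6.0, 4.0,
               np.where(dists <= 8.0, 38.0 * dists - 224.0, 80.0))
contrib = charges / (eps * dists)

print(contrib.round(6))
# d <= 6     -> 0.0, 0.227273, 0.238095, 0.241935, 0.243902, 0.245098
# 6 < d <= 8 -> 0.126103, 0.021527
# d > 8      -> 0.012346, 0.012363
print(np.allclose(contrib.sum(), 1.36864))  # True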
[ "def charge_1(dists, charges):\n charge = charges / ( map(epsilon, dists) * dists )\n return sum(charge)", "def update_charge(self):\n for atom in self.atoms:\n if (len(atom.charge) == 1) and (len(atom.lone_pairs) == 1) and (len(atom.radical_electrons) == 1):\n # if the charge of the group is not labeled, then no charge update will be\n # performed. If there multiple charges are assigned, no update either.\n # Besides, this groupatom should have enough information to be updated\n atom_type = atom.atomtype[0]\n for element in allElements:\n if atom_type is ATOMTYPES[element] or atom_type in ATOMTYPES[element].specific:\n bond_order = 0\n valence_electron = elements.PeriodicSystem.valence_electrons[element]\n for _, bond in atom.bonds.items():\n bond_order += bond.order[0]\n lone_pairs = atom.lone_pairs[0]\n radical_electrons = atom.radical_electrons[0]\n atom.charge[0] = valence_electron - bond_order - 2 * lone_pairs - radical_electrons\n else:\n # if the group is not specified to specific element, charge will not be updated\n pass", "def charges(self, molecule):\n\n # TODO add option to use chargemol on onetep cube files.\n copy(f'../density/{molecule.name}.wfx', f'{molecule.name}.wfx')\n c_mol = Chargemol(molecule, self.all_configs)\n c_mol.generate_input()\n\n append_to_log(f'Chargemol analysis with DDEC{self.qm[\"ddec_version\"]} complete')\n\n return molecule", "def _getCosCdist(DS1, DS2, channels=10, doBack2deg=True):\n \n import scipy.spatial.distance as sd\n import numpy as np\n from HA_extFunctions import _ds2deg, _circdescribe\n \n # each phase (ECC, POL) is converted into nChannels values\n # i.e. 1 phaseval (eg 45 deg) is converted into 10 cos values\n # TODO: now takes nChannel for both inputDS -> make it that both can be different\n if not 'channels' in locals(): channels = DS1.a.deg2cos_channels\n \n # just ignore if no ds as input (use it for plottools..)\n DS1.a['deg2cos_channels'] = channels\n DS2.a['deg2cos_channels'] = channels\n\n cosCdist = []\n for mul in [0,1]:\n ds1 = DS1[(mul*channels):(mul+1)*channels]\n ds2 = DS2[(mul*channels):(mul+1)*channels]\n if doBack2deg:\n ## transform the X-channel-cos-values back to degree and correlate the rad-value ##\n # correlate phase values, thus transform back to deg [0 .. 
360]\n ds1 = _ds2deg(ds1)\n ds2 = _ds2deg(ds2)\n \n # if POL values, use cos to get \"circular\" stats\n if mul == 1: \n ds1 = np.cos(np.deg2rad(ds1))\n ds2 = np.cos(np.deg2rad(ds2))\n \n tmp = np.mean(np.diag(sd.cdist(ds1,ds2,'co')))\n else:\n # correlate transformed cos values (nChannels for each ph value)\n # this would implicate circular stats on ECC and POL\n ds1 = ds1.S.reshape(1,ds1.shape[0]*ds1.shape[1])\n ds2 = ds2.S.reshape(1,ds2.shape[0]*ds2.shape[1])\n tmp = sd.cdist(ds1,ds2,'co').squeeze()\n cosCdist.append(tmp)\n\n return cosCdist", "def give_resp_charges(old_atoms_list, new_charges):\n new_atoms_list = copy.copy(old_atoms_list) \n for index, atom in enumerate(new_atoms_list):\n atom.mm.charge = new_charges[index]\n \n old_charges_sum = new_charges_sum = 0 \n for no, charge in enumerate(new_charges):\n old_charges_sum += old_atoms_list[no].mm.charge\n new_charges_sum += new_atoms_list[no].mm.charge\n\n diff = new_charges_sum - old_charges_sum\n\n no_link_atoms = 0.0\n for atom in old_atoms_list:\n if atom.oniom.link_atom:\n no_link_atoms += 1.0\n \n for atom in new_atoms_list:\n if atom.oniom.link_atom:\n atom.mm.charge = atom.mm.charge - diff/no_link_atoms\n\n return new_atoms_list", "def gs_charge_dollars(self) -> RangeFilter:\n return self.__gs_charge_dollars", "def potential(charge, radius):\r\n e_knot=8.85418782*10**-12\r\n return charge/(4*np.pi*e_knot*radius)", "def charge_density(potential):\n result = np.zeros_like(potential)\n\n lengthx, lengthy = potential.shape\n\n for i in range(lengthx):\n for j in range(lengthy):\n v = 0\n if i > 0:\n v += potential[i - 1, j]\n v -= potential[i, j]\n if i < lengthx - 1:\n v += potential[i + 1, j]\n v -= potential[i, j]\n if j > 0:\n v += potential[i, j - 1]\n v -= potential[i, j]\n if j < lengthy - 1:\n v += potential[i, j + 1]\n v -= potential[i, j]\n\n result[i, j] = v\n \n return result", "def rho_HC2GC(s,l,b,d=-8.5):\n if isinstance(l, np.ndarray):\n return np.sqrt(s**2*cos(b)**2 + d**2*np.ones(l.shape[0]) + 2.*s*d*cos(l)*cos(b))\n elif isinstance(b, np.ndarray):\n return np.sqrt(s**2*cos(b)**2 + d**2*np.ones(b.shape[0]) + 2.*s*d*cos(l)*cos(b))\n elif isinstance(s, np.ndarray):\n return np.sqrt(s**2*cos(b)**2 + d**2*np.ones(s.shape[0]) + 2.*s*d*cos(l)*cos(b))\n else:\n return np.sqrt(s**2*cos(b)**2 + d**2 + 2.*s*d*cos(l)*cos(b))", "def assign_charge(self, charge):\n\n self.charge = charge\n intcharge = np.floor(charge)\n deccharge = charge % 1\n self.orbitals = [Orbital(s) for s in fc.orbital_configuration(self.element_symbol, intcharge)]\n if deccharge > 0:\n self.orbitals[-1].remove_electron(deccharge)", "def charge(self, charge=None):\n\n if charge is None:\n return self._charge\n else:\n if not is_numeric(charge):\n raise TypeError(\"charge '{}' is not numeric\".format(charge))\n self._charge = charge", "def gcpoints2(self, lon0, lat0, lon1, lat1, del_s=100., map_coords=True):\n # use great circle formula for a perfect sphere.\n _, _, dist = self.gc.inv(lon0, lat0, lon1, lat1)\n npoints = int((dist + 0.5 * 1000. * del_s) / (1000. 
* del_s))\n if npoints == 0:\n lons = [lon0, lon1]\n lats = [lat0, lat1]\n else:\n lonlats = self.gc.npts(lon0, lat0, lon1, lat1, npoints)\n lons = [lon0]\n lats = [lat0]\n for lon, lat in lonlats:\n lons.append(lon)\n lats.append(lat)\n lons.append(lon1)\n lats.append(lat1)\n if map_coords:\n x, y = self(lons, lats)\n else:\n x, y = (lons, lats)\n return x, y", "def reduce_charges(charges: List[BaseCharge],\n flows: List[bool],\n target_charges: np.ndarray,\n return_locations: Optional[bool] = False,\n strides: Optional[np.ndarray] = None) -> Any:\n\n tensor_dims = [len(c) for c in charges]\n\n if len(charges) == 1:\n # reduce single index\n if strides is None:\n strides = np.array([1], dtype=SIZE_T)\n return charges[0].dual(flows[0]).reduce(\n target_charges, return_locations=return_locations, strides=strides[0])\n\n # find size-balanced partition of charges\n partition = _find_best_partition(tensor_dims)\n\n # compute quantum numbers for each partition\n left_ind = fuse_charges(charges[:partition], flows[:partition])\n right_ind = fuse_charges(charges[partition:], flows[partition:])\n\n # compute combined qnums\n comb_qnums = fuse_ndarray_charges(left_ind.unique_charges,\n right_ind.unique_charges,\n charges[0].charge_types)\n #special case of empty charges\n #pylint: disable=unsubscriptable-object\n if (comb_qnums.shape[1] == 0) or (len(left_ind.charge_labels) == 0) or (len(\n right_ind.charge_labels) == 0):\n obj = charges[0].__new__(type(charges[0]))\n obj.__init__(\n np.empty((charges[0].num_symmetries, 0), dtype=charges[0].dtype),\n np.empty(0, dtype=charges[0].label_dtype), charges[0].charge_types)\n if return_locations:\n return obj, np.empty(0, dtype=SIZE_T)\n return obj\n\n unique_comb_qnums, comb_labels = np.unique(\n comb_qnums, return_inverse=True, axis=1)\n num_unique = unique_comb_qnums.shape[1]\n\n # intersect combined qnums and target_charges\n reduced_qnums, label_to_unique, _ = intersect(\n unique_comb_qnums, target_charges, axis=1, return_indices=True)\n map_to_kept = -np.ones(num_unique, dtype=charges[0].label_dtype)\n map_to_kept[label_to_unique] = np.arange(len(label_to_unique))\n #new_comb_labels is a matrix of shape (left_ind.num_unique, right_ind.num_unique)\n #each row new_comb_labels[n,:] contains integers values. 
Positions where values > 0\n #denote labels of right-charges that are kept.\n new_comb_labels = map_to_kept[comb_labels].reshape(\n [left_ind.num_unique, right_ind.num_unique])\n reduced_rows = [0] * left_ind.num_unique\n\n for n in range(left_ind.num_unique):\n temp_label = new_comb_labels[n, right_ind.charge_labels]\n reduced_rows[n] = temp_label[temp_label >= 0]\n\n reduced_labels = np.concatenate(\n [reduced_rows[n] for n in left_ind.charge_labels])\n obj = charges[0].__new__(type(charges[0]))\n obj.__init__(reduced_qnums, reduced_labels, charges[0].charge_types)\n\n if return_locations:\n row_locs = [0] * left_ind.num_unique\n if strides is not None:\n # computed locations based on non-trivial strides\n row_pos = fuse_stride_arrays(tensor_dims[:partition], strides[:partition])\n col_pos = fuse_stride_arrays(tensor_dims[partition:], strides[partition:])\n for n in range(left_ind.num_unique):\n temp_label = new_comb_labels[n, right_ind.charge_labels]\n temp_keep = temp_label >= 0\n if strides is not None:\n row_locs[n] = col_pos[temp_keep]\n else:\n row_locs[n] = np.where(temp_keep)[0]\n\n if strides is not None:\n reduced_locs = np.concatenate([\n row_pos[n] + row_locs[left_ind.charge_labels[n]]\n for n in range(left_ind.dim)\n ])\n else:\n reduced_locs = np.concatenate([\n n * right_ind.dim + row_locs[left_ind.charge_labels[n]]\n for n in range(left_ind.dim)\n ])\n return obj, reduced_locs\n\n return obj", "def gasteiger_charges(mol):\n AllChem.ComputeGasteigerCharges(mol)\n return [\n mol.GetAtomWithIdx(i).GetDoubleProp(\"_GasteigerCharge\") for i in range(mol.GetNumAtoms())\n ]", "def compute_fused_charge_degeneracies(\n charges: List[BaseCharge],\n flows: List[bool]) -> Tuple[BaseCharge, np.ndarray]:\n if len(charges) == 1:\n return (charges[0] * flows[0]).unique(return_counts=True, sort=False)\n\n # get unique charges and their degeneracies on the first leg.\n # We are fusing from \"left\" to \"right\".\n accumulated_charges, accumulated_degeneracies = (\n charges[0] * flows[0]).unique(\n return_counts=True, sort=False)\n for n in range(1, len(charges)):\n leg_charges, leg_degeneracies = charges[n].unique(\n return_counts=True, sort=False)\n fused_charges = accumulated_charges + leg_charges * flows[n]\n fused_degeneracies = fuse_degeneracies(accumulated_degeneracies,\n leg_degeneracies)\n accumulated_charges = fused_charges.unique(sort=False)\n accumulated_degeneracies = np.empty(len(accumulated_charges), dtype=SIZE_T)\n\n accumulated_degeneracies = np.array([\n np.sum(fused_degeneracies[fused_charges.charge_labels ==\n accumulated_charges.charge_labels[m]])\n for m in range(len(accumulated_charges))\n ])\n\n return accumulated_charges, accumulated_degeneracies", "def charge(posJ,boss):\n d = math.sqrt((posJ[1] - boss.position[1])**2 + (posJ[2] - boss.position[2])**2)\n boss.directionCharge = [(posJ[1]-0.5 - boss.position[1])/d*1/16,(posJ[2]-0.5 - boss.position[2])/d*1/16] \n #definit la direction de la charge\n boss.aura = \"charge\"\n boss.auratimer = 0", "def gate_drive_charge(self, V_d: float, V_gs: float) -> float:\n C_gd = self.C_rss\n C_gs = self.C_iss - self.C_rss\n C_equiv = C_gs + C_gd * (1 + V_d / V_gs)\n Q_gs = V_gs * C_equiv\n return Q_gs", "def charge(self):\n self._assertarrays_loaded()\n return self._check_nonempty_property('_charge')", "def force_closure(p1, p2, n1, n2, mu):\n # line between the contacts \n v = p2 - p1\n v = v / np.linalg.norm(v)\n \n # compute cone membership\n alpha = np.arctan(mu)\n in_cone_1 = (np.arccos(n1.dot(-v)) < alpha)\n in_cone_2 = 
(np.arccos(n2.dot(v)) < alpha)\n return (in_cone_1 and in_cone_2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs entity linking for the query.
def link(self, query):
    q = Query(query)
    linker = self.__get_linker(q)
    linked_ens = linker.link()
    res = {"query": q.raw_query, "processed_query": q.query, "results": linked_ens}
    return res
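A minimal, self-contained sketch of the same flow with stub classes; Query and DictionaryLinker here are hypothetical stand-ins for whatever the snippet's Query class and __get_linker() actually provide, and only the shape of the returned dict mirrors the original.

class Query:
    # Hypothetical stand-in: keeps the raw string and a lower-cased, stripped form.
    def __init__(self, raw_query):
        self.raw_query = raw_query
        self.query = raw_query.strip().lower()

class DictionaryLinker:
    # Hypothetical stand-in: links mentions by exact lookup in a tiny knowledge base.
    def __init__(self, query, kb):
        self._query, self._kb = query, kb

    def link(self):
        return [self._kb[tok] for tok in self._query.query.split() if tok in self._kb]

def link(query, kb):
    q = Query(query)
    linker = DictionaryLinker(q, kb)
    return {"query": q.raw_query, "processed_query": q.query, "results": linker.link()}

print(link("Albert Einstein", {"einstein": "<dbpedia:Albert_Einstein>"}))
# {'query': 'Albert Einstein', 'processed_query': 'albert einstein',
#  'results': ['<dbpedia:Albert_Einstein>']}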
[ "def bind_to_graph(self, entity):", "def _add_related_link_to_entity(self, entity: wdi_core.WDItemEngine, uri: str):\n rel_link = wdi_core.WDUrl(value=uri, prop_nr=self._related_link_prop)\n entity.update([rel_link], append_value=[self._related_link_prop])", "def link(self):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.object.link\", \r\n self._object._eco_id)\r\n val = p2e._app.Request(arg_str)\r\n return p2e._base._util._convert_str_to_type(val, int)", "def _link_associations_with_classes(self):\n log(f\"Try linking classes with associations\")\n class_entities = self.get_generic_entities(types=[ClassDiagramTypes.CLASS_ENTITY])\n assoc_entities = self.get_generic_entities(types=[ClassDiagramTypes.ASSOCIATION_ENTITY])\n advanced_entities = self.get_generic_entities(types=[ClassDiagramTypes.ASSOCIATION_ENTITY_ADVANCED])\n\n # Link class entities with remaining associations\n for c in class_entities:\n class_bounding_box = c.bounding_box(adjustment=constants.BOUNDING_BOX_ADJUSTMENT)\n\n # ... with advanced associations\n for a in advanced_entities:\n for advanced_shape in a.shapes:\n if type(advanced_shape) is Shape:\n advanced_bounding_box = advanced_shape.bounding_box()\n\n if util.do_bounding_boxes_intersect(advanced_bounding_box, class_bounding_box) or util.do_bounding_boxes_intersect(class_bounding_box, advanced_bounding_box):\n a.set(ClassDiagramConverter.STR_ASSOC_FROM, c)\n\n elif type(advanced_shape) is Line:\n line_start = advanced_shape.start_xy()\n line_end = advanced_shape.end_xy()\n\n if util.is_point_in_area(line_start, class_bounding_box) or util.is_point_in_area(line_end, class_bounding_box):\n a.set(ClassDiagramConverter.STR_ASSOC_TO, c)\n\n # ... with simple associations\n for a in assoc_entities:\n line = a.shapes[0] # GenericEntity of type ASSOCIATION_ENTITY always has just one shape, which is a Line\n line_start = line.start_xy()\n line_end = line.end_xy()\n\n if util.is_point_in_area(line_start, class_bounding_box):\n a.set(ClassDiagramConverter.STR_ASSOC_FROM, c)\n log(\"FROM association found\")\n\n elif util.is_point_in_area(line_end, class_bounding_box):\n a.set(ClassDiagramConverter.STR_ASSOC_TO, c)\n log(\"TO association found\")", "def add_entity(self, entity_id, mentions, label=None):\n graph = self.graph\n # Create the node that links all mentions as an entity.\n new_node = GraphWrapper.new_node(graph=graph,\n node_type=self.entity_node_type,\n node_id=entity_id,\n label=label\n )\n # Mention lis is a list of mention node identifiers\n for mention in mentions:\n GraphWrapper.link(self.graph, entity_id, mention, self.entity_edge_type, label=self.entity_edge_label)\n return new_node", "def link(self, db, name):\n self._link(db, name.encode())", "def _evaluate_link(self):\n\n #\n # Read and validate parameters\n #\n\n column_name = self.id\n\n definition = self.column_json\n\n main_table = self.table\n\n main_keys = definition.get('keys', [])\n if not all_columns_exist(main_keys, main_table.data):\n log.error(\"Not all key columns available in the link column definition.\".format())\n return\n\n linked_table_name = definition.get('linked_table', '')\n linked_table = self.table.workflow.get_table(linked_table_name)\n if not linked_table:\n log.error(\"Linked table '{0}' cannot be found in the link column definition..\".format(linked_table))\n return\n\n linked_keys = definition.get('linked_keys', [])\n if not all_columns_exist(linked_keys, linked_table.data):\n log.error(\"Not all linked key columns available in the link column 
definition.\".format())\n return\n\n #\n # 1. Create a column with index values in the target table with the name of the link column.\n #\n \"\"\"\n INFO:\n df['index1'] = df.index # Copy\n # Use df.reset_index for converting existing index to a column. After that, we AGAIN create an index.\n # The goal is to preserve the old index even if it is not a continuous range\n df.reset_index().set_index('index', drop=False)\n # Or\n df.reset_index(inplace=True)\n df.set_index('index', drop=False, inplace=True)\n # Or\n df = df.rename_axis('index1').reset_index() # New index1 column will be created\n \"\"\"\n\n index_column_name = '__row_id__' # It could be 'id', 'index' or whatever other convention\n linked_table.data[index_column_name] = linked_table.data.index\n\n #\n # 2. Create left join on the specified keys\n #\n\n linked_prefix = column_name+'::' # It will prepended to each linked (secondary) column name\n\n out_df = pd.merge(\n main_table.data, # This table\n linked_table.data.rename(columns=lambda x: linked_prefix + x, inplace=False), # Target table to link to. We rename columns (not in place - the original frame preserves column names)\n how='left', # This (main) table is not changed - we attach target records\n left_on=main_keys, # List of main table key columns\n right_on= [linked_prefix + x for x in linked_keys], # List of target table key columns. Note that we renamed them above so we use modified names.\n left_index=False,\n right_index=False,\n #suffixes=('', linked_suffix), # We do not use suffixes because they cannot be enforced (they are used only in the case of equal column names)\n sort=False # Sorting decreases performance\n )\n # Here we get linked column names like 'Prefix::OriginalName'\n\n #\n # 3. Rename according to our convention and store the result\n #\n\n # Rename our link column by using only specified column name\n out_df.rename({column_name+'::'+index_column_name: column_name}, axis='columns', inplace=True)\n\n out = out_df[column_name]\n\n # Store the result df with all target columns (in the case they are used in other definitions)\n #main_table.data = out_df\n # ??? If the result df includes all columns of this df, then why not to simply replace this df by the new df?\n # ??? What if this df already has some linked (tareget) columns from another table attached before?\n # ??? 
What if the target table already has linked (target) columns from its own target table (recursive)?\n\n return out", "def render_link(self, link):\n\n for region in link:\n return Element('link', {'targets': region.id})", "def link_relation(entity, object_to_link, relation_attr, fk_field=None):\n if ( object_to_link is not None ) and ( object_to_link.id is not None ):\n if fk_field is not None:\n setattr(entity, fk_field, object_to_link.id)\n else:\n setattr(entity, relation_attr+\"_id\", object_to_link.id)\n else:\n setattr(entity, relation_attr, object_to_link)", "def create_link(self, child):\r\n arg_str = p2e._base._util._convert_args_to_string(\"object.link\", \r\n self._object._eco_id, \r\n child._eco_id)\r\n p2e._app.Exec(arg_str)", "def resolve(self, raise_exc=False):\n if self._entity is None:\n self._entity = SecretLink.query.get(self.link_id)\n\n if self._entity is None and raise_exc:\n raise LookupError(\n \"could not find link: {}\".format(self.dump())\n )\n\n return self._entity", "def entity_references(entity, authz):\n schema = model[entity.get('schema')]\n\n # Generate all the possible mention locations.\n properties = []\n queries = []\n for prop in model.properties:\n if prop.type != registry.entity:\n continue\n if not schema.is_a(prop.range):\n continue\n\n field = 'properties.%s' % prop.name\n queries.append({\n 'index': entities_read_index(prop.schema)\n })\n queries.append({\n 'size': 0,\n 'query': {\n 'bool': {\n 'filter': [\n authz_query(authz),\n {'term': {'schemata': prop.schema.name}},\n {'term': {field: entity.get('id')}},\n ]\n }\n }\n })\n properties.append(prop)\n\n if not len(queries):\n return\n\n # Run a count search (with schema facet?)\n res = es.msearch(body=queries)\n for prop, resp in zip(properties, res.get('responses', [])):\n total = resp.get('hits', {}).get('total')\n if total is not None and total > 0:\n yield (prop, total)", "def link_items( # noqa\n context,\n source_obj,\n target_obj,\n relationship=None,\n fieldname='relatedItems',\n):\n # relations from AT to DX and from DX to AT are only possible through\n # the referenceable-behavior:\n # plone.app.referenceablebehavior.referenceable.IReferenceable\n drop_msg = \"\"\"Dropping reference from %s to %s since\n plone.app.referenceablebehavior is not enabled!\"\"\"\n\n if source_obj is target_obj:\n # Thou shalt not relate to yourself.\n return\n\n # if fieldname != 'relatedItems':\n # 'relatedItems' is the default field for AT and DX\n # See plone.app.relationfield.behavior.IRelatedItems for DX and\n # Products.ATContentTypes.content.schemata.relatedItemsField for AT\n # They always use these relationships:\n # 'relatesTo' (Archetpyes) and 'relatedItems' (Dexterity)\n # Maybe be we should handle custom relations somewhat different?\n\n if relationship in ['relatesTo', 'relatedItems']:\n # These are the two default-relationships used by AT and DX\n # for the field 'relatedItems' respectively.\n pass\n\n if IDexterityContent.providedBy(source_obj):\n source_type = 'DX'\n else:\n source_type = 'AT'\n\n if IDexterityContent.providedBy(target_obj):\n target_type = 'DX'\n else:\n target_type = 'AT'\n\n if relationship == referencedRelationship:\n # 'relatesTo' is the relationship for linkintegrity-relations.\n # Linkintegrity-relations should automatically be (re)created by\n # plone.app.linkintegrity.handlers.modifiedDexterity or\n # plone.app.linkintegrity.handlers.modifiedArchetype\n # when a ObjectModifiedEvent is thrown.\n # These relations are only created if the source has a 
richtext-field\n # with a link to the target and should not be created manually.\n if source_type == 'AT':\n modifiedArchetype(source_obj, None)\n if source_type == 'DX' and is_referenceable(source_obj):\n modifiedDexterity(source_obj, None)\n return\n\n if source_type == 'AT':\n # If there is any Archetypes-content there is also the\n # reference_catalog and the uid_catalog.\n # For a site without AT content these might not be there at all.\n reference_catalog = getToolByName(context, REFERENCE_CATALOG)\n uid_catalog = getToolByName(context, 'uid_catalog')\n if target_type == 'DX' and not is_referenceable(target_obj):\n logger.info(drop_msg % (\n source_obj.absolute_url(), target_obj.absolute_url()))\n return\n\n # Make sure both objects are properly indexed and referenceable\n # Some objects that werde just created (migrated) are not yet\n # indexed properly.\n source_uid = IUUID(source_obj)\n target_uid = IUUID(target_obj)\n _catalog = uid_catalog._catalog\n\n if not _catalog.indexes['UID']._index.get(source_uid):\n uid_catalog.catalog_object(source_obj, source_uid)\n modified(source_obj)\n\n if not _catalog.indexes['UID']._index.get(target_uid):\n uid_catalog.catalog_object(target_obj, target_uid)\n modified(target_obj)\n\n field = source_obj.getField(fieldname)\n if field is None:\n # We can't migrate if it doesn't actually have the field\n return\n\n accessor = field.getAccessor(source_obj)\n existing_at_relations = accessor()\n\n if not isinstance(existing_at_relations, list):\n existing_at_relations = [i for i in existing_at_relations]\n if not existing_at_relations:\n existing_at_relations = []\n if target_obj in existing_at_relations:\n # don't do anything\n return\n\n target_uid = IUUID(target_obj)\n targetUIDs = [ref.targetUID for ref in reference_catalog.getReferences(\n source_obj, relationship)]\n if target_uid in targetUIDs:\n # Replace relations since is probably broken.\n reference_catalog.deleteReference(\n source_obj, target_uid, relationship)\n\n existing_at_relations.append(target_obj)\n mutator = field.getMutator(source_obj)\n mutator(existing_at_relations)\n modified(source_obj)\n return\n\n if source_type is 'DX':\n if target_type is 'AT' and not is_referenceable(source_obj):\n logger.info(drop_msg % (\n source_obj.absolute_url(), target_obj.absolute_url()))\n return\n # handle dx-relation\n intids = getUtility(IIntIds)\n to_id = intids.getId(target_obj)\n existing_dx_relations = getattr(source_obj, fieldname, [])\n # purge broken relations\n existing_dx_relations = [\n i for i in existing_dx_relations if i.to_id is not None]\n\n if to_id not in [i.to_id for i in existing_dx_relations]:\n existing_dx_relations.append(RelationValue(to_id))\n setattr(source_obj, fieldname, existing_dx_relations)\n modified(source_obj)\n return", "def after_get_relationship(self, obj, related_objects, relationship_field, related_type_, related_id_field,\n view_kwargs):\n raise NotImplementedError", "def link_to_order():", "def link_housing(tx):\n tx.run(\n \"MATCH (p:people {name:'Snow White'}), (h:house {name:'Castle'}) \"\n \"create (p)-[r:LIVES_IN]->(h) \"\n )\n tx.run(\n \"MATCH (p:people {gender:'M'}), (h:house {name:'Dwarf House'}) \"\n \"create (p)-[r:LIVES_IN]->(h) \"\n )\n tx.run(\n \"MATCH (p:people {gender:'F'}), (h:house {name:'Dwarf House'}) \"\n \"create (p)-[r:WORKS_IN]->(h) \"\n )\n tx.run(\n \"MATCH (p:people {gender:'M'}), (h:house {name:'Mine'}) \"\n \"create (p)-[r:WORKS_IN]->(h) \"\n )", "def links(self):\n container = IContainer(self.thing)\n if 
container.closed:\n for content in container.getContents():\n link = Link(self.thing.idea, content.idea)\n link.annotate([_ObstructedByGlass(),\n ContainmentRelationship(container,\n content)])\n yield link", "def add_link_to(self, node):\r\n Link(self, node)", "def search_links(self) -> None:\n # connect to LinkedIn\n self.connect()\n logging.info('Inspect job search results')\n # Scroll down the `infinit` page\n self.scroll_job_results()\n # Collects all the links toward job ad pages\n self.job_links = self.get_job_links()\n\n logging.info('All available jobs ads collected.')\n # teminates the bot\n self.driver.close()\n # self.save_job_links(self.job_links) # save the links" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verification of the response after the timeadmin entity updates the user rate in the timesheet for the given timesheet row on a specific day and a specific task
def verify_update(self, response):
    self.status = True
    self.step_desc = 'Timeadmin updation of user rate in timesheet - verification'
    self.remarks = '\n Inside class: %s method: %s \n' % utils.get_method_class_names()
    self.step_input = '\n Response \n{}\n'.format(response.text)
    if response.status_code == 200:
        self.remarks += 'Timeadmin updated successfully'
    else:
        self.status = False
        self.remarks += 'Timeadmin updation of user rate in timesheet failed : {}'.format(response.text)
    db_wrapper.log_into_steps(self.request, self)
    assert self.status
[ "def task_updated(cls, task):\n if task.job is None or task.job.booking is None:\n return\n if task.job.is_fulfilled():\n if task.job.booking.end < timezone.now():\n if Emailed.objects.filter(end_booking=task.job.booking).exists():\n return\n Emailed.objects.create(end_booking=task.job.booking)\n cls.email_booking_over(task.job.booking)\n if task.job.booking.end > timezone.now() and task.job.booking.start < timezone.now():\n if Emailed.objects.filter(begin_booking=task.job.booking).exists():\n return\n Emailed.objects.create(begin_booking=task.job.booking)\n cls.email_job_fulfilled(task.job)", "def post_interval_hr():\n \n try:\n r = request.get_json()\n email = r[\"user_email\"]\n time_cuttoff = r[\"heart_rate_average_since\"]\n \n user_exist(email) \n\n hr = return_all_hr(email)\n timestamps = return_all_times(email)\n age = return_user_age(email)\n \n index = find_time_index(time_cuttoff, timestamps)\n hr_int = return_interval_hr(index, hr) \n \n x= is_tachy(hr_int, age)\n\n if (x == 1):\n status = 'Alert Tachycardic'\n else:\n status= 'not tachycardic'\n\n print_vals = {\n \"avg_hr_interval\" : hr_int,\n \"heart_rate_average_since\": time_cuttoff,\n \"tachycardic?\" :status\n }\n \n return jsonify(print_vals), 200\n\n except:\n return 400", "def weeksnapshot_post_final_status_update(sender, **kwargs):\n if kwargs['status'] == ApproverQueue.approved_status():\n weeksnapshot = kwargs['instance']\n #check if user has overtime policy set\n user_overtime_policy = UserOverTimePolicy.objects.get(user_profile=weeksnapshot.user.get_profile())\n if user_overtime_policy:\n \n timesheet_type = ContentType.objects.get_for_model(Timesheet)\n week_type = ContentType.objects.get_for_model(WeekSnapshot)\n \n #get the ruleset and run thru the validation\n conditions = user_overtime_policy.overtime_policy.overtime_policy_conditions.all()\n overtime_hours = 0\n banked_hours = 0\n for condition in conditions:\n if condition.ruleset.content_type == timesheet_type:\n for timesheet in weeksnapshot.timesheets: \n if not timesheet.is_timeoff: \n validated_instance = GenericAspect.validate((condition.ruleset,), timesheet)\n overtime_hours, banked_hours = tally(validated_instance, condition, overtime_hours, banked_hours) \n elif condition.ruleset.content_type == week_type:\n validated_instance = GenericAspect.validate((condition.ruleset,), weeksnapshot)\n overtime_hours, banked_hours = tally(validated_instance, condition, overtime_hours, banked_hours)\n \n #update the user's timeoff policy linked to the overtime. 
only update if banked \n if banked_hours:\n user_overtime_policy.bank_user_timeoff_policy.time_remaining += banked_hours\n user_overtime_policy.bank_user_timeoff_policy.save()\n \n #TODO: poor guy, what to do with overtime house and/or if no timeoff policy is set", "def _update_user_requests(self):\n\n if self.daytime % 120 == 0:\n self.user_requests['temp_desired'] = random.randint(15, 20)\n\n if self.day_start < self.daytime < self.day_end:\n self.user_requests['temp_desired'] += 4\n\n self.user_requests['light_desired'] = round(random.random(), 1)", "def test_task_renew_times(self):\n\n # login testuser\n self.client.login(username='testuser_task', password='8dR7ilC8cnCr8U2aq14V')\n # get user\n test_user = User.objects.get(username = 'testuser_task')\n # create object\n taskname_task_renew = Taskname.objects.create(taskname_name = 'task_renew')\n # get object\n taskpriority_1 = Taskpriority.objects.get(taskpriority_name = 'prio_1')\n # get object\n taskstatus_done = Taskstatus.objects.get(taskstatus_name = '30_done')\n # create object\n task_task_renew = Task.objects.create(\n taskname = taskname_task_renew,\n taskpriority = taskpriority_1,\n taskstatus = taskstatus_done,\n task_created_by_user_id = test_user,\n task_modified_by_user_id = test_user,\n task_started_time = timezone.now(),\n )\n # get response\n self.client.get('/task/' + str(task_task_renew.task_id) + '/renew/')\n # get object\n task_renewed = Task.objects.get(task_id = task_task_renew.task_id)\n # compare\n self.assertEqual(task_renewed.task_started_time, None)\n self.assertEqual(task_renewed.task_finished_time, None)", "def trigger_request(self, date, text, expected):\n request = MockRequestTask('update-stats', [date], date)\n out = views.task_update_stats(request)\n self.assertEqual(200, out.status_code)\n actual = list(out)\n self.assertEqual(1, len(actual))\n\n stats = models.AccountStatsDay.query().fetch()\n self.assertTrue(isinstance(expected, list))\n # Make a copy so |expected| is not modified.\n expected = [i.copy() for i in expected]\n for i in expected:\n i['user'] = str(i['user'].email)\n i.setdefault('issues', [4])\n i.setdefault('latencies', [-1])\n i.setdefault('lgtms', [0])\n i.setdefault('name', date)\n i.setdefault('score', models.AccountStatsBase.NULL_SCORE)\n self.assertEqual(expected, [views.stats_to_dict(s) for s in stats])\n # Check the HTTP request reply at the end, because it's more cosmetic than\n # the actual entities.\n self.assertTrue(\n re.match('^' + re.escape(date + '\\n' + text) + 'In \\\\d+\\\\.\\\\ds\\n$', actual[0]),\n actual[0])", "def OutAttendanceUtils(data_dict, up):\n\n da = DailyAttendance()\n current_date = datetime.datetime.now().date()\n a_obj_list = DailyAttendance.objects.filter(user=up)\n error = None\n\n for da in a_obj_list:\n if da.in_time.date() == current_date:\n if da.out_time == None:\n da.out_time = datetime.datetime.now()\n if data_dict['out_comment']:\n da.out_comment = data_dict['out_comment']\n if data_dict['is_half_day']:\n da.is_half_day = True\n da.save() \n return 1\n else:\n return 2\n return 2", "def test_approve_other_user(self):\r\n entry = factories.Entry(**{\r\n 'user': self.user2,\r\n 'start_time': self.now - relativedelta(hours=1),\r\n 'end_time': self.now\r\n })\r\n\r\n response = self.client.get(self.approve_url())\r\n self.assertEquals(response.status_code, 403)\r\n\r\n response = self.client.post(self.approve_url(), {'do_action': 'Yes'})\r\n self.assertEquals(response.status_code, 403)\r\n self.assertNotEquals(entry.status, Entry.APPROVED)\r\n 
self.assertContains(response,\r\n 'Forbidden: You cannot approve this timesheet',\r\n status_code=403\r\n )", "def test_update_time_tracking_entry(self):\n pass", "def Upd_non_hourly_task(user_id, habit_id):\n #Need to add database to track users info before writing this code\n #also need to add keeping track of user_id everywhere I grab username\n return True", "def test_edit_invoiced_entry(self):\r\n self.client.logout()\r\n self.login_user(self.superuser)\r\n\r\n url, entry, data = self.edit_entry_helper(Entry.INVOICED)\r\n\r\n response = self.client.post(url, data=data, follow=True)\r\n self.assertEqual(response.status_code, 200)\r\n\r\n msg = 'You cannot add/edit entries after a timesheet has been ' \\\r\n 'approved or invoiced. Please correct the start and end times.'\r\n self.assertContains(response, msg)", "def test_web_task_edit_status_to_in_progress(\n webapp, new_user, new_task_done_three_tags\n):\n webapp.homepage()\n webapp.sign_in(new_user.username, new_user.password)\n task = webapp.taskboard.find_task(new_task_done_three_tags.title)\n assert task.done is True\n task.edit(done=False)\n assert task.done is False", "def verifyStudent(curs, sid,hour_rate):\n curs.execute('UPDATE students set verified=1, hour_rate=%s WHERE sid=%s',(hour_rate,sid,))", "def submit_final_attendance(self):\n try:\n self.daily_attendance.update_checkout()\n return True\n except ObjectsNotValidatedError as e:\n return e.__str__().split(\":\")[1]", "def approve_time_sheet(request):\n # Behavior for updating database entries\n if request.user.is_authenticated and check_user_group(request.user, \"Manager\"):\n # If the user is sending a post request, make changes to the database.\n if request.method == \"POST\":\n time_sheet_id = request.POST['time_sheet_id']\n time_sheet = TimeSheetApprovals.objects.get(time_sheet_approvals_id=time_sheet_id)\n if \"approve\" in request.POST:\n time_sheet.status = \"Approved\"\n if \"reject\" in request.POST:\n time_sheet.status = \"Denied\"\n time_sheet.save()\n\n # HTTP None, Default behavior: Load all pending and processed time sheets.\n pending_time_sheets = TimeSheetApprovals.objects.filter(status=\"Pending\")\n processed_time_sheets = TimeSheetApprovals.objects.exclude(status=\"Pending\")\n time_sheet_approvals = TimeSheetApprovals.objects.all()\n\n # Load all approved time sheets.\n context = {\n 'time_sheet_approvals': time_sheet_approvals,\n 'pending_time_sheets': pending_time_sheets,\n 'processed_time_sheets': processed_time_sheets\n }\n return render(request, 'approvalstimesheets.html', context)\n else:\n return redirect(login_user)", "def onboard_task_update(context, task_id, values, session=None):\n values = dict([(k, v) for k, v in values.iteritems() if v is not None])\n status = values.get('status', '')\n #If this is a final status, then set the end date/time\n if status == 'completed' or status == 'failed':\n values['ended'] = timeutils.utcnow()\n if not session:\n session = nova_db_sa_api.get_session()\n with session.begin():\n query = model_query(\n context, pvc_models.OnboardTaskDTO, session=session)\n task_ref = query.filter_by(id=task_id).first()\n task_ref.update(values)\n task_ref.save(session=session)\n return task_ref", "def test_successful_update(self):\n\n manager = SchedulerManager()\n manager.sync_with_database()", "def test_update_decommission_dates(self):\n well = Well.objects.create(create_user=self.user.username, update_user=self.user.username)\n data = {\n 'well': well.well_tag_number,\n 'decommission_start_date': '1999-05-05',\n 
'decommission_end_date': '1999-06-06'\n }\n response = self.client.post(reverse('STAFF_EDIT', kwargs={'version': 'v1'}), data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n well = Well.objects.get(well_tag_number=well.well_tag_number)\n self.assertEqual(well.decommission_start_date, datetime.date(1999, 5, 5))\n self.assertEqual(well.decommission_end_date, datetime.date(1999, 6, 6))", "def test_web_task_edit_status_to_done(webapp, new_user, new_task):\n webapp.homepage()\n webapp.sign_in(new_user.username, new_user.password)\n task = webapp.taskboard.find_task(new_task.title)\n assert task.done is False\n task.edit(done=True)\n assert task.done is True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create new instance licenses from template licenses
def _init_instance_licenses(self):
    for template_license in self.template.template_licenses.all():
        InstanceLicense.objects.create(
            instance=self,
            template_license=template_license,
            setup_fee=template_license.setup_fee,
            monthly_fee=template_license.monthly_fee,
        )
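For context, a sketch of a Django models.py under which this loop works; every model and field name not visible in the snippet (Template, Instance, the fee field types, the related_name values) is an assumption.

from django.db import models

class Template(models.Model):
    name = models.CharField(max_length=255)

class TemplateLicense(models.Model):
    # Reverse accessor yields template.template_licenses.all() as used above.
    template = models.ForeignKey(Template, related_name='template_licenses',
                                 on_delete=models.CASCADE)
    setup_fee = models.DecimalField(max_digits=10, decimal_places=2)
    monthly_fee = models.DecimalField(max_digits=10, decimal_places=2)

class Instance(models.Model):
    template = models.ForeignKey(Template, on_delete=models.CASCADE)

    def _init_instance_licenses(self):
        # Copy each template license onto this instance, freezing the fees.
        for template_license in self.template.template_licenses.all():
            InstanceLicense.objects.create(
                instance=self,
                template_license=template_license,
                setup_fee=template_license.setup_fee,
                monthly_fee=template_license.monthly_fee,
            )

class InstanceLicense(models.Model):
    instance = models.ForeignKey(Instance, related_name='instance_licenses',
                                 on_delete=models.CASCADE)
    template_license = models.ForeignKey(TemplateLicense, on_delete=models.CASCADE)
    setup_fee = models.DecimalField(max_digits=10, decimal_places=2)
    monthly_fee = models.DecimalField(max_digits=10, decimal_places=2)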
[ "def test_create_iceberg_add_license_from_file(self):\n pass", "def _add_licenses(crate, crate_entity, file_object, registry_url):\n license_entities = []\n try:\n licenses = file_object.licences.all()\n except AttributeError:\n licenses = []\n\n for license_ in licenses:\n if license_.identifier is not None:\n license_id = license_.identifier\n else:\n license_id = f\"{registry_url}api/license/{license_.id}\"\n\n license_entity = ContextEntity(\n crate,\n license_id,\n properties={\n RO_TYPE: \"CreativeWork\",\n \"description\": license_.licence_info,\n \"identifier\": license_id,\n \"name\": f\"license {license_.id}\",\n },\n )\n\n crate.add(license_entity)\n license_entities.append(license_entity)\n\n # where the crate_entity is a crate we are adding the licenses from all the\n # data products in turn, so there may already be some there\n if isinstance(crate_entity, ROCrate) and crate_entity.license is not None:\n license_entities.extend(crate_entity.license)\n\n if len(license_entities) == 1:\n if isinstance(crate_entity, ROCrate):\n crate_entity.license = license_entities[0]\n else:\n crate_entity[\"license\"] = license_entities[0]\n\n elif len(license_entities) > 1:\n if isinstance(crate_entity, ROCrate):\n crate_entity.license = license_entities\n else:\n crate_entity[\"license\"] = license_entities", "def test_create_new_license_by_authenticated_user_passes(self):\n response = self.client.post(\n self.all_licences_url,\n headers={\"Authorization\": self.test_user_token},\n json={\n \"description\": NEW_LICENSE_DESCRIPTION,\n \"name\": NEW_LICENSE_NAME,\n \"plainText\": \"\",\n },\n )\n response_body = response.get_json()\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response_body, {\"licenseId\": 2})", "def edit_licenses():\n return render_template('admin/edit_licenses.html')", "def add_license(*license_obj):\n\n logger._log_to_console_and_log_file(\"Add License to appliance\")\n s2l = ui_lib.get_s2l()\n\n \"\"\" Call function to navigate to licenses \"\"\"\n _Navigate_To_Licenses()\n\n \"\"\" Retrieve data from datasheet \"\"\"\n if isinstance(license_obj, test_data.DataObj):\n license_obj = [license_obj]\n elif isinstance(license_obj, tuple):\n license_obj = list(license_obj[0])\n\n fail = 0\n for lic in license_obj:\n\n \"\"\" Get the License name strlicense\"\"\"\n strlicense = getattr(lic, 'type', '')\n if strlicense not in ['HP OneView w/o iLO', 'HP OneView']:\n logger._warn(\"Given license type is not supported by fusion. 
Expected type is 'HP OneView w/o iLO' or 'HP OneView'\")\n s2l.capture_page_screenshot()\n fail += 1\n continue\n\n# if getattr(lic, 'licensepath', '') != '':\n# if re.search(r'noiLO_\\d+\\.dat$', lic.licensepath):\n# logger._log_to_console_and_log_file(\"check for the existence of HP OneView w/o iLO license\")\n# strlicense = \"HP OneView w/o iLO\"\n# elif re.search(r'\\d+\\.dat$', lic.licensepath):\n# logger._log_to_console_and_log_file(\"check for the existence of HP OneView license \")\n# strlicense = \"HP OneView\"\n# else:\n# logger._warn(\"Given license is not supported by fusion\")\n# fail += 1\n# continue\n\n \"\"\" Call function to check the availability of license \"\"\"\n strVal = check_availability_licenses(strlicense)\n if not strVal:\n logger._log_to_console_and_log_file(\"License %s does not exists,Add the license now\" % strlicense)\n if getattr(lic, 'licensepath', '') != '':\n fopen = open(lic.licensepath)\n strLincenseKey = fopen.read()\n else:\n strLincenseKey = getattr(lic, 'content', '')\n if strLincenseKey == '':\n BuiltIn().fail(\"Please specify content attribute for holding license key\")\n \"\"\" Read the license key from the given path\"\"\"\n # with open(lic.licensepath, 'r') as f:\n # f.next()\n # line = f\n # logger._log_to_console_and_log_file(\"HP OneView license1 = \")\n # for line in f:\n # strLincenseKey = line\n # logger._log_to_console_and_log_file(\"HP OneView license = \" % line)\n # f.closed\n \"\"\" Entering inputs in ADD License Page \"\"\"\n ui_lib.wait_for_element_and_click(FusionSettingsPage.ID_MENU_ACTION_MAIN_BTN)\n ui_lib.wait_for_element_visible(FusionSettingsPage.ID_MENU_ACTION_ADDLICENSE)\n ui_lib.wait_for_element_and_click(FusionSettingsPage.ID_MENU_ACTION_ADDLICENSE)\n ui_lib.wait_for_element_visible(FusionSettingsPage.ID_DLG_ADDLICENSE, fail_if_false=True)\n # s2l.input_text(FusionSettingsPage.ID_INPUT_LICENSEKEY, strLincenseKey)\n s2l.execute_javascript(\"$('#fs-license-licenseKeyValue').val('%s');return true;\" % strLincenseKey)\n ui_lib.wait_for_element_and_click(FusionSettingsPage.ID_DLG_BTN_ADD)\n\n \"\"\" Check for Error messages \"\"\"\n if not ui_lib.wait_for_element(FusionSettingsPage.ID_ADDLICENSE_ERR_MSG):\n\n if not check_availability_licenses(strlicense):\n logger._warn(\"Fail in Adding License %s\" % strlicense)\n s2l.capture_page_screenshot()\n fail += 1\n else:\n logger._log_to_console_and_log_file(\"License %s is added successfully\" % strlicense)\n\n else:\n strErr = s2l._get_text(FusionSettingsPage.ID_ADDLICENSE_ERR_MSG)\n logger._warn(\"Unable to Add License %s,and the Err Msg is %s\" % (strlicense, strErr))\n ui_lib.wait_for_element_and_click(FusionSettingsPage.ID_DLG_BTN_CANCEL)\n s2l.capture_page_screenshot()\n fail += 1\n else:\n logger._log_to_console_and_log_file(\"License %s available with licenses,Check the other License\" % strlicense)\n if fail > 0:\n return False\n else:\n return True", "def post(self):\n try:\n license_dto = LicenseDTO(request.get_json())\n license_dto.validate()\n except DataError as e:\n current_app.logger.error(f\"Error validating request: {str(e)}\")\n return {\n \"Error\": \"Unable to create new mapping license\",\n \"SubCode\": \"InvalidData\",\n }, 400\n\n new_license_id = LicenseService.create_licence(license_dto)\n return {\"licenseId\": new_license_id}, 201", "def create(self, cr, uid, vals, context=None):\n if 'license_no' in vals:\n vals['license_no'] = vals['license_no'].strip()\n \n return super(driving_license, self).create(cr, uid, vals, context=context)", "def 
test_create_new_license_by_unauthenticated_user_fails(self):\n response = self.client.post(\n self.all_licences_url,\n json={\n \"description\": NEW_LICENSE_DESCRIPTION,\n \"name\": NEW_LICENSE_NAME,\n \"plainText\": \"\",\n },\n )\n response_body = response.get_json()\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response_body[\"SubCode\"], \"InvalidToken\")", "def test_get_all_licenses_(self):\n response = self.client.get(self.endpoint_url)\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response_body[\"licenses\"]), 0)\n self.assertEqual(response_body[\"licenses\"], [])\n # setup: add license\n self.test_license_id = create_canned_license()\n response = self.client.get(self.endpoint_url)\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response_body[\"licenses\"]), 1)\n licenses = response_body[\"licenses\"][0]\n self.assertEqual(licenses[\"licenseId\"], 1)\n self.assertEqual(licenses[\"name\"], TEST_LICENSE_NAME)\n self.assertEqual(licenses[\"description\"], TEST_LICENSE_DESCRIPTION)\n self.assertEqual(licenses[\"plainText\"], TEST_LICENSE_PLAINTEXT)", "def test_create_new_license_with_invalid_request_fails(self):\n response = self.client.post(\n self.all_licences_url,\n headers={\"Authorization\": self.test_user_token},\n json={\n \"license_description\": [NEW_LICENSE_DESCRIPTION],\n \"license_name\": NEW_LICENSE_NAME,\n },\n )\n response_body = response.get_json()\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_body[\"Error\"], \"Unable to create new mapping license\")\n self.assertEqual(response_body[\"SubCode\"], \"InvalidData\")", "def _license_obj(license):\n obj = None\n\n if license in ('MIT', 'MIT License'):\n obj = {\n 'URL': 'https://api.github.com/licenses/mit',\n 'name': 'MIT'\n }\n elif license in ('BSD 2-clause \"Simplified\" License'):\n obj = {\n 'URL': 'https://api.github.com/licenses/bsd-2-clause',\n 'name': 'BSD-2-Clause'\n }\n elif license in ('BSD 3-clause \"New\" or \"Revised\" License'):\n obj = {\n 'URL': 'https://api.github.com/licenses/bsd-3-clause',\n 'name': 'BSD-3-Clause'\n }\n elif license in ('Apache License 2.0'):\n obj = {\n 'URL': 'https://api.github.com/licenses/apache-2.0',\n 'name': 'Apache-2.0'\n }\n elif license in ('GNU General Public License v2.1'):\n obj = {\n 'URL': 'https://api.github.com/licenses/gpl-2.1',\n 'name': 'GPL-2.1'\n }\n elif license in ('GNU General Public License v2.0'):\n obj = {\n 'URL': 'https://api.github.com/licenses/gpl-2.0',\n 'name': 'GPL-2.0'\n }\n elif license in ('GNU Lesser General Public License v2.1'):\n obj = {\n 'URL': 'https://api.github.com/licenses/lgpl-2.1',\n 'name': 'LGPL-2.1'\n }\n elif license in ('GNU General Public License v3.0'):\n obj = {\n 'URL': 'https://api.github.com/licenses/gpl-3.0',\n 'name': 'GPL-3.0'\n }\n elif license in ('GNU Lesser General Public License v3.0'):\n obj = {\n 'URL': 'https://api.github.com/licenses/lgpl-3.0',\n 'name': 'LGPL-3.0'\n }\n elif license in ('Eclipse Public License 1.0'):\n obj = {\n 'URL': 'https://api.github.com/licenses/epl-1.0',\n 'name': 'EPL-1.0',\n }\n elif license in ('Mozilla Public License 2.0'):\n obj = {\n 'URL': 'https://api.github.com/licenses/mpl-2.0',\n 'name': 'MPL-2.0',\n }\n elif license in ('The Unlicense'):\n obj = {\n 'URL': 'https://api.github.com/licenses/unlicense',\n 'name': 'Unlicense',\n }\n elif license in ('GNU Affero General Public License v3.0'):\n obj = {\n 'URL': 
'https://api.github.com/licenses/agpl-3.0',\n 'name': 'AGPL-3.0',\n }\n elif license in ('Eclipse Public License 2.0'):\n obj = {\n 'URL': 'https://api.github.com/licenses/epl-2.0',\n 'name': 'EPL-2.0',\n }\n\n if obj is None:\n logger.warn('I dont understand the license: %s', license)\n raise ValueError('Aborting!')\n\n return obj", "def test_create_entitlement_template(self):\n pass", "def generate(abouts, license_dict, min_license_score, template=None, variables=None):\n rendered = None\n error = None\n template_error = check_template(template)\n if template_error:\n lineno, message = template_error\n error = Error(\n CRITICAL,\n 'Template validation error at line: {lineno}: \"{message}\"'.format(**locals())\n )\n return error, None\n\n template = jinja2.Template(template)\n\n # Get the current UTC time\n utcnow = datetime.datetime.utcnow()\n\n try:\n # Convert the field object to dictionary as it's needed for the\n # groupby in JINJA2 template\n about_dict_list = []\n for about in abouts:\n about_dict = convert_object_to_dict(about)\n about_dict_list.append(about_dict)\n rendered = template.render(\n abouts=about_dict_list, license_dict=license_dict,\n min_license_score=min_license_score,\n utcnow=utcnow,\n tkversion=__version__,\n variables=variables\n )\n except Exception as e:\n lineno = getattr(e, 'lineno', '') or ''\n if lineno:\n lineno = ' at line: {}'.format(lineno)\n err = getattr(e, 'message', '') or ''\n error = Error(\n CRITICAL,\n 'Template processing error {lineno}: {err}'.format(**locals()),\n )\n error = Error(\n CRITICAL,\n 'Template processing error:' + str(e),\n )\n\n return error, rendered", "def test_licenses(self):\n i = self.instance.licenses()\n self.get_next(i)\n\n self.session.get.assert_called_once_with(\n url_for(\"licenses\"), params={\"per_page\": 100}, headers={}\n )", "def create_with_payment(cls, tpl, user, insurance_info, envelope_args):\n currency_multiplier = 100\n discount_percent = 5\n insurance_rate_percent = 10\n\n # Read template and fill it up\n with open(path.join(TPL_PATH, tpl), 'r') as file:\n content_bytes = file.read()\n\n # Get base64 logo representation to paste it into the HTML file\n with open(path.join(IMG_PATH, 'logo.png'), 'rb') as file:\n img_base64_src = base64.b64encode(file.read()).decode('utf-8')\n content_bytes = Environment(loader=BaseLoader).from_string(\n content_bytes).render(\n user_name=f\"{user['first_name']} {user['last_name']}\",\n user_email=user['email'],\n address=f\"{user['street']}, {user['city']}, {user['state']}\",\n zip_code=user['zip_code'],\n detail_1=insurance_info['detail1']['name'],\n detail_2=insurance_info['detail2']['name'],\n value_detail_1=insurance_info['detail1']['value'],\n value_detail_2=insurance_info['detail2']['value'],\n img_base64_src=img_base64_src\n )\n\n base64_file_content = base64.b64encode(\n bytes(content_bytes, 'utf-8')\n ).decode('ascii')\n\n # Create the envelope definition\n envelope_definition = EnvelopeDefinition(\n email_subject='Buy New Insurance'\n )\n\n # Create the document\n doc1 = Document(document_base64=base64_file_content,\n name='Insurance order form', # Can be different from actual file name\n file_extension='html', # Source data format\n document_id='1' # A label used to reference the doc\n )\n envelope_definition.documents = [doc1]\n\n # Create a signer recipient to sign the document\n signer1 = Signer(\n email=user['email'],\n name=f\"{user['first_name']} {user['last_name']}\",\n recipient_id='1',\n routing_order='1',\n 
client_user_id=envelope_args['signer_client_id']\n )\n sign_here1 = SignHere(\n anchor_string='/sn1/',\n anchor_y_offset='10',\n anchor_units='pixels',\n anchor_x_offset='20',\n )\n\n # Create number tabs for the coverage amount and deductible\n coverage = Number(\n font='helvetica',\n font_size='size11',\n anchor_string='/l1e/',\n anchor_y_offset='-7',\n anchor_units='pixels',\n tab_label='l1e',\n required='true',\n )\n\n deductible = Number(\n font='helvetica',\n font_size='size11',\n anchor_string='/l2e/',\n anchor_y_offset='-7',\n anchor_units='pixels',\n tab_label='l2e',\n required='true',\n )\n\n # Create checkbox and trigger tabs to apply the discount\n checkbox = Checkbox(\n font='helvetica',\n font_size='size11',\n anchor_string='/cb/',\n anchor_y_offset='-4',\n anchor_units='pixels',\n anchor_x_offset='-8',\n tab_label='checkbox',\n height='50',\n bold='true',\n )\n\n trigger = FormulaTab(\n anchor_string='/trigger/',\n font_color='white',\n anchor_y_offset='10',\n tab_label='trigger',\n conditional_parent_label='checkbox',\n conditional_parent_value='on',\n formula='1',\n required='true',\n locked='true',\n )\n\n discount = FormulaTab(\n font='helvetica',\n font_size='size11',\n bold='true',\n anchor_string='/dt/',\n anchor_y_offset='-4',\n anchor_units='pixels',\n anchor_x_offset='0',\n tab_label='discount',\n formula=f\"if([trigger] > 0, {discount_percent}, 0)\",\n round_decimal_places='0',\n locked='true',\n )\n\n # Create a formula tab for the insurance price\n total = f'([l1e]-[l2e]) * {insurance_rate_percent}/100'\n\n formula_total = FormulaTab(\n font='helvetica',\n bold='true',\n font_size='size12',\n anchor_string='/l4t/',\n anchor_y_offset='-6',\n anchor_units='pixels',\n anchor_x_offset='84',\n tab_label='l4t',\n formula=f'({total}) - (({total}) * [discount]/100)',\n round_decimal_places='2',\n required='true',\n locked='true',\n )\n\n # Create payment line item\n payment_line_iteml1 = PaymentLineItem(\n name='Insurance payment',\n description='$[l4t]',\n amount_reference='l4t'\n )\n\n payment_details = PaymentDetails(\n gateway_account_id=envelope_args['gateway_account_id'],\n currency_code='USD',\n gateway_name=envelope_args['gateway_name'],\n line_items=[payment_line_iteml1]\n )\n\n # Create a hidden formula tab for the payment itself\n formula_payment = FormulaTab(\n tab_label='payment',\n formula=f'([l4t]) * {currency_multiplier}',\n round_decimal_places='2',\n payment_details=payment_details,\n hidden='true',\n required='true',\n locked='true',\n document_id='1',\n page_number='1',\n x_position='0',\n y_position='0'\n )\n\n # Create tabs for the signer\n signer1_tabs = Tabs(\n sign_here_tabs=[sign_here1],\n number_tabs=[coverage, deductible],\n formula_tabs=[\n formula_payment, formula_total, discount, trigger\n ],\n checkbox_tabs=[checkbox]\n )\n signer1.tabs = signer1_tabs\n\n # Add the recipients to the envelope object\n recipients = Recipients(signers=[signer1])\n envelope_definition.recipients = recipients\n\n # Request that the envelope be sent by setting status to 'sent'\n envelope_definition.status = 'sent'\n\n return envelope_definition", "def _find_licenses(self):\n for lic in self.pom_data.findall('licenses/license'):\n yield dict([\n ('name', self._get_attribute('name', lic)),\n ('url', self._get_attribute('url', lic)),\n ('comments', self._get_attribute('comments', lic)),\n # arcane and seldom used\n ('distribution', self._get_attribute('distribution', lic)),\n ])", "def licenses_ajax():\n licenses = db.session.query(AssetLicense).all()\n return 
ajax.admin.licenses_data(licenses)", "def test_update_iceberg_replace_license(self):\n pass", "def step_add_license(context):\n jlink = context.jlink\n assert jlink.add_license(str(context.text))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds box predictor based on the configuration. See box_predictor.proto for configurable options. Also, see box_predictor.py for more details.
def build(argscope_fn, box_predictor_config, is_training, num_classes):
  if not isinstance(box_predictor_config, box_predictor_pb2.BoxPredictor):
    raise ValueError('box_predictor_config not of type '
                     'box_predictor_pb2.BoxPredictor.')
  # The oneof is not passed in as a parameter; it is read from the config.
  box_predictor_oneof = box_predictor_config.WhichOneof('box_predictor_oneof')

  if box_predictor_oneof == 'convolutional_box_predictor':  # not used for Faster R-CNN; this branch is for SSD
    conv_box_predictor = box_predictor_config.convolutional_box_predictor  # get the params
    # Convert the convolution hyperparams needed for the additional convolution
    # layers into a slim arg scope; done with hyperparams_builder.build.
    conv_hyperparams = argscope_fn(conv_box_predictor.conv_hyperparams,
                                   is_training)
    # What the following object does: take a high level image feature map as
    # input and produce two predictions,
    # (1) a tensor encoding box locations, and
    # (2) a tensor encoding classes for each box (from the box_predictor.py script).
    # These components are passed directly to loss functions.
    box_predictor_object = box_predictor.ConvolutionalBoxPredictor(  # initialize the box predictor class
        is_training=is_training,
        num_classes=num_classes,
        conv_hyperparams=conv_hyperparams,  # the slim arg scope built above (weights, operations, etc.)
        min_depth=conv_box_predictor.min_depth,
        max_depth=conv_box_predictor.max_depth,
        num_layers_before_predictor=(conv_box_predictor.  # how many layers before the predictor
                                     num_layers_before_predictor),
        use_dropout=conv_box_predictor.use_dropout,
        dropout_keep_prob=conv_box_predictor.dropout_keep_probability,
        kernel_size=conv_box_predictor.kernel_size,
        box_code_size=conv_box_predictor.box_code_size,  # size of the box encoding
        apply_sigmoid_to_scores=conv_box_predictor.apply_sigmoid_to_scores)
    return box_predictor_object  # the returned box_predictor object exposes the prediction functions

  if box_predictor_oneof == 'mask_rcnn_box_predictor':  # this is what we have for Faster R-CNN
    mask_rcnn_box_predictor = box_predictor_config.mask_rcnn_box_predictor  # get the hyperparams
    # Fully connected hyperparams used for the box/class prediction layers.
    fc_hyperparams = argscope_fn(mask_rcnn_box_predictor.fc_hyperparams,
                                 is_training)
    conv_hyperparams = None
    if mask_rcnn_box_predictor.HasField('conv_hyperparams'):  # not set in our config yet
      conv_hyperparams = argscope_fn(mask_rcnn_box_predictor.conv_hyperparams,  # assign the conv hyperparams
                                     is_training)
    # Create the MaskRCNNBoxPredictor object from the box_predictor module.
    box_predictor_object = box_predictor.MaskRCNNBoxPredictor(
        is_training=is_training,
        num_classes=num_classes,
        fc_hyperparams=fc_hyperparams,
        use_dropout=mask_rcnn_box_predictor.use_dropout,
        dropout_keep_prob=mask_rcnn_box_predictor.dropout_keep_probability,
        box_code_size=mask_rcnn_box_predictor.box_code_size,
        conv_hyperparams=conv_hyperparams,
        predict_instance_masks=mask_rcnn_box_predictor.predict_instance_masks,
        mask_prediction_conv_depth=(mask_rcnn_box_predictor.
                                    mask_prediction_conv_depth),
        predict_keypoints=mask_rcnn_box_predictor.predict_keypoints)
    return box_predictor_object

  if box_predictor_oneof == 'rfcn_box_predictor':  # R-FCN case
    rfcn_box_predictor = box_predictor_config.rfcn_box_predictor
    conv_hyperparams = argscope_fn(rfcn_box_predictor.conv_hyperparams,  # build the slim arg scope
                                   is_training)
    box_predictor_object = box_predictor.RfcnBoxPredictor(  # same pattern as above, for R-FCN
        is_training=is_training,
        num_classes=num_classes,
        conv_hyperparams=conv_hyperparams,
        crop_size=[rfcn_box_predictor.crop_height,
                   rfcn_box_predictor.crop_width],
        num_spatial_bins=[rfcn_box_predictor.num_spatial_bins_height,
                          rfcn_box_predictor.num_spatial_bins_width],
        depth=rfcn_box_predictor.depth,
        box_code_size=rfcn_box_predictor.box_code_size)
    return box_predictor_object

  raise ValueError('Unknown box predictor: {}'.format(box_predictor_oneof))
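For context, a minimal usage sketch of the builder above. It assumes the TensorFlow Object Detection API layout that the code itself references (box_predictor_pb2, hyperparams_builder, and this build function living in object_detection.builders.box_predictor_builder); the import paths and config values are illustrative assumptions, not taken from this document.

# Hedged usage sketch: import paths and the example config are assumptions based
# on the TF Object Detection API layout referenced by the builder above.
from google.protobuf import text_format
from object_detection.protos import box_predictor_pb2
from object_detection.builders import hyperparams_builder
from object_detection.builders import box_predictor_builder

config = box_predictor_pb2.BoxPredictor()
text_format.Merge("""
  mask_rcnn_box_predictor {
    fc_hyperparams {
      op: FC
      regularizer { l2_regularizer { weight: 0.0005 } }
      initializer { variance_scaling_initializer { } }
    }
  }
""", config)

# argscope_fn is the hyperparams builder: it turns a Hyperparams proto into a
# slim arg_scope that the predictor applies to its layers.
predictor = box_predictor_builder.build(
    argscope_fn=hyperparams_builder.build,
    box_predictor_config=config,
    is_training=True,
    num_classes=90)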
[ "def __init__(self,\n model_path,\n predictor_mode=\"Analysis\",\n config_type=\"cpu\",\n batch_size=1,\n min_subgraph_size=1,\n trt_dynamic_shape_info=None):\n configs = DeployConfig(\n model_path=model_path,\n batch_size=batch_size,\n min_subgraph_size=min_subgraph_size,\n trt_dynamic_shape_info=trt_dynamic_shape_info)\n analysis_predictor_config = configs.analysis_config(config_type)\n\n logger.debug(\"analysis_predictor_config : {}\".format(\n analysis_predictor_config))\n configs.summary_config(analysis_predictor_config) # summary configs\n\n if predictor_mode == \"Analysis\":\n logger.info(\"current config is Analysis config\")\n predictor0 = fluid.core.create_paddle_predictor(\n analysis_predictor_config)\n # clone main predictor to test predictor.clone api\n self.predictor = predictor0.clone()\n logger.info(\"analysis predictor create and clone successful\")\n elif predictor_mode == \"Native\":\n native_predictor_config = DeployConfig(model_path).native_config(\n config_type)\n logger.info(native_predictor_config)\n logger.info(\"current config is Native config\")\n # use analysis predictor to retrive number of inputs\n analysis_predictor_config.disable_glog_info()\n self.analysis_predictor = fluid.core.create_paddle_predictor(\n analysis_predictor_config)\n # use native predictor to predict\n self.native_predictor = fluid.core.create_paddle_predictor(\n native_predictor_config)\n logger.info(\"native predictor create successful\")", "def _eval_box_proposals(self, predictions):\n if self._output_dir:\n # Saving generated box proposals to file.\n # Predicted box_proposals are in XYXY_ABS mode.\n bbox_mode = BoxMode.XYXY_ABS.value\n ids, boxes, objectness_logits = [], [], []\n for prediction in predictions:\n ids.append(prediction[\"image_id\"])\n boxes.append(prediction[\"proposals\"].proposal_boxes.tensor.numpy())\n objectness_logits.append(prediction[\"proposals\"].objectness_logits.numpy())\n\n proposal_data = {\n \"boxes\": boxes,\n \"objectness_logits\": objectness_logits,\n \"ids\": ids,\n \"bbox_mode\": bbox_mode,\n }\n with PathManager.open(os.path.join(self._output_dir, \"box_proposals.pkl\"), \"wb\") as f:\n pickle.dump(proposal_data, f)\n\n if not self._do_evaluation:\n self._logger.info(\"Annotations are not available for evaluation.\")\n return\n\n self._logger.info(\"Evaluating bbox proposals ...\")\n res = {}\n areas = {\"all\": \"\", \"small\": \"s\", \"medium\": \"m\", \"large\": \"l\"}\n for limit in [100, 1000]:\n for area, suffix in areas.items():\n stats = _evaluate_box_proposals_with_dice(predictions, self._coco_api, area=area, limit=limit)\n key = \"AR{}@{:d}\".format(suffix, limit)\n res[key] = float(stats[\"ar\"].item() * 100)\n self._logger.info(\"Proposal metrics: \\n\" + create_small_table(res))\n self._results[\"box_proposals\"] = res", "def predict(self, filename, bboxes):\n tracker = ObjectTracker(\n tracker_type=self.tracker_type, detect_objects=False, use_gpu=self.use_gpu\n )\n\n bbox_history, shape = tracker.get_four_bboxes(\n filename, bboxes, save_video=False, return_shape=True\n )\n\n two_lines = False\n\n # Case 1: Head On / Body Off\n if self.use_head and not self.use_body:\n gaze_rays = self.head_gaze_estimator.get_gaze_rays(\n filename, bbox_history, show=False\n )\n if len(gaze_rays) == 0:\n gaze_rays = self.body_gaze_estimator.get_gaze_rays(filename, shape)\n # Case 2: Head Off / Body On\n elif not self.use_head and self.use_body:\n gaze_rays = self.body_gaze_estimator.get_gaze_rays(filename, shape)\n if len(gaze_rays) == 0:\n gaze_rays = 
self.head_gaze_estimator.get_gaze_rays(\n filename, bbox_history, show=False\n )\n # Case 3: Head On / Body On\n else:\n head_gaze_rays = self.head_gaze_estimator.get_gaze_rays(\n filename, bbox_history, show=False\n )\n body_gaze_rays = self.body_gaze_estimator.get_gaze_rays(filename, shape)\n if self.mix_priority == \"head\":\n gaze_rays = {**body_gaze_rays, **head_gaze_rays}\n elif self.mix_priority == \"body\":\n gaze_rays = {**head_gaze_rays, **body_gaze_rays}\n else: # mix_type == \"equal\"\n gaze_rays = join_dicts(head_gaze_rays, body_gaze_rays)\n two_lines = True\n\n # If both are empty, choose randomly\n if len(gaze_rays) == 0:\n return np.random.randint(1, 5)\n\n preds_per_frame = []\n gaze_rays_sorted = OrderedDict(sorted(gaze_rays.items()))\n for frame_no, ray in gaze_rays_sorted.items():\n try:\n pred = self.choose_object(ray, bbox_history[frame_no], two_lines)\n preds_per_frame.append(pred)\n except IndexError:\n continue\n\n most_common = max(range(4), key=preds_per_frame.count)\n\n return 1 + most_common", "def build_box_head(cfg):\n name = cfg.MODEL.ROI_BOX_HEAD.NAME\n return ROI_BOX_HEAD_REGISTRY.get(name)(cfg)", "def _create_box_loss(self) -> \"Tensor\":\n import tensorflow as tf\n\n # Get default graph\n default_graph = tf.get_default_graph()\n\n # Compute box losses\n target_class_phd = tf.placeholder(dtype=tf.int32, shape=[], name=\"target_class_phd\")\n victim_class_phd = tf.placeholder(dtype=tf.int32, shape=[], name=\"victim_class_phd\")\n box_iou_threshold = tf.placeholder(dtype=tf.float32, shape=[], name=\"box_iou_threshold\")\n\n # Ignore background class\n class_predictions_with_background = self.estimator.predictions[\"class_predictions_with_background\"]\n class_predictions_with_background = class_predictions_with_background[:, 1:]\n\n # Convert to 1-hot\n target_class_one_hot = tf.one_hot([target_class_phd - 1], class_predictions_with_background.shape[-1])\n victim_class_one_hot = tf.one_hot([victim_class_phd - 1], class_predictions_with_background.shape[-1])\n\n box_iou_tensor = default_graph.get_tensor_by_name(\"Loss/BoxClassifierLoss/Compare/IOU/Select:0\")\n box_iou_tensor = tf.reshape(box_iou_tensor, (-1,))\n box_target = tf.cast(box_iou_tensor >= box_iou_threshold, dtype=tf.float32)\n\n # Compute box target loss\n box_target_weight = tf.placeholder(dtype=tf.float32, shape=[], name=\"box_target_weight\")\n\n box_target_logit = class_predictions_with_background[:, target_class_phd - 1]\n box_target_loss = box_target_logit * box_target\n box_target_loss = -1 * tf.reduce_sum(box_target_loss)\n weight_box_target_loss = tf.multiply(x=box_target_loss, y=box_target_weight, name=\"weight_box_target_loss\")\n\n # Compute box victim loss\n box_victim_weight = tf.placeholder(dtype=tf.float32, shape=[], name=\"box_victim_weight\")\n\n box_victim_logit = class_predictions_with_background[:, victim_class_phd - 1]\n box_victim_loss = box_victim_logit * box_target\n box_victim_loss = tf.reduce_sum(box_victim_loss)\n weight_box_victim_loss = tf.multiply(x=box_victim_loss, y=box_victim_weight, name=\"weight_box_victim_loss\")\n\n # Compute box target CW loss\n box_target_cw_weight = tf.placeholder(dtype=tf.float32, shape=[], name=\"box_target_cw_weight\")\n box_target_cw_confidence = tf.placeholder(dtype=tf.float32, shape=[], name=\"box_target_cw_confidence\")\n\n box_nontarget_logit = tf.reduce_max(\n class_predictions_with_background * (1 - target_class_one_hot) - 10000 * target_class_one_hot, axis=-1\n )\n box_target_cw_loss = tf.nn.relu(box_nontarget_logit - 
box_target_logit + box_target_cw_confidence)\n box_target_cw_loss = box_target_cw_loss * box_target\n box_target_cw_loss = tf.reduce_sum(box_target_cw_loss)\n weight_box_target_cw_loss = tf.multiply(\n x=box_target_cw_loss, y=box_target_cw_weight, name=\"weight_box_target_cw_loss\"\n )\n\n # Compute box victim CW loss\n box_victim_cw_weight = tf.placeholder(dtype=tf.float32, shape=[], name=\"box_victim_cw_weight\")\n box_victim_cw_confidence = tf.placeholder(dtype=tf.float32, shape=[], name=\"box_victim_cw_confidence\")\n\n box_nonvictim_logit = tf.reduce_max(\n class_predictions_with_background * (1 - victim_class_one_hot) - 10000 * victim_class_one_hot, axis=-1\n )\n box_victim_cw_loss = tf.nn.relu(box_victim_logit - box_nonvictim_logit + box_victim_cw_confidence)\n box_victim_cw_loss = box_victim_cw_loss * box_target\n box_victim_cw_loss = tf.reduce_sum(box_victim_cw_loss)\n weight_box_victim_cw_loss = tf.multiply(\n x=box_victim_cw_loss, y=box_victim_cw_weight, name=\"weight_box_victim_cw_loss\"\n )\n\n # Compute partial loss\n partial_loss = tf.add_n(\n [weight_box_target_loss, weight_box_victim_loss, weight_box_target_cw_loss, weight_box_victim_cw_loss],\n name=\"partial_box_loss\",\n )\n\n return partial_loss", "def build_predict_pipeline(self):\n predictor, predictor_config = self.build_predictor()\n reader, reader_config = create_class(\n self.reader[\"class_name\"],\n self.reader[\"config\"])\n evaluator, evaluator_config = create_class(\n self.evaluator_config[\"class_name\"],\n self.evaluator_config[\"config\"])\n pl: Pipeline = Pipeline()\n pl.set_reader(reader, config=reader_config)\n for processor in self.processors.values():\n proc, proc_config = create_class(\n processor[\"class_name\"],\n processor[\"config\"])\n pl.add(component=proc, config=proc_config)\n pl.add(component=predictor, config=predictor_config)\n pl.add(component=evaluator, config=evaluator_config)\n pl.initialize()\n return pl, evaluator", "def yolo_filter_boxes(self, box_confidence, boxes, box_class_probs, threshold = .6):\n\n # Step 1: Compute box scores\n ### START CODE HERE ### (โ‰ˆ 1 line)\n box_scores = box_confidence * box_class_probs\n # should be 19x19x5x80 ...\n #print(box_scores.shape)\n #assert(box_scores.shape == (19,19, 5, 80))\n ### END CODE HERE ###\n\n # Step 2: Find the box_classes thanks to the max box_scores, keep track of the corresponding score\n # Basically pick the highest probablity category for each anchor box and each grid location\n ### START CODE HERE ### (โ‰ˆ 2 lines)\n box_classes = K.argmax(box_scores, axis=-1)\n box_class_scores = K.max(box_scores, axis=-1)\n ### END CODE HERE ###\n\n # Step 3: Create a filtering mask based on \"box_class_scores\" by using \"threshold\". 
The mask should have the\n # same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold)\n ### START CODE HERE ### (โ‰ˆ 1 line)\n filtering_mask = box_class_scores > threshold\n ### END CODE HERE ###\n\n # Step 4: Apply the mask to scores, boxes and classes\n ### START CODE HERE ### (โ‰ˆ 3 lines)\n all_scores = K.max(tf.boolean_mask(box_scores,filtering_mask,name='score_mask'),axis = -1)\n all_boxes = tf.boolean_mask(boxes,filtering_mask,name='score_mask')\n all_classes = tf.boolean_mask(box_classes,filtering_mask,name='score_mask')\n\n return all_scores, all_boxes, all_classes", "def _construct_builder(self, **kwargs) -> engine.ProcessBuilder:\n # pylint: disable=too-many-branches,too-many-statements,too-many-locals\n structure = kwargs['structure']\n engines = kwargs['engines']\n protocol = kwargs['protocol']\n spin_type = kwargs['spin_type']\n relax_type = kwargs['relax_type']\n magnetization_per_site = kwargs.get('magnetization_per_site', None)\n threshold_forces = kwargs.get('threshold_forces', None)\n threshold_stress = kwargs.get('threshold_stress', None)\n reference_workchain = kwargs.get('reference_workchain', None)\n\n # Get the protocol that we want to use\n if protocol is None:\n protocol = self._default_protocol\n protocol = self.get_protocol(protocol)\n\n # Set the builder\n builder = self.process_class.get_builder()\n\n # Set code\n builder.code = engines['relax']['code']\n\n # Set structure\n builder.structure = structure\n\n # Set options\n builder.options = plugins.DataFactory('dict')(dict=engines['relax']['options'])\n\n # Set workchain related inputs, in this case, give more explicit output to report\n builder.verbose = plugins.DataFactory('bool')(True)\n\n # Fetch initial parameters from the protocol file.\n # Here we set the protocols fast, moderate and precise. 
These currently have no formal meaning.\n # After a while these will be set in the VASP workchain entrypoints using the convergence workchain etc.\n # However, for now we rely on plane wave cutoffs and a set k-point density for the chosen protocol.\n # Please consult the protocols.yml file for details.\n parameters_dict = protocol['parameters']\n\n # Set spin related parameters\n if spin_type == SpinType.NONE:\n parameters_dict['ispin'] = 1\n elif spin_type == SpinType.COLLINEAR:\n parameters_dict['ispin'] = 2\n\n # Set the magnetization\n if magnetization_per_site is not None:\n parameters_dict['magmom'] = list(magnetization_per_site)\n\n # Set settings\n # Make sure the VASP parser is configured for the problem\n settings = AttributeDict()\n settings.update({\n 'parser_settings': {\n 'critical_notifications': {\n 'add_brmix': True,\n 'add_cnormn': False,\n 'add_denmp': True,\n 'add_dentet': True,\n 'add_edddav_zhegv': True,\n 'add_eddrmm_zhegv': True,\n 'add_edwav': True,\n 'add_fexcp': True,\n 'add_fock_acc': True,\n 'add_non_collinear': True,\n 'add_not_hermitian': True,\n 'add_pzstein': True,\n 'add_real_optlay': True,\n 'add_rhosyg': True,\n 'add_rspher': True,\n 'add_set_indpw_full': True,\n 'add_sgrcon': True,\n 'add_no_potimm': True,\n 'add_magmom': True,\n 'add_bandocc': True\n },\n 'add_energies': True,\n 'add_forces': True,\n 'add_stress': True,\n 'add_misc': {\n 'type':\n 'dict',\n 'quantities': [\n 'total_energies', 'maximum_stress', 'maximum_force', 'magnetization', 'notifications',\n 'run_status', 'run_stats', 'version'\n ],\n 'link_name':\n 'misc'\n },\n 'energy_type': ['energy_free', 'energy_no_entropy']\n }\n })\n builder.settings = plugins.DataFactory('dict')(dict=settings)\n\n # Configure the handlers\n handler_overrides = {\n 'handler_unfinished_calc_ionic_alt': True,\n 'handler_unfinished_calc_generic_alt': True,\n 'handler_electronic_conv_alt': True,\n 'handler_unfinished_calc_ionic': False,\n 'handler_unfinished_calc_generic': False,\n 'handler_electronic_conv': False\n }\n builder.handler_overrides = plugins.DataFactory('dict')(dict=handler_overrides)\n\n # Set the parameters on the builder, put it in the code namespace to pass through\n # to the code inputs\n builder.parameters = plugins.DataFactory('dict')(dict={'incar': parameters_dict})\n\n # Set potentials and their mapping\n builder.potential_family = plugins.DataFactory('str')(protocol['potential_family'])\n builder.potential_mapping = plugins.DataFactory('dict')(\n dict=self._potential_mapping[protocol['potential_mapping']]\n )\n\n # Set the kpoint grid from the density in the protocol\n kpoints = plugins.DataFactory('array.kpoints')()\n kpoints.set_cell_from_structure(structure)\n if reference_workchain:\n previous_kpoints = reference_workchain.inputs.kpoints\n kpoints.set_kpoints_mesh(previous_kpoints.get_attribute('mesh'), previous_kpoints.get_attribute('offset'))\n else:\n kpoints.set_kpoints_mesh_from_density(protocol['kpoint_distance'])\n builder.kpoints = kpoints\n\n # Set the relax parameters\n relax = AttributeDict()\n if relax_type != RelaxType.NONE:\n # Perform relaxation of cell or positions\n relax.perform = plugins.DataFactory('bool')(True)\n relax.algo = plugins.DataFactory('str')(protocol['relax']['algo'])\n relax.steps = plugins.DataFactory('int')(protocol['relax']['steps'])\n if relax_type == RelaxType.POSITIONS:\n relax.positions = plugins.DataFactory('bool')(True)\n relax.shape = plugins.DataFactory('bool')(False)\n relax.volume = plugins.DataFactory('bool')(False)\n elif relax_type == 
RelaxType.CELL:\n relax.positions = plugins.DataFactory('bool')(False)\n relax.shape = plugins.DataFactory('bool')(True)\n relax.volume = plugins.DataFactory('bool')(True)\n elif relax_type == RelaxType.VOLUME:\n relax.positions = plugins.DataFactory('bool')(False)\n relax.shape = plugins.DataFactory('bool')(False)\n relax.volume = plugins.DataFactory('bool')(True)\n elif relax_type == RelaxType.SHAPE:\n relax.positions = plugins.DataFactory('bool')(False)\n relax.shape = plugins.DataFactory('bool')(True)\n relax.volume = plugins.DataFactory('bool')(False)\n elif relax_type == RelaxType.POSITIONS_CELL:\n relax.positions = plugins.DataFactory('bool')(True)\n relax.shape = plugins.DataFactory('bool')(True)\n relax.volume = plugins.DataFactory('bool')(True)\n elif relax_type == RelaxType.POSITIONS_SHAPE:\n relax.positions = plugins.DataFactory('bool')(True)\n relax.shape = plugins.DataFactory('bool')(True)\n relax.volume = plugins.DataFactory('bool')(False)\n else:\n # Do not perform any relaxation\n relax.perform = plugins.DataFactory('bool')(False)\n\n if threshold_forces is not None:\n threshold = threshold_forces\n else:\n threshold = protocol['relax']['threshold_forces']\n relax.force_cutoff = plugins.DataFactory('float')(threshold)\n\n if threshold_stress is not None:\n raise ValueError('Using a stress threshold is not directly available in VASP during relaxation.')\n\n builder.relax = relax\n\n return builder", "def prepare_boxlist(self, boxes, scores, size3d):\n boxes = boxes.reshape(-1, 7)\n scores = scores.reshape(-1)\n boxlist = BoxList3D(boxes, size3d, mode=\"yx_zb\", examples_idxscope=None,\n constants={'prediction': True})\n boxlist.add_field(\"scores\", scores)\n return boxlist", "def draw_boxes(boxes, img, model_size, crop_rect, color=(255, 255, 255), debug=False):\n\n retimg = img.copy()\n [xmin, xmax] = crop_rect[0]\n [ymin, ymax] = crop_rect[1]\n crop_w = xmax - xmin\n crop_h = ymax - ymin\n [model_h, model_w] = model_size\n [img_h, img_w] = img.shape[0:2]\n\n for box in boxes:\n # only show if prediction is in CLASSES_TO_SHOW\n if box.cn not in CLASSES_TO_SHOW:\n if debug: print(\"[INFO] detected class\", box.cn)\n continue\n\n label = '{} {:.2f}'.format(box.cn, box.prob)\n\n # convert bounding box to coordinates\n left = (box.x - box.w / 2)\n right = (box.x + box.w / 2)\n top = (box.y - box.h / 2)\n bottom = (box.y + box.h / 2)\n\n # scale up boxes to cropped image size\n left *= crop_w / model_w\n right *= crop_w / model_w\n top *= crop_h / model_h\n bottom *= crop_h / model_h\n\n # shift boxes from cropped to original image\n left += xmin\n right += xmin\n top += ymin\n bottom += ymin\n\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(img_h, np.floor(bottom + 0.5).astype('int32'))\n right = min(img_w, np.floor(right + 0.5).astype('int32'))\n\n # draw rectangle\n cv2.rectangle(retimg, (left, top), (right, bottom), color=color, thickness=2, lineType=cv2.LINE_AA)\n\n # write label\n fontface = cv2.FONT_HERSHEY_SIMPLEX\n fontscale = 0.5\n fontthickness = 1\n textsize, _ = cv2.getTextSize(label, fontface, fontscale, fontthickness)\n cv2.putText(retimg, label, (left + 2, top + textsize[1] + 2),\n fontface, fontScale=fontscale, color=color,\n thickness=fontthickness, lineType=cv2.LINE_AA)\n\n return retimg", "def post_process(self, boxes: detectron2.structures.Boxes) -> detectron2.structures.Boxes:\n\n if len(boxes) == 0:\n return boxes\n return boxes[boxes.pred_classes == self.class_id]", "def 
_construct_builder(self, **kwargs) -> engine.ProcessBuilder:\n # pylint: disable=too-many-branches,too-many-statements,too-many-locals\n structure = kwargs['structure']\n engines = kwargs['engines']\n protocol = kwargs['protocol']\n spin_type = kwargs['spin_type']\n relax_type = kwargs['relax_type']\n electronic_type = kwargs['electronic_type']\n magnetization_per_site = kwargs.get('magnetization_per_site', None)\n threshold_forces = kwargs.get('threshold_forces', None)\n threshold_stress = kwargs.get('threshold_stress', None)\n reference_workchain = kwargs.get('reference_workchain', None)\n\n protocol = copy.deepcopy(self.get_protocol(protocol))\n code = engines['relax']['code']\n\n pseudo_family_label = protocol.pop('pseudo_family')\n try:\n pseudo_family = orm.Group.objects.get(label=pseudo_family_label)\n except exceptions.NotExistent as exception:\n raise ValueError(\n f'required pseudo family `{pseudo_family_label}` is not installed. '\n 'Please use `aiida-pseudo install pseudo-dojo` to install it.'\n ) from exception\n\n cutoff_stringency = protocol['cutoff_stringency']\n pseudo_type = pseudo_family.pseudo_type\n recommended_ecut_wfc, recommended_ecut_rho = pseudo_family.get_recommended_cutoffs(\n structure=structure, stringency=cutoff_stringency, unit='Eh'\n )\n if pseudo_type == 'pseudo.jthxml':\n # JTH XML are PAW; we need `pawecutdg`\n cutoff_parameters = {\n 'ecut': np.ceil(recommended_ecut_wfc),\n 'pawecutdg': np.ceil(recommended_ecut_rho),\n }\n else:\n # All others are NC; no need for `pawecutdg`\n cutoff_parameters = {'ecut': recommended_ecut_wfc}\n\n override = {\n 'abinit': {\n 'metadata': {\n 'options': engines['relax']['options']\n },\n 'pseudos': pseudo_family.get_pseudos(structure=structure),\n 'parameters': cutoff_parameters\n }\n }\n\n builder = self.process_class.get_builder()\n\n # Force threshold\n # NB we deal with this here because knowing threshold_f is necessary if the structure is a molecule.\n # threshold_f will be used later in the generator to set the relax threshold\n # (find \"Continue force and stress thresholds\" in this file.)\n if threshold_forces is not None:\n threshold_f = threshold_forces * units.eV_to_Ha / units.ang_to_bohr # eV/โ„ซ -> Ha/Bohr\n else:\n threshold_f = 5.0e-5 # Abinit default value in Ha/Bohr\n\n # Deal with molecular case\n if structure.pbc == (False, False, False):\n # We assume the structure is a molecule which already has an appropriate vacuum applied\n # NB: the vacuum around the molecule must maintain the molecule's symmetries!\n warnings.warn(\n f'The input structure {structure} has no periodic boundary conditions, so we '\n 'assume the structure is a molecule. The structure will be modified to have full PBC. 
We assume that '\n 'the cell contains appropriate symmetry-conserving vacuum, and various tweaks for molecular systems '\n ' will be applied to the selected protocol!'\n )\n\n # Set pbc to [True, True, True]\n pbc_structure = structure.clone()\n pbc_structure.set_pbc([True, True, True])\n\n # Update protocol\n _ = protocol['base'].pop('kpoints_distance') # Remove k-points distance; we will use gamma only\n _ = protocol['base']['abinit']['parameters'].pop(\n 'tolvrs'\n ) # Remove tolvrs; we will use force tolerance for SCF\n # Set k-points to gamma-point\n protocol['base']['kpoints'] = [1, 1, 1]\n # protocol['base']['abinit']['parameters']['shiftk'] = [[0, 0, 0]]\n protocol['base']['abinit']['parameters']['nkpt'] = 1\n # Set a force tolerance for SCF convergence\n protocol['base']['abinit']['parameters']['toldff'] = threshold_f * 1.0e-1\n # Add a model macroscopic dielectric constant\n protocol['base']['abinit']['parameters']['diemac'] = 2.0\n\n inputs = generate_inputs(self.process_class._process_class, protocol, code, pbc_structure, override) # pylint: disable=protected-access\n elif False in structure.pbc:\n raise ValueError(\n f'The input structure has periodic boundary conditions {structure.pbc}, but partial '\n 'periodic boundary conditions are not supported.'\n )\n else:\n inputs = generate_inputs(self.process_class._process_class, protocol, code, structure, override) # pylint: disable=protected-access\n\n builder._update(inputs) # pylint: disable=protected-access\n\n # RelaxType\n if relax_type == RelaxType.NONE:\n builder.abinit['parameters']['ionmov'] = 0 # do not move the ions, Abinit default\n elif relax_type == RelaxType.POSITIONS:\n # protocol defaults to POSITIONS\n pass\n elif relax_type == RelaxType.POSITIONS_CELL:\n builder.abinit['parameters']['optcell'] = 2 # fully optimize the cell geometry\n builder.abinit['parameters']['dilatmx'] = 1.15 # book additional mem. for p.w. basis exp.\n builder.abinit['parameters']['ecutsm'] = 0.5 # Ha, smearing on the energy cutoff\n elif relax_type == RelaxType.POSITIONS_VOLUME:\n builder.abinit['parameters']['optcell'] = 1 # optimize volume only\n builder.abinit['parameters']['dilatmx'] = 1.15 # book additional mem. for p.w. basis exp.\n builder.abinit['parameters']['ecutsm'] = 0.5 # Ha, smearing on the energy cutoff\n elif relax_type == RelaxType.POSITIONS_SHAPE:\n builder.abinit['parameters']['optcell'] = 3 # constant-volume optimization of cell geometry\n builder.abinit['parameters']['dilatmx'] = 1.05 # book additional mem. for p.w. 
basis exp.\n builder.abinit['parameters']['ecutsm'] = 0.5 # Ha, smearing on the energy cutoff\n else:\n raise ValueError(f'relax type `{relax_type.value}` is not supported')\n\n # SpinType\n if spin_type == SpinType.NONE:\n # protocol defaults to NONE\n pass\n elif spin_type == SpinType.COLLINEAR:\n if magnetization_per_site is None:\n magnetization_per_site = get_initial_magnetization(structure)\n warnings.warn(f'input magnetization per site was None, setting it to {magnetization_per_site}')\n magnetization_per_site = np.array(magnetization_per_site)\n\n sum_is_zero = np.isclose(sum(magnetization_per_site), 0.0)\n all_are_zero = np.all(np.isclose(magnetization_per_site, 0.0))\n non_zero_mags = magnetization_per_site[~np.isclose(magnetization_per_site, 0.0)]\n all_non_zero_pos = np.all(non_zero_mags > 0.0)\n all_non_zero_neg = np.all(non_zero_mags < 0.0)\n\n if all_are_zero: # non-magnetic\n warnings.warn(\n 'all of the initial magnetizations per site are close to zero; doing a non-spin-polarized '\n 'calculation'\n )\n elif ((sum_is_zero and not all_are_zero) or\n (not all_non_zero_pos and not all_non_zero_neg)): # antiferromagnetic\n print('Detected antiferromagnetic!')\n builder.abinit['parameters']['nsppol'] = 1 # antiferromagnetic system\n builder.abinit['parameters']['nspden'] = 2 # scalar spin-magnetization in the z-axis\n builder.abinit['parameters']['spinat'] = [[0.0, 0.0, mag] for mag in magnetization_per_site]\n elif not all_are_zero and (all_non_zero_pos or all_non_zero_neg): # ferromagnetic\n print('Detected ferromagnetic!')\n builder.abinit['parameters']['nsppol'] = 2 # collinear spin-polarization\n builder.abinit['parameters']['nspden'] = 2 # scalar spin-magnetization in the z-axis\n builder.abinit['parameters']['spinat'] = [[0.0, 0.0, mag] for mag in magnetization_per_site]\n else:\n raise ValueError(f'Initial magnetization {magnetization_per_site} is ambiguous')\n elif spin_type == SpinType.NON_COLLINEAR:\n if magnetization_per_site is None:\n magnetization_per_site = get_initial_magnetization(structure)\n warnings.warn(f'input magnetization per site was None, setting it to {magnetization_per_site}')\n # LATER: support vector magnetization_per_site\n builder.abinit['parameters']['nspinor'] = 2 # w.f. as spinors\n builder.abinit['parameters']['nsppol'] = 1 # spin-up and spin-down can't be disentangled\n builder.abinit['parameters']['nspden'] = 4 # vector magnetization\n builder.abinit['parameters']['spinat'] = [[0.0, 0.0, mag] for mag in magnetization_per_site]\n elif spin_type == SpinType.SPIN_ORBIT:\n builder.abinit['parameters']['nspinor'] = 2 # w.f. 
as spinors\n builder.abinit['parameters']['kptopt'] = 4 # no time-reversal symmetry\n else:\n raise ValueError(f'spin type `{spin_type.value}` is not supported')\n\n # ElectronicType\n if electronic_type == ElectronicType.UNKNOWN:\n # protocol defaults to UNKNOWN, which is metallic with Gaussian smearing\n pass\n elif electronic_type == ElectronicType.METAL:\n builder.abinit['parameters']['occopt'] = 3 # Fermi-Dirac\n elif electronic_type == ElectronicType.INSULATOR:\n # LATER: Support magnetization with insulators\n if spin_type not in [SpinType.NONE, SpinType.SPIN_ORBIT]:\n raise ValueError(f'`spin_type` {spin_type.value} is not supported for insulating systems.')\n builder.abinit['parameters']['occopt'] = 1 # Fixed occupations, Abinit default\n builder.abinit['parameters']['fband'] = 0.125 # Abinit default\n else:\n raise ValueError(f'electronic type `{electronic_type.value}` is not supported')\n\n # Continue force and stress thresholds from above (see molecule treatment)\n builder.abinit['parameters']['tolmxf'] = threshold_f\n if threshold_stress is not None:\n threshold_s = threshold_stress * units.eV_to_Ha / units.ang_to_bohr**3 # eV/โ„ซ^3\n strfact = threshold_f / threshold_s\n builder.abinit['parameters']['strfact'] = strfact\n\n # previous workchain\n if reference_workchain is not None:\n try:\n previous_kpoints = reference_workchain.inputs.kpoints\n except exceptions.NotExistentAttributeError as not_existent_attr_error:\n query_builder = orm.QueryBuilder()\n query_builder.append(orm.WorkChainNode, tag='relax', filters={'id': reference_workchain.id})\n query_builder.append(\n orm.WorkChainNode,\n tag='base',\n with_incoming='relax',\n )\n query_builder.append(\n orm.CalcFunctionNode,\n tag='calcfunc',\n edge_filters={'label': 'create_kpoints_from_distance'},\n with_incoming='base'\n )\n query_builder.append(orm.KpointsData, tag='kpoints', with_incoming='calcfunc')\n query_builder.order_by({orm.KpointsData: {'ctime': 'desc'}})\n query_builder_result = query_builder.all()\n if query_builder_result == []:\n msg = f'Could not find KpointsData associated with {reference_workchain}'\n raise ValueError(msg) from not_existent_attr_error\n previous_kpoints = query_builder_result[0][0]\n\n # ensure same k-points\n previous_kpoints_mesh, previous_kpoints_offset = previous_kpoints.get_kpoints_mesh()\n new_kpoints = orm.KpointsData()\n new_kpoints.set_cell_from_structure(structure)\n new_kpoints.set_kpoints_mesh(previous_kpoints_mesh, previous_kpoints_offset)\n builder.kpoints = new_kpoints\n\n # ensure same k-points shift\n shiftk = reference_workchain.inputs.abinit__parameters.get_dict().get('shiftk', None)\n if shiftk is not None:\n builder.abinit['parameters']['shiftk'] = shiftk\n\n nshiftk = reference_workchain.inputs.abinit__parameters.get_dict().get('nshiftk', None)\n if nshiftk is not None:\n builder.abinit['parameters']['nshiftk'] = nshiftk\n\n return builder", "def process_outputs(self, outputs, image_size):\n boxes = []\n box_class = []\n box_confidences = []\n i = 0\n for output in outputs:\n boxes.append(output[:, :, :, 0:4])\n box_class.append(self.sigmoid(output[:, :, :, 5:]))\n box_confidences.append(self.sigmoid(output[:, :, :, 4:5]))\n\n for box in boxes:\n H_box = box.shape[0]\n W_box = box.shape[1]\n anchor_box = box.shape[2]\n\n the_box = np.zeros((H_box, W_box, anchor_box))\n\n ind_x = np.arange(W_box)\n ind_y = np.arange(H_box)\n ind_x = ind_x.reshape(1, W_box, 1)\n ind_y = ind_y.reshape(H_box, 1, 1)\n\n box_x = the_box + ind_x\n box_y = the_box + ind_y\n\n tx = box[..., 
0]\n ty = box[..., 1]\n tw = box[..., 2]\n th = box[..., 3]\n\n sig_tx = self.sigmoid(tx)\n sig_ty = self.sigmoid(ty)\n\n bx = sig_tx + box_x\n by = sig_ty + box_y\n bx = bx / W_box\n by = by / H_box\n\n pw = self.anchors[i, :, 0]\n ph = self.anchors[i, :, 1]\n\n bw = pw * np.exp(tw)\n bh = ph * np.exp(th)\n\n inp_w = self.model.input.shape[1].value\n inp_h = self.model.input.shape[2].value\n\n bw = bw / inp_w\n bh = bh / inp_h\n\n x1 = bx - bw / 2\n y1 = by - bh / 2\n x2 = x1 + bw\n y2 = y1 + bh\n\n box[..., 0] = x1 * image_size[1]\n box[..., 1] = y1 * image_size[0]\n box[..., 2] = x2 * image_size[1]\n box[..., 3] = y2 * image_size[0]\n i = i + 1\n\n return (boxes, box_confidences, box_class)", "def build_from_config(cls, config: SolverConfig) -> \"SolverBase\":\n pass", "def __init__(self, env_config, paddle_length_factor):\n self.env_config = env_config\n self.frameskip = (2, 5)\n self.env = EnvPongDraft_Surface_Headless.EnvPong(render_screen = False, screen_scale = 1.0)\n self.scale_paddle_height(paddle_length_factor)\n self.seed()\n self._action_set = self.env.getMinimalActionSet()\n self.action_space = spaces.Discrete(len(self._action_set))\n screen_width = screen_height = 42\n self.zoom_val = 42 / 400\n self.observation_space = spaces.Box(low=0, high=255, shape=(screen_height, screen_width), dtype=np.float)", "def post_process(predictions, ground_truth, key, image, shape):\n ground_truth_bounding_boxes = []\n rcnn_bounding_boxes = []\n\n if key in predictions.keys():\n rcnn_bounding_boxes = predictions[key]\n\n if key in ground_truth:\n ground_truth_bounding_boxes = ground_truth[key]\n\n if DEBUG_LEVEL > 1:\n for j in rcnn_bounding_boxes:\n cv2.rectangle(\n image,\n (j.x1, j.y1),\n (j.x1 + j.w, j.y1 + j.h),\n (150, 180, 255),\n max(1, image.shape[0] // 400),\n )\n\n if args.ocr:\n ocr = read_ocr(\n os.path.join(args.images, key),\n os.path.join(args.ocr, key.replace(\".png\", \".pkl\")),\n )\n\n if DEBUG_LEVEL > 1:\n for j in ocr:\n cv2.rectangle(\n image, (j.x1, j.y1), (j.x1 + j.w, j.y1 + j.h), (20, 20, 0), 1\n )\n\n ground_truth_bounding_boxes = tight_fit(ground_truth_bounding_boxes, ocr)\n rcnn_bounding_boxes = tight_fit(rcnn_bounding_boxes, ocr)\n\n if DEBUG_LEVEL > 0:\n for j in rcnn_bounding_boxes:\n cv2.rectangle(\n image,\n (j.x1 - 5, j.y1 - 5),\n (j.x1 - 5 + j.w, j.y1 - 5 + j.h),\n (20, 50, 200),\n max(1, image.shape[0] // 400),\n )\n\n if DEBUG_LEVEL > 0:\n for j in ground_truth_bounding_boxes:\n cv2.rectangle(\n image,\n (j.x1, j.y1),\n (j.x1 + j.w, j.y1 + j.h),\n (0, 255, 80),\n max(1, image.shape[0] // 400),\n )\n\n if DEBUG_LEVEL > 1:\n for j in rcnn_bounding_boxes:\n cv2.putText(\n image,\n str(j.prob),\n (j.x1, j.y1),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1,\n (50, 100, 255),\n 4,\n cv2.LINE_AA,\n )\n\n return ground_truth_bounding_boxes, rcnn_bounding_boxes, image", "def __init__(self, ex_grey_box_block, vardata_to_idx, con_offset):\n self._block = ex_grey_box_block\n self._ex_model = ex_grey_box_block.get_external_model()\n self._n_primals = len(vardata_to_idx)\n n_inputs = len(self._block.inputs)\n assert n_inputs == self._ex_model.n_inputs()\n n_eq_constraints = self._ex_model.n_equality_constraints()\n n_outputs = len(self._block.outputs)\n assert n_outputs == self._ex_model.n_outputs()\n\n # store the map of input indices (0 .. n_inputs) to\n # the indices in the full primals vector\n self._inputs_to_primals_map = np.fromiter(\n (vardata_to_idx[v] for v in self._block.inputs.values()),\n dtype=np.int64,\n count=n_inputs,\n )\n\n # store the map of output indices (0 .. 
n_outputs) to\n # the indices in the full primals vector\n self._outputs_to_primals_map = np.fromiter(\n (vardata_to_idx[v] for v in self._block.outputs.values()),\n dtype=np.int64,\n count=n_outputs,\n )\n\n if (\n self._ex_model.n_outputs() == 0\n and self._ex_model.n_equality_constraints() == 0\n ):\n raise ValueError(\n 'ExternalGreyBoxModel has no equality constraints '\n 'or outputs. It must have at least one or both.'\n )\n\n self._ex_eq_duals_to_full_map = None\n if n_eq_constraints > 0:\n self._ex_eq_duals_to_full_map = list(\n range(con_offset, con_offset + n_eq_constraints)\n )\n\n self._ex_output_duals_to_full_map = None\n if n_outputs > 0:\n self._ex_output_duals_to_full_map = list(\n range(\n con_offset + n_eq_constraints,\n con_offset + n_eq_constraints + n_outputs,\n )\n )\n\n # we need to change the column indices in the jacobian\n # from the 0..n_inputs provided by the external model\n # to the indices corresponding to the full Pyomo model\n # so we create that here\n self._eq_jac_primal_jcol = None\n self._outputs_jac_primal_jcol = None\n self._additional_output_entries_irow = None\n self._additional_output_entries_jcol = None\n self._additional_output_entries_data = None\n self._con_offset = con_offset\n self._eq_hess_jcol = None\n self._eq_hess_irow = None\n self._output_hess_jcol = None\n self._output_hess_irow = None", "def test_base_dense_head_get_bboxes__rknn():\n ssd_head = get_ssd_head_model()\n ssd_head.cpu().eval()\n s = 128\n img_metas = [{\n 'scale_factor': np.ones(4),\n 'pad_shape': (s, s, 3),\n 'img_shape': (s, s, 3)\n }]\n output_names = ['output']\n input_names = []\n for i in range(6):\n input_names.append('cls_scores_' + str(i))\n input_names.append('bbox_preds_' + str(i))\n dynamic_axes = None\n deploy_cfg = mmengine.Config(\n dict(\n backend_config=dict(type=Backend.RKNN.value),\n onnx_config=dict(\n input_names=input_names,\n output_names=output_names,\n input_shape=None,\n dynamic_axes=dynamic_axes),\n codebase_config=dict(\n type='mmdet',\n task='ObjectDetection',\n model_type='rknn',\n post_processing=dict(\n score_threshold=0.05,\n iou_threshold=0.5,\n max_output_boxes_per_class=200,\n pre_top_k=5000,\n keep_top_k=100,\n background_label_id=-1,\n ))))\n\n # For the ssd_head:\n # the cls_score's size: (1, 30, 20, 20), (1, 30, 10, 10),\n # (1, 30, 5, 5), (1, 30, 3, 3), (1, 30, 2, 2), (1, 30, 1, 1)\n # the bboxes's size: (1, 24, 20, 20), (1, 24, 10, 10),\n # (1, 24, 5, 5), (1, 24, 3, 3), (1, 24, 2, 2), (1, 24, 1, 1)\n feat_shape = [20, 10, 5, 3, 2, 1]\n num_prior = 6\n seed_everything(1234)\n cls_score = [\n torch.rand(1, 30, feat_shape[i], feat_shape[i])\n for i in range(num_prior)\n ]\n seed_everything(5678)\n bboxes = [\n torch.rand(1, 24, feat_shape[i], feat_shape[i])\n for i in range(num_prior)\n ]\n\n # to get outputs of onnx model after rewrite\n img_metas[0]['img_shape'] = [s, s]\n wrapped_model = WrapModel(\n ssd_head, 'get_bboxes', img_metas=img_metas, with_nms=True)\n rewrite_inputs = {\n 'cls_scores': cls_score,\n 'bbox_preds': bboxes,\n }\n rewrite_outputs, is_backend_output = get_rewrite_outputs(\n wrapped_model=wrapped_model,\n model_inputs=rewrite_inputs,\n deploy_cfg=deploy_cfg,\n run_with_backend=False)\n\n # output should be of shape [1, N, 4]\n assert rewrite_outputs[0].shape[-1] == 4", "def predict(self, config: Dict[str, Any]) -> None:\n try:\n _ = self.validator_predict(config)\n except JsonSchemaException as ex:\n raise PipelineConfigError(ex.message) from ex" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check magic bytes to figure out the filetype
def check_filetype(filename): max_len = max(len(x) for x in magic_dict2) with open(filename) as f: file_start = f.read(max_len) for magic, filetype in magic_dict2.items(): if file_start.startswith(magic): return filetype(filename) return filename
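The snippet above depends on a module-level magic_dict2 that maps magic-byte prefixes to handlers, which is not shown in this document. The sketch below is an assumption of what that mapping and a binary-safe variant of the check might look like; the handler names and the choice of file types are illustrative only.

# Hypothetical sketch: magic_dict2 and its handlers are assumed, not part of the original.
import gzip
import zipfile

def open_gzip(filename):
    return gzip.open(filename, "rb")

def open_zip(filename):
    return zipfile.ZipFile(filename)

# Magic-byte prefixes mapped to handler callables. Keys are bytes, so the file
# has to be read in binary mode for startswith() to match.
magic_dict2 = {
    b"\x1f\x8b\x08": open_gzip,   # gzip
    b"PK\x03\x04": open_zip,      # zip
}

def check_filetype_binary(filename):
    max_len = max(len(magic) for magic in magic_dict2)
    with open(filename, "rb") as f:   # binary mode keeps the magic bytes intact
        file_start = f.read(max_len)
    for magic, handler in magic_dict2.items():
        if file_start.startswith(magic):
            return handler(filename)
    return filename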
[ "def _get_magic_type(self):\n\n try:\n with io.open(self.disk.get_fs_path(), \"rb\") as file:\n file.seek(self.offset)\n fheader = file.read(min(self.size, 4096) if self.size else 4096)\n except IOError:\n logger.exception(\"Failed reading first 4K bytes from volume.\")\n return None\n\n # TODO fallback to img-cat image -s blocknum | file -\n # if we were able to load the module magic\n try:\n # noinspection PyUnresolvedReferences\n import magic\n\n if hasattr(magic, 'from_buffer'):\n # using https://github.com/ahupp/python-magic\n logger.debug(\"Using python-magic Python package for file type magic\")\n result = magic.from_buffer(fheader)\n self.info['magic_data'] = result\n return result\n\n elif hasattr(magic, 'open'):\n # using Magic file extensions by Rueben Thomas (Ubuntu python-magic module)\n logger.debug(\"Using python-magic system package for file type magic\")\n ms = magic.open(magic.NONE)\n ms.load()\n result = ms.buffer(fheader)\n ms.close()\n self.info['magic_data'] = result\n return result\n\n else:\n logger.warning(\"The python-magic module is not available, but another module named magic was found.\")\n\n except ImportError:\n logger.warning(\"The python-magic module is not available.\")\n except AttributeError:\n logger.warning(\"The python-magic module is not available, but another module named magic was found.\")\n return None # returning None is better here, since we do not care about the exception in determine_fs_type", "def mimetype(f, t):\n is_t = False\n m = magic.Magic(mime=True)\n ftype = m.from_buffer(f)\n if t in ftype:\n is_t = True\n return is_t", "def _check_magick(self):\n\n self.seek(0,0)\n self.ei_magic = self.read(4)\n classes = {0:'Invalid',1:'32-bit',2:'64-bit'}\n if self.ei_magic != '\\x7fELF':\n raise RuntimeError(\"input {0} doesn't contain supported ELF header\".format(self.name))\n\n self.ei_class = classes[ord(self.read(1))]", "def detect_file_type(self):\n pfile = subprocess.Popen([self.path_file, '-b', '--mime-type', self.input_file], stdout=subprocess.PIPE,\n stderr=subprocess.DEVNULL, shell=self.shell_mode)\n pfile_output, pfile_errors = pfile.communicate()\n pfile.wait()\n self.input_file_type = pfile_output.decode(\"utf-8\").strip()\n self.log(\"Input file {0}: type is {1}\".format(self.input_file, self.input_file_type))", "def detect_file_format(data):\n lines = data.split('\\n')\n for line in lines:\n if 'M48' in line:\n return 'excellon'\n elif '%FS' in line:\n return'rs274x'\n return 'unknown'", "def check_magic_no(header):\n try:\n magic_no = ((header[0] << 8) + header[1]).to_bytes(2, 'big')\n if int.from_bytes(magic_no, 'big') != 0x497E:\n sys.exit(1)\n print('Magic number acceptable.\\n')\n\n except:\n print('Error while checking the magic number\\n')\n sys.exit(1)", "def CanReadFile(filename, magic):", "def test_has_mimetype_no_full_type(self):\n with mock.patch('kittengroomer.helpers.magic.from_file',\n return_value='data'):\n file = FileBase(Path('non_existent'), Path('non_existent'))\n assert file.has_mimetype is False", "def get_content_type(self, file_):\n magic = Magic(mime=True)\n return magic.from_buffer(file_.read(1024))", "def _determine_image_type(self, stream_first_4_bytes):\n file_type = None\n bytes_as_hex = b2a_hex(stream_first_4_bytes)\n if bytes_as_hex.startswith(b'ffd8'):\n file_type = '.jpeg'\n elif bytes_as_hex == b'89504e47':\n file_type = '.png'\n elif bytes_as_hex == b'47494638':\n file_type = '.gif'\n elif bytes_as_hex.startswith(b'424d'):\n file_type = '.bmp'\n return file_type", "def get_filetype(data: 
Any) -> Optional[RayFileType]:\n return None", "def test_determine_function_returns_octet_stream_for_unknown_formats(self):\n typ = determine_content_type(\"unknown.format\")\n assert typ == \"application/octet-stream\"", "def test_get_file_by_ext_bad_type(self):\n no_file_type = self.convert.get_file_type_by_ext('test')\n self.assertEqual(self.convert.UNKNOWN_FILE, no_file_type)\n backup_file_type = self.convert.get_file_type_by_ext('test.xls.bak')\n self.assertEqual(self.convert.UNKNOWN_FILE, backup_file_type)", "def check_source_file_type(origin_file):\n\n return mimetypes.guess_type(origin_file)[0].find('text') >= 0", "def _testFile(fname) :\n\tsuffixes = ['jpg','jpeg','pdf']\n\thdr = imghdr.what(fname)\n\tif hdr == None or hdr != 'jpeg' :\n\t\tfor sfx in suffixes :\n\t\t\tif fname.lower().endswith(sfx) :\n\t\t\t\treturn True\n\t\t# if we got here then, unfortunately, this is not a valid file type\n\t\treturn False\n\telse:\n\t\treturn True", "def get_file_type(self, f):\n soi = f.read(4)\n try:\n if soi[0:4] == [b'II*\\x00', b'MM\\x00*']:\n # TIFF Image file\n return self._types['TIFF']\n elif soi[0:2] == b'\\xFF\\xD8':\n # JPEG Image file\n ExifJPEG(f)\n return self._types['JPEG']\n else:\n raise ExifTypeError( 'Exif not available' )\n except ExifTypeError as e:\n logging.info( e.parameter )", "def test_determine_function_returns_a_string(self):\n typ = determine_content_type(\"me.pdf\")\n\n assert isinstance(typ, str)", "def check_file_type(self, file_path, file_type):\n\n #file_extension\n file_extension = os.path.splitext(file_path)[-1][1:]\n\n if(file_extension == file_type):\n return True\n\n return False", "def filetype(filename):\n if is_mapping(filename):\n return \"mapping\"\n elif filename.endswith(\".fits\"):\n return \"fits\"\n elif filename.endswith(\".yaml\"):\n return \"yaml\"\n elif filename.endswith(\".json\"):\n return \"json\"\n elif filename.endswith(\".asdf\"):\n return \"asdf\"\n elif filename.endswith(\".txt\"):\n return \"text\"\n elif re.match(r\".*\\.r[0-9][hd]$\", filename): # GEIS header\n return \"geis\"\n else:\n return \"unknown\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Procedure for reading both sequences and stitching them together. Unless specified, it will read 10^8 sequences from the supplied read
def single_read(read1, direction = 5, nbrofitems = 10**8, fileout = None):
    seqFreqs = Counter()

    # TODO: Enforce trimming parameters (or rather YAML config file)
    if cfg is not None:
        trim5 = cfg["Trim"]["fwdread"]
        trim3 = cfg["Trim"]["revread"]
    else:
        trim5 = [27, None]
        trim3 = [21, 150]

    for rec in islice(read1, nbrofitems):
        if direction == 5:
            rec = rec[trim5[0] : trim5[1]]  # Trim the primer variable sequence
        else:
            rec = rec[trim3[0] : trim3[1]].reverse_complement()  # Trim the low Q half of the 3' read, the primer AND take rev complement

        aaSeq = rec.seq.translate()
        if filter_seq(rec, direction):
            seqFreqs.update({ str(aaSeq) : 1 })

        global nseqs
        nseqs += 1

    if args.no_trim is not True:
        """ Trim out sequences that occur just once """
        seqFreqs = seqFreqs - Counter(k for k in seqFreqs.keys())

    if fileout is not None:
        fout = open(fileout, "w")
        sys.stdout = fout
        jsonf = os.path.join(os.path.dirname(fileout), "seqdata.json")
        with open(jsonf, 'w') as fp:
            json.dump(seqFreqs, fp, indent=4)

    pprint(seqFreqs.most_common(100), width = 120)

    if fileout is not None:
        sys.stdout = sys.__stdout__
        fout.close()
[ "def paired_read(read1, read2, nbrofitems = 10**8, fileout = None):\n seqFreqs = Counter()\n\n # TODO: Enfore trimming parameters (or rather YAML config file)\n if args.config is not None:\n trim5 = cfg[\"Trim\"][\"fwdread\"]\n trim3 = cfg[\"Trim\"][\"revread\"]\n else:\n trim5 = [27,None]\n trim3 = [21, 150]\n\n for rec1, rec2 in islice(zip(read1, read2), nbrofitems):\n\n rec1 = rec1[trim5[0] : trim5[1]] # Trim the primer variable sequence\n rec2 = rec2[trim3[0] : trim3[1]].reverse_complement() # Trim the low Q half of the 3' read, the primer AND take rev complement\n\n global nseqs \n nseqs += 1\n\n if filter_seq(rec1, direction=5) and filter_seq(rec2, direction=3):\n aa1 = rec1.seq.translate()\n aa2 = rec2.seq.translate()\n\n # Stitch the strings together\n if args.config is not None:\n i = str(aa1).rfind(cfg[\"Stitching\"][\"f_anchor\"])\n j = str(aa2).find(cfg[\"Stitching\"][\"r_anchor\"])\n \n # Check whether or not stitching is done in the expected place\n # TODO: this should be done in a more graceful way\n if i < len(str(aa1)) * 0.75:\n print(\"Warning: linker anchor on VH side not found where it was expected (i = {})\".format(i))\n print(\"read1: {} (i = {})\".format(str(aa1), i))\n\n if j > len(str(aa2)) * 0.25:\n print(\"Warning: linker anchor on VL side not found where it was expected (j = {})\".format(j))\n print(\"read2: {} (j = {})\".format(str(aa2),j))\n \n else:\n i = None\n j = None\n\n aakey = str(aa1)[:i] + linker_str + str(aa2)[j:]\n seqFreqs.update({ aakey : 1 }) \n\n if args.append_summary is not None:\n \"\"\" Export read stats before trimming sequences that occur just once \"\"\" \n filtseqs = sum(seqFreqs.values())\n dist_seqs = len(list(seqFreqs))\n\n promille_seqs = 0\n for k,v in islice(seqFreqs.most_common(), 1000):\n if v > filtseqs / 1000:\n promille_seqs +=1 \n else:\n break\n\n with open(args.append_summary, 'a') as statfile:\n print(os.path.dirname(fileout), nseqs, lowQSeq, starSeqs, filtseqs, dist_seqs, promille_seqs, sep=\"\\t\", file=statfile)\n\n if args.no_trim is not True:\n \"\"\" Trim out sequences that occur just once \"\"\"\n seqFreqs = seqFreqs - Counter(k for k in seqFreqs.keys())\n\n if fileout is not None:\n fout = open(fileout, \"w\")\n sys.stdout = fout\n\n outdir = os.path.dirname(fileout)\n jsonf = os.path.join(outdir, \"seqdata_paired.json\")\n\n with open(jsonf, 'w') as fp:\n json.dump(seqFreqs, fp, indent=4)\n\n pprint(seqFreqs.most_common(100), width = 240)\n \n if fileout is not None:\n sys.stdout = sys.__stdout__\n fout.close()", "def write_split_read(read, outsam, num_copies):\n\n spans_left_breakp = read.reference_start < start < read.reference_end\n spans_right_breakp = read.reference_start < end < read.reference_end\n left_matching_bp = start - read.reference_start\n right_matching_bp = read.reference_end - end\n if num_copies < 1:\n if spans_left_breakp and spans_right_breakp:\n # pick one with more matching bp\n clip_left = left_matching_bp < right_matching_bp\n elif spans_left_breakp:\n clip_left = False\n elif spans_right_breakp:\n clip_left = True\n else:\n raise ValueError('Internal disagreement as to whether read should be split.')\n if clip_left:\n breakpoint = end - read.reference_start\n else:\n breakpoint = start - read.reference_start\n elif num_copies > 1:\n if spans_left_breakp and spans_right_breakp:\n clip_left = left_matching_bp < right_matching_bp\n elif spans_left_breakp:\n clip_left = True\n elif spans_right_breakp:\n clip_left = False\n else:\n raise ValueError('Internal disagreement as to whether read 
should be split.')\n if clip_left:\n breakpoint = start - read.reference_start\n else:\n breakpoint = end - read.reference_start\n\n # If the breakpoint is beyond the read, just write the original one and bail.\n # This happens with reads that have significant gaps between matching blocks.\n if breakpoint >= read.rlen:\n outsam.write(read)\n return 1\n\n # Use the reverse 'alternate sequence' in case of left clipping, so that it always terminates in the same base.\n # To visualize this, in the following reads only the replaced portion is visible:\n #\n # Left clip: # Right clip:\n # #\n # breakpoint # breakpoint\n # | # |\n # v # v\n # ACGTACGT---------- # -----------ACGT\n # TACGT---------------- # -------ACGTAC\n # GT--------------------- # -----ACGTACGTA\n sequence_for_replacement = left_clip_seq if clip_left else right_clip_seq\n split_read = make_split_read(read, breakpoint, clip_left, sequence=sequence_for_replacement)\n\n # Write variants of the read.\n reads_written = 0\n if num_copies >= 1:\n # If this is a duplication, first write the 'original' split read, then generate modifications of it.\n outsam.write(read)\n reads_written = 1 + write_copies(split_read, outsam, num_copies - 1)\n else:\n # Assume heterozygous deletion - that is, a 50% chance of writing the original read rather than the split one.\n reads_written += write_copies(read, outsam, num_copies)\n reads_written += write_copies(split_read, outsam, 1 - num_copies)\n\n return reads_written", "def convert_reads(inputf,output):\n f = open(inputf,'r')\n g = open(output,'w')\n header = f.readline().rstrip()\n header = header.replace(\" \",\"!\")\n seq = f.readline()\n header2 = f.readline()\n qual = f.readline()\n encoding = encode_c_positions(seq)\n\n while header:\n g.write(header+\"!\"+encoding+\"\\n\")\n converted_seq = seq.replace(\"C\",\"T\")\n g.write(converted_seq)\n g.write(header2)\n g.write(qual)\n\n\n header = f.readline().rstrip()\n header = header.replace(\" \",\"!\")\n seq = f.readline()\n header2 = f.readline()\n qual = f.readline()\n encoding = encode_c_positions(seq)\n f.close()\n g.close()", "def invert_read(read, start, end, sequence, snp_rate, indel_rate, max_clip_len=None):\n inv_len = end - start\n if start >= read.reference_end or end <= read.reference_start or inv_len < 2:\n return read, 0\n\n read_with_inversion = copy.deepcopy(read)\n read_with_inversion.qname = read_with_inversion.query_name = read.qname + '-' + 'inv'\n\n if read.reference_start <= start < end <= read.reference_end:\n # Read spans the entire inversion.\n left_breakpoint = start - read.reference_start\n right_breakpoint = left_breakpoint + inv_len\n read_with_inversion.seq = \"{left}{inv}{right}\".format(\n left=read.seq[:left_breakpoint],\n inv=\"\".join(reversed(read.seq[left_breakpoint:right_breakpoint])),\n right=read.seq[right_breakpoint:])\n\n # Clipped bases in reads must start at a read boundary; choose the closest one.\n # TODO: add a supplemental/secondary read where the shorter region is matched, and the longer one clipped.\n cigar_tuples = unpack_cigar(read.cigarstring)\n if left_breakpoint < read.rlen - right_breakpoint:\n start_clip, end_clip = 0, right_breakpoint\n else:\n start_clip, end_clip = left_breakpoint, read.rlen\n for i in range(start_clip, end_clip):\n cigar_tuples[i] = '1S'\n\n read_with_inversion.cigarstring = str(Cigar(\"\".join(cigar_tuples)).merge_like_ops())\n\n elif start <= read.reference_start < read.reference_end <= end:\n # Inversion spans the entire read.\n pos_in_inversion = 
read.reference_start - start\n inv_seq = sequence[pos_in_inversion:pos_in_inversion + read.rlen]\n read_with_inversion = make_split_read(read_with_inversion, 0, clip_left=False, sequence=inv_seq)\n\n # If a read was reversed, modify its strand.\n read_with_inversion.is_reverse = not read.is_reverse\n\n elif start > read.reference_start:\n # Inversion starts mid-read, continuing to the end of it (or past it).\n breakpoint = start - read.reference_start\n read_with_inversion = make_split_read(read_with_inversion, breakpoint, clip_left=False, sequence=sequence)\n\n elif end < read.reference_end:\n # Inversion starts before the read, continuing into it.\n breakpoint = end - read.reference_start\n read_with_inversion = make_split_read(read_with_inversion, breakpoint, clip_left=True, sequence=sequence)\n\n if max_clip_len and int(max_clip_len) < get_max_clip_len(read_with_inversion):\n return None, 0\n\n # Add noise.\n return modify_read(read_with_inversion, snp_rate, indel_rate / 2, indel_rate / 2)", "def map_reads(processed_reads, avrg_read_len, settings):\n\n PAS_sites = ['AATAAA', 'ATTAAA', 'TATAAA', 'AGTAAA', 'AAGAAA', 'AATATA',\n 'AATACA', 'CATAAA', 'GATAAA', 'AATGAA', 'TTTAAA', 'ACTAAA',\n 'AATAGA']\n\n pas_patterns = [re.compile(pas) for pas in PAS_sites]\n\n # File path of the mapped reads\n mapped_reads = os.path.splitext(processed_reads)[0]+'_mapped'\n\n # Naming the final output\n polybed_path = os.path.splitext(processed_reads)[0] + '_mapped.bed'\n\n # How many mismatches depend on read length\n if avrg_read_len < 50:\n mismatch_nr = 1\n elif 50 < avrg_read_len < 100:\n mismatch_nr = 2\n elif 100 < avrg_read_len:\n mismatch_nr = 3\n\n ### mapping trimmed reads\n ### DEBUG skipping this step for speedy work :) XXX\n command = \"gem-mapper -I {0} -i {1} -o {2} -q ignore -m {3}\"\\\n .format(settings.gem_index, processed_reads, mapped_reads, mismatch_nr)\n\n p = Popen(command.split())\n p.wait()\n\n # Accept mismatches according to average read length\n acceptables = {1: set(('1:0', '0:1')), 2: set(('1:0:0', '0:1:0', '0:0:1')),\n 3: set(('1:0:0:0', '0:1:0:0', '0:0:1:0', '0:0:0:1'))}\n\n acceptable = acceptables[mismatch_nr]\n getstrand = {'R':'-', 'F':'+'}\n start_re = re.compile('[0-9]*')\n\n reads_file = open(polybed_path, 'wb')\n\n # count the number of noisy reads and total reads\n noisecount = 0\n allcount = 0\n\n for line in open(mapped_reads + '.0.map', 'rb'):\n (at, tail_info, seq, mapinfo, position) = line.split('\\t')\n\n # Acceptable reads and poly(A) reads are mutually exclusive.\n if mapinfo in acceptable:\n allcount += 1\n # Get chromosome, strand, and beg\n (chrom, rest) = position.split(':')\n strand = getstrand[rest[0]]\n beg = start_re.match(rest[1:]).group()\n\n ## Don't write if this was a noisy read\n if read_is_noise(chrom, strand, beg, seq, at, tail_info, settings):\n noisecount += 1\n continue\n\n # Get the PAS and the PAS distance of this read\n PAS, PAS_dist = get_early_PAS(seq, at, pas_patterns)\n\n if PAS != 'NA':\n nPAS = ':'.join(PAS)\n nPAS_dist = ':'.join([str(d) for d in PAS_dist])\n else:\n nPAS = 'NA'\n nPAS_dist = 'NA'\n\n # Write to file in .bed format\n name = '#'.join([at, tail_info, nPAS, nPAS_dist])\n\n reads_file.write('\\t'.join([chrom, beg, str(int(beg)+len(seq)), name,\n '0', strand]) + '\\n')\n\n # close file\n reads_file.close()\n\n # Write to logfile\n if allcount > 0:\n vals = (noisecount, allcount, noisecount/float(allcount))\n\n noiseinf = '\\nNoise reads: {0}\\nTotal reads: {1}\\nNoise ratio: {2:.2f}\\n'\\\n .format(*vals)\n\n 
noiselog = open('NOISELOG.LOG', 'ab')\n noiselog.write('-'*80+'\\n')\n noiselog.write(polybed_path)\n noiselog.write(noiseinf)\n noiselog.write('-'*80+'\\n')\n noiselog.close()\n\n return polybed_path", "def extract_reads_from_PE_fastq(fname_iPCR_PE1, fname_iPCR_PE2):\n\n # This is the scarcode that allows to identify which\n # experiment is sequenced (must be CT).\n matcher = seeq.compile('CGCTAATTAATGGAATCATG', 3)\n\n outf1 = open('CT_TCT.fasta', 'w')\n outf2 = open('CT_ACG.fasta', 'w')\n\n # There are many errors in the index, especially in the\n # first base. The most frequent errors are hard coded\n # in the dictionary so that the reads are written to the\n # proper file.\n outfiles = {\n 'TCT': outf1,\n 'GCT': outf1,\n 'ACT': outf1,\n 'ACG': outf2,\n 'AGG': outf2,\n 'CCG': outf2,\n }\n\n with gzopen(fname_iPCR_PE1) as f, gzopen(fname_iPCR_PE2) as g:\n for lineno,(line1,line2) in enumerate(izip(f,g)):\n # Take sequence lines of the fastq files.\n if lineno % 4 != 1: continue\n\n brcd = trimSuffix(matcher, line1)\n # If we find a barcode between 13 and 25 nucleotides\n # then the scarcode must have been the right one.\n if len(brcd) < 13 or len(brcd) > 25: continue\n\n # Remove first 25 nucleotides.\n suff = line2.rstrip()[25:].split('CATG')[0]\n # Cut genome fragment after the first CATG.\n genome = re.sub(r'CATG.*', 'CATG', suff)\n\n # Avoid short strings that are unmappable.\n if len(genome) < 20:\n genome = 'gatcctgatgctagtgactgatgagctgctgaagctgga'\n\n # The first 3 nucleotides of the reverse read are the\n # index. Check that it belongs to the right group.\n idx = line2[:3]\n if idx in outfiles:\n outf = outfiles[idx]\n outf.write('>%s\\n%s\\n' % (brcd,genome))", "def get_aligned_segment_from_read(self, read):\n\n read_alignment_start = read.reference_start\n # read_alignment_stop = self.get_read_stop_position(read)\n\n cigar_tuples = read.cigartuples\n read_sequence = read.query_sequence\n read_id = read.query_name\n # read_quality = read.query_qualities\n\n # read_index: index of read sequence\n # ref_index: index of reference sequence\n read_index = 0\n ref_index = 0\n found_valid_cigar = False\n completion_status = False\n\n if read_id in self.read_start_indices:\n print(\"WARNING: read_id hash conflict\", read_id)\n\n for c,cigar in enumerate(cigar_tuples):\n cigar_code = cigar[0]\n length = cigar[1]\n\n # get the sequence segments that are effected by this operation\n # read_quality_segment = read_quality[read_index:read_index + length]\n read_sequence_segment = read_sequence[read_index:read_index + length]\n\n # skip parsing the first segment if it is not a match\n if cigar_code != 0 and found_valid_cigar is False:\n # only increment the read index if the non-match cigar code is INS or SOFTCLIP\n if cigar_code == 1 or cigar_code == 4:\n read_index += length\n continue\n found_valid_cigar = True\n\n # send the cigar tuple to get attributes we got by this operation\n ref_index_increment, read_index_increment, completion_status = \\\n self.parse_cigar_tuple(read_index=read_index,\n cigar_code=cigar_code,\n length=length,\n alignment_position=read_alignment_start + ref_index,\n read_sequence=read_sequence_segment,\n read_id=read_id,\n completion_status=completion_status)\n\n # increase the read index iterator\n read_index += read_index_increment\n ref_index += ref_index_increment\n\n if completion_status or c == len(cigar_tuples) - 1:\n start_index = self.read_start_indices[read_id]\n end_index = self.read_end_indices[read_id]\n\n segment_alignment_start = 
self.read_alignment_starts[read_id]\n segment_alignment_end = self.read_alignment_ends[read_id]\n\n # to simulate Paolo Carnevali's data, all reads should span the full region, match on start and end pos.\n if self.exclude_loose_ends:\n if segment_alignment_start == self.start_position and segment_alignment_end == self.end_position:\n if self.padding is not None and self.padding_end_offset is not None:\n # print(start_index - self.padding, end_index + self.padding + self.padding_end_offset)\n # print(start_index, end_index)\n # print(read_sequence[start_index - self.padding:end_index + self.padding + self.padding_end_offset + 1])\n # print(self.padding*\" \"+read_sequence[start_index:end_index + 1])\n\n start_index = start_index - self.padding\n end_index = end_index + self.padding + self.padding_end_offset\n\n sequence = read_sequence[start_index:end_index + 1]\n\n if len(sequence) < SEQUENCE_LENGTH_CUTOFF_FACTOR*self.window_size:\n self.sequences[read_id] = sequence\n\n else:\n if self.padding is not None and self.padding_end_offset is not None:\n\n start_index = start_index - self.padding\n end_index = end_index + self.padding + self.padding_end_offset\n\n sequence = read_sequence[start_index:end_index + 1]\n\n if len(sequence) < SEQUENCE_LENGTH_CUTOFF_FACTOR * self.window_size:\n self.sequences[read_id] = sequence\n else:\n print(\"excessive read length found for region\", len(sequence), self.window_size)\n\n # if the read segment has been obtained then fetch its directionality (Forward/Reverse), True if Reverse\n self.reversal_status[read_id] = read.is_reverse\n\n # else:\n # print(\"incomplete read segment\")\n # print(\"expected interval:\", self.start_position, self.end_position)\n # print(\"segment interval:\", segment_alignment_start, segment_alignment_end)\n # if len(sequence) == 0:\n # print()\n # print(\"***WARNING***: EMPTY SEQUENCE!\")\n # print(read_id)\n # # print(cigar_tuples)\n # print(\"start i\\t\", start_index)\n # print(\"end i\\t\", end_index)\n # print(\"start pos\\t\\t\", read.reference_start)\n # print(\"len\\t\\t\\t\\t\", len(read_sequence))\n # print(\"start + length\\t\", read.reference_start + len(read_sequence))\n # print(sequence)\n # # print(read_sequence)\n # # print(''.join([str(c[0])*c[1] for c in cigar_tuples]))\n # print()\n # else:\n # print()\n # print(\"GOOD SEQUENCE!\")\n # print(read_id)\n # # print(cigar_tuples)\n # print(\"start i\\t\",start_index)\n # print(\"end i\\t\", end_index)\n # print(\"start pos\\t\\t\", read.reference_start)\n # print(\"len\\t\\t\\t\\t\", len(read_sequence))\n # print(\"start + length\\t\", read.reference_start + len(read_sequence))\n # print(sequence)\n # # print(read_sequence)\n # # print(''.join([str(c[0])*c[1] for c in cigar_tuples]))\n # print()\n\n break\n\n return True", "def write_read_seqs(both_read_seqs, keep, remap_bam, fastqs, dropped=None, remap_num=0):\n reads, seqs = zip(*both_read_seqs)\n assert len(reads) == len(fastqs)\n num_seqs = len(both_read_seqs[0][1])\n if num_seqs == 0:\n if dropped is not None:\n for read in reads:\n dropped.write(read)\n return 0\n else:\n return 0\n elif num_seqs == 1:\n for read, seqs in both_read_seqs:\n keep.write(read)\n else:\n assert len(reads) > 0\n for read in reads:\n remap_bam.write(read)\n left_pos = min(r.pos for r in reads)\n right_pos = max(r.pos for r in reads)\n loc_line = '{}:{}:{}:{}:{}'.format(\n remap_num,\n read.reference_name,\n left_pos,\n right_pos,\n len(seqs[0])-1,\n )\n\n first = True\n # Some python fanciness to deal with single or paired end 
reads (or\n # n-ended reads, if such technology ever happens.\n for read_seqs in zip(*seqs):\n if first:\n first = False\n continue\n for seq, read, fastq in zip(read_seqs, reads, fastqs):\n assert len(seq) == len(read.qual)\n fastq.write(\n \"@{loc_line}\\n{seq}\\n+{loc_line}\\n{qual}\\n\"\n .format(\n loc_line=loc_line,\n seq=reverse_complement(seq) if read.is_reverse else seq,\n qual=read.qual)\n )\n return 1\n return 0", "def process_sample(job, inputs, tar_id):\n job.fileStore.logToMaster('Processing sample into read pairs: {}'.format(inputs.uuid))\n work_dir = job.fileStore.getLocalTempDir()\n # I/O\n tar_path = job.fileStore.readGlobalFile(tar_id, os.path.join(work_dir, 'sample.tar'))\n # Untar File and concat\n subprocess.check_call(['tar', '-xvf', tar_path, '-C', work_dir])\n os.remove(os.path.join(work_dir, 'sample.tar'))\n # Grab files from tarball\n fastqs = []\n for root, subdir, files in os.walk(work_dir):\n fastqs.extend([os.path.join(root, x) for x in files])\n # Check for read 1 and read 2 files\n r1 = sorted([x for x in fastqs if 'R1' in x])\n r2 = sorted([x for x in fastqs if 'R2' in x])\n if not r1 or not r2:\n # Check if using a different standard\n r1 = sorted([x for x in fastqs if '_1' in x])\n r2 = sorted([x for x in fastqs if '_2' in x])\n # Prune file name matches from each list\n if len(r1) > len(r2):\n r1 = [x for x in r1 if x not in r2]\n elif len(r2) > len(r1):\n r2 = [x for x in r2 if x not in r1]\n # Flag if data is single-ended\n assert r1 and r2, 'This pipeline does not support single-ended data. R1: {}\\nR2:{}'.format(r1, r2)\n command = 'zcat' if r1[0].endswith('gz') and r2[0].endswith('gz') else 'cat'\n with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1:\n p1 = subprocess.Popen([command] + r1, stdout=f1)\n with open(os.path.join(work_dir, 'R2.fastq'), 'w') as f2:\n p2 = subprocess.Popen([command] + r2, stdout=f2)\n p1.wait()\n p2.wait()\n # Write to fileStore\n r1_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq'))\n r2_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2.fastq'))\n job.fileStore.deleteGlobalFile(tar_id)\n # Start cutadapt step\n job.addChildJobFn(cutadapt, inputs, r1_id, r2_id, disk='60G').rv()", "def map_reads(\n read1_path,\n read2_path,\n tags,\n barcode_slice,\n umi_slice,\n indexes,\n whitelist,\n debug,\n start_trim,\n maximum_distance,\n sliding_window,\n):\n # Initiate values\n results = {}\n no_match = Counter()\n n = 1\n t = time.time()\n with gzip.open(read1_path, \"rt\") as textfile1, gzip.open(\n read2_path, \"rt\"\n ) as textfile2:\n\n # Read all 2nd lines from 4 line chunks. If first_n not None read only 4 times the given amount.\n secondlines = islice(\n zip(textfile1, textfile2), indexes[0] * 4 + 1, indexes[1] * 4 + 1, 4\n )\n for read1, read2 in secondlines:\n read1 = read1.strip()\n read2 = read2.strip()\n\n # Progress info\n if n % 1000000 == 0:\n print(\n \"Processed 1,000,000 reads in {}. 
Total \"\n \"reads: {:,} in child {}\".format(\n secondsToText.secondsToText(time.time() - t), n, os.getpid()\n )\n )\n sys.stdout.flush()\n t = time.time()\n\n # Get cell and umi barcodes.\n cell_barcode = read1[barcode_slice]\n # This change in bytes is required by umi_tools for umi correction\n UMI = bytes(read1[umi_slice], \"ascii\")\n # Trim potential starting sequences\n TAG_seq = read2[start_trim:]\n\n if cell_barcode not in results:\n results[cell_barcode] = defaultdict(Counter)\n\n if sliding_window:\n best_match = find_best_match_shift(TAG_seq, tags, maximum_distance)\n else:\n best_match = find_best_match(TAG_seq, tags, maximum_distance)\n\n results[cell_barcode][best_match][UMI] += 1\n\n if best_match == \"unmapped\":\n no_match[TAG_seq] += 1\n\n if debug:\n print(\n \"\\nline:{0}\\n\"\n \"cell_barcode:{1}\\tUMI:{2}\\tTAG_seq:{3}\\n\"\n \"line length:{4}\\tcell barcode length:{5}\\tUMI length:{6}\\tTAG sequence length:{7}\\n\"\n \"Best match is: {8}\".format(\n read1 + read2,\n cell_barcode,\n UMI,\n TAG_seq,\n len(read1 + read2),\n len(cell_barcode),\n len(UMI),\n len(TAG_seq),\n best_match,\n )\n )\n sys.stdout.flush()\n n += 1\n print(\n \"Mapping done for process {}. Processed {:,} reads\".format(os.getpid(), n - 1)\n )\n sys.stdout.flush()\n return (results, no_match)", "def test_single_two_read_two_snp_two_chrom(self):\n \n test_data = Data(read1_seqs = [\"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\",\n \"GGGGGGGGGGGGGGGGGGGGGGGGGGGGGG\"],\n read1_quals = [\"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\",\n \"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\"],\n genome_seqs = [\"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\\n\" +\n \"TTTTTTTTTTATTTTTTTTTTTTTTTTTTT\",\n \"GGGGGGGGGGGGGGGGGGGGGGGGGGGGGG\\n\" +\n \"CCCCCCCCCCGCCCCCCCCCCCCCCCCCCC\"],\n chrom_names = ['test_chrom1', 'test_chrom2'],\n snp_list = [['test_chrom1', 1, \"A\", \"C\"],\n ['test_chrom2', 3, \"G\", \"C\"]])\n \n test_data.setup()\n test_data.index_genome_bowtie2()\n test_data.map_single_bowtie2()\n test_data.sam2bam()\n\n find_intersecting_snps.main(test_data.bam_filename,\n snp_dir=test_data.snp_dir, is_paired_end=False,\n is_sorted=False)\n\n #\n # Verify new fastq is correct. The first base of the first read\n # should be switched from C to an A, and the third base of second read\n # should be switched from C to G\n #\n with gzip.open(test_data.fastq_remap_filename) as f:\n lines = [x.strip() for x in f.readlines()]\n assert len(lines) == 8\n\n l = list(test_data.read1_seqs[0])\n l[0] = 'C'\n new_seq = \"\".join(l)\n assert lines[1] == new_seq\n assert lines[3] == test_data.read1_quals[0]\n\n l = list(test_data.read1_seqs[1])\n l[2] = 'C'\n new_seq = \"\".join(l)\n\n assert lines[5] == new_seq\n assert lines[7] == test_data.read1_quals[1]\n \n #\n # Verify to.remap bam is the same as the input bam file.\n #\n old_lines = read_bam(test_data.bam_filename)\n new_lines = read_bam(test_data.bam_remap_filename)\n assert old_lines == new_lines\n\n #\n # Verify that the keep file is empty since only\n # read needs to be remapped. 
Note that the\n # read_bam still gives back one empty line.\n #\n lines = read_bam(test_data.bam_keep_filename)\n assert len(lines) == 1\n assert lines[0] == ''\n\n test_data.cleanup()", "def reformat_read(name_1, seq_1, plus_1, quality_1,\n name_2, seq_2, plus_2, quality_2, barcodes,\n RANDOMER_LENGTH=2):\n barcode_lengths = {len(barcode) for barcode in barcodes.keys()}\n barcode = None\n\n #assumes larger barcodes are less likely, and searches for them first\n #for each barcode length see if known barcodes appear\n for barcode_length in sorted(barcode_lengths, reverse=True):\n cur_barcode = seq_1[:barcode_length]\n randomer = seq_2[:RANDOMER_LENGTH]\n if cur_barcode in barcodes:\n barcode = cur_barcode\n break\n\n name_1 = name_1[0] + randomer + \":\" + name_1[1:]\n seq_1 = seq_1[barcode_length:]\n quality_1 = quality_1[barcode_length:]\n\n name_2 = name_2[0] + randomer + \":\" + name_2[1:]\n seq_2 = seq_2[RANDOMER_LENGTH:]\n quality_2 = quality_2[RANDOMER_LENGTH:]\n\n #if none appear the barcode is unassigne\n if barcode is None:\n barcode = \"unassigned\"\n\n result_1 = name_1 + seq_1 + plus_1 + quality_1\n result_2 = name_2 + seq_2 + plus_2 + quality_2\n\n return barcode, randomer, result_1, result_2", "def process_reads_watson(args):\n watson_r1 = tempfile.NamedTemporaryFile(suffix=\".fastq\", prefix='watson_r1', dir=args.tmpdir,\n delete=False)\n watson_r2 = tempfile.NamedTemporaryFile(suffix=\".fastq\", prefix='watson_r2', dir=args.tmpdir,\n delete=False)\n args.watson_r1 = watson_r1.name\n args.watson_r2 = watson_r2.name\n print('Started processing watson reads')\n if args.watson_val_r1.endswith('.gz'):\n w_r1_handle = gzip.open(args.watson_val_r1, 'rt')\n w_r2_handle = gzip.open(args.watson_val_r2, 'rt')\n else:\n w_r1_handle = open(args.watson_val_r1, 'rt')\n w_r2_handle = open(args.watson_val_r2, 'rt')\n #make 4 file handles for forward and reverse watson and crick\n watson_r1_handle = open(args.watson_r1, 'w')\n watson_r2_handle = open(args.watson_r2, 'w')\n j = 0\n while True:\n w_r1 = []\n w_r2 = []\n for i in range(4):\n try:\n w_r1.append(next(w_r1_handle))\n w_r2.append(next(w_r2_handle))\n except StopIteration:\n break\n j += 1\n try:\n if int(args.sequences) == j:\n break\n except TypeError:\n pass\n if not j % 1000000:\n print('Processed %s reads' % (j))\n if not w_r1:\n break\n convert_w_r1 = w_r1[1].upper().replace('C', 'T')\n convert_w_r2 = w_r2[1].upper().replace('G', 'A')\n c_pos_w = [str(n) for n, i in enumerate(w_r1[1]) if i.upper() == 'C']\n g_pos_w = [str(n) for n, i in enumerate(w_r2[1].rstrip('\\n')[::-1]) if i.upper() == 'G']\n header_w = '@%s' % (w_r1[0][1:-1].replace(' ', '|').replace('\\t', '|'))\n header_w += '|%s\\n' % (','.join(c_pos_w) + '|' + ','.join(g_pos_w))\n watson_r1_handle.write(header_w + convert_w_r1 + '+\\n' + w_r1[3])\n #print(read_r1[3])\n watson_r2_handle.write(header_w + convert_w_r2 + '+\\n' + w_r2[3])\n watson_r1_handle.close()\n watson_r2_handle.close()\n return args", "def demultiplex(forward_fasta, reverse_fasta, barcodefile, barcodelength, outfile,logfile, max_mismatches,\n trimsize_forward, trimsize_reverse, includeshort, spacersequence, sampleindex, keepunassigned):\n\n # get the barcode and fasta data\n barcodes = process_barcodefile(barcodefile, barcodelength)\n fastas = zip(SeqIO.parse(forward_fasta, 'fasta'), SeqIO.parse(reverse_fasta,'fasta'))\n fastadicts = (fasta_to_dict(fasta) for fasta in fastas)\n\n # get barcode information\n fastabarcodes = (check_barcode(fastadict,\n barcodedict=barcodes,\n 
barcodelength=barcodelength,\n maxdistance=max_mismatches)\n for fastadict in fastadicts)\n\n #filter sizes, and reverse complement\n fastasizetruncated = (truncate_by_size(fastadict,\n trimsize_forward=trimsize_forward,\n trimsize_reverse=trimsize_reverse)\n for fastadict in fastabarcodes)\n\n # validate data before progressing\n fastadata = (fastadataSchema.validate(d) for d in fastasizetruncated)\n\n #iterate through and keep relevant data\n tooshortcount = 0\n badbarcodecount = 0\n errorcount = 0\n count = 0\n samplecounts = defaultdict(int)\n\n for result in fastadata:\n #sampledata\n forward_id = result['forward_id']\n forward_desc = result[\"forward_desc\"]\n forward_seq = result[\"forward_sequence\"]\n reverse_id = result[\"reverse_id\"]\n reverse_desc = result[\"reverse_desc\"]\n reverse_seq = result[\"reverse_sequence\"]\n sample = result[\"sample\"]\n barcode = result[\"barcode\"]\n brcd_dist = result[\"barcode_distance\"]\n tooshort = result[\"tooshort\"]\n spacermismatch = result['spacermismatch']\n\n #accounting\n count += 1\n samplecounts[sample] += 1\n if not sample: badbarcodecount += 1\n if tooshort: tooshortcount += 1\n\n #write sample\n def writesample(forward_seq=forward_seq,\n reverse_seq=reverse_seq,\n sample=sample,forward_id=forward_id, count=count, barcode=barcode, brcd_dist=brcd_dist):\n\n\n #combine the forward and reverse sequence\n allseq = forward_seq + spacersequence + reversecomplement(reverse_seq)\n\n # write out sequences\n if sample is None:\n sample = \"Unassigned\"\n\n fastaheader = \"{}.{}.{:06d} barcode:{} barcodemismatches:{} spacermismatch: {}\".format(\n sample, forward_id, count, barcode, brcd_dist, str(spacermismatch))\n\n outfile.write(\">{}\\n{}\\n\".format(fastaheader,allseq))\n\n def shouldwritesample(sample=sample,includeshort=includeshort,tooshort=tooshort,\n brcd_dist=brcd_dist,max_mismatches=max_mismatches):\n \"encapsulate sequence-writing logic in a function\"\n\n # Only use sequences samples that have a sample\n if not sample:\n if keepunassigned:\n return True\n else:\n return False\n\n # Ignore short sequences if the flag is false\n if includeshort is False and tooshort is True:\n return False\n\n # Ignore sequences with barcode mismatches above the threshold\n if brcd_dist > max_mismatches:\n return False\n\n return True\n\n shouldwrite = shouldwritesample()\n\n if shouldwrite == True:\n writesample()\n\n # write out log information\n logfile.write(\"\"\"\n Barcode File: {}\n Sequenced Processed: {}\n Samples Below the Length Cutoff: {}\n Samples Unassigned due to Barcodes: {}\n\n \"\"\".format(barcodefile, count, tooshortcount, badbarcodecount))\n\n for sam, cnt in samplecounts.items():\n logfile.write(\"Observed Counts for Sample {}: {}\\n\".format(sam,cnt))\n\n print(\"Finished Demultiplexing\")", "def test_paired_two_reads_one_indel(self):\n test_data = Data()\n\n read1_seqs = [\"AACGAAAAGGAGAA\",\n \"AAAAAAATTTAAAA\"]\n read2_seqs = [\"AAGAAACAACACAA\",\n \"AAAAATAAAAAATA\"]\n \n read1_quals = [\"B\" * len(read1_seqs[0]),\n \"C\" * len(read1_seqs[1])]\n read2_quals = [\"D\" * len(read2_seqs[0]),\n \"E\" * len(read2_seqs[1])]\n\n # 10 20 30\n # POS 123456789012345678901234567890\n # read1[0] AACGAAAAGGAGAA\n # read1[1] AAAAAAATTTAAAA\n # SNP ^\n genome_seq = [\"AAAAAACGAAAAGGAGAAAAAAATTTAAAA\\n\"\n \"TTTATTTTTTATTTTTTTGTGTTGTTTCTT\"]\n # read2[0] AACACAACAAAGAA\n # read2[1] ATAAAAAATAAAAA\n # INDEL ^ \n # POS 123456789012345678901234567890\n # 40 50\n \n snp_list = [['test_chrom', 18, \"A\", \"C\"],\n ['test_chrom', 52, \"G\", 
\"GTTA\"]]\n \n test_data = Data(genome_seqs=genome_seq,\n read1_seqs=read1_seqs,\n read2_seqs=read2_seqs,\n read1_quals=read1_quals,\n read2_quals=read2_quals,\n snp_list=snp_list)\n \n test_data.setup()\n test_data.index_genome_bowtie2()\n test_data.map_paired_bowtie2()\n test_data.sam2bam()\n\n find_intersecting_snps.main(test_data.bam_filename,\n snp_dir=test_data.snp_dir, \n is_paired_end=True, is_sorted=False)\n\n # Currently reads overlapping indels are thrown out\n expect_reads = set([(\"ACAAAAATTTAAAA\", \"AAAAATAAAAAATA\")])\n\n #\n # Verify fastq1 and fastq2 have appropriate read pairs\n #\n with gzip.open(test_data.fastq1_remap_filename) as f:\n lines1 = [x.strip() for x in f.readlines()]\n assert len(lines1) == len(expect_reads) * 4\n\n with gzip.open(test_data.fastq2_remap_filename) as f:\n lines2 = [x.strip() for x in f.readlines()]\n assert len(lines2) == len(expect_reads) * 4\n \n for i in range(1, len(lines2), 4):\n read_pair = (lines1[i], lines2[i])\n assert read_pair in expect_reads\n expect_reads.remove(read_pair)\n\n assert len(expect_reads) == 0\n\n \n #\n # Verify that the keep file is empty since only\n # read needs to be remapped. Note that the\n # read_bam still gives back one empty line.\n #\n lines = read_bam(test_data.bam_keep_filename)\n assert len(lines) == 1\n assert lines[0] == ''\n\n test_data.cleanup()", "def _update_seq(self, r_data, reg_base_data):\n read_bases = get_single_slot_read_centric(r_data, 'base')\n if read_bases is None:\n warning_message(\n 'Unable to extract data from read. Potentially corrupted file ' +\n 'or invalid Tombo index file for this directory.')\n return reg_base_data, max(0, r_data.start - self.start)\n r_seq = b''.join(read_bases).decode()\n\n if r_data.strand == '-':\n r_seq = rev_comp(r_seq)\n\n # if read starts before the interval\n if r_data.start <= self.start:\n r_end_overlap = r_data.end - self.start\n # if read covers the whole interval\n if r_data.end > self.end:\n r_end_clip = r_data.end - self.end\n reg_base_data = r_seq[-r_end_overlap:-r_end_clip]\n return reg_base_data, len(reg_base_data)\n # end of read overlaps beginning of interval\n reg_base_data[:r_end_overlap] = r_seq[-r_end_overlap:]\n return reg_base_data, r_end_overlap\n # read doesn't cover the beginning of region\n if r_data.end > self.end:\n # beginning of read covers to the end of the region\n r_begin_overlap = self.end - r_data.start\n reg_base_data[-r_begin_overlap:] = r_seq[:r_begin_overlap]\n return reg_base_data, len(reg_base_data)\n # first read is completely contained in the interval\n r_len = r_data.end - r_data.start\n r_int_start = r_data.start - self.start\n reg_base_data[r_int_start:r_int_start + r_len] = r_seq\n return reg_base_data, r_int_start + r_len", "def read_simple(self):\r\n\r\n #Create the sequence that we gonna feed with blocks\r\n reading_seq = Sequence('Reading sequence')\r\n \r\n # Create a Channel pulse for the train of pulse\r\n train = ChannelPulses(channel=6, name='Tchou Tchou')\r\n train.add_trainPulses(0, 20,20, 20)\r\n \r\n dt_read1 = np.linspace(50, 550, 4)\r\n \r\n for i, dt in enumerate(dt_read1):\r\n # Create a Channel for reading the counts\r\n read = ChannelPulses(channel=1, name='read')\r\n read.add_pulses([30,30+dt, 600,670])\r\n \r\n #Create the block\r\n block = PulsePatternBlock(name='Block read %d'%i)\r\n block.add_channelEvents([read, train])\r\n \r\n # Add the block to the sequence\r\n reading_seq.add_block(block)\r\n \r\n return reading_seq", "def get_read_segments(reads, mappings, 
flanking=1000):\n\tlog.info('Extracting read segments with {} flanking bp'.format(flanking))\n\textracted_reads = {}\n\tread_mappings = []\n\tcurr_read = mappings[0].qName\n\tclipped_segments = []\n\tread_segments = []\n\tno_valid_segments = []\n\tnew_mappings = []\n\t\t\t\t \n\tis_valid = lambda x: True if x.tEnd-x.tStart > 0.9*x.tLength else False\t\t# mapping is valid if covers >90% of IGHV gene sequence\n\tfor m in mappings:\n\t\tif m.qName != curr_read: # finished with mappings from current read\n\t\t\tif read_mappings: # save segments\n\t\t\t\tcurr_read_segments = []\n\t\t\t\tread = reads[curr_read]\n\t\t\t\textracted_reads[curr_read] = []\n\t\t\t\tfor i, (start, end, mapping) in enumerate(read_mappings):\n\t\t\t\t\tif (start < flanking) or (mapping.qLength - end < flanking): # segment is clipped given flanking region length\n\t\t\t\t\t\t\tlog.debug('Mapping segment on read {} clipped, not adding'.format(curr_read))\n\t\t\t\t\t\t\tclipped_segments.append(mapping)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\tnew_read_id = curr_read+'.{}'.format(i)\n\t\t\t\t\tsegment = SeqRecord(new_read_id, reads[curr_read].seq[start-flanking:end+flanking])\n\t\t\t\t\tmapping.qName = new_read_id\n\t\t\t\t\tnew_mappings.append(mapping)\n\n\t\t\t\t\tread_segments.append(segment)\n\t\t\t\t\textracted_reads[curr_read].append(segment)\n\t\t\telse:\n\t\t\t\tno_valid_segments.append(curr_read)\n\t\t\tcurr_read = m.qName\n\t\t\tread_mappings = []\n\t\t\n\t\tif not is_valid(m):\n\t\t\tcontinue\n\t\tsegment = (m.qStart, m.qEnd, m)\n\t\tif not read_mappings:\n\t\t\tread_mappings.append(segment)\n\t\tif all(x[0] > m.qEnd for x in read_mappings) or all(x[1] < m.qStart for x in read_mappings): # m is up or downstream of all previously seen mapping locations for curr_read\n\t\t\tread_mappings.append(segment)\n\n\tlog.info('Extracted {} read segments from {} reads'.format(len(read_segments), len(extracted_reads)))\n\tlog.info('{} reads had no valid segments'.format(len(no_valid_segments)))\n\tlog.info('Ignored {} clipped segments'.format(len(clipped_segments)))\n\n\n\treturn dict([(x.id, x) for x in read_segments]), new_mappings", "def readCodons(seq):\n i = 0\n while i < len(seq):\n t = seq[i:i+3]\n i += 3\n yield t" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Procedure for reading both sequences and stitching them together. Unless specified, it will read 10^8 sequences from the supplied reads
def paired_read(read1, read2, nbrofitems = 10**8, fileout = None):
    seqFreqs = Counter()

    # TODO: Enforce trimming parameters (or rather YAML config file)
    if args.config is not None:
        trim5 = cfg["Trim"]["fwdread"]
        trim3 = cfg["Trim"]["revread"]
    else:
        trim5 = [27, None]
        trim3 = [21, 150]

    for rec1, rec2 in islice(zip(read1, read2), nbrofitems):

        rec1 = rec1[trim5[0] : trim5[1]]  # Trim the primer variable sequence
        rec2 = rec2[trim3[0] : trim3[1]].reverse_complement()  # Trim the low Q half of the 3' read, the primer AND take rev complement

        global nseqs
        nseqs += 1

        if filter_seq(rec1, direction=5) and filter_seq(rec2, direction=3):
            aa1 = rec1.seq.translate()
            aa2 = rec2.seq.translate()

            # Stitch the strings together
            if args.config is not None:
                i = str(aa1).rfind(cfg["Stitching"]["f_anchor"])
                j = str(aa2).find(cfg["Stitching"]["r_anchor"])

                # Check whether or not stitching is done in the expected place
                # TODO: this should be done in a more graceful way
                if i < len(str(aa1)) * 0.75:
                    print("Warning: linker anchor on VH side not found where it was expected (i = {})".format(i))
                    print("read1: {} (i = {})".format(str(aa1), i))

                if j > len(str(aa2)) * 0.25:
                    print("Warning: linker anchor on VL side not found where it was expected (j = {})".format(j))
                    print("read2: {} (j = {})".format(str(aa2), j))

            else:
                i = None
                j = None

            aakey = str(aa1)[:i] + linker_str + str(aa2)[j:]
            seqFreqs.update({ aakey : 1 })

    if args.append_summary is not None:
        """ Export read stats before trimming sequences that occur just once """
        filtseqs = sum(seqFreqs.values())
        dist_seqs = len(list(seqFreqs))

        promille_seqs = 0
        for k, v in islice(seqFreqs.most_common(), 1000):
            if v > filtseqs / 1000:
                promille_seqs += 1
            else:
                break

        with open(args.append_summary, 'a') as statfile:
            print(os.path.dirname(fileout), nseqs, lowQSeq, starSeqs, filtseqs, dist_seqs, promille_seqs, sep="\t", file=statfile)

    if args.no_trim is not True:
        """ Trim out sequences that occur just once """
        seqFreqs = seqFreqs - Counter(k for k in seqFreqs.keys())

    if fileout is not None:
        fout = open(fileout, "w")
        sys.stdout = fout

        outdir = os.path.dirname(fileout)
        jsonf = os.path.join(outdir, "seqdata_paired.json")

        with open(jsonf, 'w') as fp:
            json.dump(seqFreqs, fp, indent=4)

    pprint(seqFreqs.most_common(100), width = 240)

    if fileout is not None:
        sys.stdout = sys.__stdout__
        fout.close()
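Both readers expect iterators of Biopython SeqRecord objects; the record slicing and the .seq.translate() / .reverse_complement() calls above imply Biopython, but the calling code is not part of this snippet. The following is a minimal driver sketch, assuming gzip-compressed FASTQ input: the file names are hypothetical, and the module-level globals the functions rely on (args, cfg, linker_str, nseqs, lowQSeq, starSeqs) are assumed to be initialised elsewhere in the original script.

# Illustrative only: file names below are hypothetical, and the SeqIO/gzip usage is an
# assumption based on the SeqRecord-style calls in single_read/paired_read above.
import gzip
from Bio import SeqIO

with gzip.open("sample_R1.fastq.gz", "rt") as h1, gzip.open("sample_R2.fastq.gz", "rt") as h2:
    # Lazily parse both FASTQ files into SeqRecord iterators and stitch the pairs.
    read1 = SeqIO.parse(h1, "fastq")
    read2 = SeqIO.parse(h2, "fastq")
    paired_read(read1, read2, nbrofitems=10**6, fileout="paired_counts.txt")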
[ "def single_read(read1, direction = 5, nbrofitems = 10**8, fileout = None):\n seqFreqs = Counter()\n\n # TODO: Enfore trimming parameters (or rather YAML config file)\n if cfg is not None:\n trim5 = cfg[\"Trim\"][\"fwdread\"]\n trim3 = cfg[\"Trim\"][\"revread\"]\n else:\n trim5 = [27,None]\n trim3 = [21, 150]\n\n for rec in islice(read1, nbrofitems):\n if(direction == 5):\n rec = rec[trim5[0] : trim5[1]] # Trim the primer variable sequence\n else: \n rec = rec[trim3[0] : trim3[1]].reverse_complement() # Trim the low Q half of the 3' read, the primer AND take rev complement\n \n aaSeq = rec.seq.translate()\n if filter_seq(rec, direction) :\n seqFreqs.update({ str(aaSeq) : 1 }) \n \n global nseqs \n nseqs += 1\n\n if args.no_trim is not True:\n \"\"\" Trim out sequences that occur just once \"\"\"\n seqFreqs = seqFreqs - Counter(k for k in seqFreqs.keys())\n\n if fileout is not None:\n fout = open(fileout, \"w\")\n sys.stdout = fout\n jsonf = os.path.join(os.path.split(fileout), \"seqdata.json\")\n with open(jsonf, 'w') as fp:\n json.dump(seqFreqs, fp, indent=4)\n\n\n pprint(seqFreqs.most_common(100), width = 120)\n\n if fileout is not None:\n sys.stdout = sys.__stdout__\n fout.close()", "def write_read_seqs(both_read_seqs, keep, remap_bam, fastqs, dropped=None, remap_num=0):\n reads, seqs = zip(*both_read_seqs)\n assert len(reads) == len(fastqs)\n num_seqs = len(both_read_seqs[0][1])\n if num_seqs == 0:\n if dropped is not None:\n for read in reads:\n dropped.write(read)\n return 0\n else:\n return 0\n elif num_seqs == 1:\n for read, seqs in both_read_seqs:\n keep.write(read)\n else:\n assert len(reads) > 0\n for read in reads:\n remap_bam.write(read)\n left_pos = min(r.pos for r in reads)\n right_pos = max(r.pos for r in reads)\n loc_line = '{}:{}:{}:{}:{}'.format(\n remap_num,\n read.reference_name,\n left_pos,\n right_pos,\n len(seqs[0])-1,\n )\n\n first = True\n # Some python fanciness to deal with single or paired end reads (or\n # n-ended reads, if such technology ever happens.\n for read_seqs in zip(*seqs):\n if first:\n first = False\n continue\n for seq, read, fastq in zip(read_seqs, reads, fastqs):\n assert len(seq) == len(read.qual)\n fastq.write(\n \"@{loc_line}\\n{seq}\\n+{loc_line}\\n{qual}\\n\"\n .format(\n loc_line=loc_line,\n seq=reverse_complement(seq) if read.is_reverse else seq,\n qual=read.qual)\n )\n return 1\n return 0", "def map_reads(processed_reads, avrg_read_len, settings):\n\n PAS_sites = ['AATAAA', 'ATTAAA', 'TATAAA', 'AGTAAA', 'AAGAAA', 'AATATA',\n 'AATACA', 'CATAAA', 'GATAAA', 'AATGAA', 'TTTAAA', 'ACTAAA',\n 'AATAGA']\n\n pas_patterns = [re.compile(pas) for pas in PAS_sites]\n\n # File path of the mapped reads\n mapped_reads = os.path.splitext(processed_reads)[0]+'_mapped'\n\n # Naming the final output\n polybed_path = os.path.splitext(processed_reads)[0] + '_mapped.bed'\n\n # How many mismatches depend on read length\n if avrg_read_len < 50:\n mismatch_nr = 1\n elif 50 < avrg_read_len < 100:\n mismatch_nr = 2\n elif 100 < avrg_read_len:\n mismatch_nr = 3\n\n ### mapping trimmed reads\n ### DEBUG skipping this step for speedy work :) XXX\n command = \"gem-mapper -I {0} -i {1} -o {2} -q ignore -m {3}\"\\\n .format(settings.gem_index, processed_reads, mapped_reads, mismatch_nr)\n\n p = Popen(command.split())\n p.wait()\n\n # Accept mismatches according to average read length\n acceptables = {1: set(('1:0', '0:1')), 2: set(('1:0:0', '0:1:0', '0:0:1')),\n 3: set(('1:0:0:0', '0:1:0:0', '0:0:1:0', '0:0:0:1'))}\n\n acceptable = acceptables[mismatch_nr]\n getstrand 
= {'R':'-', 'F':'+'}\n start_re = re.compile('[0-9]*')\n\n reads_file = open(polybed_path, 'wb')\n\n # count the number of noisy reads and total reads\n noisecount = 0\n allcount = 0\n\n for line in open(mapped_reads + '.0.map', 'rb'):\n (at, tail_info, seq, mapinfo, position) = line.split('\\t')\n\n # Acceptable reads and poly(A) reads are mutually exclusive.\n if mapinfo in acceptable:\n allcount += 1\n # Get chromosome, strand, and beg\n (chrom, rest) = position.split(':')\n strand = getstrand[rest[0]]\n beg = start_re.match(rest[1:]).group()\n\n ## Don't write if this was a noisy read\n if read_is_noise(chrom, strand, beg, seq, at, tail_info, settings):\n noisecount += 1\n continue\n\n # Get the PAS and the PAS distance of this read\n PAS, PAS_dist = get_early_PAS(seq, at, pas_patterns)\n\n if PAS != 'NA':\n nPAS = ':'.join(PAS)\n nPAS_dist = ':'.join([str(d) for d in PAS_dist])\n else:\n nPAS = 'NA'\n nPAS_dist = 'NA'\n\n # Write to file in .bed format\n name = '#'.join([at, tail_info, nPAS, nPAS_dist])\n\n reads_file.write('\\t'.join([chrom, beg, str(int(beg)+len(seq)), name,\n '0', strand]) + '\\n')\n\n # close file\n reads_file.close()\n\n # Write to logfile\n if allcount > 0:\n vals = (noisecount, allcount, noisecount/float(allcount))\n\n noiseinf = '\\nNoise reads: {0}\\nTotal reads: {1}\\nNoise ratio: {2:.2f}\\n'\\\n .format(*vals)\n\n noiselog = open('NOISELOG.LOG', 'ab')\n noiselog.write('-'*80+'\\n')\n noiselog.write(polybed_path)\n noiselog.write(noiseinf)\n noiselog.write('-'*80+'\\n')\n noiselog.close()\n\n return polybed_path", "def write_split_read(read, outsam, num_copies):\n\n spans_left_breakp = read.reference_start < start < read.reference_end\n spans_right_breakp = read.reference_start < end < read.reference_end\n left_matching_bp = start - read.reference_start\n right_matching_bp = read.reference_end - end\n if num_copies < 1:\n if spans_left_breakp and spans_right_breakp:\n # pick one with more matching bp\n clip_left = left_matching_bp < right_matching_bp\n elif spans_left_breakp:\n clip_left = False\n elif spans_right_breakp:\n clip_left = True\n else:\n raise ValueError('Internal disagreement as to whether read should be split.')\n if clip_left:\n breakpoint = end - read.reference_start\n else:\n breakpoint = start - read.reference_start\n elif num_copies > 1:\n if spans_left_breakp and spans_right_breakp:\n clip_left = left_matching_bp < right_matching_bp\n elif spans_left_breakp:\n clip_left = True\n elif spans_right_breakp:\n clip_left = False\n else:\n raise ValueError('Internal disagreement as to whether read should be split.')\n if clip_left:\n breakpoint = start - read.reference_start\n else:\n breakpoint = end - read.reference_start\n\n # If the breakpoint is beyond the read, just write the original one and bail.\n # This happens with reads that have significant gaps between matching blocks.\n if breakpoint >= read.rlen:\n outsam.write(read)\n return 1\n\n # Use the reverse 'alternate sequence' in case of left clipping, so that it always terminates in the same base.\n # To visualize this, in the following reads only the replaced portion is visible:\n #\n # Left clip: # Right clip:\n # #\n # breakpoint # breakpoint\n # | # |\n # v # v\n # ACGTACGT---------- # -----------ACGT\n # TACGT---------------- # -------ACGTAC\n # GT--------------------- # -----ACGTACGTA\n sequence_for_replacement = left_clip_seq if clip_left else right_clip_seq\n split_read = make_split_read(read, breakpoint, clip_left, sequence=sequence_for_replacement)\n\n # Write variants of 
the read.\n reads_written = 0\n if num_copies >= 1:\n # If this is a duplication, first write the 'original' split read, then generate modifications of it.\n outsam.write(read)\n reads_written = 1 + write_copies(split_read, outsam, num_copies - 1)\n else:\n # Assume heterozygous deletion - that is, a 50% chance of writing the original read rather than the split one.\n reads_written += write_copies(read, outsam, num_copies)\n reads_written += write_copies(split_read, outsam, 1 - num_copies)\n\n return reads_written", "def map_reads(\n read1_path,\n read2_path,\n tags,\n barcode_slice,\n umi_slice,\n indexes,\n whitelist,\n debug,\n start_trim,\n maximum_distance,\n sliding_window,\n):\n # Initiate values\n results = {}\n no_match = Counter()\n n = 1\n t = time.time()\n with gzip.open(read1_path, \"rt\") as textfile1, gzip.open(\n read2_path, \"rt\"\n ) as textfile2:\n\n # Read all 2nd lines from 4 line chunks. If first_n not None read only 4 times the given amount.\n secondlines = islice(\n zip(textfile1, textfile2), indexes[0] * 4 + 1, indexes[1] * 4 + 1, 4\n )\n for read1, read2 in secondlines:\n read1 = read1.strip()\n read2 = read2.strip()\n\n # Progress info\n if n % 1000000 == 0:\n print(\n \"Processed 1,000,000 reads in {}. Total \"\n \"reads: {:,} in child {}\".format(\n secondsToText.secondsToText(time.time() - t), n, os.getpid()\n )\n )\n sys.stdout.flush()\n t = time.time()\n\n # Get cell and umi barcodes.\n cell_barcode = read1[barcode_slice]\n # This change in bytes is required by umi_tools for umi correction\n UMI = bytes(read1[umi_slice], \"ascii\")\n # Trim potential starting sequences\n TAG_seq = read2[start_trim:]\n\n if cell_barcode not in results:\n results[cell_barcode] = defaultdict(Counter)\n\n if sliding_window:\n best_match = find_best_match_shift(TAG_seq, tags, maximum_distance)\n else:\n best_match = find_best_match(TAG_seq, tags, maximum_distance)\n\n results[cell_barcode][best_match][UMI] += 1\n\n if best_match == \"unmapped\":\n no_match[TAG_seq] += 1\n\n if debug:\n print(\n \"\\nline:{0}\\n\"\n \"cell_barcode:{1}\\tUMI:{2}\\tTAG_seq:{3}\\n\"\n \"line length:{4}\\tcell barcode length:{5}\\tUMI length:{6}\\tTAG sequence length:{7}\\n\"\n \"Best match is: {8}\".format(\n read1 + read2,\n cell_barcode,\n UMI,\n TAG_seq,\n len(read1 + read2),\n len(cell_barcode),\n len(UMI),\n len(TAG_seq),\n best_match,\n )\n )\n sys.stdout.flush()\n n += 1\n print(\n \"Mapping done for process {}. 
Processed {:,} reads\".format(os.getpid(), n - 1)\n )\n sys.stdout.flush()\n return (results, no_match)", "def process_reads_watson(args):\n watson_r1 = tempfile.NamedTemporaryFile(suffix=\".fastq\", prefix='watson_r1', dir=args.tmpdir,\n delete=False)\n watson_r2 = tempfile.NamedTemporaryFile(suffix=\".fastq\", prefix='watson_r2', dir=args.tmpdir,\n delete=False)\n args.watson_r1 = watson_r1.name\n args.watson_r2 = watson_r2.name\n print('Started processing watson reads')\n if args.watson_val_r1.endswith('.gz'):\n w_r1_handle = gzip.open(args.watson_val_r1, 'rt')\n w_r2_handle = gzip.open(args.watson_val_r2, 'rt')\n else:\n w_r1_handle = open(args.watson_val_r1, 'rt')\n w_r2_handle = open(args.watson_val_r2, 'rt')\n #make 4 file handles for forward and reverse watson and crick\n watson_r1_handle = open(args.watson_r1, 'w')\n watson_r2_handle = open(args.watson_r2, 'w')\n j = 0\n while True:\n w_r1 = []\n w_r2 = []\n for i in range(4):\n try:\n w_r1.append(next(w_r1_handle))\n w_r2.append(next(w_r2_handle))\n except StopIteration:\n break\n j += 1\n try:\n if int(args.sequences) == j:\n break\n except TypeError:\n pass\n if not j % 1000000:\n print('Processed %s reads' % (j))\n if not w_r1:\n break\n convert_w_r1 = w_r1[1].upper().replace('C', 'T')\n convert_w_r2 = w_r2[1].upper().replace('G', 'A')\n c_pos_w = [str(n) for n, i in enumerate(w_r1[1]) if i.upper() == 'C']\n g_pos_w = [str(n) for n, i in enumerate(w_r2[1].rstrip('\\n')[::-1]) if i.upper() == 'G']\n header_w = '@%s' % (w_r1[0][1:-1].replace(' ', '|').replace('\\t', '|'))\n header_w += '|%s\\n' % (','.join(c_pos_w) + '|' + ','.join(g_pos_w))\n watson_r1_handle.write(header_w + convert_w_r1 + '+\\n' + w_r1[3])\n #print(read_r1[3])\n watson_r2_handle.write(header_w + convert_w_r2 + '+\\n' + w_r2[3])\n watson_r1_handle.close()\n watson_r2_handle.close()\n return args", "def extract_reads_from_PE_fastq(fname_iPCR_PE1, fname_iPCR_PE2):\n\n # This is the scarcode that allows to identify which\n # experiment is sequenced (must be CT).\n matcher = seeq.compile('CGCTAATTAATGGAATCATG', 3)\n\n outf1 = open('CT_TCT.fasta', 'w')\n outf2 = open('CT_ACG.fasta', 'w')\n\n # There are many errors in the index, especially in the\n # first base. The most frequent errors are hard coded\n # in the dictionary so that the reads are written to the\n # proper file.\n outfiles = {\n 'TCT': outf1,\n 'GCT': outf1,\n 'ACT': outf1,\n 'ACG': outf2,\n 'AGG': outf2,\n 'CCG': outf2,\n }\n\n with gzopen(fname_iPCR_PE1) as f, gzopen(fname_iPCR_PE2) as g:\n for lineno,(line1,line2) in enumerate(izip(f,g)):\n # Take sequence lines of the fastq files.\n if lineno % 4 != 1: continue\n\n brcd = trimSuffix(matcher, line1)\n # If we find a barcode between 13 and 25 nucleotides\n # then the scarcode must have been the right one.\n if len(brcd) < 13 or len(brcd) > 25: continue\n\n # Remove first 25 nucleotides.\n suff = line2.rstrip()[25:].split('CATG')[0]\n # Cut genome fragment after the first CATG.\n genome = re.sub(r'CATG.*', 'CATG', suff)\n\n # Avoid short strings that are unmappable.\n if len(genome) < 20:\n genome = 'gatcctgatgctagtgactgatgagctgctgaagctgga'\n\n # The first 3 nucleotides of the reverse read are the\n # index. 
Check that it belongs to the right group.\n idx = line2[:3]\n if idx in outfiles:\n outf = outfiles[idx]\n outf.write('>%s\\n%s\\n' % (brcd,genome))", "def get_reference_seqs(args, len_reads):\n # generate reference sequence with N's\n if args.ref_mode == \"N\":\n\n print(\"Generating reference sequence with all Ns...\")\n num_ref_seqs = 1\n ref_samples = np.zeros((num_ref_seqs, len_reads, 4))\n\n # create reference sequences with same GC content as the training data set\n elif args.ref_mode == \"GC\":\n\n print(\"Generating reference sequences with same GC-content as training data set...\")\n train_samples = np.load(args.train_data, mmap_mode='r')\n num_ref_seqs = 5\n ref_seqs = [0]*num_ref_seqs\n # calculate frequency of each nucleotide (A,C,G,T,N) in the training data set\n probs = np.mean(np.mean(train_samples, axis=1), axis=0).tolist()\n probs.append(1-sum(probs))\n # generate reference seqs\n for i in range(num_ref_seqs):\n ref_seqs[i] = np.random.choice([0, 1, 2, 3, 4], p=probs, size=len_reads, replace=True)\n ref_samples = to_categorical(ref_seqs, num_classes=5)\n # remove channel of N-nucleotide\n ref_samples = ref_samples[:, :, 0:4]\n nc_dict = {0: 'A', 1: 'C', 2: 'G', 3: 'T', 4: 'N'}\n train_data_set_name = os.path.splitext(os.path.basename(args.train_data))[0]\n # save reference sequences\n with open(args.out_dir + '/' + train_data_set_name + '_references.fasta', 'w') as csv_file:\n file_writer = csv.writer(csv_file)\n for seq_id in range(num_ref_seqs):\n file_writer.writerow([\">\"+train_data_set_name+\"_ref_\"+str(seq_id)])\n file_writer.writerow([\"\".join([nc_dict[base] for base in ref_seqs[seq_id]])])\n del train_samples\n\n # load own reference sequences (args.ref_mode == \"own_ref_file\")\n else:\n\n print(\"Loading reference sequences...\")\n tokenizer = Tokenizer(char_level=True)\n tokenizer.fit_on_texts('ACGT')\n ref_reads = list(SeqIO.parse(args.ref_seqs, \"fasta\"))\n ref_samples = np.array([np.array([tokenizer.texts_to_matrix(read)]) for read in ref_reads])\n # remove unused character\n if not np.count_nonzero(ref_samples[:, :, :, 0]):\n ref_samples = ref_samples[:, :, :, 1:5]\n ref_samples = ref_samples.squeeze(1)\n # num_ref_seqs = ref_samples.shape[0]\n\n return ref_samples", "def process_sample(job, inputs, tar_id):\n job.fileStore.logToMaster('Processing sample into read pairs: {}'.format(inputs.uuid))\n work_dir = job.fileStore.getLocalTempDir()\n # I/O\n tar_path = job.fileStore.readGlobalFile(tar_id, os.path.join(work_dir, 'sample.tar'))\n # Untar File and concat\n subprocess.check_call(['tar', '-xvf', tar_path, '-C', work_dir])\n os.remove(os.path.join(work_dir, 'sample.tar'))\n # Grab files from tarball\n fastqs = []\n for root, subdir, files in os.walk(work_dir):\n fastqs.extend([os.path.join(root, x) for x in files])\n # Check for read 1 and read 2 files\n r1 = sorted([x for x in fastqs if 'R1' in x])\n r2 = sorted([x for x in fastqs if 'R2' in x])\n if not r1 or not r2:\n # Check if using a different standard\n r1 = sorted([x for x in fastqs if '_1' in x])\n r2 = sorted([x for x in fastqs if '_2' in x])\n # Prune file name matches from each list\n if len(r1) > len(r2):\n r1 = [x for x in r1 if x not in r2]\n elif len(r2) > len(r1):\n r2 = [x for x in r2 if x not in r1]\n # Flag if data is single-ended\n assert r1 and r2, 'This pipeline does not support single-ended data. 
R1: {}\\nR2:{}'.format(r1, r2)\n command = 'zcat' if r1[0].endswith('gz') and r2[0].endswith('gz') else 'cat'\n with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1:\n p1 = subprocess.Popen([command] + r1, stdout=f1)\n with open(os.path.join(work_dir, 'R2.fastq'), 'w') as f2:\n p2 = subprocess.Popen([command] + r2, stdout=f2)\n p1.wait()\n p2.wait()\n # Write to fileStore\n r1_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq'))\n r2_id = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2.fastq'))\n job.fileStore.deleteGlobalFile(tar_id)\n # Start cutadapt step\n job.addChildJobFn(cutadapt, inputs, r1_id, r2_id, disk='60G').rv()", "def parse_reads(reads, chromosome_name, fasta_handler, homopolymer_window_size=11):\n left_pad = math.floor((homopolymer_window_size - 1)/2)\n right_pad = math.ceil((homopolymer_window_size - 1)/2) + 1\n\n inserts = defaultdict(list)\n deletes = defaultdict(list)\n mismatches = defaultdict(list)\n\n n_secondary = 0\n\n for read in reads:\n if read.is_secondary:\n n_secondary += 1\n # print(read.query_name, n_secondary)\n\n if read.mapping_quality > 0 and not read.is_secondary:\n ref_alignment_start = read.reference_start\n ref_alignment_stop = get_read_stop_position(read)\n ref_length = ref_alignment_stop - ref_alignment_start\n\n reversal_status = read.is_reverse\n\n ref_sequence = fasta_handler.get_sequence(chromosome_name=chromosome_name,\n start=ref_alignment_start,\n stop=ref_alignment_stop + 10)\n\n cigar_tuples = read.cigartuples\n read_sequence = read.query_sequence\n read_length = len(read_sequence)\n contig_length = read.infer_read_length()\n\n read_id = read.query_name\n # read_quality = read.query_qualities\n\n # read_index: index of read sequence\n # ref_index: index of reference sequence\n read_index = 0\n ref_index = 0\n found_valid_cigar = False\n\n n_total_mismatches = 0\n n_total_deletes = 0\n n_total_inserts = 0\n n_initial_clipped_bases = 0\n\n for c, cigar in enumerate(cigar_tuples):\n cigar_code = cigar[0]\n length = cigar[1]\n\n # get the sequence segments that are effected by this operation\n read_sequence_segment = read_sequence[read_index:read_index + length]\n ref_sequence_segment = ref_sequence[ref_index:ref_index+length]\n\n # skip parsing the first segment if it is not a match\n if cigar_code != 0 and found_valid_cigar is False:\n # only increment the read index if the non-match cigar code is INS or SOFTCLIP\n if cigar_code == 1 or cigar_code == 4:\n read_index += length\n if cigar_code == 5 or cigar_code == 4:\n n_initial_clipped_bases = length\n continue\n\n found_valid_cigar = True\n\n # send the cigar tuple to get attributes we got by this operation\n ref_index_increment, read_index_increment, n_mismatches, n_deletes, n_inserts, segment_mismatches = \\\n parse_cigar_tuple(cigar_code=cigar_code,\n length=length,\n ref_index=ref_index,\n read_index=read_index,\n read_sequence=read_sequence_segment,\n ref_sequence=ref_sequence_segment)\n\n if cigar_code == 0:\n for mismatch in segment_mismatches:\n # mismatch\n cigar_type = \"SNP\"\n\n ref_start = ref_alignment_start + mismatch[MISMATCH_INDEXES[\"ref_start\"]]\n ref_stop = ref_alignment_start + mismatch[MISMATCH_INDEXES[\"ref_stop\"]]\n read_start = mismatch[MISMATCH_INDEXES[\"read_start\"]]\n read_stop = mismatch[MISMATCH_INDEXES[\"read_stop\"]]\n\n ref_allele = mismatch[MISMATCH_INDEXES[\"ref_allele\"]]\n read_allele = mismatch[MISMATCH_INDEXES[\"read_allele\"]]\n\n left_index = mismatch[MISMATCH_INDEXES[\"ref_start\"]] - left_pad\n right_index = 
mismatch[MISMATCH_INDEXES[\"ref_start\"]] + right_pad\n\n left_index = max(0, left_index)\n right_index = min(len(ref_sequence), right_index)\n\n ref_window = ref_sequence[left_index:right_index]\n\n entropy = round(calculate_shannon_entropy(ref_window),3)\n max_repeat = find_longest_repeat(ref_window)\n\n is_runlength_error = False\n\n ref_allele_context = ref_sequence[mismatch[MISMATCH_INDEXES[\"ref_start\"]] - 1:mismatch[MISMATCH_INDEXES[\"ref_start\"]] + 2]\n read_allele_context = read_sequence[mismatch[MISMATCH_INDEXES[\"read_start\"]] - 1:mismatch[MISMATCH_INDEXES[\"read_start\"]] + 2]\n\n data = [chromosome_name, cigar_type, ref_start, ref_stop, ref_allele, ref_allele_context, read_start, read_stop,\n read_allele, read_allele_context, reversal_status, ref_window, entropy, max_repeat, is_runlength_error]\n\n mismatches[read_id].append(data)\n\n elif cigar_code == 1:\n # insert\n cigar_type = \"INS\"\n\n ref_start = ref_alignment_start + ref_index\n ref_stop = ref_alignment_start + ref_index + ref_index_increment\n read_start = read_index\n read_stop = read_index + read_index_increment\n\n read_allele = read_sequence[read_start:read_stop]\n ref_allele = ref_sequence[ref_index:ref_index + ref_index_increment]\n\n left_index = max(0, ref_index - left_pad)\n right_index = min(len(ref_sequence), ref_index + right_pad)\n\n ref_window = ref_sequence[left_index:right_index]\n\n entropy = round(calculate_shannon_entropy(ref_window), 3)\n max_repeat = find_longest_repeat(ref_window)\n\n is_runlength_error = False\n\n characters = set(read_allele)\n if len(characters) == 1:\n if read_allele[0] == ref_sequence[ref_index-1] or read_allele[-1] == ref_sequence[ref_index]:\n is_runlength_error = True\n\n # print(\"INSERT\")\n # print(\"REF\\t\",ref_sequence[ref_index-1:ref_index + 1])\n # print(\"READ\\t\", read_sequence[read_index-1:read_index+read_index_increment+1])\n # print(is_runlength_error)\n # print()\n\n ref_allele_context = ref_sequence[ref_index-1:ref_index + 1]\n read_allele_context = read_sequence[read_index-1:read_index+read_index_increment+1]\n\n data = [chromosome_name, cigar_type, ref_start, ref_stop, ref_allele, ref_allele_context, read_start, read_stop,\n read_allele, read_allele_context, reversal_status, ref_window, entropy, max_repeat, is_runlength_error]\n\n inserts[read_id].append(data)\n\n elif cigar_code == 2 or cigar_code == 3:\n # delete or refskip\n cigar_type = \"DEL\"\n\n ref_start = ref_alignment_start + ref_index\n ref_stop = ref_alignment_start + ref_index + ref_index_increment\n read_start = read_index\n read_stop = read_index + read_index_increment\n\n read_allele = read_sequence[read_start:read_stop]\n ref_allele = ref_sequence[ref_index:ref_index + ref_index_increment]\n\n left_index = max(0, ref_index - left_pad)\n right_index = min(len(ref_sequence), ref_index + right_pad)\n\n ref_window = ref_sequence[left_index:right_index]\n\n entropy = round(calculate_shannon_entropy(ref_window), 3)\n max_repeat = find_longest_repeat(ref_window)\n\n is_runlength_error = False\n\n characters = set(ref_allele)\n if len(characters) == 1:\n if ref_allele[0] == read_sequence[read_index-1] or ref_allele[-1] == read_sequence[read_stop]:\n is_runlength_error = True\n\n # print(\"DELETE\")\n # print(\"REF\\t\",ref_sequence[ref_index-1:ref_index+ref_index_increment+1])\n # print(\"READ\\t\",read_sequence[read_start-1:read_stop+1])\n # print(is_runlength_error)\n # print()\n\n ref_allele_context = ref_sequence[ref_index-1:ref_index+ref_index_increment+1]\n read_allele_context = 
read_sequence[read_start-1:read_stop+1]\n\n data = [chromosome_name, cigar_type, ref_start, ref_stop, ref_allele, ref_allele_context, read_start, read_stop,\n read_allele, read_allele_context, reversal_status, ref_window, entropy, max_repeat, is_runlength_error]\n\n deletes[read_id].append(data)\n\n # increase the read/ref index iterator\n read_index += read_index_increment\n ref_index += ref_index_increment\n n_total_mismatches += n_mismatches\n n_total_deletes += n_deletes\n n_total_inserts += n_inserts\n\n return inserts, deletes, mismatches", "def convert_reads(inputf,output):\n f = open(inputf,'r')\n g = open(output,'w')\n header = f.readline().rstrip()\n header = header.replace(\" \",\"!\")\n seq = f.readline()\n header2 = f.readline()\n qual = f.readline()\n encoding = encode_c_positions(seq)\n\n while header:\n g.write(header+\"!\"+encoding+\"\\n\")\n converted_seq = seq.replace(\"C\",\"T\")\n g.write(converted_seq)\n g.write(header2)\n g.write(qual)\n\n\n header = f.readline().rstrip()\n header = header.replace(\" \",\"!\")\n seq = f.readline()\n header2 = f.readline()\n qual = f.readline()\n encoding = encode_c_positions(seq)\n f.close()\n g.close()", "def extract_reads(reads, read_file, out):\n # read_ids = file2set(read_file)\n # record_dict = SeqIO.index(reads, \"fastq\")\n # with open(out, \"wb\") as output_handle:\n # for key in read_ids:\n # output_handle.write(record_dict.get_raw(key))\n\n # subset_fa = os.path.join(out, sample_name + \".subset.fa\")\n\n command = \"seqtk subseq \" + reads + \" \" + read_file\n with open(out, \"w\") as output:\n subprocess.call(command, stdout=output, shell=True)", "def test_paired_two_reads_one_indel(self):\n test_data = Data()\n\n read1_seqs = [\"AACGAAAAGGAGAA\",\n \"AAAAAAATTTAAAA\"]\n read2_seqs = [\"AAGAAACAACACAA\",\n \"AAAAATAAAAAATA\"]\n \n read1_quals = [\"B\" * len(read1_seqs[0]),\n \"C\" * len(read1_seqs[1])]\n read2_quals = [\"D\" * len(read2_seqs[0]),\n \"E\" * len(read2_seqs[1])]\n\n # 10 20 30\n # POS 123456789012345678901234567890\n # read1[0] AACGAAAAGGAGAA\n # read1[1] AAAAAAATTTAAAA\n # SNP ^\n genome_seq = [\"AAAAAACGAAAAGGAGAAAAAAATTTAAAA\\n\"\n \"TTTATTTTTTATTTTTTTGTGTTGTTTCTT\"]\n # read2[0] AACACAACAAAGAA\n # read2[1] ATAAAAAATAAAAA\n # INDEL ^ \n # POS 123456789012345678901234567890\n # 40 50\n \n snp_list = [['test_chrom', 18, \"A\", \"C\"],\n ['test_chrom', 52, \"G\", \"GTTA\"]]\n \n test_data = Data(genome_seqs=genome_seq,\n read1_seqs=read1_seqs,\n read2_seqs=read2_seqs,\n read1_quals=read1_quals,\n read2_quals=read2_quals,\n snp_list=snp_list)\n \n test_data.setup()\n test_data.index_genome_bowtie2()\n test_data.map_paired_bowtie2()\n test_data.sam2bam()\n\n find_intersecting_snps.main(test_data.bam_filename,\n snp_dir=test_data.snp_dir, \n is_paired_end=True, is_sorted=False)\n\n # Currently reads overlapping indels are thrown out\n expect_reads = set([(\"ACAAAAATTTAAAA\", \"AAAAATAAAAAATA\")])\n\n #\n # Verify fastq1 and fastq2 have appropriate read pairs\n #\n with gzip.open(test_data.fastq1_remap_filename) as f:\n lines1 = [x.strip() for x in f.readlines()]\n assert len(lines1) == len(expect_reads) * 4\n\n with gzip.open(test_data.fastq2_remap_filename) as f:\n lines2 = [x.strip() for x in f.readlines()]\n assert len(lines2) == len(expect_reads) * 4\n \n for i in range(1, len(lines2), 4):\n read_pair = (lines1[i], lines2[i])\n assert read_pair in expect_reads\n expect_reads.remove(read_pair)\n\n assert len(expect_reads) == 0\n\n \n #\n # Verify that the keep file is empty since only\n # read needs to be 
remapped. Note that the\n # read_bam still gives back one empty line.\n #\n lines = read_bam(test_data.bam_keep_filename)\n assert len(lines) == 1\n assert lines[0] == ''\n\n test_data.cleanup()", "def test_single_two_read_two_snp_two_chrom(self):\n \n test_data = Data(read1_seqs = [\"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\",\n \"GGGGGGGGGGGGGGGGGGGGGGGGGGGGGG\"],\n read1_quals = [\"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\",\n \"BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB\"],\n genome_seqs = [\"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\\n\" +\n \"TTTTTTTTTTATTTTTTTTTTTTTTTTTTT\",\n \"GGGGGGGGGGGGGGGGGGGGGGGGGGGGGG\\n\" +\n \"CCCCCCCCCCGCCCCCCCCCCCCCCCCCCC\"],\n chrom_names = ['test_chrom1', 'test_chrom2'],\n snp_list = [['test_chrom1', 1, \"A\", \"C\"],\n ['test_chrom2', 3, \"G\", \"C\"]])\n \n test_data.setup()\n test_data.index_genome_bowtie2()\n test_data.map_single_bowtie2()\n test_data.sam2bam()\n\n find_intersecting_snps.main(test_data.bam_filename,\n snp_dir=test_data.snp_dir, is_paired_end=False,\n is_sorted=False)\n\n #\n # Verify new fastq is correct. The first base of the first read\n # should be switched from C to an A, and the third base of second read\n # should be switched from C to G\n #\n with gzip.open(test_data.fastq_remap_filename) as f:\n lines = [x.strip() for x in f.readlines()]\n assert len(lines) == 8\n\n l = list(test_data.read1_seqs[0])\n l[0] = 'C'\n new_seq = \"\".join(l)\n assert lines[1] == new_seq\n assert lines[3] == test_data.read1_quals[0]\n\n l = list(test_data.read1_seqs[1])\n l[2] = 'C'\n new_seq = \"\".join(l)\n\n assert lines[5] == new_seq\n assert lines[7] == test_data.read1_quals[1]\n \n #\n # Verify to.remap bam is the same as the input bam file.\n #\n old_lines = read_bam(test_data.bam_filename)\n new_lines = read_bam(test_data.bam_remap_filename)\n assert old_lines == new_lines\n\n #\n # Verify that the keep file is empty since only\n # read needs to be remapped. 
Note that the\n # read_bam still gives back one empty line.\n #\n lines = read_bam(test_data.bam_keep_filename)\n assert len(lines) == 1\n assert lines[0] == ''\n\n test_data.cleanup()", "def get_read_segments(reads, mappings, flanking=1000):\n\tlog.info('Extracting read segments with {} flanking bp'.format(flanking))\n\textracted_reads = {}\n\tread_mappings = []\n\tcurr_read = mappings[0].qName\n\tclipped_segments = []\n\tread_segments = []\n\tno_valid_segments = []\n\tnew_mappings = []\n\t\t\t\t \n\tis_valid = lambda x: True if x.tEnd-x.tStart > 0.9*x.tLength else False\t\t# mapping is valid if covers >90% of IGHV gene sequence\n\tfor m in mappings:\n\t\tif m.qName != curr_read: # finished with mappings from current read\n\t\t\tif read_mappings: # save segments\n\t\t\t\tcurr_read_segments = []\n\t\t\t\tread = reads[curr_read]\n\t\t\t\textracted_reads[curr_read] = []\n\t\t\t\tfor i, (start, end, mapping) in enumerate(read_mappings):\n\t\t\t\t\tif (start < flanking) or (mapping.qLength - end < flanking): # segment is clipped given flanking region length\n\t\t\t\t\t\t\tlog.debug('Mapping segment on read {} clipped, not adding'.format(curr_read))\n\t\t\t\t\t\t\tclipped_segments.append(mapping)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\tnew_read_id = curr_read+'.{}'.format(i)\n\t\t\t\t\tsegment = SeqRecord(new_read_id, reads[curr_read].seq[start-flanking:end+flanking])\n\t\t\t\t\tmapping.qName = new_read_id\n\t\t\t\t\tnew_mappings.append(mapping)\n\n\t\t\t\t\tread_segments.append(segment)\n\t\t\t\t\textracted_reads[curr_read].append(segment)\n\t\t\telse:\n\t\t\t\tno_valid_segments.append(curr_read)\n\t\t\tcurr_read = m.qName\n\t\t\tread_mappings = []\n\t\t\n\t\tif not is_valid(m):\n\t\t\tcontinue\n\t\tsegment = (m.qStart, m.qEnd, m)\n\t\tif not read_mappings:\n\t\t\tread_mappings.append(segment)\n\t\tif all(x[0] > m.qEnd for x in read_mappings) or all(x[1] < m.qStart for x in read_mappings): # m is up or downstream of all previously seen mapping locations for curr_read\n\t\t\tread_mappings.append(segment)\n\n\tlog.info('Extracted {} read segments from {} reads'.format(len(read_segments), len(extracted_reads)))\n\tlog.info('{} reads had no valid segments'.format(len(no_valid_segments)))\n\tlog.info('Ignored {} clipped segments'.format(len(clipped_segments)))\n\n\n\treturn dict([(x.id, x) for x in read_segments]), new_mappings", "def demultiplex(forward_fasta, reverse_fasta, barcodefile, barcodelength, outfile,logfile, max_mismatches,\n trimsize_forward, trimsize_reverse, includeshort, spacersequence, sampleindex, keepunassigned):\n\n # get the barcode and fasta data\n barcodes = process_barcodefile(barcodefile, barcodelength)\n fastas = zip(SeqIO.parse(forward_fasta, 'fasta'), SeqIO.parse(reverse_fasta,'fasta'))\n fastadicts = (fasta_to_dict(fasta) for fasta in fastas)\n\n # get barcode information\n fastabarcodes = (check_barcode(fastadict,\n barcodedict=barcodes,\n barcodelength=barcodelength,\n maxdistance=max_mismatches)\n for fastadict in fastadicts)\n\n #filter sizes, and reverse complement\n fastasizetruncated = (truncate_by_size(fastadict,\n trimsize_forward=trimsize_forward,\n trimsize_reverse=trimsize_reverse)\n for fastadict in fastabarcodes)\n\n # validate data before progressing\n fastadata = (fastadataSchema.validate(d) for d in fastasizetruncated)\n\n #iterate through and keep relevant data\n tooshortcount = 0\n badbarcodecount = 0\n errorcount = 0\n count = 0\n samplecounts = defaultdict(int)\n\n for result in fastadata:\n #sampledata\n forward_id = result['forward_id']\n forward_desc 
= result[\"forward_desc\"]\n forward_seq = result[\"forward_sequence\"]\n reverse_id = result[\"reverse_id\"]\n reverse_desc = result[\"reverse_desc\"]\n reverse_seq = result[\"reverse_sequence\"]\n sample = result[\"sample\"]\n barcode = result[\"barcode\"]\n brcd_dist = result[\"barcode_distance\"]\n tooshort = result[\"tooshort\"]\n spacermismatch = result['spacermismatch']\n\n #accounting\n count += 1\n samplecounts[sample] += 1\n if not sample: badbarcodecount += 1\n if tooshort: tooshortcount += 1\n\n #write sample\n def writesample(forward_seq=forward_seq,\n reverse_seq=reverse_seq,\n sample=sample,forward_id=forward_id, count=count, barcode=barcode, brcd_dist=brcd_dist):\n\n\n #combine the forward and reverse sequence\n allseq = forward_seq + spacersequence + reversecomplement(reverse_seq)\n\n # write out sequences\n if sample is None:\n sample = \"Unassigned\"\n\n fastaheader = \"{}.{}.{:06d} barcode:{} barcodemismatches:{} spacermismatch: {}\".format(\n sample, forward_id, count, barcode, brcd_dist, str(spacermismatch))\n\n outfile.write(\">{}\\n{}\\n\".format(fastaheader,allseq))\n\n def shouldwritesample(sample=sample,includeshort=includeshort,tooshort=tooshort,\n brcd_dist=brcd_dist,max_mismatches=max_mismatches):\n \"encapsulate sequence-writing logic in a function\"\n\n # Only use sequences samples that have a sample\n if not sample:\n if keepunassigned:\n return True\n else:\n return False\n\n # Ignore short sequences if the flag is false\n if includeshort is False and tooshort is True:\n return False\n\n # Ignore sequences with barcode mismatches above the threshold\n if brcd_dist > max_mismatches:\n return False\n\n return True\n\n shouldwrite = shouldwritesample()\n\n if shouldwrite == True:\n writesample()\n\n # write out log information\n logfile.write(\"\"\"\n Barcode File: {}\n Sequenced Processed: {}\n Samples Below the Length Cutoff: {}\n Samples Unassigned due to Barcodes: {}\n\n \"\"\".format(barcodefile, count, tooshortcount, badbarcodecount))\n\n for sam, cnt in samplecounts.items():\n logfile.write(\"Observed Counts for Sample {}: {}\\n\".format(sam,cnt))\n\n print(\"Finished Demultiplexing\")", "def print_reads(miss, fq1, fq2):\n\n\n bn = re.search('/(\\w+)_pass_1.fastq', fq1)\n if not bn:\n sys.stderr.write(f\"Can't parse the base filename from {fq1}\\n\")\n sys.exit(-1)\n\n fqo1 = bn.groups()[0] + \"_missed_1.fastq\"\n fqo2 = bn.groups()[0] + \"_missed_2.fastq\"\n if os.path.exists(fqo1):\n sys.stderr.write(f\"Not overwrting {fqo1}\\n\")\n sys.exit(-1)\n\n if os.path.exists(fqo2):\n sys.stderr.write(f\"Not overwrting {fqo2}\\n\")\n sys.exit(-1)\n\n with open(fqo1, 'w') as out:\n sys.stderr.write(\"Finding reads from {}\\n\".format(fq1))\n c = 0\n for sid, allid, seq, qual in stream_fastq(fq1):\n c += 1\n if not c % 100000:\n sys.stderr.write(\".\")\n sys.stderr.flush()\n test = sid[:sid.rindex(\".1\")].replace('@', '', 1)\n if test in miss:\n out.write(\"@{}\\n{}\\n+\\n{}\\n\".format(allid, seq, qual))\n out.flush()\n\n with open(fqo2, 'w') as out:\n sys.stderr.write(\"\\nFinding reads from {}\\n\".format(fq2))\n c=0\n for sid, allid, seq, qual in stream_fastq(fq2):\n c += 1\n if not c % 100000:\n sys.stderr.write(\".\")\n sys.stderr.flush()\n\n test = sid[:sid.rindex(\".2\")].replace('@', '', 1)\n if test in miss:\n out.write(\"@{}\\n{}\\n+\\n{}\\n\".format(allid, seq, qual))\n out.flush()\n sys.stderr.write(\"\\n\")", "def test_single_one_read_ten_snps(self):\n\n snp_list = [['test_chrom', x, \"A\", \"C\"] for x in range(1, 11)]\n\n\n test_data = 
Data(snp_list=snp_list)\n\n test_data.setup()\n test_data.index_genome_bowtie2()\n test_data.map_single_bowtie2()\n test_data.sam2bam()\n\n find_intersecting_snps.main(test_data.bam_filename,\n snp_dir=test_data.snp_dir,\n is_paired_end=False, is_sorted=False,\n max_seqs=10)\n\n #\n # Verify new fastq is correct. There should be no reads,\n # because reads with greater than 10 allelic combinations\n # are thrown out\n #\n with gzip.open(test_data.fastq_remap_filename) as f:\n lines = [x.strip() for x in f.readlines()]\n assert len(lines) == 0\n\n #\n # Verify to.remap bam is empty\n #\n lines = read_bam(test_data.bam_remap_filename)\n assert len(lines) == 1\n assert lines[0] == ''\n\n #\n # Verify that the keep file is empty since only\n # read needs to be remapped. Note that the\n # read_bam still gives back one empty line.\n #\n lines = read_bam(test_data.bam_keep_filename)\n assert len(lines) == 1\n assert lines[0] == ''\n\n #\n # re-run find intersecting SNPs but allow a max of 1024\n # allelic combinations (we expect 1023 new seqs with 10\n # bi-allelic SNPs)\n #\n find_intersecting_snps.main(test_data.bam_filename,\n snp_dir=test_data.snp_dir, \n is_paired_end=False, is_sorted=False,\n max_snps=10,\n max_seqs=1024)\n\n #\n # Verify new fastq is correct. There should be 1023 reads\n # with all possible configurations of the two alleles, except\n # for the original configuration.\n #\n with gzip.open(test_data.fastq_remap_filename) as f:\n lines = [x.strip() for x in f.readlines()]\n assert len(lines) == 4*1023\n\n # get every 4th line, which correspond to sequences starting at line 1\n seqs = [lines[x] for x in range(1, len(lines), 4)]\n\n # test a few combinations of alleles\n l = list(test_data.read1_seqs[0])\n l[0] = 'C'\n new_seq1 = \"\".join(l)\n\n l = list(test_data.read1_seqs[0])\n l[3] = 'C'\n new_seq2 = \"\".join(l)\n\n # read with 3 non-ref alleles\n l = list(test_data.read1_seqs[0])\n l[0] = 'C'\n l[3] = 'C'\n l[9] = 'C'\n new_seq3 = \"\".join(l)\n\n # read with 10 non-ref alleles\n l = list(test_data.read1_seqs[0])\n for i in range(10):\n l[i] = 'C'\n new_seq4 = \"\".join(l)\n\n assert len(seqs) == 1023\n assert new_seq1 in seqs\n assert new_seq2 in seqs\n assert new_seq3 in seqs\n assert new_seq4 in seqs\n\n #\n # Verify to.remap bam is the same as the input bam file.\n #\n old_lines = read_bam(test_data.bam_filename)\n new_lines = read_bam(test_data.bam_remap_filename)\n assert old_lines == new_lines\n\n #\n # Verify that the keep file is empty since only\n # read needs to be remapped. 
Note that the\n # read_bam still gives back one empty line.\n #\n lines = read_bam(test_data.bam_keep_filename)\n assert len(lines) == 1\n assert lines[0] == ''\n\n test_data.cleanup()", "def invert_read(read, start, end, sequence, snp_rate, indel_rate, max_clip_len=None):\n inv_len = end - start\n if start >= read.reference_end or end <= read.reference_start or inv_len < 2:\n return read, 0\n\n read_with_inversion = copy.deepcopy(read)\n read_with_inversion.qname = read_with_inversion.query_name = read.qname + '-' + 'inv'\n\n if read.reference_start <= start < end <= read.reference_end:\n # Read spans the entire inversion.\n left_breakpoint = start - read.reference_start\n right_breakpoint = left_breakpoint + inv_len\n read_with_inversion.seq = \"{left}{inv}{right}\".format(\n left=read.seq[:left_breakpoint],\n inv=\"\".join(reversed(read.seq[left_breakpoint:right_breakpoint])),\n right=read.seq[right_breakpoint:])\n\n # Clipped bases in reads must start at a read boundary; choose the closest one.\n # TODO: add a supplemental/secondary read where the shorter region is matched, and the longer one clipped.\n cigar_tuples = unpack_cigar(read.cigarstring)\n if left_breakpoint < read.rlen - right_breakpoint:\n start_clip, end_clip = 0, right_breakpoint\n else:\n start_clip, end_clip = left_breakpoint, read.rlen\n for i in range(start_clip, end_clip):\n cigar_tuples[i] = '1S'\n\n read_with_inversion.cigarstring = str(Cigar(\"\".join(cigar_tuples)).merge_like_ops())\n\n elif start <= read.reference_start < read.reference_end <= end:\n # Inversion spans the entire read.\n pos_in_inversion = read.reference_start - start\n inv_seq = sequence[pos_in_inversion:pos_in_inversion + read.rlen]\n read_with_inversion = make_split_read(read_with_inversion, 0, clip_left=False, sequence=inv_seq)\n\n # If a read was reversed, modify its strand.\n read_with_inversion.is_reverse = not read.is_reverse\n\n elif start > read.reference_start:\n # Inversion starts mid-read, continuing to the end of it (or past it).\n breakpoint = start - read.reference_start\n read_with_inversion = make_split_read(read_with_inversion, breakpoint, clip_left=False, sequence=sequence)\n\n elif end < read.reference_end:\n # Inversion starts before the read, continuing into it.\n breakpoint = end - read.reference_start\n read_with_inversion = make_split_read(read_with_inversion, breakpoint, clip_left=True, sequence=sequence)\n\n if max_clip_len and int(max_clip_len) < get_max_clip_len(read_with_inversion):\n return None, 0\n\n # Add noise.\n return modify_read(read_with_inversion, snp_rate, indel_rate / 2, indel_rate / 2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build an error message from a response.
def build_errmsg(
    cls,
    response,
    msg: t.Optional[str] = None,
    exc: t.Optional[Exception] = None,
) -> str:
    from .tools import json_log

    url = response.url
    method = response.request.method
    code = response.status_code
    reason = response.reason
    out_len = len(response.request.body or "")
    in_len = len(response.text or "")
    msg = msg or "Error in REST API response"
    pre = [
        msg,
        get_exc_str(exc=exc),
        f"URL: {url!r}, METHOD: {method}",
        f"CODE: {code!r}, REASON: {reason!r}, BYTES OUT: {out_len}, BYTES IN: {in_len}",
    ]
    middle = [
        "Request Object:",
        json_log(obj=response.request.body),
        "Response Object:",
        json_log(obj=response.text),
    ]
    msgs = [*pre, "", *middle, "", *pre]
    return "\n".join(msgs)
[ "def get_error_message(response) -> str:\n if not ErrorHelper.is_error(response):\n return None\n \n message = f\"{response.message} {response.message_detail}\"\n return message", "def _parse_response_error(self, response):\n message = response.text or \"no_content_on_response\"\n try:\n data = response.json()\n if data:\n return data.get(\"message\", message)\n else:\n return message\n except json.decoder.JSONDecodeError:\n return message", "def parse_response(cls, response):\r\n res_msg = cls.res_msg_type()\r\n try:\r\n res_msg.ParseFromString(response.content)\r\n except DecodeError as e:\r\n trace = sys.exc_info()[2]\r\n raise ParseException(str(e)), None, trace\r\n pass\r\n\r\n return res_msg", "def _extract_error(self, headers, response):\n reason = ER_RE.search(response)\n if reason:\n reason = reason.group()\n reason = reason.replace('<pre>','')\n reason = reason.replace('</pre>','')\n return \"Error: %s\" % str(reason)\n return \"Error: %s\" % response", "def get_message(response):\n try:\n return response.json()['error']['message']\n except Exception: # pylint: disable=broad-except\n return response.content", "def _build_error_response(message, status_code, error_id, **kwargs):\n\n return make_response(\n jsonify({\n \"status_code\": status_code,\n \"error\": {\n \"message\": message,\n \"id\": error_id\n },\n **kwargs\n }), status_code\n )", "def create_xml_error_response(error_msg, error_code):\n response = Element(\"response\")\n\n error = SubElement(response, \"error_description\")\n error.text = error_msg\n\n error_code_element = SubElement(response, \"error_code\")\n error_code_element.text = f\"{error_code}\"\n return tostring(response)", "def __build_message(self, error) -> str:\n return \"Conversion failed with error: {}\".format(error)", "def _render_error_response(self, code, title, message):\n\n if self._response_format == 'py':\n response = {'status': 'error',\n 'code': code,\n 'title': title,\n 'message': message}\n elif self._response_format == 'json':\n response = '{\"status\": \"error\", ' \\\n '\"code\": \"%s\", ' \\\n '\"title\": \"%s\", ' \\\n '\"message\": \"%s\"}' % (code, title, message)\n elif self._response_format == 'xml':\n response = '<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n' \\\n '<response>\\n' \\\n ' <status>error</status>\\n' \\\n ' <code>%s</code>\\n' \\\n ' <title>%s</title>\\n' \\\n ' <message>%s</message>\\n' \\\n '</response>' % (code, title, message)\n else:\n response = 'status: error\\n' \\\n 'code: %s\\n' \\\n 'title: %s\\n' \\\n 'message: %s' % (code, title, message)\n\n return response", "def format_bad_response(test_name, message, resp):\n return f\"Test {test_name} - Failed: {message}. 
Response: {resp}\"", "def create_error_from_parts(self, req, code, msg, hdrs, resp):\n err = urllib2.HTTPError(req.get_full_url(), code, msg, hdrs, resp)\n err.id = req.id\n return err", "def parse_error_response(response):\n element = ElementTree.fromstring(response.data.decode())\n\n def _get_text(name):\n return (\n element.find(name).text if element.find(name) is not None else None\n )\n\n return S3Error(\n _get_text(\"Code\"),\n _get_text(\"Message\"),\n _get_text(\"Resource\"),\n _get_text(\"RequestId\"),\n _get_text(\"HostId\"),\n bucket_name=_get_text(\"BucketName\"),\n object_name=_get_text(\"Key\"),\n response=response,\n )", "def error_message_and_fields(obj, err):\n try:\n data = MESSAGES[err.response_error]\n if isinstance(data, dict):\n data = data[obj.__class__.__name__]\n message, fields = data\n return (message % {\n \"obj\": unicode(obj),\n \"name\": getattr(obj, \"name\", \"\")}, fields)\n except KeyError:\n return (\n 'Unknown conflict \"%s\"; please correct and try again.'\n % err.response_error,\n [])", "def make_error_response(self,cond):\n\n if self.get_type() == \"error\":\n raise ValueError, \"Errors may not be generated in response to errors\"\n\n m=Message(stanza_type=\"error\",from_jid=self.get_to(),to_jid=self.get_from(),\n stanza_id=self.get_id(),error_cond=cond)\n\n if self.xmlnode.children:\n n=self.xmlnode.children\n while n:\n m.xmlnode.children.addPrevSibling(n.copyNode(1))\n n=n.next\n return m", "def _construct_error_message(self, failed_checksums, failed_formats):\n checksum_string = (\n (\n \"Synonyms with invalid CAS-RN checksums: [\"\n f\"{', '.join(syn.identifier for syn in failed_checksums)}]\"\n )\n if failed_checksums\n else None\n )\n format_string = (\n (\n \"Synonyms associated with this Synonym Type do not match \"\n \"the proposed regular expression: [\"\n f\"{', '.join(syn.identifier for syn in failed_formats)}]\"\n )\n if failed_formats\n else None\n )\n return \"\\n\".join(filter(None, [checksum_string, format_string]))", "def decode_sentinelhub_err_msg(response):\n try:\n server_message = []\n for elem in decode_data(response.content, MimeType.XML):\n if 'ServiceException' in elem.tag or 'Message' in elem.tag:\n server_message.append(elem.text.strip('\\n\\t '))\n return ''.join(server_message)\n except ElementTree.ParseError:\n return response.text", "def _parse_error_response(self, error: ApiException, file_type: str, file_name: str):\n message = error\n if hasattr(error, 'reason'):\n if '[SSL: CERTIFICATE_VERIFY_FAILED]' in str(error.reason):\n message = '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: self signed certificate.\\n' \\\n 'Try running the command with --insecure flag.'\n\n elif 'Failed to establish a new connection:' in str(error.reason):\n message = 'Failed to establish a new connection: Connection refused.\\n' \\\n 'Try checking your BASE url configuration.'\n\n elif error.reason in ('Bad Request', 'Forbidden'):\n error_body = json.loads(error.body)\n message = error_body.get('error')\n\n if error_body.get('status') == 403:\n message += '\\nTry checking your API key configuration.'\n print_error(str(f'\\nUpload {file_type}: {file_name} failed:'))\n print_error(str(message))", "def _adapt_response(self, response):\n errors, meta = super(ServerError, self)._adapt_response(response)\n return errors[0], meta # single error instead of array", "def get_api_error_message(self):\n if self.has_error(self.last_json_result[\"EOXRecord\"][0]):\n msg = \"%s (%s)\" % 
(self.get_error_description(self.last_json_result[\"EOXRecord\"][0]),\n self.last_json_result[\"EOXRecord\"][0]['EOXError']['ErrorID'])\n return msg\n\n return \"no error\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize a new instance of the FormatError exception.
def __init__(
    self,
    template: str,
    error: Exception,
    args: t.Any = None,
    kwargs: t.Dict[str, t.Any] = None,
) -> None:
    self.template: str = template
    self.error: Exception = error
    self.args: t.Any = args
    self.kwargs: t.Dict[str, t.Any] = kwargs
    super().__init__(
        f"Error formatting template {template!r}\n"
        f"{type(self.error)}: {self.error}\n"
        f"args: {self.args}\n",
        f"kwargs: {self.kwargs}\n",
    )
[ "def __init__(self, message):\n if isinstance(message, list):\n self.error_list = []\n for message in message:\n if not isinstance(message, FormatError):\n message = FormatError(message)\n self.error_list.extend(message.error_list)\n else:\n self.message = message\n self.error_list = [self]", "def test_eformat(self):\n self.assertIs(self.exceptionForCode(EFORMAT), DNSFormatError)", "def __init__(self, format_string):\n \n \n self._format_string = format_string\n \n self._strftime_format_string, self._digit_counts = \\\n self._parse_format_string(format_string)\n \n self._min_time_increment = self.get_min_time_increment(format_string)", "def get_error_formats(self):", "def __init__(self, error=None):\n self.error = error\n err_str = 'PPK error: {}'.format(self.error)\n Exception.__init__(self, err_str)", "def formatException(self, ei):\n return ''", "def __init__(self, fmt=None, datefmt=None):\n logging.Formatter.__init__(self, fmt, datefmt)", "def test_format_typeerror(self):\n self.assertRaises(TypeError, when.format, 'a', '%a')", "def from_format(cls, format_str: str):\n pattern = r\"(\\d+):([A-Za-z\\d]+):(.+)\"\n match = re.match(pattern, format_str)\n if not match:\n raise plug.PlugError(\n \"invalid format string: {}\".format(format_str)\n )\n priority_str, symbol, regex = match.groups()\n priority = int(priority_str)\n return super().__new__(\n cls, symbol=symbol, priority=priority, regex=regex\n )", "def errformat(*args, **kw):\n\treturn default_errformat(*args, **kw)", "def format_error(name, ex):\n return \" Name: {}\\n Error: {}\".format(name, ex)", "def __init__(self, response):\r\n self.response = response\r\n Exception.__init__(\r\n self, 'Response for \"{url}\" was invalid:\\n{content}'.format(\r\n url=response.url,\r\n content=response.content))", "def test_excepts_if_empty_input(self):\n\t\tself.assertRaises(ValueError, self.string_manipulation.format)", "def __init__(self, message):\n self.message = message\n super(DataAccessorError, self).__init__(message)", "def testIllegalFormat(self):\n seq_set = self.session.create_object(\"wgs_assembled_seq_set\")\n\n with self.assertRaises(Exception):\n seq_set.format = 1", "def __init__(self, message, value=None, index=None):\n super(CellError, self).__init__(message)\n self.value = value\n self.index = index", "def __init__(self, message: str) -> None:\n\n HermitError.__init__(self,\n \"Invalid signature request: {}.\".format(message))", "def formatted_error(value):\r\n check = type_check(value)\r\n return f\"Input Value: '{value}' of type {check} is not an valid input and can not be formatted\"", "def __init__ (self, message):\n super(MalformedIRIException, self).__init__(message)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns 'True' if all entries of the relation are binary (0 or 1), otherwise it returns 'False'.
def isBinary(self):
    for i in range(0,self.m):
        for j in range(i+1,self.m):
            if self.Q[i,j] != 0 and self.Q[i,j] != 1:
                return(False)
    return(True)
[ "def is_binary(self):\n return np.logical_or(self.cmap == 1, self.cmap == 0).all()", "def is_binary(assoc):\n return len(assoc) == 2", "def is_binary(t):\n if t == zero or t == one:\n return True\n elif t.ty != Term.COMB:\n return False\n elif t.head == bit0 or t.head == bit1:\n return is_binary(t.arg)\n else:\n return False", "def __bool__(self: bitlist) -> bool:\n return 1 in self.bits", "def is_binary(self) -> bool:\n\n return self.__is_binary", "def check_s_binary(s: pd.Series) -> None:\n log.debug(f\"Checking that s named {s.name} is binary\")\n unique_values = s.unique()\n error = RuntimeError(\n f\"Series {s.name} expected to be binary [0, 1] only, has values {unique_values}\"\n )\n if not len(unique_values) == 2:\n raise error\n elif not all([val in [0, 1] for val in unique_values]):\n raise error", "def reflection_check_func(relation: list) -> bool:\n t_f = []\n for num in range(len(relation)):\n t_f.append(bool(relation[num][num]))\n\n return all(t_f)", "def isBinaryMatrix(self):\n for i in range(self.matrix.nrow):\n for j in range(self.matrix.ncol):\n if(self.matrix.data[i][j] == 1):\n continue\n if(self.matrix.data[i][j] == 0):\n continue\n else:\n self.matrix.binaryMatrix = False\n raise binaryMatrixException\n self.matrix.binaryMatrix = True\n return self.matrix.binaryMatrix", "def check_binary(self):\n\n is_seg_binary, is_ref_binary = [((x > 0.5) == x).all()\n for x in [self.seg, self.ref]]\n # if (not is_ref_binary) or (not is_seg_binary):\n # raise ValueError(\"The input segmentation/reference images\"\n # \" must be binary for this function.\")", "def isBinary(self) -> \"SbBool\":\n return _coin.SoOutput_isBinary(self)", "def isBinary(self) -> \"SbBool\":\n return _coin.SoInput_isBinary(self)", "def makeBinary(self):\r\n for i in range(0,self.m):\r\n for j in range(i+1,self.m):\r\n if self.Q[i,j]>=0.5:\r\n self.setEntry([i,j],1)\r\n else:\r\n self.setEntry([i,j],0)\r\n return(True)", "def AllBits(env, *args):\n _CheckDeclared(args)\n return set(args).issubset(env['_BITS'])", "def _check_bin_attrs(node):\n out_edges = node.out_edges()\n bin_in_out_ports = ['bin' in edge for edge in out_edges]\n out_node = [node.has('op') and node.op == 'OpOutput' for node in node.out_nodes()]\n return np.any(out_node) or not np.all(bin_in_out_ports)", "def is_binary(cls, fmt: 'SerialFormats') -> bool:\n bins = (cls.BINN, cls.BSON, cls.CBOR, cls.ION, cls.MSGPACK, cls.SMILE, cls.UBJSON)\n if vp := getattr(cls, 'VPACK', None):\n bins = (*bins, vp)\n\n return fmt in bins", "def is_binning_ok(bin_edges):\n # Must be at least two edges to define a single bin\n if len(bin_edges) < 2:\n return False\n # Bin edges must be monotonic and strictly increasing\n if np.any(np.diff(bin_edges) <= 0):\n return False\n return True", "def iszero(self):\n return all((v == 0 for v in self.b))", "def all_ones(num):\n return ((num + 1) & num == 0) and (num != 0)", "def decode_bin_bool(bytes_in):\n b = Decoder.decode_bin(bytes_in)\n return b > 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Changes all entries (not on the diagonal) >=0.5 to '1' and the others to '0'.
def makeBinary(self):
    for i in range(0,self.m):
        for j in range(i+1,self.m):
            if self.Q[i,j]>=0.5:
                self.setEntry([i,j],1)
            else:
                self.setEntry([i,j],0)
    return(True)
[ "def apply_bin_labels(X):\n\tA = np.copy(X)\n\tlabel_i = A.shape[1] - 1\n\tA[A[:,label_i]==0,label_i] = -1\n\tA[A[:,label_i]>0,label_i] = 1\n\treturn A", "def _labels_to_plus_minus(*args):\n for x in args:\n x[x <= 0.] = -1\n x[x > 0.] = 1", "def test_to_labels_1q_array(self):\n with self.assertWarns(DeprecationWarning):\n pauli = PauliTable(\n np.array(\n [[False, False], [False, True], [False, True], [True, False], [True, True]],\n dtype=bool,\n )\n )\n target = np.array([\"I\", \"Z\", \"Z\", \"X\", \"Y\"])\n value = pauli.to_labels(array=True)\n self.assertTrue(np.all(value == target))", "def test_to_labels_1q(self):\n with self.assertWarns(DeprecationWarning):\n pauli = PauliTable(\n np.array(\n [[False, False], [False, True], [False, True], [True, False], [True, True]],\n dtype=bool,\n )\n )\n target = [\"I\", \"Z\", \"Z\", \"X\", \"Y\"]\n value = pauli.to_labels()\n self.assertEqual(value, target)", "def predict_zero_one(self, input_matrix: np.ndarray) -> np.ndarray:\n self.hidden_matrix_2_activation_binary=np.where(self.predict(input_matrix) < .5 , 0 , 1)\n return self.hidden_matrix_2_activation_binary", "def ones(self):\n return self.constantVector('__ones',self.db.ones())", "def ones():\n return Mat3(\n 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0)", "def custom_mask(mask):\n \n new_mask = np.zeros(mask.shape[0]*mask.shape[1])\n new_mask = new_mask.reshape(mask.shape[0], mask.shape[1])\n for i in range(1):\n for j in range(mask.shape[0]):\n for k in range(mask.shape[1]):\n new_mask[j][k] = mask[j][k]\n if new_mask[j][k] > 0.5 :\n new_mask[j][k] = 1 \n else:\n new_mask[j][k] = 0\n \n return new_mask", "def binary_labels(labels, in_class, column=\"inregister\"):\n\n labels[column] = labels[column].apply(lambda paper: 1 if paper == in_class else 0)\n\n return labels", "def __repr__(self):\n if self.bit:\n return '1'\n else:\n return '0'", "def zero2one(self, x):\n if x == 0:\n x = 1\n return x", "def fix_binary_variables(m):\r\n\r\n for t in m.T:\r\n for g in m.G_THERM:\r\n m.u[g, t].fix(round(m.u[g, t].value, 4))\r\n m.v[g, t].fix(round(m.v[g, t].value, 4))\r\n m.w[g, t].fix(round(m.w[g, t].value, 4))\r\n\r\n if g in m.G_C_THERM:\r\n m.x[g, t].fix(round(m.x[g, t].value, 4))\r\n m.y[g, t].fix(round(m.y[g, t].value, 4))\r\n m.z[g, t].fix(round(m.z[g, t].value, 4))\r\n\r\n return m", "def boolean_matrix(a):\n row_one = col_one = False\n for i in range(len(a[0])):\n if a[0][i] == 1:\n row_one = True\n for i in range(len(a)):\n if a[i][0] == 1:\n col_one = True\n for i in range(1, len(a)):\n for j in range(1, len(a[0])):\n if a[i][j] == 1:\n a[i][0] = 1\n a[0][j] = 1\n for i in range(1, len(a)):\n for j in range(1, len(a[0])):\n if a[i][0] == 1 or a[0][j] == 1:\n a[i][j] = 1\n \n if row_one:\n for i in range(len(a[0])):\n a[0][i] = 1\n \n if col_one:\n for i in range(len(a)):\n a[i][0] = 1\n return a", "def fix_label_matrix(label_matrix):\n no_data_ids = np.argwhere(label_matrix == 255)\n if len(np.ravel(no_data_ids)) != 0:\n # All OutOfExtend pixels becomes 'land'\n label_matrix[label_matrix == 255] = 0\n return label_matrix\n else:\n return label_matrix", "def set_diag(self, *values):", "def toMatrice(self):\n\t\ttxt = \" \"\n\t\tfor i in sorted(self.graphe.keys()):\n\t\t txt += str(i)+\"-\"\n\t\tprint(txt, file=sys.stderr)\n\t\t\n\t\ttxt=\"\"\n\t\tfor i in sorted(self.graphe.keys()):\n\t\t\ttxt += str(i)\n\t\t\tfor j in sorted(self.graphe.keys()):\n\t\t\t\tif i in self.graphe[j].keys():\n\t\t\t\t\ttxt += \" 1\"\n\t\t\t\telse:\n\t\t\t\t\ttxt += \" 0\"\n\t\t\tprint(txt, 
file=sys.stderr)\n\t\t\ttxt = \"\"", "def transform_bool(col_data):\n return col_data.apply(lambda x: 0 if x == 'f' else 1)", "def convert_to_binary_labels(y):\n return y.isnull().map(lambda x: 0 if x else 1).values", "def computePositiveExceptional(data):\n mask = data[POSITIVE_EXCEPTIONAL_QUESTIONS] == 4\n data.loc[:, POSITIVE_EXCEPTIONAL_QUESTIONS] = mask.astype(int)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a deep copy of this ReciprocalRelation. EXAMPLE >> Q=ReciprocalRelation(np.array([[0.5,0.3,0.4],[0.7,0.5,0.9],[0.6,0.1,0.5]])) >> newQ = Q.copy() >> Q.setEntry([0,1],0.99) >> Q.show() >> newQ.show()
def copy(self):
    return(ReciprocalRelation(self.Q.copy(),self.precision))
[ "def copy(self):\n r = Relation(cardinality=self.cardinality, ordered=self.isordered())\n r.forward.update(self.forward)\n r.inverse.update(self.inverse)\n return r", "def copy(self):\n return Quadrant(\n list(self.min_coordinates), list(self.max_coordinates))", "def copy(self):\n return Disjoint_Set_Forest(self._parent.copy(), self._rank.copy())", "def reverse_copy(self):\n return Graph(self.map(lambda n: n.reverse_copy()))", "def copy(self):\n return Graph(self.map(lambda n: n.copy()))", "def copy(self):\n return self.__class__( self.first, self.last )", "def reciprocal(self):\n return Rational(self.denominator, self.numerator)", "def inverse(self):\n a=self.numerator\n self.numerator=self.denominator\n self.denominator=a\n return(self)", "def copy(self):\n copy = JunctionTree(self.edges())\n copy.add_nodes_from(self.nodes())\n if self.factors:\n factors_copy = [factor.copy() for factor in self.factors]\n copy.add_factors(*factors_copy)\n return copy", "def reciprocal(self):\n\n value = -1 / (self.val * self.val)\n der = {k: value * v for k, v in self.der.items()}\n return AutoDiff(self.var, 1 / self.val, der)", "def copy_of(self):\n theatom = Atom()\n theatom.atomname = self.atomname\n theatom.residue = self.residue\n theatom.coordinates = self.coordinates.copy_of()\n theatom.element = self.element\n theatom.pdb_index = self.pdb_index\n theatom.line = self.line\n theatom.atomtype = self.atomtype\n theatom.indices_of_atoms_connecting = self.indices_of_atoms_connecting[:]\n theatom.charge = self.charge\n theatom.resid = self.resid\n theatom.chain = self.chain\n theatom.structure = self.structure\n theatom.comment = self.comment\n\n return theatom", "def simplify(self):\n\n r = Relation()\n for k, v in self.items():\n v = simplify(v)\n r._set(k, v)\n return r", "def clone(self):\n fields = dict((k, v.clone() if isinstance(v, FieldSet) else v)\n for k, v in self.fields.iteritems())\n return self.__class__(force_order=self.fields.keys(), **fields)", "def copy(self):\n phi = self.jg.copy()\n h = phi.codomain\n b = [ phi.map[a] for a in self.bd ]\n return RibbonGraph(h,b)", "def clone(self):\n return self.__clone(True)", "def clone(self) -> \"ScXMLDataObj *\":\n return _coin.ScXMLRealDataObj_clone(self)", "def copy(self) -> \"DataArray\":\n return deepcopy(self)", "def __deepcopy__(self):\n return BipartiteGraph.extract_edge_induced_subgraph(self, lambda edge: True) # copy all edges", "def clone(self):\n\n clone = copy(self)\n clone.graph = copy(self.graph)\n clone.labels = dict(self.labels)\n return clone" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of exactly those integers x in 0,1,...,2^binom(m,2)-1, for which 'getBinaryReciprocalRelation(m,x)' is a WST reciprocal relation.
def getAllWSTIndices(m):
    assert type(m) is int and m>=1, "`m` has to be a positive integer."
    AllWSTIndices = list()
    for idx in range(0,2**(scipy.special.binom(m,2)).astype(int)):
        if isWST(getBinaryReciprocalRelation(m,idx)) is True:
            AllWSTIndices.append(idx)
    return(AllWSTIndices)
[ "def _generate_relations(self, n, base, num_relations):\n relations = []\n floor_sqrt_n = int(math.ceil(math.sqrt(n)))\n\n for x in xrange(floor_sqrt_n, n):\n if len(relations) >= num_relations:\n break\n\n exponents = is_b_smooth((x ** 2) % n, base)\n\n if exponents:\n relations.append((x, exponents))\n\n return relations", "def reciprocal_rank(rels):\n\n nonzero_indices = np.asarray(rels).nonzero()[0]\n if has_no_relevant_items(nonzero_indices):\n # RR equals 0.0 if there is no relevant items\n return 0.0\n\n return 1.0 / (1.0 + nonzero_indices[0])", "def reciprocals(start, stop):\n for i in range(start, stop):\n yield 1 / i", "def getIndexOfBinaryRelation(R):\r\n assert type(R) is ReciprocalRelation and R.isBinary(), \"'R' has to be a binary relation\"\r\n index_str = \"\"\r\n for i in range(0,R.m):\r\n for j in range(i+1,R.m):\r\n index_str = str(int(R.Q[i,j])) + index_str\r\n return(int(index_str, base=2))", "def reciprocal_operations(self):\n return self._reciprocal_operations", "def repetition_vector(\n topology_matrix: TopologyMatrix\n) -> List[int]:\n _, top_matrix = topology_matrix\n matrix = Matrix(top_matrix)\n fractional = list(map(fraction, matrix.nullspace()[0]))\n denominators = [f[1] for f in fractional]\n lcm_null = reduce(lcm, denominators)\n integer = list(n * lcm_null / d for n, d in fractional)\n gcd_integer = reduce(gcd, integer)\n return [x / gcd_integer for x in integer]", "def reciprocal_cycles(n):\n res = [0]*(n+1)\n max_length = 0\n d = 0\n for i in range(1, n + 1):\n cycle_length = find_cycle_length(i)\n if cycle_length > max_length:\n max_length = cycle_length\n d = i\n res[i] = d\n return res", "def brute_force_find_roots(poly: nmod_poly) -> List[int]:\n return [x for x in range(poly.modulus()) if int(poly(x)) == 0]", "def x_synds_near_zero(n, r):\n # (0)000 => (1)000, (0)001, (0)010, (0)100\n # (1)000 => (0)000, (1)001, (1)010, (1)100\n if r == 0:\n return [ 0 ]\n else:\n # combine lists: choose the parity stabiliser, don't choose the parity stabiliser\n return binary_combinations(n, r - 1) + binary_combinations(n, r)", "def bitmasks(n,m):\n if m < n:\n if m > 0:\n for x in bitmasks(n-1,m-1):\n yield bitarray([1]) + x\n for x in bitmasks(n-1,m):\n yield bitarray([0]) + x\n else:\n yield n * bitarray('0')\n else:\n yield n * bitarray('1')", "def brent_get_all_primes(n):\n while not miller_rabin_prime_test(n, 100):\n p = brent_prime(n)\n n /= p\n return brent_get_all_primes(p) + brent_get_all_primes(n)\n if n == 1:\n return []\n return [n]", "def primeroR(self, X):\n\t\tprimero=[]\n\t\tif self.isPureTerminal(X):\n\t\t\treturn [X]\n\t\telse:\n\t\t\tfor prod in self.ProdsJoined:\n\t\t\t\tif prod.Left==X:\n\t\t\t\t\tprint('its a match: '+ X)\n\t\t\t\t\tfor derivation in prod.Right: \n\t\t\t\t\t\tif 'ฮต'==derivation:\n\t\t\t\t\t\t\tprimero.append('ฮต')\n\t\t\t\t\t\telif X not in derivation[0]:\n\t\t\t\t\t\t\tsymbolPrimero=[]\n\t\t\t\t\t\t\tallEpsilon=True\n\t\t\t\t\t\t\tfor symbol in derivation:\n\t\t\t\t\t\t\t\tAux=self.primeroR(symbol)\n\t\t\t\t\t\t\t\tsymbolPrimero.extend(Aux)\n\t\t\t\t\t\t\t\tif 'ฮต' not in Aux:\n\t\t\t\t\t\t\t\t\tallEpsilon=False\n\t\t\t\t\t\t\t\t\tbreak \n\n\t\t\t\t\t\t\tfor val in symbolPrimero:\n\t\t\t\t\t\t\t\tif val!='ฮต' and val not in primero:\n\t\t\t\t\t\t\t\t\tprimero.append(val)\n\t\t\t\t\t\t\tif allEpsilon:\n\t\t\t\t\t\t\t\tprimero.append('ฮต')\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\treturn primero", "def integer_right_triangles(p):\n return [[a,b,p-a-b]\n for a in range(1,p)\n for b in range(a,p)\n if a**2 + b**2 == (p-a-b)**2]", 
"def find_roots(poly: nmod_poly) -> List[int]:\n # return brute_force_find_roots(poly)\n return flint_find_roots(poly)", "def compute_primes(bound):\n \n answer = list(range(2, bound))\n\n for divisor in range(2, bound):\n # remove all multiple of divisor from answer\n for i in range(len(answer)):\n if answer[i] != 1:\n if answer[i] != divisor:\n if answer[i] % divisor == 0:\n answer[i] = 1\n \n return([num for num in answer if num != 1])", "def _getRelations(self):\r\n \r\n result = list()\r\n for relation in self._relations.values():\r\n result.append(deepcopy(relation))\r\n return result", "def relprimes(n,b=1):\n relprimes = []\n for i in range(1,n):\n if gcd(i,n)==1: relprimes.append(i)\n print(\" n-rp's: %s\" % (relprimes))\n relprimes = map(operator.mul,[b]*len(relprimes),relprimes)\n newremainders = map(operator.mod,relprimes,[n]*len(relprimes))\n print(\"b * n-rp's mod n: %s\" % newremainders)", "def binary_combinations(n, r):\n return [sum(1<<x for x in c) for c in it.combinations(range(n), r)]", "def rdigits(n, base=10):\n return list(iter_rdigits(n, base))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the corresponding index 'idx' of a binary reciprocal relation R, such that getBinaryReciprocalRelation(R.m,idx) and R have the same entries everywhere
def getIndexOfBinaryRelation(R):
    assert type(R) is ReciprocalRelation and R.isBinary(), "'R' has to be a binary relation"
    index_str = ""
    for i in range(0,R.m):
        for j in range(i+1,R.m):
            index_str = str(int(R.Q[i,j])) + index_str
    return(int(index_str, base=2))
[ "def reciprocal_rank(rels):\n\n nonzero_indices = np.asarray(rels).nonzero()[0]\n if has_no_relevant_items(nonzero_indices):\n # RR equals 0.0 if there is no relevant items\n return 0.0\n\n return 1.0 / (1.0 + nonzero_indices[0])", "def _indices(self, r):\n bits = (2 * (r - self.r0[newaxis, :]) / self.lengths[newaxis, :])\n bits = bits.astype('i')\n\n # We do not want to exclude the boundaries with higher values\n bits = where(bits < 2, bits, 1)\n\n p2 = array([4, 2, 1])\n\n # This is the index of the chid where each charge is sitting\n return dot(bits, p2)", "def _map_index(r, c):\n return ((r)*(r)+(r))/2+c", "def get_R_rank(self):\n\t\treturn matrix_rank(self.get_R())", "def get_rank_index(self, rank):\n return self.RANKS.index(rank)", "def GetReciprocal(self) -> \"itkVersorD\":\n return _itkVersorPython.itkVersorD_GetReciprocal(self)", "def relative_ramification_index(self):\n return self.ramification_index()", "def get_refractiveindex(self, wavelength):\r\n wavelength /= 1000.0\r\n if self.rangeMin <= wavelength <= self.rangeMax:\r\n formula_type = self.formula\r\n coefficients = self.coefficients\r\n n = 0\r\n if formula_type == 1: # Sellmeier\r\n nsq = 1 + coefficients[0]\r\n\r\n def sellmeier(c1, c2, w):\r\n return c1 * (w ** 2) / (w ** 2 - c2 ** 2)\r\n\r\n for i in range(1, len(coefficients), 2):\r\n nsq += sellmeier(coefficients[i],\r\n coefficients[i + 1],\r\n wavelength)\r\n n = numpy.sqrt(nsq)\r\n elif formula_type == 2: # Sellmeier-2\r\n nsq = 1 + coefficients[0]\r\n\r\n def sellmeier2(c1, c2, w):\r\n return c1 * (w ** 2) / (w ** 2 - c2)\r\n for i in range(1, len(coefficients), 2):\r\n nsq += sellmeier2(coefficients[i],\r\n coefficients[i + 1],\r\n wavelength)\r\n n = numpy.sqrt(nsq)\r\n elif formula_type == 3: # Polynomal\r\n def polynomial(c1, c2, w):\r\n return c1 * w ** c2\r\n nsq = coefficients[0]\r\n for i in range(1, len(coefficients), 2):\r\n nsq += polynomial(coefficients[i],\r\n coefficients[i + 1],\r\n wavelength)\r\n n = numpy.sqrt(nsq)\r\n elif formula_type == 4: # RefractiveIndex.INFO\r\n def riinfo(wl, ci, cj, ck, cl):\r\n return ci * wl**cj / (wl**2 - ck**cl)\r\n n = coefficients[0]\r\n n += riinfo(wavelength, *coefficients[1:5])\r\n n += riinfo(wavelength, *coefficients[5:9])\r\n for kk in range(len(coefficients[9:]) // 2):\r\n n += coefficients[9+kk] * wavelength**coefficients[9+kk+1]\r\n\r\n n = numpy.sqrt(n)\r\n elif formula_type == 5: # Cauchy\r\n def cauchy(c1, c2, w):\r\n return c1 * w ** c2\r\n n = coefficients[0]\r\n for i in range(1, len(coefficients), 2):\r\n n += cauchy(coefficients[i],\r\n coefficients[i + 1],\r\n wavelength)\r\n elif formula_type == 6: # Gasses\r\n def gasses(c1, c2, w):\r\n return c1 / (c2 - w ** (-2))\r\n n = 1 + coefficients[0]\r\n for i in range(1, len(coefficients), 2):\r\n n += gasses(coefficients[i],\r\n coefficients[i + 1],\r\n wavelength)\r\n elif formula_type == 7: # Herzberger\r\n n = coefficients[0]\r\n n += coefficients[1] / (wavelength**2 - 0.028)\r\n n += coefficients[2] * (1 / (wavelength**2 - 0.028))**2\r\n for i, cc in enumerate(coefficients[3:]):\r\n n += cc * wavelength**(2*(i+1))\r\n elif formula_type == 8: # Retro\r\n n = coefficients[0]\r\n n += coefficients[1] * wavelength**2 /\\\r\n (wavelength**2 - coefficients[2])\r\n n += coefficients[3] * wavelength**2\r\n n = numpy.sqrt(-(2 * n + 1) / (n - 1))\r\n elif formula_type == 9: # Exotic\r\n n = coefficients[0]\r\n n += coefficients[1] / (wavelength**2 - coefficients[2])\r\n n += coefficients[3] * (wavelength - coefficients[4]) / \\\r\n ((wavelength - 
coefficients[4])**2 + coefficients[5])\r\n n = numpy.sqrt(n)\r\n else:\r\n raise Exception('Bad formula type')\r\n return n\r\n else:\r\n raise Exception('Wavelength {} is out of bounds.'\r\n 'Correct range(um): ({}, {})'.\r\n format(wavelength, self.rangeMin, self.rangeMax))", "def reciprocal(self):\n return Rational(self.denominator, self.numerator)", "def rindex(self, wvl):\n if isinstance(wvl, float):\n return self.calc_rindex(wvl)\n elif isinstance(wvl, int):\n return self.calc_rindex(wvl)\n else:\n return self.calc_rindex(spectra[wvl])", "def reciprocal(cls, x):\n\n retval = x.clone()\n cls._reciprocal(x.data, out = retval.data)\n return retval", "def __right_child(self, index):\n\n return index * 2 + 1", "def reduceindex(M):\n oldM = M\n g = gcd(M[0], M[1])\n h = gcd(g, M[2])\n while h != 1:\n if h == 0:\n raise ValueError(\"Division by zero: Are the miller indices linearly dependent?\")\n M = M // h\n g = gcd(M[0], M[1])\n h = gcd(g, M[2])\n if np.dot(oldM, M) > 0:\n return M\n else:\n return -M", "def r(self, index, seed: Optional[int] = None,\n angle_scale: float = 1.0) -> None:\n if index == -1:\n for i in range(self._nqudits):\n self.apply_one_qudit_gate(\n rgate(seed, angle_scale), i,\n )\n else:\n self.apply_one_qudit_gate(rgate(seed, angle_scale), index)", "def ramification_index(self):\n return ZZ(self.pari_prime().pr_get_e())", "def getIndex(self) -> \"int\":\n return _coin.SoNotRec_getIndex(self)", "def neighbor_indices(self):", "def rnni_findpath(T, R):\n d = 0\n T1 = T.copy()\n # path = [T1.copy()]\n for k in range(len(R) - 1):\n Ck = R[k]\n r = rank(T1, Ck)\n while r > k:\n v = T1[r]\n u = T1[r - 1]\n if bitwise_subset(u, v):\n # u is a child of v, so do a NNI to reduce rank(T1, Ck).\n w = v - u\n # Find children of u.\n # XXX: assumes binary trees\n leaves = np.nonzero(u)[0]\n if len(leaves) == 2:\n # Both children are leaves.\n x = np.zeros_like(u)\n x[leaves[0]] = 1\n else:\n for x in reversed(T1[: r - 1]):\n if bitwise_subset(x, u):\n # x is a child of u\n break\n else:\n raise ValueError(f\"{u} has no children in {T1}\")\n y = u - x\n # Currently we have u = x + y, and the two NNI options are:\n # u = x + w, or,\n # u = y + w.\n # Only one of these will reduce rank(T1, Ck).\n if bitwise_subset(Ck, x + w):\n T1[r - 1] = x + w\n else:\n T1[r - 1] = y + w\n else:\n # Swap nodes v and u.\n T1[[r, r - 1]] = T1[[r - 1, r]]\n r -= 1 # Both operations reduce the rank by 1.\n d += 1\n # path.append(T1.copy())\n return d # , path", "def refractive_index_to_reflection_coeff(n):\n return -np.diff(n) / (2 * n.flatten(order='f')[:-1] + np.diff(n))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if Q has a Condorcet winner, otherwise it returns False
def has_CW(Q):
    assert type(Q) is ReciprocalRelation
    for i in range(0,Q.m):
        i_is_CW = True
        for j in range(0,Q.m):
            if i != j and Q.Q[i,j]<0.5:
                i_is_CW = False
        if i_is_CW is True:
            return(True)
    return(False)
[ "def collision(q):\r\n \r\n\r\n return False", "def check_for_winner(players) -> bool:\n return sum(map(lambda x: not x.is_bankrupt(), players)) == 1", "def is_first_winner(self):\r\n return self.first_winner", "def _check_winner(self):\n for combo in self._winning_combos:\n winner = reduce(lambda x, y: x if x == y else None, [self.board[x] for x in combo])\n if winner:\n return winner\n\n return None if None in self.board else self.draw", "def game_over(self):\n return self.winner() is not None", "def win(self):\n return sum(i.count('X') for i in self.board) == 1", "def winner(self):\n for mark in 'XO':\n if self.is_win(mark):\n return mark\n return None", "def is_winner(boardstate, side):\n winner = False # no winner/draw yet\n if are_winning_rows(boardstate, side) or \\\n are_winning_cols(boardstate, side) or \\\n are_winning_diags(boardstate, side):\n winner = True\n return winner", "def winner(self):\n if len(self.whos_alive()) == 1:\n return self.whos_alive()[0]\n else:\n return None", "def HasqCTO(self):\n return self.__has('qCTO')", "def is_win(self):\n if self._is_terminal:\n return self.board[self.player_goal_idx] > self.board[self.opponent_goal_idx]", "def HasqVCO(self):\n return self.__has('qVCO')", "def HasqWGN(self):\n return self.__has('qWGN')", "def HasqCTV(self):\n return self.__has('qCTV')", "def HasqRON(self):\n return self.__has('qRON')", "def winner(self):\n if self.is_finished():\n scores = self.get_scores()\n if scores[1] == scores[2]:\n return 0\n elif scores[1] > scores[2]:\n return 1\n else:\n return 2\n #print(\"Game is not yet finished!\")\n return 0", "def get_CW(Q):\r\n assert type(Q) is ReciprocalRelation\r\n for i in range(0,Q.m):\r\n i_is_CW = True\r\n for j in range(0,Q.m):\r\n if i != j and Q.Q[i,j]<0.5:\r\n i_is_CW = False\r\n if i_is_CW is True:\r\n return(i)\r\n return(False)", "def is_draw(self):\n # Game is over and there is no winner\n return self.game_complete() == True and self.winner() is None", "def HasqCCV(self):\n return self.__has('qCCV')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns i if i is the CW of Q. If Q has no CW, it returns False
def get_CW(Q):
    assert type(Q) is ReciprocalRelation
    for i in range(0,Q.m):
        i_is_CW = True
        for j in range(0,Q.m):
            if i != j and Q.Q[i,j]<0.5:
                i_is_CW = False
        if i_is_CW is True:
            return(i)
    return(False)
[ "def has_CW(Q):\r\n assert type(Q) is ReciprocalRelation\r\n for i in range(0,Q.m):\r\n i_is_CW = True\r\n for j in range(0,Q.m):\r\n if i != j and Q.Q[i,j]<0.5:\r\n i_is_CW = False\r\n if i_is_CW is True:\r\n return(True)\r\n return(False)", "def HasqTRI(self):\n return self.__has('qTRI')", "def is_q_annihilator(self):\n if self.is_below_fermi:\n return -1\n return 0", "def is_corner(self):\n\n # Corner if it_class contains 2 zeros\n return 2 == np.count_nonzero(self.it_class)", "def HasqSIC(self):\n return self.__has('qSIC')", "def HasqTCI(self):\n return self.__has('qTCI')", "def is_corner(ic, pos):\n return pos in ((0, 0), (0, ic.max_y), (ic.max_x, 0), (ic.max_x, ic.max_y))", "def HasqWSL(self):\n return self.__has('qWSL')", "def is_in_weierstrass_disc(self,P):\n if (P[1].valuation() == 0 and P != self(0,1,0)):\n return False\n else:\n return True", "def HasqDIP(self):\n return self.__has('qDIP')", "def HasqCTI(self):\n return self.__has('qCTI')", "def is_q_annihilator(self):\n if self.is_above_fermi:\n return 1\n return 0", "def HasqIFC(self):\n return self.__has('qIFC')", "def HasqATC(self):\n return self.__has('qATC')", "def is_still_positive(self,i):\n return i < self.num_band", "def HasqWFR(self):\n return self.__has('qWFR')", "def clockwise(self):\r\n return self.area < 0", "def HasqDCO(self):\n return self.__has('qDCO')", "def is_ccw(ring):\n return signed_area(ring) >= 0.0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Samples uniformly at random a reciprocal relation Q with m alternatives, which has a CW
def sampleCW(m,decimal_precision=10):
    Q = sampleReciprocal(m,decimal_precision)
    cw = np.random.randint(0,m) # cw is chosen to be the CW
    for j in range(0,m):
        if Q.Q[cw,j]<0.5:
            buf = Q.Q[j,cw]
            Q.setEntry([cw,j],buf)
    return(Q), cw
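A numpy-only sketch of the same construction, assuming the codebase helpers sampleReciprocal and Q.setEntry are unavailable here; swapping a losing entry with its mirror keeps the matrix reciprocal while forcing the chosen index to be a CW.

import numpy as np

def sample_cw_matrix(m, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    Q = np.full((m, m), 0.5)
    for i in range(m):
        for j in range(i + 1, m):
            Q[i, j] = rng.random()
            Q[j, i] = 1.0 - Q[i, j]      # reciprocity
    cw = int(rng.integers(m))            # index forced to be the CW
    for j in range(m):
        if Q[cw, j] < 0.5:               # flip comparisons that cw loses
            Q[cw, j], Q[j, cw] = Q[j, cw], Q[cw, j]
    return Q, cw

Q, cw = sample_cw_matrix(5)
assert all(Q[cw, j] >= 0.5 for j in range(5))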
[ "def sample_mniw(nu, L, M, S):\n # Sample from inverse wishart\n Q = invwishart.rvs(nu, L)\n # Sample from Matrix Normal\n A = npr.multivariate_normal(M.flatten(order='F'), np.kron(S, Q)).reshape(M.shape, order='F')\n return A, Q", "def random_sequence_qmc(size_mv, i, n=1, randomized=True):\n size_mv = np.int(size_mv)\n n = np.int(n)\n random_seed = random.randrange(10**9)\n #u = np.array(randtoolbox.sobol(n=n, dim=size_mv, init=(i==0), scrambling=0, seed=random_seed)).reshape((n,size_mv))\n if randomized:\n shift = np.random.rand(1,size_mv)\n u = np.mod(np.array(randtoolbox.sobol(n=n, dim=size_mv, init=(i==0))).reshape((n,size_mv)) + shift, 1)\n else: \n u = np.array(randtoolbox.sobol(n=n, dim=size_mv, init=(i==0))).reshape((n,size_mv))\n \n while test_random(u):\n random_seed = random.randrange(10**9)\n #u = np.array(randtoolbox.sobol(n=n, dim=size_mv, init=(i==0), scrambling=0, seed=random_seed)).reshape((n,size_mv))\n if randomized:\n shift = np.random.rand(1,size_mv)\n u = np.mod(np.array(randtoolbox.sobol(n=n, dim=size_mv, init=(i==0))).reshape((n,size_mv)) + shift, 1)\n else: \n u = np.array(randtoolbox.sobol(n=n, dim=size_mv, init=(i==0))).reshape((n,size_mv))\n \n return(u)", "def _rnd_mixnorm(props, mus, covs, rng, n_samples):\n # ---- Randomly select components ----\n # n_comps = len(mus)\n # comps = rng.randint(0, high=n_comps, size=n_samples)\n comps = rnd_discrete(props, rng, n_samples)\n\n # ---- Generate samples from selected components ----\n return np.array(\n [rng.normal(mus[c], covs[c], 1) for c in comps]).reshape(-1)", "def sample_uniform(self, num_choices: int) -> int:\n return self.sample_distribution(np.ones(num_choices) / num_choices)", "def weighted_sample(items, n):\n total = 0.0;\n i = 0\n # overcomplicated in case of future exclusion logic being in third+ location of item array\n for w in items:\n total += items[i][\"selectWeight\"]\n i+=1\n i = 0\n w = items[i][\"selectWeight\"]\n v = items[i]\n while n:\n x = total * (1 - random.random() ** (1.0 / n))\n total -= x\n while x > w:\n x -= w\n i += 1\n w = items[i][\"selectWeight\"]\n v = items[i]\n w -= x\n yield v\n n -= 1", "def sample_uniform(self, N):\n np.random.seed()\n return np.random.dirichlet([1]*self.k, N)", "def random_unitary(N):\n Z = np.random.randn(N,N) + 1.0j * np.random.randn(N,N)\n [Q,R] = sp.linalg.qr(Z)\n D = np.diag(np.diagonal(R) / np.abs(np.diagonal(R)))\n return np.dot(Q, D)", "def sampleRecRel_exactly_h(m,h,decimal_precision=10): \r\n Q = sampleReciprocal(m,decimal_precision)\r\n Q = __EnforceBoundedFromOneHalf__(Q,0.4)\r\n for i in range(0,Q.m):\r\n for j in range(0,Q.m):\r\n if Q.Q[i,j]>0.5:\r\n Q.Q[i,j] = 0.5+h\r\n if Q.Q[i,j]<0.5:\r\n Q.Q[i,j] = 0.5-h\r\n return(Q)", "def random_distribution(n_items):\r\n return np.random.dirichlet([1.0 for i in range(n_items)])", "def random_with_singular_values(m, n, singular_values):\n Q = random_ortho(m)\n singular_values=np.array(singular_values)\n svs = singular_values.shape[0]\n if svs < max(n, m):\n singular_values = np.concatenate((singular_values, np.array([0] * (max(n, m)-svs))))\n D = np.diag(singular_values)\n V = random_ortho(n)\n M = Q*D[:m, :n]*V\n return np.array(M)", "def init_W(rng, dim):\n temp, rng = random.split(rng)\n W = random.normal(temp, (dim,))\n print(W)\n print(W.shape)\n print(W.dtype)\n print(type(W))\n exit()\n W = unit_projection(W)\n temp, rng = random.split(rng)\n W = random.uniform(temp, ()) * W\n return W", "def apply_random_symplectic(self, qubits):\n # Here m is the number of qubits that the gate will be 
applied to\n # while n is the total number of qubits in the simulation\n m = len(qubits)\n\n # Generate a random symplectic matrix that is\n # symplectic with L = direct_sum_{j=1}^n X\n i = np.random.randint(symplectic.numberofsymplectic(m))\n S = symplectic.symplectic(i, m)\n\n # Convert this symplectic matrix to one that is symplectic\n # with L = [[0, I], [I, 0]]\n S = decompose.transform_symplectic(S)\n\n # Lastly, apply this to our state\n self.apply_symplectic(S, qubits)", "def sampleNotCW_exactly_h(m,h,max_tries=1000,decimal_precision=10): \r\n assert type(h) is float and 0<h<1/2, \"The parameter `h` has to be a `float` in the interval :math:`(0,0.5)`\"\r\n Q = sampleNotCW_boundedFromOneHalf(m=m,h=0.4,max_tries=1000,decimal_precision=decimal_precision)\r\n for i in range(0,Q.m):\r\n for j in range(0,Q.m):\r\n if Q.Q[i,j]>0.5:\r\n Q.Q[i,j] = 0.5+h\r\n if Q.Q[i,j]<0.5:\r\n Q.Q[i,j] = 0.5-h\r\n return(Q)", "def _sample_n(self, n, seed=None):\n return self._inverse_scale.solvevec(\n samplers.normal(\n shape=ps.concat([[n], ps.shape(self._loc)], axis=0), seed=seed),\n adjoint=True)", "def random_orthogonal(p,k):\n Q0 = np.random.randn(p,k)\n Q = nearest_orthogonal(Q0)\n return Q", "def sample_joint_factor_model(n_samples=200, n_features=[10, 20, 30],\n joint_rank=3, noise_std=1, m=1.5,\n random_state=None):\n rng = check_random_state(random_state)\n n_views = len(n_features)\n\n view_loadings = [rand_orthog(d, joint_rank, random_state=rng)\n for d in n_features]\n\n svals = np.arange(1, 1 + joint_rank).astype(float)\n svals *= m * noise_std * (n_samples * max(n_features)) ** (1 / 4)\n U = rng.normal(size=(n_samples, joint_rank))\n U = np.linalg.qr(U)[0]\n\n Es = [noise_std * rng.normal(size=(n_samples, d))\n for d in n_features]\n Xs = [(U * svals) @ view_loadings[b].T + Es[b] for b in range(n_views)]\n\n return Xs, U, view_loadings", "def random_ortho(n):\n A = np.mat(np.random.random((n, n)))\n Q, R = np.linalg.qr(A)\n return Q", "def gen_random_quaternion():\n def normalize(q):\n q_sum = numpy.linalg.norm(q)\n \n return q/q_sum \n \n q = numpy.zeros(4)\n for i in range(4):\n k = 15 # We use the central limit theorem for our samples\n k_sample = numpy.random.standard_normal(size=k)\n ran_coord = k_sample.sum()\n q[i] = ran_coord\n \n q = normalize(q)\n \n return q", "def random_low_weight_bases(N,p,m,NN,weightbound):\n LWB = low_weight_bases(N,p,m,NN,weightbound)\n # this is \"approximately\" row reduced (it's the mod p^n reduction of a\n # matrix over ZZ in Hermite form)\n RandomLWB = []\n for i in range(len(LWB)):\n n = len(LWB[i])\n c = random_matrix(Zmod(p**m), n)\n while c.det() % p == 0:\n c = random_matrix(Zmod(p**m), n)\n RandomLWB.append([ sum([c[j, k] * LWB[i][k] for k in range(n)]) for j in range(n) ])\n\n return RandomLWB" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Samples a reciprocal relation in Q_m^{h}(\not CW), where all nondiagonal entries are in {0.5-h, 0.5+h}.
EXAMPLE
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
Q = sampleRecRel_exactly_h(5,0.1)
Q.show()
print(has_CW(Q))
<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
def sampleRecRel_exactly_h(m,h,decimal_precision=10):
    Q = sampleReciprocal(m,decimal_precision)
    Q = __EnforceBoundedFromOneHalf__(Q,0.4)
    for i in range(0,Q.m):
        for j in range(0,Q.m):
            if Q.Q[i,j]>0.5:
                Q.Q[i,j] = 0.5+h
            if Q.Q[i,j]<0.5:
                Q.Q[i,j] = 0.5-h
    return(Q)
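The snapping step shared by the *_exactly_h samplers can be sketched in vectorised form; this assumes a plain numpy matrix and skips the bounded-away-from-one-half preprocessing done by __EnforceBoundedFromOneHalf__.

import numpy as np

def snap_to_h(Q, h):
    # Push every off-diagonal entry to 0.5-h or 0.5+h, keeping the
    # direction of each pairwise preference; the diagonal stays at 0.5.
    Q = Q.copy()
    Q[Q > 0.5] = 0.5 + h
    Q[Q < 0.5] = 0.5 - h
    return Q

Q = np.array([[0.5, 0.81, 0.34],
              [0.19, 0.5, 0.66],
              [0.66, 0.34, 0.5]])
print(snap_to_h(Q, 0.1))   # entries become 0.6 / 0.4, diagonal unchanged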
[ "def has_CW(Q):\r\n assert type(Q) is ReciprocalRelation\r\n for i in range(0,Q.m):\r\n i_is_CW = True\r\n for j in range(0,Q.m):\r\n if i != j and Q.Q[i,j]<0.5:\r\n i_is_CW = False\r\n if i_is_CW is True:\r\n return(True)\r\n return(False)", "def sampleCW_exactly_h(m,h,decimal_precision=10): \r\n assert type(h) is float and 0<h<1/2, \"The parameter `h` has to be a `float` in the interval :math:`(0,0.5)`\"\r\n Q, buf = sampleCW_boundedFromOneHalf(m,0.4,decimal_precision)\r\n for i in range(0,Q.m):\r\n for j in range(0,Q.m):\r\n if Q.Q[i,j]>0.5:\r\n Q.Q[i,j] = 0.5+h\r\n if Q.Q[i,j]<0.5:\r\n Q.Q[i,j] = 0.5-h\r\n return(Q,buf)", "def sampleNotCW_exactly_h(m,h,max_tries=1000,decimal_precision=10): \r\n assert type(h) is float and 0<h<1/2, \"The parameter `h` has to be a `float` in the interval :math:`(0,0.5)`\"\r\n Q = sampleNotCW_boundedFromOneHalf(m=m,h=0.4,max_tries=1000,decimal_precision=decimal_precision)\r\n for i in range(0,Q.m):\r\n for j in range(0,Q.m):\r\n if Q.Q[i,j]>0.5:\r\n Q.Q[i,j] = 0.5+h\r\n if Q.Q[i,j]<0.5:\r\n Q.Q[i,j] = 0.5-h\r\n return(Q)", "def get_CW(Q):\r\n assert type(Q) is ReciprocalRelation\r\n for i in range(0,Q.m):\r\n i_is_CW = True\r\n for j in range(0,Q.m):\r\n if i != j and Q.Q[i,j]<0.5:\r\n i_is_CW = False\r\n if i_is_CW is True:\r\n return(i)\r\n return(False)", "def sampleCW(m,decimal_precision=10):\r\n Q = sampleReciprocal(m,decimal_precision) \r\n cw = np.random.randint(0,m) # cw is chosen to be the CW\r\n for j in range(0,m):\r\n if Q.Q[cw,j]<0.5:\r\n buf = Q.Q[j,cw]\r\n Q.setEntry([cw,j],buf)\r\n return(Q), cw", "def test_diagonalizing_gates_overlapping(self):\n diag_op = ValidOp(qml.S(0), qml.PauliX(0))\n diagonalizing_gates = diag_op.diagonalizing_gates()\n\n assert len(diagonalizing_gates) == 1\n diagonalizing_mat = diagonalizing_gates[0].matrix()\n\n true_mat = np.eye(2)\n\n assert np.allclose(diagonalizing_mat, true_mat)", "def test_diagonalizing_gates_non_overlapping(self):\n diag_op = ValidOp(qml.PauliZ(wires=0), qml.Identity(wires=1))\n assert diag_op.diagonalizing_gates() == []", "def pixeldq_propagation(output_hdul, reference_hdul):\n input_dq = np.zeros_like(output_hdul['PIXELDQ'].data)\n result = np.all(core_utils.bitwise_propagate(reference_hdul, input_dq) == output_hdul['PIXELDQ'].data)\n return result", "def test_get_diagonal_coulomb():\n diag = numpy.zeros((5, 5), dtype=numpy.complex128)\n e_0 = -4.2\n test = diagonal_coulomb.DiagonalCoulomb(diag, e_0)\n test2 = fqe.get_diagonalcoulomb_hamiltonian(diag, e_0)\n\n assert test == test2", "def checkReversedDiagonalWin(self, param):\n for i in range(15 - 5):\n for j in range(4, 15):\n if self.ifReverseDiagonalMatch(i, j, [param] * 5):\n return True\n return False", "def test_get_diagonal_hamiltonian():\n diag = numpy.zeros((5,), dtype=numpy.complex128)\n e_0 = -4.2\n test = diagonal_hamiltonian.Diagonal(diag, e_0)\n test2 = fqe.get_diagonal_hamiltonian(diag, e_0)\n\n assert test == test2", "def checkForwardDiagonalWin(self, param):\n for i in range(15 - 5):\n for j in range(15 - 5):\n if self.ifForwardDiagonalMatch(i, j, [param] * 5):\n return True\n return False", "def _is_legal_undirected_edge_weights_matrix(W):\n if boolmatrix_any(W < 0):\n return False\n elif not is_symmetric(W):\n return False\n elif boolmatrix_any(W.diagonal() != 0):\n return False\n\n return True", "def test_boundary_relative_nondimensional_reaction_rate_coefficient(self):\r\n rgn = np.random.rand()\r\n model = random_crack_model(varepsilon=800)\r\n compare = model.k_0(rgn, [1, 1], ensemble='isometric')[0]\r\n 
self.assertAlmostEqual(\r\n model.k(rgn, ensemble='isometric')[0],\r\n compare, delta=np.abs(1e-0*compare)\r\n )\r\n compare = model.k_0(rgn, [1, 1], ensemble='isotensional')[0]\r\n self.assertAlmostEqual(\r\n model.k(rgn, ensemble='isotensional')[0],\r\n compare, delta=np.abs(1e-0*compare)\r\n )\r\n model = random_crack_model(N=100, varepsilon=800)\r\n compare = np.exp(2*model.kappa/model.alpha/model.N**2*(rgn - 1))\r\n self.assertAlmostEqual(\r\n model.k(rgn, ensemble='isometric')[0],\r\n compare, delta=np.abs(1e-0*compare)\r\n )\r\n model = random_crack_model(N=100, varepsilon=800)\r\n rgn = 3*model.kappa/model.N**3*np.random.rand()\r\n compare = np.exp(2*model.N/3/model.alpha*rgn)\r\n self.assertAlmostEqual(\r\n model.k(rgn, ensemble='isotensional')[0],\r\n compare, delta=np.abs(1e-0*compare)\r\n )", "def verify_connection_weights(self, lsnn, wRec, wOut):\n numRegular = lsnn.modelParams.numRegular\n\n self.check_matrix_equality(lsnn.connRegularToRegularNeurons,\n wRec[:numRegular, :numRegular])\n self.check_matrix_equality(lsnn.connRegularToAdapativeNeurons,\n wRec[numRegular:, :numRegular])\n self.check_matrix_equality(lsnn.connAdaptiveToAdapativeNeurons,\n wRec[numRegular:, numRegular:])\n self.check_matrix_equality(lsnn.connAdaptiveToRegularNeurons,\n wRec[:numRegular, numRegular:])\n self.check_matrix_equality(lsnn.connRegularToOutputNeurons,\n wOut[:, :numRegular])\n self.check_matrix_equality(lsnn.connAdaptiveToOutputNeurons,\n wOut[:, numRegular:])\n print(\"######## verified connection weights\")", "def checkSolution(Hw, Hpb):\n\tTw = [getTW(H) for H in Hw]\n\tTpb = [getTPb(H) for H in Hpb]\n\tdiscrepancyQW = []\n\tdiscrepancyQPb = []\n\tfor i in range(1, N):\n\t\tdeltaHW = Hw[i] - Hw[i-1]\n\t\tdiscrepancy = deltaHW - dHWdz(Tpb[i], Tw[i], Hw[i])*dz\n\t\tdiscrepancyQW.append(getQW(discrepancy)*1e6)\n\tfor i in range(1, N):\n\t\tdeltaHPb = Hpb[i] - Hpb[i-1]\n\t\tdiscrepancy = deltaHPb - dHPbdz(Tpb[i], Tw[i], Hw[i])*dz\n\t\tdiscrepancyQPb.append(getQPb(discrepancy)*1e6)\n\tif plotError:\n\t\tz = np.linspace(0, h, N-1)\n\t\tplt.plot(z, discrepancyQW, label=\"water\")\n\t\tplt.plot(z, discrepancyQPb, label=\"lead\")\n\t\tplt.legend()\n\t\tplt.show()\n\treturn (np.sum([abs(d) for d in discrepancyQW]) + np.sum([abs(d) for d in discrepancyQPb]))/N", "def is_circular(self):\n return np.isclose(self.x_fwhm, self.y_fwhm, rtol=1e-6)", "def is_ccw(ring):\n return signed_area(ring) >= 0.0", "def is_hollow(mat):\n # is_symmetric_and_hollow_cy spends most\n # of its time in symetry check, just use numpy\n return (np.trace(mat) == 0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Samples a reciprocal relation in Q_m^{h}(CW), where all nondiagonal entries are in {0.5-h, 0.5+h}.
EXAMPLE
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
Q, buf = sampleCW_exactly_h(5,0.1)
Q.show()
def sampleCW_exactly_h(m,h,decimal_precision=10):
    assert type(h) is float and 0<h<1/2, "The parameter `h` has to be a `float` in the interval :math:`(0,0.5)`"
    Q, buf = sampleCW_boundedFromOneHalf(m,0.4,decimal_precision)
    for i in range(0,Q.m):
        for j in range(0,Q.m):
            if Q.Q[i,j]>0.5:
                Q.Q[i,j] = 0.5+h
            if Q.Q[i,j]<0.5:
                Q.Q[i,j] = 0.5-h
    return(Q,buf)
[ "def sampleNotCW_exactly_h(m,h,max_tries=1000,decimal_precision=10): \r\n assert type(h) is float and 0<h<1/2, \"The parameter `h` has to be a `float` in the interval :math:`(0,0.5)`\"\r\n Q = sampleNotCW_boundedFromOneHalf(m=m,h=0.4,max_tries=1000,decimal_precision=decimal_precision)\r\n for i in range(0,Q.m):\r\n for j in range(0,Q.m):\r\n if Q.Q[i,j]>0.5:\r\n Q.Q[i,j] = 0.5+h\r\n if Q.Q[i,j]<0.5:\r\n Q.Q[i,j] = 0.5-h\r\n return(Q)", "def sampleRecRel_exactly_h(m,h,decimal_precision=10): \r\n Q = sampleReciprocal(m,decimal_precision)\r\n Q = __EnforceBoundedFromOneHalf__(Q,0.4)\r\n for i in range(0,Q.m):\r\n for j in range(0,Q.m):\r\n if Q.Q[i,j]>0.5:\r\n Q.Q[i,j] = 0.5+h\r\n if Q.Q[i,j]<0.5:\r\n Q.Q[i,j] = 0.5-h\r\n return(Q)", "def sampleCW(m,decimal_precision=10):\r\n Q = sampleReciprocal(m,decimal_precision) \r\n cw = np.random.randint(0,m) # cw is chosen to be the CW\r\n for j in range(0,m):\r\n if Q.Q[cw,j]<0.5:\r\n buf = Q.Q[j,cw]\r\n Q.setEntry([cw,j],buf)\r\n return(Q), cw", "def test_weighted_chisq():\n num_points = 1000\n num_experiments = 100\n num_bins = 20\n bins = np.linspace(-800, 800, num=num_bins + 1)\n\n pi_mass = 139.570\n k_mass = 493.677\n d_mass = 1864.84\n generator = phasespace.nbody_decay(\n d_mass, (k_mass, pi_mass, pi_mass, pi_mass), names=(\"K\", \"pi1\", \"pi2\", \"pi3\")\n )\n\n def gen():\n \"\"\"\n Return K_px arrays a and b for a D->K3pi event, and weights for a\n\n Returns a, b, wt_a\n\n \"\"\"\n # Find k_px with weights\n a_wt, a = generator.generate(num_points)\n a = a[\"K\"].numpy()[:, 0]\n\n # Normalise weights to have an average of 1\n a_wt /= np.mean(a_wt)\n\n # Find k_px using accept-reject\n b = script_util.flat_phsp_points(num_points)[0][0]\n\n return a, b, a_wt\n\n chisqs, p_vals = _find_chisqs(num_experiments, bins, gen)\n _plot(*gen(), bins, chisqs, p_vals)", "def _sample_haar_mtx(size):\n # n by n random complex matrix\n x_mtx = np.random.randn(size,size) + (0+1j)*np.random.randn(size,size)\n # orthonormalizing matrix using QR algorithm\n q_mtx, _ = np.linalg.qr(x_mtx)\n # the resulting Q is Haar-distributed\n return q_mtx", "def kabsch_weighted_rmsd(P: ndarray, Q: ndarray, W: Optional[ndarray] = None) -> float:\n _, _, w_rmsd = kabsch_weighted(P, Q, W)\n return w_rmsd", "def normalize(K, w, h):\n return np.diag([1.0 / w, 1.0 / h, 1.0]) @ K", "def getComplexNMF1DTemplates(S, W, H, p = 2, audioParams = None):\n K = W.shape[2]\n #Step 1: Compute the masked matrices raised to the power p\n AsSum = np.zeros(S.shape)\n As = []\n for k in range(K):\n Hk = np.array(H)\n Hk[0:k, :] = 0\n Hk[k+1::, :] = 0\n As.append(multiplyConv1D(W, Hk)**p)\n AsSum += As[-1]\n #Step 2: Average masked portions of the spectrogram to come up with\n #complex-valued templates\n Ss = []\n Ratios = []\n AllPow = np.abs(np.sum(S*np.conj(S), 0))\n AllPow[AllPow == 0] = 1\n for k in range(K):\n Ss.append(S*As[k]/AsSum)\n Pow = np.abs(np.sum(Ss[k]*np.conj(Ss[k]), 0))\n Ratios.append(Pow/AllPow)\n #Step 4: Save components if user requested\n if audioParams:\n from SpectrogramTools import iSTFT\n [winSize, hopSize] = [audioParams['winSize'], audioParams['hopSize']]\n [Fs, fileprefix] = [audioParams['Fs'], audioParams['fileprefix']]\n import matplotlib.pyplot as plt\n from scipy.io import wavfile\n X = np.array([])\n for k in range(K):\n thisS = np.array(Ss[k])\n thisS[:, Ratios[k] < 0.05] = 0\n Xk = iSTFT(thisS, winSize, hopSize)\n if k == 0:\n X = Xk\n else:\n X += Xk\n wavfile.write(\"%s_%i.wav\"%(fileprefix, k), Fs, Xk)\n plt.clf()\n plt.plot(Ratios[k])\n 
plt.title(\"Ratio, %.3g Above 0.05\"%(np.sum(Ratios[k] > 0.05)/float(Ratios[k].size)))\n plt.savefig(\"%s_%iPower.svg\"%(fileprefix, k), bbox_inches = 'tight')\n wavfile.write(\"%sNMF.wav\"%fileprefix, Fs, X)\n return (Ss, Ratios)", "def he_initialization(weight_shape):\n if len(weight_shape) == 4:\n fW, fH, fC, num_fitls = weight_shape\n return np.random.normal(0, np.sqrt(2 / (fW*fH*fC*num_fitls)), weight_shape)\n num_input, num_output = weight_shape\n return np.random.normal(0, np.sqrt(2 / num_input), weight_shape)", "def sample(h, seed_ix, n):\n x = np.zeros((vocab_size, 1))\n x[seed_ix] = 1\n generated_seq = []\n for t in range(n):\n h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)\n y = np.dot(Why, h) + by\n p = np.exp(y) / np.sum(np.exp(y))\n ix = np.random.choice(range(vocab_size), p=p.ravel())\n x = np.zeros((vocab_size, 1))\n x[ix] = 1\n generated_seq.append(ix)\n return generated_seq", "def sample(h, seed_ix, n):\n x = np.zeros((vocab_size, 1))\n x[seed_ix] = 1\n ixes = []\n for t in range(n):\n h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)\n y = np.dot(Why, h) + by\n p = np.exp(y) / np.sum(np.exp(y))\n ix = np.random.choice(range(vocab_size), p=p.ravel()) # ไธ‹้‡‡ๆ ท\n x = np.zeros((vocab_size, 1))\n x[ix] = 1\n ixes.append(ix)\n return ixes", "def notch(Wn, Q=10, analog=False, output='ba'):\n # H(s) = (s**2 + 1) / (s**2 + s/Q + 1)\n b = np.array([1, 0, 1])\n a = np.array([1, 1/Q, 1])\n\n return _transform(b, a, Wn, analog, output)", "def test_get_diagonal_coulomb():\n diag = numpy.zeros((5, 5), dtype=numpy.complex128)\n e_0 = -4.2\n test = diagonal_coulomb.DiagonalCoulomb(diag, e_0)\n test2 = fqe.get_diagonalcoulomb_hamiltonian(diag, e_0)\n\n assert test == test2", "def kabsch_weighted(\n P: ndarray, Q: ndarray, W: Optional[ndarray] = None\n) -> Tuple[ndarray, ndarray, float]:\n # Computation of the weighted covariance matrix\n CMP = np.zeros(3)\n CMQ = np.zeros(3)\n C = np.zeros((3, 3))\n if W is None:\n W = np.ones(len(P)) / len(P)\n W = np.array([W, W, W]).T\n # NOTE UNUSED psq = 0.0\n # NOTE UNUSED qsq = 0.0\n iw = 3.0 / W.sum()\n n = len(P)\n for i in range(3):\n for j in range(n):\n for k in range(3):\n C[i, k] += P[j, i] * Q[j, k] * W[j, i]\n CMP = (P * W).sum(axis=0)\n CMQ = (Q * W).sum(axis=0)\n PSQ = (P * P * W).sum() - (CMP * CMP).sum() * iw\n QSQ = (Q * Q * W).sum() - (CMQ * CMQ).sum() * iw\n C = (C - np.outer(CMP, CMQ) * iw) * iw\n\n # Computation of the optimal rotation matrix\n # This can be done using singular value decomposition (SVD)\n # Getting the sign of the det(V)*(W) to decide\n # whether we need to correct our rotation matrix to ensure a\n # right-handed coordinate system.\n # And finally calculating the optimal rotation matrix U\n # see http://en.wikipedia.org/wiki/Kabsch_algorithm\n V, S, W = np.linalg.svd(C)\n d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0\n\n if d:\n S[-1] = -S[-1]\n V[:, -1] = -V[:, -1]\n\n # Create Rotation matrix U, translation vector V, and calculate RMSD:\n U = np.dot(V, W)\n msd = (PSQ + QSQ) * iw - 2.0 * S.sum()\n if msd < 0.0:\n msd = 0.0\n rmsd_ = np.sqrt(msd)\n V = np.zeros(3)\n for i in range(3):\n t = (U[i, :] * CMQ).sum()\n V[i] = CMP[i] - t\n V = V * iw\n return U, V, rmsd_", "def has_CW(Q):\r\n assert type(Q) is ReciprocalRelation\r\n for i in range(0,Q.m):\r\n i_is_CW = True\r\n for j in range(0,Q.m):\r\n if i != j and Q.Q[i,j]<0.5:\r\n i_is_CW = False\r\n if i_is_CW is True:\r\n return(True)\r\n return(False)", "def contraharmonic_filter(img: np.ndarray, Q: int, mask_size: tuple) -> np.ndarray:\n \n data = 
np.array(img, dtype=np.float64)\n data = data.ravel()\n num = np.power(data, Q + 1)\n den = np.power(data, Q)\n kernel = np.full(mask_size, 1.0)\n\n res = filter(num, kernel)/filter(den,kernel)\n return res.reshape(img.shape).astype(np.uint8)", "def get_CW(Q):\r\n assert type(Q) is ReciprocalRelation\r\n for i in range(0,Q.m):\r\n i_is_CW = True\r\n for j in range(0,Q.m):\r\n if i != j and Q.Q[i,j]<0.5:\r\n i_is_CW = False\r\n if i_is_CW is True:\r\n return(i)\r\n return(False)", "def calc_sample_thicknesses(parameters):\n sample_radius = parameters['thichness_sample'] / 2.0 # [mm]\n\n# a = (parameters['sample_distance']**2.0 - sample_radius**2.0) / \\\n# (1.0 + np.tan(parameters['thetas'])**2.0) # [mm^2]\n# b = (parameters['sample_distance'] * np.tan(parameters['thetas'])) / \\\n# (1.0 + np.tan(parameters['thetas'])**2.0) # [mm]\n#\n# x_1 = b - np.sqrt(b**2 - a) # [mm]\n# x_2 = b + np.sqrt(b**2 - a) # [mm]\n\n a = (parameters['sample_distance'] * np.tan(parameters['thetas'])) / \\\n (1.0 + np.tan(parameters['thetas'])**2.0) # [mm]\n\n b = (sample_radius**2 - parameters['sample_distance']**2 +\n (parameters['sample_distance']**2 /\n (1.0 + np.tan(parameters['thetas'])**2.0))) * \\\n ((np.tan(parameters['thetas'])**2.0) /\n ((1.0 + np.tan(parameters['thetas'])**2.0))) # [mm^2]\n\n x_1 = a - np.sqrt(b) # [mm]\n x_2 = a + np.sqrt(b) # [mm]\n\n # Set nans (complex) and negative (?) to 0\n x_1 = np.nan_to_num(x_1)\n x_1[x_1 < 0] = 0.0\n x_2 = np.nan_to_num(x_2)\n x_2[x_2 < 0] = 0.0\n\n # Calc sample cross section\n y_1 = x_1 / np.tan(parameters['thetas']) # Element wise, [mm]\n y_2 = x_2 / np.tan(parameters['thetas']) # Element wise, [mm]\n\n dx = x_2 - x_1 # [mm]\n dy = y_2 - y_1 # [mm]\n sample_thicknesses = np.sqrt(dx**2.0 + dy**2.0) # [mm]\n\n # Set all angles that do not pass throught the sample (thickness==0) to 0\n thetas = parameters['thetas'].copy()\n thetas[sample_thicknesses == 0] = 0\n return sample_thicknesses, thetas", "def test_get_diagonal_hamiltonian():\n diag = numpy.zeros((5,), dtype=numpy.complex128)\n e_0 = -4.2\n test = diagonal_hamiltonian.Diagonal(diag, e_0)\n test2 = fqe.get_diagonal_hamiltonian(diag, e_0)\n\n assert test == test2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Samples a reciprocal relation in Q_m^{h}(\not CW), where all nondiagonal entries are in {0.5-h, 0.5+h}.
EXAMPLE
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
Q = sampleNotCW_exactly_h(5,0.1)
Q.show()
print(has_CW(Q))
<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
def sampleNotCW_exactly_h(m,h,max_tries=1000,decimal_precision=10):
    assert type(h) is float and 0<h<1/2, "The parameter `h` has to be a `float` in the interval :math:`(0,0.5)`"
    Q = sampleNotCW_boundedFromOneHalf(m=m,h=0.4,max_tries=1000,decimal_precision=decimal_precision)
    for i in range(0,Q.m):
        for j in range(0,Q.m):
            if Q.Q[i,j]>0.5:
                Q.Q[i,j] = 0.5+h
            if Q.Q[i,j]<0.5:
                Q.Q[i,j] = 0.5-h
    return(Q)
[ "def has_CW(Q):\r\n assert type(Q) is ReciprocalRelation\r\n for i in range(0,Q.m):\r\n i_is_CW = True\r\n for j in range(0,Q.m):\r\n if i != j and Q.Q[i,j]<0.5:\r\n i_is_CW = False\r\n if i_is_CW is True:\r\n return(True)\r\n return(False)", "def sampleCW_exactly_h(m,h,decimal_precision=10): \r\n assert type(h) is float and 0<h<1/2, \"The parameter `h` has to be a `float` in the interval :math:`(0,0.5)`\"\r\n Q, buf = sampleCW_boundedFromOneHalf(m,0.4,decimal_precision)\r\n for i in range(0,Q.m):\r\n for j in range(0,Q.m):\r\n if Q.Q[i,j]>0.5:\r\n Q.Q[i,j] = 0.5+h\r\n if Q.Q[i,j]<0.5:\r\n Q.Q[i,j] = 0.5-h\r\n return(Q,buf)", "def sampleCW(m,decimal_precision=10):\r\n Q = sampleReciprocal(m,decimal_precision) \r\n cw = np.random.randint(0,m) # cw is chosen to be the CW\r\n for j in range(0,m):\r\n if Q.Q[cw,j]<0.5:\r\n buf = Q.Q[j,cw]\r\n Q.setEntry([cw,j],buf)\r\n return(Q), cw", "def get_CW(Q):\r\n assert type(Q) is ReciprocalRelation\r\n for i in range(0,Q.m):\r\n i_is_CW = True\r\n for j in range(0,Q.m):\r\n if i != j and Q.Q[i,j]<0.5:\r\n i_is_CW = False\r\n if i_is_CW is True:\r\n return(i)\r\n return(False)", "def sampleRecRel_exactly_h(m,h,decimal_precision=10): \r\n Q = sampleReciprocal(m,decimal_precision)\r\n Q = __EnforceBoundedFromOneHalf__(Q,0.4)\r\n for i in range(0,Q.m):\r\n for j in range(0,Q.m):\r\n if Q.Q[i,j]>0.5:\r\n Q.Q[i,j] = 0.5+h\r\n if Q.Q[i,j]<0.5:\r\n Q.Q[i,j] = 0.5-h\r\n return(Q)", "def test_diagonalizing_gates_non_overlapping(self):\n diag_op = ValidOp(qml.PauliZ(wires=0), qml.Identity(wires=1))\n assert diag_op.diagonalizing_gates() == []", "def test_get_diagonal_coulomb():\n diag = numpy.zeros((5, 5), dtype=numpy.complex128)\n e_0 = -4.2\n test = diagonal_coulomb.DiagonalCoulomb(diag, e_0)\n test2 = fqe.get_diagonalcoulomb_hamiltonian(diag, e_0)\n\n assert test == test2", "def checkSolution(Hw, Hpb):\n\tTw = [getTW(H) for H in Hw]\n\tTpb = [getTPb(H) for H in Hpb]\n\tdiscrepancyQW = []\n\tdiscrepancyQPb = []\n\tfor i in range(1, N):\n\t\tdeltaHW = Hw[i] - Hw[i-1]\n\t\tdiscrepancy = deltaHW - dHWdz(Tpb[i], Tw[i], Hw[i])*dz\n\t\tdiscrepancyQW.append(getQW(discrepancy)*1e6)\n\tfor i in range(1, N):\n\t\tdeltaHPb = Hpb[i] - Hpb[i-1]\n\t\tdiscrepancy = deltaHPb - dHPbdz(Tpb[i], Tw[i], Hw[i])*dz\n\t\tdiscrepancyQPb.append(getQPb(discrepancy)*1e6)\n\tif plotError:\n\t\tz = np.linspace(0, h, N-1)\n\t\tplt.plot(z, discrepancyQW, label=\"water\")\n\t\tplt.plot(z, discrepancyQPb, label=\"lead\")\n\t\tplt.legend()\n\t\tplt.show()\n\treturn (np.sum([abs(d) for d in discrepancyQW]) + np.sum([abs(d) for d in discrepancyQPb]))/N", "def test_diagonalizing_gates_overlapping(self):\n diag_op = ValidOp(qml.S(0), qml.PauliX(0))\n diagonalizing_gates = diag_op.diagonalizing_gates()\n\n assert len(diagonalizing_gates) == 1\n diagonalizing_mat = diagonalizing_gates[0].matrix()\n\n true_mat = np.eye(2)\n\n assert np.allclose(diagonalizing_mat, true_mat)", "def test_weighted_chisq():\n num_points = 1000\n num_experiments = 100\n num_bins = 20\n bins = np.linspace(-800, 800, num=num_bins + 1)\n\n pi_mass = 139.570\n k_mass = 493.677\n d_mass = 1864.84\n generator = phasespace.nbody_decay(\n d_mass, (k_mass, pi_mass, pi_mass, pi_mass), names=(\"K\", \"pi1\", \"pi2\", \"pi3\")\n )\n\n def gen():\n \"\"\"\n Return K_px arrays a and b for a D->K3pi event, and weights for a\n\n Returns a, b, wt_a\n\n \"\"\"\n # Find k_px with weights\n a_wt, a = generator.generate(num_points)\n a = a[\"K\"].numpy()[:, 0]\n\n # Normalise weights to have an average of 1\n a_wt /= np.mean(a_wt)\n\n # Find 
k_px using accept-reject\n b = script_util.flat_phsp_points(num_points)[0][0]\n\n return a, b, a_wt\n\n chisqs, p_vals = _find_chisqs(num_experiments, bins, gen)\n _plot(*gen(), bins, chisqs, p_vals)", "def notch(Wn, Q=10, analog=False, output='ba'):\n # H(s) = (s**2 + 1) / (s**2 + s/Q + 1)\n b = np.array([1, 0, 1])\n a = np.array([1, 1/Q, 1])\n\n return _transform(b, a, Wn, analog, output)", "def test_mahalanobis_distance_diagonal(self):\n\n result = mahalanobis_distance_diagonal(self.td.RHDX[1],\n self.td.U[2],\n self.td.mask[1],\n self.td.diagonal_covariances[2],\n self.td.Q,\n self.td.P)\n correct = np.sqrt(57/8 * 5/3)\n self.assertAlmostEqual(correct, result, places=6)", "def _is_legal_undirected_edge_weights_matrix(W):\n if boolmatrix_any(W < 0):\n return False\n elif not is_symmetric(W):\n return False\n elif boolmatrix_any(W.diagonal() != 0):\n return False\n\n return True", "def test_get_diagonal_hamiltonian():\n diag = numpy.zeros((5,), dtype=numpy.complex128)\n e_0 = -4.2\n test = diagonal_hamiltonian.Diagonal(diag, e_0)\n test2 = fqe.get_diagonal_hamiltonian(diag, e_0)\n\n assert test == test2", "def is_ccw(ring):\n return signed_area(ring) >= 0.0", "def test_cry_zero_hadamard(self, wires, res):\n commutation = qml.is_commuting(qml.CRY(0.0, wires=wires[0]), qml.Hadamard(wires=wires[1]))\n assert commutation == res", "def check_inv_pos_diagonal(self, thres=-1./T_huge**2):\n \n noise_inv_diag = self.get_inverse_diagonal()\n if sp.any(noise_inv_diag < thres):\n print (sp.sum(noise_inv_diag < 0), noise_inv_diag.size)\n print (noise_inv_diag[noise_inv_diag < 0], sp.amax(noise_inv_diag))\n time_mod.sleep(300)\n raise NoiseError(\"Inverted noise has negitive entries on the \"\n \"diagonal.\")", "def find_dry_squares(self):\n print('calculating number of dry squares')\n kernel = [[0, 1, 0], [1, 0, 1], [0, 1, 0]]\n return convolve(self.landscape, kernel, mode='constant')", "def calc_sample_thicknesses(parameters):\n sample_radius = parameters['thichness_sample'] / 2.0 # [mm]\n\n# a = (parameters['sample_distance']**2.0 - sample_radius**2.0) / \\\n# (1.0 + np.tan(parameters['thetas'])**2.0) # [mm^2]\n# b = (parameters['sample_distance'] * np.tan(parameters['thetas'])) / \\\n# (1.0 + np.tan(parameters['thetas'])**2.0) # [mm]\n#\n# x_1 = b - np.sqrt(b**2 - a) # [mm]\n# x_2 = b + np.sqrt(b**2 - a) # [mm]\n\n a = (parameters['sample_distance'] * np.tan(parameters['thetas'])) / \\\n (1.0 + np.tan(parameters['thetas'])**2.0) # [mm]\n\n b = (sample_radius**2 - parameters['sample_distance']**2 +\n (parameters['sample_distance']**2 /\n (1.0 + np.tan(parameters['thetas'])**2.0))) * \\\n ((np.tan(parameters['thetas'])**2.0) /\n ((1.0 + np.tan(parameters['thetas'])**2.0))) # [mm^2]\n\n x_1 = a - np.sqrt(b) # [mm]\n x_2 = a + np.sqrt(b) # [mm]\n\n # Set nans (complex) and negative (?) to 0\n x_1 = np.nan_to_num(x_1)\n x_1[x_1 < 0] = 0.0\n x_2 = np.nan_to_num(x_2)\n x_2[x_2 < 0] = 0.0\n\n # Calc sample cross section\n y_1 = x_1 / np.tan(parameters['thetas']) # Element wise, [mm]\n y_2 = x_2 / np.tan(parameters['thetas']) # Element wise, [mm]\n\n dx = x_2 - x_1 # [mm]\n dy = y_2 - y_1 # [mm]\n sample_thicknesses = np.sqrt(dx**2.0 + dy**2.0) # [mm]\n\n # Set all angles that do not pass throught the sample (thickness==0) to 0\n thetas = parameters['thetas'].copy()\n thetas[sample_thicknesses == 0] = 0\n return sample_thicknesses, thetas" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
this is the sort function for the keys (i.e. columns) of the csv file
def sort_colums_for_csv(column_name):
    if column_name in column_sort_dict:
        return column_sort_dict[column_name]
    else:
        return ord(column_name[0]) + 99
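A short usage sketch, reusing the function just above: column_sort_dict is not shown in this excerpt, so a hypothetical priority mapping is assumed here; known headers sort first by their priority, unknown ones fall back to ord(first letter) + 99 and therefore end up after them.

column_sort_dict = {'NAME': 1, 'VALUE': 2, 'COUNT': 3}   # hypothetical priorities

headers = ['PACKAGE', 'VALUE', 'NAME', 'DEVICE', 'COUNT']
print(sorted(headers, key=sort_colums_for_csv))
# -> ['NAME', 'VALUE', 'COUNT', 'DEVICE', 'PACKAGE']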
[ "def re_sort(in_file, out_file):\n # First, read all the rows into a list\n with open(in_file, mode='r', newline='') as csv_file:\n reader = csv.DictReader(csv_file)\n rows = [row for row in reader]\n # Or, rows = list(reader)\n # Get the key of the second column\n second_column = reader.fieldnames[1]\n # Sort all the rows. sorted() returns a new list\n sorted_rows = sorted(rows, key=lambda r: r[second_column])\n # Write it out to the new file\n with open(out_file, mode='w', newline='') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=reader.fieldnames)\n writer.writeheader()\n writer.writerows(sorted_rows)", "def csvsort(inputfile: str, outputfile: str, columnchoice: str) -> None:\n fileread = readfile(inputfile)\n sorteddata = sortdata(fileread, columnchoice)\n writefile(sorteddata, outputfile)", "def sort_rows_for_csv(part):\n if (part['NAME'].find(',')):\n stri = part['NAME'].split(',')[0]\n else:\n stri = part['NAME']\n if 'DO_NOT_PLACE' in part:\n return '0'\n if 'PROVIDED_BY' in part:\n return '1'\n return ''.join(c for c in stri if not c.isdigit())", "def sort(self):", "def sort_csv_by_order(csv_file_path):\n index_=['id', 'name', 'email', 'state', 'zipcode', 'birthday', 'birthyear']\n df = pd.DataFrame(columns=index_)\n\n #parse into Pandas dataframe\n with open(csv_file_path, \"r\") as csvfile:\n csv_reader = csv.reader(csvfile)\n\n for i, row in enumerate(csv_reader):\n\n if i > 0:\n split_row_entry = split_and_check_num_cols('|'.join(row), 7)\n\n if split_row_entry:\n df = df.append(pd.Series(split_row_entry, index=index_), ignore_index=True)\n\n #convert the order ID to numeric\n df['id'] = df['id'].convert_objects(convert_numeric=True)\n #sort by id\n df = df.sort_values(by='id', ascending=True)\n #save with same name to be parsed again (can use database also)\n df.to_csv(csv_file_path, sep=',', index=False)", "def sort_csv(filename, encoding=None):\n filename = os.path.normpath(filename)\n input_file = open_fr(filename, encoding)\n csv_reader_infile = csv.reader(input_file, escapechar=\"\\\\\")\n # write the data to a temporary file and sort it\n temp_path = os.path.normpath(\"tempfile\")\n temp_file = open_fw(temp_path, encoding)\n\n csv_writer = open_csvw(temp_file)\n i = 0\n for row in csv_reader_infile:\n if i == 0:\n # The first entry is the header line\n infields = row\n i += 1\n else:\n csv_writer.writerow(row)\n input_file.close()\n temp_file.close()\n\n # sort the temp file\n sorted_txt = sort_file(temp_path)\n tmp = open_fr(sorted_txt, encoding)\n in_txt = csv.reader(tmp, delimiter=',', escapechar=\"\\\\\")\n csv_file = open_fw(filename, encoding)\n csv_writer = open_csvw(csv_file)\n csv_writer.writerow(infields)\n csv_writer.writerows(in_txt)\n tmp.close()\n csv_file.close()\n os.remove(os.path.normpath(temp_path))\n return filename", "def prep_csv_file_for_download(key):\n data_ = open(\"sorted_file.csv\", \"w\")\n csvw = csv.writer(data_)\n count = 0\n lines = sort_by_key(key)\n for orgdata in lines:\n if count == 0:\n header = orgdata.keys()\n csvw.writerow(header)\n count += 1\n csvw.writerow(orgdata.values())\n data_.close()\n return", "def sort_by(column, table):\n \n return sorted(table, key = lambda row: row[column])", "def top_something_to_csv(dict_in, filename, column_titles, reverse, sort_key_function, value_format_function=lambda t: t):\n\tordered_list = []\n\tfor key, value in dict_in.items():\n\t\tordered_list.append([key, value_format_function(value)])\n\n\tordered_list = sorted(ordered_list, key=sort_key_function, 
reverse=reverse)\n\n\twith open(filename, 'w', newline='', encoding=\"utf8\") as csvfile:\n\t\tfile_writer = csv.writer(csvfile, delimiter=DEFAULT_OUTPUT_DELIMITER, quotechar='\"', quoting=csv.QUOTE_MINIMAL)\t\n\t\tfile_writer.writerow(column_titles)\n\t\tfor item in ordered_list[:MAX_WORDS_NUMBER_CSV]:\n\t\t\tfile_writer.writerow([item[0], item[1]])\n\t\tcsvfile.close()", "def sort(self, key=None):\n\t\tif key is None:\n\t\t\traise Exception(\"Key cannot be null to sort matrix.\")\n\n\t\tself.__rows.sort(key=key)", "def sort(self, **colname_dir_pairs):\n @deco.tuplefy\n def sort_key():\n pairs = colname_dir_pairs.items()\n for colname, dir in reversed(list(pairs)):\n if dir not in [1, -1]:\n raise ValueError(\"dir should be 1 or -1\")\n column = self[colname]\n if column.is_object():\n # Avoid TypeError trying to compare different types.\n column = column.as_string()\n if dir < 0 and not (column.is_boolean() or column.is_number()):\n # Use rank for non-numeric types so that we can sort descending.\n column = column.rank(method=\"min\")\n yield column if dir >= 0 else -column\n indices = np.lexsort(sort_key())\n for colname, column in self.items():\n yield colname, column[indices].copy()", "def sortCaseInsensitive():\n pass", "def sort(self, col=None):\n if hasattr(self, 'fyear') and col is None:\n i = np.argsort(self.fyear)\n self.fyear = self.fyear[i]\n self.data = self.data[i,:]\n print 'using column `fyear` to sort data'\n else:\n # 0 is default if `col` not specified\n if col is None: \n col = 0\n i = np.argsort(self.data[:,col])\n self.data = self.data[i,:]\n print 'using column `%d` to sort data' % col", "def sortContacts(self):\r\n self.tableOfContact.sortByColumn(0, QtCore.Qt.AscendingOrder)", "def createSortMap(self, names, sortkey, reverse=0):\n import operator\n recdata = []\n for rec in names: \n recdata.append(self.getRecordAttributeAtColumn(recName=rec, columnName=sortkey))\n #try create list of floats if col has numbers only\n try: \n recdata = self.toFloats(recdata)\n except:\n pass\n smap = zip(names, recdata)\n #sort the mapping by the second key\n smap = sorted(smap, key=operator.itemgetter(1), reverse=reverse) \n #now sort the main reclist by the mapping order\n sortmap = map(operator.itemgetter(0), smap)\n return sortmap", "def sortby(tree, col, descending):\n # grab values to sort\n data = [(tree.set(child, col), child) \\\n for child in tree.get_children('')]\n # if the data to be sorted is numeric change to float\n #data = change_numeric(data)\n # now sort the data in place\n data.sort(reverse=descending)\n for ix, item in enumerate(data):\n tree.move(item[1], '', ix)\n # switch the heading so it will sort in the opposite direction\n tree.heading(col, command=lambda col=col: sortby(tree, col, \\\n int(not descending)))", "def sorted_rows(rows):\n def fourth_tuple_item(t):\n return t[3]\n return sorted(rows, key=fourth_tuple_item)", "def sort_by_population_density(self , csv_file_path):\n self.load_census_data(csv_file_path)\n result = sorted(self.census_list, key = lambda x: int(x.get('DensityPerSqKm')) , reverse = True)\n self.create_json_file('sorted_by_density_population.json', result)\n logging.debug('most densely populous state: {}'.format(int(result[0].get('DensityPerSqKm'))))\n return result", "def sort_by_area(self , csv_file_path):\n self.load_census_data(csv_file_path)\n result = sorted(self.census_list, key = lambda x: int(x.get('AreaInSqKm')) , reverse = True)\n self.create_json_file('sorted_by_area.json', result)\n logging.debug('largest area state: 
{}'.format(int(result[0].get('AreaInSqKm'))))\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
this is the sort function that is used to determine the order of the lines of the csv
def sort_rows_for_csv(part):
    if (part['NAME'].find(',')):
        stri = part['NAME'].split(',')[0]
    else:
        stri = part['NAME']
    if 'DO_NOT_PLACE' in part:
        return '0'
    if 'PROVIDED_BY' in part:
        return '1'
    return ''.join(c for c in stri if not c.isdigit())
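A usage sketch with a few hypothetical part dictionaries, reusing the function defined just above: parts flagged DO_NOT_PLACE sort first ('0'), provided parts second ('1'), the rest by the letter prefix of their reference name. Note that str.find returns -1 (truthy) when there is no comma, so the else branch only runs for names starting with a comma; the result is the same either way, since split(',')[0] of a comma-free name is the name itself.

parts = [{'NAME': 'R1,R2'}, {'NAME': 'C3'}, {'NAME': 'J1', 'DO_NOT_PLACE': 'yes'}]
print([p['NAME'] for p in sorted(parts, key=sort_rows_for_csv)])
# -> ['J1', 'C3', 'R1,R2']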
[ "def sort(self):", "def csvsort(inputfile: str, outputfile: str, columnchoice: str) -> None:\n fileread = readfile(inputfile)\n sorteddata = sortdata(fileread, columnchoice)\n writefile(sorteddata, outputfile)", "def re_sort(in_file, out_file):\n # First, read all the rows into a list\n with open(in_file, mode='r', newline='') as csv_file:\n reader = csv.DictReader(csv_file)\n rows = [row for row in reader]\n # Or, rows = list(reader)\n # Get the key of the second column\n second_column = reader.fieldnames[1]\n # Sort all the rows. sorted() returns a new list\n sorted_rows = sorted(rows, key=lambda r: r[second_column])\n # Write it out to the new file\n with open(out_file, mode='w', newline='') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=reader.fieldnames)\n writer.writeheader()\n writer.writerows(sorted_rows)", "def sort_csv(filename, encoding=None):\n filename = os.path.normpath(filename)\n input_file = open_fr(filename, encoding)\n csv_reader_infile = csv.reader(input_file, escapechar=\"\\\\\")\n # write the data to a temporary file and sort it\n temp_path = os.path.normpath(\"tempfile\")\n temp_file = open_fw(temp_path, encoding)\n\n csv_writer = open_csvw(temp_file)\n i = 0\n for row in csv_reader_infile:\n if i == 0:\n # The first entry is the header line\n infields = row\n i += 1\n else:\n csv_writer.writerow(row)\n input_file.close()\n temp_file.close()\n\n # sort the temp file\n sorted_txt = sort_file(temp_path)\n tmp = open_fr(sorted_txt, encoding)\n in_txt = csv.reader(tmp, delimiter=',', escapechar=\"\\\\\")\n csv_file = open_fw(filename, encoding)\n csv_writer = open_csvw(csv_file)\n csv_writer.writerow(infields)\n csv_writer.writerows(in_txt)\n tmp.close()\n csv_file.close()\n os.remove(os.path.normpath(temp_path))\n return filename", "def _sort_lines(self, lines):\n def sort_key_func(item):\n try:\n return datetime.strptime(item[0], ARCHIVE_DT_FORMAT)\n except ValueError as err:\n self.log.error(str(err))\n raise ValueError\n\n return list(sorted(lines, key=sort_key_func))", "def lines_sort(self, keys):\n logger.trace(\"Sorting lines\")\n raw_lines = list()\n sorted_lines = list()\n for key in sorted(keys):\n title = key.replace(\"_\", \" \").title()\n if key.startswith((\"avg\", \"trend\")):\n sorted_lines.append([key, title])\n else:\n raw_lines.append([key, title])\n\n groupsize = self.lines_groupsize(raw_lines, sorted_lines)\n sorted_lines = raw_lines + sorted_lines\n lines = self.lines_style(sorted_lines, groupsize)\n return lines", "def sort_csv_by_order(csv_file_path):\n index_=['id', 'name', 'email', 'state', 'zipcode', 'birthday', 'birthyear']\n df = pd.DataFrame(columns=index_)\n\n #parse into Pandas dataframe\n with open(csv_file_path, \"r\") as csvfile:\n csv_reader = csv.reader(csvfile)\n\n for i, row in enumerate(csv_reader):\n\n if i > 0:\n split_row_entry = split_and_check_num_cols('|'.join(row), 7)\n\n if split_row_entry:\n df = df.append(pd.Series(split_row_entry, index=index_), ignore_index=True)\n\n #convert the order ID to numeric\n df['id'] = df['id'].convert_objects(convert_numeric=True)\n #sort by id\n df = df.sort_values(by='id', ascending=True)\n #save with same name to be parsed again (can use database also)\n df.to_csv(csv_file_path, sep=',', index=False)", "def _sort_fasta_records( fasta_records ):\n return sorted( fasta_records,\n key=get_exon_num )", "def sorted_rows(rows):\n def fourth_tuple_item(t):\n return t[3]\n return sorted(rows, key=fourth_tuple_item)", "def sqlf_1_sort_group(A):\n return 
','.join(sorted(A.split(',')))", "def sort_colums_for_csv(column_name):\n\n if column_name in column_sort_dict:\n return column_sort_dict[column_name]\n else:\n return ord(column_name[0]) + 99", "def sort(self,line):\r\n\t\tcommands = line.split(' ')\r\n\t\tcommands.sort(cmp)\r\n\t\t\r\n\t\tline = \"\"\r\n\t\tfor command in commands:\r\n\t\t\tline += command + \" \"\r\n\t\t\r\n\t\treturn line[:-1]", "def sortCaseInsensitive():\n pass", "def sort(in_stream):\n for line in sorted(in_stream):\n yield line", "def sort_by_state(self , csv_file_path):\n self.load_census_data(csv_file_path)\n result = sorted(self.census_list, key = lambda x: x.get('State'))\n self.convert_to_json_format(result)\n logging.debug('start state and end state: {} {}'.format(result[0].get('State') , result[len(result) - 1].get('State') ))\n return result", "def sortContacts(self):\r\n self.tableOfContact.sortByColumn(0, QtCore.Qt.AscendingOrder)", "def refactor_and_sort_data(color_data):\n return sorted(color_data)", "def sort_file(file_path, encoding=ENCODING):\n file_path = os.path.normpath(file_path)\n input_file = open_fr(file_path, encoding)\n lines = [line.strip().replace('\\x00', '') for line in input_file]\n input_file.close()\n outfile = open_fw(file_path, encoding)\n lines.sort()\n for line in lines:\n outfile.write(line + \"\\n\")\n outfile.close()\n return file_path", "def sort_subject_list() -> None:\n with open(\"resources/subject_list.txt\", \"r+\") as outfile:\n lines = outfile.readlines()\n lines.sort()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find all keys that are used in a list of dictionaries.
def get_keys_from_dict_list(elements):
    keys = []
    for element in elements:
        element_keys = element.keys()
        for key in element_keys:
            if (not key in keys):
                keys.append(key)
    return keys
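The same key collection can be done, order aside, with a set union; a minimal standalone sketch:

records = [{'a': 1, 'b': 2}, {'b': 3, 'c': 4}, {'c': 5}]

# Order-preserving loop, equivalent to get_keys_from_dict_list:
keys = []
for record in records:
    for key in record:
        if key not in keys:
            keys.append(key)
print(keys)                     # -> ['a', 'b', 'c']

# Order-insensitive one-liner over the same records:
print(set().union(*records))    # -> {'a', 'b', 'c'} (set, order not guaranteed)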
[ "def find_same_dict_from_dictlist(list_of_dict):\n common_items = reduce(operator.__and__, \n (d.viewitems() for d in list_of_dict))\n print common_items\n common_keys = [item[0] for item in common_items]\n print common_keys", "def get_all_keys(ds):\n keys = []\n for d in ds:\n for k in d.keys():\n if not( k in keys):\n keys.append(k)\n return keys", "def db_headings(dictionary):\n #gets a set ready to be worked on\n firststring = list(dictionary)[0]\n firstset = set(dictionary[firststring])\n finalset = firstset\n #starts finding common keys\n for item in dictionary:\n #converts the dictionary item to a set of keynames\n compareset = set(dictionary[item])\n #compares the keynames in finalset and compareset\n #and adjust finalset to be equal to the common key names\n finalset = finalset.intersection(compareset)\n return finalset", "def listkeys(d):\n return list(iterkeys(d))", "def exact_key(self, seq):\r\n key_list = []\r\n for m in self.map_list:\r\n key_list += m.get_keys(seq)\r\n return key_list", "def get_distinct_jsonpaths(dicts):\n path_set = set()\n for dic in dicts:\n for path in jsonpaths_in_dict(dic):\n path_set.add(path)\n return path_set", "def matched_keys(key_path: Any, all_keys: Sequence, case_ignored: bool, space_trimmed: bool = False) -> List:\n normalized = normalize(key_path, case_ignored, space_trimmed)\n keys = [k for k in all_keys if key_matches(k, normalized, case_ignored)]\n\n if len(keys) > 1:\n logger.warning(f\"Multiple matching of '{key_path}': {','.join((str(k) for k in keys))}\")\n return keys", "def extract_small_keys(dict_used, min_nb_positions):\n assert isinstance(dict_used, dict), \\\n \"dict_used must be dict, not {}\".format(type(dict_used))\n assert isinstance(min_nb_positions, int), \\\n \"min_nb_positions must be int, not {}\".format(type(min_nb_positions))\n sorted_dict = {k: v for k, v in sorted(dict_used.items(),\n key=lambda item: item[1])}\n index_value_min_value_pos = list(sorted_dict.keys())[min_nb_positions]\n min_value = sorted_dict[index_value_min_value_pos]\n possible_keys = list(\n {k: v for k, v in sorted_dict.items() if v <= min_value})\n return {\"indices\": possible_keys, \"threshold_value\": min_value}", "def database_shared_headings(database: dict) -> list:\n # creates a list of all keys from 1 sub dictionary\n starting_keys = list(list(database.values())[0].keys())\n # creates a copy to remove keys not in other dictionaries\n like_keys = list(list(database.values())[0].keys())\n # loops through all sub dictionaries\n for sub_dict in database.values():\n sub_dict_keys = sub_dict.keys()\n # checks for like keys and removes them from the like_keys list\n for key in starting_keys:\n if key not in sub_dict_keys:\n like_keys.remove(key)\n return like_keys", "def remove_keys_with_nonunique_values(dict_list, params_to_ignore=None):\n if params_to_ignore is None:\n params_to_ignore = []\n key_to_values = get_dict_key_to_values(dict_list)\n filtered_dicts = []\n for d in dict_list:\n new_d = {\n k: v for k, v in d.items()\n if len(key_to_values[k]) > 1 and k not in params_to_ignore\n }\n filtered_dicts.append(new_d)\n return filtered_dicts", "def distinct(\r\n dictset: Iterator[dict],\r\n cache_size: int = 10000):\r\n from ...utils import LRU_Index\r\n lru = LRU_Index(size=cache_size)\r\n\r\n for record in dictset:\r\n entry = serialize(record)\r\n if lru(entry):\r\n continue\r\n yield record", "def update_missing_keys(list_of_dicts):\n\n LOG.info('Making sure all (%d) entries have the same keys set...', len(list_of_dicts))\n all_keys = 
set().union(*[d.keys() for d in list_of_dicts])\n for d in list_of_dicts:\n keys_to_fill = all_keys - set(d.keys())\n for key in keys_to_fill:\n d[key] = None\n return list_of_dicts", "def consList():\n return consDict.keys()", "def all_keys(dict_obj, prefix=\"\"):\n\n keys = []\n\n for key, value in dict_obj.items():\n if not isinstance(value, dict):\n keys.append(prefix + key)\n else:\n keys += list(all_keys(value, prefix + key + \".\"))\n\n return keys", "def get_least_used(self):\n\n l_frequencies = self.smachine.scene.alphabet.LETTER_FREQUENCIES\n # l_frequencies = self.l_frequencies\n least_used = sorted(list(l_frequencies.items()), key=lambda x: x[1])\n least_used = [t[0] for t in least_used if t[1] < 1.0]\n lu = {k: [w for t in v for w in t\n if any(char in w[0] and char in self.rack\n for char in least_used)]\n for k, v in self.anchor_scores.items()}\n return {k: v for k, v in lu.items() if v}", "def get_all_item_keys(self):\n\t\treturn self.items.keys()", "def all_sample_keys(self) -> set:\n all_keys = set()\n for sample in self:\n all_keys.update(sample.keys())\n return all_keys", "def _remove_duplicates(items, key='key'):\n seen = set()\n result = []\n for item in items:\n item_key = item[key]\n if item_key in seen:\n continue\n seen.add(item_key)\n result.append(item)\n return result", "def db_consistent(dict_of_dict):\n inner_keys_list = []\n # Build a list of list of keys\n for key in dict_of_dict:\n inner_keys = list(dict_of_dict[key].keys())\n inner_keys.sort()\n inner_keys_list.append(inner_keys)\n for i in range(1, len(inner_keys_list)):\n # If the number of keys is different.\n if len(inner_keys_list[0]) != len(inner_keys_list[i]):\n return False\n # If the keys don't match.\n for j in range(len(inner_keys_list[0])):\n if inner_keys_list[0][j] != inner_keys_list[i][j]:\n return False\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Group elements by value (i.e. when all attributes other than the name match) and write the result to 'filename' as a CSV file.
def write_value_list(elements, filename, set_delimiter, settings):
    elements.sort(key=sort_dict_by_all_but_name)
    groups = []
    uniquekeys = []
    for key, group in groupby(elements, key=sort_dict_by_all_but_name):
        groups.append(list(group))  # Store group iterator as a list
        uniquekeys.append(key)
    grouped_elements = []
    for group in groups:
        group.sort(key=sort_dict_name_by_number)
        grouped_element = group.pop(0)
        count = 1
        for element in group:
            grouped_element['NAME'] += ','+element['NAME']
            count += 1
        grouped_element['COUNT'] = count
        grouped_elements.append(grouped_element)
    if ('set_quantity' in settings):
        for group in grouped_elements:
            group['COUNT'] = group['COUNT'] * int(settings['set_quantity'])
    return write_part_list(grouped_elements, filename, set_delimiter)
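A reduced sketch of the grouping step only: itertools.groupby needs its input pre-sorted by the same key (here, every attribute except NAME), after which identical parts are merged and counted. The key function below is a hypothetical stand-in for sort_dict_by_all_but_name, and the final write_part_list call is omitted.

from itertools import groupby

def all_but_name(part):
    # hypothetical stand-in for sort_dict_by_all_but_name
    return tuple(sorted((k, v) for k, v in part.items() if k != 'NAME'))

parts = [{'NAME': 'R1', 'VALUE': '10k'},
         {'NAME': 'R2', 'VALUE': '10k'},
         {'NAME': 'C1', 'VALUE': '100n'}]

parts.sort(key=all_but_name)          # groupby requires sorted input
grouped = []
for _, group in groupby(parts, key=all_but_name):
    group = list(group)
    merged = group[0]
    merged['NAME'] = ','.join(p['NAME'] for p in group)
    merged['COUNT'] = len(group)
    grouped.append(merged)
print(grouped)
# -> [{'NAME': 'C1', 'VALUE': '100n', 'COUNT': 1},
#     {'NAME': 'R1,R2', 'VALUE': '10k', 'COUNT': 2}]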
[ "def csv(self):\n output = io.StringIO()\n writer = csv.writer(output)\n labels = sorted(self.records.keys())\n\n # x labels.\n writer.writerow([''] + labels)\n\n # y labels and data.\n for y, y_label in enumerate(labels):\n row = [labels[y]]\n for x_label in labels:\n row.append(self.record_similarity(y_label, x_label))\n writer.writerow(row)\n\n return output.getvalue()", "def export_results(file, results):\n\n with open(file, 'w+') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(('Time [ns]', 'Best path', 'Best distance',\n 'Current path', 'Current distance'))\n for r in results:\n best_distance = r.best.distance if r.best else -1\n current_distance = r.current.distance if r.current else -1\n writer.writerow((r.time,\n r.best.path if r.best else '',\n best_distance,\n r.current.path if r.current else '',\n current_distance))", "def write_to_csv(entities, calls, outfile=\"out.csv\"):\n with open(outfile, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, quotechar='\"', quoting=csv.QUOTE_ALL)\n writer.writerow([\"ELEMENT NAME\", \"HIERARCHY ID\"] +\n list(range(1, len(entities)+1)))\n\n for i in range(len(entities)):\n row = []\n row.append(entities[i][1])\n row.append(i+1)\n for j in range(len(entities)):\n if calls[i][j] == 1:\n row.append(1)\n else:\n row.append(\" \")\n\n writer.writerow(row)", "def to_csv(self, filename):\n\n\t\t\n\t\t# Convert each image to a pandas data frame and append them\n\t\tdf = pd.DataFrame()\n\t\tfor i in range(len(self.image)):\n\t\t\t# Print updates\n\t\t\tprint self.image[i].file_name\n\t\t\tentry = self.image[i].to_pandas()\n\n\t\t\t# Append to growing data frame\n\t\t\tdf = df.append(entry)\n\n\t\t# return df to testing\n\t\tdf.to_csv(filename, index_label='id')\n\n\t\treturn df", "def writeContourData(agg,filename):\n xold = 0\n\n del agg['n']\n del agg['term']\n\n with open(filename,\"wt\") as outfile:\n csvw = csv.writer(outfile)\n csvw.writerow(['x','xdot','class'])\n for (xdot, x), c in agg.items():\n if x != xold:\n csvw.writerow([])\n xold = x\n csvw.writerow([x,xdot,c])", "def write_group(pf, tag,mdef):\n tbl=pf.get_tbl(tag)\n filename=tag+\".csv\"\n fh=open(filename,\"w+\")\n fh.write('\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\n' % (\"Key\",\"Type\",\"Mutable\",\"Concept\"))\n for k in tbl:\n t=mdef.type(k)\n tstr=\"undefined\"\n if(t==MDtype.Int64):\n tstr=\"int\"\n elif(t==MDtype.Double):\n tstr=\"double\"\n elif(t==MDtype.String):\n tstr=\"string\"\n elif(t==MDtype.Boolean):\n tstr=\"boolean\"\n writeable=mdef.writeable(k)\n wstr=\"undefined\"\n if(writeable):\n wstr=\"Yes\"\n else:\n wstr=\"No\"\n fh.write('\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\n' % (k,tstr,wstr,mdef.concept(k)))\n fh.close()", "def write_file(self, filename):\n logging.info(f\"\\nWriting DataSet to file '{filename}'\")\n with open(filename, 'w') as file:\n for record in self.data_set.records:\n line = record.attribute_values.copy()\n for index, value in self.insert_indexes:\n if value == 'classname':\n line.insert(index, record.output_value)\n else:\n line.insert(index, value)\n file.write(','.join([str(x) for x in line]) + '\\n')", "def make_csv(self, filename, freq_dict):\n if filename.endswith('.csv'):\n file = filename\n else:\n file = str(filename)+'.csv'\n fout = open(file, 'w')\n freq_dict = sorted(freq_dict.items())\n for i, v in freq_dict:\n x = ''\n for j in str(i)[0:3]:\n x += j\n x+='.'\n for j in str(i)[4:7]:\n x += j\n fout.write(str(x)+','+str(v)+'\\n')\n fout.close()\n return True", "def save_to_file_csv(cls, 
list_objs):\n comma = False\n attrs = ['id', 'size', 'x', 'y']\n if cls.__name__ == 'Rectangle':\n attrs = ['id', 'width', 'height', 'x', 'y']\n with open(\"{}.csv\".format(cls.__name__), \"w\", encoding='utf-8') as f:\n for i in list_objs:\n for x in attrs:\n if comma is True:\n f.write(\",\")\n comma = True\n f.write(\"{}\".format(eval(\"i.{}\".format(x))))", "def generate_csv(fd, results, output_format, header, newline, user_delimiter) :\n\tif results != {} :\n\t\tspamwriter = csv.writer(fd, delimiter=user_delimiter)\n\t\t\n\t\tif header == YES_HEADER:\n\t\t\tcsv_header = [format_item.upper() for format_item in output_format.split('-')]\n\t\t\tspamwriter.writerow(csv_header)\n\t\t\n\t\tfor IP in sorted(results.iterkeys()) :\n\t\t\tformatted_attribute_list = []\n\t\t\t\n\t\t\tfor index,format_item in enumerate(output_format.split('-')) :\n\t\t\t\titem = formatted_item(results[IP], format_item)\n\t\t\t\tformatted_attribute_list.insert(index, item)\n\t\t\t\n\t\t\tformatted_attribute_list = repeat_attributes(formatted_attribute_list)\n\t\t\t\n\t\t\tfor line_to_write in itertools.izip(*formatted_attribute_list):\n\t\t\t\tspamwriter.writerow(list(line_to_write))\n\t\t\t\n\t\t\t# Print a newline if asked\n\t\t\tif newline == YES_NEWLINE:\n\t\t\t\tspamwriter.writerow('')\n\n\treturn", "def save_csv(self, filename):\n redditors = set(self.submitters.keys()).union(self.commenters.keys())\n mapping = dict((x.lower(), x) for x in redditors)\n with codecs.open(filename, 'w', encoding='utf-8') as outfile:\n outfile.write('username, type, permalink, score\\n')\n for _, redditor in sorted(mapping.items()):\n for submission in self.submitters.get(redditor, []):\n outfile.write(u'{0}, submission, {1}, {2}\\n'\n .format(redditor, submission.permalink,\n submission.score))\n for comment in self.commenters.get(redditor, []):\n outfile.write(u'{0}, comment, {1}, {2}\\n'\n .format(redditor, comment.permalink,\n comment.score))", "def dump_stream_to_csv(iterable, csv_filename, csv_flavor = COMMA_DELIM): \n with open(csv_filename, 'w') as csvfile:\n spamwriter = csv.writer(csvfile, **csv_flavor)\n for row in iterable: \n spamwriter.writerow(row)", "def dump_runner_output_to_csv(results, filepath):\n\n if os.path.exists(filepath):\n raise ValueError(\"Output file path already exists\")\n\n with open(filepath, \"w\", newline='') as filehandler:\n csv_writer = csv.writer(filehandler)\n csv_writer.writerow([\"Candidate evidence id\", \"Relation present\"])\n for prediction, value in results.items():\n prediction_id = prediction.id\n csv_writer.writerow([prediction_id, value])", "def asteroids_csv(self, payload):\n csv_file=open(f\"/tmp/asteroids_{self.today}.csv\",'w', newline='\\n')\n fields=list(payload[0].keys())\n writer=csv.DictWriter(csv_file, fieldnames=fields)\n writer.writeheader()\n writer.writerows(payload)\n csv_file.close()", "def serialize(array, filename):\n with open(_defaultify(_uniquify(filename)), 'wb') as csvfile:\n entrywriter = csv.writer(csvfile, encoding='utf-8')\n for entry in array:\n entrywriter.writerow(entry.toArray())", "def write_csv(self,filename):\n with open(filename,'wt') as fp:\n for r in self.reception_reports:\n fp.write(repr(r)+'\\n')", "def write_csv(self,result,outfile):\n import csv\n if len(result.items()) == 0:\n return\n keys = list(list(result.items())[0][1].keys())\n headers = ['ip'] + keys\n with open(outfile,'w+') as f:\n w = csv.DictWriter(f,fieldnames=headers)\n w.writeheader()\n for ip in result.keys():\n row = {'ip': ip}\n for key in keys:\n row[key] = 
result[ip][key]\n w.writerow(row)", "def to_csv(self):\n pass", "def grouper(fp, cols):\n for k, g in groupby(fp, key=lambda t: ret_item(t, cols, \"gene\")):\n yield g" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get the library part from input parameters drawing, library and deviceset
def get_librarypart(drawing, library, deviceset): for library_tree in drawing.iterfind('schematic/libraries/library'): if (library_tree.attrib['name'] == library): for deviceset_tree in library_tree.iterfind('devicesets/deviceset'): if (deviceset_tree.attrib['name'] == deviceset): return deviceset_tree
[ "def _convert_library(self, design):\n\n for _cc in design.components.components:\n _libid = 'default'\n _compname = _cc\n _tech = []\n _attrs = []\n if -1 != _cc.find(':'):\n _libid, _compname = _cc.split(':')\n\n _lib = None\n _libnid = -1\n for _li, _ll in enumerate(self.libraries):\n if _libid == _ll.name:\n _lib = _ll\n _libnid = 1 + _li # numbered from 1\n break\n else:\n _lib = Eagle.Library(name=_libid)\n _libnid = len(self.libraries) # numbered from 1\n self.libraries.append(_lib)\n\n# checking if symbols / devsets / packages are in the library already\n# (adding them if not)\n _co = design.components.components[_cc]\n\n if 0 == len(_lib.devsets):\n _lib.devsets.append(Eagle.DeviceSetHeader(name='default'))\n\n for _di, _dd in enumerate(_lib.devsets[0].shapesets):\n if _compname == _dd.name:\n _dset = _dd\n break\n else:\n _prefix = 'xC'\n _desc = 'n/a'\n if 'prefix' in _co.attributes:\n _prefix = _co.attributes['prefix']\n if 'description' in _co.attributes:\n _desc = _co.attributes['description']\n _dset = Eagle.DeviceSet(name=_compname, prefix=_prefix, \n description=_desc, uservalue=False)\n\n _lib.devsets[0].shapesets.append(_dset)\n\n if 0 == len(_lib.symbols):\n _lib.symbols.append(Eagle.SymbolHeader(name='default'))\n\n for _si, _ss in enumerate(_lib.symbols[0].shapesets):\n if _compname == _ss.name:\n _symbol = _ss\n _symnid = 1 + _si # numbered from 1\n break\n else: # no such symbol yet\n _symbol = Eagle.Symbol(libid=_libnid, name=_compname)\n _symnid = len(_lib.symbols[0].shapesets) # numbered from 1\n\n for _css in _co.symbols:\n for _cbb in _css.bodies:\n\n for _ci in design.component_instances:\n if _cc != _ci.library_id:\n continue\n for _xaa in _ci.attributes:\n if 'technology' == _xaa:\n _tech.append(_ci.attributes[_xaa])\n elif _xaa in ('prefix', 'description'):\n pass\n else:\n _attrs.append((_xaa, _ci.attributes[_xaa]))\n for _sa in _ci.symbol_attributes:\n for _an, _aa in enumerate(_sa.annotations):\n _val = 'n/a'\n if 0 == _an:\n _val = '>NAME'\n elif 1 == _an:\n _val = '>VALUE'\n\n _rot = self.Shape.rotate2strings(_aa.rotation)\n\n _symbol.shapes.append(Eagle.Text(\n value=_val,\n x=_aa.x - _sa.x,\n y=_aa.y - _sa.y,\n size=1.778, layer=95, \n rotate=_rot, font=None,\n ratio=10))\n\n for _cpp in _cbb.pins:\n\n _name = None\n if None != _cpp.label:\n _name = _cpp.label.text\n\n _visible = None\n if 'visible' in _cpp.attributes:\n _visible = _cpp.attributes['visible']\n\n _dir = None\n if 'direction' in _cpp.attributes:\n _dir = _cpp.attributes['direction']\n\n _rot = None\n\n _len = 'short'\n if 'length' in _cpp.attributes:\n _len = _cpp.attributes['length']\n \n _func = None\n if 'function' in _cpp.attributes:\n _func = _cpp.attributes['function']\n \n _swap = 0\n if 'swaplevel' in _cpp.attributes:\n _swap = _cpp.attributes['swaplevel']\n \n _symbol.shapes.append(Eagle.Pin(name=_name,\n x=_cpp.p2.x, y=_cpp.p2.y, visible=_visible,\n direction=_dir, rotate=_rot, length=_len,\n function=_func, swaplevel=_swap))\n for _cff in _cbb.shapes:\n\n _layer = 94\n if 'label' in _cff.attributes:\n _layer = _cff.attributes['layer']\n\n if isinstance(_cff, Line):\n _style = 'Continuous'\n if 'style' in _cff.attributes:\n _style = _cff.attributes['style']\n\n _width = 0.254\n if 'width' in _cff.attributes:\n _width = _cff.attributes['width']\n\n _symbol.shapes.append(Eagle.Wire(\n x1=_cff.p1.x, y1=_cff.p1.y,\n x2=_cff.p2.x, y2=_cff.p2.y,\n style=_style, layer=_layer, width=_width))\n elif isinstance(_cff, Rectangle):\n _symbol.shapes.append(Eagle.Rectangle(\n x1=_cff.x, 
y1=_cff.y,\n x2=(_cff.x + _cff.width), \n y2=(_cff.y - _cff.height),\n rotate=None, layer=_layer))\n elif isinstance(_cff, Arc):\n _style = 'Continuous'\n if 'style' in _cff.attributes:\n _style = _cff.attributes['style']\n\n _width = 0.254\n if 'width' in _cff.attributes:\n _width = _cff.attributes['width']\n\n _layer = 91 # usually Nets\n\n _dir = ('counterclockwise' \n if _cff.start_angle < _cff.end_angle\n else 'clockwise')\n _symbol.shapes.append(Eagle.Arc( # _cff's angles're in radians\n x1=_cff.x + _cff.radius * math.cos(_cff.start_angle), # sign is ok\n y1=_cff.y + _cff.radius * math.sin(_cff.start_angle),\n x2=_cff.x + _cff.radius * math.cos(_cff.end_angle),\n y2=_cff.y + _cff.radius * math.sin(_cff.end_angle),\n style=_style, \n layer=_layer, width=_width,\n curve=math.degrees(abs(_cff.start_angle - _cff.end_angle)),\n cap=None, \n direction=_dir))\n elif isinstance(_cff, BezierCurve):\n# raise NotImplementedError(\"BezierCurve isn't implemented for Eagle yet\")\n# TODO curve approximation with arcs\n _style = 'Continuous'\n if 'style' in _cff.attributes:\n _style = _cff.attributes['style']\n\n _width = 0.254\n if 'width' in _cff.attributes:\n _width = _cff.attributes['width']\n\n _symbol.shapes.append(Eagle.Wire(\n x1=_cff.p1.x, y1=_cff.p1.y,\n x2=_cff.p2.x, y2=_cff.p2.y,\n style=_style, layer=_layer, width=_width))\n elif isinstance(_cff, Circle):\n _width = 0.254\n if 'width' in _cff.attributes:\n _width = _cff.attributes['width']\n\n _symbol.shapes.append(Eagle.Circle(\n x=_cff.x, y=_cff.y,\n radius=_cff.radius, \n width=_width, layer=_layer))\n elif isinstance(_cff, Polygon):\n _width = 0.254\n if 'width' in _cff.attributes:\n _width = _cff.attributes['width']\n\n _style = 'Continuous'\n if 'style' in _cff.attributes:\n _style = _cff.attributes['style']\n\n _symbol.shapes.append(Eagle.Polygon(\n width=_width, layer=_layer,\n numofshapes=len(_cff.points),\n shapes=[ # lines from points\n Eagle.Wire(\n x1=p1.x, y1=p1.y,\n x2=p2.x, y2=p2.y,\n style=_style, layer=_layer, \n width=_width)\n for p1, p2 in zip(_cff.points, \n _cff.points[1:]+[_cff.points[0],])\n ]))\n elif isinstance(_cff, Label):\n _layer = 95 # usually Names\n if 'label' in _cff.attributes:\n _layer = _cff.attributes['layer']\n\n _rot = self.Shape.rotate2strings(_cff.rotation)\n\n _symbol.shapes.append(Eagle.Text(\n value=_cff.text,\n x=_cff.x, y=_cff.y,\n size=1.778, font=None, ratio=10,\n rotate=_rot, layer=_layer))\n else:\n raise ValueError(\"cannot process \" + _cff.__class__.__name__)\n\n _lib.symbols[0].shapesets.append(_symbol)\n\n _dset.shapes.append(Eagle.Gate(name='G$1', x=0., y=0., \n sindex=_symnid, addlevel=False))\n _dset.connblocks.append(Eagle.ConnectionHeader(name='default', \n attributes=_attrs, technologies=_tech,\n sindex=_symnid))\n \n if 0 == len(_lib.packages):\n _lib.packages.append(Eagle.PackageHeader(name='default'))\n # TODO to load from a library file\n return", "def _get_library_id(self):\r\n libraries = DiskLibraries(self._commcell_object)\r\n return libraries.get(self.library_name).library_id", "def __get_library_layout(self):\n self.add_debug('Fetch library layout ...')\n\n converter = LibraryLayoutConverter(\n self.stock_sample_creation_iso.rack_layout,\n parent=self)\n self.__library_layout = converter.get_result()\n\n if self.__library_layout is None:\n msg = 'Error when trying to convert library layout.'\n self.add_error(msg)\n else:\n self.__library_sectors = QuadrantIterator.sort_into_sectors(\n working_layout=self.__library_layout,\n number_sectors=NUMBER_SECTORS)\n 
del_sectors = []\n for sector_index, positions in self.__library_sectors.iteritems():\n if len(positions) < 1: del_sectors.append(sector_index)\n for sector_index in del_sectors:\n del self.__library_sectors[sector_index]", "def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:", "def library_prep(self):\n return self[3:6]", "def libparts(self) -> Optional[tuple[str, ...]]:\n if is_data_dir(self.parts[0]):\n if len(self.parts) > 2 and self.parts[1] in (\"purelib\", \"platlib\"):\n return self.parts[2:]\n else:\n return None\n elif is_dist_info_dir(self.parts[0]):\n return None\n else:\n return self.parts", "def _get_library_properties(self):\r\n flag, response = self._commcell_object._cvpysdk_object.make_request(\r\n 'GET', self._library_properties_service\r\n )\r\n\r\n if flag:\r\n if response.json():\r\n if 'libraryInfo' in response.json():\r\n return response.json()['libraryInfo']\r\n raise SDKException('Storage', '102', 'Failed to get disk Library properties')\r\n raise SDKException('Response', '102')\r\n response_string = self._commcell_object._update_response_(response.text)\r\n raise SDKException('Response', '101', response_string)", "def getPathToLibrary(cls, dependency):\n soname = dependency.getBaseName()\n if dependency.depname in cls._pathCache :\n return cls._pathCache[dependency.depname]\n #for each library we have in the system\n for line in getOutputAsList([\"/sbin/ldconfig\",\"-p\"])[0]:\n # if dependency is 64 and library is 64 of\n # dependency is 32 and library is 32:\n if len(line) > 0 and soname in line and \\\n ( (dependency.is64bits() and cls._ldconfig_64bits in line) or \\\n (dependency.is32bits() and not cls._ldconfig_64bits in line) ):\n temp = line.split('=>')\n if len(temp) == 2:\n provider=temp[1].strip()\n if cls._checkMinor(provider, dependency.depname):\n cls._pathCache[dependency.depname] = provider\n return provider\n pathToScan = cls.systemPath\n if \"LD_LIBRARY_PATH\" in os.environ:\n #we need to scan the LD_LIBRARY_PATH too\n pathToScan += os.environ[\"LD_LIBRARY_PATH\"].split(':')\n for path in pathToScan:\n provider = path + '/' + soname\n if os.path.isfile(provider) and \\\n cls._checkMinor(provider, dependency.depname):\n #we found the soname and minor are there return true\n cls._pathCache[dependency.depname] = provider\n return provider\n #the dependency could not be located\n return None", "def find_gamepad():\n for d in hid.enumerate():\n if '2Axes' in d['product_string']:\n return d", "def view_library_param(arg,\n scene_root=_scene_root,\n history_db=_history_db,\n library_db=_library_db,\n query_start=_query_start,\n query_end=_query_end,\n current_scene_db=None):\n from src.praxxis.parameter import list_param\n from src.praxxis.util import roots\n\n if current_scene_db is None:\n current_scene_db = roots.get_current_scene_db(scene_root, history_db)\n\n list_param.list_library_param(arg, library_db, current_scene_db, query_start, query_end)", "def get_lib_path(self, dim):\n import os\n import platform\n import peanoclaw as peanoclaw\n if platform.system() == 'Linux':\n shared_library_extension = 'so'\n elif platform.system() == 'Darwin':\n shared_library_extension = 'dylib'\n else:\n raise(\"Unsupported operating system. Only Linux and MacOS supported currently.\")\n \n libraryFileName = os.path.join(os.path.dirname(peanoclaw.__file__), 'libpeano-claw-'+ str(dim)+ 'd' + self.internal_settings.getFilenameSuffix() + '.' 
+ shared_library_extension)\n logging.getLogger('peanoclaw').info(libraryFileName)\n return os.path.join(libraryFileName)", "def getSingleLibrary(self, context, id):\n libraries = [l for l in self.getLibraries(context) if l['id']==id]\n\n for l in libraries:\n l['src'] = self.kupuUrl(l['src'])\n if libraries:\n return libraries[0]\n return None", "def _get_library_entry(videoid):\n if videoid.mediatype in [common.VideoId.MOVIE, common.VideoId.EPISODE]:\n return (common.get_path(videoid.to_list(), g.library()),\n videoid.mediatype)\n elif videoid.mediatype == common.VideoId.SHOW:\n return (\n _any_child_library_entry(\n _any_child_library_entry(g.library()[videoid.tvshowid])),\n common.VideoId.EPISODE)\n elif videoid.mediatype == common.VideoId.SEASON:\n return (\n _any_child_library_entry(\n g.library()[videoid.tvshowid][videoid.seasonid]),\n common.VideoId.EPISODE)\n else:\n # Items of other mediatype are never in library\n raise ItemNotFound", "def getLibrary(self):\n return self._library", "def _get_vendor_part(self):\n return self.__vendor_part", "def useLibraryMesh(self):\r\n #When the button is clicked, first we get a duplicate of the mesh\r\n selectedMesh = cmds.ls(sl=True)\r\n if not selectedMesh:\r\n #This means nothing is selected. So don't do anything\r\n return\r\n\r\n self.workMesh = cmds.duplicate(n ='TubxLibraryMesh', rr=True)\r\n #We we isolate the object\r\n cmds.select(self.workMesh)\r\n cmds.isolateSelect('modelPanel4', state=1)\r\n cmds.select(self.workMesh)\r\n cmds.isolateSelect('modelPanel4', addSelected = True)\r\n\r\n #We create the shaders\r\n LibraryGeneration.createShaders()\r\n\r\n #Finally, we assign the default shader to the workMesh\r\n cmds.sets(self.workMesh, edit = True, forceElement = 'TubxDefaultSG')\r\n\r\n #We also enable tracking selection so we can get the order of selection for the ear vertex\r\n self.TrackingStatus = cmds.selectPref(q=True, trackSelectionOrder=True)\r\n cmds.selectPref(trackSelectionOrder = True)", "def _convert_shapes1(self, design):\n for _pp in design.component_instances:\n _libid = -1\n _devn = -1\n _libname = 'default'\n _pname = _pp.library_id\n if -1 != _pp.library_id.find(':'):\n _libname, _pname = _pp.library_id.split(':')\n \n for _li, _ll in enumerate(self.libraries):\n if _libname == _ll.name:\n _libid = _li\n for _di, _dd in enumerate(_ll.devsets[0].shapesets):\n if _pname == _dd.name:\n _devn = _di\n break\n break\n\n self.shapeheader.parts.append(Eagle.Part(\n name=_pp.instance_id, libid=_libid, devsetndx=_devn,\n symvar=1, techno=1)) # after OpenJSON all parts are split\n return", "def get_library_file(instrument, detector, filt, pupil, wfe, wfe_group, library_path, wings=False):\n psf_files = glob(os.path.join(library_path, '*.fits'))\n\n # Create a dictionary of header information for all PSF library files\n # psf_table = {}\n matches = []\n\n instrument = instrument.upper()\n detector = detector.upper()\n filt = filt.upper()\n pupil = pupil.upper()\n wfe = wfe.lower()\n\n for filename in psf_files:\n header = fits.getheader(filename)\n file_inst = header['INSTRUME'].upper()\n try:\n file_det = header['DETECTOR'].upper()\n except KeyError:\n file_det = header['DET_NAME'].upper()\n file_filt = header['FILTER'].upper()\n\n try:\n file_pupil = header['PUPIL_MASK'].upper()\n except KeyError:\n # If no pupil mask value is present, then assume the CLEAR is\n # being used\n if file_inst.upper() == 'NIRCAM':\n file_pupil = 'CLEAR'\n elif file_inst.upper() == 'NIRISS':\n file_pupil = 'CLEARP'\n\n # NIRISS has many filters 
in the pupil wheel. Webbpsf does\n # not make a distinction, but Mirage does. Adjust the info\n # to match Mirage's expectations\n if file_inst.upper() == 'NIRISS' and file_filt in NIRISS_PUPIL_WHEEL_FILTERS:\n save_filt = copy(file_filt)\n if file_pupil == 'CLEARP':\n file_filt = 'CLEAR'\n else:\n raise ValueError(('Pupil value is something other than '\n 'CLEARP, but the filter being used is '\n 'in the pupil wheel.'))\n file_pupil = save_filt\n\n opd = header['OPD_FILE']\n if 'requirements' in opd:\n file_wfe = 'requirements'\n elif 'predicted' in opd:\n file_wfe = 'predicted'\n\n file_wfe_grp = header['OPDSLICE']\n\n # allow check below to pass for FGS\n if instrument.lower() == 'fgs':\n file_filt = 'N/A'\n filt = 'N/A'\n file_pupil = 'N/A'\n pupil = 'N/A'\n\n if not wings:\n match = (file_inst == instrument and file_det == detector and file_filt == filt and\n file_pupil == pupil and file_wfe == wfe and file_wfe_grp == wfe_group)\n else:\n match = (file_inst == instrument and file_det == detector and file_filt == filt and\n file_pupil == pupil and file_wfe == wfe)\n\n if match:\n matches.append(filename)\n # psf_table[filename] = [file_inst, file_det, file_filt, file_pupil, file_wfe, file_wfe_grp, match]\n\n # Find files matching the requested inputs\n if len(matches) == 1:\n return matches[0]\n elif len(matches) == 0:\n raise ValueError(\"No PSF library file found matching requested parameters.\")\n elif len(matches) > 1:\n raise ValueError(\"More than one PSF library file matches requested parameters: {}\".format(matches))", "def get_lib(self, cursor, uid):\n proxy = self.pool['ir.config_parameter']\n webkit_path = proxy.get_param(cursor, uid, 'webkit_path')\n\n if not webkit_path:\n try:\n defpath = os.environ.get('PATH', os.defpath).split(os.pathsep)\n if hasattr(sys, 'frozen'):\n defpath.append(os.getcwd())\n if tools.config['root_path']:\n defpath.append(os.path.dirname(tools.config['root_path']))\n webkit_path = tools.which('wkhtmltopdf', path=os.pathsep.join(defpath))\n except IOError:\n webkit_path = None\n\n if webkit_path:\n return webkit_path\n\n raise except_osv(\n _('Wkhtmltopdf library path is not set'),\n _('Please install executable on your system' \\\n ' (sudo apt-get install wkhtmltopdf) or download it from here:' \\\n ' http://code.google.com/p/wkhtmltopdf/downloads/list and set the' \\\n ' path in the ir.config_parameter with the webkit_path key.' \\\n 'Minimal version is 0.9.9')\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get the package name of a device from input parameters drawing, library, deviceset and device
def get_package(drawing, library, deviceset, device): deviceset_tree = get_librarypart(drawing, library, deviceset) for device_tree in deviceset_tree.iterfind('devices/device'): if device_tree.attrib['name'] == device: if "package" in device_tree.attrib: return device_tree.attrib['package'] return ""
[ "def get_device_name_and_platform(self, device):\r\n # Lowercase the device name\r\n if device is not None:\r\n device = device.lower()\r\n device = device.strip().replace(\" \",\"\")\r\n # If given vague iphone/ipad/android then set the default device\r\n if re.match(DEFAULT_REGEX_IOS,device):\r\n # Set to default to iphone6 for automotive and iphone7 all other vertical\r\n if 'iphone' == device:\r\n '''\r\n try:\r\n directory = str(os.path.abspath(__file__))\r\n print \"Dir: \" + directory\r\n if 'Automotive_Automation' in directory:\r\n device = 'iphone6'\r\n else:\r\n device = 'iphone7'\r\n except:\r\n device = 'iphone7'\r\n '''\r\n device = 'iphone6'\r\n else:\r\n device = 'ipadair2'\r\n elif re.match(DEFAULT_REGEX_ANDROID,device):\r\n device = 'androidemulator8'\r\n \r\n print(\"Device: \" + str(device))\r\n # Get full name, and platform\r\n fullName = self._determine_fullname(device)\r\n platform = self._determine_platform(device)\r\n \r\n print(\"Actual Name: \" + str(fullName))\r\n print(\"Actual Name: \" + str(platform))\r\n return fullName, platform", "def device(self):\n return str(self.dummy_param.device)", "def get_device_name(device):\n from OpenGL.EGL.EXT.device_query import (\n eglQueryDeviceStringEXT,\n )\n from OpenGL.EGL.EXT.device_drm import (\n EGL_DRM_DEVICE_FILE_EXT,\n )\n if eglQueryDeviceStringEXT:\n name = eglQueryDeviceStringEXT(\n device,\n EGL_DRM_DEVICE_FILE_EXT\n )\n return name.decode('ascii',errors='ignore')\n return None", "def deviceInfo(self):\n getusbs = usb.core.find(find_all=True)\n devices = dict(enumerate(str(dev.manufacturer) + \":\" + str(dev.idProduct) + \":\" + str(dev.idVendor) for dev in getusbs))\n for key, value in devices.items():\n print(key, \":\", value)\n hook = input(\"---> Select a device: \")\n idProd, idVen = devices[int(hook)].split(':')[1:]\n device = usb.core.find(idVendor=int(idVen), idProduct=int(idProd))\n print(device)", "def get_device_name(self, device):\n return None if device == \"DEV1\" else device.lower()", "def read_kit_device(self):\n self._is_hid_tool_not_connected_raise()\n\n dap_info = read_tool_info(self.housekeeper)\n\n device_name = dap_info['device_name'].lower()\n\n if device_name == '':\n device_name = None\n\n return device_name", "def test_extract_device_name():\n assert grml2usb.extract_device_name(\"/dev/sda\") == \"sda\"\n assert grml2usb.extract_device_name(\"/dev/sdb\") == \"sdb\"\n assert grml2usb.extract_device_name(\"/dev/sdb4\") == \"sdb\"", "def device_name(self, text):\n pass", "def _infer_device_name(graph_def):\n device_name = None\n for node in graph_def.node:\n if node.device:\n device_name = node.device\n break\n if device_name is None:\n logging.warn(\n \"Failed to infer device name from partition GraphDef: none of the \"\n \"nodes of the GraphDef has a non-empty device name.\")\n return device_name", "def get_device_instance_name(dev, apply_to_builtin=True):\n if 'PluginDevice' in dev.class_name or dev.class_name.startswith('MxD'):\n return dev.name\n else:\n if apply_to_builtin and dev.name != dev.class_display_name:\n return dev.name\n return", "def get_devname(device_name, addPath=False):\n out, err, rc = run_command(\n [UDEVADM, 'info', '--query=name', '--name', str(device_name)],\n throw=False)\n if len(out) > 0:\n # we have at least a single line of output\n fields = out[0].split()\n if len(fields) == 1:\n # we have a single word output so return it with or without path\n if addPath:\n return '/dev/%s' % fields[0]\n # return the word (device name ie sda) without added /dev/\n 
return fields[0]\n # a non one word reply was received on the first line from udevadm or\n return None", "def get_default_output_device():\n read_access = wr.KEY_READ | wr.KEY_WOW64_64KEY if is_os_64bit() else wr.KEY_READ\n audio_path = r'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\MMDevices\\Audio\\Render'\n audio_key = wr.OpenKeyEx(wr.HKEY_LOCAL_MACHINE, audio_path, 0, read_access)\n num_devices = wr.QueryInfoKey(audio_key)[0]\n active_last_used, active_device_name = -1, None\n for i in range(num_devices):\n device_key_path = f'{audio_path}\\\\{wr.EnumKey(audio_key, i)}'\n device_key = wr.OpenKeyEx(wr.HKEY_LOCAL_MACHINE, device_key_path, 0, read_access)\n if wr.QueryValueEx(device_key, 'DeviceState')[0] == 1: # if enabled\n properties_path = f'{device_key_path}\\\\Properties'\n properties = wr.OpenKeyEx(wr.HKEY_LOCAL_MACHINE, properties_path, 0, read_access)\n device_name = wr.QueryValueEx(properties, '{b3f8fa53-0004-438e-9003-51a46e139bfc},6')[0]\n device_type = wr.QueryValueEx(properties, '{a45c254e-df1c-4efd-8020-67d146a850e0},2')[0]\n pa_name = f'{device_type} ({device_name})' # name shown in PyAudio\n last_used = wr.QueryValueEx(device_key, 'Level:0')[0]\n if last_used > active_last_used: # the bigger the number, the more recent it was used\n active_last_used = last_used\n active_device_name = pa_name\n return active_device_name", "def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:", "def get_librarypart(drawing, library, deviceset):\n for library_tree in drawing.iterfind('schematic/libraries/library'):\n if (library_tree.attrib['name'] == library):\n for deviceset_tree in library_tree.iterfind('devicesets/deviceset'):\n if (deviceset_tree.attrib['name'] == deviceset):\n return deviceset_tree", "def udev_device_get_name(info):\n return info.get(\"DM_NAME\", info[\"name\"])", "def choose_device(cls, devices):\n return devices[0]", "def device_name(id):\n return device_id_to_name_mapping[id] if id in device_id_to_name_mapping else 'Unknown Device'", "def find_device_path():\n out = subprocess.check_output([\"colormgr\", \"get-devices-by-kind\", \"display\"])\n\n # If there is more than one device being managed, there will be multiple data blocks\n # separated by blank lines. In each block the 'Object Path' line will always occur\n # before the 'Model' or 'Embedded' line, so we repeatedly set the object_path and\n # only break when we find an appropriate match. If we are not targeting a specific\n # device, we just pick the first embedded device we find (i.e. 
the laptops screen).\n\n object_path = None\n for line in out.decode(\"utf8\").split(\"\\n\"):\n if line.startswith(\"Object Path:\"):\n object_path = line.split(\":\")[1].lstrip()\n elif target.device is None:\n if line.startswith(\"Embedded:\"):\n embedded = line.split(\":\")[1].lstrip()\n if embedded == \"Yes\":\n break\n else:\n if line.startswith(\"Model:\"):\n model_name = line.split(\":\")[1].lstrip()\n if model_name.startswith(target.device):\n break\n\n return object_path", "def _determine_fullname(self, device):\r\n androidGroups = re.findall(REGEX_ANDROID, device)\r\n iosGroups = re.findall(REGEX_IOS, device)\r\n if iosGroups!=[]:\r\n deviceType = iosGroups[0][0]\r\n model = iosGroups[0][1]\r\n fullName = self._parse_ios(deviceType, model)\r\n elif androidGroups!=[]:\r\n androidVersion = androidGroups[0][2]\r\n fullName = \"AndroidEmulator\"+androidVersion\r\n else:\r\n raise Exception(\"The provided device name '{}' does not match the expected formats for either iOS or Android.\".format(device))\r\n \r\n print(\"Given name '{}' translated to '{}'.\".format(device,fullName))\r\n return fullName", "def model(self) -> str:\n self._logger.info(\"Retrieving device model name (aka project)...\")\n return self._device_info().get(\"project\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get the description of a deviceset from input parameters drawing, library and deviceset
def get_description(drawing, library, deviceset): deviceset_tree = get_librarypart(drawing, library, deviceset) for description in deviceset_tree.iterfind('description'): return description.text
[ "def get_librarypart(drawing, library, deviceset):\n for library_tree in drawing.iterfind('schematic/libraries/library'):\n if (library_tree.attrib['name'] == library):\n for deviceset_tree in library_tree.iterfind('devicesets/deviceset'):\n if (deviceset_tree.attrib['name'] == deviceset):\n return deviceset_tree", "def deviceInfo(self):\n getusbs = usb.core.find(find_all=True)\n devices = dict(enumerate(str(dev.manufacturer) + \":\" + str(dev.idProduct) + \":\" + str(dev.idVendor) for dev in getusbs))\n for key, value in devices.items():\n print(key, \":\", value)\n hook = input(\"---> Select a device: \")\n idProd, idVen = devices[int(hook)].split(':')[1:]\n device = usb.core.find(idVendor=int(idVen), idProduct=int(idProd))\n print(device)", "def get_device_type_descriptions(self):\n print(\"\\n\" + \"DEVICE TYPE\".ljust(25) + \"| DESCRIPTION\")\n for d in self._device_type_maps:\n print(\"{name:24} | {description}\".format(**d))", "def list_devices():\n out = subprocess.check_output([\"colormgr\", \"get-devices-by-kind\", \"display\"])\n for line in out.decode(\"utf8\").split(\"\\n\"):\n if line.startswith(\"Model:\"):\n print(line.split(\":\")[1].lstrip())", "def device_description(self):\n return self.call_action(\"DeviceInfo1\", \"GetInfo\")[\"NewDescription\"]", "def gpu_list_desc(use_for=None):\n return ('define which GPUs to use{}: \"all\", \"None\", or a comma-separated list, e.g. \"1,2\"'\n .format('' if use_for is None else ' for ' + use_for))", "def get_descriptor_set_alloc_info(alloc_descriptor_set, architecture):\n return VulkanStruct(\n architecture, DESCRIPTOR_SET_ALLOCATE_INFO_ELEMENTS,\n lambda offset, size: little_endian_bytes_to_int(require(\n alloc_descriptor_set.get_read_data(\n alloc_descriptor_set.hex_pAllocateInfo + offset, size))))", "def device(self):\n return str(self.dummy_param.device)", "def listInputDevices():\n pass", "def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]:", "def getChipset(signature):\r\n return getToken(signature, '', '/')", "def getModelDesc():\n\n return \"Example model: A 3D spherical Gaussian blob in a 3D cartesian grid\"", "def get_default_output_device():\n read_access = wr.KEY_READ | wr.KEY_WOW64_64KEY if is_os_64bit() else wr.KEY_READ\n audio_path = r'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\MMDevices\\Audio\\Render'\n audio_key = wr.OpenKeyEx(wr.HKEY_LOCAL_MACHINE, audio_path, 0, read_access)\n num_devices = wr.QueryInfoKey(audio_key)[0]\n active_last_used, active_device_name = -1, None\n for i in range(num_devices):\n device_key_path = f'{audio_path}\\\\{wr.EnumKey(audio_key, i)}'\n device_key = wr.OpenKeyEx(wr.HKEY_LOCAL_MACHINE, device_key_path, 0, read_access)\n if wr.QueryValueEx(device_key, 'DeviceState')[0] == 1: # if enabled\n properties_path = f'{device_key_path}\\\\Properties'\n properties = wr.OpenKeyEx(wr.HKEY_LOCAL_MACHINE, properties_path, 0, read_access)\n device_name = wr.QueryValueEx(properties, '{b3f8fa53-0004-438e-9003-51a46e139bfc},6')[0]\n device_type = wr.QueryValueEx(properties, '{a45c254e-df1c-4efd-8020-67d146a850e0},2')[0]\n pa_name = f'{device_type} ({device_name})' # name shown in PyAudio\n last_used = wr.QueryValueEx(device_key, 'Level:0')[0]\n if last_used > active_last_used: # the bigger the number, the more recent it was used\n active_last_used = last_used\n active_device_name = pa_name\n return active_device_name", "def user32_GetRawInputDeviceInfo(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"hDevice\", \"uiCommand\", \"pData\", \"pcbSize\"])\n raise 
RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def test_read_set(self):\n load_known_modules()\n dosi = DetectedObjectSetInput.create('simulator')\n dosi.set_configuration(_create_simulator_config())\n sets = list(iter(dosi.read_set, None))\n self.assertEqual(len(sets), SIMULATOR_CONFIG['max_sets'])\n for s in sets:\n self.assertEqual(len(s), 2)\n dos, image_name = s\n self.assertIsInstance(dos, DetectedObjectSet)\n self.assertEqual(len(dos), SIMULATOR_CONFIG['set_size'])\n self.assertEqual(image_name, SIMULATOR_CONFIG['image_name'])", "def main(spc):\n devices_list = spc.device_management.devices.get()\n for device in devices_list:\n print(\"%s,\\t%s\" % (device.domain_name, device.name))", "def device_info(self) -> Dict[str, Any]:\n return {\n \"identifiers\": {(LITTERROBOT_DOMAIN, self.robot.serial)},\n \"name\": self.robot.name,\n \"manufacturer\": \"Litter-Robot\",\n \"model\": \"Litter-Robot 3 Connect\"\n if self.robot.serial.startswith(\"LR3C\")\n else \"unknown\",\n }", "def test(device):\n device_marking = device.get_device_marking()\n assert type(device_marking) is str", "def test_get_asset_device_configuration_list(self):\n pass", "def print_menu_and_get_device(device_type):\n\n devices = None\n if device_type == \"sink\":\n devices = pulse.get_sinks()\n print(\"Available Pulse Audio sinks:\")\n elif device_type == \"source\":\n devices = pulse.get_sources()\n print(\"Available Pulse Audio sources:\")\n else:\n raise ValueError(\"device_type must be either sink or source\")\n for index, device in enumerate(devices):\n print(\"\\t{index}: {active_indicator}{name}\".format(\n index=index,\n active_indicator=\"(active default) \" if device[\"active\"] else \"\",\n name=device[\"device_name\"]))\n valid_input = False\n selection = None\n while not valid_input:\n selection = input(\"? \")\n valid_input = is_int(selection) and 0 <= int(selection) < len(devices)\n selection = int(selection)\n return devices[selection]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
check whether a part is a schematic-only part or if it is also on the PCB
def is_part_on_pcb(drawing, library, deviceset): deviceset_tree = get_librarypart(drawing, library, deviceset) if deviceset_tree.find('devices/device/connects'): return True
[ "def check_Part_policing_operation(observation):\n \n part_oper = observation.get(\"Part of a policing operation\")\n \n if isinstance(part_oper, str):\n error = \"Field 'Part of a policing operation' is not a boolean\"\n return False, error \n \n return True, \"\"", "def _check_partno(self, ctx=None):\n if hasattr(self, \"Item\") and self.teilenummer and not self.Item:\n raise ue.Exception(\"part_number\", self.teilenummer, self.t_index)", "def is_part(feature):\n return \"_part\" in feature[\"localId\"]", "def hasPart(self, *args):\n return _coin.SoCylinder_hasPart(self, *args)", "def isAnySPC(vhcl):\n if vhcl.getVehicleType() == \"SPC\" or vhcl.getVehicleType() == \"SPCO\":\n return True\n else:\n return False", "def check_conditions(self, part=None):\n assert part is not None, 'must specify what to check'\n\n # check the BSP itself?\n if part == 'bright_star_pipeline':\n # force redo requested?\n _force_redo = self.db_entry['pipelined'][self.name]['status']['force_redo']\n # pipeline done?\n _done = self.db_entry['pipelined'][self.name]['status']['done']\n # how many times tried?\n _num_tries = self.db_entry['pipelined'][self.name]['status']['retries']\n\n go = _force_redo or ((not _done) and (_num_tries <= self.config['misc']['max_retries']))\n\n return go\n\n # Preview generation for the results of BSP processing?\n elif part == 'bright_star_pipeline:preview':\n\n # pipeline done?\n _pipe_done = self.db_entry['pipelined'][self.name]['status']['done']\n\n # failed?\n _pipe_failed = self.db_entry['pipelined'][self.name]['classified_as'] == 'failed'\n\n # preview generated?\n _preview_done = self.db_entry['pipelined'][self.name]['preview']['done']\n\n # last_modified == pipe_last_modified?\n _outdated = abs((self.db_entry['pipelined'][self.name]['preview']['last_modified'] -\n self.db_entry['pipelined'][self.name]['last_modified']).total_seconds()) > 1.0\n\n # how many times tried?\n _num_tries = self.db_entry['pipelined'][self.name]['preview']['retries']\n\n # if self.db_entry['_id'] == '3_J1144+6946_VIC_Si_o_20170607_043349.042103':\n # print(_pipe_done, _pipe_failed, _preview_done, _outdated)\n # input('WAIT!!')\n\n # print(_pipe_done, _pipe_failed, _preview_done, _outdated)\n go = (_pipe_done and (not _pipe_failed)) and ((not _preview_done) or _outdated) \\\n and (_num_tries <= self.config['misc']['max_retries'])\n\n return go\n\n # Strehl calculation for the results of BSP processing?\n elif part == 'bright_star_pipeline:strehl':\n\n # pipeline done?\n _pipe_done = self.db_entry['pipelined'][self.name]['status']['done']\n\n # failed?\n _pipe_failed = self.db_entry['pipelined'][self.name]['classified_as'] == 'failed'\n\n # Strehl calculated?\n _strehl_done = self.db_entry['pipelined'][self.name]['strehl']['status']['done']\n\n # last_modified == pipe_last_modified?\n _outdated = abs((self.db_entry['pipelined'][self.name]['strehl']['last_modified'] -\n self.db_entry['pipelined'][self.name]['last_modified']).total_seconds()) > 1.0\n\n # how many times tried?\n _num_tries = self.db_entry['pipelined'][self.name]['strehl']['status']['retries']\n\n # print(_pipe_done, _pipe_failed, _preview_done, _outdated)\n go = (_pipe_done and (not _pipe_failed)) and ((not _strehl_done) or _outdated) \\\n and (_num_tries <= self.config['misc']['max_retries'])\n\n return go\n\n # Run PCA high-contrast processing pipeline?\n elif part == 'bright_star_pipeline:pca':\n\n # pipeline done?\n _pipe_done = self.db_entry['pipelined'][self.name]['status']['done']\n\n # failed?\n _pipe_failed = 
self.db_entry['pipelined'][self.name]['classified_as'] == 'failed'\n\n # pca done?\n _pca_done = self.db_entry['pipelined'][self.name]['pca']['status']['done']\n\n # last_modified == pipe_last_modified?\n _outdated = abs((self.db_entry['pipelined'][self.name]['pca']['last_modified'] -\n self.db_entry['pipelined'][self.name]['last_modified']).total_seconds()) > 1.0\n\n # how many times tried?\n _num_tries = self.db_entry['pipelined'][self.name]['pca']['status']['retries']\n\n # print(_pipe_done, _pipe_failed, _preview_done, _outdated)\n go = (_pipe_done and (not _pipe_failed)) and ((not _pca_done) or _outdated) \\\n and (_num_tries <= self.config['misc']['max_retries'])\n\n return go\n\n elif part == 'bright_star_pipeline:pca:preview':\n\n # pipeline done?\n _pipe_done = self.db_entry['pipelined'][self.name]['status']['done']\n\n # failed?\n _pipe_failed = self.db_entry['pipelined'][self.name]['classified_as'] == 'failed'\n\n # pca done?\n _pca_done = self.db_entry['pipelined'][self.name]['pca']['status']['done']\n\n # pca preview done?\n _pca_preview_done = self.db_entry['pipelined'][self.name]['pca']['preview']['done']\n\n # last_modified == pipe_last_modified? (or old DB entry)\n _outdated = 'last_modified' not in self.db_entry['pipelined'][self.name]['pca']['preview'] or \\\n (abs((self.db_entry['pipelined'][self.name]['pca']['preview']['last_modified'] -\n self.db_entry['pipelined'][self.name]['last_modified']).total_seconds()) > 1.0)\n\n # how many times tried?\n _num_tries = self.db_entry['pipelined'][self.name]['pca']['preview']['retries']\n\n go = (_pipe_done and (not _pipe_failed) and _pca_done) and ((not _pca_preview_done) or _outdated) \\\n and (_num_tries <= self.config['misc']['max_retries'])\n\n return go", "def check(self, partnames):\n\n for i in partnames:\n if i not in shader_part:\n return False\n\n return True", "def is_in_weierstrass_disc(self,P):\n if (P[1].valuation() == 0 and P != self(0,1,0)):\n return False\n else:\n return True", "def check_win(symbol):\n for line in lines:\n if concat_section(create_section(line)) == (symbol * 3):\n return True\n return False", "def isPlatooning(self):\n\n\t\treturn self.vehicle.fwd_pair_partner != None or self.vehicle.bwd_pair_partner != None", "def getPart(self, partname: 'SbName', makeifneeded: 'SbBool') -> \"SoNode *\":\n return _coin.SoBaseKit_getPart(self, partname, makeifneeded)", "def supports_assessment_part_smart_bank(self):\n return False", "def discipline_check():\n\n file_path = cmds.file(query=1, expandName=1)\n\n if '/surface/' in file_path:\n discipline = 'surface'\n return discipline\n elif '/model/' in file_path:\n discipline = 'model'\n return discipline\n else:\n print '\\nThe file has no discipline.\\n'\n discipline = 'none'\n return discipline", "def is_setting_format_correct(piece_input):\n if len(piece_input) != 5:\n return False\n piece_name_cond = piece_input[0].upper() in \"RNBQKP\"\n piece_color_cond = piece_input[1].lower() in \"wb\"\n sign_cond = piece_input[2] == \"-\"\n position_cond = piece_input[3].lower() in COLS and piece_input[4] in ROWS\n if all([piece_name_cond, piece_color_cond, sign_cond, position_cond]):\n return True\n else:\n return False", "def _sandhiPrepNoPrep(self):\n if self.Pada1 in CAntaPadary_set:\n NoPrep = True\n self.NoStoh = True # extra condition\n elif self.Pada1 in sandhi_noprep_set:\n NoPrep = True\n else:\n NoPrep = False\n return NoPrep", "def test_spw_id_exact(self):\n spw='23'\n 
self.res=sdgrid(spw=spw,infiles=self.infiles,outfile=self.outname,gridfunction=self.gfunc,npix=self.npix)\n self.assertEqual(self.res,None, msg='Any error occurred during calibration')\n tbsel = {'IFNO': [23]}\n self._comparecal_with_selection(self.outname, tbsel)", "def is_valid_position(self, piece):\r\n for (x, y) in piece.get_template():\r\n if x < 0 or x > 9 or y > 19 or \\\r\n (0 <= x <= 9 and 0 <= y <= 19 and self.grid[y][x]):\r\n return False\r\n return True", "def supports_assessment_part_bank(self):\n return False", "def _pre_condition(self):\n self.logger.info(\"Checking precondition for SubtractSky\")\n skyfile = None\n skymask = None\n # check if kcwi.sky exists\n if os.path.exists('kcwi.sky'):\n f = open('kcwi.sky')\n skyproc = f.readlines()\n f.close()\n # is our file in the list?\n ofn = self.action.args.name\n for row in skyproc:\n if ofn in row.split()[0]:\n skyfile = row.split()[1]\n if len(row.split()) > 2:\n skymask = row.split()[2]\n if skyfile:\n if not os.path.exists(skyfile):\n skyfile = None\n if skymask:\n if not os.path.exists(skymask):\n skymask = None\n self.action.args.skyfile = skyfile\n self.action.args.skymask = skymask\n if skyfile:\n self.logger.info(\"pre condition got 1 master sky, expected 1\")\n return True\n else:\n\n target_type = 'SKY'\n tab = self.context.proctab.search_proctab(frame=self.action.args.ccddata,\n target_type=target_type,\n nearest=True)\n self.logger.info(\"pre condition got %d master sky, expected 1\"\n % len(tab))\n if len(tab) <= 0:\n return False\n else:\n return True", "def in_use(self, piece):\n # TODO check for valid comparisons\n use = piece.shape in self.in_play[piece.color]\n if use:\n print(\"Not a valid move: %s %s already in play\" %(piece.color, piece.shape))\n return use" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
find out if the element has different settings for the selected variant and change it accordingly
def change_part_by_variant(part_tree, part, selected_variant): if 'populate' in part_tree.attrib and part_tree.attrib['populate'] == "no": part['DO_NOT_PLACE'] = "yes" return for variant in part_tree.iterfind('variant'): if (variant.attrib['name'] == selected_variant): if ('value' in variant.attrib): part['VALUE'] = variant.attrib['value'] if ('populate' in variant.attrib): part['DO_NOT_PLACE'] = "yes"
[ "def select_variant(drawing, variant_find_string, settings):\n \n ##stores the actual used variant\n selected_variant = \"\"\n ##stores the default variant if available\n default_variant = \"\"\n ##stores the number of defined variants in the board file\n number_variant = 0 \n\n #find all variants that are in the schematic\n if( 'variantlist' in settings ):\n print(\"VARIANT,CURRENT\")\n for elem in drawing.iterfind(variant_find_string):\n number_variant = number_variant + 1\n if (('current' in elem.attrib) and (elem.attrib['current']==\"yes\")):\n default_variant = elem.attrib['name']\n variant_current = \"TRUE\"\n else:\n variant_current = \"FALSE\"\n if( 'variantlist' in settings ):\n print(elem.attrib['name'] + \",\" + variant_current)\n if (elem.attrib['name'] == settings['set_variant']):\n selected_variant = settings['set_variant']\n if( 'variantlist' in settings ):\n sys.exit(0)\n\n #find out which variant to use, if there is any\n if (selected_variant == \"\" and\n default_variant == \"\" and\n number_variant > 0):\n print (\"invalid variant defined, aborting\")\n return\n elif (selected_variant == \"\"):\n selected_variant = default_variant\n\n if (number_variant > 0):\n debug_print (settings, 1, \"variant: \" + selected_variant)\n\n return selected_variant", "def test_used_as_variant_theme (self):\n self._test_scoped(self.create_variant())", "def reset_variant(self):\n self.current_variant = 'O'\n self.current_variant_grid = self.original_grid.copy()", "def SoOverrideElement_getPickStyleOverride(state: 'SoState') -> \"SbBool\":\n return _coin.SoOverrideElement_getPickStyleOverride(state)", "def test_integration(self):\n s = self.selenium\n self.open('/repository/2007/03/group')\n s.click('link=Variants')\n s.waitForCssCount('css=#variant-content', 1) # wait for tab to load\n\n # test that descriptive name is displayed near image\n s.assertTextPresent('Vollbreit (L)')\n\n # change default\n s.dragAndDrop('css=.ui-slider-handle', '0,-50')\n s.dragAndDrop('css=.focuspoint', '50,50')\n s.waitForCssCount('css=.saved', 1)\n s.waitForCssCount('css=.saved', 0)\n\n # switch to first preview, i.e. 
cinema-small\n s.click('css=img.preview')\n s.waitForCssCount('css=.switched', 1)\n s.waitForCssCount('css=.switched', 0)\n\n # change settings for cinema-small\n s.dragAndDrop('css=.cropper-point.point-nw', '50,50')\n s.waitForCssCount('css=.saved', 1)\n s.waitForCssCount('css=.saved', 0)\n\n repository = zope.component.getUtility(\n zeit.cms.repository.interfaces.IRepository)\n variants = repository['2007']['03']['group'].variants\n self.assertEqual(['cinema-small', 'default'], sorted(variants.keys()))\n # Compare Zoom values: cinema-small < default < 1 (default setting)\n self.assertLess(variants['default']['zoom'], 1)\n self.assertLess(\n variants['cinema-small']['zoom'],\n variants['default']['zoom'])\n # Compare Focus Point: cinema-small > default > 0.5 (default setting)\n self.assertGreater(variants['default']['focus_x'], 0.5)\n self.assertGreater(\n variants['cinema-small']['focus_x'],\n variants['default']['focus_x'])\n\n s.click('css=input[value=Verwerfen]')\n s.waitForCssCount('css=.reset_single', 1)\n s.waitForCssCount('css=.reset_single', 0)\n\n repository = zope.component.getUtility(\n zeit.cms.repository.interfaces.IRepository)\n variants = repository['2007']['03']['group'].variants\n self.assertEqual(['default'], sorted(variants.keys()))", "def update_selected(self, caller, value):\n for index, node in enumerate(self.data):\n if value == node[\"text\"]:\n self.layout_manager.select_node(index)", "def getPickStyleOverride(state: 'SoState') -> \"SbBool\":\n return _coin.SoOverrideElement_getPickStyleOverride(state)", "def __makeVariantList(self, set):\n self.variant_list = self.__makeVariantStringList()\n if not self.variant_list:\n set = False\n self.ui.listVariants.clear()\n self.ui.listVariants.addItems(self.variant_list)\n if set:\n row = self.variant_list.index(self.selected_variant_str_ID)\n self.ui.listVariants.setCurrentRow(row)\n return True\n else:\n self.selected_variant_str_ID = \"base\"\n self.setState(\"make_base\")\n return False", "def check_selected(self, sender, args):\n self._set_states(state=True, selected=True)", "def test_set_selected(self):\n self.radio_buttons.append(\"option1\")\n self.radio_buttons.append(\"option2\")\n value = 1\n self.radio_buttons.setSelected(value)\n self.assertEqual(self.radio_buttons.selected(), value)", "def handle_change_option(self, change):\n m = self.get_active_modal()\n m.current_option = (m.current_option + change) % len(m.option_labels)\n self.option_labels.set_highlight(m.current_option)", "def _on_option_clicked(self, *_):\n self.variable.set(True)", "def on_option_change(self, event):\n\t\telement = event.GetEventObject()\n\t\t_id = element.GetId()\n\t\tvar_name = self.var_ids[_id]\n\t\tif var_name == 'time_index' or var_name == 'pl_index':\n\t\t\tval = int(element.GetValue().split(\" \")[0])\n\t\telif var_name == 'preset':\n\t\t\tval = element.GetValue()\n\t\t\tself.display_map_preview(val)\n\t\telse:\n\t\t\tval = element.GetValue()\n\t\tself.update_option(var_name, val)\n\t\tevent.Skip()", "def do_change_then_find(self, settings: Settings) -> bool:\n p = self.c.p\n self.init_ivars_from_settings(settings)\n if not self.check_args('change-then-find'):\n return False\n if self.change_selection(p):\n return bool(self.do_find_next(settings))\n return False", "def change_selected_sign():\n change_sign()", "def _draw_icon_variant_tools(layout, index):\n props = layout.operator('object.switch_variant_selection', text=\"\", emboss=False, icon='RESTRICT_SELECT_OFF')\n props.variant_index = index\n props.select_type = 
_OP_consts.SelectionType.undecided\n props = layout.operator('object.switch_variant_visibility', text=\"\", emboss=False, icon='RESTRICT_VIEW_OFF')\n props.variant_index = index\n props.view_type = _OP_consts.ViewType.undecided", "def onPotencialChanged(self):\n self.potencial = self.potenzialDropDown.currentIndex()", "def is_selected(self,index):\n return self._animalSelect[index]", "def on_settings1(self, state1):\r\n if state1 == PyQt5.QtCore.Qt.Checked:\r\n config[\"settings\"][0][\"chrome\"] = \"true\"\r\n else:\r\n config[\"settings\"][0][\"chrome\"] = \"false\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
find all variants that are defined in the drawing and select the most appropriate one based on settings and the default variant
def select_variant(drawing, variant_find_string, settings): ##stores the actual used variant selected_variant = "" ##stores the default variant if available default_variant = "" ##stores the number of defined variants in the board file number_variant = 0 #find all variants that are in the schematic if( 'variantlist' in settings ): print("VARIANT,CURRENT") for elem in drawing.iterfind(variant_find_string): number_variant = number_variant + 1 if (('current' in elem.attrib) and (elem.attrib['current']=="yes")): default_variant = elem.attrib['name'] variant_current = "TRUE" else: variant_current = "FALSE" if( 'variantlist' in settings ): print(elem.attrib['name'] + "," + variant_current) if (elem.attrib['name'] == settings['set_variant']): selected_variant = settings['set_variant'] if( 'variantlist' in settings ): sys.exit(0) #find out which variant to use, if there is any if (selected_variant == "" and default_variant == "" and number_variant > 0): print ("invalid variant defined, aborting") return elif (selected_variant == ""): selected_variant = default_variant if (number_variant > 0): debug_print (settings, 1, "variant: " + selected_variant) return selected_variant
[ "def test_default_variant_is_most_common_or_control(self):\n s = Swab(self.datadir)\n s.add_experiment('foo', ['v1', 'v2'], 'goal')\n assert s.experiments['foo'].default_variant == 'v1'\n\n s.add_experiment('bar', ['v1', 'v2', 'v2'], 'goal')\n assert s.experiments['bar'].default_variant == 'v2'", "def pick_matching_options(self, var, aff):\n temp = []\n\n for i in self.options:\n if i.affinity_bonus[aff] >= var[0]:\n temp.append(i)\n temp = list(set(temp))\n if self.selected:\n for i in temp:\n if i in self.selected:\n temp.remove(i)\n # The number of collected constellations cannot be lower than the minimum required specified in var[1].\n if len(temp) < var[1]:\n return None\n else:\n return temp", "def find_variant(name, version=None):\n prod = None\n for prod in SUPPORTED_VARIANTS:\n if prod.name == name:\n if version is None:\n return (prod, prod.latest_version())\n for v in prod.versions:\n if v.name == version:\n return (prod, v)\n\n return (None, None)", "def enum(self, partial_solution, remaining_selection_ordered, covered, cost, sets_benefits, depth, selected_sets, lds_limit):\n\n \"\"\"if time.time() - self.prune_factor_update > 0.3:\n self.prune_factor_update = time.time()\n self.prune_factor *= 1.01\n print('PF:', self.prune_factor)\n\n if random.random() < self.prune_factor and depth > 20:\n return\"\"\"\n\n if time.time() - self.start_time > self.time_limit:\n return\n\n # fully covered\n if len(covered) == self.item_count:\n if cost < self.best_global_solution_cost:\n print(cost)\n self.best_global_solution_cost = cost\n self.best_global_solution = partial_solution\n return\n\n # can't add more items but not yet done\n if len(remaining_selection_ordered) == 0:\n return\n\n # bound\n if cost >= self.best_global_solution_cost:\n return\n\n if self.set_count < 6000:\n local_worths = np.array([len(s) / self.costs[i] for i, s in enumerate(sets_benefits)])\n selecting_set_among_remaining = np.argmax(local_worths[remaining_selection_ordered])\n selecting_set = remaining_selection_ordered[selecting_set_among_remaining]\n #print(local_worths, selecting_set_among_remaining, selecting_set, remaining_selection_ordered)\n #print(len(remaining_selection_ordered))\n remaining_selection_ordered = remaining_selection_ordered[np.arange(len(remaining_selection_ordered)) != selecting_set_among_remaining].copy()\n #print(len(remaining_selection_ordered))\n\n #print(remaining_selection_ordered, depth, '\\n')\n else:\n selecting_set = remaining_selection_ordered[0]\n remaining_selection_ordered = remaining_selection_ordered[1:]\n\n added_items = sets_benefits[selecting_set]\n if not added_items.issubset(covered):\n p1 = partial_solution.copy()\n p1[selecting_set] = True\n if self.set_count < 6000:\n pass_sets = [s-added_items for s in sets_benefits]\n else:\n pass_sets = sets_benefits\n self.enum(p1, remaining_selection_ordered, covered | added_items, cost + self.costs[selecting_set], pass_sets, depth + 1, selected_sets | {selecting_set}, lds_limit)\n\n # DONT USE THIS SET\n \"\"\"total_possible_items = selected_sets | set(remaining_selection_ordered)\n for rare_set_family in self.rare_item_sets:\n if not (rare_set_family & total_possible_items):\n return\"\"\"\n\n if lds_limit > 0:\n p2 = partial_solution.copy() # TODO: unnecessary?\n p2[selecting_set] = False # TODO: unnecessary?\n self.enum(p2, remaining_selection_ordered, covered, cost, sets_benefits, depth+1, selected_sets, lds_limit-1)", "def get_mostDeleterious(varType, AC, AF): \n ranks = ['del', 'ins', 'complex', 'mnp', 'snp']\n 
variants = varType.split(',')\n allele_counts = [int(item) for item in AC.split(',')]\n allele_frequencies = [float(item) for item in AF.split(',')]\n\n varIdx_dic = {}\n for i, var in enumerate(variants):\n if var not in varIdx_dic:\n varIdx_dic[var] = [i]\n else:\n varIdx_dic[var].append(i)\n\n if 'del' in variants and 'ins' in variants:\n if variants.count('del') > variants.count('ins'):\n mostDelVarType = 'del'\n elif variants.count('del') < variants.count('ins'):\n mostDelVarType = 'ins'\n elif variants.count('del') == variants.count('ins'):\n max_del_AC = max([item for item in [allele_counts[i] for i in varIdx_dic['del']] ])\n max_ins_AC = max([item for item in [allele_counts[i] for i in varIdx_dic['del']] ])\n if max_del_AC > max_ins_AC:\n mostDelVarType = 'del'\n if max_ins_AC > max_del_AC:\n mostDelVarType = 'ins'\n if max_ins_AC == max_del_AC:\n mostDelVarType = random.choice(['ins', 'del'])\n else:\n for var in ranks:\n if var in variants:\n mostDelVarType = var\n break\n\n mostDelVarType_idx = varIdx_dic[mostDelVarType]\n if len(mostDelVarType_idx) == 1:\n idx = mostDelVarType_idx[0]\n allele_count, allele_frequency = allele_counts[idx], allele_frequencies[idx]\n else:\n max_count = -1\n idx = -1\n for i in mostDelVarType_idx:\n if allele_counts[i] > max_count:\n max_count = allele_counts[i]\n idx = i\n allele_count, allele_frequency = allele_counts[idx], allele_frequencies[idx]\n\n return mostDelVarType, allele_count, allele_frequency, idx", "def findShapeSel(self):\n sel= cmds.ls(sl=True)\n self.shapeSel=cmds.listRelatives(sel, s=True)\n\n return self.shapeSel", "def _draw_icon_variant_tools(layout, index):\n props = layout.operator('object.switch_variant_selection', text=\"\", emboss=False, icon='RESTRICT_SELECT_OFF')\n props.variant_index = index\n props.select_type = _OP_consts.SelectionType.undecided\n props = layout.operator('object.switch_variant_visibility', text=\"\", emboss=False, icon='RESTRICT_VIEW_OFF')\n props.variant_index = index\n props.view_type = _OP_consts.ViewType.undecided", "def _variants(self, name):\n return sorted(name.variants, self._cmp_variant)", "def getVariant(self):\n return self.variant, self.designId0", "def best_config_by_selection_types(self):\n output = {}\n for instance, benchmarks in self.benchmarks.items():\n output[instance] = self.__get_best_benchmark(instance)\n return output", "def _find_matching_variant_in_reader(self, variant):\n\n def _usable_truth(truth_variant):\n return (variant.start == truth_variant.start and\n not variantutils.is_filtered(truth_variant))\n\n region = variantutils.variant_position(variant)\n matches = [m for m in self._vcf_reader.query(region) if _usable_truth(m)]\n if not matches:\n return None\n elif len(matches) > 1:\n logging.warning(\n 'Multiple matches detected, keeping first, for variant %s: %s',\n variant, matches)\n return matches[0]", "def get_most(self, game, app):\n dots = {}\n kinds = app.get_dot_kinds()\n for kind in kinds:\n dots[kind] = []\n for pos in app.get_grid():\n if game.grid[pos].get_dot().get_kind() == kind:\n dots[kind].append(pos)\n most_kind, most_value = max(dots.items(), key = lambda x: len(set(x[1])))\n return most_kind", "def filter_variants(self, min_dp=10, min_gq=0, min_vaf=20, max_vaf=100, min_prct_cells=25, min_mut_prct_cells=0.5, min_mut_num_cells=None, min_std=0, method='mb', min_alt_read = 5):\n \n gt = self.layers[NGT]\n dp = self.layers[DP]\n gq = self.layers[GQ]\n vaf = self.layers[AF]\n\n # @ HZ: filter on alternative reads absolute value\n if min_alt_read > 0 and 
'alt_read_count' in self.layers:\n alt = self.layers['alt_read_count']\n alt_keep = alt >= min_alt_read\n elif min_alt_read > 0 and 'alt_read_count' not in self.layers:\n print('alt_read_count not calculated, calculate now')\n alt = (np.multiply(vaf, dp)/100).astype(int)\n self.add_layer('alt_read_count', alt)\n alt_keep = alt >= min_alt_read\n else:\n alt_keep = 1\n\n dp_keep = dp >= min_dp\n gq_keep = gq >= min_gq\n min_vaf_keep = ~np.logical_and(vaf < min_vaf, gt == 1)\n max_vaf_keep = ~np.logical_and(vaf > max_vaf, gt == 1)\n gt = (gt - 3) * dp_keep * gq_keep * min_vaf_keep * max_vaf_keep * alt_keep + 3 # workaround to apply filter in one line\n # ^^^ \n # @HZ: this is dangerous since this will only trim down variants already filtered by the default thresholds\n\n self.add_layer(NGT_FILTERED, gt)\n\n num_cells = len(self.barcodes())\n \n ##############################################################\n # @HZ: different way of filtering based on read depth per cell\n if method == 'mb':\n min_cells_filter = np.isin(gt, [0, 1, 2]).sum(axis=0) > num_cells * min_prct_cells / 100\n elif method == 'hz':\n min_cells_filter = dp_keep.sum(axis=0) > num_cells * min_prct_cells / 100\n else:\n print(\"method should be either 'mb' or 'hz' \")\n raise NotImplementedError\n ########################################\n\n if min_mut_num_cells is not None:\n if min_mut_prct_cells is not None:\n print(\"only one of [min_mut_prct_cells] and [min_mut_num_cells] should be input \")\n raise NotImplementedError\n elif not (0 <= min_mut_num_cells < num_cells):\n print(\"[min_mut_num_cells] should be greater than or equal to zero and smaller than the total number of cells in the sample\")\n raise ValueError\n\n else:\n min_cells_mut_filter = np.isin(gt, [1, 2]).sum(axis=0) > min_mut_num_cells\n else:\n min_cells_mut_filter = np.isin(gt, [1, 2]).sum(axis=0) > round(num_cells * min_mut_prct_cells / 100)\n\n good_variants = min_cells_mut_filter * min_cells_filter\n\n final_filter = (vaf.std(axis=0) >= min_std) * good_variants\n\n # @HZ: add reason for exclusion as a layer \"filter_info\" to each variant-cell pair\n \n # dp_fil = np.char.array(np.where(~dp_keep, 'dp', ''))\n # gq_fil = np.char.array(np.where(~gq_keep, 'gq', ''))\n # min_vaf_fil = np.char.array(np.where(~min_vaf_keep, 'min_vaf', ''))\n # max_vaf_fil = np.char.array(np.where(~max_vaf_keep, 'max_vaf', ''))\n\n # filter_info = dp_fil + ' ' + gq_fil + ' ' + min_vaf_fil + ' ' + max_vaf_fil\n # self.add_layer('filter_info', filter_info)\n\n # min_cells_fil = np.char.array(np.where(~min_cells_filter, 'min_cells_covered', ''))\n # min_cells_mut_fil = np.char.array(np.where(~min_cells_mut_filter, 'min_cells_mut', ''))\n \n # var_filter_info = min_cells_fil + ' ' + min_cells_mut_fil\n\n # var_filter_info_dict = {}\n # for variant, info in zip(self.col_attrs[ID], var_filter_info):\n # var_filter_info_dict[variant] = info\n\n return self.col_attrs[ID][final_filter].astype(str)", "def match(self, variant):\n matched_variant = self._find_matching_variant_in_reader(variant)\n if self._confident_regions is None:\n confident = matched_variant is not None\n else:\n confident = self._confident_regions.variant_overlaps(\n variant, empty_set_return_value=False)\n if matched_variant is None and confident:\n matched_variant = self._make_synthetic_hom_ref(variant)\n return confident, matched_variant", "def _get_select_opts(self):\n provs = self.mp_controls.get_value(self._COMP_PATH)\n self.prov_settings_map = _get_map(provs)\n existing_provs = list(provs.keys())\n return 
[(val, idx) for idx, val in enumerate(sorted(existing_provs))]", "def ChosenLineSearch(x,s):\n if lineSearchVariant==\"exact\":\n return self.ExactLineSearch(x,s)\n elif lineSearchVariant==\"inexact\":\n return self.InexactLineSearch(x,s)[0]", "def calibration_select(choice):\n #\"\"\"Distorted image variables setting\"\"\"\n # ROS calibration result\n if choice == 1:\n cal_distort = Cal3DS2(fx=347.820593, fy=329.096945, s=0, u0=295.717950,\n v0=222.964889, k1=-0.284322, k2=0.055723, p1=0.006772, p2=0.005264)\n if choice == 2:\n cal_distort = Cal3DS2(fx=333.4, fy=314.7, s=0, u0=303.6,\n v0=247.6, k1=-0.282548, k2=0.054412, p1=-0.001882, p2=0.004796)\n if choice == 3:\n cal_distort = Cal3DS2(fx=343.555173, fy=327.221818, s=0, u0=295.979699,\n v0=261.530851, k1=-0.305247, k2=0.064438, p1=-0.007641, p2=0.006581)\n if choice == 4:\n cal_distort = Cal3DS2(fx=384.768827, fy=365.994262, s=0, u0=293.450481,\n v0=269.045187, k1=-0.350338, k2=0.086711, p1=-0.006112, p2=0.013082)\n # Matlab toolbox calibration result\n if choice == 5:\n cal_distort = Cal3DS2(fx=331.0165, fy=310.4791, s=0, u0=332.7372,\n v0=248.5307, k1=-0.3507, k2=0.1112, p1=8.6304e-04, p2=-0.0018)\n\n # Manually extracted features\n # cam1_features = [Point2(293,307),Point2(292,348),Point2(292,364),Point2(328,307),Point2(327,347),Point2(326,362)]\n # cam2_features = [Point2(73,307),Point2(74,346),Point2(74,361),Point2(109,307),Point2(110,348),Point2(110,362)]\n cam1_features = [Point2(348, 293), Point2(348, 332), Point2(\n 348, 348), Point2(388, 291), Point2(388, 332), Point2(388, 348)]\n cam2_features = [Point2(213, 311), Point2(214, 350), Point2(\n 213, 365), Point2(249, 313), Point2(250, 352), Point2(251, 368)]\n\n cal = cal_distort\n kp1 = cam1_features\n kp2 = cam2_features\n\n #\"\"\"Undistorted image variables setting\"\"\"\n if choice == 5:\n # cal_undistort = Cal3_S2(fx=331.6959, fy=310.4940,s=0,u0=334.6017, v0=250.2013)\n cal_undistort = Cal3DS2(fx=331.6959, fy=310.4940, s=0, u0=334.6017,\n v0=250.2013, k1=-0.0076, k2=0.0088, p1=-6.0889e-04, p2=3.3046e-06)\n cam1_features_undistort = [Point2(302, 289), Point2(303, 324), Point2(\n 303, 338), Point2(335, 339), Point2(334, 324), Point2(333, 288)]\n cam2_features_undistort = [Point2(249, 222), Point2(249, 257), Point2(\n 249, 270), Point2(278, 272), Point2(278, 257), Point2(277, 222)]\n\n if choice == 1:\n # cal_undistort = Cal3_S2(fx=240.446564, fy=265.140778,s=0,u0=302.423680, v0=221.096494)\n cal_undistort = Cal3_S2(\n fx=232.0542, fy=252.8620, s=0, u0=325.3452, v0=240.2912)\n # cal_undistort = Cal3DS2(fx=232.0542, fy=252.8620,s=0,u0=325.3452, v0=240.2912, k1=-0.0076, k2=0.0088, p1=-6.0889e-04 , p2=3.3046e-06)\n\n # cam1_features_undistort = [Point2(302,289),Point2(303,324),Point2(303,338),Point2(335,339),Point2(334,324),Point2(333,288)]\n # cam2_features_undistort = [Point2(249,222),Point2(249,257),Point2(249,270),Point2(278,272),Point2(278,257),Point2(277,222)]\n cam1_features_undistort = [Point2(339, 277), Point2(339, 312), Point2(\n 340, 326), Point2(369, 277), Point2(369, 313), Point2(370, 327)]\n cam2_features_undistort = [Point2(243, 295), Point2(243, 329), Point2(\n 242, 343), Point2(269, 295), Point2(269, 330), Point2(269, 344)]\n\n cal_undist = cal_undistort\n kp1_undist = cam1_features_undistort\n kp2_undist = cam2_features_undistort\n\n return [cal, kp1, kp2], [cal_undist, kp1_undist, kp2_undist]", "def whatFlavors(cost, money):\n # quadratic \n ids = {}\n for i, c in enumerate(cost):\n if c in ids:\n ids[c].append(i + 1)\n else:\n ids[c] = [i + 1]\n 
print(ids)\n for i, c in enumerate(cost):\n diff = money - c\n if diff in ids:\n index = 0\n for j in ids[diff]:\n if j != i + 1:\n index = j\n return (i + 1, index) if (i + 1) < index else (index, i + 1)\n return None", "def filter_rdtest(variants, cutoffs):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function reads the Eagle XML and processes it to produce the bill of materials.
def bom_creation(settings): #prepare differences for brd and sch files if ('in_filename_brd' in settings): tree = ET.ElementTree(file=settings['in_filename_brd']) variant_find_string = "board/variantdefs/variantdef" part_find_string = "board/elements/element" elif ('in_filename_sch' in settings): tree = ET.ElementTree(file=settings['in_filename_sch']) variant_find_string = "schematic/variantdefs/variantdef" part_find_string = "schematic/parts/part" root = tree.getroot() drawing = root[0] elements = [] #select which variant to use selected_variant = select_variant(drawing, variant_find_string, settings) #read all elements that are on the board for elem in drawing.iterfind(part_find_string): element = {} deviceset_tree = get_librarypart(drawing, elem.attrib['library'], elem.attrib['deviceset']) device_tree = get_device_tree(deviceset_tree, elem.attrib['device']) for technology_tree in device_tree.iter('attribute'): #find('technologies/technology/attribute'): element[technology_tree.attrib['name']] = technology_tree.attrib['value'] element['NAME'] = elem.attrib['name'] if ("value" in elem.attrib): element['VALUE'] = elem.attrib['value'] if ("package" in elem.attrib): element['PACKAGE'] = elem.attrib['package'] # only try to get description if we use the schematic... # the BRD file does not contain this information if ('in_filename_sch' in settings): description = get_description(drawing, elem.attrib['library'], elem.attrib['deviceset']) description = get_first_line_text_from_html(description) element['DESCRIPTION'] = description element['DEVICE'] = elem.attrib["device"] element['PACKAGE'] = get_package(drawing, elem.attrib['library'], elem.attrib['deviceset'], elem.attrib['device']) #get all attributes of the element for attribute in elem.iterfind('attribute'): if ('value' in attribute.attrib): attribute_name = attribute.attrib['name'].upper() attribute_value = attribute.attrib['value'] element[attribute_name] = attribute_value change_part_by_variant(elem, element, selected_variant) if ('EXCLUDEFROMBOM' not in element and (('in_filename_sch' in settings and is_part_on_pcb(drawing, elem.attrib['library'], elem.attrib['deviceset']) ) or 'in_filename_brd' in settings)): if((settings['notestpads'] == False) or ('TP_SIGNAL_NAME' not in element)): elements.append(element) debug_print(settings, 1, "writing bom of type " + settings['bom_type']) if (settings['bom_type']=='value'): write_value_list(elements, settings['out_filename'], settings['set_delimiter'], settings) elif (settings['bom_type']=='part'): write_part_list(elements, settings['out_filename'], settings['set_delimiter'])
[ "def _readMoreXML(self,xmlNode):\n Dummy._readMoreXML(self, xmlNode)\n self.initializationOptionDict['name'] = self.name\n paramInput = ROM.getInputSpecification()()\n paramInput.parseNode(xmlNode)\n def tryStrParse(s):\n \"\"\"\n Trys to parse if it is stringish\n @ In, s, string, possible string\n @ Out, s, string, original type, or possibly parsed string\n \"\"\"\n return utils.tryParse(s) if type(s).__name__ in ['str','unicode'] else s\n\n for child in paramInput.subparts:\n if len(child.parameterValues) > 0:\n if child.getName() == 'alias':\n continue\n if child.getName() not in self.initializationOptionDict.keys():\n self.initializationOptionDict[child.getName()]={}\n self.initializationOptionDict[child.getName()][child.value]=child.parameterValues\n else:\n if child.getName() == 'estimator':\n self.initializationOptionDict[child.getName()] = {}\n for node in child.subparts:\n self.initializationOptionDict[child.getName()][node.getName()] = tryStrParse(node.value)\n else:\n self.initializationOptionDict[child.getName()] = tryStrParse(child.value)\n # if working with a pickled ROM, send along that information\n if self.subType == 'pickledROM':\n self.initializationOptionDict['pickled'] = True\n self._initializeSupervisedGate(**self.initializationOptionDict)\n #the ROM is instanced and initialized\n self.mods = self.mods + list(set(utils.returnImportModuleString(inspect.getmodule(SupervisedLearning),True)) - set(self.mods))\n self.mods = self.mods + list(set(utils.returnImportModuleString(inspect.getmodule(LearningGate),True)) - set(self.mods))", "def readEK80(self):\n \n #get file index\n idx = self._index_ek80()\n\n with open(self.fn , 'rb') as file:\n file.seek(0)\n for i in range(len(idx)):\n #######\n # XML0\n #######\n if idx['datagram'][i] == b'XML0':\n file.seek(idx['start'][i]+self.DATAGRAM_HEADER_SIZE)\n xml_string = np.fromfile(file, dtype = 'a'+str(idx['length'][i] - self.DATAGRAM_HEADER_SIZE),count=1)[0]\n xml0_dict = self.parse_XML(xml_string)\n xmltype = list(xml0_dict.keys())[0]\n #########\n # Config\n #########\n if xmltype == 'Configuration':\n xml_dat = xml0_dict['Configuration']\n self.sensors = xml_dat['ConfiguredSensors'][0]\n self.config_header = xml_dat['Header'][0]\n self.config_transceiver = xml_dat['Transceivers'][0]['Transceiver']\n self.config_transducer = xml_dat['Transducers'][0]['Transducer']\n self.n_trans = len(self.config_transceiver)\n self.CID =[]\n for c in range(self.n_trans):\n self.CID.append(xml_dat['Transceivers'][0]['Transceiver'][c]['Channels'][0]['Channel'][0]['ChannelID'])\n self.pings[c] = defaultdict(list)\n ping_count = [-1] * len(self.CID)\n #########\n # environment\n #########\n if xmltype == 'Environment':\n self.count_env += 1\n self.environment[self.count_env] = xml0_dict['Environment']\n #########\n # Ping self.parameters\n #########\n if xmltype == 'Parameter':\n self.count_para += 1\n self.parameters[self.count_para] = xml0_dict['Parameter']['Channel'][0]\n \n ###########\n # self.mru - Motion\n ###########\n elif idx['datagram'][i] == b'MRU0':\n self.count_mru += 1\n file.seek(idx['start'][i]+self.DATAGRAM_HEADER_SIZE)\n mru_dtype = np.dtype([('heave','f4'),('roll','f4'),('pitch','f4'),('heading','f4')])\n var = ['heave','roll','pitch','heading']\n mru = np.fromfile(file, dtype=mru_dtype, count=1)\n self.mru[self.count_mru] = dict(zip(var,mru[0]))\n ###########\n # FIL1 - Filter data\n ###########\n elif idx['datagram'][i] == b'FIL1':\n self.count_filt += 1\n file.seek(idx['start'][i]+self.DATAGRAM_HEADER_SIZE)\n FIL1_dtype 
= np.dtype([('Stage','i2'),('Spare','i2'), ('Channel','S128'),('NCoeff','i2'),('DecFac','i2')])\n f1 = np.fromfile(file, dtype = FIL1_dtype, count=1)\n coeffs = np.fromfile(file,dtype = str(2 * f1['NCoeff'][0]) + 'f4', count=1)\n self.filters[self.count_filt] = dict({'ChID':f1['Channel'][0].decode('ascii'),'Stage':f1['Stage'][0],'Spare':f1['Spare'][0],'NCoeff':f1['NCoeff'][0],'DecFac':f1['DecFac'][0], 'Coeff': coeffs})\n ################################################\n ##### NME0 Reader\n ################################################\n elif idx['datagram'][i] == b'NME0':\n self.count_NMEA += 1\n file.seek(idx['start'][i])\n head = np.fromfile(file,dtype='3i4',count=1)\n self.NMEA_string = np.fromfile(file,dtype='a'+str(idx['length'][i]),count=1)\n if len(self.NMEA_string) > 0:\n self.NMEA_string = self.NMEA_string[0]\n if len(self.NMEA_string) > 6:\n self.NMEA_type = self.NMEA_string[3:6] \n self.NMEA_ori = self.NMEA_string[1:3] #SD for sounder, Depth\n self.NMEA[self.count_NMEA] = dict({'string': self.NMEA_string,'type':self.NMEA_type,'ori':self.NMEA_ori})\n \n ########\n # RAW 3\n ########\n elif idx['datagram'][i] == b'RAW3':\n file.seek(idx['start'][i]+4)\n \n #get ping header information into temporary ping dictionary\n h_dtype = np.dtype([('lowDate','i4'),\n ('highDate','i4'),\n ('chID', 'S128'),\n ('dataType','I'),\n ('Offset','I'),\n ('SampleCount','I')])\n head = np.fromfile(file, dtype = h_dtype, count=1)\n head = dict(zip(['lowDate', 'highDate','chID', 'dataType','Offset','SampleCount'] ,list(head.tolist()[0])))\n ntSecs = (head['highDate'] * 2 ** 32 + head['lowDate']) / 10000000\n head.update({'dgTime' : datetime(1601, 1, 1, 0, 0, 0) + timedelta(seconds = ntSecs)})\n \n #get channel ID\n head.update({'cid' : self.CID.index(head['chID'].decode('ascii'))})\n \n #get current ping number\n ping_count[head['cid']] += 1\n ping_n = ping_count[head['cid']]\n \n #get sample count\n sampleCount = head['SampleCount']\n \n #convert datatype to binary and reverse\n dTbin = format(head['dataType'],'b')[::-1]\n \n #check file format for complex, and angular information to compute power\n if dTbin[0:10][0] == format(1,'b'):\n if dTbin[0:10][1] == format(1, 'b'):\n #power and angular information\n if sampleCount * 4 == idx['length'][i] - self.DATAGRAM_HEADER_SIZE - 12 - 128:\n head.update({'power': np.fromfile(file, \n dtype = str(int(sampleCount)) + 'i2', \n count=1)[0] * 10. * np.log10(2) / 256.})\n angles = np.fromfile(file, \n dtype = str(int(2*sampleCount)) + 'i1', \n count=1)[0].reshape(2,sampleCount)\n head.update({'acrossPhi': angles[0]})\n head.ip({'alongPhi': angles[1]})\n else:\n #only power information\n head.update({'power': np.fromfile(file, \n dtype = str(int(sampleCount)) + 'i2', \n count=1)[0] * 10. * np.log10(2) / 256. 
})\n #...complex samples are available\n else: \n \n nb_cplx_per_samples =int(dTbin[7:][::-1],2)\n \n if dTbin[3] == format(1, 'b'):\n fmt = 'f' #'float32'\n ds = 4 #4 bytes\n elif dTbin[2] == format(1, 'b'):\n fmt = 'e' #'float16'\n ds = 2 #2 bytes\n if sampleCount > 0:\n temp = np.fromfile(file, \n dtype=np.dtype([('cplx_samples', fmt + str(ds))]), \n count=int(nb_cplx_per_samples) * int(sampleCount))\n temp =np.array([i[0] for i in temp]).T\n temp = temp.reshape(int(sampleCount),-1)\n if temp.size % nb_cplx_per_samples != 0:\n sampleCount = 0\n else:\n sampleCount = temp.size / nb_cplx_per_samples\n comp_sig = {}\n for isig in range(int(nb_cplx_per_samples/2)):\n comp_sig.update([('comp_sig_%i'%int(isig), \n temp[:, 1 + 2 * (isig) - 1 ] + complex(0,1) * temp[:, 2+2*(isig)-1]) ] )\n head.update({'comp_sig':comp_sig})\n self.cmpPwrEK80(ping = head, config= self.config_transceiver)\n \n self.pings[head['cid']][ping_n] = head \n #get power, y and complex np arrays in shape \n\n self.power_data = [defaultdict] * self.n_trans\n self.y_data = [defaultdict] * self.n_trans\n self.comp_sig_data = [defaultdict] * self.n_trans\n \n for c in range(self.n_trans):\n if 'power' in self.pings[c][0]:\n self.power_data[c] = np.array([self.pings[c][x]['power'] for x in self.pings[c]]).squeeze()\n if 'y' in self.pings[c][0]: \n self.y_data[c] = np.array([self.pings[c][x]['y'] for x in self.pings[c]]).squeeze()\n if 'comp_sig' in self.pings[c][0]:\n self.comp_sig_data[c] = np.array([np.transpose(np.asarray([(v) for v in self.pings[c][x]['comp_sig'].values()])) for x in self.pings[c]]).squeeze()\n\n params = pd.DataFrame(self.parameters).transpose()\n self.params = params.drop_duplicates()", "def load_xml(self):\n try:\n self.root = XMLReader(self.path).root\n\n #for sign in self.root.findall('./signs/sign'):\n # self.load_sign_xml(sign)\n\n for block in self.root.findall('./blocks/block'):\n self.load_block_xml(block)\n\n # load replacments etc...\n except Exception, e:\n log.exception('error loading buildfile')", "def XML_Broadening(self):\n\n broadening_xml = []\n if 'gamma_air' in self.__dict__:\n lineshape_xml = []\n lineshape_xml.append(' <Lineshape name=\"Lorentzian\">\\n'\\\n ' <Comments>The temperature-dependent pressure'\\\n ' broadening Lorentzian lineshape</Comments>\\n'\\\n ' <LineshapeParameter name=\"gammaL\">\\n'\\\n ' <FitParameters functionRef=\"FgammaL\">\\n'\\\n ' <FitArgument name=\"T\" units=\"K\">\\n'\\\n ' <LowerLimit>240</LowerLimit>\\n'\\\n ' <UpperLimit>350</UpperLimit>\\n'\\\n ' </FitArgument>\\n'\\\n ' <FitArgument name=\"p\" units=\"K\">\\n'\\\n ' <LowerLimit>0.</LowerLimit>\\n'\\\n ' <UpperLimit>1.2</UpperLimit>\\n'\\\n ' </FitArgument>\\n'\\\n ' <FitParameter name=\"gammaL_ref\">\\n')\n if self.gamma_air.ref is not None:\n lineshape_xml.append(' <SourceRef>%s%s</SourceRef>\\n'\n % (source_prefix, self.gamma_air.ref))\n lineshape_xml.append(' <Value units=\"1/cm\">%s</Value>\\n'\n % self.gamma_air.val)\n if self.gamma_air.err is not None:\n lineshape_xml.append(' <Accuracy><Statistical>'\n '%s</Statistical></Accuracy>\\n' % str(self.gamma_air.err))\n lineshape_xml.append(' </FitParameter>\\n')\n if 'n_air' in self.__dict__:\n lineshape_xml.append(' <FitParameter name=\"n\">\\n')\n if self.n_air.ref is not None:\n lineshape_xml.append(' <SourceRef>%s%s'\n '</SourceRef>\\n' % (source_prefix, self.n_air.ref))\n lineshape_xml.append(' <Value units=\"unitless\">%s'\n '</Value>\\n' % self.n_air.val)\n if self.n_air.err is not None:\n lineshape_xml.append(' <Accuracy><Statistical>'\n 
'%s</Statistical></Accuracy>\\n' % str(self.n_air.err))\n lineshape_xml.append(' </FitParameter>\\n')\n lineshape_xml.append(' </FitParameters>\\n'\\\n ' </LineshapeParameter>\\n</Lineshape>\\n')\n broadening_xml.append(' <Broadening'\n ' envRef=\"Eair-broadening-ref-env\" name=\"pressure\">\\n'\n '%s </Broadening>\\n' % ''.join(lineshape_xml))\n if 'gamma_self' in self.__dict__:\n lineshape_xml = []\n lineshape_xml.append(' <Lineshape name=\"Lorentzian\">\\n'\\\n ' <LineshapeParameter name=\"gammaL\">\\n')\n if self.gamma_self.ref is not None:\n lineshape_xml.append(' <SourceRef>%s%s</SourceRef>\\n'\n % (source_prefix, self.gamma_self.ref))\n lineshape_xml.append(' <Value units=\"1/cm\">%s</Value>\\n'\n % self.gamma_self.val)\n if self.gamma_self.err is not None:\n lineshape_xml.append(' <Accuracy><Statistical>'\\\n '%s</Statistical></Accuracy>\\n' % str(self.gamma_self.err))\n lineshape_xml.append(' </LineshapeParameter>\\n'\\\n ' </Lineshape>\\n')\n broadening_xml.append(' <Broadening'\\\n ' envRef=\"Eself-broadening-ref-env\" name=\"pressure\">\\n'\\\n '%s </Broadening>\\n' % ''.join(lineshape_xml))\n return ' %s\\n' % ''.join(broadening_xml)", "def xmlread(filename):\n\n tree = ET.parse(filename)\n root = tree.getroot()\n value_node_spacecraft = None\n value_node_passdir = None\n node_component = root.find('component')\n for node in node_component:\n if node.get('name') == 'mission': # mission=S1 or spacecraftname=Sentinel-1\n value_node_spacecraft = node.find('value')\n\n node_bursts = node_component.find('component')\n for node in node_bursts:\n if node.get('name') in ['burst1', 'burst2', 'burst3']:\n for property in node:\n if property.get('name') == 'passdirection':\n value_node_passdir = property.find('value')\n\n if value_node_passdir.text == 'DESCENDING':\n passdirection = 'Desc'\n else:\n passdirection = 'Asc'\n\n attrdict = {'missionname': value_node_spacecraft.text, 'passdirection': passdirection}\n\n return attrdict", "def _readMoreXML(self,xmlNode):\n self.outputDeck = -1 # default is the last deck!\n for child in xmlNode:\n if child.tag == 'outputDeckNumber':\n try : self.outputDeck = int(child.text)\n except ValueError: raise ValueError(\"can not convert outputDeckNumber to integer!!!! 
Got \"+ child.text)", "def parseMANEUVER(filename):\n print \"\\nNow Parsing MANEUVER file\"\n ###### Load The dataframe\n df,combo_filename = getPairedEventFiles(filename,cols=[\"Target\",\"Start\",\"Stop\",\"Duration\"])\n\n ###### Find Start and Stop Times\n startTime,stopTime = getStartStopTimes(df)\n\n root = generateXMLHeader(startTime,stopTime,combo_filename)\n\n ###### Iterate over COMM dataframe (each row of events)\n for idx,row in df.iterrows():\n utcStart =convertTimeFormat(row.Start)\n duration = str((convertTimeFormat(row.Stop,to_string=False) - \n convertTimeFormat(row.Start,to_string=False)).total_seconds()*1e3)\n duration = str(row.Duration*1e3)\n uid = str(getNextUniqueID())\n descr = \"MANEUVER\"\n sat = row.Sat\n param_names = [\"ACS_POINT\"]\n param_values = [row.Target]\n\n entity_names = ['UTC_Start_Time','Duration','Unique_Id','Event_Description','Sat',\n 'Entity','List_of_Event_Parameters']\n event_params = {'Event_Parameter':[{'Event_Par_Name':param_names[i],'Event_Par_Value':param_values[i]} for i in np.arange(len(param_names))]}\n\n xml_entity = None\n\n entity_values = [utcStart,duration,uid,descr,sat,xml_entity,event_params]\n\n entities = dict(zip(entity_names,entity_values))\n root = createEventElement(root,entities,override_keys=entity_names) # Override to preserve order in xml\n \n # csv_filename = string.replace(string.replace(filename.split('/')[-1],'SAT1_',''),'SAT2_','')\n return root,combo_filename,df", "def _read_xml_database( filename ):\n\n def parse_simple_node_list( node, node_name, attribute_name ):\n \"\"\"\n Parses a list of nodes from a parent node. Each node in the list is\n expected to have the same element name and the value to parse is\n in the specified attribute. The values are returned as a list with\n the same order as encountered.\n\n If an unexpected child node is encountered, a RuntimeError is raised\n indicating the node found.\n\n Takes 3 argument:\n\n node - Element whose children nodes are to be parsed.\n node_name - Name of the children nodes to parse. Any child node\n with a differing name will raise an exception.\n attribute_name - Name of the children nodes' attribute to parse for\n values.\n\n Returns 1 value:\n\n values - List of value strings parsed from the children nodes.\n\n \"\"\"\n\n values = []\n\n for child_index, child_node in enumerate( node ):\n if child_node.tag != node_name:\n raise RuntimeError( \"Expected '{:s}' but got '{:s}' instead for child #{:d}.\".format( node_name,\n child_node.tag,\n child_index ) )\n\n values.append( child_node.get( attribute_name ) )\n\n return values\n\n def parse_art_fields_node( art_fields_node ):\n \"\"\"\n Parses the art fields from the supplied node. 
Returns a dictionary\n with the following keys:\n\n types - List of art types.\n sizes - List of art sizes.\n qualities - List of art qualities.\n\n No validation is performed on the values parsed.\n\n Takes 1 argument:\n\n art_fields_node - Element whose children contain art field nodes to\n parse.\n\n Returns 1 value:\n\n art_fields - Dictionary whose values are lists of the values parsed.\n See above for a list of keys.\n \"\"\"\n\n if len( art_fields_node ) != 3:\n raise RuntimeError( \"Expected 3 children, received {:d}.\".format( len( art_fields_node ) ) )\n\n # parse each of our children nodes as simple lists.\n if art_fields_node[0].tag != \"Types\":\n raise RuntimeError( \"Expected the 1st child to be 'Types', received '{:s}'.\".format( art_fields_node[0].tag ) )\n types = parse_simple_node_list( art_fields_node[0], \"Type\", \"name\" )\n\n if art_fields_node[1].tag != \"Sizes\":\n raise RuntimeError( \"Expected the 2nd child to be 'Sizes', received '{:s}'.\".format( art_fields_node[1].tag ) )\n sizes = parse_simple_node_list( art_fields_node[1], \"Size\", \"name\" )\n\n if art_fields_node[2].tag != \"Qualities\":\n raise RuntimeError( \"Expected the 3rd child to be 'Qualities', received '{:s}'.\".format( art_fields_node[2].tag ) )\n qualities = parse_simple_node_list( art_fields_node[2], \"Quality\", \"name\" )\n\n return { \"types\": types,\n \"sizes\": sizes,\n \"qualities\": qualities }\n\n def parse_processing_states_node( processing_states_node ):\n \"\"\"\n Parses the processing states from the supplied node. Returns a list\n of values. No validation is performed on the values parsed.\n\n Takes 1 argument:\n\n processing_states_node - Element whose children nodes contain the\n processing states to parse.\n\n Returns 1 value:\n\n processing_states - List of processing states parsed.\n\n \"\"\"\n\n return parse_simple_node_list( processing_states_node, \"State\", \"name\" )\n\n def parse_artists_node( artists_node ):\n \"\"\"\n Parses the artists from the supplied node. Returns a list of values.\n No validation is performed on the values parsed.\n\n Takes 1 argument:\n\n artists_node - Element whose children nodes contain the artists to\n parse.\n\n Returns 1 value:\n\n artists - List of artists parsed.\n\n \"\"\"\n\n return parse_simple_node_list( artists_node, \"Artist\", \"name\" )\n\n def parse_fields_node( fields_node ):\n \"\"\"\n Parses the databases fields from the supplied node. Returns a\n dictionary for the art fields and a list of artists. The dictionary's\n keys are:\n\n types - List of art types.\n sizes - List of art sizes.\n qualities - List of art qualities.\n\n No validation is performed on the values parsed.\n\n Takes 1 argument:\n\n fields_node - Element whose children nodes contain the art fields,\n processing states, and artists to parse.\n\n Returns 2 values:\n\n art_fields - Dictionary whose values are lists of the values\n parsed. 
See above for a list of keys.\n processing_states - List of processing states.\n\n \"\"\"\n\n if len( fields_node ) != 3:\n raise RuntimeError( \"parse_fields_node(): Expected 3 nodes but got {:d}.\".format( len( fields_node ) ) )\n\n if fields_node[0].tag != \"ArtFields\":\n raise RuntimeError( \"\" )\n art_fields = parse_art_fields_node( fields_node[0] )\n\n if fields_node[1].tag != \"ProcessingStates\":\n raise RuntimeError( \"\" )\n processing_states = parse_processing_states_node( fields_node[1] )\n\n if fields_node[2].tag != \"Artists\":\n raise RuntimeError( \"\" )\n artists = parse_artists_node( fields_node[2] )\n\n return ({ \"types\": art_fields[\"types\"],\n \"sizes\": art_fields[\"sizes\"],\n \"qualities\": art_fields[\"qualities\"],\n \"artists\": artists },\n processing_states )\n\n def parse_photos_node( photos_node ):\n \"\"\"\n Parses a Photos node into a list of PhotoRecord objects. No\n validation is performed on the values parsed.\n\n Takes 1 argument:\n\n photos_node - Element whose children represent the database's photo\n records.\n\n Returns 1 value:\n\n photos - List of PhotoRecord objects parsed.\n\n \"\"\"\n\n photos = []\n\n for photo_index, photo_node in enumerate( photos_node ):\n if photo_node.tag != \"Photo\":\n raise RuntimeError( \"Expected a Photo node but got {:s} [#{:d}].\".format( photo_node.tag,\n photo_index ) )\n\n # get a proper dictionary of this node's attributes.\n attributes = photo_node.attrib\n\n # these are our mandatory arguments for building a PhotoRecord...\n id = int( attributes.pop( \"id\", None ) )\n filename = attributes.pop( \"filename\", None )\n\n # ... and these are the optional ones.\n state = attributes.pop( \"processing_state\", \"unreviewed\" ) # name change.\n created_time = float( attributes.pop( \"created_time\", \"0.0\" ) )\n modified_time = float( attributes.pop( \"modified_time\", \"0.0\" ) )\n location = attributes.pop( \"location\", None )\n photo_time = float( attributes.pop( \"photo_time\", \"0.0\" ) )\n resolution = attributes.pop( \"resolution\", None )\n rotation = int( attributes.pop( \"rotation\", \"0\" ) )\n tags = attributes.pop( \"tags\", \"\" )\n\n # handle conversion between our XML and internal Python\n # representations. resolutions are specified as \"NxM\" and\n # locations as \"X, Y\". tags is a comma delimited list of\n # strings.\n if resolution is not None:\n if resolution == \"\":\n resolution = None\n else:\n resolution = [size for size in map( int, resolution.split( \"x\" ) )]\n\n if tags == \"\":\n tags = []\n else:\n tags = [string for string in map( lambda x: x.strip(), tags.split( \",\" ) )]\n\n # take care to only create a location if the attribute was more\n # than just whitespace (or empty).\n if location is not None:\n if location == \"\":\n location = None\n else:\n location = [where for where in map( float, location.split( \",\" ) )]\n\n #\n # NOTE: all of the remaining attributes are fine to be passed as is.\n #\n photos.append( PhotoRecord( id,\n filename,\n created_time=created_time,\n location=location,\n modified_time=modified_time,\n photo_time=photo_time,\n resolution=resolution,\n rotation=rotation,\n state=state,\n tags=tags,\n **attributes ) )\n\n return photos\n\n def parse_arts_node( arts_node ):\n \"\"\"\n Parses a Arts node into a list of ArtRecord objects. 
No validation is\n performed on the values parsed.\n\n Takes 1 argument:\n\n art_node - Element whose children represent the database's art\n records.\n\n Returns 1 value:\n\n art - List of ArtRecord objects parsed.\n\n \"\"\"\n\n art = []\n\n for art_index, art_node in enumerate( arts_node ):\n if art_node.tag != \"Art\":\n raise RuntimeError( \"Expected a Art node but got {:s} [#{:d}].\".format( art_node.tag,\n art_index ) )\n\n # get a proper dictionary of this node's attributes.\n attributes = art_node.attrib\n\n # these are our mandatory arguments for building a ArtRecord...\n id = int( attributes.pop( \"id\", None ) )\n photo_id = int( attributes.pop( \"photo_id\", None ) )\n art_type = attributes.pop( \"type\", None )\n\n # ... and these are the optional ones.\n date = attributes.pop( \"date\", \"\" )\n state = attributes.pop( \"processing_state\", \"unreviewed\" ) # name change.\n artists = attributes.pop( \"artists\", \"Unknown\" )\n associates = attributes.pop( \"associates\", \"\" )\n vandals = attributes.pop( \"vandals\", \"\" )\n created_time = float( attributes.pop( \"created_time\", None ) )\n modified_time = float( attributes.pop( \"modified_time\", None ) )\n region = attributes.pop( \"region\", None )\n tags = attributes.pop( \"tags\", \"\" )\n\n # handle conversion between our XML and internal Python\n # representations. artists, associates, tags, and vandals are all\n # comma delimited lists. region is a comma delimited 4-tuple of\n # normalized floats.\n if artists == \"\":\n artists = [\"Unknown\"]\n else:\n artists = [string for string in map( lambda x: x.strip(), artists.split( \",\" ) )]\n if associates == \"\":\n associates = []\n else:\n associates = [string for string in map( lambda x: x.strip(), associates.split( \",\" ) )]\n if tags == \"\":\n tags = []\n else:\n tags = [string for string in map( lambda x: x.strip(), tags.split( \",\" ) )]\n if vandals == \"\":\n vandals = []\n else:\n vandals = [string for string in map( lambda x: x.strip(), vandals.split( \",\" ) )]\n\n if region is not None:\n region = tuple( [value for value in map( float, region.split( \",\" ))] )\n\n art.append( ArtRecord( id,\n photo_id,\n art_type,\n artists=artists,\n associates=associates,\n created_time=created_time,\n date=date,\n modified_time=modified_time,\n region=region,\n state=state,\n tags=tags,\n vandals=vandals,\n **attributes ) )\n\n return art\n\n def validate_art_fields( art_fields, processing_states ):\n \"\"\"\n Validates the art fields and processing states to ensure that they are\n suitable for processing. 
Ensures that there is at least one value\n for each field and that there aren't duplicates within a field.\n\n If the supplied arguments are invalid a RuntimeError describing the\n validation error is raised.\n\n Takes 2 arguments:\n\n art_fields - Dictionary (from parse_fields_node()) to validate.\n processing_states - List of processing states to validate.\n\n Returns nothing.\n\n \"\"\"\n\n # validate that we did not have any duplicate fields in what we read.\n duplicate_art_types = [item for item, count in collections.Counter( fields[0][\"types\"] ).items() if count > 1]\n duplicate_art_sizes = [item for item, count in collections.Counter( fields[0][\"sizes\"] ).items() if count > 1]\n duplicate_art_qualities = [item for item, count in collections.Counter( fields[0][\"qualities\"] ).items() if count > 1]\n duplicate_artists = [item for item, count in collections.Counter( fields[0][\"artists\"] ).items() if count > 1]\n duplicate_processing_states = [item for item, count in collections.Counter( fields[1] ).items() if count > 1]\n\n if len( duplicate_art_types ) > 0:\n raise RuntimeError( \"Duplicate art types: {:s}\".format( \", \".join( duplicate_art_types ) ) )\n elif len( fields[0][\"types\"] ) == 0:\n raise RuntimeError( \"No art types were parsed.\" )\n\n if len( duplicate_art_sizes ) > 0:\n raise RuntimeError( \"Duplicate art sizes: {:s}\".format( \", \".join( duplicate_art_sizes ) ) )\n elif len( fields[0][\"sizes\"] ) == 0:\n raise RuntimeError( \"No art sizes were parsed.\" )\n\n if len( duplicate_art_qualities ) > 0:\n raise RuntimeError( \"Duplicate art qualities: {:s}\".format( \", \".join( duplicate_art_qualities ) ) )\n elif len( fields[0][\"qualities\"] ) == 0:\n raise RuntimeError( \"No art qualities were parsed.\" )\n\n if len( duplicate_artists ) > 0:\n raise RuntimeError( \"Duplicate artists: {:s}\".format( \", \".join( duplicate_artists ) ) )\n elif len( fields[0][\"artists\"] ) == 0:\n raise RuntimeError( \"No artists were parsed.\" )\n\n if len( duplicate_processing_states ) > 0:\n raise RuntimeError( \"Duplicate aprocessing states: {:s}\".format( \", \".join( duplicate_processing_states ) ) )\n elif len( fields[1] ) == 0:\n raise RuntimeError( \"No processing states were parsed.\" )\n\n def validate_identifiers( photos, art ):\n \"\"\"\n Validates the photo and art records to ensure that they are suitable\n for processing. 
Ensures that every record's identifier is unique\n within its class, and that every art record has a parent photo record.\n\n If the supplied arguments are invalid a RuntimeError describing the\n validation error is raised.\n\n Takes 2 arguments:\n\n photos - List of PhotoRecord objects to validate.\n art - List of ArtRecord objects to validate.\n\n Returns nothing.\n\n \"\"\"\n\n # validate that we did not have any duplicates in our photo record\n # identifiers.\n photo_ids = dict()\n for photo in photos:\n if photo[\"id\"] in photo_ids:\n photo_ids[photo[\"id\"]] += 1\n else:\n photo_ids[photo[\"id\"]] = 1\n\n duplicate_photo_ids = [photo_id for photo_id, count in photo_ids.items() if count > 1]\n\n # validate that we don't have duplicate art record identifiers, as well as\n # seeing if we have any orphaned art records (read: no parent photo\n # record).\n art_ids = dict()\n orphaned_art_ids = []\n for record in art:\n if record[\"id\"] in art_ids:\n art_ids[record[\"id\"]] += 1\n else:\n art_ids[record[\"id\"]] = 1\n\n if record[\"photo_id\"] not in photo_ids:\n orphaned_art_ids.append( record[\"id\"] )\n\n duplicate_art_ids = [art_id for art_id, count in art_ids.items() if count > 1]\n\n if len( duplicate_photo_ids ) > 0:\n raise RuntimeError( \"Duplicate photo ID: {:s}.\".format( \", \".join( map( str, duplicate_photo_ids ) ) ) )\n\n if len( duplicate_art_ids ) > 0:\n raise RuntimeError( \"Duplicate art ID: {:s}.\".format( \", \".join( map( str, duplicate_art_ids ) ) ) )\n\n if len( orphaned_art_ids ) > 0:\n raise RuntimeError( \"Orphaned art records: {:s}.\".format( \", \".join( map( str, orphaned_art_ids ) ) ) )\n\n # read the XML file as a giant string and then convert its DOM into\n # something we can work with.\n with open( filename, \"rt\" ) as xml_file:\n xml_string = xml_file.read()\n root_node = etree.fromstring( xml_string )\n\n if len( root_node ) != 3:\n raise RuntimeError( \"Expected 3 elements within the document, but received {:d}.\".format( len( root_node ) ) )\n\n # parse the fields, and our photos and art records.\n if root_node[0].tag != \"Fields\":\n raise RuntimeError( \"\" )\n fields = parse_fields_node( root_node[0] )\n\n if root_node[1].tag != \"Photos\":\n raise RuntimeError( \"\" )\n photos = parse_photos_node( root_node[1] )\n\n if root_node[2].tag != \"Arts\":\n raise RuntimeError( \"\" )\n art = parse_arts_node( root_node[2] )\n\n # validate what we received so we don't pass garbage back to the user.\n validate_art_fields( fields[0], fields[1] )\n validate_identifiers( photos, art )\n\n # XXX: rework the interface here\n return (fields[0], fields[1], photos, art)", "def read_sfbxmlfile(self):\n sfbxml = self.sdict['sfbxml']\n #open elementtree\n try:\n tree = et.parse(sfbxml)\n except:\n #return \"empty\" xml file\n series = et.Element('Series')\n tree = et.ElementTree(series)\n \n series = tree.getroot()\n for _exp_el in series.findall('Experiment'):\n print _exp_el, _exp_el.tag, _exp_el.attrib\n _path = _exp_el.attrib['path']\n _id = _exp_el.attrib['id']\n self.add_new_experiment(str(_path), int(_id))\n #adding parameters to experiment\n \"\"\"\n for _para_el in _exp_el.findall('Parameters'):\n for _input_el in _para_el.findall('Input'):\n _n = _input_el.attrib['name']\n _u = _input_el.attrib['unit']\n _v = _input_el.text\n self.exp[str(_id)].add_parameter(_n, _v, _u)\n \"\"\"", "def loadFromXmlPart(self,xmlPart):\r\n\t\t\"\"\" we need to display the egg of the station 
\"\"\"\r\n\t\tself.id=int(xmlPart.getElementsByTagName('idstation')[0].firstChild.data)\r\n\t\tself.name=str(xmlPart.getElementsByTagName('name')[0].firstChild.data)\r\n\t\tposx=float(xmlPart.getElementsByTagName('posx')[0].firstChild.data)\r\n\t\tposy=float(xmlPart.getElementsByTagName('posy')[0].firstChild.data)\r\n\t\tposz=float(xmlPart.getElementsByTagName('posz')[0].firstChild.data)\r\n\t\thprh=float(xmlPart.getElementsByTagName('hprh')[0].firstChild.data)\r\n\t\thprp=float(xmlPart.getElementsByTagName('hprp')[0].firstChild.data)\r\n\t\thprr=float(xmlPart.getElementsByTagName('hprr')[0].firstChild.data)\r\n\t\tself.mass=float(xmlPart.getElementsByTagName('mass')[0].firstChild.data)\r\n\t\tself.egg=str(xmlPart.getElementsByTagName('egg')[0].firstChild.data)\r\n\t\tself.scale=float(xmlPart.getElementsByTagName('scale')[0].firstChild.data)\r\n\t\tself.exitZone=int(xmlPart.getElementsByTagName('exitzone')[0].firstChild.data)\r\n\t\tself.pos=(posx,posy,posz)\r\n\t\tself.hpr=(hprh,hprp,hprr)\r\n\t\tif self.inSpace==True:\r\n\t\t\tself.node=loader.loadModel(shimConfig.getInstance().getConvRessourceDirectory() +self.egg)\r\n\t\t\tself.node.reparentTo(render)\r\n\t\t\tself.node.setName( self.name)\r\n\t\t\tself.node.setPos(self.pos)\r\n\t\t\tself.node.setHpr(self.hpr)\r\n\t\t\tself.node.setTag(\"name\",\"station\")\r\n\t\t\tself.node.setTag(\"classname\",\"station\")\r\n\t\t\tself.node.setTag(\"id\",str(self.id))\r\n\t\t\tself.node.setShaderAuto()", "def XMLScanModification2(DH_atoms, angle=0.174533, step=36, sigma_0=3.14159, MC_sigma=0.523599, repeat=10000, t_0=300, t_i=300): #TODO:Running, test settings needed\n\n Algorithm = {\n '1': ' <Algorithm>', \n '2': ' <TransformationSequence repeats=\"1\" weight=\"1.0\">', \n '3': 'AbsoluteTransformation', \n '4': ' </TransformationSequence>', \n '5': ' <TransformationSequence repeats=\"36\" weight=\"1.0\">', \n '6': 'RelativeTransformation', \n '7': ' <TransformationSequence repeats=\"1\" weight=\"1.0\">', \n '8': ' <RepeatedMove>', \n '9': ' <repeats>1000</repeats>', \n '10': ' <tend>300.0</tend>', \n '11': ' <tscaling>geometric</tscaling>', \n '12': ' <tstart>300.0</tstart>', \n '13': ' <TransformationSequence weight=\"1.0\" repeats=\"1\">', \n '14': ' <TransformationSequence repeats=\"1\" weight=\"1.0\">', \n '15': ' <RecalcEnergies>', \n '16': ' <beginstep>0</beginstep>', \n '17': ' <laststep>0</laststep>', \n '18': ' <stepmod>1000</stepmod>', \n '19': ' </RecalcEnergies>', \n '20': ' <PrintEnergy>', \n '21': ' <beginstep>0</beginstep>', \n '22': ' <laststep>0</laststep>', \n '23': ' <stepmod>1000</stepmod>', \n '24': ' </PrintEnergy>', \n '25': ' <BestConfigurationOutput>', \n '26': ' <beginstep>0</beginstep>', \n '27': ' <filename>best.pdb</filename>', \n '28': ' <laststep>0</laststep>', \n '29': ' <stepmod>1</stepmod>', \n '30': ' <type>pdb</type>', \n '31': ' </BestConfigurationOutput>', \n '32': ' <ConfigurationOutput>', \n '33': ' <beginstep>0</beginstep>', \n '34': ' <filename>trajectory.pdb</filename>', \n '35': ' <laststep>0</laststep>', \n '36': ' <only_new>0</only_new>', \n '37': ' <stepmod>1000</stepmod>', \n '38': ' <type>pdb</type>', \n '39': ' </ConfigurationOutput>', \n '40': ' <MetadataOutput>', \n '41': ' <beginstep>0</beginstep>', \n '42': ' <laststep>0</laststep>', \n '43': ' <metadataname>Summary</metadataname>', \n '44': ' <stepmod>1000</stepmod>', \n '45': ' </MetadataOutput>', \n '46': ' </TransformationSequence>', \n '47': ' <ConditionalTransformation weight=\"1.0\">', \n '48': ' <MetropolisAcceptanceCriterion>', \n 
'49': ' <energymodel_nr>0</energymodel_nr>', \n '50': ' <kB>0.0019858775</kB>', \n '51': ' </MetropolisAcceptanceCriterion>', \n '52': ' <TransformationChoice weight=\"1.0\" repeats=\"1\">', \n '53': 'RelativeRandomTransformation', \n '54': ' </TransformationChoice>', \n '55': ' </ConditionalTransformation>', \n '56': ' </TransformationSequence>', \n '57': ' </RepeatedMove>', \n '58': ' </TransformationSequence>', \n '59': ' </TransformationSequence>', \n '60': ' </Algorithm>'}\n\n #UPDATE step=36, repeat=10000, t_0=300, t_i=300\n Algorithm.update({'5': ' <TransformationSequence repeats=\"{}\" weight=\"1.0\">'.format(step)})\n Algorithm.update({'9': ' <repeats>{}</repeats>'.format(repeat)})\n Algorithm.update({'10': ' <tend>{}.0</tend>'.format(t_i)})\n Algorithm.update({'12': ' <tstart>{}.0</tstart>'.format(t_0)})\n # '50': ' <kB>0.0019858775</kB>'\n\n #dh_id = 2, sigma = 3\n AbsoluteDHLib = {\n '1': ' <SetDihedralAbsolute weight=\"1.0\">', \n '2': ' <dihedral_id>0</dihedral_id>', \n '3': ' <value>3.14159</value>', \n '4': ' </SetDihedralAbsolute>'}\n\n #dh_id = 2, sigma = 3\n RelativeDHLib = {\n '1': ' <SetDihedralRelative weight=\"1.0\">', \n '2': ' <dihedral_id>0</dihedral_id>', \n '3': ' <value>0.174533</value>', \n '4': ' </SetDihedralRelative>'}\n\n #dh_id = 2, sigma = 4, distribution_type = 3\n RelativeRandomDHLib = {\n '1': ' <SetDihedralRelativeRandom weight=\"1.0\">', \n '2': ' <dihedral_id>1</dihedral_id>', \n '3': ' <distribution type=\"gaussian\">', \n '4': ' <sigma>0.523599</sigma>', \n '5': ' <mean>0.0</mean>', \n '6': ' </distribution>', \n '7': ' </SetDihedralRelativeRandom>'}\n\n for dh_id in range(0, len(DH_atoms)):\n os.mkdir('{}_SIMONAScanStep'.format(dh_id))\n os.chdir('{}_SIMONAScanStep'.format(dh_id))\n\n with open('{}_dihedralscan.xml'.format(dh_id), 'w') as outfile:\n XMLLines = []\n AlgorithmLimits = []\n with open('../SIMONA_inputs/tmp.xml','r') as infile: #'../SIMONA_inputs/tmp.xml'\n for line in infile:\n XMLLines.append(line)\n\n for index, line in enumerate(XMLLines):\n if \"<Algorithm>\" in line:\n AlgorithmLimits.append(index)\n if \"</Algorithm>\" in line:\n AlgorithmLimits.append(index)\n\n #write all lines from 0 to <Algorithm>\n for idx in range(0,AlgorithmLimits[0]): # not including Algorithm\n outfile.write(XMLLines[idx])\n #print(Algorithm.get(str(6)))\n #ALGORITHM section\n for AlgorithmLine in range(1, len(list(Algorithm))+1):\n\n if Algorithm.get(str(AlgorithmLine)) == \"AbsoluteTransformation\":\n AbsoluteDHLib.update({'2': ' <dihedral_id>{}</dihedral_id>'.format(dh_id)})\n AbsoluteDHLib.update({'3': ' <value>{}</value>'.format(sigma_0)})\n for idx in range(1, len(list(AbsoluteDHLib))+1):\n outfile.write(AbsoluteDHLib.get(str(idx)) + \"\\n\")\n elif Algorithm.get(str(AlgorithmLine)) == \"RelativeTransformation\":\n RelativeDHLib.update({'2': ' <dihedral_id>{}</dihedral_id>'.format(dh_id)})\n RelativeDHLib.update({'3': ' <value>{}</value>'.format(angle)})\n for idx in range(1, len(list(RelativeDHLib))+1):\n outfile.write(RelativeDHLib.get(str(idx)) + \"\\n\")\n elif Algorithm.get(str(AlgorithmLine)) == \"RelativeRandomTransformation\":\n RelativeRandomDHLib.update({'4': ' <sigma>{}</sigma>'.format(MC_sigma)})\n for dh_id2 in range(0, len(DH_atoms)):\n if dh_id2 == dh_id:\n continue\n else:\n RelativeRandomDHLib.update({'2': ' <dihedral_id>{}</dihedral_id>'.format(dh_id2)})\n for idx in range(1, len(list(RelativeRandomDHLib))+1):\n outfile.write(RelativeRandomDHLib.get(str(idx)) + \"\\n\")\n else:\n outfile.write(Algorithm.get(str(AlgorithmLine))+ 
\"\\n\")\n\n\n #Last lines of XML file\n #(AlgorithmLimits)\n for idx in range(AlgorithmLimits[1]+1,len(XMLLines)): # not including Algorithm\n outfile.write(XMLLines[idx])\n\n os.chdir('../')", "def __init__(self, article_xml, n=1, is_stemmed=False):\n\n try: self.tree = ET.fromstring(article_xml)\n except ValueError:\n print article_xml\n \n try: self.title = self.tree.find('title').text\n except: self.title = ''\n try: self.headline = self.tree.find('headline').text\n except: self.headline = ''\n try:\n self.byline = self.tree.find('byline').text\n except:\n self.byline = ''\n \n self.paragraphs = self.tree.findall('text/p')\n try: self.text = '\\n'.join(p.text for p in self.paragraphs)\n except: self.text = ''\n \n self.document = '\\n'.join([self.title, self.byline, self.text])\n \"\"\" \n The document is the collection of tokens we wish to include in\n our estimation problem (e.g. title, text).\n \n I joined title, headline, byline, and text into the document,\n but if you wish to analyze some subset of these, simply change\n the instantiation.\n \"\"\"\n \n self.codeses = self.tree.findall(\".//codes\")\n try:\n self.region_codes = filter(lambda codes: 'countries' in codes.attrib['class'], self.codeses)[0]\n \"\"\"\n In this line, I arbitrarily code a document's region as\n the first listed code. This is a strong assumption that\n should be tweaked in later investigation, here as well as\n for the industry and topic codes.\n \"\"\"\n self.region_code = self.region_codes.find('code').attrib['code']\n except:\n self.region_code = None\n \n try:\n self.industry_codes = filter(lambda codes: 'industries' in codes.attrib['class'], self.codeses)[0]\n self.industry_code = self.industry_codes.find('code').attrib['code']\n except:\n self.industry_code = None\n \n try:\n self.topic_codes = filter(lambda codes: 'topics' in codes.attrib['class'], self.codeses)[0]\n self.topic_code = self.topic_codes.find('code').attrib['code']\n except:\n\t self.topic_codes = None\n\t self.topic_code = None\n \n self.region_code_id = region_code_id(self.region_code)\n self.industry_code_id = industry_code_id(self.industry_code)\n self.topic_code_id = topic_code_id(self.topic_code)\n\n self.tokens = self.__tokenize(is_useful=None)\n #self.tokens = self.__tokenize(is_useful=self.__is_not_stopword)\n self.ngrams = self.__get_ngrams(n)\n self.generalized_bigrams = self.__get_generalized_bigrams()", "def process_data(self):\n time_start = time.time()\n exemption_count = 0\n count_tags = 0\n count_tags_cleaned = 0\n print(\"Start time: {}\".format(time.asctime()))\n print(\"Processing OSM file...\")\n self.reset_data_files() # Reset data files to avoid data duplication\n for element in self.get_element():\n element_id = element.attrib['id']\n lat = element.get('lat')\n lon = element.get('lon') \n try:\n self.node_writer.writerow({k:v for k, v in element.attrib.items()})#.encode('utf-8')\n except:\n exemption_count += 1\n continue\n if len(element) == 0:\n continue \n for child in element.findall(\"./tag\"):\n child.attrib['id'] = element_id\n k = child.get('k')\n v = child.get('v')\n new = None\n count_tags += 1\n if k == 'addr:postcode':\n new = self.shape_zipcode(v, lat, lon)\n child.attrib['v'] = new\n elif k == 'addr:city':\n new = self.shape_city(v)\n child.attrib['v'] = new\n elif k == 'addr:street':\n new = self.shape_streetname(v)\n child.attrib['v'] = new\n elif k == 'population':\n city_name = element.findall(\"./tag[@k='name']\")[0].attrib['v']\n new = self.shape_population(city_name, v)\n child.attrib['v'] 
= new\n if new != v:\n count_tags_cleaned += 1\n try:\n self.tag_writer.writerow({k:v for k, v in child.attrib.items()})\n except:\n exemption_count += 1\n continue\n self.nodes_csv_file.close()\n self.tags_csv_file.close()\n self.cleaned_cities.close()\n self.cleaned_population.close()\n self.cleaned_streets.close()\n self.cleaned_zipcodes.close()\n time_end = time.time()\n total_time = round(time_end - time_start, 1)\n print(\"Data processed in {} secs.\".format(total_time))\n print(\"Number of tags reviewed: {}\".format(count_tags))\n print(\"Number of tags corrected: {}\".format(count_tags_cleaned))\n print(\"Number of tags and nodes exempted: {}\".format(exemption_count))", "def read_xml_file(self, xml_fn):\n pass", "def eval_xml(xml_string):\n result_dict = {}\n provisional_references_dict = {}\n # Parse XML string\n doc = ET.fromstring(xml_string)\n # List containing the relevant tags\n tags = ['doknr', 'ecli', 'gertyp', 'gerort', 'spruchkoerper', 'entsch-datum', 'aktenzeichen', 'doktyp', 'norm', 'vorinstanz', 'gruende', 'entscheidungsgruende', 'identifier', 'sonstlt', 'abwmeinung', 'tatbestand', 'tenor', 'sonstosatz', 'leitsatz', 'titelzeile', 'mitwirkung', 'region']\n tags_translation = {'doknr': 'documentnumber',\n 'ecli': 'ecli',\n 'gertyp': 'court',\n 'gerort': 'courtlocation',\n 'spruchkoerper': 'spruchkoerper',\n 'entsch-datum': 'date',\n 'aktenzeichen': 'filenumber',\n 'doktyp': 'documenttype',\n 'entscheidungsgruende': 'reasonfordecision',\n 'abwmeinung': 'abwmeinung',\n 'sonstosatz': 'miscsentence',\n 'norm': 'norms',\n 'vorinstanz': 'previouscourt',\n 'gruende': 'reasons',\n 'identifier': 'identifier',\n 'sonstlt': 'other',\n 'tatbestand': 'offense',\n 'tenor': 'tenor',\n 'leitsatz': 'keysentence',\n 'titelzeile': 'title',\n 'mitwirkung': 'mitwirkung',\n 'region': 'region'}\n\n # Load each tag into dictionary:\n\n outgoing_references_dict = {} # Contains additional information about where the references are\n outgoing_references_set = set() # Contains only the referenced filenumbers\n for tag in tags:\n tag_array = [] # Contains child-tags\n # Iterate through child tags of a tag:\n for child in doc.find(tag).iter():\n if child.text and child.text.rstrip() != \"\":\n if tag == 'entsch-datum':\n tag_array.append(int(child.text)) # Append child date to array as int\n else:\n tag_array.append(child.text.strip()) # Append child tag to array\n # If the array only contains one element, or the tag doesn't have child-tags,\n # only load that tag into the directory. 
Array is empty if there is no value inside the tag:\n if len(tag_array) == 1:\n if tag == 'vorinstanz':\n outgoing_references, outgoing_references_set = ref.find_reference(tag_array, outgoing_references_set)\n #outgoing_references_dict.append(outgoing_references)\n outgoing_references_dict[tag] = outgoing_references\n # Enter Data into the correct translated dict entry\n result_dict[tags_translation[tag]] = tag_array[0]\n else:\n # Specific tags require search for references\n # This path also enters any tags that are contained in arrays\n reference_tags = ['gruende', 'tenor', 'entscheidungsgruende', 'tatbestand', 'leitsatz', 'vorinstanz']\n if tag in reference_tags:\n outgoing_references, outgoing_references_set = ref.find_reference(tag_array, outgoing_references_set)\n # outgoing_references_dict.append(outgoing_references)\n outgoing_references_dict[tag] = outgoing_references #todo tags_translation?\n result_dict[tags_translation[tag]] = tag_array\n\n result_dict['keywords'] = []\n result_dict['incoming_count'] = -1\n result_dict['successful'] = \"\"\n\n # build provisional reference-dict for ES that does not contain incoming references yet:\n provisional_references_dict = create_reference_dict(result_dict['filenumber'], outgoing_references_dict, outgoing_references_set, [], result_dict['documentnumber'])\n # ES fields: [ID][filenumber][list outgoing references][set outgoing references][set incoming references]\n # [sum of incoming references]\n\n return result_dict, provisional_references_dict", "def WriteXML2String(self, xmlVersion=\"1.0\", encoding=\"UTF-8\",\\\n dtdFileName=\"ppm1.1.dtd\", dtdVersionName=\"1.1\"):\n #beginning and end:\n beginSection = \"\"\"<?xml version=\"%s\" encoding=\"%s\"?>\n<!DOCTYPE aria_ppm:list SYSTEM \"%s\">\n<aria_ppm:list>\n <aria_ppm:version>%s</aria_ppm:version>\n\"\"\" % (xmlVersion, encoding, dtdFileName, dtdVersionName)\n\n endSection = \"\"\"</aria_ppm:list>\n\"\"\"\n #loop over all the atoms:\n outString = ''\n for eachObj in self.atomlist:\n beginAssignment = \"\"\" <aria_ppm:assignment>\n\"\"\"\n endAssignment = \"\"\" </aria_ppm:assignment>\n\"\"\"\n beginList = \"\"\" <aria_ppm:atom_list>\n\"\"\"\n endList = \"\"\" </aria_ppm:atom_list>\n\"\"\"\n outString = outString + beginAssignment + beginList\n for eachAtom in eachObj.atomname:\n# print eachAtom #test\n if eachObj.segid:\n segmentName = eachObj.segid\n else:\n segmentName = '' #default\n atomString = \"\"\" <aria_ppm:atom>\n <aria_ppm:segment_name>%s</aria_ppm:segment_name>\n <aria_ppm:residue_number>%s</aria_ppm:residue_number>\n <aria_ppm:atom_name>%s</aria_ppm:atom_name>\n </aria_ppm:atom>\n\"\"\" % (segmentName, eachObj.residuenumber, eachAtom)\n outString = outString + atomString\n ppmString = \"\"\" <aria_ppm:ppm_list>\n <aria_ppm:ppm>%s</aria_ppm:ppm>\n <aria_ppm:ppm_error>%s</aria_ppm:ppm_error>\n </aria_ppm:ppm_list>\n\"\"\" % (eachObj.shift, eachObj.shifterror)\n outString = outString + endList + ppmString + endAssignment\n outString = beginSection + outString + endSection\n return outString", "def read_system_xml(self, db=\"hyperspin\"):\n if db == \"hyperspin\":\n xml = os.path.join(self.db_path, self.system + \".xml\")\n else:\n xml = db\n\n msg = \"Extracting ROM info from {} . . 
.\".format(self.system)\n logger.info(msg)\n\n tree = ET.parse(xml)\n doc = tree.getroot()\n\n # Read Header\n system = {}\n for header in doc.iter(\"header\"):\n for dat in header.iter():\n system[dat.tag] = dat.text\n # Iterate thru the ROMs\n rom = {}\n roms = []\n for game in doc.iter('game'):\n rom['name'] = game.get('name')\n rom['image'] = game.get('image')\n rom['index'] = game.get('index')\n rom['rom'] = False\n rom['artwork1'] = False\n rom['artwork2'] = False\n rom['artwork3'] = False\n rom['artwork4'] = False\n rom['wheel'] = False\n rom['video'] = False\n rom['theme'] = False\n\n for dat in game.iter():\n rom[dat.tag] = dat.text\n roms.append(dict(rom))\n\n for rom in roms:\n if rom[\"crc\"]:\n rom[\"crc\"] = rom[\"crc\"].zfill(8).upper()\n\n return system, roms", "def xml_parser(self):\n\n # Get the XML file\n tree = ET.parse(self.xml_path)\n # Get the root of the tree\n root = tree.getroot()\n\n # Get the model name to write the .mo file\n self.model_name = root.attrib.get('modelName')\n\n # Remove Invalid characters from the model name as this is used\n # by the Modelica model and the FMU\n s = ('Invalid characters will be removed from the model name={!s}.').format(\n self.model_name)\n log.info(s)\n self.model_name = sanitize_name(self.model_name)\n s = ('The new model name is {!s}.').format(self.model_name)\n log.info(s)\n\n if(self.exec_target=='python'):\n # Specify the module name which shouldn't contain invalid characters\n self.module_name=self.model_name+'_wrapper'\n s = ('Declare the Python module name as {!s}.').format(\n self.module_name)\n log.info(s)\n\n # Check if the script fort the module name is in the list of Python scripts\n resource_scripts_base = [os.path.basename(item)\n for item in self.resource_scripts_path]\n if not(self.module_name+'.py' in resource_scripts_base):\n s = (self.module_name+'.py' +' not found in the list of Python scripts={!s}.'\\\n ' The name of the model is {!s}.'\\\n ' Hence the name of the Python wrapper script must be {!s}.').format(\n self.resource_scripts_path, self.module_name, self.module_name+'.py')\n log.error(s)\n raise ValueError(s)\n\n\n if(self.exec_target=='server'):\n # Specify the module name which shouldn't contain invalid characters\n if(platform.system().lower()=='windows'):\n start_server_name='start_server.bat'\n elif(platform.system().lower()=='linux'):\n raise ValueError(\"To be implemented\")\n s = ('Declare the server module name as {!s}.').format(\n start_server_name)\n log.info(s)\n\n # Check if the script fort the module name is in the list of Python scripts\n resource_scripts_base = [os.path.basename(item)\n for item in self.resource_scripts_path]\n if not(start_server_name in resource_scripts_base):\n s = (start_server_name +' not found in the list of Resources files={!s}.')\n log.error(s)\n raise ValueError(s)\n\n # Iterate through the XML file and get the ModelVariables.\n real_input_variable_names = []\n modelica_real_input_variable_names = []\n real_output_variable_names = []\n modelica_real_output_variable_names = []\n real_parameter_variable_names = []\n modelica_real_parameter_variable_names = []\n string_parameter_variable_names = []\n modelica_string_parameter_variable_names = []\n # Parameters used to write annotations.\n inpY1 = 88\n inpY2 = 110\n outY1 = 88\n outY2 = 108\n indel = 20\n outdel = 18\n # Get variables\n scalar_variables = []\n for child in root.iter('ModelVariables'):\n for element in child:\n scalar_variable = {}\n\n # Iterate through ScalarVariables and get attributes\n 
(name, description, causality, vartype, unit, start) = \\\n element.attrib.get('name'), \\\n element.attrib.get('description'), \\\n element.attrib.get('causality'),\\\n element.attrib.get('type'),\\\n element.attrib.get('unit'),\\\n element.attrib.get('start')\n\n if vartype is None:\n s='Variable type of variable={!s} is None.'\\\n ' This is not allowed. Variable type'\\\n ' must be of type Real or String'.format(name)\n raise ValueError(s)\n\n if causality is None:\n s='Causality of variable={!s} is None.'\\\n ' This is not allowed. Variable causality'\n ' must be of input, output, or parameter'.format(name)\n raise ValueError(s)\n\n if (not(vartype in ['Real', 'String'])):\n s = 'Variable type of variable={!s} must be of'\\\n ' type Real or String. The variable type'\n ' is currently set to {!s}'.format(name, vartype)\n raise ValueError(s)\n\n if (not(causality in ['input', 'output', 'parameter'])):\n s = 'Causality of variable={!s} must be of type'\\\n ' input, output, or parameter. The causality is '\n ' currently set to {!s}'.format(name, causality)\n raise ValueError(s)\n\n # Set a default unit for variables other than String\n if unit is None:\n unit=\"1\"\n\n # Iterate through children of ScalarVariables and get\n # attributes\n log.info('Invalid characters will be removed from the '\n 'variable name {!s}.'.format(name))\n new_name = sanitize_name(name)\n log.info('The new variable name is {!s}.'.format(new_name))\n scalar_variable['name'] = new_name\n scalar_variable['vartype'] = vartype\n scalar_variable['causality'] = causality\n scalar_variable['unit'] = unit\n if not (description is None):\n scalar_variable['description'] = description\n\n if not (start is None):\n scalar_variable['start'] = start\n\n if (causality == 'input' and vartype=='Real'):\n if start is None:\n start = 0.0\n scalar_variable['start'] = start\n real_input_variable_names.append(name)\n modelica_real_input_variable_names.append(new_name)\n inpY1 = inpY1 - indel\n inpY2 = inpY2 - indel\n scalar_variable['annotation'] = (' annotation'\n '(Placement'\n '(transformation'\n '(extent={{-122,'\n + str(inpY1) + '},'\n '{-100,' + str(inpY2)\n + '}})))')\n\n if (causality == 'output' and vartype=='Real'):\n real_output_variable_names.append(name)\n modelica_real_output_variable_names.append(new_name)\n outY1 = outY1 - outdel\n outY2 = outY2 - outdel\n scalar_variable['annotation'] = (' annotation'\n '(Placement'\n '(transformation'\n '(extent={{100,'\n + str(outY1) + '},'\n '{120,' + str(outY2)\n + '}})))')\n\n if (causality == 'parameter' and vartype=='Real'):\n if start is None:\n start = 0.0\n scalar_variable['start'] = start\n real_parameter_variable_names.append(name)\n modelica_real_parameter_variable_names.append(new_name)\n\n if (causality == 'parameter' and vartype=='String'):\n if start is None:\n start=\"dummy.txt\"\n scalar_variable['start'] = start\n string_parameter_variable_names.append(name)\n modelica_string_parameter_variable_names.append(new_name)\n\n scalar_variables.append(scalar_variable)\n # perform some checks on variables to avoid name clash\n # before returning the variables to Modelica\n log.info(\n 'Check for duplicates in input, output and parameter variable names.')\n for i in [modelica_real_input_variable_names,\n modelica_real_output_variable_names,\n modelica_real_parameter_variable_names,\n modelica_string_parameter_variable_names]:\n check_duplicates(i)\n\n if(self.exec_target=='python'):\n len_strVar=len(string_parameter_variable_names)\n if 
len(string_parameter_variable_names)>1:\n s = 'The Python architecture supports a maximum of 1 string parameter.'\\\n ' The model description file={!s} lists {!s} variables={!s}. Please correct'\\\n ' the input file prior to compiling the FMU.'.format(self.xml_path,\n len_strVar, string_parameter_variable_names)\n log.error(s)\n raise ValueError(s)\n\n #if(self.exec_target=='python'):\n #res_key_words = ['_configurationFileName', '_saveToFile', 'time']\n # res_key_words = ['_saveToFile', 'time']\n #elif(self.exec_target=='server'):\n res_key_words = ['_saveToFile', 'time']\n for elm in res_key_words:\n for nam in [modelica_real_input_variable_names,\n modelica_real_output_variable_names,\n modelica_real_parameter_variable_names]:\n if elm in nam:\n s = 'Reserved name={!s} is in the list'\\\n ' of input/output/parameters variables={!s}.'\\\n ' Check the XML input file={!s} and correct'\\\n ' the variable name.'.format(elm, nam, self.xml_path)\n log.error(s)\n raise ValueError(s)\n\n if(len(modelica_real_input_variable_names) < 1):\n s = 'The XML input file={!s} does not contain any input variable. '\\\n 'At least, one input variable needs to be defined'.format(self.xml_path)\n log.error(s)\n raise ValueError(s)\n\n if(len(modelica_real_output_variable_names) < 1):\n s = 'The XML input file={!s} does not contain any output variable. '\\\n 'At least, one output variable needs to be defined'.format(self.xml_path)\n log.error(s)\n raise ValueError(s)\n\n s = 'Parsing of {!s} was successfull.'.format(self.xml_path)\n log.info(s)\n print(\"ScalarVariables={!s}\".format(scalar_variables))\n return scalar_variables, real_input_variable_names, \\\n real_output_variable_names, real_parameter_variable_names, \\\n string_parameter_variable_names,\\\n modelica_real_input_variable_names, \\\n modelica_real_output_variable_names, \\\n modelica_real_parameter_variable_names,\\\n modelica_string_parameter_variable_names", "def parse_elements(self):\n if self.root.tag == \"transformation\":\n self.name = self.root.find(\"./info/name\").text\n for step in self.root.iter(\"step\"):\n try:\n self.steps[step.find(\"type\").text].append(step)\n except AttributeError:\n pass\n for hop in self.root.iter(\"hop\"):\n self.hops.append(hop)\n for error_node in self.root.iter(\"error\"):\n self.error_handling.append(error_node)\n elif self.root.tag == \"job\":\n self.name = self.root.find(\"./name\").text\n for step in self.root.iter(\"entry\"):\n self.steps[step.find(\"type\").text].append(step)\n for hop in self.root.iter(\"hop\"):\n self.hops.append(hop)\n else:\n self.logger(\"Invalid XML. Root tag should be 'transformation' or 'job'\")\n raise ValueError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Un-upvote this post (this is different from other downvoting systems).
async def downvote(self) -> None: await self._state.http.vote_on_user_post(self.author.id64, self.id, 0)
[ "def downvote(self):\n self._authenticated_action_click(NinegagXPaths.Post.DOWNVOTE_BUTTON, 'Downvoting')", "def unvote(self, obj):\n obj._set_vote(self, 0)", "def upvote(self):\n self._authenticated_action_click(NinegagXPaths.Post.UPVOTE_BUTTON, 'Upvoting')", "def upvotePost(self):\n self.votes = self.votes + 1\n self.save()", "def remove_votes(checkin, vote):\n\n\t# update database\n\tif vote == \"up\":\n\t\t# remove upvote\n\t\tcheckin.upvotes -= 1\n\telif vote == \"down\":\n\t\t# remove downvote\n\t\tcheckin.downvotes -= 1\n\n\t# commit changes to database\n\tmodel.db.session.commit()", "def test_update_remove_upvote(self):\n self.test_uuid = str(uuid.uuid4())\n u = Upvote(1, ['upvoter 1 uuid'])\n u.save(self.test_uuid, db)\n u.update_remove_upvote('upvoter 1 uuid', 0, self.test_uuid, db)\n\n _u = Upvote.get(self.test_uuid, db)\n self.assertEqual(u.to_dict(), _u.to_dict())", "def downvote(self, count):\n count = self.validate_positive_integer(count)\n\n self.vote_score = self.vote_score - count\n return self.vote_score", "def vote(self, user_id):\n already_voted = self.has_voted(user_id)\n vote_status = None\n if not already_voted:\n # vote up the post\n db.engine.execute(\n PostUpvote.insert(),\n user_id=user_id,\n post_id=self.id\n )\n self.votes = self.votes + 1\n vote_status = True\n else:\n # unvote the post\n db.engine.execute(\n PostUpvote.delete(\n db.and_(\n PostUpvote.user_id == user_id,\n PostUpvote.post_id == self.id\n )\n )\n )\n self.votes = self.votes - 1\n vote_status = False\n db.session.commit() # for the vote count\n return vote_status", "def unvote(self, user):\n if self.__votes_cast.has_key(user):\n vote = self.__votes_cast[user]\n del self.__votes_cast[user]\n self._get_vote_list(vote).remove(user)\n self._p_changed = 1", "def upvote(checkin_id):\n\n\tvote = \"up\"\n\n\treturn update_vote(checkin_id, vote)", "def vote_down(self, question, user):\n vote, created = self.get_or_create(user=user, question=question,\n defaults={'vote': Vote.DOWN_VOTE})\n if not created:\n vote.vote = Vote.DOWN_VOTE\n vote.save()", "def downvote(self, obj):\n \n # If no ID is present Quote has not yet been saved to database\n # so have downvote do nothing instead of cause a Interval Server Error (500).\n if obj.id:\n url = reverse(\"admin:{}_{}_downvote\".format(self.model._meta.app_label, self.model._meta.model_name), args=(obj.id, ))\n else:\n url = '#'\n\n return mark_safe('<a href=\"{}\"><div class=\"arrow-down\"></div></a>'.format(url))", "def upvote(self, obj):\n\n # If no ID is present Quote has not yet been saved to database\n # so have upvote do nothing instead of cause a Interval Server Error (500).\n if obj.id:\n url = reverse(\"admin:{}_{}_upvote\".format(self.model._meta.app_label, self.model._meta.model_name), args=(obj.id, ))\n else:\n url = '#'\n\n return mark_safe('<a href=\"{}\"><div class=\"arrow-up\"></div></a>'.format(url))", "def upvote(self, count):\n count = self.validate_positive_integer(count)\n\n self.vote_score = self.vote_score + count\n return self.vote_score", "def downvote_view(self, request, obj_id):\n\n obj = get_object_or_404(self.model, pk=unquote(obj_id))\n obj.downvote()\n\n return HttpResponseRedirect(request.META['HTTP_REFERER'])", "def upvote(self, comment):\n if settings.UPVOTE_ENABLED:\n res = self.make_request(comment.upvote_url)\n print res.status_code, res.text\n\n # track comment to mixpanel\n mixpanel.track('Upvoted Comment', {\n 'Comment ID': comment.id,\n 'Text Length': len(comment.text),\n 'Debug Mode': not settings.UPVOTE_ENABLED,\n 
}.update(comment.sentiment['probability']))\n\n sleep(settings.VOTE_DELAY)\n else:\n print 'Would upvote %s' % comment", "def upvote(request, issue_id):\n if request.method == 'POST':\n issue = get_object_or_404(Issue, pk=issue_id)\n issue.upvotes += 1\n issue.save()\n messages.success(request, 'Upvoted successfully!')\n return redirect('/issue/' +str(issue.id))", "def _apply_post_vote(submission, upvote, is_upvoted):\n vote_delta = sum(\n [\n # adds the upvote\n 1 if upvote and not is_upvoted else 0,\n # clear an existing upvote\n -1 if upvote is False and is_upvoted else 0,\n ]\n )\n\n if vote_delta:\n # apply an update to the nullable post score\n # by substituting the current ups value from reddit if there's a null\n Post.objects.filter(post_id=submission.id).update(\n score=Coalesce(\"score\", submission.ups) + vote_delta\n )", "def upvote_view(self, request, obj_id):\n\n obj = get_object_or_404(self.model, pk=unquote(obj_id))\n obj.upvote()\n\n return HttpResponseRedirect(request.META['HTTP_REFERER'])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Randomly split data and save as MindRecord.
def random_split_trans2mindrecord(input_file_path, output_file_path, recommendation_dataset_stats_dict, part_rows=100000, line_per_sample=1000, train_line_count=None, test_size=0.1, seed=2020, dense_dim=13, slot_dim=26): if train_line_count is None: raise ValueError("Please provide training file line count") test_size = int(train_line_count * test_size) all_indices = [i for i in range(train_line_count)] np.random.seed(seed) np.random.shuffle(all_indices) print("all_indices.size:{}".format(len(all_indices))) test_indices_set = set(all_indices[:test_size]) print("test_indices_set.size:{}".format(len(test_indices_set))) print("-----------------------" * 10 + "\n" * 2) train_data_list = [] test_data_list = [] cats_list = [] dense_list = [] label_list = [] writer_train = FileWriter(os.path.join(output_file_path, "train_input_part.mindrecord"), 21) writer_test = FileWriter(os.path.join(output_file_path, "test_input_part.mindrecord"), 3) schema = {"label": {"type": "float32", "shape": [-1]}, "num_vals": {"type": "float32", "shape": [-1]}, "cats_vals": {"type": "int32", "shape": [-1]}} writer_train.add_schema(schema, "CRITEO_TRAIN") writer_test.add_schema(schema, "CRITEO_TEST") with open(input_file_path, encoding="utf-8") as file_in: items_error_size_lineCount = [] count = 0 train_part_number = 0 test_part_number = 0 for i, line in enumerate(file_in): count += 1 if count % 1000000 == 0: print("Have handle {}w lines.".format(count // 10000)) line = line.strip("\n") items = line.split("\t") if len(items) != (1 + dense_dim + slot_dim): items_error_size_lineCount.append(i) continue label = float(items[0]) values = items[1:1 + dense_dim] cats = items[1 + dense_dim:] assert len(values) == dense_dim, "values.size: {}".format(len(values)) assert len(cats) == slot_dim, "cats.size: {}".format(len(cats)) dense, cats = recommendation_dataset_stats_dict.map_cat2id(values, cats) dense_list.extend(dense) cats_list.extend(cats) label_list.append(label) if count % line_per_sample == 0: if i not in test_indices_set: train_data_list.append({"cats_vals": np.array(cats_list, dtype=np.int32), "num_vals": np.array(dense_list, dtype=np.float32), "label": np.array(label_list, dtype=np.float32) }) else: test_data_list.append({"cats_vals": np.array(cats_list, dtype=np.int32), "num_vals": np.array(dense_list, dtype=np.float32), "label": np.array(label_list, dtype=np.float32) }) if train_data_list and len(train_data_list) % part_rows == 0: writer_train.write_raw_data(train_data_list) train_data_list.clear() train_part_number += 1 if test_data_list and len(test_data_list) % part_rows == 0: writer_test.write_raw_data(test_data_list) test_data_list.clear() test_part_number += 1 cats_list.clear() dense_list.clear() label_list.clear() if train_data_list: writer_train.write_raw_data(train_data_list) if test_data_list: writer_test.write_raw_data(test_data_list) writer_train.commit() writer_test.commit() print("-------------" * 10) print("items_error_size_lineCount.size(): {}.".format(len(items_error_size_lineCount))) print("-------------" * 10) np.save("items_error_size_lineCount.npy", items_error_size_lineCount)
[ "def segment_test_train():\n lookup = get_default_lookup()\n\n\n # Lets randomize all possible fic ids\n all_ids = lookup.keys()\n shuffle(all_ids)\n\n #now define 1/5 of the dataset as train\n num_ids = len(all_ids)\n test = int(num_ids/5)\n\n testdata = all_ids[0:test]\n traindata = all_ids[test:-1]\n\n with open('traindata.pik', 'w') as f:\n pik.dump(traindata, f)\n\n with open('testdata.pik', 'w') as f:\n pik.dump(testdata, f)\n\n return traindata, testdata", "def test_create_data_split(self) -> None:\n\n # Validate data splits for the full data set\n train_test_split = random.uniform(0.1, 0.9)\n full_data_dict = self.data_loader.create_data_split((0, \"hx\"),\n train_test_split=train_test_split)\n train_x, train_y = full_data_dict[\"train_x\"], full_data_dict[\"train_y\"]\n test_x, test_y = full_data_dict[\"test_x\"], full_data_dict[\"test_y\"]\n self._validate_data_split(train_x, train_y,\n test_x, test_y,\n self.num_labels,\n train_test_split)\n\n # Validate data splits for a partial data set\n cutoff = random.randrange(5, self.num_labels - 1)\n partial_data_dict = self.data_loader.create_data_split(\n (0, \"hx\"), train_test_split=train_test_split, data_subset_size=cutoff\n )\n partial_train_x, partial_train_y = partial_data_dict[\"train_x\"], partial_data_dict[\"train_y\"]\n partial_test_x, partial_test_y = partial_data_dict[\"test_x\"], partial_data_dict[\"test_y\"]\n self._validate_data_split(\n partial_train_x, partial_train_y, partial_test_x, partial_test_y,\n size=cutoff, data_split=train_test_split\n )", "def save_dataset(self):\n\n # Move non-required columns to metadata:\n artiset_data_with_metadata = []\n for example in self.artiset_data:\n if 'metadata' not in example:\n new_example = {'metadata':{}}\n else:\n new_example = {'metadata': example['metadata']}\n new_example.update({k:example[k] for k in ['id', 'phrase', 'context', 'answer']})\n new_example['metadata'].update({k: example[k] for k in set(example.keys()) - {'id', 'phrase', 'context', 'answer','metadata'}})\n artiset_data_with_metadata.append(new_example)\n self.artiset_data = artiset_data_with_metadata\n\n # splitting\n if len(self._split) > 0:\n train_inds, dev_inds, test_inds = self.split_by_columns()\n elif 'split' in self.examples_meta:\n test_inds = list(self.examples_meta[self.examples_meta['split'] == 'test'].index)\n dev_inds = list(self.examples_meta[self.examples_meta['split'] == 'dev'].index)\n train_inds = list(self.examples_meta[self.examples_meta['split'] == 'train'].index)\n random.seed(17)\n random.shuffle(train_inds)\n #random.shuffle(test_inds)\n #random.shuffle(dev_inds)\n test_inds = test_inds[0: self._config['test_dev_size'][0]]\n dev_inds = dev_inds[0:self._config['test_dev_size'][1]]\n train_inds = train_inds[0:self._config['max_number_of_examples']]\n else:\n inds = [i for i in range(len(self.artiset_data))]\n random.seed(17)\n random.shuffle(inds)\n test_inds = inds[0:self._config['test_dev_size'][0]]\n dev_inds = inds[self._config['test_dev_size'][0]:sum(self._config['test_dev_size'])]\n train_inds = inds[sum(self._config['test_dev_size']):]\n\n\n if self._output_file.startswith('s3://'):\n save_func = upload_jsonl_to_s3\n elif is_path_creatable(self._output_file) and len(self._output_file) > 0:\n save_func = save_jsonl_to_local\n else:\n # Do nothing\n return\n\n if self._save_sample:\n if 'split' in self.examples_meta.columns:\n logger.info(f\"size of each split:\\n{self.examples_meta['split'].value_counts()}\")\n random.seed(17)\n if len(self.artiset_data) > 100:\n 
self.artiset_data = random.sample(self.artiset_data,100)\n save_func(self._output_file, self.artiset_data, sample_indent=self._save_sample)\n else:\n logger.info('uploading %d,%d,%d test,dev,train examples' % (len(test_inds),len(dev_inds),len(train_inds)))\n save_func(self._output_file.replace('.jsonl', '_test.jsonl'), [self.artiset_data[i] for i in test_inds])\n save_func(self._output_file.replace('.jsonl', '_dev.jsonl'), [self.artiset_data[i] for i in dev_inds])\n save_func(self._output_file.replace('.jsonl', '_train.jsonl'), [self.artiset_data[i] for i in train_inds])\n if len(self.examples_meta) > 0:\n save_func(self._output_file.replace('.jsonl', '_meta.jsonl'), self.examples_meta.to_dict(orient='rows'))\n\n return train_inds, dev_inds, test_inds", "def dset_split(to_split, num_splits, n_tot, split_prefix):\n while (True):\n num = np.random.poisson(n_tot / float(num_splits), num_splits - 1)\n np.append(num, n_tot - np.sum(num))\n if all(num > 0):\n break\n\n\n def group_copy(name, node, rows, fp):\n dtype = node.dtype\n value = node[...]\n fp.require_dataset(name, data=value[rows], shape=(len(rows),), dtype=dtype)\n \n with h5py.File(to_split, 'r') as to_split_fp:\n for i, number in enumerate(num):\n split_name = split_prefix + str(i) + '.h5py'\n logging.info(\"-Constructing: \" + split_name)\n chosen_rows = np.random.random_integers(0, n_tot-1, number)\n with h5py.File(split_name, 'w') as copy_to_fp: \n for key in to_split_fp.keys():\n dset_to_copy = to_split_fp[key]\n dset_to_copyto = copy_to_fp.require_group(key)\n if key != 'meta':\n copier = partial(group_copy, rows=chosen_rows, fp=dset_to_copyto)\n dset_to_copy.visititems(copier)\n else:\n group_copy(\"meta/Status\", dset_to_copy['Status'], chosen_rows,\n dset_to_copyto)", "def create_fake_data(split_name, num_examples=4):\n output_file = os.path.join(FLAGS.out_directory,\n 'cifar10_%s.tfrecord' % split_name)\n writer = tf.python_io.TFRecordWriter(output_file)\n for _ in range(num_examples):\n image = np.random.randint(256, size=(_IMAGE_SIZE, _IMAGE_SIZE, 3),\n dtype=np.uint8)\n image = Image.fromarray(image)\n image_buffer = StringIO.StringIO()\n image.save(image_buffer, format='png')\n image_buffer = image_buffer.getvalue()\n\n label = 0\n example = dataset_utils.image_to_tfexample(\n image_buffer, 'png', _IMAGE_SIZE, _IMAGE_SIZE, label)\n writer.write(example.SerializeToString())\n writer.close()", "def split_data(data, proportion): #function taken from hackathon_3 notebook and improved by Jingchao Zhang\n size = data.shape[0]\n s = np.random.permutation(size)\n split_idx = int(proportion * size)\n train = data[s[:split_idx]]\n val = data[s[split_idx:]]\n return train, val", "def split_training_data(infile, n_splits, location):\n datafile = h5py.File(location+infile,'r')\n data = {}\n print(\"Reading {0}\".format(location+infile))\n for keys in datafile.keys():\n data[keys] = np.array_split(datafile[keys],n_splits)\n for d in data[keys]:\n print(keys, d.shape)\n\n train_test_datadir = \"{0}/models/datain/\".format(crm_data)\n for n in range(n_splits):\n outfile=train_test_datadir+\"train_test_data_021501AQ_{0}.h5\".format(str(n).zfill(3))\n with h5py.File(outfile) as hfile:\n for k in data:\n var_data = data[k][n]\n print(\"Saving split data {0}: {1} of {2}\".format(k,n,n_splits))\n hfile.create_dataset(k, data=var_data)", "def createTrainFile(self):\n\n a_patterns = self.readDataFile()\n self.randomizeAndWriteTrain(a_patterns)", "def save_mimic_split_patient_wise(df, label_column, save_dir, task_name, seed, 
column_list=None):\n if column_list is None:\n column_list = [\"ID\", \"TEXT\", label_column]\n\n # Load prebuilt MIMIC patient splits\n data_split = {\"train\": pd.read_csv(\"tasks/mimic_train.csv\"),\n \"val\": pd.read_csv(\"tasks/mimic_val.csv\"),\n \"test\": pd.read_csv(\"tasks/mimic_test.csv\")}\n\n # Use row id as general id and cast to int\n df = df.rename(columns={'HADM_ID': 'ID'})\n df.ID = df.ID.astype(int)\n\n # Create path to task data\n os.makedirs(save_dir, exist_ok=True)\n\n # Save splits to data folder\n for split_name in [\"train\", \"val\", \"test\"]:\n split_set = df[df.SUBJECT_ID.isin(data_split[split_name].SUBJECT_ID)].sample(frac=1,\n random_state=seed)[column_list]\n\n # lower case column names\n split_set.columns = map(str.lower, split_set.columns)\n\n split_set.to_csv(os.path.join(save_dir, \"{}_{}.csv\".format(task_name, split_name)),\n index=False,\n quoting=csv.QUOTE_ALL)", "def testFixture(self):\n for value in self.testing_data:\n model_test = TestingModel(pickle_field=value)\n model_test.save()\n dumpdata = Dumpdata()\n json = dumpdata.handle('mbdb')\n pass", "def create_data_split(x, y, experiment):\n\n if experiment == 'qgen':\n train_size = pd.read_csv('../data/df_qgen_train.csv').shape[0]\n val_size = pd.read_csv('../data/df_qgen_val.csv').shape[0]\n test_size = pd.read_csv('../data/df_qgen_test.csv').shape[0]\n elif experiment == 'dialogue':\n train_size = pd.read_csv('../data/df_dialogue_train.csv').shape[0]\n val_size = pd.read_csv('../data/df_dialogue_val.csv').shape[0]\n test_size = pd.read_csv('../data/df_dialogue_test.csv').shape[0]\n else:\n print('Invalid experiment name specified !')\n return\n\n train_indices = range(train_size)\n val_indices = range(train_size, train_size + val_size)\n test_indices = range(train_size + val_size, train_size + val_size + test_size)\n\n x_train = x[train_indices]\n y_train = y[train_indices]\n x_val = x[val_indices]\n y_val = y[val_indices]\n x_test = x[test_indices]\n y_test = y[test_indices]\n\n return x_train, y_train, x_val, y_val, x_test, y_test", "def data_sampling_load(self, mode='random'):\n \n if mode == 'random' :\n file_name = './data/df_invoice_line_sample_random.dump'\n else :\n print(\"*** ERROR : mode = \"+str(mode)+ \" is not supported!\" )\n return\n\n self._df_invoice = p5_util.object_load(file_name)\n\n\n #-------------------------------------------------------------------------\n # Countries are filtered\n #-------------------------------------------------------------------------\n if 'Country' in self._df_invoice.columns:\n self.df_invoice \\\n = self.df_invoice[self.df_invoice.Country == 'United Kingdom']\n self._df_invoice_original = self._df_invoice.copy() \n\n #-------------------------------------------------------------------------\n # Compute statistics about number of customers, number of invoice\n #-------------------------------------------------------------------------\n self._compute_numbers()\n \n #-------------------------------------------------------------------------\n # Dataframe with out of sample customers is relaoded.\n #-------------------------------------------------------------------------\n file_name = './data/df_invoice_line_out_sample_random.dump'\n self._df_invoice_line_out_sample = p5_util.object_load(file_name)", "def save_local(ori_data, save_dir, n_partition=10,\n drop_rate=-1, need_meta=True):\n if tf.io.gfile.exists(save_dir):\n tf.io.gfile.rmtree(save_dir)\n tf.io.gfile.makedirs(save_dir)\n \n if need_meta:\n save_meta_dir = save_dir + \"_meta\"\n if 
tf.io.gfile.exists(save_meta_dir):\n tf.io.gfile.rmtree(save_meta_dir)\n tf.io.gfile.makedirs(save_meta_dir)\n\n data = repartition_and_sort(ori_data, n_partition)\n for i in range(n_partition):\n count = 0\n fpath = os.path.join(save_dir, '%05d.tfrecord' % i)\n meta_fpath = os.path.join(save_meta_dir, '%05d.meta' % i)\n writer = tf.python_io.TFRecordWriter(fpath)\n if need_meta:\n meta_writer = open(meta_fpath, 'w')\n for eid, record in data[i]:\n if drop_rate > 0:\n r = random.random()\n if r > drop_rate:\n count += 1\n writer.write(record)\n if need_meta:\n meta_writer.write('%s\\n' % eid)\n else:\n count += 1\n writer.write(record)\n if need_meta:\n meta_writer.write('%s\\n' % eid)\n writer.close()\n if need_meta:\n meta_writer.close()\n logging.info(\"Write %s done. %d records\" % (fpath, count))", "def shuffle_dynamic_dataset(self, sv=False):\n\n self.dynamic_dataset = shuffle(self.dynamic_dataset)\n\n if sv:\n pd.DataFrame(self.dynamic_dataset).to_csv('..\\\\Dynamic_df.csv')\n\n # Set up training data and apply grouping\n self.set_training_data()", "def create_datasets_file(self):\n data_list = self.get_data()\n\n split_num = 80 * int(103 * 0.75)\n print(f\"split_num {split_num}\")\n # Save train and validation dataset\n filename = os.path.join(self.dataset_dir, \"train.datatxt\")\n save_dataset_list(filename, data_list[:split_num])\n print(\n f\"The dataset of the size {len(data_list[:split_num])} saved in {filename}.\"\n )\n filename = os.path.join(self.dataset_dir, \"val.datatxt\")\n save_dataset_list(filename, data_list[split_num:])\n print(\n f\"The dataset of the size {len(data_list[split_num:])} saved in {filename}.\"\n )", "def split_recordings(path,recording_file,file_extension,fs,seconds_split):\r\n\r\n\r\n N = (fs * seconds_split) + 1 # number of data points per file for a chosen seconds_split\r\n data_path = os.path.join(path, recording_file + file_extension)\r\n write_path = os.path.join(path, recording_file + \" split_folder\")\r\n if not os.path.exists(write_path):\r\n os.mkdir(write_path)\r\n print(\"Number of data points per file is\", N)\r\n\r\n data = []\r\n data = txt_to_list(data_path,data,\"float\")\r\n\r\n N_files = int(np.ceil(len(data) / N))\r\n\r\n start_range = 0\r\n end_range = N\r\n\r\n for i in range(N_files):\r\n\r\n file = open(os.path.join(write_path, recording_file + \"_split_\" + str(i) + file_extension), \"w\")\r\n\r\n for n in range(start_range, end_range):\r\n file.write(str(data[n]) + '\\n')\r\n file.close()\r\n\r\n start_range = end_range\r\n if (i == (N_files - 2)):\r\n end_range = len(data)\r\n else:\r\n end_range = end_range + N + 1\r\n\r\n return write_path", "def split_data(data, per_train = 0.6, per_dev = 0.2, per_test = 0.2 , path = '../data_sets', should_load = True, data_name = '', should_shuffle = True, verbos = False):\n assert per_train + per_dev + per_test == 1\n name = data_name + str(per_train) + str(per_dev) + str(per_test) + '/'\n full_path = path + '/' + name if path != None else None\n if path != None and os.path.exists(full_path) and should_load:\n print('Loading data set from %s...' 
% (full_path))\n train = np.load(full_path + 'train.npy')\n dev = np.load(full_path + 'dev.npy')\n test = np.load(full_path + 'test.npy')\n else:\n per_dev += per_train\n \n if should_shuffle:\n data = break_data(data)\n [np.random.shuffle(d) for d in data]\n \n train = take_partial_data(data, 0, per_train)\n dev = take_partial_data(data, per_train, per_dev)\n test = take_partial_data(data, per_dev, 1)\n\n if path != None:\n try:\n if not os.path.exists(full_path):\n os.mkdir(full_path)\n except OSError:\n print (\"Creation of the directory %s failed, not saving set\" % full_path)\n else:\n print (\"Successfully created the directory %s now saving data set\" % full_path)\n np.save(full_path + 'train', train)\n np.save(full_path + 'dev', dev)\n np.save(full_path + 'test', test)\n\n if verbos:\n data = np.concatenate((train, dev, test))\n num_clusters = data.shape[0]\n num_wfs = count_waveforms(data)\n print_data_stats(train, 'train', num_clusters, num_wfs)\n print_data_stats(dev, 'dev', num_clusters, num_wfs)\n print_data_stats(test, 'test', num_clusters, num_wfs)\n \n return train, dev, test", "def split_data(dev_test_ratio=.1):\n # Prevent splitting when data is already split by checking if\n # DATA_DIR/train exists.\n print(\"Checking if data needs to be split\")\n if os.access(os.path.join(DATA_DIR, \"train\"), os.F_OK):\n return\n\n # Move all files to train/dev/test randomly using a set seed\n print(\"Splitting data into train/dev/test sets\")\n seed = SEED\n random.seed(seed)\n\n for media_dir in os.scandir(DATA_DIR):\n if not media_dir.is_dir():\n continue\n for path, _, filenames in os.walk(media_dir):\n if len(path.split('/')) <= 2:\n continue\n\n _, media_type, emotion_type = path.split(\"/\")\n\n # If no images in these categories then remove the folders\n if len(filenames) == 0:\n os.removedirs(os.path.join(DATA_DIR, media_type, emotion_type))\n continue\n\n # Shuffle images and then save into train/dev/test guaranteeing\n # that at least one image is in the test set from each category\n random.shuffle(filenames)\n set_size = int(math.ceil(len(filenames) * dev_test_ratio))\n\n train_path = os.path.join(DATA_DIR, \"train\", media_type, emotion_type)\n for fn in filenames[:-2*set_size]:\n os.renames(os.path.join(path, fn), os.path.join(train_path, fn))\n\n dev_path = os.path.join(DATA_DIR, \"dev\", media_type, emotion_type)\n for fn in filenames[-2*set_size:-set_size]:\n os.renames(os.path.join(path, fn), os.path.join(dev_path, fn))\n\n test_path = os.path.join(DATA_DIR, \"test\", media_type, emotion_type)\n for fn in filenames[-set_size:]:\n os.renames(os.path.join(path, fn), os.path.join(test_path, fn))\n print(\"Data successfully split\\n\")", "def split_train_valid_pnoneside(data,train_ratio=0.8):\n print(\"Split data in train set and valid set !\")\n print(\"For a molecule, positive and negative sample should split into two different dataset.\")\n data = data.sample(frac=1.0).reset_index(drop = True)\n smiles = list(set(data[\"smiles\"]))\n belongs = {}\n for smile in smiles:\n belongs[smile] = int(0.5 + np.random.random())\n valid_ids = []\n train_ids = []\n for i in range(len(data)):\n smiles = data[\"smiles\"][i]\n label = data[\"label\"][i]\n rand = (np.random.random() < train_ratio)\n if label == belongs[smiles]:\n if rand:\n train_ids.append(i)\n else:\n valid_ids.append(i)\n else:\n if rand:\n valid_ids.append(i)\n else:\n train_ids.append(i)\n\n train_data = data.iloc[train_ids].sample(frac=1.0).reset_index(drop = True)\n valid_data = 
data.iloc[valid_ids].sample(frac=1.0).reset_index(drop = True)\n # check here\n train_smiles = list(set(train_data[\"smiles\"]))\n valid_smiles = list(set(valid_data[\"smiles\"]))\n #print(\"Check the dataset below\")\n #for i in range(1000):\n # if train_smiles[i] not in valid_smiles:\n # continue\n # train_label = list(train_data[train_data[\"smiles\"] == train_smiles[i]][\"label\"])\n # valid_label = list(valid_data[valid_data[\"smiles\"] == train_smiles[i]][\"label\"])\n # if sum(train_label) + sum(valid_label) >= 20:\n # print(\"Ratio : %.3f\"%(1.0*sum(train_label)/(sum(valid_label) + sum(train_label))))\n print(\"Total samples : %d , Train samples : %d , Valid samples : %d\"%(len(data),len(train_data),len(valid_data)))\n print(\"Total Proteins : %d , Train Proteins : %d , Valid Proteins : %d\"%(\n len(set(data[\"protein\"])),len(set(train_data[\"protein\"])),len(set(valid_data[\"protein\"]))\n ))\n print(\"Total Smiles : %d , Train Smiles : %d , Valid Smiles : %d\"%(\n len(set(data[\"smiles\"])),len(set(train_data[\"smiles\"])),len(set(valid_data[\"smiles\"]))\n ))\n print(\"Total positive ratio : %.2f , Train positive ratio : %.2f , Valid positive ratio : %.2f\"%(\n len(data[data[\"label\"] == 1])/len(data),len(train_data[train_data[\"label\"]==1])/len(train_data),\n len(valid_data[valid_data[\"label\"]==1])/len(valid_data)\n ))\n return train_data,valid_data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Turn competition mode on or off.
def competition_mode(self, on): raise NotImplementedError
[ "def toggle_gamemode(self):\n # todo: remove this check\n if self != shared.world.get_active_player():\n return\n\n if self.gamemode == 1:\n self.set_gamemode(3)\n elif self.gamemode == 3:\n self.set_gamemode(1)", "def pet_mode(self) -> bool:\n return self.mode_toggle('pet')", "def turn_off_engine(self):\n if self.engine_status == \"On\":\n self.engine_status = \"Off\"\n print(f\"Engine of {self.my_car} is turned off\")\n else:\n print(f\"Engine of {self.my_car} is already off\")", "def toggle_sim():\n\tglobal sim_on\n\tif sim_on == False:\n\t\tsim_on = True\n\telse:\n\t\tsim_on = False", "def detected_mode_set(self, event):\n self.mode.set(2)\n self.change_mode()", "def turn_on(self, **kwargs):\n self.data.switch_on()", "def toggle_pump(self):\n if self.pump_on:\n self.pump_on = False\n self.actor_off(int(self.pump))\n else:\n self.pump_on = True\n self.actor_on(int(self.pump))\n self.pump_time = time.time()", "def set_gamemode(self, gamemode: typing.Union[int, str]):\n if str(gamemode) in self.GAMEMODE_DICT:\n gamemode = self.GAMEMODE_DICT[str(gamemode)]\n\n if gamemode == 0:\n self.flying = False\n elif gamemode == 1:\n pass\n elif gamemode == 2:\n self.flying = False\n elif gamemode == 3:\n self.flying = True\n\n self.gamemode = gamemode\n else:\n # todo: add an option to raise an exception here\n logger.println(\"[ERROR] invalid gamemode:\", gamemode)\n\n if not shared.IS_TEST_ENV:\n shared.tick_handler.schedule_once(\n self.send_update_package_when_server(update_flags=32)\n )", "def turnOnAC(self,frame,db):\r\n self.aircon.actuators[0].turnOn()\r\n frame.aCStateDisplayLabel.config(text=\"On\")\r\n frame.update()\r\n db.commit()", "def set_training_mode(self, mode: bool) -> None:\n self.actor.set_training_mode(mode)\n self.critic.set_training_mode(mode)\n self.training = mode", "def set_turbo_mode(self, enable: params.Toggle, /) -> GoProResp:", "def toSafeMode(self):\r\n self.start()\r\n time.sleep(0.03)\r\n # now we're in PASSIVE_MODE, so we repeat the above code...\r\n self.send( SAFE )\r\n # they recommend 20 ms between mode-changing commands\r\n time.sleep(0.03)\r\n # change the mode we think we're in...\r\n self.sciMode = SAFE_MODE\r\n # no response here, so we don't get any...\r\n return", "def thermo_state(self):\n if False:\n self.furnace.turn_on(self.time)\n else:\n self.furnace.turn_off(self.time)", "def set_preset_mode(self, preset_mode: str):\n return self.turn_on(preset_mode=preset_mode)", "def mode_toggle(self, mode: str) -> bool:\n if mode.lower() not in self.modes:\n logger.debug('Invalid purifier mode used - %s',\n mode)\n return False\n head, body = self.build_api_dict('setPurifierMode')\n if not head and not body:\n return False\n\n body['payload']['data'] = {\n 'mode': mode.lower()\n }\n if mode == 'manual':\n body['payload'] = {\n 'data': {\n 'id': 0,\n 'level': 1,\n 'type': 'wind'\n },\n 'method': 'setLevel',\n 'type': 'APP'\n }\n\n r, _ = Helpers.call_api(\n '/cloud/v2/deviceManaged/bypassV2',\n method='post',\n headers=head,\n json_object=body,\n )\n\n if Helpers.code_check(r):\n if mode.lower() == 'manual':\n self.speed = 1\n self.mode = 'manual'\n else:\n self.mode = mode\n self.speed = 0\n return True\n logger.debug('Error setting purifier mode')\n return False", "def captured_mode_set(self, event):\n self.mode.set(1)\n self.change_mode()", "def mode(ctx, mode, touch_eject, autoeject_timeout, chalresp_timeout, force):\n dev = ctx.obj['dev']\n if autoeject_timeout:\n touch_eject = True\n autoeject = autoeject_timeout if touch_eject else None\n\n if mode is 
not None:\n if mode.transports != TRANSPORT.CCID:\n autoeject = None\n if touch_eject:\n ctx.fail('--touch-eject can only be used when setting'\n ' CCID-only mode')\n\n if not force:\n if mode == dev.mode:\n click.echo('Mode is already {}, nothing to do...'.format(mode))\n ctx.exit()\n elif not dev.has_mode(mode):\n click.echo('Mode {} is not supported on this YubiKey!'\n .format(mode))\n ctx.fail('Use --force to attempt to set it anyway.')\n force or click.confirm('Set mode of YubiKey to {}?'.format(mode),\n abort=True, err=True)\n\n try:\n dev.set_mode(mode, chalresp_timeout, autoeject)\n if not dev.can_write_config:\n click.echo(\n 'Mode set! You must remove and re-insert your YubiKey '\n 'for this change to take effect.')\n except ModeSwitchError as e:\n logger.debug('Failed to switch mode', exc_info=e)\n click.echo('Failed to switch mode on the YubiKey. Make sure your '\n 'YubiKey does not have an access code set.')\n\n else:\n click.echo('Current connection mode is: {}'.format(dev.mode))\n supported = ', '.join(t.name for t in TRANSPORT\n .split(dev.config.usb_supported))\n click.echo('Supported USB interfaces are: {}'.format(supported))", "def test_setMode(self):\n assert(self.radio.mode == RadioMode.off)\n \n # Set mode to receive\n self.changeMode(RadioMode.receive)\n\n # Set mode to off\n self.changeMode(RadioMode.off)\n\n # Set mode to transmit\n self.changeMode(RadioMode.transmit)\n \n # Set mode to sleep\n self.changeMode(RadioMode.sleep)", "def test_mode_toggle(self, caplog, api_mock):\n self.mock_api.return_value = ({'code': 0}, 200)\n fan = VeSyncAir131(DEV_LIST_DETAIL, self.vesync_obj)\n f = fan.auto_mode()\n assert f\n assert fan.mode == 'auto'\n f = fan.manual_mode()\n assert fan.mode == 'manual'\n assert f\n f = fan.sleep_mode()\n assert fan.mode == 'sleep'\n assert f" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Commons out the code checking function settings for randomness. Interesting to see that the rates go up with increasing width of word, no doubt because it is spending proportionately more time in highly optimized math routines. Lots of patterns in that data, not a waste of time, but insight/hour is low, I think. 24 June 2018: the first set of 32-bit tests consistently has a duplicate, just one, always with paranoia level 2, though not many; only the last 3 runs of test_evocprngs.py show this. Actually, all of them do, because it is the same sequence every time, as I don't have outside entropy. Fixed that. 5 July 2018: I made the executive decision to prohibit 32-bit integers for the PRNs and hashes. I could produce code that passed Dieharder, but not cleanly, and it had dups, 0s and all FFs, 0xFFF... So, given that everything is 64-bit now, including cell phones, further effort wasn't time-effective. Further analysis indicates that the dups are legit, birthday paradox. The fact that there are 1418 dups / 1M randoms is plausible, though I haven't had the mental energy to grasp those equations and try them out, and they all give the 50% probability number for a given size of the sampled universe, so it isn't straightforward from what I have read so far. I don't have a good explanation for the 0s and FFs; that is too high a probability of a particular number, far too high. And I see the same now in the 64-bit randoms, so it must be due to the fold and/or the next() function; the dump of the integer vectors does not reveal anything obvious, they look random to me. Need more debug info. Tomorrow.
def check_function( self, the_function, the_rnt ) : print( the_function ) print( "vec_size int_width statesize p_lvl difficulty " + "duplicates zeros all ff's elapsed time byterate" ) sys.stdout.flush() function_return = True n_samples = self.difficulty * 64 * 1024 random_table = [ 0 for _ in range( n_samples ) ] for n_lcgs in [ 7, 11, 19 ] : for integer_width in [ 64, 128 ] : for lcg_depth in [ 9, 17 ] : for paranoia_level in [ 1, 2 ] : beginning_time = int( time.time() ) the_crypto = the_function( the_rnt, n_lcgs, integer_width, lcg_depth, paranoia_level ) for i in range( n_samples ) : # this becomes slower over time. Why? new_random = the_crypto.next( integer_width, paranoia_level ) random_table[ i ] = new_random ending_time = int( time.time() ) sys.stdout.flush() elapsed_time = ending_time - beginning_time if elapsed_time == 0 : elapsed_time = 1 byte_rate = ( n_samples * ( integer_width / 8 )) / \ elapsed_time duplicates = count_duplicates( random_table ) function_return &= duplicates == 0 zeros = count_zeros( random_table ) function_return &= zeros == 0 # these are not signed numbers, 0xFFFF... is problem all_fs = count_all_fs( random_table, integer_width ) function_return &= all_fs == 0 print( "%5d %10d %8d %7d %10d %7d %7d %7d %7d %18.2f" % ( n_lcgs, integer_width, lcg_depth, paranoia_level, n_samples, duplicates, zeros, all_fs, ending_time - beginning_time, byte_rate ) ) sys.stdout.flush() self.assertTrue( function_return )
[ "def entropy_whole_process(lang, wordlength, fixations, nreps, knownwords, targetwords, mcconkie_drop, control=\"\"):\n\n\n\n #STEP 1: Compute entropy per trial\n pertrial_files=compute_entropy_pertrial(knownwords, knownwords, lang, wordlength, nreps, mcconkie_drop, fixations, control )\n #(outputs in file)\n print(\"Step 1 done! Entropy computed, %i repetitions.\"%nreps)\n\n #STEP 2: Compute mean entropy, across trials\n fmeans={}\n for fixation in fixations:\n infile=get_outputfile_pertrial(lang, fixation, wordlength, control)\n fmeans[fixation]=get_outputfile_means(lang, fixation, wordlength, control)\n mcconkie_probs = McConkie_Model(wordlength, mcconkie_drop, fixation)\n average_across_trials(infile,fmeans[fixation])\n print(\"Step 2 done! Entropy averaged accross repetitions.\")\n\n\n #CONTROL STEP (correlate with uniform frequencies)\n if control == \"types\":\n #Merge\n dfz=pd.read_csv(\"%s_zerofreq_entropydiff.csv\"%lang, sep=\";\")\n dff=pd.read_csv(\"%s_entropydiff.csv\"%lang, sep=\";\")\n dfz=dfz.sort_values(\"target\")\n dff=dff.sort_values(\"target\")\n #correlation\n print(\"Correlation with uniform frequency, for %s\"%lang,pearsonr(dfz['difference_entropy'], dff['difference_entropy'])) #r,p-value\n\n #Load frequency in dataframe\n wordfreq=pd.read_csv(\"/home/beltza/Research/L2STATS/nnfixrec/data/clean_%s_50k.txt\"%lang, sep=\" \", header=None)\n wordfreq.columns=[\"frequency\",\"target\"]\n merged=wordfreq.merge(dff,how=\"right\",on=\"target\")\n print(\"Correlation frequency and entropy(with frequency), for %s\"%lang,pearsonr(merged['difference_entropy'], merged['frequency']))\n merged=wordfreq.merge(dfz,how=\"right\",on=\"target\")\n print(\"Correlation frequency and entropy(without frequency), for %s\"%lang,pearsonr(merged['difference_entropy'], merged['frequency']))\n #Correlation for English: 0.729\n #Correlation for Hebrew: 0.772\n return fmeans", "def randomization_test():\n statsCounter = 0\n r = 1000\n realStat = abs(macroAveragedRecall - mutualMacroAveragedRecall)\n for trial in range(0, r):\n newSpams = dict()\n newTopKSpams = dict()\n for key in testSpamMailDict.keys():\n if random.randint(1, 2) == 1:\n newSpams[key] = testSpamMailDict[key]\n newTopKSpams[key] = mutualtestSpamMailDict[key]\n else:\n newSpams[key] = mutualtestSpamMailDict[key]\n newTopKSpams[key] = testSpamMailDict[key]\n\n newLegitimates = dict()\n newTopKLegitimate = dict()\n for key in testLegitimateMailDict.keys():\n if random.randint(1, 2) == 1:\n newLegitimates[key] = testLegitimateMailDict[key]\n newTopKLegitimate[key] = mutualtestLegitimateMailDict[key]\n else:\n newLegitimates[key] = mutualtestLegitimateMailDict[key]\n newTopKLegitimate[key] = testLegitimateMailDict[key]\n\n pseudoStat = abs(calculateMacroValuesAndFScore(sum(newLegitimates.values()), sum(newSpams.values()),len(testLegitimateMailDict.keys()),len(testSpamMailDict.keys()))[2] - calculateMacroValuesAndFScore(sum(newTopKLegitimate.values()), sum(newTopKSpams.values()), len(testLegitimateMailDict.keys()), len(testSpamMailDict.keys()))[2])\n\n if pseudoStat >= realStat:\n statsCounter = statsCounter + 1\n resultRandomization = (statsCounter + 1) / (r + 1)\n return resultRandomization", "def test_generate_random_table( self ) :\n print( \"\\ntest_generate_random_table\" )\n self.test_name = 'test_generate_random_table'\n\n self.setUp()\n\n str_random_table = generate_random_table( self.the_rnt, 4096, 64 )\n\n # that is strings, so need an integer array\n the_program = '\\nN_K_RANDOM_BYTES=[\\n' + \\\n str_random_table + 
']\\n'\n\n N_K_RANDOM_BYTES = convert_string( the_program )\n \n self.assertTrue( count_duplicates( N_K_RANDOM_BYTES ) == 0 )\n self.assertTrue( count_zeros( N_K_RANDOM_BYTES ) == 0 )", "def test_generate_constants( self ) :\n print( \"test_generate_constants\" )\n\n entropy_bits = \\\n 0xd262fbc7cbc7e757d16234bd7e88f12cc5dfef7c2ee82c9a4e289113d83d8724\n n_prngs = 19\n for integer_width in [ 64, 128, 256 ] :\n\n for n_prngs in [ 7, 19, 31 ] :\n constant_generator = generate_constants( integer_width, n_prngs,\n entropy_bits )\n\n for _ in range( n_prngs ) :\n multiplier, addition, lag, delta = next( constant_generator)\n print( multiplier, addition, lag, delta )\n\n try :\n multiplier, addition, lag, delta = next( constant_generator)\n\n except StopIteration :\n print( \"StopIteration -- Proper result\" )\n\n print( \"success test_generate_constants\" )", "def test_mutate(self):\n a = Alphabet('abc')**2\n m = Probs([0.5,0.25,0.25,0.1,0.8,0.1,0.3,0.6,0.1], a)\n #because of fp math in accumulate, can't predict boundaries exactly\n #so add/subtract eps to get the result we expect\n eps = 1e-6\n # a b b a c c a b c\n seq = array([0,1,1,0,2,2,0,1,2])\n random_vec = array([0,.01,.8-eps,1,1,.3,.05,.9+eps,.95])\n self.assertEqual(m.mutate(seq, random_vec), \\\n # a a b c c a a c c\n array([0,0,1,2,2,0,0,2,2]))\n #check that freq. distribution is about right\n seqs = array([m.mutate(seq) for i in range(1000)])\n #WARNING: bool operators return byte arrays, whose sums wrap at 256!\n zero_count = asarray(seqs == 0, 'int32')\n sums = sum(zero_count, axis=0)\n #expect: 500, 100, 100, 500, 300, 300, 500, 100, 300\n #std dev = sqrt(npq), which is sqrt(250), sqrt(90), sqrt(210)\n means = array([500, 100, 100, 500, 300, 300, 500, 100, 300])\n var = array([250, 90, 90, 250, 210, 210, 250, 90, 210])\n three_sd = 3 * sqrt(var)\n for obs, exp, sd in zip(sums, means, three_sd):\n assert exp - 2*sd < obs < exp + 2*sd", "def test_random_examples(self):\r\n\r\n for n in range(0, 1000):\r\n num1 = random.choices(range(0, 10 ** 3), k=1)\r\n num2 = random.choices(range(0, 10 ** 3), k=1)\r\n\r\n self.assertEqual(gcd_it(num1[0], num2[0]), math.gcd(num1[0], num2[0]))\r\n self.assertEqual(gcd_rec(num1[0], num2[0]), math.gcd(num1[0], num2[0]))", "def e3RsaAttack():\n #http://stackoverflow.com/a/358134\n def nth_root(x,n):\n \"\"\"Finds the integer component of the n'th root of x,\n an integer such that y ** n <= x < (y + 1) ** n.\n \"\"\"\n high = 1\n while high ** n < x:\n high *= 2\n low = high/2\n while low < high:\n mid = (low + high) // 2\n if low < mid and mid**n < x:\n low = mid\n elif high > mid and mid**n > x:\n high = mid\n else:\n return mid\n return mid + 1\n\n\n m = \"No Pain No Gain!\"\n print 'Encrypting:', m\n m = long(m.encode('hex'), 16)\n bits = 1024\n e = 3\n\n pubkeys = [getStrongPrime(bits, e) * getStrongPrime(bits, e) for _ in xrange(3)]\n captures = [pow(m, e, n) for n in pubkeys]\n\n c0, c1, c2 = [c % n for c,n in zip(captures, pubkeys)]\n n0, n1, n2 = pubkeys\n ms0 = n1 * n2\n ms1 = n0 * n2\n ms2 = n0 * n1\n N012 = n0 * n1 * n2\n\n result = ((c0 * ms0 * invmod(ms0, n0)) +\n (c1 * ms1 * invmod(ms1, n1)) +\n (c2 * ms2 * invmod(ms2, n2))) % N012\n\n m = nth_root(result, 3)\n m = hex(long(m))\n m = m[2:-1].decode('hex')\n print 'Decrypted: ', m", "def test_first_37(self):\n for sv in range(1, 38):\n prn_seq = prn.PRN(sv)\n for i in range(10):\n prn_seq.next()\n prn_test = prn.prn_info['first_ten_chips'][str(sv)]\n prn_test = bin(int(prn_test, 8))[2:]\n for i in range(10):\n 
self.assertEqual(prn_test[i], str(prn_seq.ca[i]))", "def _random_whole(self):\n random_whole = lambda: self._raw_random_whole(self.random_whole_bit_count)\n n = random_whole()\n\n if self.randomize_random_whole_bit_count:\n # modify the number of bits the next call will use\n\n offset = random_whole()\n \n if self.random_whole_bit_count >= 3:\n offset = math.ceil(self._severe_log(offset))\n offset *= -1 if self._random_bool() else 1\n self.random_whole_bit_count += offset\n\n if self.random_whole_bit_count <= 0:\n self.random_whole_bit_count = 1\n return n", "def simulate():\n\tnp.random.seed(42)\n\tmask = np.asarray([1,2,3]*ceil(N/3))[:N]\n\twon, lost = 0, 0\n\tfor i in range(10**6):\n\t\tdeck = np.asarray(list(range(1,int(N/KINDS)+1))*4)\n\t\tnp.random.shuffle(deck)\n\t\tres = not any(deck == mask)\n\t\tif res:\twon += 1\n\t\telse:\tlost += 1\n\t\t\n\t\tif not i%10**4:\n\t\t\tp_eval = won/(won+lost)\n\t\t\tprint(f\">>> Simulated win probability with {i} games: {100*p_eval:.4f}%\")", "def test_random_spanning_tree_multiplicative_large():\n from math import exp\n from random import Random\n\n pytest.importorskip(\"numpy\")\n stats = pytest.importorskip(\"scipy.stats\")\n\n gamma = {\n (0, 1): -0.6383,\n (0, 2): -0.6827,\n (0, 5): 0,\n (1, 2): -1.0781,\n (1, 4): 0,\n (2, 3): 0,\n (5, 3): -0.2820,\n (5, 4): -0.3327,\n (4, 3): -0.9927,\n }\n\n # The undirected support of gamma\n G = nx.Graph()\n for u, v in gamma:\n G.add_edge(u, v, lambda_key=exp(gamma[(u, v)]))\n\n # Find the multiplicative weight for each tree.\n total_weight = 0\n tree_expected = {}\n for t in nx.SpanningTreeIterator(G):\n # Find the multiplicative weight of the spanning tree\n weight = 1\n for u, v, d in t.edges(data=\"lambda_key\"):\n weight *= d\n tree_expected[t] = weight\n total_weight += weight\n\n # Assert that every tree has an entry in the expected distribution\n assert len(tree_expected) == 75\n\n # Set the sample size and then calculate the expected number of times we\n # expect to see each tree. This test uses a near minimum sample size where\n # the most unlikely tree has an expected frequency of 5.15.\n # (Minimum required is 5)\n #\n # Here we also initialize the tree_actual dict so that we know the keys\n # match between the two. We will later take advantage of the fact that since\n # python 3.7 dict order is guaranteed so the expected and actual data will\n # have the same order.\n sample_size = 1200\n tree_actual = {}\n for t in tree_expected:\n tree_expected[t] = (tree_expected[t] / total_weight) * sample_size\n tree_actual[t] = 0\n\n # Sample the spanning trees\n #\n # Assert that they are actually trees and record which of the 75 trees we\n # have sampled.\n #\n # For repeatability, we want to take advantage of the decorators in NetworkX\n # to randomly sample the same sample each time. 
However, if we pass in a\n # constant seed to sample_spanning_tree we will get the same tree each time.\n # Instead, we can create our own random number generator with a fixed seed\n # and pass those into sample_spanning_tree.\n rng = Random(37)\n for _ in range(sample_size):\n sampled_tree = nx.random_spanning_tree(G, \"lambda_key\", seed=rng)\n assert nx.is_tree(sampled_tree)\n\n for t in tree_expected:\n if nx.utils.edges_equal(t.edges, sampled_tree.edges):\n tree_actual[t] += 1\n break\n\n # Conduct a Chi squared test to see if the actual distribution matches the\n # expected one at an alpha = 0.05 significance level.\n #\n # H_0: The distribution of trees in tree_actual matches the normalized product\n # of the edge weights in the tree.\n #\n # H_a: The distribution of trees in tree_actual follows some other\n # distribution of spanning trees.\n _, p = stats.chisquare(list(tree_actual.values()), list(tree_expected.values()))\n\n # Assert that p is greater than the significance level so that we do not\n # reject the null hypothesis\n assert not p < 0.05", "def trans_int(f, pdf, trans, lims, e, consec = 50, sd = 0):\r\n rn.seed(sd) #seed the generator\r\n n, c = 1,0 #counter of samples taken & break counter\r\n Q_sum, Q_sq_sum = 0,0\r\n x_samples = [] # temp store of x samples\r\n hist_samples = np.zeros(100) # histogram of x_samples added to sequentially\r\n hist_width = 100 / abs(lims[1] - lims[0]) \r\n \r\n while(True):\r\n new_x = trans(rn.rand()) # random number from distributed over pdf\r\n x_samples.append(new_x)\r\n Q = f(new_x)/pdf(new_x) # find new Q value\r\n Q_sum += Q # add to the total\r\n Q_sq_sum += Q**2\r\n n += 1 #increase the counter\r\n \r\n # every 1000 steps after the burn inn\r\n if n % 1000 == 0 and n>5000:\r\n I = Q_sum / n # new estimate guess\r\n \r\n # histogram all samples to reduce storage space\r\n to_hist = np.array(x_samples) * hist_width # scale to a list from 0 to 100\r\n bc = np.bincount( to_hist.astype(int) ) # count the enteries rounded down\r\n hist_samples += bc # add these counts to store\r\n \r\n #find the standard error from vairience\r\n error = np.sqrt(( Q_sq_sum/n - (Q_sum/n)**2) /n)\r\n try:\r\n converge = np.vstack(( converge,[[I,error]] )) # store the values\r\n rel = abs( 1. 
- converge[-1,0] / converge[-2,0]) # relative accuracy\r\n if c >= consec: break # if accurate enough consec times\r\n elif rel < e: c += 1\r\n #if error < e*I: break\r\n except NameError: converge = np.array([[I,error]]) # create store first time\r\n \r\n sy.stdout.write('\\rIteration: %.4g Error %.4g' % (n, error) )\r\n \r\n print('\\nTransformation Monte Carlo found Integral = %.6g' % I,\r\n '+- %.6g' % error, 'after', n, 'samples')\r\n \r\n return converge, hist_samples", "def test_handcrafted_examples(self):\n self.assertTrue(abs(pi(1000000) - 3.14) < 0.01)", "def chi2_is_random(\n data: bytes, \n alpha: int=0.05,\n unit_size_bits: int=8, \n ) -> bool:\n # Build frequency table\n f_obs = []\n for i in range(2**unit_size_bits):\n f_obs.append(0)\n if unit_size_bits == 8:\n for i in range(len(data)):\n f_obs[data[i]] += 1\n else:\n raise Exception(f\"unit_size_bits = {unit_size_bits} not yet implemented\")\n \n return chi2_from_freqs_pval(f_obs) > alpha", "def test_newrand_bounded_rand_int(range_, n_pts):\n # XXX: this test is very seed sensitive: either it is wrong (too strict?)\n # or the wrapped RNG is not uniform enough, at least on some platforms.\n set_seed_wrap(42)\n n_iter = 100\n ks_pvals = []\n uniform_dist = stats.uniform(loc=0, scale=range_)\n # perform multiple samplings to make chance of outlier sampling negligible\n for _ in range(n_iter):\n # Deterministic random sampling\n sample = [bounded_rand_int_wrap(range_) for _ in range(n_pts)]\n res = stats.kstest(sample, uniform_dist.cdf)\n ks_pvals.append(res.pvalue)\n # Null hypothesis = samples come from an uniform distribution.\n # Under the null hypothesis, p-values should be uniformly distributed\n # and not concentrated on low values\n # (this may seem counter-intuitive but is backed by multiple refs)\n # So we can do two checks:\n\n # (1) check uniformity of p-values\n uniform_p_vals_dist = stats.uniform(loc=0, scale=1)\n res_pvals = stats.kstest(ks_pvals, uniform_p_vals_dist.cdf)\n assert res_pvals.pvalue > 0.05, (\n \"Null hypothesis rejected: generated random numbers are not uniform.\"\n \" Details: the (meta) p-value of the test of uniform distribution\"\n f\" of p-values is {res_pvals.pvalue} which is not > 0.05\"\n )\n\n # (2) (safety belt) check that 90% of p-values are above 0.05\n min_10pct_pval = np.percentile(ks_pvals, q=10)\n # lower 10th quantile pvalue <= 0.05 means that the test rejects the\n # null hypothesis that the sample came from the uniform distribution\n assert min_10pct_pval > 0.05, (\n \"Null hypothesis rejected: generated random numbers are not uniform. 
\"\n f\"Details: lower 10th quantile p-value of {min_10pct_pval} not > 0.05.\"\n )", "def testRandomLarge():\n simulateRandom(\n maxCaps=8,\n maxSpecs=12,\n maxResources=100,\n runsPerConfig=100,\n numConfigs=100,\n verifyFunc=checkValid,\n seed=int(time.time())\n )", "def _random_bits():\n while True:\n yield random.choice([0, 255])", "def prob_estimation(n):\n truecount = 0\n for i in range(n):\n test = gen_rand_23()\n if has_duplicates(test):\n truecount += 1\n return truecount", "def test_calc_entropy(self):\n entropy_functions = (m._ENTROPY_FUNCTIONS + [m.calc_entropy])\n \n if sys.version_info.major > 2:\n entropy_functions.remove(m._entropy_counter1)\n \n for entropy in entropy_functions:\n msg = 'Entropy function `%s` failed' % entropy.__name__\n self.assertEqual(entropy([]), 0, msg=msg)\n self.assertEqual(entropy([1]), 0, msg=msg)\n self.assertEqual(entropy([1, 1]), 0, msg=msg)\n self.assertAlmostEqual(entropy([0, 1]), 1, msg=msg)\n self.assertAlmostEqual(entropy([0, 0.5, 1]), np.log2(3), msg=msg)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }