Dataset columns:
  query      string  (length 9 to 9.05k)
  document   string  (length 10 to 222k)
  negatives  list    (19 to 20 items)
  metadata   dict
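Each row pairs a natural-language query (a docstring) with the code document it describes, plus a list of hard-negative code snippets; the metadata field records that the row is meant to be consumed as (query, document, negatives) triplets. The following is a minimal, hypothetical sketch of how one such row could be unpacked into anchor/positive/negative training triplets — the row layout mirrors the columns above, but the function name, variable names, and the toy example row are assumptions for illustration, not part of the dataset.

# Minimal sketch (assumed names): expand one dataset row into training triplets.
# A row is expected to look like:
#   {"query": str, "document": str, "negatives": [str, ...],
#    "metadata": {"objective": {"triplet": [["query", "document", "negatives"]]}}}

def row_to_triplets(row):
    """Return a list of (anchor, positive, negative) tuples for a single row."""
    triplets = []
    # the metadata names which columns play which role in the triplet objective
    for anchor_col, positive_col, negatives_col in row["metadata"]["objective"]["triplet"]:
        anchor = row[anchor_col]        # the docstring query
        positive = row[positive_col]    # the matching code snippet
        for negative in row[negatives_col]:
            triplets.append((anchor, positive, negative))
    return triplets

# Hypothetical usage with a row shaped like the first example below:
example_row = {
    "query": "Unselect the specified `Card`.",
    "document": "def UnselectCard(self, card):\n    self.selec.UnselectCard(card)",
    "negatives": ["def deselect(self, *args):\n    return _coin.SoSelection_deselect(self, *args)"],
    "metadata": {"objective": {"paired": [], "self": [], "triplet": [["query", "document", "negatives"]]}},
}
print(len(row_to_triplets(example_row)))  # one triplet per negative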
Unselect the specified `Card`.
def UnselectCard(self, card): self.selec.UnselectCard(card)
[ "def UnselectCard(self, card):\n if card in self.cards:\n self.cards.remove(card)\n card.Unselect()", "def UnselectAll(self):\n while len(self.cards) > 0:\n c = self.cards[0]\n self.UnselectCard(c)", "def deselect(self, *args):\n return _coin.SoSelection_deselect(self, *args)", "def deselect(self, *args) -> \"void\":\n return _coin.SoSelection_deselect(self, *args)", "def unselect_target(self, box):\n box.unselect()\n self._sel_boxes.remove(box)", "def deSelected(self):\n self.isSelected = False\n selectedSprites.remove(self)", "def OnCardDelete(self, ev):\n card = ev.GetEventObject()\n self.cards.remove(card)\n self.UnselectCard(card)", "def discard(self, card):\n \n self.hand.pop(self.hand.index(card))\n self.cardList.append(card)", "def deselect(self,index):\n self._animalSelect[index] = False", "def unselect(self, game):\n game.tower_buttons.empty()", "def unselect(self, item):\n if item.selected:\n item.selected=False\n self._total_selected-=1\n debug('*** total_selected={}'.format(self._total_selected))", "def uncheck_selected(self, sender, args):\n self._set_states(state=False, selected=True)", "def on_deselect(self):\n for tile in self.divisions:\n tile.group_selected = False\n tile.selected = False", "def removeChoice(self, *args) -> \"void\":\n return _coin.SoVRMLSwitch_removeChoice(self, *args)", "def removeChoice(self, *args):\n return _coin.SoVRMLSwitch_removeChoice(self, *args)", "def removeDeselectionCallback(self, *args) -> \"void\":\n return _coin.SoSelection_removeDeselectionCallback(self, *args)", "def discard_scard(self, card):\n try:\n card = random.choice(self.shand)\n move(card, self.shand, self.survival_discard)\n except:\n logger.info('{} tried to discard a survival card due to Toxin but had none'.format(self.name))", "def remove_card(self, card):\n if card not in self._cards:\n print('you dont have that card')\n self._cards.remove(card) # O(n)", "def removeDeselectionCallback(self, *args):\n return _coin.SoSelection_removeDeselectionCallback(self, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copies every `Card` currently selected to `wx.TheClipboard`.
def CopySelected(self):
    # get the data
    data = []
    for c in self.GetSelection():
        data.append(c.Dump())

    # create our own custom data object
    obj = wx.CustomDataObject("CardList")
    obj.SetData(str([json.dumps(d) for d in data]))

    # write the data to the clipboard
    if wx.TheClipboard.Open():
        wx.TheClipboard.SetData(obj)
        wx.TheClipboard.Close()
[ "def PasteFromClipboard(self, pos=wx.DefaultPosition):\n if wx.TheClipboard.Open():\n # get data\n obj = wx.CustomDataObject(\"CardList\")\n wx.TheClipboard.GetData(obj)\n\n # don't use eval()! Use ast.literal_eval() instead\n data = [json.loads(d) for d in ast.literal_eval(obj.GetData())]\n\n # create new cards with the data\n for d in data:\n # copy all info and set focus to it\n card = self.NewCard(d[\"class\"])\n card.Load(d)\n card.SetFocus()\n\n # default position: a step away from the original\n if pos == wx.DefaultPosition:\n new_pos = [i + self.GetPadding() for i in d[\"pos\"]]\n else:\n new_pos = pos\n \n card.SetPosition(new_pos)\n\n wx.TheClipboard.Close()", "def copy(self):\n if self.tabWidget.currentIndex() == 0:\n clip = QApplication.clipboard()\n for content in self.tableWidget.selectedItems():\n if content.text() is not None:\n clip.setText(content.text())\n else:\n pass", "def tile_copy(self):\n self._ui.clipboard_set( self._tile_set[self.current_tile_num] )", "def copy(self, cut=False):\n files = [QtCore.QUrl.fromLocalFile(\n os.path.join(self.location, self.proxy.itemData(index).get(0)))\n for index in self.view.selectionModel().selectedIndexes()]\n mime_data = self.proxy.mimeData(self.view.selectionModel().\n selectedIndexes())\n if cut:\n data = b'1' # same as QtCore.QByteArray(0, '1')\n mime_data.setData(\"application/x-kde-cutselection\", data)\n data = b'cut'\n mime_data.setData(\"x-special/gnome-copied-files\", data)\n mime_data.setUrls(files)\n clipboard = QtWidgets.QApplication.clipboard()\n clipboard.setMimeData(mime_data)", "def copy(self, select, position):\n if select == []:\n self.status.update('No row selected to copy !')\n return\n select_iid = self.select_to_iid(select)\n self.clipboard = []\n for iid in select_iid:\n data_dict = self.Widget.set(iid) # get row data\n self.clipboard.append([data_dict[column] for column in self.header])", "def copy_pv_list_to_clipboard(self):\n pv_list = [connection.address for connection in self.table_view.model().connections]\n if len(pv_list) == 0:\n return\n\n pvs_to_copy = \" \".join(pv_list)\n clipboard = QApplication.clipboard()\n if platform.system() == 'Linux':\n # Mode Selection is only valid for X11.\n clipboard.setText(pvs_to_copy, clipboard.Selection)\n clipboard.setText(pvs_to_copy, clipboard.Clipboard)", "def getCurrentCopyTypes(self) -> List[ghidra.app.util.ClipboardType]:\n ...", "def __copyAllChat(self):\n txt = self.chatEdit.toPlainText()\n if txt:\n cb = QApplication.clipboard()\n cb.setText(txt)", "def paste(self):\n if self.tabWidget.currentIndex() == 0:\n clip = QApplication.clipboard()\n for content in self.tableWidget.selectedItems():\n row = content.row()\n col = content.column()\n if content.text() is not None:\n self.tableWidget.setItem(row, col, QTableWidgetItem(str(clip.text())))\n self.isChanged = True\n else:\n pass", "def copyToClipboard(data, type=CF_TEXT):\n#-------------------------------------------------------------------------------\n OpenClipboard()\n EmptyClipboard()\n SetClipboardData(type, data)\n CloseClipboard()", "def _copyToClip(self):\n if self.path is not None:\n self.clipboard_clear()\n self.clipboard_append(self.path)", "def _copy_to_clipboard(value):\n bus = dbus.SessionBus()\n clipboard = bus.get_object('org.kde.klipper', '/klipper')\n clipboard.setClipboardContents(value)", "def set_clipboard():\n clipboard.copy(zoom_details[ID])", "def cut(self):\n if self.tabWidget.currentIndex() == 0:\n clip = QApplication.clipboard()\n for content in 
self.tableWidget.selectedItems():\n row = content.row()\n col = content.column()\n if content.text() is not None:\n clip.setText(content.text())\n self.tableWidget.setItem(row, col, QTableWidgetItem(str()))\n self.isChanged = True\n else:\n pass", "def copy(self):\n\n cards = [None]*len(self)\n for i in range(len(self)):\n cards[i]=Card('').fromstring(str(self[i]))\n return CardList(cards)", "def copy(self):\n self.focus()\n self.dispatch('Copy')\n return self", "def on_copyPreviewButton_clicked(self):\n QApplication.clipboard().setPixmap(self.preview.pixmap())", "def cmd_copy(self, name):\n if not settings.dbus_support:\n msg = 'Dbus support is needed for copy.'\n raise pim_errors.CommandNotSupportedError(msg)\n if name not in self.infocollection:\n raise pim_errors.ItemDoesNotExistError\n item = self.infocollection[name]\n _copy_to_clipboard(item.value)\n return True", "def cards(self):\n\t\treturn [btn.card for btn in self._buttonsSelected]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pastes every `Card` currently in `wx.TheClipboard`.
def PasteFromClipboard(self, pos=wx.DefaultPosition):
    if wx.TheClipboard.Open():
        # get data
        obj = wx.CustomDataObject("CardList")
        wx.TheClipboard.GetData(obj)

        # don't use eval()! Use ast.literal_eval() instead
        data = [json.loads(d) for d in ast.literal_eval(obj.GetData())]

        # create new cards with the data
        for d in data:
            # copy all info and set focus to it
            card = self.NewCard(d["class"])
            card.Load(d)
            card.SetFocus()

            # default position: a step away from the original
            if pos == wx.DefaultPosition:
                new_pos = [i + self.GetPadding() for i in d["pos"]]
            else:
                new_pos = pos

            card.SetPosition(new_pos)

        wx.TheClipboard.Close()
[ "def CopySelected(self):\n # get the data\n data = []\n for c in self.GetSelection():\n data.append(c.Dump())\n\n # create our own custom data object\n obj = wx.CustomDataObject(\"CardList\")\n obj.SetData(str([json.dumps(d) for d in data]))\n\n # write the data to the clipboard\n if wx.TheClipboard.Open():\n wx.TheClipboard.SetData(obj)\n wx.TheClipboard.Close()", "def paste(self):\n if self.tabWidget.currentIndex() == 0:\n clip = QApplication.clipboard()\n for content in self.tableWidget.selectedItems():\n row = content.row()\n col = content.column()\n if content.text() is not None:\n self.tableWidget.setItem(row, col, QTableWidgetItem(str(clip.text())))\n self.isChanged = True\n else:\n pass", "def paste(self):\n clipboard = QtWidgets.QApplication.clipboard()\n # check if cut or copy\n # x-kde-cutselection: is 1 if cut else 0\n # x-special/gnome-copied-files: has cut or copy mentioned\n logger.debug(clipboard.mimeData().formats())\n gnome_op = clipboard.mimeData().data(\n 'x-special/gnome-copied-files').split(b'\\n')[0]\n gnome_cut = True if gnome_op == b'cut'else False\n kde_op = clipboard.mimeData().data('application/x-kde-cutselection')\n kde_cut = True if kde_op == b'1' else False\n cut = True if kde_cut or gnome_cut else False\n logger.debug(f\"Files were cut: {cut}\")\n urls = [QtCore.QUrl.toLocalFile(url)\n for url in clipboard.mimeData().urls()]\n logger.debug(f\"Paste {urls}\")\n if not urls:\n return\n\n if cut:\n act = \"move\"\n else:\n act = \"copy\"\n self.t = threading.Thread(target=self.copier,\n args=(act, urls, self.location))\n self.t.start()", "def copy(self):\n if self.tabWidget.currentIndex() == 0:\n clip = QApplication.clipboard()\n for content in self.tableWidget.selectedItems():\n if content.text() is not None:\n clip.setText(content.text())\n else:\n pass", "def tile_copy(self):\n self._ui.clipboard_set( self._tile_set[self.current_tile_num] )", "def tile_paste(self):\n try:\n self._tile_set[self.current_tile_num].from_str(self._ui.clipboard_get() )\n self._tile_set.modified=True\n except Exception as err:\n print(err)\n traceback.print_exc()\n self._ui.showerror(\"Unable to paste as tile\")\n self._ui.update_tile(self._tlayer, self._tile_set,\n self.current_tile_num, self.current_pal)", "def cut_to_clipboard(self, widget, data=None):\n\t\t#print \"Copying text\"\n\t\tbuff = self._get_buffer()\n\t\tbuff.cut_clipboard(self.clipboard, True)", "def __copyAllChat(self):\n txt = self.chatEdit.toPlainText()\n if txt:\n cb = QApplication.clipboard()\n cb.setText(txt)", "def copyToClipboard(data, type=CF_TEXT):\n#-------------------------------------------------------------------------------\n OpenClipboard()\n EmptyClipboard()\n SetClipboardData(type, data)\n CloseClipboard()", "def getCurrentCopyTypes(self) -> List[ghidra.app.util.ClipboardType]:\n ...", "def on_paste(self, sender, arg=None):\n buf = self.get_active_buffer()\n if buf is not None:\n buf.paste_clipboard(sender, arg)", "def _copy_to_clipboard(value):\n bus = dbus.SessionBus()\n clipboard = bus.get_object('org.kde.klipper', '/klipper')\n clipboard.setClipboardContents(value)", "def cut(self):\n if self.tabWidget.currentIndex() == 0:\n clip = QApplication.clipboard()\n for content in self.tableWidget.selectedItems():\n row = content.row()\n col = content.column()\n if content.text() is not None:\n clip.setText(content.text())\n self.tableWidget.setItem(row, col, QTableWidgetItem(str()))\n self.isChanged = True\n else:\n pass", "def copy_paste(copy_ws, paste_ws, entries):\n for copyfrom, pasteto in 
entries:\n _copy_paste(copy_ws, paste_ws, copyfrom, pasteto)", "async def copypasta(self, ctx, index: int, *, name=None):\n copy_pasta = self.copypastas[index]\n category, copypastas = copy_pasta['category'], copy_pasta['copypastas']\n pasta = random.choice(list(copypastas.values())) if name is None else copypastas[name.title()]\n\n embed = discord.Embed(title=f\"{category} {name}\", description=pasta, colour=0x00FF00)\n await ctx.send(embed=embed)", "def paste_items(self, model, row):\n\n\t\titem_list = self.__session_model.get_clipboard()\n\t\t\n\t\told_items = model.get_item_list()\n\t\tmodel.clear_item_list()\n\n\t\told_items[row+1:row+1] = item_list\n\n\t\tfor item in old_items:\n\t\t\tmodel.add_item(item)\n\n\t\tself.__view.refresh_item_list(jump=False)", "def paste_from_clipboard(self):\n to = self.get_current_path()\n if os.path.isfile(to):\n to = os.path.abspath(os.path.join(to, os.pardir))\n mime = QtWidgets.QApplication.clipboard().mimeData()\n\n paste_operation = None\n if mime.hasFormat(self._UrlListMimeData.format(copy=True)):\n paste_operation = True\n elif mime.hasFormat(self._UrlListMimeData.format(copy=False)):\n paste_operation = False\n if paste_operation is not None:\n self._paste(\n self._UrlListMimeData.list_from(mime, copy=paste_operation),\n to, copy=paste_operation)", "def _copyToClip(self):\n if self.path is not None:\n self.clipboard_clear()\n self.clipboard_append(self.path)", "def do_paste(self, event):\n # find where the user has clicked\n col_ind = self.GetGridCursorCol()\n row_ind = self.GetGridCursorRow()\n # read in clipboard text\n text_df = pd.read_clipboard(header=None, sep='\\t').fillna('')\n # add extra rows if need to accomadate clipboard text\n row_length_diff = len(text_df) - (len(self.row_labels) - row_ind)\n if row_length_diff > 0:\n for n in range(row_length_diff):\n self.add_row()\n # ignore excess columns if present\n col_length_diff = len(text_df.columns) - (len(self.col_labels) - col_ind)\n if col_length_diff > 0:\n text_df = text_df.iloc[:, :-col_length_diff].copy()\n # go through copied text and parse it into the grid rows\n for label, row_data in text_df.iterrows():\n col_range = list(range(col_ind, col_ind + len(row_data)))\n if len(row_data) > 1:\n cols = list(zip(col_range, row_data.index))\n for column in cols:\n value = row_data[column[1]]\n this_col = column[0]\n self.SetCellValue(row_ind, this_col, str(value))\n else:\n value = row_data[0]\n self.SetCellValue(row_ind, col_ind, str(value))\n row_ind += 1\n # could instead use wxPython clipboard here\n # see old git history for that\n self.size_grid()\n event.Skip()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a list of every `CardGroup` that contains `card`.
def GetContainingGroups(self, card): return [g for g in self.groups if card in g.GetMembers()]
[ "def get_same_month_cards(self, card: Card) -> List[Card]:\n object_month = card.month\n cards = []\n for field_card in self.cards:\n if field_card.month == object_month:\n cards.append(field_card)\n return cards", "def get_cards():\n Card = namedtuple('Card', 'rank suit')\n ranks = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']\n suits = ['spades', 'hearts', 'diamonds', 'clubs']\n full_deck = [Card(suit, rank) for suit in suits for rank in ranks]\n return full_deck", "def iter_card_sets(self):\n for card_set in self.root.all_card_sets():\n yield card_set", "def get_duplicate_cards(cards):\n return [card for card in cards if names(cards).count(card['name']) > 1]", "def get_cards(self):\n for c in sorted(self.cards, key=lambda card: card.data['house']):\n for i in range(self.data['_links']['cards'].count(c.key)):\n c.data['is_legacy'] = c.key in self.data.get('set_era_cards',{}).get('Legacy',[])\n c.data['bonus_icons'] = []\n for bonus_card in self.data.get(\"bonus_icons\", []):\n if bonus_card[\"card_id\"] == c.key:\n c.data['bonus_icons'] = bonus_card['bonus_icons']\n yield c", "def findCardsByNum(self, number):\n if self.verbose:\n print(self.name + \" finding all cards of number \" + str(number))\n if self.log is not None:\n self.log.write(self.name + \" finding all cards of number \" + str(number) + \"\\n\")\n result = []\n for card in self.hand:\n if card.get_number() == number:\n result.append(card)\n return result", "def get_cards(self, expansion=None):\n print(\"getting cards\")\n session = Session()\n cards = session.query(Card).filter(\n #Card.data['is_enhanced']=='false',\n Card.data['is_maverick']=='false')\n if expansion:\n cards = cards.filter(Card.data['expansion']==str(expansion))\n cards = cards.all()\n card_expansion = {}\n for card in cards:\n key = card.name+\";\"+str(card.data['expansion'])+\";\"+card.data['rarity']\n # Prefer non-enhanced non-maverick card that is in current_set\n if key in card_expansion:\n if card.data['is_enhanced']:\n continue\n if not card.is_from_current_set:\n continue\n if card.data['is_maverick']:\n continue\n card_expansion[key] = card\n print([card.data['expansion'] for card in card_expansion.values() if card.name=='Mookling'])\n print(len(cards))\n print(len(card_expansion.values()))\n return card_expansion.values()", "def get_same_suit_cards(cards):\n suit_cards_dict = dict()\n for card in cards:\n if card.suit not in suit_cards_dict:\n suit_cards_dict[card.suit] = list()\n\n suit_cards_dict[card.suit].append(card)\n\n return suit_cards_dict", "def create_all_cards(self):\r\n all_cards = []\r\n\r\n for value in Card.values:\r\n for symbols in self.choose_symbols_color():\r\n all_cards.append(f'{value} {symbols}')\r\n return all_cards", "def get_cards(soup):\n return soup.findAll(\"div\", {\"class\": \"card\"})", "def get_all_cards(self):\n\t\tquery_str = [\n\t\t\t\"SELECT * FROM cards;\"\n\t\t]\n\t\tself.c.execute(\n\t\t\tstr.join(\" \", query_str)\n\t\t)\n\t\tcardsData = self.c.fetchall()\n\t\tcards = []\n\t\tfor row in cardsData:\n\t\t\tcard = RFIDCard(str(row[0]), row[2], row[1] == 1)\n\t\t\tcards.append(card)\n\t\treturn tuple(cards)", "def get_contest_groups(self):\n groups = db.get_items_by_data_type(DataType.DATA_TYPE_CONTEST_GROUPING)\n return_val = []\n for g in groups:\n\n group_contests = [c for c in self.contests if\n c.parents(DataType.DATA_TYPE_CONTEST_GROUPING, lambda x: x.value == g.value)]\n\n return_val.append({'group': g.value,\n 'expanded': False,\n 'contests': group_contests})\n\n return 
return_val", "def cards(self):\n\t\treturn [btn.card for btn in self._buttonsSelected]", "def get_consecutive_cards(cards):\n consecutive_cards_set_list = list()\n cards = sorted(cards, key=attrgetter('rank'))\n for card in cards:\n added = False\n for index, cc_list in enumerate(consecutive_cards_set_list):\n cc_ranks_list = sorted([card_.rank for card_ in cc_list])\n if card.rank not in cc_ranks_list and \\\n (card.rank + 1 == cc_ranks_list[0] or card.rank - 1 == cc_ranks_list[-1]):\n consecutive_cards_set_list[index].append(card)\n added = True\n break\n\n if not added:\n consecutive_cards_set_list.append([card])\n\n return consecutive_cards_set_list", "def build(self):\n cards = []\n # for each suit\n for s in self.SUITS:\n # for each rank\n for r in self.RANKS:\n # create a new card\n card = Card(s, r)\n # set's the image src\n card.set_image_src(CARD_IMAGE_SRC)\n # set the back image src\n card.set_back_image_src(CARD_BACK_IMAGE_SRC)\n # set's the card size\n card.set_size(CARD_IMAGE_SIZE)\n # add the new card into the list\n cards.append(card)\n return cards", "def get_played_cards(cards, progress):\n return [card for card in cards if has_been_played(card, progress)]", "def _unique_cards(self, game_mode='ranked', game_threshold = 5, formatted = True):\n cards = self.generate_card_stats(game_mode, game_threshold).reset_index()\n cards = cards['card'].unique().tolist()\n return cards", "def fetch_cards_of_suit(self, suit):\n\n def sort_by_value(card):\n \"\"\" Returns the value of the card based on it's value name \"\"\"\n return VALUE_BY_NAME[card.value]\n\n cards_of_suit = [card for card in self.cards if suit == card.suit]\n\n # Sort for easy viewing.\n cards_of_suit.sort(key=sort_by_value)\n return cards_of_suit", "def all_cards(self):\n for i in range(len(__class__.card_suits) * len(__class__.card_values)):\n suit = __class__.card_suits[i // len(__class__.card_values)]\n value = __class__.card_values[i % len(__class__.card_values)]\n yield __class__(suit=suit, value=value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new `CardGroup` with `cards` as members.
def NewGroup(self, cards=[]): self.groups.append(card.CardGroup(label=len(self.groups), members=cards))
[ "def create_Deck(self):\n print('Creating Deck')\n for a in [\"Heart\", \"Diamond\", \"Club\", \"Spade\"]:\n for x in range(2, 11):\n self.cards.append(Card(a, x, x))\n self.cards.append(Card(a, \"A\", 11))\n self.cards.append(Card(a, \"J\", 10))\n self.cards.append(Card(a, \"K\", 10))\n self.cards.append(Card(a, \"Q\", 10))", "def create_deck():\n suits = [0, 1, 2, 3]\n ranks = ['two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', 'jack', 'queen', 'king', 'ace']\n cards = [Card(suit=suit, rank=rank) for rank in ranks for suit in suits]\n Card.objects.bulk_create(cards)", "def make_cards(self):\n\n for suit in self._suits:\n for card_num in xrange(1, self._cards_per_suit + 1):\n left = CARD_WIDTH * (card_num - 1)\n top = CARD_HEIGHT * self._suits.index(suit)\n rect = (left, top, CARD_WIDTH, CARD_HEIGHT)\n image = self._cards_sprite.subsurface(rect)\n rect = image.get_rect()\n card = Card(card_num, suit, image, rect)\n self._cards[str(card)] = card\n self.add(card)\n #card back last row, col = 4,1\n subrect = CARD_WIDTH, CARD_HEIGHT * 4, CARD_WIDTH, CARD_HEIGHT\n image = self._cards_sprite.subsurface(subrect)\n rect = image.get_rect()\n self._cardback = Card(14, 'cardback', image, rect)\n #card back", "def build_deck(self):\n suits = ['Hearts', 'Diamonds', 'Spades', 'Clubs']\n ranks = {\n '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, '10':10, 'J':10, 'Q':10, 'K':10, 'A':11,\n }\n for suit in suits:\n for rank, value in ranks.items():\n card = Card(rank, value, suit)\n self.cards.append(card)", "def build(self):\n cards = []\n # for each suit\n for s in self.SUITS:\n # for each rank\n for r in self.RANKS:\n # create a new card\n card = Card(s, r)\n # set's the image src\n card.set_image_src(CARD_IMAGE_SRC)\n # set the back image src\n card.set_back_image_src(CARD_BACK_IMAGE_SRC)\n # set's the card size\n card.set_size(CARD_IMAGE_SIZE)\n # add the new card into the list\n cards.append(card)\n return cards", "def initiate_deck(self):\n for suit in self.suits:\n for i in range(1, 14):\n new_card = Card(i, suit)\n self.cards.append(new_card)", "def _create_new_deck(self,player):\n\t\tdeck = [Card(character,number,player) for character in [\"A\",\"B\",\"C\",\"D\",\"E\"] for number in range(1,6)]\n\t\trandom.shuffle(deck)\n\t\treturn deck", "def _create_groups(self, groups):\n\n acls = self.mumblectl.getACL(self.settings['mumble_server_id'], 0)\n glist = []\n for mgroup in acls[1]:\n glist.append(mgroup.name)\n\n newgroups = False\n for agroup in groups:\n if not str(agroup.name.replace(' ', '').lower()) in glist:\n group = self.mur.Group()\n group.name = str(agroup.name.replace(' ', '').lower())\n group.members = []\n group.add = []\n group.remove = []\n group.inheritable = True\n group.inherit = True\n group.inherited = False\n acls[1].append(group)\n newgroups = True \n\n if newgroups:\n self.mumblectl.setACL(self.settings['mumble_server_id'], 0, acls[0], acls[1], acls[2])\n\n return acls", "def new_deck(n_sets=4):\n card_set = [Card(rank, suit) for _ in range(n_sets) for rank in Rank for suit in Suit]\n return Deck(card_set)", "def create_cards():\n return {\n \"A\": (0, 11),\n \"2\": 2,\n \"3\": 3,\n \"4\": 4,\n \"5\": 5,\n \"6\": 6,\n \"7\": 7,\n \"8\": 8,\n \"9\": 9,\n \"10\": 10,\n \"J\": 10,\n \"Q\": 10,\n \"K\": 10\n }", "def create_instances(self, cards_dict):\n cards = []\n for key in cards_dict:\n cards.append(CardBacks(self.config, name=key, info=cards_dict[key]))\n return cards", "def create_group():\n qgis_groups = get_group()\n for i, g_item in 
enumerate(reversed(PROJECT_GROUP[:2])):\n if g_item not in groups_to_array(qgis_groups):\n qgis_groups.insertGroup(i,g_item)\n rsx_group = qgis_groups.findGroup(PROJECT_GROUP[0])\n if rsx_group is not None:\n for item in PROJECT_GROUP[2:]:\n if qgis_groups.findGroup(item) is None:\n rsx_group.addGroup(item)", "def create_deck(self):\n deck = Deck()\n return deck", "async def channel_group_create(self, *users):\n assert _assert__channel_group_create__users(users)\n \n user_ids = set()\n for user in users:\n user_id = get_user_id(user)\n user_ids.add(user_id)\n \n user_ids.add(self.id)\n \n data = {'recipients': user_ids}\n data = await self.http.channel_group_create(self.id, data)\n return Channel.from_data(data, self, 0)", "def create(self):\n path = '/projects/%s/groups/' % (self.client.project,)\n info = self.client._connection.api_request(\n method='POST', path=path, data=self._to_dict())\n self._set_properties_from_dict(info)", "def _init_group_list(self, group_list, count):\n for i in range(count):\n group_list.append(TileContainer())", "async def create_group(self, userid, gameid):\n raise NotImplementedError()", "def add_card(cls, card, comment=''):\n sid = integer(card, 1, 'sid')\n if card.field(5) in [None, '']:\n nodes = [integer(card, 2, 'G1'),]\n components = [components_or_blank(card, 3, 'C1', 0)]\n enforced = [double_or_blank(card, 4, 'D1', 0.0)]\n else:\n nodes = [\n integer(card, 2, 'G1'),\n integer(card, 5, 'G2'),\n ]\n # :0 if scalar point 1-6 if grid\n components = [components_or_blank(card, 3, 'C1', 0),\n components_or_blank(card, 6, 'C2', 0)]\n enforced = [double_or_blank(card, 4, 'D1', 0.0),\n double_or_blank(card, 7, 'D2', 0.0)]\n return SPCD(sid, nodes, components, enforced, comment=comment)", "def list():\n click.echo(\"Create cards from the current membership list\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Scroll in both directions so that `card` is fully in view.
def ScrollToCard(self, card):
    rect = card.GetRect()
    pt = rect.GetBottomRight()
    pt = self.CalcUnscrolledPosition(pt)
    self.ScrollToPoint(pt)

    # call rect again since we may have scrolled the window
    rect = card.GetRect()
    pt = rect.GetTopLeft()
    pt = self.CalcUnscrolledPosition(pt)
    self.ScrollToPoint(pt)
[ "def scrolls(self , scroll):\n if(scroll.scroll_y <= MainWindow.distance):\n operations.load_more() \n scroll.scroll_to(content.ArticlesContainerCopy.articles_container_copy.children[content.Data.limit] , padding=0, animate=True)", "def scroll(self, dx, dy):\n self._Camera_Rect.move_ip(dx,dy)\n\n # Not updating the clipping position of the Views,\n # as multiple Cameras might point at the same View.", "def scrollIntoView(self, event):\r\n\r\n canvas = self.canvas\r\n widget_top = event.widget.winfo_y()\r\n widget_bottom = widget_top + event.widget.winfo_height()\r\n canvas_top = canvas.canvasy(0)\r\n canvas_bottom = canvas_top + canvas.winfo_height()\r\n\r\n if widget_bottom >= canvas_bottom:\r\n delta = int(canvas_bottom - widget_bottom)\r\n canvas.yview_scroll(-delta, \"units\")\r\n elif widget_top < canvas_top:\r\n delta = int(widget_top - canvas_top)\r\n canvas.yview_scroll(delta, \"units\")", "def scroll(self):\n self.x = self.x - 1\n if(self.x + 600 == 0):\n self.x = self.reset_x\n return False\n return True", "def scroll_to(self, element):\n element.scroll_into_view()", "def scroll_down_half_view(self):\n\n window_size = self.driver.get_window_size() # this will give You a dictionary\n start_x = window_size[\"width\"] * 0.25\n start_y = window_size[\"height\"] * 0.15\n if window_size[\"height\"] <= 800:\n end_y = window_size[\"height\"] * 0.4\n else:\n end_y = window_size[\"height\"] * 0.5\n logging.info(\"scroll down half of the screen\")\n sleep(1)\n self.driver.swipe(start_x, end_y, start_x, start_y, 3000) # each swipe is scrolling one screen\n sleep(1)", "def scroll_down_one_view(self):\n\n window_size = self.driver.get_window_size() # this will give You a dictionary\n start_x = window_size[\"width\"] * 0.25\n start_y = window_size[\"height\"] * 0.15\n if window_size[\"height\"] <= 800:\n end_y = window_size[\"height\"] * 0.6\n else:\n end_y = window_size[\"height\"] * 0.8\n logging.info(\"scroll down only one screen\")\n sleep(2)\n self.driver.swipe(start_x, end_y, start_x, start_y, 3000) # each swipe is scrolling one screen\n sleep(1)", "def _scroll(self):\n list_height = self.window_view.bounds.height - 2\n if self.selected < self.scroll_pos:\n self.scroll_pos = self.selected\n elif self.selected > self.scroll_pos + list_height - 1:\n self.scroll_pos = self.selected - list_height + 1\n button_y = 0\n for i in range(len(self.options)):\n if self.scroll_pos <= i < (self.scroll_pos + list_height):\n self.buttons[i].is_hidden = False\n self.buttons[i].layout_options = self.buttons[i].layout_options.with_updates(top=button_y)\n button_y += 1\n else:\n self.buttons[i].is_hidden = True\n self.buttons[i].superview.set_needs_layout()\n self.window_view.needs_layout = True", "def scroll(self, direction):\n scroll_params = {\n \"direction\": direction\n }\n self.execute_script(\"mobile: scroll\", scroll_params)", "def scroll_buffer(self, up=True, reset=False, lines=None):\n if reset:\n self.scrolling_up = 0\n self.set_term_cursor()\n return\n\n if lines is None:\n lines = self.height // 2\n\n if not up:\n lines = -lines\n\n maxscroll = len(self.scrollback_buffer)\n self.scrolling_up += lines\n\n if self.scrolling_up > maxscroll:\n self.scrolling_up = maxscroll\n elif self.scrolling_up < 0:\n self.scrolling_up = 0\n\n self.set_term_cursor()", "def _scroll(self, *args):\n for current_list in self.lists:\n current_list.yview(*args)\n return 'break'", "def scroll(self):\n if self.txtbox.atBottom:\n statemgr.switch(self.old_state_name)\n else:\n self.txtbox.scrollDown()", "def 
mouse_scroll(self, event):\r\n\r\n if self.vsb.visible:\r\n if OS == \"Darwin\":\r\n new_delta = -1 * event.delta\r\n else:\r\n new_delta = -1 * int(event.delta / 120)\r\n self.canvas.yview_scroll(new_delta, \"units\")", "def scroll_up_one_view(self):\n\n window_size = self.driver.get_window_size() # this will give You a dictionary\n start_x = window_size[\"width\"] * 0.25\n start_y = window_size[\"height\"] * 0.15\n end_y = window_size[\"height\"] * 0.8\n logging.info(\"scroll up only one screen\")\n sleep(2)\n self.driver.swipe(start_x, start_y, start_x, end_y, 3000) # each swipe is scrolling one screen\n sleep(1)", "def scroll_down():\n last_height = _DRIVER.execute_script(\"return document.body.scrollHeight\")\n while True:\n _DRIVER.execute_script(f\"window.scrollBy(0,{last_height})\")\n sleep(1)\n new_height = _DRIVER.execute_script(\"return document.body.scrollHeight\")\n if new_height == last_height:\n break\n last_height = new_height\n _DRIVER.execute_script(f\"window.scrollTo(0,{last_height})\")\n sleep(1 / 2)", "def startScrolling(self):\n pass", "def mouse_wheel(event):\n canvas.yview_scroll(-1*(event.delta/120), \"units\")", "def _scroll_to(self, start_row, stop_row):\n mark_top = self.to_normalized(start_row)\n mark_bottom = self.to_normalized(stop_row)\n\n view_size = self.to_normalized(self.widget_rows())\n mark_size = mark_bottom - mark_top\n\n gap = max(0.2 * view_size, view_size - mark_size)\n self._yview(True, 'moveto', str(max(0.0, mark_top - 0.5 * gap)))", "def scroll_to(element):\n _DRIVER.execute_script(\"arguments[0].scrollIntoView();\", element)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Scroll in both directions so that `pt` is in view. `Deck.ScrollToCard` basically just calls this function twice, on a `Card`'s corner points.
def ScrollToPoint(self, pt):
    step = self.SCROLL_STEP

    # get the current rect in view, in pixels
    # coordinates relative to underlying content size
    view = [k * step for k in self.GetViewStart()]
    sz = self.GetClientSize()
    rect = wx.Rect(view[0], view[1], sz.width, sz.height)

    # point we're scrolling to (already in pixels)
    # relative to content size

    # nothing to do
    if rect.Contains(pt):
        return

    # scroll the point into view
    scroll = False
    pad = self.GetPadding()

    # if one of the arguments is wx.DefaultCoord,
    # we will not scroll in that direction
    ysc = wx.DefaultCoord
    xsc = wx.DefaultCoord

    # remember y coordinate grows downward
    if pt.x >= rect.right or pt.x <= rect.left:
        scroll = True
        xsc = pt.x - pad   # where we want to go
        xsc /= step        # in scroll units
    if pt.y <= rect.top or pt.y >= rect.bottom:
        scroll = True
        ysc = pt.y - pad   # where we want to go
        ysc /= step        # in scroll units

    if scroll:
        # will scroll as much as it's possible
        # i.e., pt will not necessarily be in the top left corner after scrolling
        # but it will surely be inside the view
        self.Scroll(xsc, ysc)
[ "def ScrollToCard(self, card):\n rect = card.GetRect()\n pt = rect.GetBottomRight()\n pt = self.CalcUnscrolledPosition(pt)\n self.ScrollToPoint(pt)\n\n # call rect again since we may have scrolled the window\n rect = card.GetRect()\n pt = rect.GetTopLeft() \n pt = self.CalcUnscrolledPosition(pt)\n self.ScrollToPoint(pt)", "def scroll(self, dx, dy):\n self._Camera_Rect.move_ip(dx,dy)\n\n # Not updating the clipping position of the Views,\n # as multiple Cameras might point at the same View.", "def scrollTo(self,p=None):\n\n # __pychecker__ = '--no-argsused' # event not used.\n # __pychecker__ = '--no-intdivide' # suppress warning about integer division.\n\n c = self.c ; frame = c.frame ; trace = True\n if not p or not c.positionExists(p):\n p = c.currentPosition()\n if not p or not c.positionExists(p):\n if trace: g.trace('current p does not exist',p)\n p = c.rootPosition()\n if not p or not c.positionExists(p):\n if trace: g.trace('no root position')\n return\n try:\n h1 = self.yoffset(p)\n if self.center_selected_tree_node: # New in Leo 4.4.3.\n #@ << compute frac0 >>\n #@+node:ekr.20071001092453.62:<< compute frac0 >>\n # frac0 attempt to put the \n scrollRegion = self.canvas.cget('scrollregion')\n geom = self.canvas.winfo_geometry()\n\n if scrollRegion and geom:\n scrollRegion = scrollRegion.split(' ')\n # g.trace('scrollRegion',repr(scrollRegion))\n htot = int(scrollRegion[3])\n wh,junk,junk = geom.split('+')\n junk,h = wh.split('x')\n if h: wtot = int(h)\n else: wtot = 500\n # g.trace('geom',geom,'wtot',wtot)\n if htot > 0.1:\n frac0 = float(h1-wtot/2)/float(htot)\n frac0 = max(min(frac0,1.0),0.0)\n else:\n frac0 = 0.0\n else:\n frac0 = 0.0 ; htot = wtot = 0\n #@-node:ekr.20071001092453.62:<< compute frac0 >>\n #@nl\n delta = abs(self.prevMoveToFrac-frac0)\n # g.trace(delta)\n if delta > 0.0:\n self.prevMoveToFrac = frac0\n self.canvas.yview(\"moveto\",frac0)\n if trace: g.trace(\"frac0 %1.2f %3d %3d %3d\" % (frac0,h1,htot,wtot))\n else:\n last = c.lastVisible()\n nextToLast = last.visBack(c)\n h2 = self.yoffset(last)\n #@ << compute approximate line height >>\n #@+node:ekr.20071001092453.63:<< compute approximate line height >>\n if nextToLast: # 2/2/03: compute approximate line height.\n lineHeight = h2 - self.yoffset(nextToLast)\n else:\n lineHeight = 20 # A reasonable default.\n #@-node:ekr.20071001092453.63:<< compute approximate line height >>\n #@nl\n #@ << Compute the fractions to scroll down/up >>\n #@+node:ekr.20071001092453.64:<< Compute the fractions to scroll down/up >>\n data = frame.canvas.leo_treeBar.get() # Get the previous values of the scrollbar.\n try: lo, hi = data\n except: lo,hi = 0.0,1.0\n\n # h1 and h2 are the y offsets of the present and last nodes.\n if h2 > 0.1:\n frac = float(h1)/float(h2) # For scrolling down.\n frac2 = float(h1+lineHeight/2)/float(h2) # For scrolling up.\n frac2 = frac2 - (hi - lo)\n else:\n frac = frac2 = 0.0 # probably any value would work here.\n\n frac = max(min(frac,1.0),0.0)\n frac2 = max(min(frac2,1.0),0.0)\n #@nonl\n #@-node:ekr.20071001092453.64:<< Compute the fractions to scroll down/up >>\n #@nl\n if frac <= lo: # frac is for scrolling down.\n if self.prevMoveToFrac != frac:\n self.prevMoveToFrac = frac\n self.canvas.yview(\"moveto\",frac)\n if trace: g.trace(\"frac %1.2f %3d %3d %1.2f %1.2f\" % (frac, h1,h2,lo,hi))\n elif frac2 + (hi - lo) >= hi: # frac2 is for scrolling up.\n if self.prevMoveToFrac != frac2:\n self.prevMoveToFrac = frac2\n self.canvas.yview(\"moveto\",frac2)\n if trace: g.trace(\"frac2 1.2f %3d %3d %1.2f 
%1.2f\" % (frac2,h1,h2,lo,hi))\n\n if self.allocateOnlyVisibleNodes:\n pass ### self.canvas.after_idle(self.idle_second_redraw)\n\n c.setTopVnode(p) # 1/30/04: remember a pseudo \"top\" node.\n\n except:\n g.es_exception()", "def scrollIntoView(self, event):\r\n\r\n canvas = self.canvas\r\n widget_top = event.widget.winfo_y()\r\n widget_bottom = widget_top + event.widget.winfo_height()\r\n canvas_top = canvas.canvasy(0)\r\n canvas_bottom = canvas_top + canvas.winfo_height()\r\n\r\n if widget_bottom >= canvas_bottom:\r\n delta = int(canvas_bottom - widget_bottom)\r\n canvas.yview_scroll(-delta, \"units\")\r\n elif widget_top < canvas_top:\r\n delta = int(widget_top - canvas_top)\r\n canvas.yview_scroll(delta, \"units\")", "def Move(self, pt):\n pt2 = pt\n if self.window:\n pt2 = self.window.ClientToScreen(pt)\n self.SetPosition(pt2 - self.hotspot)", "def scroll_to(self, element):\n element.scroll_into_view()", "def scroll_buffer(self, up=True, reset=False, lines=None):\n if reset:\n self.scrolling_up = 0\n self.set_term_cursor()\n return\n\n if lines is None:\n lines = self.height // 2\n\n if not up:\n lines = -lines\n\n maxscroll = len(self.scrollback_buffer)\n self.scrolling_up += lines\n\n if self.scrolling_up > maxscroll:\n self.scrolling_up = maxscroll\n elif self.scrolling_up < 0:\n self.scrolling_up = 0\n\n self.set_term_cursor()", "def _scroll_to(self, start_row, stop_row):\n mark_top = self.to_normalized(start_row)\n mark_bottom = self.to_normalized(stop_row)\n\n view_size = self.to_normalized(self.widget_rows())\n mark_size = mark_bottom - mark_top\n\n gap = max(0.2 * view_size, view_size - mark_size)\n self._yview(True, 'moveto', str(max(0.0, mark_top - 0.5 * gap)))", "def scroll(self):\n self.x = self.x - 1\n if(self.x + 600 == 0):\n self.x = self.reset_x\n return False\n return True", "def scroll(self, x, y):\n assert isinstance(x, _INTTYPES), \"x must be an integer, got %s\" % repr(x)\n assert isinstance(y, _INTTYPES), \"y must be an integer, got %s\" % repr(x)\n def getSlide(x, length):\n \"\"\"get the parameters needed to scroll the console in the given\n direction with x\n returns (x, length, srcx)\n \"\"\"\n if x > 0:\n srcx = 0\n length -= x\n elif x < 0:\n srcx = abs(x)\n x = 0\n length -= srcx\n else:\n srcx = 0\n return x, length, srcx\n def getCover(x, length):\n \"\"\"return the (x, width) ranges of what is covered and uncovered\"\"\"\n cover = (0, length) # everything covered\n uncover = None # nothing uncovered\n if x > 0: # left side uncovered\n cover = (x, length - x)\n uncover = (0, x)\n elif x < 0: # right side uncovered\n x = abs(x)\n cover = (0, length - x)\n uncover = (length - x, x)\n return cover, uncover\n\n width, height = self.getSize()\n if abs(x) >= width or abs(y) >= height:\n return self.clear() # just clear the console normally\n\n # get the ranges of the areas that will be uncovered\n coverX, uncoverX = getCover(x, width)\n coverY, uncoverY = getCover(y, height)\n # so at this point we know that coverX and coverY makes a rect that\n # encases the area that we end up blitting to. uncoverX/Y makes a\n # rect in the corner of the uncovered area. So we need to combine\n # the uncoverX/Y with coverY/X to make what's left of the uncovered\n # area. 
Explaining it makes it mush easier to do now.\n\n # But first we need to blit.\n x, width, srcx = getSlide(x, width)\n y, height, srcy = getSlide(y, height)\n self.blit(self, x, y, width, height, srcx, srcy)\n\n if uncoverX: # clear sides (0x20 is space)\n self.drawRect(uncoverX[0], coverY[0], uncoverX[1], coverY[1], 0x20, 0x000000, 0x000000)\n if uncoverY: # clear top/bottom\n self.drawRect(coverX[0], uncoverY[0], coverX[1], uncoverY[1], 0x20, 0x000000, 0x000000)\n if uncoverX and uncoverY: # clear corner\n self.drawRect(uncoverX[0], uncoverY[0], uncoverX[1], uncoverY[1], 0x20, 0x000000, 0x000000)", "def scroll_to_comments(self):\n comments: WebElement = self.driver.find_element_by_tag_name(\n 'ytd-comments')\n self.driver.execute_script(\n \"arguments[0].scrollIntoView(true)\", comments)\n # comments.location_once_scrolled_into_view", "def scroll_to(element):\n _DRIVER.execute_script(\"arguments[0].scrollIntoView();\", element)", "def scrolls(self , scroll):\n if(scroll.scroll_y <= MainWindow.distance):\n operations.load_more() \n scroll.scroll_to(content.ArticlesContainerCopy.articles_container_copy.children[content.Data.limit] , padding=0, animate=True)", "def mouse_scroll(event):\n fig = event.canvas.figure\n ax1 = fig.axes[0]\n ax2 = fig.axes[1]\n if event.button == 'down':\n previous_slice(ax1, ax2)\n elif event.button == 'up':\n next_slice(ax1, ax2)\n fig.canvas.draw()", "def scroll_to_click(element):\n scroll_to(element)\n click(element)", "def jump_to_page(self, page: int) -> None:\n overlap = 1 if self.column >= 2 else 0\n self.canvas.yview_moveto(\n str((page // self.column) / (len(self.pages) // self.column + overlap))\n )", "def scroll_to_player(self):\n\n position = Vec2(self.player_sprite.center_x - self.width / 2,\n self.player_sprite.center_y - self.height / 2)\n self.camera_sprites.move_to(position, CAMERA_SPEED)", "def scroll_down_half_view(self):\n\n window_size = self.driver.get_window_size() # this will give You a dictionary\n start_x = window_size[\"width\"] * 0.25\n start_y = window_size[\"height\"] * 0.15\n if window_size[\"height\"] <= 800:\n end_y = window_size[\"height\"] * 0.4\n else:\n end_y = window_size[\"height\"] * 0.5\n logging.info(\"scroll down half of the screen\")\n sleep(1)\n self.driver.swipe(start_x, end_y, start_x, start_y, 3000) # each swipe is scrolling one screen\n sleep(1)", "def MovePoint(start_p : np.ndarray, end_p : np.ndarray, factor : float):\r\n move = lambda x_1,x_2 : (x_1 + int(factor*(x_2 - x_1)))\r\n return np.array([move(start_p[0], end_p[0]), move(start_p[1], end_p[1])])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Arranges the selected cards according to `orient`.
def ArrangeSelection(self, orient):
    if orient == Deck.HORIZONTAL:
        self.HArrangeSelectedCards()
    elif orient == Deck.VERTICAL:
        self.VArrangeSelectedCards()
[ "def HArrangeSelectedCards(self):\n if len(self.GetSelection()) < 1: return\n\n # we unselect first so that we erase the selection rectangles correctly\n arrange = self.GetSelection()[:]\n self.UnselectAll() \n\n lefts = [c.GetRect().left for c in arrange]\n left = min(lefts)\n card = arrange[lefts.index(left)]\n top = card.GetRect().top\n arrange.sort(key=lambda x: x.GetRect().left)\n\n for c in arrange:\n c.SetPosition(wx.Point(left, top))\n left = c.GetRect().right + self.GetPadding()\n\n self.FitToChildren()\n self.selec.SetFocus()", "def reorganize(self):\n\n\t\tproperties_list = self.get_properties()\n\t\tnum_properties = 0\n\t\tif not properties_list:\n\t\t\tprint \"\\nYou don't have anything to move!\"\n\t\t\treturn\n\n\t\tfor card in properties_list:\n\t\t\tnum_properties += 1\n\t\t\tprint \"\\t%d: %s\" % (num_properties, card.name)\n\t\t\n\t\tprint \"\\t0. Go back.\"\n\t\tprint \"Which property would you like to move?\"\n\n\t\tselection = None\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tselection = int(raw_input(\": \"))\n\t\t\t\tif selection in range(0, num_properties + 1):\n\t\t\t\t\tbreak\n\t\t\texcept ValueError:\n\t\t\t\tpass\n\n\t\t\tprint \"Try again, it looks like you mistyped.\"\n\n\t\tif selection == 0:\n\t\t\treturn\n\n\t\tproperty_index = 0\n\t\tfor group in self.properties:\n\t\t\tfor card in group:\n\t\t\t\tif selection - 1 == property_index:\n\t\t\t\t\tgroup.remove(card) # this is fine because the loop ends before iterating again\n\t\t\t\t\tself.properties[:] = [x for x in self.properties if x != []] # Remove empty lists\n\t\t\t\t\tcard.play(self)\n\t\t\t\t\treturn\n\t\t\t\telse:\n\t\t\t\t\tproperty_index += 1\n\n\t\tprint \"player.reorganize() The card to be reorganized was never found\"", "def set_orient(self, new_orient):\n self[:2, :2] = new_orient", "def sort_players_card(self):\n for player_list in self.player:\n for num in range(0, len(player_list)):\n for n in range(0, len(player_list) - num - 1):\n pos = player_list[n]\n next_pos = player_list[n + 1]\n if self.rank.index(pos[1]) < self.rank.index(next_pos[1]):\n player_list[n], player_list[n + 1] = player_list[n + 1], player_list[n]", "def shuffle_card_positions(self):\n\t\trandom.shuffle(self.card_positions)", "def sort(self, ranks=None):\n ranks = ranks or self.ranks\n self.cards = sort_cards(self.cards, ranks)", "def test_sort_cards(self):\n self.hand.clear()\n self.hand.add_card(card.Card(3, \"H\"))\n self.hand.add_card(card.Card(2, \"C\"))\n\n # while cards should already be sorted at this point, make sure result is true\n self.assertTrue(self.hand.sort_cards())\n\n # check that the sorting actually worked: compare card values/suits\n # can't compare objects directly since they are technically not the same object\n # possible improvement: comparator function\n test_hand = self.hand.get_cards()\n proper_sort = [card.Card(2, \"C\"), card.Card(3, \"H\")]\n\n self.assertEqual(test_hand[0].get_value(), proper_sort[0].get_value())\n self.assertEqual(test_hand[0].get_suit(), proper_sort[0].get_suit())\n self.assertEqual(test_hand[1].get_value(), proper_sort[1].get_value())\n self.assertEqual(test_hand[1].get_suit(), proper_sort[1].get_suit())", "def make_cards(self):\n\n for suit in self._suits:\n for card_num in xrange(1, self._cards_per_suit + 1):\n left = CARD_WIDTH * (card_num - 1)\n top = CARD_HEIGHT * self._suits.index(suit)\n rect = (left, top, CARD_WIDTH, CARD_HEIGHT)\n image = self._cards_sprite.subsurface(rect)\n rect = image.get_rect()\n card = Card(card_num, suit, image, rect)\n 
self._cards[str(card)] = card\n self.add(card)\n #card back last row, col = 4,1\n subrect = CARD_WIDTH, CARD_HEIGHT * 4, CARD_WIDTH, CARD_HEIGHT\n image = self._cards_sprite.subsurface(subrect)\n rect = image.get_rect()\n self._cardback = Card(14, 'cardback', image, rect)\n #card back", "def sortKey(self, sort_option=0):\n arank = self.card.number\n asuit = self.card.suit\n key4sorting = arank\n if sort_option == 0: # by number, red, black together (used for Hand and Foot)\n if arank == 1:\n arank = 14\n if asuit=='Spades' or asuit == 'Clubs':\n key4sorting = 2 * arank\n else:\n key4sorting = (2 * arank) - 1\n elif sort_option == 1 or sort_option == 2: # by number, aces high or low\n if sort_option == 1 and arank == 1:\n arank = 14 # make aces high\n key4sorting = 4 * arank\n if asuit == 'Clubs':\n key4sorting = key4sorting - 3\n elif asuit == 'Diamonds':\n key4sorting = key4sorting - 2\n elif asuit == 'Spades':\n key4sorting = key4sorting - 1\n elif sort_option == 3 or sort_option == 4: # by suit, aces high or low\n if sort_option == 3 and arank == 1:\n arank = 14\n if asuit == 'Clubs':\n key4sorting = arank\n if asuit == 'Diamonds':\n key4sorting = 15 + arank\n elif asuit == 'Spades':\n key4sorting = 30 + arank\n elif asuit == 'Hearts':\n key4sorting = 45 + arank\n else:\n print('only sorting option currently supported are 0 to 4')\n return key4sorting", "def set_orient(dst, orient, src=None):\n exiftool = 'exiftool -overwrite_original -n'\n if src:\n system('%s -TagsFromFile %s -Orientation=%d %s >/dev/null' %\n (exiftool, shell_quote(src), orient, shell_quote(dst)))\n elif get_orient(dst) != orient:\n system('jpegexiforient -%d %s >/dev/null' % (orient, shell_quote(dst)))\n if get_orient(dst) != orient:\n system('%s -n -Orientation=%d %s >/dev/null' %\n (exiftool, orient, shell_quote(dst)))\n return get_orient(dst) == orient", "def changeorient(player, touche):\n\n\tif touche == \"d\": # rotation a droite\n\t\t\tplayer[1] = (player[1] + 10)%360 \n\t\t\n\telif touche == \"g\": # rotation a gauche \n\t\tplayer[1] = (player[1] - 10)%360", "def MoveSelected(self, dx, dy):\n for c in self.GetSelection():\n self.GetParent().MoveCard(c, dx, dy)", "def sortHand(suit_vec):\n #Simple sort for only 7 elements\n for i in range(0, len(suit_vec)):\n for j in range(0, len(suit_vec)):\n if (suit_vec[j].classification > hand_vec[i].classification):\n temp = hand_vec[i]\n suit_vec[i] = hand_vec[j]\n suit_vec[j] = temp", "def rotate_selected_orients_around_axis(\n axis, degrees=90, preserve_children=True, preserve_shapes=True, sync_joint_axes=True\n):\n # if currently on move tool, make sure its object space\n if pm.currentCtx() == pm.melGlobals[\"$gMove\"]:\n pm.manipMoveContext(\"Move\", e=True, mode=0)\n\n rotation = pm.dt.Vector()\n rotation[axis] = degrees\n\n sel_nodes = pm.selected()\n for node in sel_nodes:\n rotate_orient_or_transform(node, rotation, preserve_children, preserve_shapes, sync_joint_axes)", "def cmd_rotate(self):\r\n utils.shuffleUp(self.stacks)\r\n self.group.layoutAll()", "def sort_cards(cards):\n\n cards_dict = {\n \"A\": (\"A\", 1),\n \"2\": (\"2\", 2),\n \"3\": (\"3\", 3),\n \"4\": (\"4\", 4),\n \"5\": (\"5\", 5),\n \"6\": (\"6\", 6),\n \"7\": (\"7\", 7),\n \"8\": (\"8\", 8),\n \"9\": (\"9\", 9),\n \"T\": (\"T\", 10),\n \"J\": (\"J\", 11),\n \"Q\": (\"Q\", 12),\n \"K\": (\"K\", 13)\n }\n first_list = []\n for i in cards:\n first_list.append(cards_dict.get(i))\n sorted_list = sorted(first_list, key=itemgetter(1))\n return_list = []\n for i in sorted_list:\n 
return_list.append(i[0])\n return return_list", "def sort_card(self):\n self.new_card = random.randint(1,13)\n while self.new_card == self.old_card:\n self.new_card = random.randint(1,13)", "def sortHand(self, player):\n sp = [card for card in self.playerHands[player] if card.suit == Suit.spade]\n cl = [card for card in self.playerHands[player] if card.suit == Suit.club]\n he = [card for card in self.playerHands[player] if card.suit == Suit.heart]\n di = [card for card in self.playerHands[player] if card.suit == Suit.diamond]\n sp.sort(key=lambda x: x.val)\n cl.sort(key=lambda x: x.val)\n he.sort(key=lambda x: x.val)\n di.sort(key=lambda x: x.val)\n\n return sp + cl + he + di", "def sort_cards_by_param(queryset, sort_param) -> (list, (str, str)):\n # Initially, determine the sorting parameter\n sorted_by = find_sorting_method(sort_param, options=SORTING_CARD_OPTIONS)\n\n if sorted_by != SORTING_CARD_OPTIONS[0]:\n # If the queryset is not sorted according to the sorting parameter,\n # then sort by the parameter\n result = queryset.annotate(\n relevancy=(Case(When(Q(type=sort_param), then=1), When(~Q(type=sort_param), then=2),\n output_field=PositiveIntegerField())\n )).order_by('relevancy', '-id')\n else:\n # The queryset is already sorted according to the sorting parameter,\n # so, just return it\n result = queryset\n\n return result, sorted_by" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Same as `Deck.ArrangeSelection(Deck.HORIZONTAL)`. Arranges `Card`s in a horizontal row, to the right of the leftmost selected card.
def HArrangeSelectedCards(self):
    if len(self.GetSelection()) < 1: return

    # we unselect first so that we erase the selection rectangles correctly
    arrange = self.GetSelection()[:]
    self.UnselectAll()

    lefts = [c.GetRect().left for c in arrange]
    left = min(lefts)
    card = arrange[lefts.index(left)]
    top = card.GetRect().top
    arrange.sort(key=lambda x: x.GetRect().left)

    for c in arrange:
        c.SetPosition(wx.Point(left, top))
        left = c.GetRect().right + self.GetPadding()

    self.FitToChildren()
    self.selec.SetFocus()
[ "def ArrangeSelection(self, orient):\n if orient == Deck.HORIZONTAL:\n self.HArrangeSelectedCards()\n elif orient == Deck.VERTICAL:\n self.VArrangeSelectedCards()", "def horizontal(self):\n self.__arrangement = 'horizontal'\n return self", "def flip_horizontal(self):\n old = self.current_variant_grid.copy()\n self.current_variant_grid = []\n\n for row in old[::-1]: # Go through the rows backwards.\n self.current_variant_grid.append(row)\n\n self.current_variant += 'H'\n self.snapshot_variant()", "def set_horizontal(self) -> None:\n self.orientation = constants.HORIZONTAL", "def DrawHorizontal(self, canvas):\n for segment in self.horizontalSegments:\n segment.Draw(canvas, self.color)", "def move_left(self):\n # global continue_up, continue_down, continue_right, continue_left\n self.continue_up = 0\n self.continue_down = 0\n self.continue_right = 0\n if self.current_line.text and self.current_line.number == self.lines.total:\n self.lines.add() # create emtpy line\n\n try: # if tab, move 4 spaces\n if self.current_line.x - 6 <= self.current_line.indentation and \\\n self.current_line.text[self.current_line.x - 6 - 4:self.current_line.x - 6] == ' ' and \\\n self.current_line.y == self.current_line.end_y:\n self.current_line.x -= 4\n return\n except BareException:\n pass\n if self.config['cursor_acceleration']:\n move_rate = min(self.config['cursor_max_horizontal_speed'], int(self.continue_left / 10.0) + 1)\n else:\n move_rate = 1\n self.continue_left += 1\n self.current_line.x -= move_rate", "def MoveSelected(self, dx, dy):\n for c in self.GetSelection():\n self.GetParent().MoveCard(c, dx, dy)", "def test_horizontal(self):\n adj = self.prepare_adjacency(horizontal=True)\n x = self.instance.forward_horizontally_stacked(x=self.x, adj=adj)\n self.check_output(x=x)", "def align(self):\r\n for i, box in enumerate(self.boxes):\r\n if box.width > box.height:\r\n self.boxes[i].rotate()", "def horizontalFlip(self):\r\n self.ser.write('A')\r\n try:\r\n self.cameraSettings.toggleHorizontalFlip()\r\n print(\"Camera Flipped Horizontally\")\r\n except:\r\n print(\"Error flipping image horizontally\")", "def is_horizontal(self) -> bool:\n return self.orientation == constants.HORIZONTAL", "def _incrementRow(self):\n global selected_row\n if selected_row is None:\n selected_row = 0\n else:\n selected_row = (selected_row + 1) % NUM_ROWS\n self.row_select_field.setText(str(selected_row))\n # TODO Update corresponding check boxes for selection grid.", "def print_horizontal_line(l):\n print(''.join(['-' * l]))", "def generate_horizontal_span(self, gap=None, scale=None, theta=0, shift=None, jitter=None):\n # Set scale of the cards, variance of shift & jitter to be applied if they're not given\n card_size = (len(self.cards[0].img[0]), len(self.cards[0].img))\n if scale is None:\n # Scale the cards so that card takes about 50% of the image's height\n coverage_ratio = 0.5\n scale = self.height * coverage_ratio / card_size[1]\n if shift is None:\n # Plus minus 5% of the card's height\n shift = [-card_size[1] * scale * 0.05, card_size[1] * scale * 0.05]\n pass\n if jitter is None:\n # Plus minus 10 degrees\n jitter = [-math.pi / 18, math.pi / 18]\n if gap is None:\n # 25% of the card's width - set symbol and 1-2 mana symbols will be visible on each card\n gap = card_size[0] * scale * 0.4\n\n # Determine the location of the first card\n # The cards will cover (width of a card + (# of cards - 1) * gap) pixels wide and (height of a card) pixels high\n x_anchor = int(self.width / 2 + (len(self.cards) - 1) * gap / 2)\n 
y_anchor = self.height // 2\n for card in self.cards:\n card.scale = scale\n card.x = x_anchor\n card.y = y_anchor\n card.theta = 0\n card.shift(shift, shift)\n card.rotate(jitter)\n card.rotate(theta, centre=(self.width // 2 - x_anchor, self.height // 2 - y_anchor))\n x_anchor -= gap\n\n return True", "def is_horizontal_win(self, checker):\n for row in range(self.height):\n for col in range(self.width - 3):\n # Check if the next four columns in this row\n # contain the specified checker.\n if self.slots[row][col] == checker and \\\n self.slots[row][col + 1] == checker and \\\n self.slots[row][col + 2] == checker and \\\n self.slots[row][col + 3] == checker:\n return True\n # if we make it here, there were no horizontal wins\n return False", "def _draw_horizontal_selection_line(self, pos):\n selection_line = avg.DivNode(\n parent=self._aid_lines_div,\n pos=(0, pos[1])\n )\n avg.LineNode(\n parent=selection_line,\n pos1=(self._aid_line_area[0] - self._selection_line_config.extra_length, 0),\n pos2=(self._aid_line_area[2] + self._selection_line_config.extra_length, 0),\n color=self._selection_line_config.color,\n strokewidth=self._selection_line_config.width,\n sensitive=False\n )\n return selection_line", "def create_horizontal_group_with(self, guest):\n hor_group = self.get_group(GroupPosition.HORIZONTAL)\n if hor_group is None: # need to create one new group\n new_group = Group(self, guest, GroupPosition.HORIZONTAL, self._connects)\n self.set_group(new_group, GroupPosition.HORIZONTAL)\n guest.set_group(new_group, GroupPosition.HORIZONTAL)\n else:\n hor_group.update(guest)\n guest.set_group(hor_group, GroupPosition.HORIZONTAL)", "def move_left_diagonal_up(x, y):\r\n return x - 1, y + 1", "def splitFrame(self,horizontal = True):\n (x,y,width,height) = self.frames[self.current_frame].getrect()\n if horizontal and height <= 6:\n return\n if not horizontal and width <= 20:\n return\n newFrame = copy.copy(self.frames[self.current_frame])\n if isinstance(newFrame,EditorFrame):\n self.setEditor(newFrame,self.frames[self.current_frame].editor)\n else:\n newFrame.setdialog(self.frames[self.current_frame].dialog)\n self.frames.insert(self.current_frame,newFrame)\n if horizontal:\n self.frames[self.current_frame].resize(x,y,width,height//2)\n self.frames[self.current_frame+1].resize(x,y+(height//2),width,height-(height//2))\n self.current_frame += 1\n else:\n self.frames[self.current_frame].resize(x,y,width//2,height)\n self.frames[self.current_frame+1].resize(x+(width//2),y,width-(width//2),height)\n self.frames[self.current_frame+1].setlborder(True)\n self.current_frame += 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Listens to every `Card.EVT_DELETE`.
def OnCardDelete(self, ev):
        card = ev.GetEventObject()
        self.cards.remove(card)
        self.UnselectCard(card)
[ "def DeleteSelected(self):\n # store the number of cards we're deleting to raise the event\n number = len(self.cards)\n \n # remember to use while instead of for, since in every\n # iteration self.cards is growing shorter\n while len(self.cards) > 0:\n c = self.cards[-1]\n c.Delete()\n if c in self.cards:\n self.cards.remove(c)\n\n # raise the event; it differs from Card.DeleteEvent in that\n # we raise only one event for every delete action\n # e.g., if we delete five cards, there will be five Card.DeleteEvent's\n # raised, but only one SelectionManager.DeleteEvent\n event = self.DeleteEvent(id=wx.ID_ANY, number=number)\n event.SetEventObject(self)\n self.GetEventHandler().ProcessEvent(event)", "def on_deleted(self, e):\n logger.info('Delete event on file: {}'.format(e.src_path))\n rel_path = self.relativize_path(e.src_path)\n if self._is_shared_file(rel_path):\n # if it has modified a file tracked by shared snapshot, then force the re-download of it\n try:\n self.shared_snapshot.pop(rel_path)\n except KeyError:\n pass\n else:\n # Send data to connection manager dispatcher and check return value.\n # If all go right update client_snapshot and local_dir_state\n response = self.conn_mng.dispatch_request('delete', {'filepath': rel_path})\n if response['successful']:\n event_timestamp = response['content']['server_timestamp']\n if self.client_snapshot.pop(rel_path, 'ERROR') == 'ERROR':\n logger.warning('WARNING inconsistency error during delete operation!\\n'\n 'Impossible to find the following file in stored data (client_snapshot):\\n'\n '{}'.format(e.src_path))\n self.update_local_dir_state(event_timestamp)\n logger.debug('Delete event completed.')\n else:\n self.stop(1, response['content'])", "def OnMgrDelete(self, ev):\n self.selec.Deactivate()\n\n # raise the event again, with event object = self\n event = self.DeleteEvent(id=wx.ID_ANY, number=ev.number)\n event.SetEventObject(self)\n self.GetEventHandler().ProcessEvent(event)", "def removeDeleteCallback(self, *args) -> \"void\":\n return _coin.ScXMLStateMachine_removeDeleteCallback(self, *args)", "def addDeleteCallback(self, *args):\n return _coin.ScXMLStateMachine_addDeleteCallback(self, *args)", "def del_button_clicked(self, obj):\n print(\"Uncaught Delete clicked\")", "def addDeleteCallback(self, *args) -> \"void\":\n return _coin.ScXMLStateMachine_addDeleteCallback(self, *args)", "def removeDeleteCallback(self, *args):\n return _coin.ScXMLStateMachine_removeDeleteCallback(self, *args)", "def delete_event(sender, instance, **kwargs):\r\n Event.objects.filter(pk=instance.event_ptr_id).delete()", "def test_event_delete(self):\n message = domish.Element((None, 'message'))\n message['from'] = 'pubsub.example.org'\n message['to'] = 'user@example.org/home'\n event = message.addElement((NS_PUBSUB_EVENT, 'event'))\n items = event.addElement('delete')\n items['node'] = 'test'\n\n def deleteReceived(event):\n self.assertEquals(JID('user@example.org/home'), event.recipient)\n self.assertEquals(JID('pubsub.example.org'), event.sender)\n self.assertEquals('test', event.nodeIdentifier)\n\n d, self.protocol.deleteReceived = calledAsync(deleteReceived)\n self.stub.send(message)\n return d", "async def on_channel_delete(self, channel):", "def setDeleteCallback(self, *args):\n return _coin.SoDataSensor_setDeleteCallback(self, *args)", "def register_for_deleted_events(self):\n pass", "def delete_events(self, run_id: str) -> None:", "def after_delete(self):\n pass", "def delete_event(self, event_id):\n pass", "def on_removed_handler(file_list):\n 
print_file_list(file_list, \"Remove\")", "def after_delete(sender, instance, **kwargs):\n table = sender.objects.model._meta.db_table\n\n if not allow_table_event(table):\n return\n\n trigger_event(\n f'{table}.deleted',\n model=sender.__name__,\n )", "async def _before_delete(self) -> None:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Listens to `SelectionManager.EVT_MGR_DELETE`, which is raised on every delete action. `Deck.DeleteSelected` calls every selected `Card`'s `Delete` method, which raises many `Card.EVT_DELETE`, and then raises only one `SelectionManager.EVT_MGR_DELETE` event.
def OnMgrDelete(self, ev):
        self.selec.Deactivate()

        # raise the event again, with event object = self
        event = self.DeleteEvent(id=wx.ID_ANY, number=ev.number)
        event.SetEventObject(self)
        self.GetEventHandler().ProcessEvent(event)
[ "def DeleteSelected(self):\n # store the number of cards we're deleting to raise the event\n number = len(self.cards)\n \n # remember to use while instead of for, since in every\n # iteration self.cards is growing shorter\n while len(self.cards) > 0:\n c = self.cards[-1]\n c.Delete()\n if c in self.cards:\n self.cards.remove(c)\n\n # raise the event; it differs from Card.DeleteEvent in that\n # we raise only one event for every delete action\n # e.g., if we delete five cards, there will be five Card.DeleteEvent's\n # raised, but only one SelectionManager.DeleteEvent\n event = self.DeleteEvent(id=wx.ID_ANY, number=number)\n event.SetEventObject(self)\n self.GetEventHandler().ProcessEvent(event)", "def OnCardDelete(self, ev):\n card = ev.GetEventObject()\n self.cards.remove(card)\n self.UnselectCard(card)", "def addDeleteCallback(self, *args):\n return _coin.ScXMLStateMachine_addDeleteCallback(self, *args)", "def addDeleteCallback(self, *args) -> \"void\":\n return _coin.ScXMLStateMachine_addDeleteCallback(self, *args)", "def removeDeleteCallback(self, *args):\n return _coin.ScXMLStateMachine_removeDeleteCallback(self, *args)", "def removeDeleteCallback(self, *args) -> \"void\":\n return _coin.ScXMLStateMachine_removeDeleteCallback(self, *args)", "def deleted(*args, **kwargs):\n return ManagerNotificationWrapper(ACTIONS.deleted, *args, **kwargs)", "def _delete_selected(modeladmin, request, queryset):\r\n opts = modeladmin.model._meta\r\n app_label = opts.app_label\r\n\r\n # Check that the user has delete permission for the actual model\r\n if not modeladmin.has_delete_permission(request):\r\n raise PermissionDenied\r\n\r\n using = router.db_for_write(modeladmin.model)\r\n\r\n # Populate deletable_objects, a data structure of all related objects that\r\n # will also be deleted.\r\n # TODO: Permissions would be so cool...\r\n deletable_objects, perms_needed, protected = get_deleted_objects(\r\n queryset, opts, request.user, modeladmin.admin_site, using)\r\n\r\n # The user has already confirmed the deletion.\r\n # Do the deletion and return a None to display the change list view again.\r\n if request.POST.get('post'):\r\n if perms_needed:\r\n raise PermissionDenied\r\n n = len(queryset)\r\n if n:\r\n for obj in queryset:\r\n obj_display = force_unicode(obj)\r\n modeladmin.log_deletion(request, obj, obj_display)\r\n # call the objects delete method to ensure signals are\r\n # processed.\r\n obj.delete()\r\n # This is what you get if you have to monkey patch every object in a changelist\r\n # No queryset object, I can tell ya. So we get a new one and delete that. 
\r\n #pk_list = [o.pk for o in queryset]\r\n #klass = queryset[0].__class__\r\n #qs = klass.objects.filter(pk__in=pk_list)\r\n #qs.delete()\r\n modeladmin.message_user(request, _(\"Successfully deleted %(count)d %(items)s.\") % {\r\n \"count\": n, \"items\": model_ngettext(modeladmin.opts, n)\r\n })\r\n # Return None to display the change list page again.\r\n return None\r\n\r\n if len(queryset) == 1:\r\n objects_name = force_unicode(opts.verbose_name)\r\n else:\r\n objects_name = force_unicode(opts.verbose_name_plural)\r\n\r\n if perms_needed or protected:\r\n title = _(\"Cannot delete %(name)s\") % {\"name\": objects_name}\r\n else:\r\n title = _(\"Are you sure?\")\r\n\r\n context = {\r\n \"title\": title,\r\n \"objects_name\": objects_name,\r\n \"deletable_objects\": [deletable_objects],\r\n 'queryset': queryset,\r\n \"perms_lacking\": perms_needed,\r\n \"protected\": protected,\r\n \"opts\": opts,\r\n \"root_path\": modeladmin.admin_site.root_path,\r\n \"app_label\": app_label,\r\n 'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,\r\n }\r\n \r\n # Display the confirmation page\r\n return render_to_response(modeladmin.delete_selected_confirmation_template or [\r\n \"admin/%s/%s/delete_selected_confirmation.html\" % (app_label, opts.object_name.lower()),\r\n \"admin/%s/delete_selected_confirmation.html\" % app_label,\r\n \"admin/delete_selected_confirmation.html\"\r\n ], context, context_instance=template.RequestContext(request))", "def handle_deletes(self):\r\n deletes = {}\r\n previous_path = None\r\n current_index = None\r\n\r\n for obj_identifier in self.actions['delete']:\r\n (object_path, pk) = self.split_obj_identifier(obj_identifier)\r\n\r\n if object_path is None or pk is None:\r\n self.log.error(\"Skipping.\")\r\n continue\r\n\r\n if object_path not in deletes:\r\n deletes[object_path] = []\r\n\r\n deletes[object_path].append(obj_identifier)\r\n\r\n # We've got all deletes grouped. Process them.\r\n for object_path, obj_identifiers in deletes.items():\r\n model_class = self.get_model_class(object_path)\r\n\r\n if object_path != previous_path:\r\n previous_path = object_path\r\n current_index = self.get_index(model_class)\r\n\r\n if not current_index:\r\n self.log.error(\"Skipping.\")\r\n continue\r\n\r\n pks = []\r\n\r\n for obj_identifier in obj_identifiers:\r\n current_index.remove_object(obj_identifier, using=self.using)\r\n pks.append(self.split_obj_identifier(obj_identifier)[1])\r\n self.processed_deletes.add(obj_identifier)\r\n\r\n self.log.debug(\"Deleted objects for '%s': %s\" % (object_path, \", \".join(pks)))", "def deleteActions(self, selections=(), REQUEST=None):\n sels = list(map(int, selections)) # Convert to a list of integers.\n\n old_actions = self._cloneActions()\n new_actions = []\n\n for index in range(len(old_actions)):\n if index not in sels:\n new_actions.append(old_actions[index])\n\n self._actions = tuple(new_actions)\n\n if REQUEST is not None:\n msg = 'Deleted %d action(s).' 
% len(sels)\n return self.manage_editActionsForm(REQUEST,\n manage_tabs_message=msg)", "def on_deleted(self, e):\n logger.info('Delete event on file: {}'.format(e.src_path))\n rel_path = self.relativize_path(e.src_path)\n if self._is_shared_file(rel_path):\n # if it has modified a file tracked by shared snapshot, then force the re-download of it\n try:\n self.shared_snapshot.pop(rel_path)\n except KeyError:\n pass\n else:\n # Send data to connection manager dispatcher and check return value.\n # If all go right update client_snapshot and local_dir_state\n response = self.conn_mng.dispatch_request('delete', {'filepath': rel_path})\n if response['successful']:\n event_timestamp = response['content']['server_timestamp']\n if self.client_snapshot.pop(rel_path, 'ERROR') == 'ERROR':\n logger.warning('WARNING inconsistency error during delete operation!\\n'\n 'Impossible to find the following file in stored data (client_snapshot):\\n'\n '{}'.format(e.src_path))\n self.update_local_dir_state(event_timestamp)\n logger.debug('Delete event completed.')\n else:\n self.stop(1, response['content'])", "def handleDeleteEvent(self, deletedResource):\n\n\t\tri = deletedResource.ri\n\t\tgroups = CSE.storage.searchByTypeFieldValue(C.tGRP, 'mid', ri)\n\t\tfor group in groups:\n\t\t\tgroup['mid'].remove(ri)\n\t\t\tgroup['cnm'] = group.cnm - 1\n\t\t\tCSE.storage.updateResource(group)", "def delete(self, *devices):\n for d in devices:\n d.delete()", "def del_button_clicked(self, obj):\n print(\"Uncaught Delete clicked\")", "def delete(self):\n files = [os.path.join(self.location, self.proxy.itemData(index).get(0))\n for index in self.view.selectionModel().selectedIndexes()]\n for f in files:\n try:\n if os.path.isdir(f):\n shutil.rmtree(f, onerror=self.remove_readonly)\n else:\n os.unlink(f)\n except FileNotFoundError:\n logger.error(f\"{f} not found\")\n Pub.notify(f\"App\", f\"{self.pid}: {f} not found.\")", "def after_delete(self):\n pass", "def delete(self, *args, **kw):\n return self.custom_dispatch(*args, **kw)", "def on_delete(self, sender, arg=None):\n self.modify_selection(lambda *a: '')", "def removeDeselectionCallback(self, *args) -> \"void\":\n return _coin.SoSelection_removeDeselectionCallback(self, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Listens to `Card.EVT_REQUEST_VIEW` and raises `Deck.EVT_REQUEST_VIEW` with the same card as event object. The difference is that now a `Box` can `Bind` only once to `EVT_REQUEST_VIEW` events coming from this `Deck`, instead of having to bind to every individual card.
def OnCardRequest(self, ev):
        event = Deck.ReqViewEvent(id=wx.ID_ANY)
        event.SetEventObject(ev.GetEventObject())
        self.GetEventHandler().ProcessEvent(event)
[ "def testF_view_request(self):\n _, _, requestIds = self._inject(15) # creates x docs/requests\n requestView = self._getViewResults(\"request\")\n self.assertEqual(len(requestView), 15)\n for reqView in requestView:\n self.failUnless(reqView[u\"key\"] in requestIds)\n self.failUnless(reqView[u\"value\"][u\"state\"] == u\"NewlyHeld\")", "def accept_request_view(request):\n borrow_request_id = request.POST.get('request')\n borrow_request = BorrowEvent.objects.filter(id=borrow_request_id).first()\n\n if request.user != borrow_request.game.owner:\n messages.info(request, f'You\\'re not authorized to accept that request.')\n return redirect('profile_view')\n\n borrow_request.status = 'approved'\n borrow_request.save()\n\n game = borrow_request.game\n game.status = 'out'\n game.checked_out_user = borrow_request.borrower\n game.checked_out_date = timezone.now()\n game.save()\n\n messages.info(request, f'You have accepted {borrow_request.borrower.username}\\'s request to borrow {borrow_request.game.game.title}.')\n return redirect('profile_view')", "def access_cards(self):\n self.server.on_access()", "def view_handler(self, item, event, data=None):\n # forward everything\n self.handler(item, event, data)", "def _handle_box_animation_request(self, request):\n if isinstance(request, events.BoxCloseRequest):\n self._handle_box_close_request(request)\n elif isinstance(request, events.BoxOpenRequest):\n self._handle_box_open_request(request)\n elif isinstance(request, events.BoxOpenAndCloseRequest):\n self._handle_box_open_and_close_request(request)", "def on_ask_event(self, event):\n self.event_dispatcher.dispatch_event( \n MyEvent ( MyEvent.RESPOND, self ) \n )", "def view(self, view):\n self.view.append(view)", "def viewbox_key_event(self, event):\n PerspectiveCamera.viewbox_key_event(self, event)\n\n if event.handled or not self.interactive:\n return\n\n # Ensure the timer runs\n if not self._timer.running:\n self._timer.start()\n\n if event.key in self._keymap:\n val_dims = self._keymap[event.key]\n val = val_dims[0]\n # Brake or accelarate?\n if val == 0:\n vec = self._brake\n val = 1\n else:\n vec = self._acc\n # Set\n if event.type == 'key_release':\n val = 0\n for dim in val_dims[1:]:\n factor = 1.0\n vec[dim-1] = val * factor", "def view_handler(self, e):\n self.toggle_view(e) # configures the window to reflect the view mode\n self.show_graph() # replots the graph", "def OnCardLeftDown(self, ev):\n card = ev.GetEventObject()\n\n # bring to front and select\n card.Raise()\n self.selec.SelectCard(card)\n\n # initiate moving\n self.CaptureMouse()\n self.Bind(wx.EVT_LEFT_UP, self.OnCardLeftUp)\n self.Bind(wx.EVT_MOTION, self.OnMovingCard)\n\n self.on_motion = False\n pos = card.GetPosition() + ev.GetPosition() # relative to the canvas\n self.moving_cards_pos = []\n for c in self.GetSelection():\n # (card, pos w.r.t. the original click, current pos)\n self.moving_cards_pos.append((c, c.GetPosition() - pos, c.GetPosition()))", "def send_request(window, request, on_response = None):\n if StackIDEManager.is_running(window):\n StackIDEManager.for_window(window).send_request(request, on_response)", "def on_card_clicked(self, card_id, card_object, card_state):\n\n\t\t# First possiblity: the card is blocked. Then we reverse selection\n\t\tif card_object.mode == 'blocked':\n\t\t\tif card_object.state == 'normal': card_object.state = 'down'\n\t\t\telif card_object.state == 'down': card_object.state = 'normal'\n\n\t\t# Second possibility: the card is not blocked. 
Then we must decide what to do with it.\n\t\telse:\n\t\t\t# What will happen depens on the card type and the current action, so we discover them.\n\t\t\t# We also prepare to recalculate zones, which might come to be a necessity.\n\t\t\tclicked_card_type = card_object.get_type()\n\t\t\tcurrent_action = g.save.match['action']\n\t\t\trecalculate_zones = True\n\t\n\t\t\t# In the Strategy Stage, all cards are clickable, but only \n\t\t\t# one formation and one tactic can be active at any point.\n\t\t\n\t\t\t# First, let's deal with what happened when a card was seleted, rather than diselected.\t\n\t\t\tif card_state == 'normal':\n\n\t\t\t\t# If a formation was selected, we must prevent two formations from being equiped at the same time.\n\t\t\t\tif clicked_card_type == 'formation':\n\t\t\t\t\tselected_formations = self.card_hand.get_cards_by_type(card_state = 'selected', card_type = 'formation')\n\t\t\t\t\tif len(selected_formations)>1:\n\t\t\t\t\t\t# Multiple formations are selected. \n\t\t\t\t\t\t# The old one must be unselected so the new one becomes the only.\n\t\t\t\t\t\tfor card in selected_formations:\n\t\t\t\t\t\t\tif card != card_object:\n\t\t\t\t\t\t\t\tcard.state = 'down'\n\t\t\t\t\t\t# This will make sure there are not more skills equiped than allowed.\n\t\t\t\t\t\tself.reevaluate_skills()\n\t\t\t\t\t\t\t\t#recalculate_zones = False\n\t\n\t\t\t\t# If a tactic was selected, we must prevent two tactics from being equiped at the same time.\n\t\t\t\telif clicked_card_type == 'tactic':\n\t\t\t\t\tselected_tactics = self.card_hand.get_cards_by_type(card_state = 'selected', card_type = 'tactic')\n\t\t\t\t\tif len(selected_tactics)>1:\n\t\t\t\t\t\t# Multiple tactics selected. \n\t\t\t\t\t\t# The old one must be unselected so the new one becomes the only.\n\t\t\t\t\t\tfor card in selected_tactics:\n\t\t\t\t\t\t\tif card != card_object:\n\t\t\t\t\t\t\t\tcard.state = 'down'\n\t\t\t\t\t\t#self.reevaluate_skills()\n\t\n\t\t\t# If a skill was selected, we must prevent that there are more equiped skills than what is allowed\n\t\t\t# by the selected formation.\n\t\t\t\tif clicked_card_type in ['attacker','defender','midfielder']:\n\t\t\t\t\tlist_of_formations = self.card_hand.get_cards_by_type(card_state = 'selected', card_type = 'formation')\n\t\t\t\t\tif list_of_formations == []:\n\t\t\t\t\t\t#There are no equiped formation and, thus, the player cannot equip skills yet.\n\t\t\t\t\t\tcard_object.state = 'down'\n\t\t\t\t\t\trecalculate_zones = False\n\t\t\t\t\t\tprint \"In the strategy stage, you may not equip skills without a formation. 
Wait for a preparation phase.\"\n\t\t\t\t\telse:\n\t\t\t\t\t\t# There is an equiped formation and, thus, he can equip some skills\n\t\t\t\t\t\tformation = list_of_formations[0]\n\t\t\t\t\t\tskill_limit = None\n\t\t\t\t\t\t# This wil allow us to target the relevant zone to the selected skill.\t\t\n\t\t\t\t\t\tzone_index = 0 # The selected card was a defender\n\t\t\t\t\t\tif clicked_card_type == 'midfielder': zone_index =1 # The selected card was a midfielder\n\t\t\t\t\t\telif clicked_card_type == 'attaker': zone_index = 2 # The selected card was an attacker\n\t\t\t\t\t\t# The skill limit tells us how many skills slots are allowed by the formation.\n\t\t\t\t\t\tskill_limit = g.formations[formation.name]['men'][zone_index]\n\t\t\t\t\t\t# Now we check if the limit was exceeded.\n\t\t\t\t\t\tif len(self.card_hand.get_cards_by_type(card_state = 'selected', card_type = clicked_card_type)) > skill_limit:\n\t\t\t\t\t\t\t# If so, we reverse selection.\n\t\t\t\t\t\t\tcard_object.state = 'down'\n\t\t\t\t\t\t\trecalculate_zones = False\n\t\t\n\t\t\t\t\t\n\t\t\t# Now let's deal with what happens when a card is diselected, rather than selected.\n\t\t\telif card_state == 'down':\n\t\t\t\t# It depends on the card type.\n\t\t\t\tif clicked_card_type == 'formation':\n\t\t\t\t\t# If a formation is removed, all equiped skills are unequiped, since one cannot have\n\t\t\t\t\t# skills without a formation in the strategy stage.\n\t\t\t\t\tequiped_skills = self.card_hand.get_cards_by_type(card_state = 'selected', card_type = 'skill')\n\t\t\t\t\tfor card in equiped_skills:\n\t\t\t\t\t\tcard.state = 'down'\n\t\t\n\t\t\t# If there was a change that affects the zone strength, then we must recalculate it.\n\t\t\tif recalculate_zones:\n\t\t\t\tself.calculate_zone_strength()", "def view_mode_changed(self, view, value): \r\n if self.on_view_mode_changed is not None: # MI: Call callback function if defined\r\n self.on_view_mode_changed(value)", "def on_show_view(self) -> None:\n self.setup()", "def change_view(self, request, object_id, form_url='', extra_context=None):\n extra_context = {} if extra_context is None else extra_context\n return super().change_view(\n request,\n object_id,\n form_url=form_url,\n extra_context=extra_context,\n )", "def event_frame(self, event_frame):\n\n self._event_frame = event_frame", "def sendForPeerReview(intent_request):\r\n\r\n #Intent fulfillment\r\n slots = get_slots(intent_request)\r\n source = intent_request['invocationSource']\r\n\r\n applicationNumber = slots['applicationNumber']\r\n peer = {}\r\n peer['firstName'] = slots['peerFirstName'].capitalize()\r\n peer['lastName'] = slots['peerLastName'].capitalize()\r\n\r\n applicationNumberVal = validate_applicationNumber(applicationNumber)\r\n if not applicationNumberVal['isValid']:\r\n slots[applicationNumberVal['violatedSlot']] = None\r\n\r\n return elicit_slot(intent_request['sessionAttributes'],\r\n intent_request['currentIntent']['name'],\r\n slots,\r\n applicationNumberVal['violatedSlot'],\r\n applicationNumberVal['message'])\r\n\r\n output_session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}\r\n return delegate(output_session_attributes, get_slots(intent_request))\r\n\r\n peerVal = validate_peer(peer['firstName'],peer['lastName'])\r\n if not peerVal['isValid']:\r\n slots[peerVal['violatedSlot']] = None\r\n\r\n return elicit_slot(intent_request['sessionAttributes'],\r\n intent_request['currentIntent']['name'],\r\n slots,\r\n peerVal['violatedSlot'],\r\n 
peerVal['message'])\r\n\r\n output_session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}\r\n return delegate(output_session_attributes, get_slots(intent_request))\r\n\r\n application = applicationsRead.getDetails(applicationNumber,'pullUpEverything')\r\n\r\n if ('y' in application['details']) and (application['details']['y'] is not None):\r\n return close(intent_request['sessionAttributes'],\r\n 'Fulfilled',\r\n {'contentType': 'PlainText',\r\n 'content': 'Done! I\\'ve sent application number {} to your colleague {} for a review.'.format(applicationNumber,peer['firstName'])})\r\n elif ('y' not in application['details']):\r\n return close(intent_request['sessionAttributes'],\r\n 'Fulfilled',\r\n {'contentType': 'PlainText',\r\n 'content': 'Application number {} does not seem to be evaluated for a risk score yet. Are you sure you want to send it to your colleague {} for a review?'.format(applicationNumber,peer['firstName'])})\r\n else:\r\n return close(intent_request['sessionAttributes'],\r\n 'Fulfilled',\r\n {'contentType': 'PlainText',\r\n 'content': 'Sorry, I could not send application {} to {}.'.format(applicationNumber,peer['firstName'])})", "def _ensure_response_has_view(self):\n if not (self.response.original and isinstance(self.response.original, View)):\n raise ValueError(\"The response is not a view\")", "def view_changed(self, cat_num, view_num):\n # Set new button as selected\n self.__handlers_block()\n for index, button in enumerate(self.buttons):\n if index == cat_num:\n button.set_active(True)\n else:\n button.set_active(False)\n self.__handlers_unblock()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Listens to `wx.EVT_LEFT_DOWN` events on every `Card`'s child window.
def OnCardChildLeftDown(self, ev):
        self.UnselectAll()
        ev.Skip()
[ "def OnCardLeftDown(self, ev):\n card = ev.GetEventObject()\n\n # bring to front and select\n card.Raise()\n self.selec.SelectCard(card)\n\n # initiate moving\n self.CaptureMouse()\n self.Bind(wx.EVT_LEFT_UP, self.OnCardLeftUp)\n self.Bind(wx.EVT_MOTION, self.OnMovingCard)\n\n self.on_motion = False\n pos = card.GetPosition() + ev.GetPosition() # relative to the canvas\n self.moving_cards_pos = []\n for c in self.GetSelection():\n # (card, pos w.r.t. the original click, current pos)\n self.moving_cards_pos.append((c, c.GetPosition() - pos, c.GetPosition()))", "def OnCardLeftUp(self, ev):\n # terminate moving\n if self.on_motion:\n self.on_motion = False\n for c, orig, pos in self.moving_cards_pos:\n self.EraseCardRect(c, pos)\n \n if self.moving_cards_pos:\n for c, orig, pos in self.moving_cards_pos:\n final_pos = ev.GetPosition() + orig - (Content.BORDER_WIDTH, Content.BORDER_WIDTH)\n # since we need to set absolute final position, we use\n # Card.Move instead of Card.MoveBy\n c.Move(final_pos)\n \n self.moving_cards_pos = []\n self.ReleaseMouse()\n self.Unbind(wx.EVT_LEFT_UP)\n self.Unbind(wx.EVT_MOTION)", "def OnLeftDown(self, event):\n\n click_posn = event.GetPositionTuple() if WX3 else event.GetPosition()\n\n if event.ShiftDown():\n self.is_box_select = True\n self.SetCursor(wx.StockCursor(wx.CURSOR_CROSS))\n (self.sbox_w, self.sbox_h) = (0, 0)\n (self.sbox_1_x, self.sbox_1_y) = click_posn\n else:\n self.is_box_select = False\n self.SetCursor(wx.StockCursor(wx.CURSOR_HAND))\n (self.last_drag_x, self.last_drag_y) = click_posn\n event.Skip()", "def OnLeftDClick(self, event):\n\n # ignore next Left UP event\n self.ignore_next_up = True\n\n # TODO: should ignore double-click off the map, but within view\n # a possible workaround is to limit minimum view level\n\n # get view coords of mouse double click, want same centre afterwards\n xy = event.GetPositionTuple() if WX3 else event.GetPosition()\n\n if event.ShiftDown():\n # zoom out if shift key also down\n if self.ZoomToLevel(self.level - 1):\n self.ZoomOut(xy)\n else:\n # zoom in\n if self.ZoomToLevel(self.level + 1):\n self.ZoomIn(xy)\n\n # Raise position event to update the status text.\n self.RaiseMousePositionEvent(xy)", "def control_left(self):\n ok = True\n while ok:\n events = pygame.event.get()\n for event in events:\n if event.type == pygame.KEYDOWN:\n if (event.key == pygame.K_ESCAPE):\n ok = False\n break\n self.controlButtons['Left']._title = pygame.key.name(event.key)\n self.game_control['Left'] = event.key\n ok = False\n break\n return", "def dispatchAllWindowEvents(cls):\n wins = pyglet.window.get_platform().get_default_display().get_windows()\n for win in wins: win.dispatch_events()", "def handle_events(self, animation=False):\n for e in pg.event.get():\n if e.type == pg.QUIT:\n pg.quit()\n sys.exit()\n elif (not animation and e.type == pg.MOUSEBUTTONDOWN\n and e.button == pg.BUTTON_LEFT):\n self.handle_mouse_click()", "def onLeftDown(self, evt=None):\n evt_x, evt_y = evt.GetX(), evt.GetY()\n max_x, max_y = self.full_size\n img_w, img_h = self.bitmap_size\n pan_w, pan_h = self.panel_size\n pad_w, pad_h = (pan_w-img_w)/2.0, (pan_h-img_h)/2.0\n\n x = int(0.5 + (evt_x - pad_w)/self.scale)\n y = int(0.5 + (evt_y - pad_h)/self.scale)\n if self.leftdown_cb is not None:\n self.leftdown_cb(x, y, xmax=max_x, ymax=max_y)", "def __on_start_click_event(self):\r\n self.frames[\"game\"].tkraise()", "def OnMovingCard(self, ev):\n if ev.Dragging() and self.moving_cards_pos:\n # draw a rectangle while moving\n # order is important\n 
self.on_motion = True\n for c, orig, pos in self.moving_cards_pos:\n self.EraseCardRect(c, pos, refresh = False)\n pos = ev.GetPosition() + orig\n self.PaintCardRect(c, pos)", "def on_moved_to_front(self):\n window_front_event = pygame.event.Event(pygame.USEREVENT,\n {'user_type': UI_WINDOW_MOVED_TO_FRONT,\n 'ui_element': self,\n 'ui_object_id': self.most_specific_combined_id})\n pygame.event.post(window_front_event)", "def handle(self):\n pygame.event.post(pygame.event.Event(EventHandler.NEXT_FRAME_EVENT, {}))\n self.keyboardPress = self.keyboardPress.fromkeys(self.keyboardPress, False)\n self.mousePress = self.mousePress.fromkeys(self.mousePress, None)\n for event in pygame.event.get():\n if self.keyboard:\n if event.type == pygame.KEYDOWN:\n self.keyboardPress[event.key] = True\n self.keyboardHold[event.key] = True\n if event.type == pygame.KEYUP:\n self.keyboardHold[event.key] = False\n if self.mouse:\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.mousePress[event.button] = event.pos\n if event.type in self.listeners.keys():\n for func in self.listeners[event.type]:\n func(event)", "def _mouseDown(self):\n # Execute pre interaction callback\n self._preCallback()", "def handle_events(self):\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n self.env.close()\r\n self.sim_surface.handle_event(event)\r\n if self.env.ego_vehicles:\r\n VehicleGraphics.handle_event(self.env.ego_vehicles, event)", "def OnCardChildFocus(self, ev):\n self.UnselectAll()\n ev.Skip()", "def event(self, e):\n if self._glwidget is None:\n if e.type() == QEvent.ChildAdded and e.child().isWidgetType():\n ###print('child add')\n self._glwidget = e.child()\n self._glwidget.installEventFilter(self)\n return super().event(e)", "def left_callback(self):\n return self.send_command('/keypress/Left')", "def bind_clicks(self):\n # call back function, e: event\n self.bind(\"<Button-1>\", lambda e: self._handle_left_click((e.x, e.y)))\n self.bind(\"<Button-2>\", lambda e: self._handle_right_click((e.x, e.y)))\n self.bind(\"<Button-3>\", lambda e: self._handle_right_click((e.x, e.y)))", "def mouse_listener(self, event):\n if self.game_state == GAME_MENU:\n if event.type == pygame.MOUSEMOTION:\n self.gui.mouse_update(event)\n elif event.type == pygame.MOUSEBUTTONDOWN:\n self.gui.mouse_click(event)\n elif event.type == pygame.MOUSEBUTTONUP:\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Listens to `wx.EVT_LEFT_DOWN` events from every `Card`.
def OnCardLeftDown(self, ev):
        card = ev.GetEventObject()

        # bring to front and select
        card.Raise()
        self.selec.SelectCard(card)

        # initiate moving
        self.CaptureMouse()
        self.Bind(wx.EVT_LEFT_UP, self.OnCardLeftUp)
        self.Bind(wx.EVT_MOTION, self.OnMovingCard)

        self.on_motion = False
        pos = card.GetPosition() + ev.GetPosition()  # relative to the canvas
        self.moving_cards_pos = []
        for c in self.GetSelection():
            # (card, pos w.r.t. the original click, current pos)
            self.moving_cards_pos.append((c, c.GetPosition() - pos, c.GetPosition()))
[ "def OnCardLeftUp(self, ev):\n # terminate moving\n if self.on_motion:\n self.on_motion = False\n for c, orig, pos in self.moving_cards_pos:\n self.EraseCardRect(c, pos)\n \n if self.moving_cards_pos:\n for c, orig, pos in self.moving_cards_pos:\n final_pos = ev.GetPosition() + orig - (Content.BORDER_WIDTH, Content.BORDER_WIDTH)\n # since we need to set absolute final position, we use\n # Card.Move instead of Card.MoveBy\n c.Move(final_pos)\n \n self.moving_cards_pos = []\n self.ReleaseMouse()\n self.Unbind(wx.EVT_LEFT_UP)\n self.Unbind(wx.EVT_MOTION)", "def OnCardChildLeftDown(self, ev):\n self.UnselectAll()\n ev.Skip()", "def OnLeftDown(self, event):\n\n click_posn = event.GetPositionTuple() if WX3 else event.GetPosition()\n\n if event.ShiftDown():\n self.is_box_select = True\n self.SetCursor(wx.StockCursor(wx.CURSOR_CROSS))\n (self.sbox_w, self.sbox_h) = (0, 0)\n (self.sbox_1_x, self.sbox_1_y) = click_posn\n else:\n self.is_box_select = False\n self.SetCursor(wx.StockCursor(wx.CURSOR_HAND))\n (self.last_drag_x, self.last_drag_y) = click_posn\n event.Skip()", "def OnMovingCard(self, ev):\n if ev.Dragging() and self.moving_cards_pos:\n # draw a rectangle while moving\n # order is important\n self.on_motion = True\n for c, orig, pos in self.moving_cards_pos:\n self.EraseCardRect(c, pos, refresh = False)\n pos = ev.GetPosition() + orig\n self.PaintCardRect(c, pos)", "def onLeftDown(self, evt=None):\n evt_x, evt_y = evt.GetX(), evt.GetY()\n max_x, max_y = self.full_size\n img_w, img_h = self.bitmap_size\n pan_w, pan_h = self.panel_size\n pad_w, pad_h = (pan_w-img_w)/2.0, (pan_h-img_h)/2.0\n\n x = int(0.5 + (evt_x - pad_w)/self.scale)\n y = int(0.5 + (evt_y - pad_h)/self.scale)\n if self.leftdown_cb is not None:\n self.leftdown_cb(x, y, xmax=max_x, ymax=max_y)", "def control_left(self):\n ok = True\n while ok:\n events = pygame.event.get()\n for event in events:\n if event.type == pygame.KEYDOWN:\n if (event.key == pygame.K_ESCAPE):\n ok = False\n break\n self.controlButtons['Left']._title = pygame.key.name(event.key)\n self.game_control['Left'] = event.key\n ok = False\n break\n return", "def OnLeftDClick(self, event):\n\n # ignore next Left UP event\n self.ignore_next_up = True\n\n # TODO: should ignore double-click off the map, but within view\n # a possible workaround is to limit minimum view level\n\n # get view coords of mouse double click, want same centre afterwards\n xy = event.GetPositionTuple() if WX3 else event.GetPosition()\n\n if event.ShiftDown():\n # zoom out if shift key also down\n if self.ZoomToLevel(self.level - 1):\n self.ZoomOut(xy)\n else:\n # zoom in\n if self.ZoomToLevel(self.level + 1):\n self.ZoomIn(xy)\n\n # Raise position event to update the status text.\n self.RaiseMousePositionEvent(xy)", "def on_left_button_down(self):\n if hub.button.right.is_pressed():\n # Both buttons are pressed down\n self._reset_values()\n return\n\n def _predicate():\n return (hub.button.left.is_pressed()\n and not hub.button.right.is_pressed())\n\n self._update_display()\n self.adjust = self._adjust_actuator_value(_predicate, -10)\n call_soon(self.adjust)", "def OnRightDown(self, evt):\n pass", "def handle(self):\n pygame.event.post(pygame.event.Event(EventHandler.NEXT_FRAME_EVENT, {}))\n self.keyboardPress = self.keyboardPress.fromkeys(self.keyboardPress, False)\n self.mousePress = self.mousePress.fromkeys(self.mousePress, None)\n for event in pygame.event.get():\n if self.keyboard:\n if event.type == pygame.KEYDOWN:\n self.keyboardPress[event.key] = True\n 
self.keyboardHold[event.key] = True\n if event.type == pygame.KEYUP:\n self.keyboardHold[event.key] = False\n if self.mouse:\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.mousePress[event.button] = event.pos\n if event.type in self.listeners.keys():\n for func in self.listeners[event.type]:\n func(event)", "def _mouseDown(self):\n # Execute pre interaction callback\n self._preCallback()", "def press_left():\n\tif pygame.key.get_pressed()[pygame.K_LEFT]:\n\t\treturn True", "def mouse_handler(self,events):\n\n for event in events:\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.mousedown = True\n self.mousebutton = event.button\n elif event.type == pygame.MOUSEBUTTONUP:\n self.mousedown = False\n self.mousebutton = event.button\n self.mouseX, self.mouseY = pygame.mouse.get_pos()\n\n #manage tool events\n if self.draw_tool == \"Line\":\n self.draw_line_template()\n if self.draw_tool == \"Circle\":\n self.draw_circle_template()\n\n #show mouse state\n self.show_mousestate()", "def mouse_listener(self, event):\n if self.game_state == GAME_MENU:\n if event.type == pygame.MOUSEMOTION:\n self.gui.mouse_update(event)\n elif event.type == pygame.MOUSEBUTTONDOWN:\n self.gui.mouse_click(event)\n elif event.type == pygame.MOUSEBUTTONUP:\n pass", "def left_callback(self):\n return self.send_command('/keypress/Left')", "def OnRightDown(self, event):\n\n click_posn = event.GetPositionTuple() if WX3 else event.GetPosition()\n\n if event.ShiftDown():\n self.is_box_select = True\n self.SetCursor(wx.StockCursor(wx.CURSOR_CROSS))\n (self.sbox_w, self.sbox_h) = (0, 0)\n (self.sbox_1_x, self.sbox_1_y) = click_posn\n event.Skip()", "def _handle_mouse(self, ev):\n FROM_LEFT_1ST_BUTTON_PRESSED = 0x1\n\n result = []\n\n # Check event type.\n if ev.ButtonState == FROM_LEFT_1ST_BUTTON_PRESSED:\n # On a key press, generate both the mouse down and up event.\n for event_type in [MouseEventType.MOUSE_DOWN, MouseEventType.MOUSE_UP]:\n data = ';'.join([\n event_type,\n str(ev.MousePosition.X),\n str(ev.MousePosition.Y)\n ])\n result.append(KeyPress(Keys.WindowsMouseEvent, data))\n\n return result", "def keyboard_left(self, _):\n self.state = g.update_keyboard(g.KeyPress.LEFT, self.state)", "def key_listener(self, event):\n if self.game_state == GAME_PLAY:\n if event.type == pygame.KEYDOWN:\n if ord('a') <= event.__dict__[\"key\"] <= ord('z'):\n self.key_status[event.__dict__[\"key\"]] = True\n elif event.__dict__[\"key\"] == 304: # Shift\n self.key_status[event.__dict__[\"key\"]] = True\n elif event.__dict__[\"key\"] == 27: # Esc\n exit(0)\n elif event.type == pygame.KEYUP:\n if 97 <= event.__dict__[\"key\"] <= 122:\n self.key_status[event.__dict__[\"key\"]] = False\n elif event.__dict__[\"key\"] == 304: # Shift\n self.key_status[event.__dict__[\"key\"]] = False\n elif event.__dict__[\"key\"] == 32: # Space bar\n self.__save_hand()\n elif self.game_state == GAME_MENU:\n self.gui.key_listener(event)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Listens to `wx.EVT_CHILD_FOCUS` from every `Card`.
def OnCardChildFocus(self, ev):
        self.UnselectAll()
        ev.Skip()
[ "def OnChildFocus( self, evt ):\n def SetSelection( ctrl ):\n try:\n ctrl.SetSelection( -1, -1 )\n except:\n pass\n \n ctrl = evt.GetEventObject()\n self.GetGrid().SetFocusedProperty( self )\n self.GetGrid().SetFocusedPropertyControl( ctrl.GetId() )\n wx.CallAfter( SetSelection, ctrl )", "def OnCardChildLeftDown(self, ev):\n self.UnselectAll()\n ev.Skip()", "def on_focus(self, widget: 'FocusInput', invalid: bool) -> None:\n if self.parent and not self.focus:\n self.parent.add_player(widget, invalid)", "def OnCardLeftDown(self, ev):\n card = ev.GetEventObject()\n\n # bring to front and select\n card.Raise()\n self.selec.SelectCard(card)\n\n # initiate moving\n self.CaptureMouse()\n self.Bind(wx.EVT_LEFT_UP, self.OnCardLeftUp)\n self.Bind(wx.EVT_MOTION, self.OnMovingCard)\n\n self.on_motion = False\n pos = card.GetPosition() + ev.GetPosition() # relative to the canvas\n self.moving_cards_pos = []\n for c in self.GetSelection():\n # (card, pos w.r.t. the original click, current pos)\n self.moving_cards_pos.append((c, c.GetPosition() - pos, c.GetPosition()))", "def focusable(self, focusable):\n self.canvas_widget.focusable = focusable", "def _on_focus_changed(self, old, new):\n self._update_focus_widget()", "def _on_child_event(self, event: DOMEvent):\n if event.event_type is DOMEventType.RESIZE:\n self._on_child_resize(event)\n return\n\n if event.stop_propagation:\n return\n event.current_target = self\n self.emit(event.event_type.value, event)", "def focus_me(self, widget):\n for widg in self.__widgets:\n if widget == widg:\n widg.set_focus(True)\n else:\n widg.set_focus(False)", "def event(self, e):\n if self._glwidget is None:\n if e.type() == QEvent.ChildAdded and e.child().isWidgetType():\n ###print('child add')\n self._glwidget = e.child()\n self._glwidget.installEventFilter(self)\n return super().event(e)", "def item_focus (self, obj):\n if isinstance(obj, Process):\n self.at_top = self.index(obj) is 0\n return\n key = obj\n if key is 'up':\n self.at_top = self.focus is 0\n elif key is 'down':\n self.at_top = False", "def _request_root_focus(self, focused_item):\n if self.__canvas_widget.focused:\n self.focused_item = focused_item\n else:\n self.focused_item = None\n self.__last_focused_item = focused_item\n self.__canvas_widget.focused = True # this will trigger focus changed to set the focus", "def focus_change(self, func):\r\n return self._subscribe(\"focus_change\", func)", "def focusInEvent(self, event):\n super().focusInEvent(event)\n self.gotFocus.emit()", "def handle_event(self, event, parent_offset):\n \n offset = parent_offset + self.position\n for child in self._children: \n if isinstance(event, MouseWheelEvent) and self.hovered:\n if child.handle_event(event, offset): return True\n # TODO: more special handling; mouse capture etc.\n else:\n if child.handle_event(event, offset): return True\n \n return super().handle_event(event, parent_offset)", "def focusOn(self, displayNo, pos):\r\n\r\n row = self.itemDisplays[displayNo]\r\n widget = row[pos][0]\r\n self.selectWidget(widget)", "def ContactsFocused(self, Username):", "def focusable(self):\n return self.canvas_widget.focusable", "def _on_move_down(self, focus):\n self.qr.put(('LOCKGUI', None))\n e_child = self.trout.find(\".//\" + focus)\n if etree.iselement(e_child):\n e_parent = e_child.getparent()\n# self.qr.put(('PRINT', [[kid.tag, e_parent.index(kid)] for kid in e_parent.getchildren()]))\n child_index = e_parent.index(e_child)\n if child_index < len(list(e_parent[:-1])):\n child_index += 1\n 
e_parent.remove(e_child)\n e_parent.insert(child_index, e_child)\n# self.qr.put(('PRINT', [[kid.tag, e_parent.index(kid)] for kid in e_parent.getchildren()]))\n self._on_reload_tree()\n self.qr.put(('SEEFOCUS', focus))\n self.qr.put(('UNLOCKGUI', None))", "def on_focusin(self):\n\n if not getattr(self.parent_form, 'description', None):\n return None\n self.parent_form.description.values = self.description_text\n self.parent_form.display()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Listens to `wx.EVT_MOTION` events from `Card`s only while a `Card` is being click-dragged.
def OnMovingCard(self, ev):
        if ev.Dragging() and self.moving_cards_pos:
            # draw a rectangle while moving
            # order is important
            self.on_motion = True
            for c, orig, pos in self.moving_cards_pos:
                self.EraseCardRect(c, pos, refresh = False)
                pos = ev.GetPosition() + orig
                self.PaintCardRect(c, pos)
[ "def OnCardLeftDown(self, ev):\n card = ev.GetEventObject()\n\n # bring to front and select\n card.Raise()\n self.selec.SelectCard(card)\n\n # initiate moving\n self.CaptureMouse()\n self.Bind(wx.EVT_LEFT_UP, self.OnCardLeftUp)\n self.Bind(wx.EVT_MOTION, self.OnMovingCard)\n\n self.on_motion = False\n pos = card.GetPosition() + ev.GetPosition() # relative to the canvas\n self.moving_cards_pos = []\n for c in self.GetSelection():\n # (card, pos w.r.t. the original click, current pos)\n self.moving_cards_pos.append((c, c.GetPosition() - pos, c.GetPosition()))", "def OnCardLeftUp(self, ev):\n # terminate moving\n if self.on_motion:\n self.on_motion = False\n for c, orig, pos in self.moving_cards_pos:\n self.EraseCardRect(c, pos)\n \n if self.moving_cards_pos:\n for c, orig, pos in self.moving_cards_pos:\n final_pos = ev.GetPosition() + orig - (Content.BORDER_WIDTH, Content.BORDER_WIDTH)\n # since we need to set absolute final position, we use\n # Card.Move instead of Card.MoveBy\n c.Move(final_pos)\n \n self.moving_cards_pos = []\n self.ReleaseMouse()\n self.Unbind(wx.EVT_LEFT_UP)\n self.Unbind(wx.EVT_MOTION)", "def mousedown(self, event):\n self.deltax = event.x - (self.x + self.width // 2)\n self.deltay = event.y - (self.y + self.height // 2)\n if abs(self.deltax) < 50 and abs(self.deltay) < 50:\n self.dragging = True\n # only drag one bunny at a time - consume the event\n event.consumed = True", "def handleDragEvent(self, handle, event):\n handle.planeWise = (event.type() in (QtCore.QEvent.MouseMove, QtCore.QEvent.MouseButtonPress)\n and (event.modifiers() & QtCore.Qt.ControlModifier))", "def on_touch_move(self, evt):\n \n pass", "def player_handle_move(self) -> None:\r\n mouse_pos = pg.mouse.get_pos()\r\n mouse_click = pg.mouse.get_pressed()\r\n\r\n for i in range(1, self.size + 1):\r\n x = i * self.gap_size + (i - 1) * self.box_size\r\n for j in range(1, self.size + 1):\r\n y = j * self.gap_size + (j - 1) * self.box_size\r\n if x < mouse_pos[0] < x + self.box_size and y < mouse_pos[1] < y + self.box_size and self.tags[i-1][j-1] is None:\r\n displayWindow.blit(self.cross, (x, y))\r\n\r\n if mouse_click[0] == 1:\r\n self.tags[i-1][j-1] = 'x'\r\n self.player_move_in_progress = False", "def drag(self, event):\n # compute how much the mouse has moved\n delta_x = event.x - self._drag_data[\"x\"]\n delta_y = event.y - self._drag_data[\"y\"]\n\n # move the object the appropriate amount\n self.c.move(self._drag_data[\"item\"], delta_x, delta_y)\n\n # record the new position\n self._drag_data[\"x\"] = event.x\n self._drag_data[\"y\"] = event.y", "def handle_drag(\n self, box: Box, x: int, y: int, dx: int, dy: int, buttons: int, modifiers: int\n ) -> Optional[bool]:\n if self.definition.snap_boxes is None:\n self.parent.position += cocos.draw.Vector2(dx, dy)\n else:\n self._snap_drag_record += (dx, dy)\n self.parent.position += self._snap_drag_record\n # Move the parent by the snap record so we can test if we still intersect a snap point\n # It will get moved back it we still intersect the current snap point.\n # If we don't intersect with any snap point, then we will have just moved the parent\n # by the correct displacement anyway.\n for snap_box in self.definition.snap_boxes:\n if (\n snap_box is self._currently_snapped_to or snap_box.can_receive(self)\n ) and self.world_rect.intersects(snap_box.world_rect):\n self.snap_to(snap_box)\n break\n else:\n # If no longer intersecting any valid snap boxes, then stop being snapped\n self._snap_drag_record = cocos.draw.Vector2(0, 0)\n 
self.unsnap_if_snapped()\n\n return EVENT_HANDLED", "def _should_handle_mouse_drag(self) -> bool:\n return self._currently_dragging", "def OnCardChildLeftDown(self, ev):\n self.UnselectAll()\n ev.Skip()", "def check_allow_move(self):\n for event in pygame.event.get():\n if event.type == ALLOW_MOVE:\n self._allow_move = True", "def mouseMoveEvent(self, moveEvent):\n if self.grabbedByWidget==True: \n QGraphicsItem.mouseMoveEvent(self, moveEvent) \n else:\n QGraphicsProxyWidget.mouseMoveEvent(self, moveEvent)", "def mouse_drag(self):\n pass", "def on_poses_dragged(self, ind, pos, offset):\n if hasattr(self.ds, \"pose_positions_allo\"):\n fly, part = np.unravel_index(ind, (self.nb_flies, self.nb_bodyparts))\n pos0 = self.ds.pose_positions_allo.data[self.index_other, fly, part]\n try:\n pos1 = [pos.y(), pos.x()]\n except:\n pos1 = pos\n self.ds.pose_positions_allo.data[self.index_other, fly, part] += pos1 - pos0\n logging.info(f\" Moved {self.ds.poseparts[part].data} of fly {fly} from {pos0} to {pos1}.\")\n self.update_frame()", "def __sendToSingleEvtHandler(self, event):\n x, y, eventId, displayId, device = event.x, event.y, event.eventId, event.device.displayId, event.device\n callback = None\n\n # if the event goes to a specific evtHandler, no need to search for one\n if event.toEvtHandler: \n callback = event.toEvtHandler._getCallback(eventId)\n\n # generate EVT_ENTERED_WINDOW and EVT_LEFT_WINDOW events\n if event.toEvtHandler._doesAllowDrag() and eventId == EVT_DRAG:\n self.__enlargeWidgets(event)\n handler = self.getEvtHandlerAtPos(x,y, displayId, event)\n if handler != device.lastHandler:\n if device.lastHandler and device.lastHandler != event.toEvtHandler:\n self.__sendEvent(WindowLeftEvent(device),\n device.lastHandler._getCallback(EVT_LEFT_WINDOW))\n \n if handler and handler._doesAllowDrop():\n self.__sendEvent(WindowEnteredEvent(device),\n handler._getCallback(EVT_ENTERED_WINDOW))\n \n device.lastHandler = handler\n\n \n else:\n # find the object under this current position\n handler = self.getEvtHandlerAtPos(x,y, displayId, event)\n\n # enlarges widgets as the mouse approaches them\n if (getEnlargeWidgets() or getEnlargeAppWidgets()) and \\\n event.eventId == EVT_MOVE or event.eventId == EVT_DRAG:\n self.__enlargeWidgets(event)\n\n if handler and not handler._captured:\n callback = handler._getCallback(eventId)\n\n # generate EVT_ENTERED_WINDOW and EVT_LEFT_WINDOW events\n if handler != device.lastHandler: # handler changed\n\n # only allow move events to cross handler borders\n # e.g. 
if drag originated in one handler, don't let it carry over to another one\n if (eventId >= EVT_ANALOG1 and eventId <= EVT_ANALOG3) or \\\n (eventId >= EVT_ANALOG1_SPECIAL and eventId <= EVT_ANALOG3_SPECIAL):\n return \n\n if device.lastHandler:\n evtId = EVT_LEFT_WINDOW\n if device.specialDevice: evtId = EVT_LEFT_WINDOW_SPECIAL\n self.__sendEvent(WindowLeftEvent(device),\n device.lastHandler._getCallback(evtId))\n\n if handler and callback: # if there is no callback, don't do anything\n evtId = EVT_ENTERED_WINDOW\n if device.specialDevice: evtId = EVT_ENTERED_WINDOW_SPECIAL\n self.__sendEvent(WindowEnteredEvent(device),\n handler._getCallback(evtId))\n \n device.lastHandler = handler\n\n\n self.__sendEvent(event, callback)", "def OnLeftDown(self, event):\n\n click_posn = event.GetPositionTuple() if WX3 else event.GetPosition()\n\n if event.ShiftDown():\n self.is_box_select = True\n self.SetCursor(wx.StockCursor(wx.CURSOR_CROSS))\n (self.sbox_w, self.sbox_h) = (0, 0)\n (self.sbox_1_x, self.sbox_1_y) = click_posn\n else:\n self.is_box_select = False\n self.SetCursor(wx.StockCursor(wx.CURSOR_HAND))\n (self.last_drag_x, self.last_drag_y) = click_posn\n event.Skip()", "def on_mouse_motion(self, evt):\n \n pass", "def on_clicked_motion(self, event):\n pass", "def DragHitTest (self, x, y):\n assert (False)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Listens to `wx.EVT_LEFT_UP` events from `Card`s only while a `Card` is being click-dragged.
def OnCardLeftUp(self, ev):
        # terminate moving
        if self.on_motion:
            self.on_motion = False
            for c, orig, pos in self.moving_cards_pos:
                self.EraseCardRect(c, pos)

        if self.moving_cards_pos:
            for c, orig, pos in self.moving_cards_pos:
                final_pos = ev.GetPosition() + orig - (Content.BORDER_WIDTH, Content.BORDER_WIDTH)
                # since we need to set absolute final position, we use
                # Card.Move instead of Card.MoveBy
                c.Move(final_pos)

        self.moving_cards_pos = []
        self.ReleaseMouse()
        self.Unbind(wx.EVT_LEFT_UP)
        self.Unbind(wx.EVT_MOTION)
[ "def OnCardLeftDown(self, ev):\n card = ev.GetEventObject()\n\n # bring to front and select\n card.Raise()\n self.selec.SelectCard(card)\n\n # initiate moving\n self.CaptureMouse()\n self.Bind(wx.EVT_LEFT_UP, self.OnCardLeftUp)\n self.Bind(wx.EVT_MOTION, self.OnMovingCard)\n\n self.on_motion = False\n pos = card.GetPosition() + ev.GetPosition() # relative to the canvas\n self.moving_cards_pos = []\n for c in self.GetSelection():\n # (card, pos w.r.t. the original click, current pos)\n self.moving_cards_pos.append((c, c.GetPosition() - pos, c.GetPosition()))", "def OnCardChildLeftDown(self, ev):\n self.UnselectAll()\n ev.Skip()", "def OnLeftDown(self, event):\n\n click_posn = event.GetPositionTuple() if WX3 else event.GetPosition()\n\n if event.ShiftDown():\n self.is_box_select = True\n self.SetCursor(wx.StockCursor(wx.CURSOR_CROSS))\n (self.sbox_w, self.sbox_h) = (0, 0)\n (self.sbox_1_x, self.sbox_1_y) = click_posn\n else:\n self.is_box_select = False\n self.SetCursor(wx.StockCursor(wx.CURSOR_HAND))\n (self.last_drag_x, self.last_drag_y) = click_posn\n event.Skip()", "def mouseup(self, event):\n if self.dragging:\n self.dragging = False\n event.consumed = True", "def OnLeftUp(self, event):\n\n self.last_drag_x = self.last_drag_y = None\n\n if self.ignore_next_up:\n self.ignore_next_up = False\n return\n\n self.SetCursor(wx.StockCursor(wx.CURSOR_DEFAULT))\n\n # we need a repaint to remove any selection box, but NOT YET!\n delayed_paint = self.sbox_1_x # True if box select active\n\n # if any layers interested, inform of possible select\n if not self.was_dragging:\n if self.is_box_select:\n # possible box selection\n ll_corner_v = (self.sbox_1_x, self.sbox_1_y)\n tr_corner_v = (self.sbox_1_x + self.sbox_w,\n self.sbox_1_y + self.sbox_h)\n ll_corner_m = self.ConvertView2Geo(ll_corner_v)\n tr_corner_m = self.ConvertView2Geo(tr_corner_v)\n\n # check each layer for a box select event\n # we work on a copy as user response could change order\n for id in self.layer_z_order[:]:\n l = self.layer_mapping[id]\n # if layer visible and selectable\n if l.selectable and l.visible:\n if l.map_rel:\n # map-relative, get all points selected (if any)\n p_data = self.layerBSelHandler[l.type](l,\n ll_corner_m,\n tr_corner_m)\n else:\n # view-relative\n p_data = self.layerBSelHandler[l.type](l,\n ll_corner_v,\n tr_corner_v)\n self.RaiseSelectEvent(EventBoxSelect, l, p_data)\n\n # user code possibly updated screen\n delayed_paint = True\n self.is_box_select = False\n else:\n # possible point selection\n clickpt_v = event.GetPositionTuple() if WX3 else event.GetPosition()\n clickpt_m = self.ConvertView2Geo(clickpt_v)\n # check each layer for a point select callback\n # we work on a copy as user callback could change order\n for id in self.layer_z_order[:]:\n l = self.layer_mapping[id]\n # if layer visible and selectable\n if l.selectable and l.visible and \\\n (l.type in self.layerPSelHandler):\n if l.map_rel:\n p_data = self.layerPSelHandler[l.type](l,\n clickpt_m)\n else:\n p_data = self.layerPSelHandler[l.type](l,\n clickpt_v)\n self.RaiseSelectEvent(EventPointSelect, l, p_data,\n vposn=clickpt_v,\n mposn=clickpt_m)\n\n # user code possibly updated screen\n delayed_paint = True\n\n # turn off drag\n self.was_dragging = False\n\n # turn off box selection mechanism\n self.is_box_select = False\n self.sbox_1_x = self.sbox_1_y = None\n\n # force PAINT event if required\n if delayed_paint:\n self.Update()", "def handleDragEvent(self, handle, event):\n handle.planeWise = (event.type() in (QtCore.QEvent.MouseMove, 
QtCore.QEvent.MouseButtonPress)\n and (event.modifiers() & QtCore.Qt.ControlModifier))", "def mousedown(self, event):\n self.deltax = event.x - (self.x + self.width // 2)\n self.deltay = event.y - (self.y + self.height // 2)\n if abs(self.deltax) < 50 and abs(self.deltay) < 50:\n self.dragging = True\n # only drag one bunny at a time - consume the event\n event.consumed = True", "def OnMovingCard(self, ev):\n if ev.Dragging() and self.moving_cards_pos:\n # draw a rectangle while moving\n # order is important\n self.on_motion = True\n for c, orig, pos in self.moving_cards_pos:\n self.EraseCardRect(c, pos, refresh = False)\n pos = ev.GetPosition() + orig\n self.PaintCardRect(c, pos)", "def OnLeftDClick(self, event):\n\n # ignore next Left UP event\n self.ignore_next_up = True\n\n # TODO: should ignore double-click off the map, but within view\n # a possible workaround is to limit minimum view level\n\n # get view coords of mouse double click, want same centre afterwards\n xy = event.GetPositionTuple() if WX3 else event.GetPosition()\n\n if event.ShiftDown():\n # zoom out if shift key also down\n if self.ZoomToLevel(self.level - 1):\n self.ZoomOut(xy)\n else:\n # zoom in\n if self.ZoomToLevel(self.level + 1):\n self.ZoomIn(xy)\n\n # Raise position event to update the status text.\n self.RaiseMousePositionEvent(xy)", "def on_mouse_button_up(self, event):\r\n surf = self.board_surface\r\n surf_abs_rect = surf.get_rect(topleft=surf.get_abs_offset())\r\n if surf_abs_rect.collidepoint(event.pg_event.pos):\r\n if not self.ctrl:\r\n # no tiles shown in select area yet\r\n return\r\n for rect, gx, gy in self.ctrl:\r\n # rect is in local coords to start with\r\n r = rect.copy()\r\n r.move_ip(surf_abs_rect.left, surf_abs_rect.top)\r\n if r.collidepoint(event.pg_event.pos):\r\n LOGGER.info(\"mouse button up in %r\" % ((gx, gy),))\r\n self.em.post(TAOPlacementRequestEvent(gx, gy))", "def on_touch_move(self, evt):\n \n pass", "def on_left_button_up(self):\n if self.adjust:\n cancel(self.adjust)\n self.adjust = None\n self._update_display()", "def onLeftDown(self, evt=None):\n evt_x, evt_y = evt.GetX(), evt.GetY()\n max_x, max_y = self.full_size\n img_w, img_h = self.bitmap_size\n pan_w, pan_h = self.panel_size\n pad_w, pad_h = (pan_w-img_w)/2.0, (pan_h-img_h)/2.0\n\n x = int(0.5 + (evt_x - pad_w)/self.scale)\n y = int(0.5 + (evt_y - pad_h)/self.scale)\n if self.leftdown_cb is not None:\n self.leftdown_cb(x, y, xmax=max_x, ymax=max_y)", "def drag_mouse_input(self,\n dst=(0, 0),\n src=None,\n button=\"left\",\n pressed=\"\",\n absolute=True,\n duration=0.0):\n raise NotImplementedError()", "def on_left_button_down(self):\n if hub.button.right.is_pressed():\n # Both buttons are pressed down\n self._reset_values()\n return\n\n def _predicate():\n return (hub.button.left.is_pressed()\n and not hub.button.right.is_pressed())\n\n self._update_display()\n self.adjust = self._adjust_actuator_value(_predicate, -10)\n call_soon(self.adjust)", "def player_handle_move(self) -> None:\r\n mouse_pos = pg.mouse.get_pos()\r\n mouse_click = pg.mouse.get_pressed()\r\n\r\n for i in range(1, self.size + 1):\r\n x = i * self.gap_size + (i - 1) * self.box_size\r\n for j in range(1, self.size + 1):\r\n y = j * self.gap_size + (j - 1) * self.box_size\r\n if x < mouse_pos[0] < x + self.box_size and y < mouse_pos[1] < y + self.box_size and self.tags[i-1][j-1] is None:\r\n displayWindow.blit(self.cross, (x, y))\r\n\r\n if mouse_click[0] == 1:\r\n self.tags[i-1][j-1] = 'x'\r\n self.player_move_in_progress = False", "def 
_mouseDown(self):\n # Execute pre interaction callback\n self._preCallback()", "def handleMouseRelease(self, event):\n if self._board.determineIfBought():\n if self._type == \"purchase\":\n self._board.purchaseButton()\n else:\n self._board.passButton()", "def OnRightUp(self, event):\n\n if self.ignore_next_right_up:\n self.ignore_next_right_up = False\n return\n\n self.SetCursor(wx.StockCursor(wx.CURSOR_DEFAULT))\n\n # we need a repaint to remove any selection box, but NOT YET!\n delayed_paint = self.sbox_1_x # True if box select active\n\n # if any layers interested, inform of possible select\n if self.is_box_select:\n # possible box selection\n ll_corner_v = (self.sbox_1_x, self.sbox_1_y)\n tr_corner_v = (self.sbox_1_x + self.sbox_w,\n self.sbox_1_y + self.sbox_h)\n ll_corner_m = self.ConvertView2Geo(ll_corner_v)\n tr_corner_m = self.ConvertView2Geo(tr_corner_v)\n\n # check each layer for a box select event\n # we work on a copy as user response could change order\n for id in self.layer_z_order[:]:\n l = self.layer_mapping[id]\n if l.selectable and l.visible: # and l.event_box_select:\n if l.map_rel:\n # map-relative, get all points selected (if any)\n pts = self.layerBSelHandler[l.type](l, ll_corner_m,\n tr_corner_m)\n else:\n # view-relative\n pts = self.layerBSelHandler[l.type](l, ll_corner_v,\n tr_corner_v)\n self.RaiseSelectEvent(EventRightBoxSelect, l, pts)\n\n # user code possibly updated screen\n delayed_paint = True\n self.is_box_select = False\n else:\n # possible point selection\n clickpt_v = event.GetPositionTuple() if WX3 else event.GetPosition()\n clickpt_m = self.ConvertView2Geo(clickpt_v)\n # check each layer for a point select callback\n # we work on a copy as user callback could change order\n for id in self.layer_z_order[:]:\n l = self.layer_mapping[id]\n # if layer visible, selectable and there is a callback\n if l.selectable and l.visible:\n if l.map_rel:\n pt = self.layerPSelHandler[l.type](l, clickpt_m)\n else:\n pt = self.layerPSelHandler[l.type](l, clickpt_v)\n self.RaiseSelectEvent(EventRightPointSelect, l, pt,\n mposn=clickpt_m,\n vposn=clickpt_v)\n\n # user code possibly updated screen\n delayed_paint = True\n\n # turn off box selection mechanism\n self.is_box_select = False\n self.sbox_1_x = self.sbox_1_y = None\n\n # force PAINT event to remove selection box (if required)\n if delayed_paint:\n self.Update()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Listens to `wx.EVT_MOUSE_CAPTURE_LOST` events from this object.
def OnMouseCaptureLost(self, ev): self.ReleaseMouse()
[ "def on_mouse_leave (self, event):\n\t\tprint 'mouse leave'", "def glfw_mouse_event_callback(self, window, xpos, ypos):\r\n xpos, ypos = int(xpos), int(ypos)\r\n dx, dy = self._calc_mouse_delta(xpos, ypos)\r\n\r\n if self.mouse_states.any:\r\n self._mouse_drag_event_func(xpos, ypos, dx, dy)\r\n else:\r\n self._mouse_position_event_func(xpos, ypos, dx, dy)", "def OnLeaveWindow(self, event):\n\n self.RaiseMousePositionEvent(None)", "def mouseReleaseEvent(self, event):\n\n if event.button() == Qt.LeftButton:\n if self.select_zooming:\n self.zooming_rect = QRect(self.zoom_point.x(), self.zoom_point.y(),\n event.x(), event.x() / self.vid_ratio)\n self.zoom_point = QPoint()\n self.select_zooming = False\n self.zooming = True\n self.load_current_frame()\n self.update()\n else:\n if self.current_line:\n self.lines.append(self.current_line)\n self.current_line = []\n self.drawing = False\n\n if event.button() == Qt.RightButton:\n self.playing_using_mouse = False", "def mouse_listener(self, event):\n if self.game_state == GAME_MENU:\n if event.type == pygame.MOUSEMOTION:\n self.gui.mouse_update(event)\n elif event.type == pygame.MOUSEBUTTONDOWN:\n self.gui.mouse_click(event)\n elif event.type == pygame.MOUSEBUTTONUP:\n pass", "def mouseMoveEvent(self, moveEvent):\n if self.grabbedByWidget==True: \n QGraphicsItem.mouseMoveEvent(self, moveEvent) \n else:\n QGraphicsProxyWidget.mouseMoveEvent(self, moveEvent)", "def handle(self):\n pygame.event.post(pygame.event.Event(EventHandler.NEXT_FRAME_EVENT, {}))\n self.keyboardPress = self.keyboardPress.fromkeys(self.keyboardPress, False)\n self.mousePress = self.mousePress.fromkeys(self.mousePress, None)\n for event in pygame.event.get():\n if self.keyboard:\n if event.type == pygame.KEYDOWN:\n self.keyboardPress[event.key] = True\n self.keyboardHold[event.key] = True\n if event.type == pygame.KEYUP:\n self.keyboardHold[event.key] = False\n if self.mouse:\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.mousePress[event.button] = event.pos\n if event.type in self.listeners.keys():\n for func in self.listeners[event.type]:\n func(event)", "def leaveEvent(self, event):\n\n if self.doesSignal:\n self.mouseLeave.emit()\n\n return super(QTPieWidget, self).leaveEvent(event)", "def on_mouse_motion(self, evt):\n \n pass", "def on_lost_focus(self, event):\n event.Skip()\n self.shell_obj._field_lost_focus()", "def mouseMoved(self, mouseX, mouseY, pmouseX, pmouseY):\n pass", "def _handle_mouse(self, ev):\n FROM_LEFT_1ST_BUTTON_PRESSED = 0x1\n\n result = []\n\n # Check event type.\n if ev.ButtonState == FROM_LEFT_1ST_BUTTON_PRESSED:\n # On a key press, generate both the mouse down and up event.\n for event_type in [MouseEventType.MOUSE_DOWN, MouseEventType.MOUSE_UP]:\n data = ';'.join([\n event_type,\n str(ev.MousePosition.X),\n str(ev.MousePosition.Y)\n ])\n result.append(KeyPress(Keys.WindowsMouseEvent, data))\n\n return result", "def mouseReleaseEvent(self, event):\n event.ignore()\n state = self.frame_state\n self._refreshCursor(mouse_event_pos(event))\n if state.mouse_title:\n if self.titleBarMouseReleaseEvent(event):\n event.accept()\n state.mouse_title = False\n return\n if self.isWindow() and event.button() == Qt.LeftButton:\n state.resize_border = self.NoBorder\n state.resize_offset = None\n if state.last_size is not None:\n if state.last_size != self.size():\n self.manager().frame_resized(self)\n del state.last_size\n event.accept()", "def handle_mouse(self, event, x, y, flags, param):\n mousey = MouseEvent(event, x, y, flags, param)\n 
window_commands.mouse_pub.publish(mousey)", "def videoWindowLeave(self, widget, event):\n self.destroyIdleTimer()", "def mouse_handler(self,events):\n\n for event in events:\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.mousedown = True\n self.mousebutton = event.button\n elif event.type == pygame.MOUSEBUTTONUP:\n self.mousedown = False\n self.mousebutton = event.button\n self.mouseX, self.mouseY = pygame.mouse.get_pos()\n\n #manage tool events\n if self.draw_tool == \"Line\":\n self.draw_line_template()\n if self.draw_tool == \"Circle\":\n self.draw_circle_template()\n\n #show mouse state\n self.show_mousestate()", "def Left_Release(event,mouse_socket):\n mouse_socket.sendto('^L',(IP, MOUSE_PORT))\n return True", "def mouseReleased(self, button, mouseX, mouseY):\n pass", "def handle_mouse_release(self, event):\n self._die1.roll()\n self._die2.roll()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Listens to the "Paste" `wx.EVT_MENU` event from the context menu.
def OnPaste(self, ev): self.PasteFromClipboard(self.menu_position)
[ "def context_menu(self) -> None:\n menu = QMenu(self)\n if platform.system() == \"Darwin\":\n copy_keys = QKeySequence(Qt.CTRL + Qt.Key_C)\n paste_keys = QKeySequence(Qt.CTRL + Qt.Key_V)\n else:\n copy_keys = QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_C)\n paste_keys = QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_V)\n\n menu.addAction(\"Copy\", self.copy, copy_keys)\n menu.addAction(\"Paste\", self.paste, paste_keys)\n menu.exec_(QCursor.pos())", "def contentsContextMenuEvent(self,ev):\n return", "def on_paste(self, sender, arg=None):\n buf = self.get_active_buffer()\n if buf is not None:\n buf.paste_clipboard(sender, arg)", "def raiseContextMenu(self, ev):\n menu = self.getMenu()\n menu.popup(ev.screenPos().toPoint())", "def addEditMenuItems(self, menu):\n menu.add(self.createStandardItem(\"Cut\"))\n menu.add(self.createStandardItem(\"Copy\"))\n menu.add(self.createStandardItem(\"Paste\"))\n menu.add(self.createStandardItem(\"Select All\"))", "def talon_add_context_clipboard():\n friendly_name = actions.app.name()\n executable = actions.app.executable().split(os.path.sep)[-1]\n if app.platform != \"windows\":\n result = \"os: {}\\napp: {}\\ntitle: {}\".format(app.platform, friendly_name, actions.win.title())\n\n #on windows, it's best to include both the friendly name and executable name in case the muicache breaks....\n else:\n result = \"os: {}\\napp: {}\\napp: {}\\ntitle: {}\".format(app.platform, friendly_name, executable, actions.win.title())\n\n clip.set(result)", "def on_menuitem_select (self, id):\n\t\t\n\t\tpass", "def updatePasteAvail(self):\n mime = QtGui.QApplication.clipboard().mimeData()\n self.allActions['EditPaste'].setEnabled(len(mime.data('text/xml') or\n mime.data('text/plain'))\n > 0)\n focusWidget = QtGui.QApplication.focusWidget()\n if hasattr(focusWidget, 'pastePlain'):\n focusWidget.updateActions()", "def _context_menu_make(self, pos):\n menu = QtGui.QMenu(self)\n\n self.cut_action = menu.addAction('Cut', self.cut)\n self.cut_action.setEnabled(self.can_cut())\n self.cut_action.setShortcut(QtGui.QKeySequence.Cut)\n\n self.copy_action = menu.addAction('Copy', self.copy)\n self.copy_action.setEnabled(self.can_copy())\n self.copy_action.setShortcut(QtGui.QKeySequence.Copy)\n\n self.paste_action = menu.addAction('Paste', self.paste)\n self.paste_action.setEnabled(self.can_paste())\n self.paste_action.setShortcut(QtGui.QKeySequence.Paste)\n\n anchor = self._control.anchorAt(pos)\n if anchor:\n menu.addSeparator()\n self.copy_link_action = menu.addAction(\n 'Copy Link Address', lambda: self.copy_anchor(anchor=anchor))\n self.open_link_action = menu.addAction(\n 'Open Link', lambda: self.open_anchor(anchor=anchor))\n\n menu.addSeparator()\n menu.addAction(self.select_all_action)\n\n menu.addSeparator()\n menu.addAction(self.export_action)\n menu.addAction(self.print_action)\n\n return menu", "def pasteEvent(self):\n try:\n self.currentTabController().paste()\n except NoCurrentTabControllerException:\n logging.warning(self.__class__.__name__ + \": \" + self.TAB_PREMATURELY_CLOSED_WARNING)", "def __onCopy(self, ev):\n text = self.__textEdit.GetValue()\n\n cb = wx.TheClipboard\n\n if cb.Open():\n cb.SetData(wx.TextDataObject(text))\n cb.Close()\n\n if self.__showCopyMessage:\n td = TimeoutDialog(self, 'Copied!', 1000)\n td.Show()", "def cut_copy_paste_del_sel_event(self, event):\n\n if event.GetId() == wx.ID_CUT or wx.ID_COPY or wx.ID_PASTE or wx.ID_DELETE or wx.ID_SELECTALL:\n self.cut_copy_paste_del_sel_action(event)\n else:\n event.Skip()", "def openContextMenu (self, 
event):\r\n self.parent.PopupMenu(PassageWidgetContext(self), event.GetPosition())", "def OnTabMenu(self, evt):\n e_id = evt.GetId()\n if e_id in (ed_glob.ID_COPY_PATH, ed_glob.ID_COPY_FILE):\n path = self.GetFileName()\n if path is not None:\n if e_id == ed_glob.ID_COPY_FILE:\n path = GetFileName(path)\n SetClipboardText(path)\n elif e_id == ed_glob.ID_MOVE_TAB:\n frame = wx.GetApp().OpenNewWindow()\n nbook = frame.GetNotebook()\n parent = self.GetParent()\n pg_txt = parent.GetRawPageText(parent.GetSelection())\n nbook.OpenDocPointer(self.GetDocPointer(),\n self.GetDocument(), pg_txt)\n self._ignore_del = True\n wx.CallAfter(parent.ClosePage)\n elif e_id == ed_glob.ID_CLOSE_OTHERS:\n parent = self.GetParent()\n if hasattr(parent, 'CloseOtherPages'):\n parent.CloseOtherPages()\n elif e_id in (EdEditorView.ID_CLOSE_TAB, EdEditorView.ID_CLOSE_ALL_TABS):\n # Need to relay events up to toplevel window on GTK for them to\n # be processed. On other platforms the propagate by themselves.\n evt.SetId({ EdEditorView.ID_CLOSE_TAB : ed_glob.ID_CLOSE,\n EdEditorView.ID_CLOSE_ALL_TABS : ed_glob.ID_CLOSEALL}.get(e_id))\n wx.PostEvent(self.GetTopLevelParent(), evt)\n else:\n evt.Skip()", "def CategorizedPopUpHndlr(self, event, whichList):\r\n menu = QMenu(self)\r\n newCatList = self.listCategories.selectedItems()\r\n if len(newCatList) == 0:\r\n str = 'None'\r\n else:\r\n str = newCatList[0].text()\r\n \r\n self.NewCatAct.setText(str)\r\n menu.addAction(self.NewCatAct)\r\n menu.addAction(self.NoneCatAct)\r\n if whichList.currentItem() == None:\r\n return\r\n selectedEntryStr = whichList.currentItem().text()\r\n self.newCatStr = str\r\n self.selectedEntry = self.cf.find(selectedEntryStr)\r\n #menu.addAction(copyAct)\r\n #menu.addAction(pasteAct)\r\n menu.show()\r\n what = menu.exec_(PyQt5.QtGui.QCursor.pos())\r\n if (what):\r\n what.trigger()\r\n pass", "def popupMenu(markingMenu=bool, postMenuCommand=\"string\", button=int, defineTemplate=\"string\", parent=\"string\", ctrlModifier=bool, shiftModifier=bool, numberOfItems=bool, useTemplate=\"string\", altModifier=bool, itemArray=bool, allowOptionBoxes=bool, exists=bool, postMenuCommandOnce=bool, deleteAllItems=bool):\n pass", "def hot_key_callback(self):\n try:\n pyperclip.copy(self.url_producer_proxy())\n except Exception as ex:\n self.context.error_signal.emit('Core Process Failed', ex.__str__())\n else:\n self.context.main_window.switch_display(MainFloatingView.DisplayMode.ready_to_paste)\n # only adds the hot key if no previous hot key exists\n if self.paste_handler is None:\n self.paste_handler = keyboard.add_hotkey('ctrl+v', self.paste_callback)", "def simulatePasteKeystroke( self ):\n \n ContextUtils.typeCommandKey( \"v\" )", "def click_menu(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Listens to the "Insert Content" `wx.EVT_MENU` event from the context menu.
def OnInsertContent(self, ev): self.PlaceNewCard("Content", pos=self.menu_position)
[ "def contentsContextMenuEvent(self,ev):\n return", "def add_content(self):\n self._image_widget = self.menu.add_image(\n image_path=self.selector_modes()[1][\"image\"]\n )\n self._backstory_widget = self.menu.add_label(\n title=self.selector_modes()[1][\"backstory\"]\n )\n self._selector_widget = self._add_selector(\n self.selector_items(), self._on_selector_change\n )\n self.add_buttons()\n self._add_warn_same_chars(public.p1_char.name)\n self._update_from_selection(self._selector_widget.get_value()[0][1])", "def context_menu(self) -> None:\n menu = QMenu(self)\n if platform.system() == \"Darwin\":\n copy_keys = QKeySequence(Qt.CTRL + Qt.Key_C)\n paste_keys = QKeySequence(Qt.CTRL + Qt.Key_V)\n else:\n copy_keys = QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_C)\n paste_keys = QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_V)\n\n menu.addAction(\"Copy\", self.copy, copy_keys)\n menu.addAction(\"Paste\", self.paste, paste_keys)\n menu.exec_(QCursor.pos())", "def on_menuitem_select (self, id):\n\t\t\n\t\tpass", "def register_menu(self, menu):\n\n self._contextual_menu.addMenu(menu)", "def addMenus(self):\n addFileMenu()\n addEditMenu()", "def set_context_menu(self, context_menu):\n self.context_menu = context_menu\n self.context_menu.tree_view = self\n self.context_menu.init_actions()\n for action in self.context_menu.actions():\n self.addAction(action)", "def OnPaste(self, ev):\n self.PasteFromClipboard(self.menu_position)", "def custom_menu_context(self, point):\n if not self.current_working_directory:\n return\n\n index = self.directory_view_tree.currentIndex()\n file_path = self.filesystem.filePath(index)\n selected_item = self.directory_view_tree.indexAt(point)\n menu = QtWidgets.QMenu()\n\n if selected_item.data(QtCore.Qt.DisplayRole) == None:\n LogSystem.success(\"Requested context menu for current working directory!\")\n action_1 = menu.addAction(\"Create new file\")\n action_1.triggered.connect(lambda: self.create_new_file(self.current_working_directory))\n action_2 = menu.addAction(\"Create new directory\")\n action_2.triggered.connect(lambda: self.create_new_folder(self.current_working_directory))\n elif os.path.isfile(file_path):\n LogSystem.success(\"Requested context menu for file: {0}!\".format(file_path))\n action_1 = menu.addAction(\"Open\")\n action_1.triggered.connect(lambda: self.open_file_in_new_tab(file_path))\n action_2 = menu.addAction(\"Rename\")\n action_2.triggered.connect(lambda: self.rename_file(file_path))\n action_3 = menu.addAction(\"Delete\")\n action_3.triggered.connect(lambda: self.delete_file_from_dir_view(file_path))\n else:\n LogSystem.success(\"Requested context menu for directory: {0}!\".format(file_path))\n action_1 = menu.addAction(\"Rename\")\n action_1.triggered.connect(lambda: self.rename_file(file_path))\n action_2 = menu.addAction(\"Delete\")\n action_2.triggered.connect(lambda: self.delete_file_from_dir_view(file_path))\n action_3 = menu.addAction(\"Create new directory\")\n action_3.triggered.connect(lambda: self.create_new_folder(file_path))\n action_3 = menu.addAction(\"Create new file\")\n action_3.triggered.connect(lambda: self.create_new_file(file_path))\n\n menu.exec_(self.directory_view_tree.mapToGlobal(point))", "def set_up_context(self, templates, **cnf):\n self.context_menu = self.make_menu(templates, **cnf)\n MenuUtils.bind_all_context(\n self,\n lambda event: ContextMenuMixin.popup(event, self.context_menu),\n add='+'\n )", "def extraMenus(self):\n pass", "def browser_menu():\n def on_setup_menus(browser):\n \"\"\"\n on browser setupMenus 
was called\n \"\"\"\n # main menu\n menu = browser.form.menubar.addMenu(\"Import Video\")\n\n # import\n action = QAction(APP_ICON, _(\"IMPORT_VIDEO\"), mw)\n action.triggered.connect(lambda: show_dialog())\n menu.addAction(action)\n\n # check update\n action = QAction(_('CHECK_UPDATE'), browser)\n action.triggered.connect(lambda: check_updates(background=False, parent=browser))\n menu.addAction(action)\n\n # About\n action = QAction(_('ABOUT'), browser)\n action.triggered.connect(lambda: show_about_dialog(browser))\n menu.addAction(action)\n\n addHook('browser.setupMenus', on_setup_menus)", "def raiseContextMenu(self, ev):\n menu = self.getMenu()\n menu.popup(ev.screenPos().toPoint())", "def translateContextMenu(self):\n contex_file = self.getTemplateMenu(file_name='context.json')\n\n for contex in contex_file:\n try:\n contex['caption'] = _(contex['caption'])\n except:\n pass\n\n plugin = Paths.getPluginPath()\n context_path = os.path.join(plugin, 'Context.sublime-menu')\n preset_file = JSONFile(context_path)\n preset_file.setData(contex_file)\n preset_file.saveData()", "def addContextMenuAction(self, action):\n self.context_menu.addAction(action)", "def popupMenu(markingMenu=bool, postMenuCommand=\"string\", button=int, defineTemplate=\"string\", parent=\"string\", ctrlModifier=bool, shiftModifier=bool, numberOfItems=bool, useTemplate=\"string\", altModifier=bool, itemArray=bool, allowOptionBoxes=bool, exists=bool, postMenuCommandOnce=bool, deleteAllItems=bool):\n pass", "def prepareContextMenu(self, position):\n # Get the selected item (only one, no multiple selection allowed):\n\t\tcurr = self.treeWidget.selectedItems()[0]\n\n\t\t# Get the corresponding name in the HDF5 file:\n\t\th5Item = self.HDF5File[str(curr.data(0, Qt.UserRole))]\n\t\tkey = str(h5Item.name)\n\n\t\t# Create the menu:\t\t\n\t\tmenu = QMenu()\n\t\tif ((key == \"/data/dark\") or (key == \"/data/white\") or (key == \"/data/image\")):\n\t\t\topenAction = QAction(\"Open image in new tab\", self)\n\t\t\topenAction.triggered.connect(self.openImage)\n\t\t\tmenu.addAction(openAction)\t\t\t\n\t\t\n\t\t# Show the menu:\n\t\tmenu.exec_(self.treeWidget.viewport().mapToGlobal(position))", "def OnTabMenu(self, evt):\n e_id = evt.GetId()\n if e_id in (ed_glob.ID_COPY_PATH, ed_glob.ID_COPY_FILE):\n path = self.GetFileName()\n if path is not None:\n if e_id == ed_glob.ID_COPY_FILE:\n path = GetFileName(path)\n SetClipboardText(path)\n elif e_id == ed_glob.ID_MOVE_TAB:\n frame = wx.GetApp().OpenNewWindow()\n nbook = frame.GetNotebook()\n parent = self.GetParent()\n pg_txt = parent.GetRawPageText(parent.GetSelection())\n nbook.OpenDocPointer(self.GetDocPointer(),\n self.GetDocument(), pg_txt)\n self._ignore_del = True\n wx.CallAfter(parent.ClosePage)\n elif e_id == ed_glob.ID_CLOSE_OTHERS:\n parent = self.GetParent()\n if hasattr(parent, 'CloseOtherPages'):\n parent.CloseOtherPages()\n elif e_id in (EdEditorView.ID_CLOSE_TAB, EdEditorView.ID_CLOSE_ALL_TABS):\n # Need to relay events up to toplevel window on GTK for them to\n # be processed. 
On other platforms the propagate by themselves.\n evt.SetId({ EdEditorView.ID_CLOSE_TAB : ed_glob.ID_CLOSE,\n EdEditorView.ID_CLOSE_ALL_TABS : ed_glob.ID_CLOSEALL}.get(e_id))\n wx.PostEvent(self.GetTopLevelParent(), evt)\n else:\n evt.Skip()", "def add_new_event_content(update, context):\n user = User.resolve_user(update)\n user_language = DatabaseController.load_selected_language(user.user_id)\n if UserEventCreationMachine.receive_state_of_user(user.user_id) != 1:\n return\n content = replace_reserved_characters(update.message.text)\n EventHandler.events_in_creation[user.user_id][\"content\"] = content\n update.message.reply_text(receive_translation(\"event_creation_type\", user_language),\n reply_markup=Event.event_keyboard_type(user_language))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Listens to the "Insert Header" `wx.EVT_MENU` event from the context menu.
def OnInsertHeader(self, ev): self.PlaceNewCard("Header", pos=self.menu_position)
[ "def add_menu_header(stdscr):\n main_header(stdscr)\n stdscr.addstr(SUB_MENU_START[Y], SUB_MENU_START[X], \"Add coin:\")\n stdscr.refresh()", "def changeHeader(self):\n col = self.table_widget.currentColumn()\n\n text, ok = QInputDialog.getText(self, \"Enter Header\", \"Header text:\")\n\n if ok and text != \"\":\n self.table_widget.setHorizontalHeaderItem(col, QTableWidgetItem(text))\n else:\n pass", "def table_menu(self, pos):\n\n self.headerIndex = self.ui.tableWidget.indexAt(pos)\n self.headerIndex = int(self.headerIndex.column())\n menu = QtWidgets.QMenu(self)\n action_change_fieldname = menu.addAction(_('Change fieldname'))\n action_change_fieldname.triggered.connect(self.change_fieldname)\n if self.fields_type[self.headerIndex] == \"character\" and self.headerIndex != 0:\n action_change_fieldname = menu.addAction(_('Change to Qualitative'))\n action_change_fieldname.triggered.connect(self.qualitative_field_type)\n if self.fields_type[self.headerIndex] in ('numeric', 'qualitative'):\n action_change_fieldname = menu.addAction(_('Change to Character'))\n action_change_fieldname.triggered.connect(self.character_field_type)\n menu.popup(self.ui.tableWidget.mapToGlobal(pos))", "def onHeaderClicked(self,idx):\r\n print(\"AbstractTableView: Header Clicked %d\"%idx)", "def contentsContextMenuEvent(self,ev):\n return", "def setup_header(self):\n\n title = self.get_header(self.get_title())\n label = Gtk.Label(label='<span size=\"larger\" weight=\"bold\">%s</span>' % title)\n label.set_use_markup(True)\n self.window.get_content_area().pack_start(label, False, False, \n self.border_pad)", "def init_header(self):\n self.hv = NavHeaderView(self.header)\n self.hv.setSectionsMovable(True)\n self.hv.setSectionsClickable(True)\n self.hv.setHighlightSections(True)\n self.hv.clicked.connect(self.updateModel)\n self.hv.setModel(self.model)", "def header_right_click(self, event):\n region = self.treeview.identify(\"region\", event.x, event.y)\n column = self.treeview.identify_column(event.x)\n if region == 'heading':\n column = self.treeview.heading(column)['text'] # get clicked column text name\n show_opt = {'path': 0, 'artist': 1, 'title': 2, 'album': 3, '#': 4, 'year': 5} # mask name to number\n self.show_option.remove(show_opt.get(column.lower()))\n if not self.show_option:\n self.show_option = [0, 1, 2, 3, 4, 5] # show all if all hidden\n self.treeview.configure(displaycolumns=self.show_option)", "def test_action_hooks_header_dropdown_hook(self):\n SandboxHeaderDropdownActionTestHook(extension=self.extension)\n\n context = Context({'comment': 'this is a comment'})\n\n t = Template(\n \"{% load rb_extensions %}\"\n \"{% header_dropdown_action_hooks %}\")\n\n t.render(context).strip()", "def on_menuitem_select (self, id):\n\t\t\n\t\tpass", "def prepareContextMenu(self, position):\n # Get the selected item (only one, no multiple selection allowed):\n\t\tcurr = self.treeWidget.selectedItems()[0]\n\n\t\t# Get the corresponding name in the HDF5 file:\n\t\th5Item = self.HDF5File[str(curr.data(0, Qt.UserRole))]\n\t\tkey = str(h5Item.name)\n\n\t\t# Create the menu:\t\t\n\t\tmenu = QMenu()\n\t\tif ((key == \"/data/dark\") or (key == \"/data/white\") or (key == \"/data/image\")):\n\t\t\topenAction = QAction(\"Open image in new tab\", self)\n\t\t\topenAction.triggered.connect(self.openImage)\n\t\t\tmenu.addAction(openAction)\t\t\t\n\t\t\n\t\t# Show the menu:\n\t\tmenu.exec_(self.treeWidget.viewport().mapToGlobal(position))", "def add_header(self, *args, **kwargs):\r\n self.header = True\r\n 
self.add_row(ypos=0, *args, **kwargs)", "def add_specific_menu(self, menu, event, lat, lon):\n raise NotImplementedError", "def change_header_tip(self, event, game=''):\n self.header_tip.config(text=game)", "def editorheader(self, d):\n _ = self.request.getText\n\n html = [\n # Custom html above header\n self.emit_custom_html(self.cfg.page_header1),\n self.username(d),\n\n # Header\n u'<div id=\"header\">',\n self.searchform(d),\n self.logo(),\n u'<div id=\"locationline\">',\n self.interwiki(d),\n self.title(d),\n u'</div>',\n self.trail(d),\n u'</div>',\n\n # Custom html below header (not recomended!)\n self.emit_custom_html(self.cfg.page_header2),\n\n # Sidebar\n u'<div id=\"sidebar\">',\n self.wikipanel(d),\n self.pagepanel(d),\n u'</div>',\n\n # Page\n self.startPage(),\n \n self.msg(d),\n ]\n return u'\\n'.join(html)", "def test_action_hooks_header_hook(self):\n SandboxHeaderActionTestHook(extension=self.extension)\n\n context = Context({'comment': 'this is a comment'})\n\n t = Template(\n \"{% load rb_extensions %}\"\n \"{% header_action_hooks %}\")\n\n t.render(context).strip()", "def register_menu(self, menu):\n\n self._contextual_menu.addMenu(menu)", "def on_helpAboutMenuItem_activate(self,*args):\n print \"Help About\"", "def raiseContextMenu(self, ev):\n menu = self.getMenu()\n menu.popup(ev.screenPos().toPoint())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Listens to the "Insert Image" `wx.EVT_MENU` event from the context menu.
def OnInsertImg(self, ev): self.PlaceNewCard("Image", pos=self.menu_position)
[ "def prepareContextMenu(self, position):\n # Get the selected item (only one, no multiple selection allowed):\n\t\tcurr = self.treeWidget.selectedItems()[0]\n\n\t\t# Get the corresponding name in the HDF5 file:\n\t\th5Item = self.HDF5File[str(curr.data(0, Qt.UserRole))]\n\t\tkey = str(h5Item.name)\n\n\t\t# Create the menu:\t\t\n\t\tmenu = QMenu()\n\t\tif ((key == \"/data/dark\") or (key == \"/data/white\") or (key == \"/data/image\")):\n\t\t\topenAction = QAction(\"Open image in new tab\", self)\n\t\t\topenAction.triggered.connect(self.openImage)\n\t\t\tmenu.addAction(openAction)\t\t\t\n\t\t\n\t\t# Show the menu:\n\t\tmenu.exec_(self.treeWidget.viewport().mapToGlobal(position))", "def setupMenu(self):\n print(f'\\nsetupMenu')\n # Create actions for file menu\n open_act = QAction('Open...', self)\n open_act.setShortcut('Ctrl+O')\n open_act.triggered.connect(self.openImageFile)\n\n save_act = QAction('Save', self)\n save_act.setShortcut('Ctrl+S')\n save_act.triggered.connect(self.saveImageFile)\n\n # Create menu bar\n menu_bar = self.menuBar()\n menu_bar.setNativeMenuBar(False)\n\n # Create file menu and add actions\n file_menu = menu_bar.addMenu('File')\n file_menu.addAction(open_act)\n file_menu.addAction(save_act)", "def contentsContextMenuEvent(self,ev):\n return", "def insertImage(self):\n self.popup = QtGui.QFileDialog()\n filename = self.popup.getOpenFileName(self,\n \"select an image\",\n \"\",\n \"Image Files (*.png *.jpg *.bmp *.jpeg *.svg *.gif)\" + \\\n \";;all files (*.*)\")\n\n # QFileDialog returns a tuple with filename and used filter\n if filename[0]:\n imagemarkdown = tp.create_image_markdown(filename[0])\n self.corpusBox.insertPlainText(imagemarkdown)", "def image_chooser(self):\n self.__image = pg.image.load(\"res/ghost/\" + Ghost.image_names[self.__id] + \"/start.png\")", "def OnInsertContent(self, ev):\n self.PlaceNewCard(\"Content\", pos=self.menu_position)", "def menu_draw(self, context):\n self.layout.operator(EmbarkNewExportCollection.bl_idname, icon='COLLECTION_NEW')", "def on_fileOpenMenuItem_activate(self, *args):\n if self._ui.save_changes():\n fileChooser = gtk.FileChooserDialog(\n title=\"Open File\", \n action=gtk.FILE_CHOOSER_ACTION_OPEN,\n buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN,gtk.RESPONSE_OK))\n fileChooser.add_filter(self.file_filter)\n response = fileChooser.run()\n if response == gtk.RESPONSE_OK:\n path = fileChooser.get_filename()\n self._ui.load_image(path)\n fileChooser.destroy()", "def on_menuitem_select (self, id):\n\t\t\n\t\tpass", "def raiseContextMenu(self, ev):\n menu = self.getMenu()\n menu.popup(ev.screenPos().toPoint())", "def show_popup(event, menu):\n\tmenu.post(event.x_root, event.y_root)", "def add_specific_menu(self, menu, event, lat, lon):\n raise NotImplementedError", "def on_pan_image_menu(self, event):\n if not self.pan_image:\n self.view.toolbar.ToggleTool(self.view.toolbar_ids['Pan Image'], True)\n else:\n self.view.toolbar.ToggleTool(self.view.toolbar_ids['Pan Image'], False)\n self.on_pan_image(event)", "def custom_menu_context(self, point):\n if not self.current_working_directory:\n return\n\n index = self.directory_view_tree.currentIndex()\n file_path = self.filesystem.filePath(index)\n selected_item = self.directory_view_tree.indexAt(point)\n menu = QtWidgets.QMenu()\n\n if selected_item.data(QtCore.Qt.DisplayRole) == None:\n LogSystem.success(\"Requested context menu for current working directory!\")\n action_1 = menu.addAction(\"Create new file\")\n action_1.triggered.connect(lambda: 
self.create_new_file(self.current_working_directory))\n action_2 = menu.addAction(\"Create new directory\")\n action_2.triggered.connect(lambda: self.create_new_folder(self.current_working_directory))\n elif os.path.isfile(file_path):\n LogSystem.success(\"Requested context menu for file: {0}!\".format(file_path))\n action_1 = menu.addAction(\"Open\")\n action_1.triggered.connect(lambda: self.open_file_in_new_tab(file_path))\n action_2 = menu.addAction(\"Rename\")\n action_2.triggered.connect(lambda: self.rename_file(file_path))\n action_3 = menu.addAction(\"Delete\")\n action_3.triggered.connect(lambda: self.delete_file_from_dir_view(file_path))\n else:\n LogSystem.success(\"Requested context menu for directory: {0}!\".format(file_path))\n action_1 = menu.addAction(\"Rename\")\n action_1.triggered.connect(lambda: self.rename_file(file_path))\n action_2 = menu.addAction(\"Delete\")\n action_2.triggered.connect(lambda: self.delete_file_from_dir_view(file_path))\n action_3 = menu.addAction(\"Create new directory\")\n action_3.triggered.connect(lambda: self.create_new_folder(file_path))\n action_3 = menu.addAction(\"Create new file\")\n action_3.triggered.connect(lambda: self.create_new_file(file_path))\n\n menu.exec_(self.directory_view_tree.mapToGlobal(point))", "def on_fileSaveMenuItem_activate(self, *args):\n if self._ui.current_file is None:\n return self.on_fileSaveAsMenuItem_activate(args)\n else:\n self._ui.save_image(self._ui.current_file)\n return True", "def onImage(self, msg):\n self.image_msg = msg\n self.has_new_image = True", "def load_img_callback(self):\n if not self.allow_open: return # GUI used from other app\n\n fn = filedialog.askopenfilename(title = \"Select file\",\n filetypes = ((\"PNG files\",\"*.png\"),(\"JPEG files\",\"*.jpg\"),(\"All files\",\"*.*\")))\n if fn != \"\": self.load_image(fn)", "def PopupMenuItems(self,menu):\n pass", "def add_content(self):\n self._image_widget = self.menu.add_image(\n image_path=self.selector_modes()[1][\"image\"]\n )\n self._backstory_widget = self.menu.add_label(\n title=self.selector_modes()[1][\"backstory\"]\n )\n self._selector_widget = self._add_selector(\n self.selector_items(), self._on_selector_change\n )\n self.add_buttons()\n self._add_warn_same_chars(public.p1_char.name)\n self._update_from_selection(self._selector_widget.get_value()[0][1])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Paints a rectangle just big enough to enclose `card`.
def PaintCardRect(self, card, pos, thick=MOVING_RECT_THICKNESS, style=wx.SOLID, refresh=True): x, y, w, h = card.GetRect() rect = wx.Rect(pos[0], pos[1], w, h) rect = rect.Inflate(2 * thick, 2 * thick) self.PaintRect(rect, thick=thick, style=style, refresh=refresh)
[ "def draw(self):\n # check the current 'unit' value\n u = int(min(float(self.width) / 10.0, float(self.height) / 15.0))\n\n # draw the background\n self.canvas_before.add(Color(constant_color_background))\n self.canvas_before.add(Rectangle(0, 0, self.width, self.height))\n\n # draw the card\n if self.visible:\n # the card is visible (face up): we draw it\n self.canvas.add(Color(constant_color_card))\n self.canvas.add(Line(rounder_rectangle=(0,0,10*self.unit, 15*self.unit,self.unit)))\n # read the characteristics of the card from the card code\n c = int(self_code[0])\n s = int(self_code[1])\n f = int(self_code[2])\n n = int(self_code[3])\n # draws the symbols\n if n == 0:\n # adds 1 symbol in the middle of the card\n self.addsymbol(u, 6.0*u, 8*u, 2.5*u, c, s, f)\n elif n == 1:\n # adds 2 symbols spread on the height\n self.addsymbol(u, 3.5*u, 8*u, 2.5*u, c, s, f)\n self.addsymbol(u, 8.5*u, 8*u, 2.5*u, c, s, f)\n else:\n # adds 3 symbols spread on teh height\n self.addsymbol(u, 2.5*u, 8*u, 2.5*u, c, s, f)\n self.addsymbol(u, 6.0*u, 8*u, 2.5*u, c, s, f)\n self.addsymbol(u, 9.5*u, 8*u, 2.5*u, c, s, f)\n # show a line around the card if it is selected\n if self.selected:\n self.canvas.add(Color(constant_color_card))\n self.canvas.add(Line(rounder_rectangle=(0,0,10*self.unit, 15*self.unit,self.unit),\n width = self.unit/3))\n \n else:\n # the card is not visible: we display the card back\n self.canvas.add(Color(constant_color_card_back))\n self.canvas.add(Line(rounder_rectangle=(0,0,10*self.unit, 15*self.unit,self.unit)))\n # and draw !\n super(Card, self).draw()", "def draw_rect(surface, fill_color, outline_color, rect, border=1):\n\tsurface.fill(outline_color, rect)\n\tsurface.fill(fill_color, rect.inflate(-border*2, -border*2))", "def drawRectangle(self, canvas):", "def draw_rectangle(animal, width, height):\n for _ in range(2):\n animal.forward(width)\n animal.left(90)\n animal.forward(height)\n animal.left(90)", "def draw_Square():\r\n t.down()\r\n t.color(\"purple\")\r\n t.fillcolor(\"purple\")\r\n t.begin_fill()\r\n t.fd(boundary() * 2)\r\n t.left(90)\r\n t.fd(boundary() * 2)\r\n t.left(90)\r\n t.fd(boundary() * 2)\r\n t.left(90)\r\n t.fd(boundary() * 2)\r\n t.end_fill()\r\n t.up", "def EraseCardRect(self, card, pos, thick=MOVING_RECT_THICKNESS, refresh=True):\n # Brush is for background, Pen is for foreground\n x, y, w, h = card.GetRect() \n rect = wx.Rect(pos[0], pos[1], w, h)\n rect = rect.Inflate(2 * thick, 2 * thick)\n self.PaintRect(rect, thick=thick, style=wx.TRANSPARENT, refresh=refresh)", "def draw_square(animal, size):\n for _ in range(5):\n animal.forward(size)\n animal.left(90)\n animal.forward(size)\n animal.left(90)\n animal.forward(size)\n animal.left(90)\n animal.forward(size)\n animal.left(90)\n animal.penup()\n animal.forward(50)\n animal.pendown()", "def cover_box(self, x, y, width, height):\n pg.draw.rect(self.screen, (255, 255, 255), (x, y, width, height))", "def draw_rectangle(height, rec_width, rec_color):\n young.begin_fill() # start filling this shape\n young.color(rec_color)\n young.left(90)\n young.forward(height) # the height of the rectangle\n young.write(\" \" + str(height/6.5), font=(\"Arial\", 9, \"normal\")) \n young.right(90)\n young.forward(rec_width) # the width of the rectangle\n young.right(90)\n young.forward(height)\n young.left(90)\n young.end_fill()", "def roundRect(self, x, y, width, height, radius, stroke=1, fill=0):\n #use a precomputed set of factors for the bezier approximation\n #to a circle. 
There are six relevant points on the x axis and y axis.\n #sketch them and it should all make sense!\n t = 0.4472 * radius\n\n x0 = x\n x1 = x0 + t\n x2 = x0 + radius\n x3 = x0 + width - radius\n x4 = x0 + width - t\n x5 = x0 + width\n\n y0 = y\n y1 = y0 + t\n y2 = y0 + radius\n y3 = y0 + height - radius\n y4 = y0 + height - t\n y5 = y0 + height\n\n self._code.append('n %0.4f %0.4f m' % (x2, y0))\n self._code.append('%0.4f %0.4f l' % (x3, y0)) # bottom row\n self._code.append('%0.4f %0.4f %0.4f %0.4f %0.4f %0.4f c' %\n (x4, y0, x5, y1, x5, y2)) # bottom right\n\n self._code.append('%0.4f %0.4f l' % (x5, y3)) # right edge\n self._code.append('%0.4f %0.4f %0.4f %0.4f %0.4f %0.4f c' %\n (x5, y4, x4, y5, x3, y5)) # top right\n\n self._code.append('%0.4f %0.4f l' % (x2, y5)) # top row\n self._code.append('%0.4f %0.4f %0.4f %0.4f %0.4f %0.4f c' %\n (x1, y5, x0, y4, x0, y3)) # top left\n\n self._code.append('%0.4f %0.4f l' % (x0, y2)) # left edge\n self._code.append('%0.4f %0.4f %0.4f %0.4f %0.4f %0.4f c' %\n (x0, y1, x1, y0, x2, y0)) # bottom left\n\n self._code.append('h') #close off, although it should be where it started anyway\n\n self._code.append(PATH_OPS[stroke, fill, self._fillMode])\n ##################################################\n #\n # Text methods\n #\n # As with graphics, a separate object ensures that\n # everything is bracketed between text operators.\n # The methods below are a high-level convenience.\n # use PDFTextObject for multi-line text.\n ##################################################", "def _draw_cell(x, y, color):\n px = x * length\n py = y * length\n\n rectangle = pygame.Rect((px, py), size)\n pygame.draw.rect(pygame.display.get_surface(), color, rectangle)", "def draw_black_rect(x, y):\n pyglet.graphics.draw(4, pyglet.gl.GL_POLYGON,\n (\"v2i\", (sq_size*x, sq_size*y + top_bar, sq_size*(x+1), sq_size*y + top_bar,\n sq_size*(x+1), sq_size*(y+1) + top_bar, sq_size*x, sq_size*(y+1) + top_bar)),\n (\"c3B\", (90, 60, 30)*4))", "def _draw(self) -> Card:\n return self._deck.draw()", "def draw_square(t, sz, col, ps, step):\t\n t.color(col)\n t.pensize(ps)\n for i in range(4):\n t.fd(sz)\n t.left(90)\n t.penup()\n t.goto(t.pos()+ (-step,-step))\n t.pendown()", "def drawBigCircle(a, b, r):\n t.up()\n t.goto(a,b-r)\n t.down()\n t.color(\"White\")\n t.circle(r)\n #t.ht()\n t.up()\n t.goto(a,b)", "def draw_square(self, side_length):\n self.draw_polygon(side_length, 4)", "def draw_food(self):\n\n pygame.draw.rect(self.screen, self.food_color, self.rect)", "def draw(self):\n #There is only one turtle, so it needs updated to display each object's stored color\n self._turtle.setColor(self._color[0],self._color[1],self._color[2])\n #Filled in rectangle\n #For all y coordinates in a square, draw a horizontal line that goes across the square\n for y in xrange(self._y, self._y+self._height):\n self._turtle.up()\n #Move to the starting x position\n self._turtle.move(self._x,y)\n self._turtle.setDirection(0)\n self._turtle.down()\n #Draw a line equal in length to the width of the rectangle\n self._turtle.move(self._width)\n #Old outline for the rectangle", "def __draw (self, display_surface):\n pygame.draw.rect(display_surface, self._colour, (self._rect))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Erases a rectangle drawn by `PaintCardRect()`.
def EraseCardRect(self, card, pos, thick=MOVING_RECT_THICKNESS, refresh=True): # Brush is for background, Pen is for foreground x, y, w, h = card.GetRect() rect = wx.Rect(pos[0], pos[1], w, h) rect = rect.Inflate(2 * thick, 2 * thick) self.PaintRect(rect, thick=thick, style=wx.TRANSPARENT, refresh=refresh)
[ "def deleteRectangle(self, canvas):", "def _erase (self):\n self.screen.blit_background (self._rect)", "def updateEraseRect(self):\n\t\treturn", "def updateEraseRect(self):\n\t\tx0, y0, x1, y1 = self.getMinMaxXY()\n\t\tx0 += self._xpos\n\t\ty0 += self._ypos\n\t\tx1 += self._xpos\n\t\ty1 += self._ypos\n\t\tself.eraseRect = (x0, y0, x1, y1)\n\t\tself.UpdateOriginalPoints()\n\t\tself.CalculateBoundingBox()", "def canvas_clicked(self, e):\n\n if not self.rectState:\n # No rectangle being drawn; draw a new one\n self.canvas.delete(\"r\")\n self.x_base = e.x\n self.y_base = e.y\n self.canvas.create_rectangle(\n (self.x_base, self.y_base, self.x_base, self.y_base),\n outline=\"red\", tag=\"r\")\n self.rectState = True\n\n else:\n # Finalize currently drawn rectangle\n self.canvas.itemconfigure(\"r\", outline=\"black\")\n self.x_base = None\n self.y_base = None\n self.rectState = False", "def clearScreen(self, color=COLOR_BLACK): \n w = self.getScreenWidth()\n h = self.getScreenHeight()\n \n self.display.setArea(0, 0, w - self.margin_row, h - self.margin_col) \n self.fillRect(0, 0, w, h, color)", "def reject_rect(self, rect):\r\n if (rect.area < self.min_size):\r\n self.rects.remove(rect)\r\n elif (rect.area > self.max_size):\r\n self.rects.remove(rect)\r\n elif (rect.top - self.mid_y > self.max_mid_distance * self.mid_y):\r\n self.rects.remove(rect)\r\n elif (self.mid_y - rect.bottom > self.max_mid_distance * self.mid_y):\r\n self.rects.remove(rect)\r\n return", "def exit(self):\r\n self.rect.x = 1000\r\n self.rect.y = 750", "def emit_clear(self):\n\n self._append_line(\"$ctx.clearRect(0, 0, %s.width, %s.height);\" % (self.get_canvas(), self.get_canvas()))", "def user32_InvalidateRect(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hWnd\", \"lpRect\", \"bErase\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def remove_rectangle(self, i):\n if len(self.rectangles) == 0:\n print(\"Empty list\")\n elif i >= len(self.rectangles):\n print(\"Invalid value\")\n else:\n del self.rectangles[i]\n image_height, image_width = self.cv_displayed_image.shape[:2]\n self.cv_displayed_image = cv2.resize(self.cv_image, (image_width, image_height))\n self.add_rectangles()\n self.show_rectangle(self.rectangle)", "def drawRectangle(self, canvas):", "def right_click(self, event):\n self.remove_rectangle(-1)", "def draw_rect(surface, fill_color, outline_color, rect, border=1):\n\tsurface.fill(outline_color, rect)\n\tsurface.fill(fill_color, rect.inflate(-border*2, -border*2))", "def draw_black_rect(x, y):\n pyglet.graphics.draw(4, pyglet.gl.GL_POLYGON,\n (\"v2i\", (sq_size*x, sq_size*y + top_bar, sq_size*(x+1), sq_size*y + top_bar,\n sq_size*(x+1), sq_size*(y+1) + top_bar, sq_size*x, sq_size*(y+1) + top_bar)),\n (\"c3B\", (90, 60, 30)*4))", "def PaintCardRect(self, card, pos, thick=MOVING_RECT_THICKNESS, style=wx.SOLID, refresh=True):\n x, y, w, h = card.GetRect()\n rect = wx.Rect(pos[0], pos[1], w, h)\n rect = rect.Inflate(2 * thick, 2 * thick)\n self.PaintRect(rect, thick=thick, style=style, refresh=refresh)", "def crop(self, rect: QRect) -> None:", "def undraw_attractor(self):\n pass", "def drawForeground(self, ox, oy, doc, parent):\n if self.stroke is not None and self.strokeWidth: # Only if defined.\n doc.context.fill(None) # Fill is done in background drawing.\n doc.context.stroke(self.stroke, self.strokeWidth)\n if self.w is not None and self.h is not None:\n doc.context.rect(ox, oy, self.w, self.h)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dumps every `Card`'s info into a `dict`.
def DumpCards(self): carddict = {} # we put the scrollbars at the origin, to get the real positions shown = self.IsShown() if shown: self.Hide() view_start = self.GetViewStart() self.Scroll(0, 0) # with the scrollbars at the origin, dump the cards for c in self.cards: carddict[c.GetId()] = c.Dump() carddict[c.GetId()]["pos"] = [i / self.scale for i in carddict[c.GetId()]["pos"]] # and return to the original view self.Scroll(view_start[0], view_start[1]) if shown: self.Show() return carddict
[ "def Dump(self):\n return {\"cards\": self.DumpCards(), \"groups\": self.DumpGroups()}", "def card_to_dict(card):\n jcard = {'id': card.id}\n if card.number:\n jcard['number'] = card.number\n if card.event:\n jcard['event'] = card.event\n if card.contracts:\n jcard['contracts'] = [{'id': contract.id,\n 'good': contract.good.name,\n 'city': contract.city.name,\n 'amount': contract.amount}\n for contract in card.contracts]\n return jcard", "def print(self):\n for card in self.deck:\n print(card)", "def generate_cards(self):\n info = []\n\n for card in self.card_array:\n self.card_parameters.append([card.number, card.shape, card.fill, card.color])\n info.append([card.x, card.y, card.contour, card.number, card.shape, card.fill, card.color])\n\n if len(self.card_array):\n info = np.array(info)\n # self.card_info = info\n #\n idx = np.argsort(info[:, 0] + 2*info[:, 1])\n self.card_info = info[idx]\n\n else:\n self.card_info = info", "def __repr__(self):\n for card in self.deck:\n return card.short_name()", "def show_cards(player):\n print(\"-\"*20)\n print(\"{}'s hand\".format(player.name))\n print(\"-\"*20)\n for card in player.hand_stack:\n print(card['Card'])\n print(\"{}'s card value is {}\".format(\n player.name,\n player.hand_value))", "def cardInfo(self, cardName):\n \n cardID = self.cardLookup(cardName)\n \n card = Card(cardID)\n return card.description()", "def show(self):\n for c in self.cards:\n print(c)", "def print_cards(self):\n print(self, '\\b:\\t', end='')\n print('Cards : {}\\n'.format(self._cards))", "def get_cards(self, token):\n cards = display(CustomerCard.get_all_cards(customer_id=token.customer_id))\n return {'cards': cards}", "def create_deck(self) -> dict:\n card = self.cards[0]\n\n self.card_json['CardID'] = self.current_card_id\n self.card_json['Nickname'] = card.tabletop_name\n\n card_asset_json = deepcopy(card_asset_template)\n card_asset_json['FaceURL'] = card.image_url\n card_asset_json['BackURL'] = self.card_back_url\n card_asset_json['BackIsHidden'] = False\n\n self.card_json['CustomDeck'] = {\"1\": card_asset_json}\n self.card_json['Transform']['rotZ'] = 180 if self.hidden else 0\n\n return self.card_json", "def dump(self):\n\n # Dump the resource properties\n resource_dict = super(Cpc, self).dump()\n\n # Dump the child resources\n capacity_groups = self.capacity_groups.dump()\n if capacity_groups:\n resource_dict['capacity_groups'] = capacity_groups\n partitions = self.partitions.dump()\n if partitions:\n resource_dict['partitions'] = partitions\n adapters = self.adapters.dump()\n if adapters:\n resource_dict['adapters'] = adapters\n virtual_switches = self.virtual_switches.dump()\n if virtual_switches:\n resource_dict['virtual_switches'] = virtual_switches\n lpars = self.lpars.dump()\n if lpars:\n resource_dict['lpars'] = lpars\n reset_act_profiles = self.reset_activation_profiles.dump()\n if reset_act_profiles:\n resource_dict['reset_activation_profiles'] = reset_act_profiles\n image_act_profiles = self.image_activation_profiles.dump()\n if image_act_profiles:\n resource_dict['image_activation_profiles'] = image_act_profiles\n load_act_profiles = self.load_activation_profiles.dump()\n if load_act_profiles:\n resource_dict['load_activation_profiles'] = load_act_profiles\n\n return resource_dict", "def get_card_statistics(self, card_name):\n\n # Make sure the card exists in the database\n if self.check_card(card_name) == False:\n print(card_name, \"does not exist in the database\")\n return None\n\n try:\n # Initialize an empty dictionary for the 
statistics\n stats = {}\n\n # Get the type of the card\n cursor = self.conn.cursor()\n cursor.execute(\"\"\"SELECT card_type FROM cards\n WHERE card_name = ?\"\"\", (card_name,))\n card_type = cursor.fetchone()[0]\n stats[\"type\"] = card_type\n \n if card_type == \"Minion\":\n # Get all minion-related attributes\n cursor.execute(\"\"\"SELECT card_name, card_rarity, card_cost, minion_text, minion_attack, minion_health\n FROM cards\n INNER JOIN minions ON card_key = minion_cardkey\n WHERE card_name = ?\"\"\", (card_name,))\n card = cursor.fetchone()\n stats[\"minion_name\"] = card[0]\n stats[\"minion_rarity\"] = card[1]\n stats[\"minion_cost\"] = card[2]\n stats[\"minion_text\"] = card[3]\n stats[\"minion_attack\"] = card[4]\n stats[\"minion_health\"] = card[5]\n\n elif card_type == \"Spell\":\n # Get all spell-related attributes\n cursor.execute(\"\"\"SELECT card_name, card_rarity, card_cost, spell_text\n FROM cards\n INNER JOIN spells ON card_key = spell_cardkey\n WHERE card_name = ?\"\"\", (card_name,))\n card = cursor.fetchone()\n stats[\"spell_name\"] = card[0]\n stats[\"spell_rarity\"] = card[1]\n stats[\"spell_cost\"] = card[2]\n stats[\"spell_text\"] = card[3]\n \n elif card_type == \"Weapon\":\n # Get all weapon-related attributes\n cursor.execute(\"\"\"SELECT card_name, card_rarity, card_cost, weapon_text, weapon_attack, weapon_durability\n FROM cards\n INNER JOIN weapons ON card_key = weapon_cardkey\n WHERE card_name = ?\"\"\", (card_name,))\n card = cursor.fetchone()\n stats[\"weapon_name\"] = card[0]\n stats[\"weapon_rarity\"] = card[1]\n stats[\"weapon_cost\"] = card[2]\n stats[\"weapon_text\"] = card[3]\n stats[\"weapon_attack\"] = card[4]\n stats[\"weapon_durability\"] = card[5]\n\n # Return the statistics\n return stats\n\n except Error as e:\n print(\"Error in get_card_statistics:\", e)\n return None", "def parse_card(self, response):\n output = {\n 'title': response.css('.card-details .caption::text').extract()[0],\n 'class': 'all',\n }\n\n infobox = response.css('.card-details .infobox')\n infobox = infobox.css('::text').extract()\n\n for i, infobox_line in enumerate(infobox):\n if 'Type:' in infobox_line:\n output['type'] = infobox[i+1].strip()\n if 'Rarity:' in infobox_line:\n output['rarity'] = infobox[i+1].strip()\n if 'Set:' in infobox_line:\n output['set'] = infobox[i+1].strip()\n if 'Class:' in infobox_line:\n output['class'] = infobox[i+1].strip()\n if 'Classes:' in infobox_line:\n output['class'] = 'Multi'\n if 'Used in' in infobox_line:\n usage = re.sub(r'\\s+', ' ', infobox_line).strip()\n usage_tokens = usage.split(' ')\n output['usage_pct'] = usage_tokens[2]\n\n yield output", "def __repr__(self):\n return \"Card(\" + repr(self.get_rank()) + \", \" + repr(self.get_class()) + \", \" + repr(self.get_suit()) +\")\"", "def generatePlayerDeck(self):\n cards = []\n\n for k in EVENT_CARDS: #id, name, description\n cards.append(EventCard(k, EVENT_CARDS[k][\"name\"], EVENT_CARDS[k][\"description\"]))\n\n for k in PLAYER_CARDS: #name,colour,population,area,country\n cards.append(PlayerCard(k, PLAYER_CARDS[k][\"colour\"], PLAYER_CARDS[k][\"population\"], PLAYER_CARDS[k][\"area\"], PLAYER_CARDS[k][\"country\"]))\n\n\n return cards", "def dump_dict(self, dump=None):\n\n dump_dict = dict()\n\n warnings = self.get_warnings()\n if warnings:\n dump_dict['Parsing Warnings'] = warnings\n\n dump_dict['DOS_HEADER'] = self.DOS_HEADER.dump_dict()\n dump_dict['NT_HEADERS'] = self.NT_HEADERS.dump_dict()\n dump_dict['FILE_HEADER'] = self.FILE_HEADER.dump_dict()\n\n image_flags = 
retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')\n\n dump_dict['Flags'] = list()\n for flag in image_flags:\n if getattr(self.FILE_HEADER, flag[0]):\n dump_dict['Flags'].append(flag[0])\n\n if hasattr(self, 'OPTIONAL_HEADER') and self.OPTIONAL_HEADER is not None:\n dump_dict['OPTIONAL_HEADER'] = self.OPTIONAL_HEADER.dump_dict()\n\n dll_characteristics_flags = retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLLCHARACTERISTICS_')\n\n dump_dict['DllCharacteristics'] = list()\n for flag in dll_characteristics_flags:\n if getattr(self.OPTIONAL_HEADER, flag[0]):\n dump_dict['DllCharacteristics'].append(flag[0])\n\n dump_dict['PE Sections'] = list()\n\n section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')\n for section in self.sections:\n section_dict = section.dump_dict()\n dump_dict['PE Sections'].append(section_dict)\n section_dict['Flags'] = list()\n for flag in section_flags:\n if getattr(section, flag[0]):\n section_dict['Flags'].append(flag[0])\n\n section_dict['Entropy'] = section.get_entropy()\n if md5 is not None:\n section_dict['MD5'] = section.get_hash_md5()\n if sha1 is not None:\n section_dict['SHA1'] = section.get_hash_sha1()\n if sha256 is not None:\n section_dict['SHA256'] = section.get_hash_sha256()\n if sha512 is not None:\n section_dict['SHA512'] = section.get_hash_sha512()\n\n\n\n if (hasattr(self, 'OPTIONAL_HEADER') and\n hasattr(self.OPTIONAL_HEADER, 'DATA_DIRECTORY') ):\n\n dump_dict['Directories'] = list()\n\n for idx in range(len(self.OPTIONAL_HEADER.DATA_DIRECTORY)):\n directory = self.OPTIONAL_HEADER.DATA_DIRECTORY[idx]\n dump_dict['Directories'].append(directory.dump_dict())\n\n if hasattr(self, 'VS_VERSIONINFO'):\n dump_dict['Version Information'] = list()\n for idx in range(len(self.VS_VERSIONINFO)):\n version_info_list = list()\n version_info_list.append(self.VS_VERSIONINFO[idx].dump_dict())\n\n if hasattr(self, 'VS_FIXEDFILEINFO'):\n version_info_list.append(self.VS_FIXEDFILEINFO[idx].dump_dict())\n\n if hasattr(self, 'FileInfo') and len(self.FileInfo) > idx:\n fileinfo_list = list()\n for entry in self.FileInfo[idx]:\n fileinfo_list.append(entry.dump_dict())\n\n if hasattr(entry, 'StringTable'):\n stringtable_dict = dict()\n for st_entry in entry.StringTable:\n [fileinfo_list.append(line) for line in st_entry.dump_dict()]\n stringtable_dict['LangID'] = st_entry.LangID\n for str_entry in list(st_entry.entries.items()):\n stringtable_dict[str_entry[0]] = str_entry[1]\n fileinfo_list.append(stringtable_dict)\n\n\n elif hasattr(entry, 'Var'):\n for var_entry in entry.Var:\n var_dict = dict()\n if hasattr(var_entry, 'entry'):\n [fileinfo_list.append(line) for line in var_entry.dump_dict()]\n var_dict[list(var_entry.entry.keys())[0]] = list(\n var_entry.entry.values())[0]\n fileinfo_list.append(var_dict)\n\n dump_dict['Version Information'].append(version_info_list)\n\n if hasattr(self, 'DIRECTORY_ENTRY_EXPORT'):\n dump_dict['Exported symbols'] = list()\n dump_dict['Exported symbols'].append(self.DIRECTORY_ENTRY_EXPORT.struct.dump_dict())\n for export in self.DIRECTORY_ENTRY_EXPORT.symbols:\n export_dict = dict()\n if export.address is not None:\n export_dict.update({'Ordinal': export.ordinal, 'RVA': export.address, 'Name': export.name})\n if export.forwarder:\n export_dict['forwarder'] = export.forwarder\n dump_dict['Exported symbols'].append(export_dict)\n\n if hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):\n dump_dict['Imported symbols'] = list()\n for module in self.DIRECTORY_ENTRY_IMPORT:\n import_list = list()\n dump_dict['Imported 
symbols'].append(import_list)\n import_list.append(module.struct.dump_dict())\n for symbol in module.imports:\n symbol_dict = dict()\n if symbol.import_by_ordinal is True:\n symbol_dict['DLL'] = module.dll\n symbol_dict['Ordinal'] = symbol.ordinal\n else:\n symbol_dict['DLL'] = module.dll\n symbol_dict['Name'] = symbol.name\n symbol_dict['Hint'] = symbol.hint\n\n if symbol.bound:\n symbol_dict['Bound'] = symbol.bound\n import_list.append(symbol_dict)\n\n\n if hasattr(self, 'DIRECTORY_ENTRY_BOUND_IMPORT'):\n dump_dict['Bound imports'] = list()\n for bound_imp_desc in self.DIRECTORY_ENTRY_BOUND_IMPORT:\n bound_imp_desc_dict = dict()\n dump_dict['Bound imports'].append(bound_imp_desc_dict)\n\n bound_imp_desc_dict.update(bound_imp_desc.struct.dump_dict())\n bound_imp_desc_dict['DLL'] = bound_imp_desc.name\n\n for bound_imp_ref in bound_imp_desc.entries:\n bound_imp_ref_dict = dict()\n bound_imp_ref_dict.update(bound_imp_ref.struct.dump_dict())\n bound_imp_ref_dict['DLL'] = bound_imp_ref.name\n\n\n if hasattr(self, 'DIRECTORY_ENTRY_DELAY_IMPORT'):\n dump_dict['Delay Imported symbols'] = list()\n for module in self.DIRECTORY_ENTRY_DELAY_IMPORT:\n module_list = list()\n dump_dict['Delay Imported symbols'].append(module_list)\n module_list.append(module.struct.dump_dict())\n\n for symbol in module.imports:\n symbol_dict = dict()\n if symbol.import_by_ordinal is True:\n symbol_dict['DLL'] = module.dll\n symbol_dict['Ordinal'] = symbol.ordinal\n else:\n symbol_dict['DLL'] = module.dll\n symbol_dict['Name'] = symbol.name\n symbol_dict['Hint'] = symbol.hint\n\n if symbol.bound:\n symbol_dict['Bound'] = symbol.bound\n module_list.append(symbol_dict)\n\n\n if hasattr(self, 'DIRECTORY_ENTRY_RESOURCE'):\n dump_dict['Resource directory'] = list()\n dump_dict['Resource directory'].append(self.DIRECTORY_ENTRY_RESOURCE.struct.dump_dict())\n\n for resource_type in self.DIRECTORY_ENTRY_RESOURCE.entries:\n resource_type_dict = dict()\n\n if resource_type.name is not None:\n resource_type_dict['Name'] = resource_type.name\n else:\n resource_type_dict['Id'] = (\n resource_type.struct.Id, RESOURCE_TYPE.get(resource_type.struct.Id, '-'))\n\n resource_type_dict.update(resource_type.struct.dump_dict())\n dump_dict['Resource directory'].append(resource_type_dict)\n\n if hasattr(resource_type, 'directory'):\n directory_list = list()\n directory_list.append(resource_type.directory.struct.dump_dict())\n dump_dict['Resource directory'].append(directory_list)\n\n for resource_id in resource_type.directory.entries:\n resource_id_dict = dict()\n\n if resource_id.name is not None:\n resource_id_dict['Name'] = resource_id.name\n else:\n resource_id_dict['Id'] = resource_id.struct.Id\n\n resource_id_dict.update(resource_id.struct.dump_dict())\n directory_list.append(resource_id_dict)\n\n if hasattr(resource_id, 'directory'):\n resource_id_list = list()\n resource_id_list.append(resource_id.directory.struct.dump_dict())\n directory_list.append(resource_id_list)\n\n for resource_lang in resource_id.directory.entries:\n if hasattr(resource_lang, 'data'):\n resource_lang_dict = dict()\n resource_lang_dict['LANG'] = resource_lang.data.lang\n resource_lang_dict['SUBLANG'] = resource_lang.data.sublang\n resource_lang_dict['LANG_NAME'] = LANG.get(resource_lang.data.lang, '*unknown*')\n resource_lang_dict['SUBLANG_NAME'] = get_sublang_name_for_lang(resource_lang.data.lang, resource_lang.data.sublang)\n resource_lang_dict.update(resource_lang.struct.dump_dict())\n resource_lang_dict.update(resource_lang.data.struct.dump_dict())\n 
resource_id_list.append(resource_lang_dict)\n if hasattr(resource_id.directory, 'strings') and resource_id.directory.strings:\n for idx, res_string in list(resource_id.directory.strings.items()):\n resource_id_list.append(res_string.encode(\n 'unicode-escape',\n 'backslashreplace').decode(\n 'ascii'))\n\n\n if ( hasattr(self, 'DIRECTORY_ENTRY_TLS') and\n self.DIRECTORY_ENTRY_TLS and\n self.DIRECTORY_ENTRY_TLS.struct ):\n dump_dict['TLS'] = self.DIRECTORY_ENTRY_TLS.struct.dump_dict()\n\n\n if ( hasattr(self, 'DIRECTORY_ENTRY_LOAD_CONFIG') and\n self.DIRECTORY_ENTRY_LOAD_CONFIG and\n self.DIRECTORY_ENTRY_LOAD_CONFIG.struct ):\n dump_dict['LOAD_CONFIG'] = self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.dump_dict()\n\n\n if hasattr(self, 'DIRECTORY_ENTRY_DEBUG'):\n dump_dict['Debug information'] = list()\n for dbg in self.DIRECTORY_ENTRY_DEBUG:\n dbg_dict = dict()\n dump_dict['Debug information'].append(dbg_dict)\n dbg_dict.update(dbg.struct.dump_dict())\n dbg_dict['Type'] = DEBUG_TYPE.get(dbg.struct.Type, dbg.struct.Type)\n\n\n if self.has_relocs():\n dump_dict['Base relocations'] = list()\n for base_reloc in self.DIRECTORY_ENTRY_BASERELOC:\n base_reloc_list = list()\n dump_dict['Base relocations'].append(base_reloc_list)\n base_reloc_list.append(base_reloc.struct.dump_dict())\n for reloc in base_reloc.entries:\n reloc_dict = dict()\n base_reloc_list.append(reloc_dict)\n reloc_dict['RVA'] = reloc.rva\n try:\n reloc_dict['Type'] = RELOCATION_TYPE[reloc.type][16:]\n except KeyError:\n reloc_dict['Type'] = reloc.type\n\n return dump_dict", "def dump(self):\n\n # Dump the resource properties\n resource_dict = super(Console, self).dump()\n\n # Dump the child resources\n users = self.users.dump()\n if users:\n resource_dict['users'] = users\n user_roles = self.user_roles.dump()\n if user_roles:\n resource_dict['user_roles'] = user_roles\n user_patterns = self.user_patterns.dump()\n if user_patterns:\n resource_dict['user_patterns'] = user_patterns\n password_rules = self.password_rules.dump()\n if password_rules:\n resource_dict['password_rules'] = password_rules\n tasks = self.tasks.dump()\n if tasks:\n resource_dict['tasks'] = tasks\n ldap_server_definitions = self.ldap_server_definitions.dump()\n if ldap_server_definitions:\n resource_dict['ldap_server_definitions'] = ldap_server_definitions\n storage_groups = self.storage_groups.dump()\n if storage_groups:\n resource_dict['storage_groups'] = storage_groups\n\n # Note: Unmanaged CPCs are not dumped, since their properties cannot\n # be retrieved.\n\n return resource_dict", "def info(self):\n\n infoDict = OrderedDict()\n\n # store a dictionary for each CCD, keyed by the CCD name\n # NSWE is the sky postion.\n # Rotation here is defined as the rotation angle from chip frame to fiducial global frame, counter-clock wise is positive\n infoDict[\"CIW\"] = {\"xCenter\": 1.57, \"yCenter\": 0., \"FAflag\": True, \"CCDNUM\": 5, \"Offset\": 1500,\n \"Extension\": 1, \"Rotation\": -90}\n infoDict[\"CIS\"] = {\"xCenter\": 0, \"yCenter\": -1.57, \"FAflag\": True, \"CCDNUM\": 4, \"Offset\": 1500,\n \"Extension\": 2, \"Rotation\": 0}\n infoDict[\"CIC\"] = {\"xCenter\": 0, \"yCenter\": 0., \"FAflag\": True, \"CCDNUM\": 3, \"Offset\": 1500,\n \"Extension\": 3, \"Rotation\": 180}\n infoDict[\"CIN\"] = {\"xCenter\": 0, \"yCenter\": 1.57, \"FAflag\": True, \"CCDNUM\": 2, \"Offset\": 1500,\n \"Extension\": 4, \"Rotation\": 180}\n infoDict[\"CIE\"] = {\"xCenter\": -1.57, \"yCenter\": 0., \"FAflag\": True, \"CCDNUM\": 1, \"Offset\": 1500,\n \"Extension\": 5, \"Rotation\": 
90}\n\n # offset 1500 is 1.5 mm. Does not matter for CI instrument but should be set for GFAs +/- 1500\n # FAflag should be true for all focus and alignment chip.\n\n return infoDict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dumps all the `CardGroup`s' info in a `dict`.
def DumpGroups(self):
        d = {}
        for g in self.groups:
            d[g.GetLabel()] = g.Dump()
        return d
[ "def Dump(self):\n return {\"cards\": self.DumpCards(), \"groups\": self.DumpGroups()}", "def group_info(self):\n groups = {}\n for group in self.store.keys():\n groups[group] = {\n 'metadata': self.store.get_storer(group).attrs.metadata,\n 'size': self.store[group].memory_usage().sum()}\n return groups", "def DumpCards(self):\n carddict = {}\n\n # we put the scrollbars at the origin, to get the real positions\n shown = self.IsShown()\n if shown: self.Hide()\n view_start = self.GetViewStart()\n self.Scroll(0, 0)\n\n # with the scrollbars at the origin, dump the cards \n for c in self.cards:\n carddict[c.GetId()] = c.Dump()\n carddict[c.GetId()][\"pos\"] = [i / self.scale for i in carddict[c.GetId()][\"pos\"]]\n \n # and return to the original view\n self.Scroll(view_start[0], view_start[1])\n if shown: self.Show()\n\n return carddict", "def cmd_groups(self):\r\n return dict({i.name: i.info() for i in self.groups})", "def getiddgroupdict(self):\n return iddgroups.commdct2grouplist(self.idd_info)", "def extract_details(group: MeetupObject) -> dict:\n return {\n 'name': group.name,\n 'category': group.category['name'],\n 'created': from_epoch(group.created),\n 'city': group.city,\n 'state': group.state,\n 'country': group.country,\n 'description': group.description,\n 'url': group.link,\n 'organizer': group.organizer['name'],\n 'members': group.members,\n 'member_title': group.who\n }", "def create_group_dict():\n ifile = house_keeping + 'msid_list_all'\n data = mcf.read_data_file(ifile)\n g_dict = {}\n for ent in data:\n atemp = re.split('\\s+', ent)\n msid = atemp[0].strip()\n group = atemp[1].strip()\n g_dict[msid] = group\n\n return g_dict", "def make_dict_groups(group_file_name = \"./data/cluster_97.clstr\",\n fasta_file_name = \"./data/contigs_100\"):\n dict_fasta = make_dict_records(fasta_file_name)\n split_groups_out = make_split_groups_out(group_file_name)\n groups = [Arch_Group(group, dict_fasta) for group in split_groups_out]\n groups = {group.name:group for group in groups}\n return groups", "def dump_group_conf(self, model, file_name):\n logging.info(\"Dumping group config to {} ...\".format(file_name))\n\n g_cfg = OrderedDict()\n for idx, (name, mod) in enumerate(model.named_modules()):\n # iterate every module, dump them\n if isinstance(mod, MaskConv2d):\n # logging.info('Dumping data for mod {}:'.format(name))\n # logging.info('\\t{}'.format(mod))\n\n name_ = \"module.{}\".format(name)\n g_cfg[name_] = {\n \"G\": mod.G,\n \"F\": mod.out_channels,\n \"C\": mod.in_channels,\n \"id\": idx,\n }\n\n dirname = os.path.dirname(file_name)\n os.makedirs(dirname, exist_ok=True)\n with open(file_name, \"w\") as f:\n json.dump(g_cfg, f, indent=2)", "def print_groups(group):\n with open('data/server_data/groups.csv', mode='w', newline=\"\") as outfile:\n writer = csv.writer(outfile)\n for key in group.keys():\n while len(group[key]) < 6:\n group[key].append(None)\n writer.writerow([key] + group[key])", "def groups(self):\n return dict(self._results.groups())", "def print_groups(filename):\n path = os.path.expanduser(filename)\n\n m = Measurements(filename=path, mode=\"r\")\n\n metadata_tags = m.get_grouping_tags()\n\n groupings = m.get_groupings(metadata_tags)\n\n json.dump(groupings, sys.stdout)", "def H5GroupToDict(g):\n return {k: H5GroupToDict(g[k]) if isinstance(g[k], h5py.Group) else g[k].value\n for k in g.iterkeys()}", "def print_groups(self):\n\n text = ''\n\n # print out a starting message, and print headers.\n # print('printing groups')\n text += self.print_header()\n\n # 
print out the row numbers and the contents of the\n # rows, with the values in m represented by groups\n # and wall characters.\n for j in range(0, self.height):\n\n text += '{}| '.format(j%10)\n # print('{}|'.format(j%10), end=' ')\n \n for i in range(0, self.width):\n text += '{} '.format(self.group_map[j][i])\n # print(self.group_map[j][i], end=' ')\n\n text += '\\n'\n # print()\n\n # print the ending message, then check the map.\n # print('end of groups\\n')\n self.assert_array_size('print_groups', self.group_map)\n \n return text", "def groups(self):\n\n return list(self.grpimg.keys())", "def dump_dict(self, dump=None):\n\n dump_dict = dict()\n\n warnings = self.get_warnings()\n if warnings:\n dump_dict['Parsing Warnings'] = warnings\n\n dump_dict['DOS_HEADER'] = self.DOS_HEADER.dump_dict()\n dump_dict['NT_HEADERS'] = self.NT_HEADERS.dump_dict()\n dump_dict['FILE_HEADER'] = self.FILE_HEADER.dump_dict()\n\n image_flags = retrieve_flags(IMAGE_CHARACTERISTICS, 'IMAGE_FILE_')\n\n dump_dict['Flags'] = list()\n for flag in image_flags:\n if getattr(self.FILE_HEADER, flag[0]):\n dump_dict['Flags'].append(flag[0])\n\n if hasattr(self, 'OPTIONAL_HEADER') and self.OPTIONAL_HEADER is not None:\n dump_dict['OPTIONAL_HEADER'] = self.OPTIONAL_HEADER.dump_dict()\n\n dll_characteristics_flags = retrieve_flags(DLL_CHARACTERISTICS, 'IMAGE_DLLCHARACTERISTICS_')\n\n dump_dict['DllCharacteristics'] = list()\n for flag in dll_characteristics_flags:\n if getattr(self.OPTIONAL_HEADER, flag[0]):\n dump_dict['DllCharacteristics'].append(flag[0])\n\n dump_dict['PE Sections'] = list()\n\n section_flags = retrieve_flags(SECTION_CHARACTERISTICS, 'IMAGE_SCN_')\n for section in self.sections:\n section_dict = section.dump_dict()\n dump_dict['PE Sections'].append(section_dict)\n section_dict['Flags'] = list()\n for flag in section_flags:\n if getattr(section, flag[0]):\n section_dict['Flags'].append(flag[0])\n\n section_dict['Entropy'] = section.get_entropy()\n if md5 is not None:\n section_dict['MD5'] = section.get_hash_md5()\n if sha1 is not None:\n section_dict['SHA1'] = section.get_hash_sha1()\n if sha256 is not None:\n section_dict['SHA256'] = section.get_hash_sha256()\n if sha512 is not None:\n section_dict['SHA512'] = section.get_hash_sha512()\n\n\n\n if (hasattr(self, 'OPTIONAL_HEADER') and\n hasattr(self.OPTIONAL_HEADER, 'DATA_DIRECTORY') ):\n\n dump_dict['Directories'] = list()\n\n for idx in range(len(self.OPTIONAL_HEADER.DATA_DIRECTORY)):\n directory = self.OPTIONAL_HEADER.DATA_DIRECTORY[idx]\n dump_dict['Directories'].append(directory.dump_dict())\n\n if hasattr(self, 'VS_VERSIONINFO'):\n dump_dict['Version Information'] = list()\n for idx in range(len(self.VS_VERSIONINFO)):\n version_info_list = list()\n version_info_list.append(self.VS_VERSIONINFO[idx].dump_dict())\n\n if hasattr(self, 'VS_FIXEDFILEINFO'):\n version_info_list.append(self.VS_FIXEDFILEINFO[idx].dump_dict())\n\n if hasattr(self, 'FileInfo') and len(self.FileInfo) > idx:\n fileinfo_list = list()\n for entry in self.FileInfo[idx]:\n fileinfo_list.append(entry.dump_dict())\n\n if hasattr(entry, 'StringTable'):\n stringtable_dict = dict()\n for st_entry in entry.StringTable:\n [fileinfo_list.append(line) for line in st_entry.dump_dict()]\n stringtable_dict['LangID'] = st_entry.LangID\n for str_entry in list(st_entry.entries.items()):\n stringtable_dict[str_entry[0]] = str_entry[1]\n fileinfo_list.append(stringtable_dict)\n\n\n elif hasattr(entry, 'Var'):\n for var_entry in entry.Var:\n var_dict = dict()\n if hasattr(var_entry, 'entry'):\n 
[fileinfo_list.append(line) for line in var_entry.dump_dict()]\n var_dict[list(var_entry.entry.keys())[0]] = list(\n var_entry.entry.values())[0]\n fileinfo_list.append(var_dict)\n\n dump_dict['Version Information'].append(version_info_list)\n\n if hasattr(self, 'DIRECTORY_ENTRY_EXPORT'):\n dump_dict['Exported symbols'] = list()\n dump_dict['Exported symbols'].append(self.DIRECTORY_ENTRY_EXPORT.struct.dump_dict())\n for export in self.DIRECTORY_ENTRY_EXPORT.symbols:\n export_dict = dict()\n if export.address is not None:\n export_dict.update({'Ordinal': export.ordinal, 'RVA': export.address, 'Name': export.name})\n if export.forwarder:\n export_dict['forwarder'] = export.forwarder\n dump_dict['Exported symbols'].append(export_dict)\n\n if hasattr(self, 'DIRECTORY_ENTRY_IMPORT'):\n dump_dict['Imported symbols'] = list()\n for module in self.DIRECTORY_ENTRY_IMPORT:\n import_list = list()\n dump_dict['Imported symbols'].append(import_list)\n import_list.append(module.struct.dump_dict())\n for symbol in module.imports:\n symbol_dict = dict()\n if symbol.import_by_ordinal is True:\n symbol_dict['DLL'] = module.dll\n symbol_dict['Ordinal'] = symbol.ordinal\n else:\n symbol_dict['DLL'] = module.dll\n symbol_dict['Name'] = symbol.name\n symbol_dict['Hint'] = symbol.hint\n\n if symbol.bound:\n symbol_dict['Bound'] = symbol.bound\n import_list.append(symbol_dict)\n\n\n if hasattr(self, 'DIRECTORY_ENTRY_BOUND_IMPORT'):\n dump_dict['Bound imports'] = list()\n for bound_imp_desc in self.DIRECTORY_ENTRY_BOUND_IMPORT:\n bound_imp_desc_dict = dict()\n dump_dict['Bound imports'].append(bound_imp_desc_dict)\n\n bound_imp_desc_dict.update(bound_imp_desc.struct.dump_dict())\n bound_imp_desc_dict['DLL'] = bound_imp_desc.name\n\n for bound_imp_ref in bound_imp_desc.entries:\n bound_imp_ref_dict = dict()\n bound_imp_ref_dict.update(bound_imp_ref.struct.dump_dict())\n bound_imp_ref_dict['DLL'] = bound_imp_ref.name\n\n\n if hasattr(self, 'DIRECTORY_ENTRY_DELAY_IMPORT'):\n dump_dict['Delay Imported symbols'] = list()\n for module in self.DIRECTORY_ENTRY_DELAY_IMPORT:\n module_list = list()\n dump_dict['Delay Imported symbols'].append(module_list)\n module_list.append(module.struct.dump_dict())\n\n for symbol in module.imports:\n symbol_dict = dict()\n if symbol.import_by_ordinal is True:\n symbol_dict['DLL'] = module.dll\n symbol_dict['Ordinal'] = symbol.ordinal\n else:\n symbol_dict['DLL'] = module.dll\n symbol_dict['Name'] = symbol.name\n symbol_dict['Hint'] = symbol.hint\n\n if symbol.bound:\n symbol_dict['Bound'] = symbol.bound\n module_list.append(symbol_dict)\n\n\n if hasattr(self, 'DIRECTORY_ENTRY_RESOURCE'):\n dump_dict['Resource directory'] = list()\n dump_dict['Resource directory'].append(self.DIRECTORY_ENTRY_RESOURCE.struct.dump_dict())\n\n for resource_type in self.DIRECTORY_ENTRY_RESOURCE.entries:\n resource_type_dict = dict()\n\n if resource_type.name is not None:\n resource_type_dict['Name'] = resource_type.name\n else:\n resource_type_dict['Id'] = (\n resource_type.struct.Id, RESOURCE_TYPE.get(resource_type.struct.Id, '-'))\n\n resource_type_dict.update(resource_type.struct.dump_dict())\n dump_dict['Resource directory'].append(resource_type_dict)\n\n if hasattr(resource_type, 'directory'):\n directory_list = list()\n directory_list.append(resource_type.directory.struct.dump_dict())\n dump_dict['Resource directory'].append(directory_list)\n\n for resource_id in resource_type.directory.entries:\n resource_id_dict = dict()\n\n if resource_id.name is not None:\n resource_id_dict['Name'] = 
resource_id.name\n else:\n resource_id_dict['Id'] = resource_id.struct.Id\n\n resource_id_dict.update(resource_id.struct.dump_dict())\n directory_list.append(resource_id_dict)\n\n if hasattr(resource_id, 'directory'):\n resource_id_list = list()\n resource_id_list.append(resource_id.directory.struct.dump_dict())\n directory_list.append(resource_id_list)\n\n for resource_lang in resource_id.directory.entries:\n if hasattr(resource_lang, 'data'):\n resource_lang_dict = dict()\n resource_lang_dict['LANG'] = resource_lang.data.lang\n resource_lang_dict['SUBLANG'] = resource_lang.data.sublang\n resource_lang_dict['LANG_NAME'] = LANG.get(resource_lang.data.lang, '*unknown*')\n resource_lang_dict['SUBLANG_NAME'] = get_sublang_name_for_lang(resource_lang.data.lang, resource_lang.data.sublang)\n resource_lang_dict.update(resource_lang.struct.dump_dict())\n resource_lang_dict.update(resource_lang.data.struct.dump_dict())\n resource_id_list.append(resource_lang_dict)\n if hasattr(resource_id.directory, 'strings') and resource_id.directory.strings:\n for idx, res_string in list(resource_id.directory.strings.items()):\n resource_id_list.append(res_string.encode(\n 'unicode-escape',\n 'backslashreplace').decode(\n 'ascii'))\n\n\n if ( hasattr(self, 'DIRECTORY_ENTRY_TLS') and\n self.DIRECTORY_ENTRY_TLS and\n self.DIRECTORY_ENTRY_TLS.struct ):\n dump_dict['TLS'] = self.DIRECTORY_ENTRY_TLS.struct.dump_dict()\n\n\n if ( hasattr(self, 'DIRECTORY_ENTRY_LOAD_CONFIG') and\n self.DIRECTORY_ENTRY_LOAD_CONFIG and\n self.DIRECTORY_ENTRY_LOAD_CONFIG.struct ):\n dump_dict['LOAD_CONFIG'] = self.DIRECTORY_ENTRY_LOAD_CONFIG.struct.dump_dict()\n\n\n if hasattr(self, 'DIRECTORY_ENTRY_DEBUG'):\n dump_dict['Debug information'] = list()\n for dbg in self.DIRECTORY_ENTRY_DEBUG:\n dbg_dict = dict()\n dump_dict['Debug information'].append(dbg_dict)\n dbg_dict.update(dbg.struct.dump_dict())\n dbg_dict['Type'] = DEBUG_TYPE.get(dbg.struct.Type, dbg.struct.Type)\n\n\n if self.has_relocs():\n dump_dict['Base relocations'] = list()\n for base_reloc in self.DIRECTORY_ENTRY_BASERELOC:\n base_reloc_list = list()\n dump_dict['Base relocations'].append(base_reloc_list)\n base_reloc_list.append(base_reloc.struct.dump_dict())\n for reloc in base_reloc.entries:\n reloc_dict = dict()\n base_reloc_list.append(reloc_dict)\n reloc_dict['RVA'] = reloc.rva\n try:\n reloc_dict['Type'] = RELOCATION_TYPE[reloc.type][16:]\n except KeyError:\n reloc_dict['Type'] = reloc.type\n\n return dump_dict", "def print_group(label, group):\n # type: (str, COPASI.CModelParameterGroup) -> None\n print (label)\n for i in range(group.size()):\n current = group.getChild(i)\n\n if isinstance(current, COPASI.CModelParameterGroup): # reaction parameters have a subgroup for each rection\n print (\" Reaction: %s\" % current.getName())\n\n for j in range(current.size()): # followed by the kinetic parameters\n param = current.getChild(j)\n print (\" {0} = {1}\".format(param.getName(), param.getValue(COPASI.CCore.Framework_Concentration)))\n else: # otherwise we just have name value pairs\n print (\" {0} = {1}\".format(current.getName(), current.getValue(COPASI.CCore.Framework_Concentration)))", "def dump(self):\n\n # Dump the resource properties\n resource_dict = super(Cpc, self).dump()\n\n # Dump the child resources\n capacity_groups = self.capacity_groups.dump()\n if capacity_groups:\n resource_dict['capacity_groups'] = capacity_groups\n partitions = self.partitions.dump()\n if partitions:\n resource_dict['partitions'] = partitions\n adapters = self.adapters.dump()\n if 
adapters:\n resource_dict['adapters'] = adapters\n virtual_switches = self.virtual_switches.dump()\n if virtual_switches:\n resource_dict['virtual_switches'] = virtual_switches\n lpars = self.lpars.dump()\n if lpars:\n resource_dict['lpars'] = lpars\n reset_act_profiles = self.reset_activation_profiles.dump()\n if reset_act_profiles:\n resource_dict['reset_activation_profiles'] = reset_act_profiles\n image_act_profiles = self.image_activation_profiles.dump()\n if image_act_profiles:\n resource_dict['image_activation_profiles'] = image_act_profiles\n load_act_profiles = self.load_activation_profiles.dump()\n if load_act_profiles:\n resource_dict['load_activation_profiles'] = load_act_profiles\n\n return resource_dict", "def parse_group(self, element: etree.Element) -> Dict:\n\n if element is None:\n return {}\n\n group = {\n \"id\": int(self._eav(element=element, attribute=\"id\")),\n \"url\": self._eav(element=element, attribute=\"url\"),\n \"name\": self._et(element=element),\n }\n\n return group" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a `dict` with all the info contained in this `Deck`.
def Dump(self):
        return {"cards": self.DumpCards(), "groups": self.DumpGroups()}
[ "def DumpCards(self):\n carddict = {}\n\n # we put the scrollbars at the origin, to get the real positions\n shown = self.IsShown()\n if shown: self.Hide()\n view_start = self.GetViewStart()\n self.Scroll(0, 0)\n\n # with the scrollbars at the origin, dump the cards \n for c in self.cards:\n carddict[c.GetId()] = c.Dump()\n carddict[c.GetId()][\"pos\"] = [i / self.scale for i in carddict[c.GetId()][\"pos\"]]\n \n # and return to the original view\n self.Scroll(view_start[0], view_start[1])\n if shown: self.Show()\n\n return carddict", "def get_cards(self):\r\n return self.deck", "def info(self):\n\n infoDict = OrderedDict()\n\n # store a dictionary for each CCD, keyed by the CCD name\n # NSWE is the sky postion.\n # Rotation here is defined as the rotation angle from chip frame to fiducial global frame, counter-clock wise is positive\n infoDict[\"CIW\"] = {\"xCenter\": 1.57, \"yCenter\": 0., \"FAflag\": True, \"CCDNUM\": 5, \"Offset\": 1500,\n \"Extension\": 1, \"Rotation\": -90}\n infoDict[\"CIS\"] = {\"xCenter\": 0, \"yCenter\": -1.57, \"FAflag\": True, \"CCDNUM\": 4, \"Offset\": 1500,\n \"Extension\": 2, \"Rotation\": 0}\n infoDict[\"CIC\"] = {\"xCenter\": 0, \"yCenter\": 0., \"FAflag\": True, \"CCDNUM\": 3, \"Offset\": 1500,\n \"Extension\": 3, \"Rotation\": 180}\n infoDict[\"CIN\"] = {\"xCenter\": 0, \"yCenter\": 1.57, \"FAflag\": True, \"CCDNUM\": 2, \"Offset\": 1500,\n \"Extension\": 4, \"Rotation\": 180}\n infoDict[\"CIE\"] = {\"xCenter\": -1.57, \"yCenter\": 0., \"FAflag\": True, \"CCDNUM\": 1, \"Offset\": 1500,\n \"Extension\": 5, \"Rotation\": 90}\n\n # offset 1500 is 1.5 mm. Does not matter for CI instrument but should be set for GFAs +/- 1500\n # FAflag should be true for all focus and alignment chip.\n\n return infoDict", "def create_deck(self) -> dict:\n raise NotImplemented", "def generatePlayerDeck(self):\n cards = []\n\n for k in EVENT_CARDS: #id, name, description\n cards.append(EventCard(k, EVENT_CARDS[k][\"name\"], EVENT_CARDS[k][\"description\"]))\n\n for k in PLAYER_CARDS: #name,colour,population,area,country\n cards.append(PlayerCard(k, PLAYER_CARDS[k][\"colour\"], PLAYER_CARDS[k][\"population\"], PLAYER_CARDS[k][\"area\"], PLAYER_CARDS[k][\"country\"]))\n\n\n return cards", "def info(self):\n self._load_meta()\n cache = dict()\n for key, value in self._meta.items():\n container, _ = value\n cache[key] = container\n return cache", "def create_deck(self) -> dict:\n card = self.cards[0]\n\n self.card_json['CardID'] = self.current_card_id\n self.card_json['Nickname'] = card.tabletop_name\n\n card_asset_json = deepcopy(card_asset_template)\n card_asset_json['FaceURL'] = card.image_url\n card_asset_json['BackURL'] = self.card_back_url\n card_asset_json['BackIsHidden'] = False\n\n self.card_json['CustomDeck'] = {\"1\": card_asset_json}\n self.card_json['Transform']['rotZ'] = 180 if self.hidden else 0\n\n return self.card_json", "def requiered_info(self):\n return {\"players\":2, \"round\": (50, \"Nombre de manches\")}", "def to_dict(self):\n\n surface = {}\n\n if self.name:\n surface[\"meta\"] = {\n \"name\": self.name,\n }\n\n surface[\"layers\"]: {\n \"image\": self.raw.image,\n \"color\": self.raw.color,\n \"attr\": self.raw.attr,\n }\n\n return surface", "def info_dict(self):\n if not hasattr(self, '_info_dict'):\n self._info_dict = self.__create_info_dict()\n self.recalc_info_dict()\n return self._info_dict", "def newDeck(self):\n temp_deck = []\n for card in self.cards:\n for suit in self.suits:\n temp_deck.append(\"{} {}\".format(card, suit))\n return temp_deck", 
"def GetAllInfo(self):\n return {\n \"Type\":\"Player\",\n \"Location\":(self.location[0], self.location[1]),\n \"PlayerID\":self.ID\n }", "def GetCards(self):\n return self.cards", "def get_player_deck(playerId):\n\n query = \"\"\"\n select CardID, Name, Attributes from MonsterCards.UserCards\n inner join MonsterCards.Cards\n on CardID = ID\n where UserID = %s;\n \"\"\"\n cards = execute(query, (playerId, ))\n dict_cards = []\n for card_id, card_name, card_attr in cards:\n card = {\n \"id\": card_id,\n \"name\": card_name,\n \"attr\": card_attr\n }\n dict_cards.append(card)\n return dict_cards", "def getInfo(self, filename):\n self.info = {\n 'video': self.getVideoInfo(filename),\n 'audio': self.getAudioInfo(filename)\n }\n return self.info", "def deck_statistics(d_deck):\n N = 40\n n = 5\n K = 3\n k = 1\n\n d = {}\n for i in d_deck:\n k = d_deck[i]\n d[i] = statistics(N,n,K,k)\n return d", "def info(self):\n return {\n \"title\": self.title,\n \"url\": self.url,\n \"width\": self.size.width,\n \"height\": self.size.height,\n \"image_type\": self.image_type,\n \"subreddit\": self.subreddit,\n }", "def full_deck(self):\n deck = Deck()\n deck.populate(__class__.all_cards())\n return deck", "def build_deck():\n\tsuits = {\n\t\t'hearts': [],\n\t\t'diamonds': [],\n\t\t'clubs': [],\n\t\t'spades': []\n\t\t}\n\n\tface_cards = ['jack','queen', 'king', 'ace']\n\n\tfor suit in suits.keys():\n\t\tfor number in range(1,11):\n\t\t\tsuits[suit].append(f'{number} of {suit.title()}')\n\t\tfor face_card in face_cards:\n\t\t\tsuits[suit].append(f'{face_card.title()} of {suit.title()}')\n\n\n\treturn suits" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the selected `Card`s.
def GetSelection(self):
        return self.cards
[ "def get_selected_cards(self):\n\t\tselected_cards = []\n\t\tfor i in range(len(self.cards)):\n\t\t\tif self.cards[i] is not None:\n\t\t\t\tif self.cards[i]._state is CardState.SELECTED:\n\t\t\t\t\tselected_cards.append(i)\n\t\treturn selected_cards", "def cards(self):\n\t\treturn [btn.card for btn in self._buttonsSelected]", "def GetCards(self):\n return self.cards", "def get_cards(self):\r\n return self.deck", "def get_all_cards(self):\n\t\tquery_str = [\n\t\t\t\"SELECT * FROM cards;\"\n\t\t]\n\t\tself.c.execute(\n\t\t\tstr.join(\" \", query_str)\n\t\t)\n\t\tcardsData = self.c.fetchall()\n\t\tcards = []\n\t\tfor row in cardsData:\n\t\t\tcard = RFIDCard(str(row[0]), row[2], row[1] == 1)\n\t\t\tcards.append(card)\n\t\treturn tuple(cards)", "def pick_card(self, draw_num):\n draw_card = []\n for draw in range(draw_num):\n draw_card.append(self.all_cards.pop())\n\n\n return draw_card", "def get_active_cards(self):\n\t\tquery_str = [\n\t\t\t\"SELECT * FROM cards\",\n\t\t\t\"WHERE cards.synced = '1'\",\n\t\t\t\"OR cards.synced = 'True';\"\n\t\t]\n\t\tself.c.execute(\n\t\t\tstr.join(\" \", query_str)\n\t\t)\n\t\tcardsData = self.c.fetchall()\n\t\tcards = []\n\t\tfor row in cardsData:\n\t\t\tcard = RFIDCard(str(row[0]), row[2], row[1] == 1)\n\t\t\tcards.append(card)\n\t\treturn tuple(cards)", "def cards(self):\n try:\n return self.game.cards[self.player_id]\n except AttributeError:\n raise ValueError('Cannot access cards: player is unassigned.')", "def pick(self, amount=1):\n \n cards = []\n for i in range(amount):\n cards.append(self.cards[-1])\n self.cards.pop()\n return cards", "def get_cards(self, expansion=None):\n print(\"getting cards\")\n session = Session()\n cards = session.query(Card).filter(\n #Card.data['is_enhanced']=='false',\n Card.data['is_maverick']=='false')\n if expansion:\n cards = cards.filter(Card.data['expansion']==str(expansion))\n cards = cards.all()\n card_expansion = {}\n for card in cards:\n key = card.name+\";\"+str(card.data['expansion'])+\";\"+card.data['rarity']\n # Prefer non-enhanced non-maverick card that is in current_set\n if key in card_expansion:\n if card.data['is_enhanced']:\n continue\n if not card.is_from_current_set:\n continue\n if card.data['is_maverick']:\n continue\n card_expansion[key] = card\n print([card.data['expansion'] for card in card_expansion.values() if card.name=='Mookling'])\n print(len(cards))\n print(len(card_expansion.values()))\n return card_expansion.values()", "def create_all_cards(self):\r\n all_cards = []\r\n\r\n for value in Card.values:\r\n for symbols in self.choose_symbols_color():\r\n all_cards.append(f'{value} {symbols}')\r\n return all_cards", "def get_cards():\n Card = namedtuple('Card', 'rank suit')\n ranks = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']\n suits = ['spades', 'hearts', 'diamonds', 'clubs']\n full_deck = [Card(suit, rank) for suit in suits for rank in ranks]\n return full_deck", "def get_card(self, idx):\n return self.cards[idx]", "def get_cards(soup):\n return soup.findAll(\"div\", {\"class\": \"card\"})", "def pick_card(self):\n shuffle(Constants.RANKS)\n shuffle(Constants.SUITE)\n return Card(Constants.RANKS[0], Constants.SUITE[0])", "def HArrangeSelectedCards(self):\n if len(self.GetSelection()) < 1: return\n\n # we unselect first so that we erase the selection rectangles correctly\n arrange = self.GetSelection()[:]\n self.UnselectAll() \n\n lefts = [c.GetRect().left for c in arrange]\n left = min(lefts)\n card = arrange[lefts.index(left)]\n top = card.GetRect().top\n 
arrange.sort(key=lambda x: x.GetRect().left)\n\n for c in arrange:\n c.SetPosition(wx.Point(left, top))\n left = c.GetRect().right + self.GetPadding()\n\n self.FitToChildren()\n self.selec.SetFocus()", "def fetch_cards_of_suit(self, suit):\n\n def sort_by_value(card):\n \"\"\" Returns the value of the card based on it's value name \"\"\"\n return VALUE_BY_NAME[card.value]\n\n cards_of_suit = [card for card in self.cards if suit == card.suit]\n\n # Sort for easy viewing.\n cards_of_suit.sort(key=sort_by_value)\n return cards_of_suit", "def get_played_cards(cards, progress):\n return [card for card in cards if has_been_played(card, progress)]", "def getHearthstoneCards(self):\n return self.cache.get(HEARTHSTONE_CARD_API, fallback=[])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes `card` from the current selection.
def UnselectCard(self, card):
        if card in self.cards:
            self.cards.remove(card)
            card.Unselect()
[ "def UnselectCard(self, card):\n self.selec.UnselectCard(card)", "def discard(self, card):\n \n self.hand.pop(self.hand.index(card))\n self.cardList.append(card)", "def OnCardDelete(self, ev):\n card = ev.GetEventObject()\n self.cards.remove(card)\n self.UnselectCard(card)", "def remove_card(self, card):\n if card not in self._cards:\n print('you dont have that card')\n self._cards.remove(card) # O(n)", "def remove_from_hand(self, card):\n if card and card in self.hand:\n position = self.hand.index(card)\n del self.hand[position]\n return card", "def pop_card(self):\n try:\n return self._cards.pop(0)\n except:\n print('No cards left')", "def UnselectAll(self):\n while len(self.cards) > 0:\n c = self.cards[0]\n self.UnselectCard(c)", "def remove_card(self, kind, card_id):\n\n if kind not in CARD_KIND:\n raise UnknownKindError(f'Unknown kind {kind}')\n\n remain_cards = self._counter.get(kind)\n if not (card_id in remain_cards and remain_cards[card_id]):\n raise InvalidOperationError(\n f\"Can't remove {card_id}{kind} from {self}\")\n remain_cards[card_id] -= 1\n self.data[kind].replace(card_id, '', 1)", "def removefromhandvisible(self, cards):\n for card in cards:\n try:\n index = self.handvisible.index(card)\n self.handvisible.pop(index)\n self.hvcardround.pop(index)\n except:\n pass", "def pull_card(self):\n return self._hand.pop(0)", "def discard_scard(self, card):\n try:\n card = random.choice(self.shand)\n move(card, self.shand, self.survival_discard)\n except:\n logger.info('{} tried to discard a survival card due to Toxin but had none'.format(self.name))", "def delete_cards(self):\n self._stage = []\n self._hand = []", "def discard_pcard(self, card):\n move(card, self.phand, self.discard)", "def discard():\n player = current_player._get_current_object()\n if not player:\n abort(400)\n\n args = request.get_json()\n card_id = args.get('card', None)\n if card_id is None:\n abort(400)\n\n card = Card.query.get(card_id)\n if card is not None and card in player.cards:\n player.cards.remove(card)\n db.session.commit()\n return player_state()", "def DeleteSelected(self):\n # store the number of cards we're deleting to raise the event\n number = len(self.cards)\n \n # remember to use while instead of for, since in every\n # iteration self.cards is growing shorter\n while len(self.cards) > 0:\n c = self.cards[-1]\n c.Delete()\n if c in self.cards:\n self.cards.remove(c)\n\n # raise the event; it differs from Card.DeleteEvent in that\n # we raise only one event for every delete action\n # e.g., if we delete five cards, there will be five Card.DeleteEvent's\n # raised, but only one SelectionManager.DeleteEvent\n event = self.DeleteEvent(id=wx.ID_ANY, number=number)\n event.SetEventObject(self)\n self.GetEventHandler().ProcessEvent(event)", "def deSelected(self):\n self.isSelected = False\n selectedSprites.remove(self)", "def delete_card_from_deck(self,obsolete_id_card):\n\t\ti=0\n\t\tfor x in self.cards:\n\t\t\tif x.identifier==obsolete_id_card:\n\t\t\t\tself.cards.pop(i)\n\t\t\ti+=1\n\t\treturn", "def deal(self):\n \n topCard = self._deck[0]\n self._deck.remove(topCard)\n self._discardPile.append(topCard)\n return topCard", "def deal_card(self):\n self.deal_deck=self.deck_list[-1]\n self.deck_list.pop(-1)\n # Deal a card object from the deck\n return self.deal_deck" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unselects all cards. Be sure to call this method instead of `Unselect` on every card for proper cleanup.
def UnselectAll(self):
        while len(self.cards) > 0:
            c = self.cards[0]
            self.UnselectCard(c)
[ "def unselect_boxes(self):\n for box in self._sel_boxes:\n box.unselect()\n self._sel_boxes = []", "def UnselectCard(self, card):\n self.selec.UnselectCard(card)", "def UnselectCard(self, card):\n if card in self.cards:\n self.cards.remove(card)\n card.Unselect()", "def deselect_all(self):\n for c in self.check_boxes:\n if c.isChecked() is True:\n c.click()", "def on_deselect(self):\n for tile in self.divisions:\n tile.group_selected = False\n tile.selected = False", "def deSelected(self):\n self.isSelected = False\n selectedSprites.remove(self)", "def deselect(self, *args) -> \"void\":\n return _coin.SoSelection_deselect(self, *args)", "def unselect(self, game):\n game.tower_buttons.empty()", "def _clear_selection():\r\n\r\n for ob in bpy.data.objects:\r\n ob.select = False", "def clear_selection(self):\n for node in self._selected:\n node.deselect()\n self._selected = []\n self.selection_changed()", "def DeSelectAll (self):\n for drawableObject in self.zOrderedDrawableObjects:\n if (drawableObject.selected):\n drawableObject.selected = False\n self.RefreshScrolledRect (drawableObject.bounds)", "def deselect(self, *args):\n return _coin.SoSelection_deselect(self, *args)", "def selection_clear(self, first, last=None):\r\n\t\tfor l in self.widgets:\r\n\t\t\ttk.Listbox.selection_clear(l, first, last)", "def unselect_options(self):\n self.node.unselect_options()", "def uncheck_selected(self, sender, args):\n self._set_states(state=False, selected=True)", "def unselect_target(self, box):\n box.unselect()\n self._sel_boxes.remove(box)", "def unselect(self, item):\n if item.selected:\n item.selected=False\n self._total_selected-=1\n debug('*** total_selected={}'.format(self._total_selected))", "def uncheck_all(self, sender, args):\n self._set_states(state=False)", "def click_clear_sel_params(self) -> None:\r\n self.clear_sel_params()\r\n self.w.clear_sel_par()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Select every `Card` in `group`.
def SelectGroup(self, group, new_sel=True):
        # in case we are coming from a card that's inside the group,
        # we may want to return to that card after selection ends
        # so we select the group but restore the last card after
        crd = None
        if self.last and self.last in group.GetMembers():
            crd = self.last

        if new_sel:
            self.UnselectAll()
        for c in group.GetMembers():
            self.SelectCard(c)

        if crd:
            self.last = crd
[ "def GetContainingGroups(self, card):\n return [g for g in self.groups if card in g.GetMembers()]", "def get_cards(self):\n for c in sorted(self.cards, key=lambda card: card.data['house']):\n for i in range(self.data['_links']['cards'].count(c.key)):\n c.data['is_legacy'] = c.key in self.data.get('set_era_cards',{}).get('Legacy',[])\n c.data['bonus_icons'] = []\n for bonus_card in self.data.get(\"bonus_icons\", []):\n if bonus_card[\"card_id\"] == c.key:\n c.data['bonus_icons'] = bonus_card['bonus_icons']\n yield c", "def cards(self):\n\t\treturn [btn.card for btn in self._buttonsSelected]", "def iter_card_sets(self):\n for card_set in self.root.all_card_sets():\n yield card_set", "def pick_card(self, draw_num):\n draw_card = []\n for draw in range(draw_num):\n draw_card.append(self.all_cards.pop())\n\n\n return draw_card", "def pick(self, amount=1):\n \n cards = []\n for i in range(amount):\n cards.append(self.cards[-1])\n self.cards.pop()\n return cards", "def get_cards(soup):\n return soup.findAll(\"div\", {\"class\": \"card\"})", "def select_groups(adata, groups=\"all\", key=\"louvain\"):\n strings_to_categoricals(adata)\n if isinstance(groups, list) and isinstance(groups[0], int):\n groups = [f\"{n}\" for n in groups]\n categories = adata.obs[key].cat.categories\n groups_masks = np.array(\n [categories[i] == adata.obs[key].values for i, name in enumerate(categories)]\n )\n if groups == \"all\":\n groups = categories.values\n else:\n groups_ids = [categories.get_loc(name) for name in groups]\n groups_masks = groups_masks[groups_ids]\n groups = categories[groups_ids].values\n return groups, groups_masks", "def UnselectAll(self):\n while len(self.cards) > 0:\n c = self.cards[0]\n self.UnselectCard(c)", "def NewGroup(self, cards=[]):\n self.groups.append(card.CardGroup(label=len(self.groups), members=cards))", "def first_cards(self):\n for player in self.players:\n self.assign_player_cards(player, 7)", "def findCardsByNum(self, number):\n if self.verbose:\n print(self.name + \" finding all cards of number \" + str(number))\n if self.log is not None:\n self.log.write(self.name + \" finding all cards of number \" + str(number) + \"\\n\")\n result = []\n for card in self.hand:\n if card.get_number() == number:\n result.append(card)\n return result", "def supports_group(self, group):\n raise NotImplementedError(\"Scraper has no implementation for filtering a specific group.\")", "def all_cards(self):\n for i in range(len(__class__.card_suits) * len(__class__.card_values)):\n suit = __class__.card_suits[i // len(__class__.card_values)]\n value = __class__.card_values[i % len(__class__.card_values)]\n yield __class__(suit=suit, value=value)", "def create_all_cards(self):\r\n all_cards = []\r\n\r\n for value in Card.values:\r\n for symbols in self.choose_symbols_color():\r\n all_cards.append(f'{value} {symbols}')\r\n return all_cards", "def get_all_cards(self):\n\t\tquery_str = [\n\t\t\t\"SELECT * FROM cards;\"\n\t\t]\n\t\tself.c.execute(\n\t\t\tstr.join(\" \", query_str)\n\t\t)\n\t\tcardsData = self.c.fetchall()\n\t\tcards = []\n\t\tfor row in cardsData:\n\t\t\tcard = RFIDCard(str(row[0]), row[2], row[1] == 1)\n\t\t\tcards.append(card)\n\t\treturn tuple(cards)", "def pick_card(self):\n shuffle(Constants.RANKS)\n shuffle(Constants.SUITE)\n return Card(Constants.RANKS[0], Constants.SUITE[0])", "def _select_associated_content_group(self, content_group):\n self.select_content_group_radio_button()\n select_option_by_text(\n self.q(css=self._bounded_selector(self.content_group_selector_css)), 
content_group\n )", "def get_same_month_cards(self, card: Card) -> List[Card]:\n object_month = card.month\n cards = []\n for field_card in self.cards:\n if field_card.month == object_month:\n cards.append(field_card)\n return cards" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes every `Card` currently selected.
def DeleteSelected(self):
        # store the number of cards we're deleting to raise the event
        number = len(self.cards)

        # remember to use while instead of for, since in every
        # iteration self.cards is growing shorter
        while len(self.cards) > 0:
            c = self.cards[-1]
            c.Delete()
            if c in self.cards:
                self.cards.remove(c)

        # raise the event; it differs from Card.DeleteEvent in that
        # we raise only one event for every delete action
        # e.g., if we delete five cards, there will be five Card.DeleteEvent's
        # raised, but only one SelectionManager.DeleteEvent
        event = self.DeleteEvent(id=wx.ID_ANY, number=number)
        event.SetEventObject(self)
        self.GetEventHandler().ProcessEvent(event)
[ "def delete_cards(self):\n self._stage = []\n self._hand = []", "def UnselectAll(self):\n while len(self.cards) > 0:\n c = self.cards[0]\n self.UnselectCard(c)", "def OnCardDelete(self, ev):\n card = ev.GetEventObject()\n self.cards.remove(card)\n self.UnselectCard(card)", "def UnselectCard(self, card):\n if card in self.cards:\n self.cards.remove(card)\n card.Unselect()", "def UnselectCard(self, card):\n self.selec.UnselectCard(card)", "def deSelected(self):\n self.isSelected = False\n selectedSprites.remove(self)", "def removefromhandvisible(self, cards):\n for card in cards:\n try:\n index = self.handvisible.index(card)\n self.handvisible.pop(index)\n self.hvcardround.pop(index)\n except:\n pass", "def remove(self):\n for disconnect_cid in self._disconnect_cids:\n disconnect_cid()\n while self._selections:\n self._remove_selection(self._selections[-1])", "def delete(self):\n del contactlist[self.get_selection_index()]\n self.update_contactlist()", "def Delete_multi(self, index=[]):\n removeList=[]\n for idx in index:\n if idx >= self.length or idx <0:\n warn( \"The list index specified is out of range\")\n return\n to_remove = self.ItemList[idx]\n removeList.append(to_remove)\n if to_remove.locked:\n warn( \"Can't delete saved item. Uncheck the save mark\")\n return\n # delete the representation from canvas\n self.canvas.delete(to_remove.icon)\n self.canvas.delete(to_remove.caption)\n \n # If the item to be deleted is selected, remove the selection box\n if self.current==idx:\n self.canvas.delete(self.selectionBox)\n self.current_selected = None\n \n for r in removeList:\n self.ItemList.remove(r)\n #del r\n \n # Update GUI of the list\n self.length -= len(index)\n i=1\n for item in self.ItemList:\n item.y=i\n item.Draw()\n i+=1", "def remove_selected(self):\n idx = 0\n for i in list(self.selection):\n idx = self.index(i)\n self.remove(i)\n new = max(0, (idx - 1))\n if len(self) > new:\n self.selection.add(self[new])", "def delete_all(self):\n return payment_rpc.delete_cart()", "def delete_deck(self):\n\n # Ask for the name of the deck\n deck_name = input(\"Please enter the name of the deck you want to delete: \")\n deck_name = deck_name.strip()\n print(\"\")\n\n # Find the deck and delete it if it exists\n for deck in self.decks:\n if deck_name == deck.name:\n self.decks.remove(deck)\n del deck\n print(deck_name, \"is successfully deleted\")\n return\n\n print(\"Deck not found\")", "def clear(self):\r\n\r\n ilist = self.canvas().allItems()\r\n for eachItem in ilist:\r\n if eachItem:\r\n eachItem.setCanvas(None)\r\n del eachItem\r\n self.canvas().update()", "def stash(self):\n for op_data in self.c.selection:\n\n self._destroy_op(op_data)", "def delete(self, *devices):\n for d in devices:\n d.delete()", "def discard(self, card):\n \n self.hand.pop(self.hand.index(card))\n self.cardList.append(card)", "def _clear_selection():\r\n\r\n for ob in bpy.data.objects:\r\n ob.select = False", "def selection_clear(self, first, last=None):\r\n\t\tfor l in self.widgets:\r\n\t\t\ttk.Listbox.selection_clear(l, first, last)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Selects next `Card` in the specified direction.
def SelectNext(self, direc, new_sel=False):
        nxt = self.GetParent().GetNextCard(self.last, direc)
        if nxt:
            self.SelectCard(nxt, new_sel)
[ "def GetNextCard(self, card, direc):\n # depending on the direction we compare a different side\n # of the cards, as well as get the points whose distance\n # we're going to calculate in a different way\n if direc == Deck.LEFT:\n side = lambda x: x.right\n getp1 = lambda x: x.GetTopLeft()\n getp2 = lambda x: x.GetBottomLeft()\n elif direc == Deck.RIGHT:\n side = lambda x: x.left\n getp1 = lambda x: x.GetTopLeft()\n getp2 = lambda x: x.GetTopRight()\n elif direc == Deck.UP:\n side = lambda x: x.bottom\n getp1 = lambda x: x.GetTopLeft()\n getp2 = lambda x: x.GetBottomLeft()\n elif direc == Deck.DOWN:\n side = lambda x: x.top\n getp1 = lambda x: x.GetBottomLeft()\n getp2 = lambda x: x.GetTopLeft()\n\n # get those cards whose \"side\" is in the desired position with respect to card\n rect = card.GetRect()\n nxt = []\n if direc == Deck.LEFT or direc == Deck.UP:\n nxt = [c for c in self.GetCards() if side(c.GetRect()) < side(rect)]\n elif direc == Deck.RIGHT or direc == Deck.DOWN:\n nxt = [c for c in self.GetCards() if side(c.GetRect()) > side(rect)]\n else:\n return None\n\n # we're going to use getp1 to get a point in card and compare\n # it to the point got by getp2 on all the cards in nxt\n if nxt:\n # order them by distance\n nxt.sort(key=lambda x: utilities.dist2(getp1(x.GetRect()), getp2(rect)))\n # and return the nearest one\n return nxt[0]\n else:\n return None", "def movecard(self, i):\n if i == _UP and self.mode == _CHOOSEPILE and self.cardpointer < len(self.board[self.stackpointer])-1:\n card_index = len(self.board[self.stackpointer])-1-self.cardpointer\n if self.board[self.stackpointer][card_index-1][2]:\n if self.board[self.stackpointer][card_index][1] == self.board[self.stackpointer][card_index-1][1]:\n if self.board[self.stackpointer][card_index][0] + 1 == self.board[self.stackpointer][card_index-1][0]:\n self.cardpointer += 1\n elif i == _DOWN and self.mode == _CHOOSEPILE and self.cardpointer > 0:\n self.cardpointer += -1", "def MoveSelected(self, dx, dy):\n for c in self.GetSelection():\n self.GetParent().MoveCard(c, dx, dy)", "def next_card(_, card_id):\n concepts = get_concepts_applying_filters().order_by('pk')\n if concepts.filter(id__gt=card_id):\n card = concepts.filter(id__gt=card_id)[0]\n else:\n card = concepts[0]\n return redirect('view_card', card.id)", "def SelectCard(self, card, new_sel=False):\n self.selec.SelectCard(card, new_sel)", "def nextCard(self, location, exit_endpoint):\n return None", "def next_move(self) -> Move:", "def next_pos(self, direction):\n\t\tif direction == \"up\":\n\t\t\treturn (self.x, self.y - 1)\n\t\telif direction == \"down\":\n\t\t\treturn (self.x, self.y + 1)\n\t\telif direction == \"right\":\n\t\t\treturn (self.x + 1, self.y)\n\t\telif direction == \"left\":\n\t\t\treturn (self.x - 1, self.y)\n\t\treturn None", "def next_play(board, selection, active_player):", "def pick_card(self, putdown_pile):#COMMENTS\n pile_card = putdown_pile.top()\n i = 0\n while i <= self.deck.get_amount()+1:\n card = self.deck.top()\n match = pile_card.matches(card)\n if match:\n return self.deck.pick()[0]\n else:\n self.deck.shuffle()\n i+=1\n return None", "def _activate_next_item(self):\n self._selected_item = (self._selected_item + 1) % len(self._items)", "def select_next_edge_or_vertex(self, single=True, direction=+1):\n if len(self._active_vertices) == 0:\n self._active_vertices = [0]\n elif len(self._active_vertices) == 1:\n self._active_vertices.append((self._active_vertices[-1] + direction) % len(self))\n elif len(self._active_vertices) == 2:\n v0, 
v1 = self._active_vertices\n if v0 == (v1 + 1) % len(self):\n v1, v0 = v0, v1\n if direction == +1:\n self._active_vertices = [v1]\n else:\n self._active_vertices = [v0]\n\n if single:\n self._active_vertices = [self._active_vertices[-1]]\n else:\n self._active_vertices = sorted(self._active_vertices)\n av = self._active_vertices[-1]\n self.fit_point_in_view(self.x[av], self.y[av])", "def select_next_item(self) -> int:\n if not self.is_opened():\n self.open()\n self.selected_item += 1\n if self.selected_item == len(self.items):\n self.selected_item = -1\n return self.selected_item", "def advance(self,d=1):\n if self.index!=None:\n self.index = (self.index + d) % len(self.ordering)\n return self", "def next_player(self):\n if self._order:\n del self._order[0]\n if not self._order:\n self.end_phase()\n elif not self.choices():\n self.resolve(None, None)", "def select_move(self, board):", "def decide_next_move(self, game, role, show=False):\n\n move = None\n\n return move", "def OnCardLeftDown(self, ev):\n card = ev.GetEventObject()\n\n # bring to front and select\n card.Raise()\n self.selec.SelectCard(card)\n\n # initiate moving\n self.CaptureMouse()\n self.Bind(wx.EVT_LEFT_UP, self.OnCardLeftUp)\n self.Bind(wx.EVT_MOTION, self.OnMovingCard)\n\n self.on_motion = False\n pos = card.GetPosition() + ev.GetPosition() # relative to the canvas\n self.moving_cards_pos = []\n for c in self.GetSelection():\n # (card, pos w.r.t. the original click, current pos)\n self.moving_cards_pos.append((c, c.GetPosition() - pos, c.GetPosition()))", "def move_cards_aside(self, to_be_seen):\n move_next = False\n for card in self.draw_hand:\n if move_next:\n card.x = card.x + card.width * 0.17\n elif card == to_be_seen:\n move_next = True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Move all selected `Card`s.
def MoveSelected(self, dx, dy): for c in self.GetSelection(): self.GetParent().MoveCard(c, dx, dy)
[ "def move_all_cards(self, destination_list):\n\n self.client.fetch_json(\n '/lists/' + self.id + '/moveAllCards',\n http_method='POST',\n post_args = {\n \"idBoard\": destination_list.board.id,\n \"idList\": destination_list.id,\n })", "def HArrangeSelectedCards(self):\n if len(self.GetSelection()) < 1: return\n\n # we unselect first so that we erase the selection rectangles correctly\n arrange = self.GetSelection()[:]\n self.UnselectAll() \n\n lefts = [c.GetRect().left for c in arrange]\n left = min(lefts)\n card = arrange[lefts.index(left)]\n top = card.GetRect().top\n arrange.sort(key=lambda x: x.GetRect().left)\n\n for c in arrange:\n c.SetPosition(wx.Point(left, top))\n left = c.GetRect().right + self.GetPadding()\n\n self.FitToChildren()\n self.selec.SetFocus()", "def move_cards(self, hand, num=1):\n cards = []\n for i in range(num):\n cards.append(self.pop_card())\n str_cards = \",\".join(cards)\n param = {\"cards\": str_cards}\n move_url = self.base_url + self.deck_id + '/pile/' + hand + '/add/'\n res = requests.get(move_url, params=param).json()\n if res['success']:\n return True\n #print(f'cards moved to {hand}')\n else:\n print(\"error in moving cards\")\n return", "def movecard(self, i):\n if i == _UP and self.mode == _CHOOSEPILE and self.cardpointer < len(self.board[self.stackpointer])-1:\n card_index = len(self.board[self.stackpointer])-1-self.cardpointer\n if self.board[self.stackpointer][card_index-1][2]:\n if self.board[self.stackpointer][card_index][1] == self.board[self.stackpointer][card_index-1][1]:\n if self.board[self.stackpointer][card_index][0] + 1 == self.board[self.stackpointer][card_index-1][0]:\n self.cardpointer += 1\n elif i == _DOWN and self.mode == _CHOOSEPILE and self.cardpointer > 0:\n self.cardpointer += -1", "def move_cards_aside(self, to_be_seen):\n move_next = False\n for card in self.draw_hand:\n if move_next:\n card.x = card.x + card.width * 0.17\n elif card == to_be_seen:\n move_next = True", "def UnselectAll(self):\n while len(self.cards) > 0:\n c = self.cards[0]\n self.UnselectCard(c)", "def move_zone(deck: List[Card], zone: Zone) -> None:\n for card in deck:\n card.zone = zone", "def collect(self, players):\n for p in players:\n while len(p.get_cards()) > 0:\n self._cards.append(p.pop_card())", "def move(self, card, to_deck, **kwargs):\n self.remove(card)\n to_deck.add(card, **kwargs)", "def give_cards(self, hand, amount):\n\n for i in range(amount):\n hand.add_card(self.pop_card())", "def simulate_move(self):\n for atom in self.list_of_atoms:\n atom.move(self.grid)", "def delete_cards(self):\n self._stage = []\n self._hand = []", "def pick(self, amount=1):\n \n cards = []\n for i in range(amount):\n cards.append(self.cards[-1])\n self.cards.pop()\n return cards", "def move(self):\r\n # move agents\r\n for agent in self.agents:\r\n agent.move(self.agents)", "def deal_initial_cards(self) -> None:\r\n for player_index, player in self.players.items():\r\n #Draw 6 cards\r\n drawn_cards = self.draw_cards(6)\r\n player.initial_draw(drawn_cards)", "def move():\n move_square()\n move_rectangle()\n move_circle()\n square_dancing()\n crop_circles()", "def move(self):\n for alien in self.aliens:\n alien.move()", "def move(self, distance):\n for tile in self.group:\n tile.move_relative(distance, False)", "def stage_card(self, i, j):\n self._stage.insert(j, self._hand.pop(i))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Plot the output versus continuous label figures for each session.
def save_output_vs_continuous_label_plot(self): for (trial, output_record), (_, label_record) in zip(self.trialwise_output_dict.items(), self.trialwise_continuous_label_dict.items()): complete_directory = self.complete_directory_to_save_plot() plot_filename = trial full_plot_filename = os.path.join(complete_directory, plot_filename + ".jpg") # Find the y ranges for subplot with better clarity. if len(self.emotional_dimension) > 1: ylim_low, ylim_high = [], [] for emotion in self.emotional_dimension: ylim_low.append(min(min(output_record[emotion]), min(label_record[emotion]))) ylim_high.append(max(max(output_record[emotion]), max(label_record[emotion]))) ylim_low, ylim_high = min(ylim_low) * 1.15, max(ylim_high) * 1.15 else: ylim_low, ylim_high = None, None self.plot_and_save(full_plot_filename, trial, output_record, label_record, ylim_low, ylim_high)
[ "def _plot_separated_group(self, data, output, name):\n\n if len(data)>1:\n fig, ax = plt.subplots()\n ax.set_xlabel('Session number')\n ax.set_ylabel(name)\n temp = data.reset_index()\n l1 = self._plot_second_axis(ax, x = temp.session_number, y = temp.counts)\n l2 = self._plot_group(data, ax, 'cyan', 'o')\n lines = l1+l2\n labels = [l.get_label() for l in lines]\n plt.legend(lines, labels, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n plt.savefig(output+labels[1]+'.svg', bbox_inches='tight')\n plt.close()", "def plot(self):\n fig, ax = plt.subplots()\n # Calculates number of elapsed simulation cycles\n sim_cycles = len(self.average_distances) * self.cfg[\"interval\"]\n ax.plot(range(0, sim_cycles, self.cfg[\"interval\"]), self.average_distances)\n ax.grid()\n if isinstance(self.slam, EKFSlam):\n ax.set(xlabel='Simulation cycles', ylabel='Average distance to true landmark in meters',\n title='Evaluation of EKF SLAM')\n plt.savefig('ekf_slam_evaluation.png')\n else:\n ax.set(xlabel='Simulation cycles', ylabel='Average distance to true landmark in meters',\n title='Evaluation of FastSLAM')\n plt.savefig('fast_slam_evaluation.png')\n ax.grid()\n\n plt.show()", "def showPlot2():\n title(\"Clean time for 25x25 vs Number of Robots\")\n xlabel(\"Number of Robots\")\n ylabel(\"Average time\")\n means = []\n for i in range(1, 11):\n means.append(runSimulation(i, 1.0, 25, 25, 0.75, 30, Robot, False))\n num_robots = []\n for i in range(1,11):\n num_robots.append(i)\n plot(num_robots, means)", "def showPlot2():\n listofavgs = []\n listofrobots =[]\n for robots in range(1,11):\n listofrobots.append(robots)\n a = runSimulation(robots, 1.0, 25, 25, 0.75, 50, Robot, False)\n listofavgs.append(int(a))\n print \"for robot numbers ranging from 1 - 10 we get these average times\", listofavgs\n pylab.figure() \n pylab.plot(listofrobots,listofavgs,'ro')\n pylab.ylabel('Average Time in Clock Ticks')\n pylab.xlabel('Number of Robots')\n pylab.title('Time Vs. 
Number of Robots in a 25x25 Room 75% Clean')", "def plotVoltages():\n for n in range(1,4): # plot new data points\n plt.plot(plotdata[0],plotdata[n],linecolors[n],label=plotlabels[n])\n global rollingTitle\n setplotparams(rollingTitle)\n global plotwindow\n setplotwindow(plotwindow) # comment out to plot entire test\n plt.legend(fontsize=12)", "def __call__(self):\n fig, (evidence_plot, kl_plot) = plt.subplots(2)\n fig.suptitle(\"Evidence and KL Divergence\")\n evidence_plot.plot(self.ep_history.evidences(), label=\"evidence\")\n kl_plot.semilogy(self.ep_history.kl_divergences(), label=\"KL divergence\")\n # for factor, factor_history in self.ep_history.items():\n # evidence_plot.plot(\n # factor_history.evidences, label=f\"{factor.name} evidence\"\n # )\n # kl_plot.plot(\n # factor_history.kl_divergences, label=f\"{factor.name} divergence\"\n # )\n evidence_plot.legend()\n kl_plot.legend()\n plt.savefig(str(self.output_path / \"graph.png\"))", "def showPlot1():\n robots = 10\n num_trials = []\n times = []\n for i in range(robots):\n trial = runSimulation(i + 1,1,20,20,0.80,10,StandardRobot)\n num_trials.append(trial[0])\n times.append(trial[1])\n pylab.plot(num_trials, times)\n pylab.xlabel('number of robots')\n pylab.ylabel('time to finish 80% of 20x20 room')\n pylab.title('time taken to complete 80% of 20x20 room with 1-10 robots')\n pylab.show()", "def _show_examples(self):\n labels, label_indices, label_counts = np.unique(self.y_train, return_index=True, return_counts=True)\n plt.figure(figsize=(15, 20))\n for idx in range(len(labels)):\n ax = plt.subplot(9, 5, idx + 1)\n ax.imshow(self.X_train[label_indices[idx]])\n ax.axis('off')\n ax.set_title(f\"label {labels[idx]}: {label_counts[idx]} images\")\n\n plt.show()", "def outflow_test_plot_nii(comp_dict_outflow,comp_dict_no_outflow,run_dir):\n\n\t# Creat plot window and axes\n\tfig = plt.figure(figsize=(14,11)) \n\tgs = gridspec.GridSpec(9,1)\n\tax1 = fig.add_subplot(gs[0:3,0]) # No outflow\n\tax2 = fig.add_subplot(gs[3:4,0]) # No outflow residuals\n\tax3 = fig.add_subplot(gs[5:8,0]) # Outflow\n\tax4 = fig.add_subplot(gs[8:9,0]) # Outflow residuals\n\tgs.update(wspace=0.0, hspace=0.0) # set the spacing between axes. \n\t# No outflow model (ax1,ax2)\n\tnorm = np.median(comp_dict_no_outflow['data']['comp'])\n\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['data']['comp'] , color='xkcd:white' , linewidth=0.5, linestyle='-' , label='Data' ) \n\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['model']['comp'] , color='xkcd:red' , linewidth=1.0, linestyle='-' , label='Model' ) \n\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['host_galaxy']['comp'] , color='xkcd:lime green' , linewidth=1.0, linestyle='-' , label='Galaxy' )\n\tif ('power' in comp_dict_no_outflow):\n\t\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['power']['comp'] , color='xkcd:orange red' , linewidth=1.0, linestyle='--', label='AGN Cont.' )\n\tif ('na_feii_template' in comp_dict_no_outflow) and ('br_feii_template' in comp_dict_no_outflow):\n\t\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['na_feii_template']['comp'], color='xkcd:yellow' , linewidth=1.0, linestyle='-' , label='Na. FeII' )\n\t\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['br_feii_template']['comp'], color='xkcd:orange' , linewidth=1.0, linestyle='-' , label='Br. 
FeII' )\n\telif ('F_feii_template' in comp_dict_no_outflow) and ('S_feii_template' in comp_dict_no_outflow) and ('G_feii_template' in comp_dict_no_outflow) and ('Z_feii_template' in comp_dict_no_outflow):\n\t\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['F_feii_template']['comp'], color='xkcd:yellow' , linewidth=1.0, linestyle='-' , label='F-transition FeII' )\n\t\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['S_feii_template']['comp'], color='xkcd:mustard' , linewidth=1.0, linestyle='-' , label='S_transition FeII' )\n\t\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['G_feii_template']['comp'], color='xkcd:orange' , linewidth=1.0, linestyle='-' , label='G_transition FeII' )\n\t\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['Z_feii_template']['comp'], color='xkcd:rust' , linewidth=1.0, linestyle='-' , label='Z_transition FeII' )\n\tif ('br_Ha' in comp_dict_no_outflow):\n\t\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['br_Ha']['comp'] , color='xkcd:turquoise' , linewidth=1.0, linestyle='-' , label='Br. H-alpha' )\n\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['na_Ha_core']['comp'] , color='xkcd:dodger blue', linewidth=1.0, linestyle='-' , label='Core comp.' )\n\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['na_nii6549_core']['comp'] , color='xkcd:dodger blue', linewidth=1.0, linestyle='-' )\n\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['na_nii6585_core']['comp'] , color='xkcd:dodger blue', linewidth=1.0, linestyle='-' )\n\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['na_sii6718_core']['comp'] , color='xkcd:dodger blue', linewidth=1.0, linestyle='-' )\n\tax1.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['na_sii6732_core']['comp'] , color='xkcd:dodger blue', linewidth=1.0, linestyle='-' )\n\tax1.axvline(6549.86, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax1.axvline(6564.61, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax1.axvline(6585.27, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax1.axvline(6718.29, color='xkcd:white' , linewidth=0.5, linestyle='--') \n\tax1.axvline(6732.67, color='xkcd:white' , linewidth=0.5, linestyle='--') \n\t# ax1.plot(comp_dict_no_outflow['wave']['comp'], 1*comp_dict_no_outflow['noise']['comp'], color='xkcd:dodger blue' , linewidth=0.5, linestyle='--')\n\t# ax1.plot(comp_dict_no_outflow['wave']['comp'], 2*comp_dict_no_outflow['noise']['comp'], color='xkcd:lime green' , linewidth=0.5, linestyle='--')\n\t# ax1.plot(comp_dict_no_outflow['wave']['comp'], 3*comp_dict_no_outflow['noise']['comp'], color='xkcd:orange red' , linewidth=0.5, linestyle='--')\n\tax1.set_ylabel(r'$f_\\lambda$ ($10^{-17}$ erg cm$^{-2}$ s$^{-1}$ $\\rm{\\AA}^{-1}$)')\n\tax1.set_xticklabels([])\n\tax1.legend(loc='upper left',fontsize=6)\n\tax1.set_xlim(np.min(comp_dict_outflow['wave']['comp']),np.max(comp_dict_outflow['wave']['comp']))\n\tax1.set_ylim(0.0,np.max(comp_dict_no_outflow['model']['comp'])+3*np.median(comp_dict_no_outflow['noise']['comp']))\n\tax1.set_title('No Outflow Model')\n\t# No Outflow Residuals\n\tax2.plot(comp_dict_no_outflow['wave']['comp'],3*(comp_dict_no_outflow['data']['comp']-comp_dict_no_outflow['model']['comp']), color='xkcd:white' , linewidth=0.5, linestyle='-')\n\tax2.axvline(6549.86, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax2.axvline(6564.61, color='xkcd:white' , linewidth=0.5, 
linestyle='--')\n\tax2.axvline(6585.27, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax2.axvline(6718.29, color='xkcd:white' , linewidth=0.5, linestyle='--') \n\tax2.axvline(6732.67, color='xkcd:white' , linewidth=0.5, linestyle='--') \n\tax2.axhline(0.0, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax2.plot(comp_dict_no_outflow['wave']['comp'], 3*1*comp_dict_no_outflow['noise']['comp'], color='xkcd:bright aqua' , linewidth=0.5, linestyle='-')\n\t# ax2.plot(comp_dict_no_outflow['wave']['comp'], 3*2*comp_dict_no_outflow['noise']['comp'], color='xkcd:lime green' , linewidth=0.5, linestyle='--')\n\t# ax2.plot(comp_dict_no_outflow['wave']['comp'], 3*3*comp_dict_no_outflow['noise']['comp'], color='xkcd:orange red' , linewidth=0.5, linestyle='--')\n\tax2.set_xlabel(r'$\\lambda_{\\rm{rest}}$ ($\\rm{\\AA}$)')\n\tax2.set_ylabel(r'$\\Delta f_\\lambda$')\n\tax2.set_xlim(np.min(comp_dict_outflow['wave']['comp']),np.max(comp_dict_outflow['wave']['comp']))\n\tax2.set_ylim(0.0-9*np.std(comp_dict_no_outflow['resid']['comp']),ax1.get_ylim()[1])\n # Outlfow models (ax3,ax4)\n\tnorm = np.median(comp_dict_outflow['data']['comp'])\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['data']['comp'] , color='xkcd:white' , linewidth=0.5, linestyle='-' , label='Data' ) \n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['model']['comp'] , color='xkcd:red' , linewidth=1.0, linestyle='-' , label='Model' ) \n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['host_galaxy']['comp'] , color='xkcd:lime green' , linewidth=1.0, linestyle='-' , label='Galaxy' )\n\tif ('power' in comp_dict_outflow):\n\t\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['power']['comp'] , color='xkcd:orange red' , linewidth=1.0, linestyle='--', label='AGN Cont.' )\n\tif ('na_feii_template' in comp_dict_outflow) and ('br_feii_template' in comp_dict_outflow):\n\t\tax3.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['na_feii_template']['comp'], color='xkcd:yellow' , linewidth=1.0, linestyle='-' , label='Na. FeII' )\n\t\tax3.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['br_feii_template']['comp'], color='xkcd:orange' , linewidth=1.0, linestyle='-' , label='Br. FeII' )\n\telif ('F_feii_template' in comp_dict_outflow) and ('S_feii_template' in comp_dict_outflow) and ('G_feii_template' in comp_dict_outflow) and ('Z_feii_template' in comp_dict_outflow):\n\t\tax3.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['F_feii_template']['comp'], color='xkcd:yellow' , linewidth=1.0, linestyle='-' , label='F-transition FeII' )\n\t\tax3.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['S_feii_template']['comp'], color='xkcd:mustard' , linewidth=1.0, linestyle='-' , label='S_transition FeII' )\n\t\tax3.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['G_feii_template']['comp'], color='xkcd:orange' , linewidth=1.0, linestyle='-' , label='G_transition FeII' )\n\t\tax3.plot(comp_dict_no_outflow['wave']['comp'], comp_dict_no_outflow['Z_feii_template']['comp'], color='xkcd:rust' , linewidth=1.0, linestyle='-' , label='Z_transition FeII' )\n\tif ('br_Ha' in comp_dict_outflow):\n\t\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['br_Ha']['comp'] , color='xkcd:turquoise' , linewidth=1.0, linestyle='-' , label='Br. H-alpha' )\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['na_Ha_core']['comp'] , color='xkcd:dodger blue', linewidth=1.0, linestyle='-' , label='Core comp.' 
)\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['na_nii6549_core']['comp'] , color='xkcd:dodger blue', linewidth=1.0, linestyle='-' )\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['na_nii6585_core']['comp'] , color='xkcd:dodger blue', linewidth=1.0, linestyle='-' )\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['na_sii6718_core']['comp'] , color='xkcd:dodger blue', linewidth=1.0, linestyle='-' )\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['na_sii6732_core']['comp'] , color='xkcd:dodger blue', linewidth=1.0, linestyle='-' )\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['na_Ha_outflow']['comp'] , color='xkcd:magenta' , linewidth=1.0, linestyle='-', label='Outflow comp.')\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['na_nii6549_outflow']['comp'], color='xkcd:magenta' , linewidth=1.0, linestyle='-' )\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['na_nii6585_outflow']['comp'], color='xkcd:magenta' , linewidth=1.0, linestyle='-' )\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['na_sii6718_outflow']['comp'], color='xkcd:magenta' , linewidth=1.0, linestyle='-' )\n\tax3.plot(comp_dict_outflow['wave']['comp'], comp_dict_outflow['na_sii6732_outflow']['comp'], color='xkcd:magenta' , linewidth=1.0, linestyle='-' )\n\tax3.axvline(6549.86, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax3.axvline(6564.61, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax3.axvline(6585.27, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax3.axvline(6718.29, color='xkcd:white' , linewidth=0.5, linestyle='--') \n\tax3.axvline(6732.67, color='xkcd:white' , linewidth=0.5, linestyle='--') \n\t# ax3.plot(comp_dict_outflow['wave']['comp'], 1*comp_dict_outflow['noise']['comp'], color='xkcd:dodger blue' , linewidth=0.5, linestyle='--')\n\t# ax3.plot(comp_dict_outflow['wave']['comp'], 2*comp_dict_outflow['noise']['comp'], color='xkcd:lime green' , linewidth=0.5, linestyle='--')\n\t# ax3.plot(comp_dict_outflow['wave']['comp'], 3*comp_dict_outflow['noise']['comp'], color='xkcd:orange red' , linewidth=0.5, linestyle='--')\n\tax3.set_ylabel(r'$f_\\lambda$ ($10^{-17}$ erg cm$^{-2}$ s$^{-1}$ $\\rm{\\AA}^{-1}$)')\n\tax3.set_xticklabels([])\n\tax3.legend(loc='upper left',fontsize=6)\n\tax3.set_xlim(np.min(comp_dict_outflow['wave']['comp']),np.max(comp_dict_outflow['wave']['comp']))\n\tax3.set_ylim(0.0,np.max(comp_dict_outflow['model']['comp'])+3*np.median(comp_dict_outflow['noise']['comp']))\n\tax3.set_title('Outflow Model')\n\t# Outflow Residuals\n\tax4.plot(comp_dict_outflow['wave']['comp'],3*(comp_dict_outflow['data']['comp']-comp_dict_outflow['model']['comp']), color='xkcd:white' , linewidth=0.5, linestyle='-')\n\tax4.axvline(6549.86, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax4.axvline(6564.61, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax4.axvline(6585.27, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax4.axvline(6718.29, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax4.axvline(6732.67, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax4.axhline(0.0, color='xkcd:white' , linewidth=0.5, linestyle='--')\n\tax4.plot(comp_dict_outflow['wave']['comp'], 3*1*comp_dict_outflow['noise']['comp'], color='xkcd:bright aqua' , linewidth=0.5, linestyle='-')\n\t# ax4.plot(comp_dict_outflow['wave']['comp'], 3*2*comp_dict_outflow['noise']['comp'], color='xkcd:lime green' , linewidth=0.5, linestyle='--')\n\t# 
ax4.plot(comp_dict_outflow['wave']['comp'], 3*3*comp_dict_outflow['noise']['comp'], color='xkcd:orange red' , linewidth=0.5, linestyle='--')\n\tax4.set_xlabel(r'$\\lambda_{\\rm{rest}}$ ($\\rm{\\AA}$)')\n\tax4.set_ylabel(r'$\\Delta f_\\lambda$')\n\tax4.set_xlim(np.min(comp_dict_outflow['wave']['comp']),np.max(comp_dict_outflow['wave']['comp']))\n\tax4.set_ylim(0.0-9*np.std(comp_dict_outflow['resid']['comp']),ax3.get_ylim()[1])\n \n\tfig.tight_layout()\n\tplt.savefig(run_dir+'outflow_test.pdf',fmt='pdf',dpi=150)\n\n\tplt.close()\n\t# Collect garbage\n\tdel ax1\n\tdel ax2\n\tdel ax3\n\tdel ax4\n\tdel fig \n\tdel comp_dict_outflow\n\tdel comp_dict_no_outflow\n\tgc.collect()\n\n\treturn None", "def plot_changes(video, outputdir):\n plotname = os.path.splitext(os.path.basename(video))[0]+\"_plot_frames.png\"\n x, y = get_frame_difference(video)\n fig, ax = plt.subplots()\n ax.plot(x, y)\n ax.set(xlabel='Frame', ylabel='Difference',\n title='Frame differences over time')\n ax.grid()\n\n fig.savefig(os.path.join(outputdir, plotname))\n # plt.show()", "def _print_per_target_comparison(self, results_filename, label):\n sns.set_context('talk')\n sns.set_style(\"white\")\n plt.figure(figsize=(15, 11))\n examples = ['1.4', '2.4', '3.8', '4.1', '5.2']\n for key in examples:\n plt.plot(list(range(1, 101)), (np.asarray(self._matches_by_sent[key]) * 100)[:100], label=key)\n plt.legend(title='SDG Target', bbox_to_anchor=(1.1, 1.2), loc=1, borderaxespad=10)\n plt.title('Percent Matches Vs. Number of Sentences by Target - ' + label)\n plt.xlabel('Number of Sentences')\n plt.ylabel('Percent Matches with Policy Experts')\n plt.yticks(np.arange(0, 105, 10))\n plt.savefig(results_filename + ' - target comparison.jpg')\n plt.close()", "def losses_accuracies_plots(train_losses, train_acc, test_losses, test_acc,plot_title=\"Loss, train acc, test acc\",step=100):\n \n training_iters = len(train_losses)\n # iters_steps\n iter_steps = [step *k for k in range(training_iters)]\n\n imh = plt.figure(1, figsize=(15, 14), dpi=160)\n # imh.tight_layout()\n # imh.subplots_adjust(top=0.88)\n\n final_acc = test_acc[-1]\n img_title = \"{}, test acc={:.4f}\".format(plot_title,final_acc)\n imh.suptitle(img_title)\n plt.subplot(221)\n #plt.plot(iter_steps,losses, '-g', label='Loss')\n plt.semilogy(iter_steps, train_losses, '-g', label='Trn Loss')\n plt.title('Train Loss ')\n plt.subplot(222)\n plt.plot(iter_steps, train_acc, '-r', label='Trn Acc')\n plt.title('Train Accuracy')\n\n plt.subplot(223)\n plt.semilogy(iter_steps, test_losses, '-g', label='Tst Loss')\n plt.title('Test Loss')\n plt.subplot(224)\n plt.plot(iter_steps, test_acc, '-r', label='Tst Acc')\n plt.title('Test Accuracy')\n\n\n #plt.tight_layout()\n plt.subplots_adjust(top=0.88)\n\n plot_file = \"./plots/{}.png\".format(plot_title.replace(\" \",\"_\"))\n plt.savefig(plot_file)\n plt.show()", "def old_run_plots(self,params):\n lw = 2\n \n \n # Plot voltage at soma and dendrites (apical proximal and distal)\n pylab.figure(1)\n pylab.plot(h.tvec,h.vsoma,lw=lw,c='k',label='v_soma')\n #pylab.plot(h.tvec,h.vdend,lw=lw,c='r',label='v_dend')\n #pylab.plot(h.tvec,h.vdend2,lw=lw,c='b',label='v_dend2')\n pylab.xlim(h.tstart-20,h.tstop+20)\n pylab.ylim(-120,40)\n # If optogenetics were included, draw blocks for times that illumination occurred in appropriate colours \n if params.has_key('opdict'):\n for (opsin,opexpressions) in params['opdict'].iteritems():\n for opexp in opexpressions:\n if opexp[0] is None or opexp[0].lower() == 'none':\n continue\n for pulsenum in 
range(opexp[1][6]): \n pulse_start = opexp[1][2]+pulsenum*(opexp[1][3]+opexp[1][4])\n self.plot_optogenetic(opsin,pulse_start,pulse_start+opexp[1][3],yoffset=40)\n # once we've plotted an activation for one area, that should be sufficient i.e. we don't need to plot apical *and* soma, only the first \n # TODO: think how to extend this to allow for different areas to be indicated i.e. ChR in soma vs ChR in apical dendritic arbor\n break\n pylab.title('V')\n ax = pylab.gca()\n for loc, spine in ax.spines.iteritems():\n if loc in ['left','bottom']:\n spine.set_position(('outward',5))\n ax.tick_params(direction='out')\n elif loc in ['right','top']:\n spine.set_color('none') \n pylab.legend()\n pylab.xlabel('time (ms)')\n pylab.ylabel('V (mV)')\n \n \"\"\"\n # Plot currents at soma and i_syn\n pylab.figure(2)\n pylab.plot(h.tvec,h.isyn,lw=lw,c='g',label='i_syn')\n pylab.plot(h.tvec,h.isoma,lw=lw,c='k',label='i_soma')\n if params.has_key('opdict'):\n for (opsin,opexpressions) in params['opdict'].iteritems():\n for opexp in opexpressions:\n if opexp[0] is None or opexp[0].lower() == 'none':\n continue\n h('objref list_i_opsin')\n h('list_i_opsin = new List()')\n h('list_i_opsin.append(i_%s)'%opsin)\n pylab.plot(h.tvec,h.list_i_opsin.object(0),color=opsin_dict[opsin]['color'],label='i_%s'%opsin)\n break\n pylab.xlim(h.tstart-20,h.tstop+20)\n #pylab.ylim(-3,6)\n pylab.title('I')\n ax = pylab.gca()\n for loc, spine in ax.spines.iteritems():\n if loc in ['left','bottom']:\n spine.set_position(('outward',5))\n ax.tick_params(direction='out')\n elif loc in ['right','top']:\n spine.set_color('none') \n pylab.legend()\n pylab.xlabel('time (ms)')\n pylab.ylabel('I (nA)')\n \"\"\"\n \n if params['expname'] is not None:\n savename = params['expname']\n pylab.figure(1)\n pylab.savefig(savename+'_voltage.png')\n #pylab.figure(2)\n #pylab.savefig(savename+'_current.png')\n print \"Saved figures under %s*.png\"%savename\n pylab.close('all')\n else:\n pylab.show()", "def generate_training_plots(scores_global, episode_durations, attributes):\n fig = plt.figure()\n ax_score = fig.add_subplot(413)\n plt.plot(np.arange(1, len(scores_global) + 1), scores_global)\n plt.ylabel('Accum Rewards (Score)')\n plt.xlabel('Episode #')\n max_y = np.max(scores_global)\n max_y = (int(max_y / 10) + 1) * 10\n plt.ylim(0, max_y)\n grid_step = 10\n ax_score.set_yticks(range(10, max_y, grid_step), minor=False)\n ax_score.yaxis.grid(True, which=\"major\")\n\n ax_duration = fig.add_subplot(414)\n num_episodes = len(episode_durations)\n plt.plot(np.arange(1, num_episodes + 1), episode_durations)\n plt.ylabel('Training Duration [s]')\n plt.xlabel('Episode #')\n title = generate_plot_name(attributes)\n fig.suptitle(title, fontsize=7)\n plt.show()", "def _print_per_sdg_comparison(self, results_filename, label):\n sns.set_context('talk')\n sns.set_style(\"white\")\n plt.figure(figsize=(15, 11))\n for key in range(1, 6):\n plt.plot(list(range(1, 101)), (np.asarray(self._avg_sdg_matches_by_sent[key]) * 100)[:100],\n label='SDG ' + str(key))\n plt.plot(list(range(1, 101)), (np.asarray(self._avg_matches_by_sent) * 100)[:100], label='SDG Avg')\n plt.legend(title='SDG', bbox_to_anchor=(1.1, 1.2), loc=1, borderaxespad=10)\n plt.title('Percent Matches Vs. 
Number of Sentences by SDG - ' + label)\n plt.xlabel('Number of Sentences')\n plt.ylabel('Percent Matches with Policy Experts')\n plt.yticks(np.arange(0, 105, 10))\n plt.savefig(results_filename + ' - SDG comparison.jpg')\n plt.close()", "def __visual_training__(self):\n\n\t\t# Import only relevant libraries for Jupyter Notebook if needed\n\t\tfrom IPython import display\n\n\t\tfor i in range(len(self.b_history)):\n\t\t\tplt.close()\n\t\t\tplt.clf()\n\t\t\tplt.figure(figsize=(12, 10))\n\n\t\t\tplt.scatter(self.X, self.y, c='b', label=\"Training set\")\n\t\t\tplt.plot(self.X, np.add(np.multiply(self.X, self.m_history[i]), self.b_history[i]), c='r',\n\t\t\t label=\"Regression line\")\n\t\t\tplt.title(\"Linear Regression - Training process\")\n\t\t\tplt.xlabel(\"Feature value\")\n\t\t\tplt.ylabel(\"Target value\")\n\t\t\tplt.legend(framealpha=1, frameon=True)\n\n\t\t\tdisplay.display(plt.gcf())\n\t\t\tdisplay.display()\n\t\t\ttime.sleep(1)\n\t\t\tdisplay.clear_output(wait=True)", "def display_graph(self, save_graph = True):\r\n fig, axs = plt.subplots(2, figsize = (5,8))\r\n fig.suptitle(\"Graph to show changes in errors with iterations or epoch\")\r\n axs[0].plot(self.total_number_of_iteration[:-1], self.collection_of_errors[:-1])\r\n axs[0].set(xlabel =\"Total Number of iteration\", ylabel = \"Errors\")\r\n axs[1].plot(self.total_number_of_Epoch, self.collection_of_MSE)\r\n axs[1].set(xlabel =\"Total Number of Epoch\", ylabel = \"Mean Squared Error\")\r\n if save_graph:\r\n fig_name = str(datetime.datetime.now().strftime(\"%Y-%m-%d\")) + str(datetime.datetime.now().strftime(\"%I-%M-%S %p\"))+\".png\"\r\n fig.savefig(os.path.abspath(os.getcwd())+\"\\\\\"+fig_name)\r\n plt.show()", "def plot(self):\n\n fig, ax = plt.subplots()\n\n for cfg in self.graph.settings['topologies']:\n count_all = 2.0\n count_current = count_all\n if 'colormap' in cfg:\n color = getattr(mpl.cm, cfg['colormap'])\n else:\n color = lambda _: cfg.get('color', 'black')\n\n for run in self.runs:\n if run.orig.settings['topology'].name in cfg['topology']:\n count_all += 1\n\n for run in self.runs:\n if run.orig.settings['topology'].name not in cfg['topology']:\n continue\n\n # Load dataset\n data = run.get_dataset(\"routing-pair_wise_ping-stretch-*.csv\")\n data = data['stretch'].dropna()\n\n # Compute ECDF\n ecdf = sm.distributions.ECDF(data)\n\n legend_label = cfg.get('legend', None)\n variable = cfg.get('variable', None)\n if legend_label and variable:\n legend_label = legend_label % run.orig.settings[variable]\n\n ax.plot(ecdf.x, ecdf.y, drawstyle='steps', linewidth=2, color=color(count_current / count_all),\n label=legend_label)\n\n count_current += 1\n\n ax.set_xlabel('Razteg poti')\n ax.set_ylabel('Kumulativna verjetnost')\n ax.grid()\n ax.axis((0.5, None, 0, 1.01))\n #self.convert_axes_to_bw(ax)\n\n legend = ax.legend(loc='lower right')\n if self.settings.GRAPH_TRANSPARENCY:\n legend.get_frame().set_alpha(0.8)\n\n fig.savefig(self.get_figure_filename())", "def _draw_results(self):\n fig, ax1 = plt.subplots()\n ax2 = ax1.twinx()\n self._draw_bar_axes(ax1)\n self._draw_ticks_title(ax1)\n self._draw_times_axes(ax2)\n self._draw_legend(ax1, ax2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The centering is done by directly averaging the shifted and weighted data.
def perform_centering(self):
    centered_data = self.data - np.repeat(self.mean_data[:, np.newaxis], self.data.shape[1], axis=1) + self.weight
    return centered_data
[ "def recalculate_center(self):\n # if we don't have any assigned inputs after this K-Means epoch, leave\n # the center where it was\n if self.assigned_inputs:\n new_center = []\n for dimension in xrange(len(self.assigned_inputs[0])):\n total = reduce(operator.add,\n [x[dimension] for x in self.assigned_inputs])\n new_center.append(float(total) / len(self.assigned_inputs))\n self.center = new_center", "def center(self) -> Tuple[np.ndarray, float]:\n return self._augment(Frame._img(self.center_img_path), self.steering)", "def init_center_c(self, data,train_model:Model, eps=0.1):\n output = train_model.predict(data)\n c = np.zeros(output.shape[0])\n c = np.sum(output, axis=0)\n c /= output.shape[0]\n # If c_i is too close to 0, set to +-eps. Reason: a zero unit can be trivially matched with zero weights.\n c[(abs(c) < eps) & (c < 0)] = -eps\n c[(abs(c) < eps) & (c > 0)] = eps\n return c", "def _weighted_cluster_center(self, X):\n # Number of non-noise clusters\n n_clusters = len(set(self.labels_) - {-1, -2})\n mask = np.empty((X.shape[0],), dtype=np.bool_)\n make_centroids = self.store_centers in (\"centroid\", \"both\")\n make_medoids = self.store_centers in (\"medoid\", \"both\")\n\n if make_centroids:\n self.centroids_ = np.empty((n_clusters, X.shape[1]), dtype=np.float64)\n if make_medoids:\n self.medoids_ = np.empty((n_clusters, X.shape[1]), dtype=np.float64)\n\n # Need to handle iteratively seen each cluster may have a different\n # number of samples, hence we can't create a homogeneous 3D array.\n for idx in range(n_clusters):\n mask = self.labels_ == idx\n data = X[mask]\n strength = self.probabilities_[mask]\n if make_centroids:\n self.centroids_[idx] = np.average(data, weights=strength, axis=0)\n if make_medoids:\n # TODO: Implement weighted argmin PWD backend\n dist_mat = pairwise_distances(\n data, metric=self.metric, **self._metric_params\n )\n dist_mat = dist_mat * strength\n medoid_index = np.argmin(dist_mat.sum(axis=1))\n self.medoids_[idx] = data[medoid_index]\n return", "def center_vertices_avg(self):\n vertex_array = np.array(self.vertices_)\n centroid = np.mean(vertex_array, axis = 0)\n vertex_array_cent = vertex_array - centroid\n self.vertices_ = vertex_array_cent.tolist()", "def mean_shift(self, *args, **kwargs):\r\n func = self._module.mean_shift\r\n data = self._data\r\n cluster_centers, labels = func(data.values, *args, **kwargs)\r\n labels = self._constructor_sliced(labels, index=data.index)\r\n return cluster_centers, labels", "def _XModelWeightedMean(self):\n raise NotImplementedError", "def wghtdavg(x,w):\n \n m = sum(x*w)/sum(w);\n \n return m", "def weighted_centers(self):\n return [d.weighted_centers for d in self]", "def compute_centers(self):\n for cluster_ in range(self.number_clusters): # type: ignore\n center = np.mean(self.data[self.model.labels_ == cluster_], axis=0) # type: ignore\n if center.isnull().values.any(): # type: ignore\n self.centers[cluster_] = center.fillna(0) # type: ignore\n else:\n self.centers[cluster_] = center", "def compute_centers(self):\n for img in self.images:\n for i in self.images_superpixels[img]:\n # Retrieve all indices where superpixel label equals i\n indices = np.where(self.images_segmented[img] == i)\n # Approximate the center by the medians of the indices in x and y dimension\n self.images_superpixels_center[img].append((np.median(indices[1]), np.median(indices[0])))", "def _center(self):\n\n txf = vtk.vtkTransform()\n txf.PostMultiply()\n txf.Translate(-1*self.CoM)\n \n txfPoly = vtk.vtkTransformPolyDataFilter()\n 
txfPoly.SetInputData(self.Surf)\n txfPoly.SetTransform(txf)\n txfPoly.Update()\n \n self.Surf = txfPoly.GetOutput()\n self._update_q()\n self._calc_mass_prop()\n self._centered = True", "def _new_centers(self, X, closest, K):\n mus = np.zeros((K, X.shape[1]))\n closest = np.array(closest)\n global_mean = None\n for i in range(K):\n idx = closest == i\n X_ = X[idx, :]\n if X_.shape[0] == 0: # No point was closest to this centre!\n if global_mean is None:\n global_mean = np.mean(X, axis=0)\n mu = global_mean # TODO: Correct solution?\n else:\n mu = np.mean(X_, axis=0)\n mus[i, :] = mu\n\n return mus", "def ts_center(images, kernel):\n counts = np.asanyarray(images['counts'])\n background = np.asanyarray(images['background'])\n kernel = kernel / kernel.sum()\n\n assert counts.shape == kernel.shape\n assert background.shape == kernel.shape\n\n C0 = stats.cash(counts, background)\n out = fit_amplitude(counts, background, kernel)\n C1 = stats.cash(counts, background + out['amplitude'] * kernel)\n # Cash is a negative log likelihood statistic,\n # thus the minus in the TS formula here\n out['ts'] = - 2 * (C1 - C0)\n return out", "def x_center(self) -> float:\n return self.x1 + self.width / 2", "def __compute_weighted_centroid(self, pts: np.ndarray,\n weights: np.ndarray) -> np.ndarray:\n numerator = np.dot(weights, pts)\n denominator = np.sum(weights)\n return numerator / denominator", "def center_scale(self, X, scale=False):\n # center\n x_mean = X.mean(axis=0)\n Xcs = X-x_mean\n # scale\n if scale:\n x_std = Xcs.std(axis=0, ddof=1)\n x_std[x_std == 0.0] = 1.0\n Xcs /= x_std\n else:\n x_std = np.ones(Xcs.shape[1])\n return Xcs, x_mean, x_std", "def weightedmean(x,w):\n check_dim(x,w)\n \n num = sum([x[i]*w[i] for i in range(len(w))])\n den = sum([w[i] for i in range(len(w))])\n\n return num/den", "def preprocess_training_data(training_data):\n scale_pixels(training_data)\n\n #zero centre the data\n mean_image = np.mean(training_data, axis=0)\n training_data -= mean_image\n return mean_image" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate the Cn2 matrix. The jth column of the matrix records all the possible candidates for the jth rater, so that from the jth column we can acquire all the possible unrepeated combinations for the jth rater.
def generate_cnk_matrix(self):
    total = self.rator_number
    cnk_matrix = np.zeros((total - 1, total))
    for column in range(total):
        cnk_matrix[:, column] = np.concatenate((np.where(self.combination_list[:, 0] == column)[0], np.where(self.combination_list[:, 1] == column)[0]))
    return cnk_matrix.astype(int)
[ "def generateMatrix(self, n: int) -> List[List[int]]:\n i, j, curr = 0, 0, 1\n direction = (0, 0)\n visited = set()\n matrix = [[1] * n for _ in range(n)]\n\n while len(visited) < n ** 2:\n y, x = i + direction[0], j + direction[1]\n if 0 <= y < n and 0 <= x < n and (y, x) not in visited:\n matrix[y][x] = curr\n visited.add((y, x))\n i, j = y, x\n curr += 1\n else:\n if direction == (0, 1):\n direction = (1, 0)\n elif direction == (1, 0):\n direction = (0, -1)\n elif direction == (0, -1):\n direction = (-1, 0)\n else:\n direction = (0, 1)\n\n if direction == (0, 0):\n direction = (0, 1)\n return matrix", "def _build_j_mtx(self):\n size = 2*self.n\n j_mtx = np.zeros((size,size))\n # selecting indices\n inds = np.arange(size-1)\n # selecting upper-diagonal indices\n j_mtx[inds, inds+1] = -1\n # selecting lower-diagonal indices\n j_mtx[inds+1, inds] = 1\n return j_mtx", "def generateWeightMatrixForMCC(self):\n num_real_states = self.getNumPP() \n w = [ [0.] * num_real_states for x in range(num_real_states)]\n \n for s1_id, s1 in enumerate(self.states[:num_real_states]):\n for s2_id, s2 in enumerate(self.states[:num_real_states]):\n #if ground truth is a normal state\n if s1.inheritance_pattern == (1,1):\n #the same state\n if s1_id == s2_id:\n w[s1_id][s2_id] = 0.\n #recombination\n elif s1.inheritance_pattern == s2.inheritance_pattern:\n w[s1_id][s2_id] = 0.\n #other inheritance pattern\n else:\n w[s1_id][s2_id] = 1.\n #else if ground truth is a CNV state\n else:\n #the same state\n if s1_id == s2_id:\n w[s1_id][s2_id] = 1.\n #recombination\n elif s1.inheritance_pattern == s2.inheritance_pattern:\n w[s1_id][s2_id] = 0.5\n #other inheritance pattern\n else:\n w[s1_id][s2_id] = 1.\n \n# for i in range(len(w)):\n# for j in range(len(w[0])):\n# print w[i][j],\n# print ''\n \n return w", "def _generate_all_matrix_combinations(self, x, y):\n matrix_list = []\n for i in itertools.product([x, y], repeat=9):\n mat = np.array(i).reshape(3,3)\n matrix_list.append(mat)\n return matrix_list", "def number_to_combination(self, n):\n if n not in self.number_to_combination_cache:\n result = []\n i = n\n for _ in range(self.num_sketches):\n result.append(i % 2)\n i //= 2\n self.number_to_combination_cache[n] = numpy.array(result)\n return self.number_to_combination_cache[n]", "def get_combinations(n, r) :\n return list(itertools.combinations(range(n), r))", "def ceros_forma1(m, n):\n matriz = list()\n for r in range(m):\n renglon = list()\n for i in range(n):\n renglon.append(0)\n matriz.append(renglon)\n return matriz", "def get_coupling_matrix(self, J):\n\n a = np.array([[4,1,0,1,4]])\n r2 = a + a.T\n W = np.zeros((5,5))\n W[r2 == 1] = J[0]\n W[r2 == 2] = J[1]\n W[r2 == 4] = J[2]\n W[r2 == 5] = J[3]\n return W", "def cofactor_matrix(self):\n return Matrix(self.row_n, self.col_n, [self.cofactor(i, j) for i in range(1, self.row_n + 1) for j in range(1, self.col_n + 1)])", "def combinations(matrix: Matrix) -> Matrix:\n\n return np \\\n .array(np.meshgrid(*matrix)) \\\n .T \\\n .reshape(-1, len(matrix)) \\\n .tolist()", "def construct_Jn(n):\n J = np.zeros((n, n))\n for i in range(n-1):\n J[i ][i+1] = gamma_j(i+2)\n J[i+1][i ] = gamma_j(i+2)\n return J", "def _combinations(N, M):\n # TODO test\n index = range( M )\n while index[0] <= N-M:\n yield index[:]\n index[ M-1 ] += 1\n if index[ M-1 ] >= N:\n #now we hit the end, need to increment other positions than last\n #the last position may reach N-1, the second last only N-2 etc.\n j = M-1\n while j >= 0 and index[j] >= N-M+j: j -= 1\n #j contains the value of 
the index that needs to be incremented\n index[j] += 1\n k = j + 1\n while k < M: index[k] = index[k-1] + 1; k += 1;", "def make_random_circuit(n_rows, n_cols, depth):\r\n return cirq.experiments.generate_boixo_2018_supremacy_circuits_v2_grid(\r\n n_rows=n_rows,\r\n n_cols=n_cols,\r\n cz_depth=depth - 2, # Account for beginning/ending Hadamard layers\r\n seed=SEED)", "def spiral_order_book_sol2(square_matrix: [[int]]) -> [int]:", "def d2_matrix(n, diff=1):\n return np.dot(d1_matrix(n=n - diff, diff=diff), d1_matrix(n=n, diff=diff))", "def matrix_chain(d):\n n = len(d) - 1\n N = [[0] * n for i in range(n)]\n for b in range(1, n):\n for i in range(n-b):\n j = i + b\n N[i][j] = min(N[i][k]+N[k+1][j]*d[i]*d[k+1]*d[j+1] for k in range(i,j))\n return N", "def sample_matrix(self):\n posteriors = self.get_all_posteriors()\n combinations = [self.number_to_combination(i)\n for i in range(2 ** self.num_sketches)]\n result = []\n combination_range = list(range(2 ** self.num_sketches))\n for posterior in posteriors:\n row = combinations[numpy.random.choice(combination_range, p=posterior)]\n result.append(row)\n return numpy.array(result)", "def ex5_DiamondPattern():\n N = int(input())\n counter = N\n print(1)\n for row in range(1, 2*N - 1):\n res = [1]\n if row < N:\n for column in range(0, 2*row):\n res.append(column + 2)\n else:\n counter -=1\n for column in range(2, 2*counter):\n res.append(column)\n \n res = list(map(str, res))\n print(' '.join(res))", "def constructRateMatrix(self):\n # initialize the rate matrix with proper dimension\n self.K = np.zeros((self.num_state, self.num_state), dtype=float) #The dimension of the rate matrix is basically equal to the total number of states\n # loop through cofactor_id in adjacency matrix\n \"\"\"\n Take the adjacency matrix which is weighted by the distance to construct the full rate matrix\n \"\"\"\n for i in range(self.num_cofactor):\n for j in range(i+1, self.num_cofactor): # These two \"for\" loops take care of (upper triangular - diagonal) part of the adjacency matrix\n if self.D[i][j] != 0: # cofactor i and j are connected! !=:not equal to\n cof_i = self.id2cofactor[i]\n cof_f = self.id2cofactor[j] # Finding the name of cofactor of the ijth of the adjacency matrix\n dis = self.D[i][j] #Distance between cof_i and cof_f is the ij th element of the adjacency matrix\n \"\"\"\n Looping through all the possible transfers from donor to acceptor to find their reduction potentials to get deltaG of that transfer. \n You use that deltaG to get the Marcus rate of that transfer, and then add that rate constant to the rate matrix.\n \"\"\"\n for donor_state in range(1, cof_i.capacity+1): #This is correct!!!! Python is weird #cof.capacity=maximum number of electrons the cofactor can occupy\n for acceptor_state in range(0, cof_f.capacity): #This is correct!!!! Python is weird\n deltaG = cof_i.redox[donor_state-1] - cof_f.redox[acceptor_state] #This is correct!!!! Python is weird\n k = self.ET(deltaG, dis, self.reorgE, self.beta, self.V)\n self.connectStateRate(i, donor_state, j, acceptor_state, k, deltaG,1) #Adding the rate constant to rate matrix. The last parameter is 1 because these are all 1-electron transfers!\n # loop through reservoirInfo to add reservoir-related rate\n for reservoir_id, info in self.reservoirInfo.items():\n name, cofactor, redox_state, num_electron, deltaG, rate = info\n cof_id = self.cofactor2id[cofactor]\n final_redox_state = redox_state - num_electron\n self.connectReservoirRate(cof_id, redox_state, final_redox_state, rate, deltaG)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the CCC for all the pairs from the combination list.
def calculate_paired_ccc(self):
    ccc = np.zeros((self.combination_list.shape[0]))
    for index in range(len(self.combination_list)):
        ccc[index] = self.calculate_ccc(self.data[self.combination_list[index, 0], :], self.data[self.combination_list[index, 1], :])
    return ccc
[ "def combo_sums(numlist, c):\n comb_list = combinations(numlist, c) \n comb_sums = [sum(comb) for comb in comb_list]\n return comb_sums\n # return list(set(comb_sums))", "def combo_sums(numlist, c):\n comb_list = combinations(numlist, c)\n comb_sums = [sum(comb) for comb in comb_list]\n return comb_sums\n # return list(set(comb_sums))", "def caculateCoeff(valueList):\r\n serialNos = np.arange(0, len(valueList))\r\n d2Array = np.vstack([serialNos, valueList])\r\n coeff = np.corrcoef(d2Array)\r\n return round(coeff[0][1], 2)", "def c_for_benefit_score(pairs):\n \n # mapping pair outcomes to benefit\n obs_benefit_dict = {\n (0, 0): 0,\n (0, 1): -1,\n (1, 0): 1,\n (1, 1): 0,\n }\n \n # compute observed benefit for each pair\n obs_benefit = [obs_benefit_dict[(i[1], j[1])] for (i, j) in pairs]\n\n # compute average predicted benefit for each pair\n pred_benefit = [np.mean([i[0], j[0]]) for (i, j) in pairs]\n\n concordant_count, permissible_count, risk_tie_count = 0, 0, 0\n\n # iterate over pairs of pairs\n for i in range(len(pairs)):\n for j in range(i + 1, len(pairs)):\n \n # if the observed benefit is different, increment permissible count\n if obs_benefit[i] != obs_benefit[j]:\n\n # increment count of permissible pairs\n permissible_count = permissible_count +1\n \n # if concordant, increment count\n \n if ((pred_benefit[i] > pred_benefit[j] and obs_benefit[i] > obs_benefit[j]) or (pred_benefit[i] < pred_benefit[j] and obs_benefit[i] < obs_benefit[j])): # change to check for concordance\n \n concordant_count = concordant_count + 1\n\n # if risk tie, increment count\n if (pred_benefit[i] == pred_benefit[j]): #change to check for risk ties\n risk_tie_count = risk_tie_count + 1\n\n\n # compute c-statistic-for-benefit\n cstat = (concordant_count + (0.5 * risk_tie_count)) / permissible_count\n\n return cstat", "def ccw(p1, p2, p3):\n return (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])", "def _comb_short(c, ncov):\n c = np.asanyarray(c)\n return tuple(c.reshape(len(c) // ncov, ncov).sum(0))", "def course_combinations(self):\n\n return itertools.combinations(self.courses, self.quantity)", "def computePointsC(self):\n\n self.P = self.J.dot(self.C)\n self.P0 = self.J0.dot(self.C)", "def corr(c_list, filter):\r\n ans = []\r\n center = math.floor(len(filter) / 2)\r\n for i in range(len(c_list) - len(filter) + 1):\r\n start = 0\r\n end = len(c_list)\r\n temp = c_list[i:i + len(filter)]\r\n while start < end - 1:\r\n mat = []\r\n for i in range(len(temp)):\r\n mat.append(temp[i][start:start + len(filter)])\r\n if len(mat[0]) != len(filter):\r\n start += 1\r\n continue\r\n else:\r\n start += 1\r\n mult = 0\r\n for i in range(len(mat)):\r\n for j in range(len(mat[i])):\r\n mult += mat[i][j] * filter[i][j]\r\n mat[center][center] = mult\r\n ans.append(mult)\r\n return ans", "def cal_C(a1, a2, dt):\n update()\n\n # old way of computing cross correlation (incorrect)\n # x = np.cumsum(a1*a2)*dt\n # y = np.cumsum(a1*a1)*dt\n # z = np.cumsum(a2*a2)*dt\n # c = x/(np.power(y, 0.5)*np.power(z, 0.5))\n\n # This new form of computing the cross correlation\n # coefficient fixes normalization problem\n c = np.correlate(a1, a2, 'full')/np.sqrt(np.sum(a1**2)*np.sum(a2**2))\n c = np.max(c)\n\n # x = np.cumsum(a1*a2)*dt\n # y = np.cumsum(np.square(a1))*dt\n # z = np.cumsum(np.square(a2))*dt\n # c = x/(np.power(y, 0.5)*np.power(z, 0.5))\n\n cc = 10*np.amax(c, 0)\n\n # cc = abs(cc)\n return cc", "def compute_node_cc(self, node):\n neighbors = self.find_neighbors(node)\n kv = len(neighbors)\n nv = 
0\n\n if kv < 2:\n return 0\n\n for i in range(kv-1):\n for j in range(i+1, kv):\n if neighbors[j] in self.find_neighbors(neighbors[i]):\n nv += 1\n\n return 2.0*nv/(kv*(kv-1))", "def pairwise_cch(neo_block, unit1=0, unit2=1, binsize=5):\n\n for i, trial in enumerate(range(len(neo_block.segments))):\n st1 = neo_block.segments[trial].spiketrains[unit1]\n st2 = neo_block.segments[trial].spiketrains[unit2]\n bst1 = BinnedSpikeTrain(st1, binsize=binsize*pq.ms)\n bst2 = BinnedSpikeTrain(st2, binsize=binsize*pq.ms)\n\n cch = cross_correlation_histogram(bst1, bst2, border_correction=True)\n times = cch[1]\n cch = cch[0].as_array()\n\n if i == 0:\n CCH = cch\n else:\n CCH = np.concatenate((CCH, cch), axis=-1)\n\n times = times\n\n return times, np.mean(CCH, axis=-1)", "def make_trading_pairs(currency_list):\n \n pair_list = list()\n \n for i in range(len(currency_list)):\n for j in(range(len(currency_list))):\n if i != j:\n pair = currency_list[i] + currency_list[j]\n pair_list.append(pair)\n \n return pair_list", "def internal_coproduct(self):\n F = self.parent()\n F2 = F.tensor(F)\n result = F2.zero()\n from sage.categories.tensor import tensor\n from sage.combinat.permutation import Permutation\n for I, a in self:\n # We must add a * \\Delta^\\times(F_I) to result.\n from sage.combinat.permutation import descents_composition_last\n pi = descents_composition_last(I)\n n = I.size()\n for sigma in Permutations(n):\n sigma_inverse = sigma.inverse()\n # If the __mul__ of permutations wasn't such a mess,\n # the next line could be as simple as\n # tau = pi * sigma_inverse.\n tau = Permutation([pi(i) for i in sigma_inverse])\n result += a * tensor([F(sigma.descents_composition()),\n F(tau.descents_composition())])\n return result", "def course_combinations(self):\n \n course_combinations_list = [child.course_combinations for child in self.children]\n \n if self.relationship == \"and\":\n course_combinations_prod = itertools.product(*course_combinations_list)\n course_combinations_iter = itertools.starmap(self._join_product, course_combinations_prod)\n else:\n course_combinations_iter = itertools.chain(*course_combinations_list)\n\n return course_combinations_iter", "def computeCindices(self):\n\n self.surf_index_C = PUBSlib.computesurfindices(self.nsurf, self.nedge, self.ngroup, self.surf_edge, self.edge_group, self.group_m)\n self.edge_index_C = PUBSlib.computeedgeindices(self.nedge, self.ngroup, self.edge_group, self.group_m)\n self.nC = self.nvert\n self.nC += self.edge_index_C[-1,1]\n self.nC += self.surf_index_C[-1,1]\n\n if self.printInfo:\n print '# Control points =',self.nC", "def getCCCId(index):\n ccc = index % NUM_GCCC\n tem = int(index/NUM_GCCC)\n\n return (tem,ccc)", "def calculate_concentrations_Cs_p(concentrations_Cs_ip):\n return concentrations_Cs_ip.sum(axis=1) #TODO need to transpose?", "def CreateColorGradient(c0, c1, num):\n return [tuple(map(lambda (x0,x1): int(x0*(1-j) + x1*j), zip(c0, c1)))\n for j in np.linspace(0.0, 1.0, num)]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the interrater CCC agreement.
def calculate_rator_wise_agreement(self):
    ccc_agreement = np.zeros(self.rator_number)
    for index in range(self.rator_number):
        ccc_agreement[index] = np.mean(self.ccc[self.cnk_matrix[:, index]])
    return ccc_agreement
[ "def calculate_paired_ccc(self):\r\n ccc = np.zeros((self.combination_list.shape[0]))\r\n for index in range(len(self.combination_list)):\r\n ccc[index] = self.calculate_ccc(self.data[self.combination_list[index, 0], :],\r\n self.data[self.combination_list[index, 1], :])\r\n\r\n return ccc", "def find_cc(theoreticalData, experimentalData):\n A=numpy.array(theoreticalData)\n B=numpy.array(experimentalData) \n cc= ( numpy.inner(A,B)/ ( numpy.linalg.norm(A)*numpy.linalg.norm(B) ) )\n print \"Cross-correlation: %5.3f\"%cc", "def cal_C(a1, a2, dt):\n update()\n\n # old way of computing cross correlation (incorrect)\n # x = np.cumsum(a1*a2)*dt\n # y = np.cumsum(a1*a1)*dt\n # z = np.cumsum(a2*a2)*dt\n # c = x/(np.power(y, 0.5)*np.power(z, 0.5))\n\n # This new form of computing the cross correlation\n # coefficient fixes normalization problem\n c = np.correlate(a1, a2, 'full')/np.sqrt(np.sum(a1**2)*np.sum(a2**2))\n c = np.max(c)\n\n # x = np.cumsum(a1*a2)*dt\n # y = np.cumsum(np.square(a1))*dt\n # z = np.cumsum(np.square(a2))*dt\n # c = x/(np.power(y, 0.5)*np.power(z, 0.5))\n\n cc = 10*np.amax(c, 0)\n\n # cc = abs(cc)\n return cc", "def calculate_concentrations_Cs_p(concentrations_Cs_ip):\n return concentrations_Cs_ip.sum(axis=1) #TODO need to transpose?", "def calculate_concentrations_Cs_i(concentrations_Cs_ip):\n return concentrations_Cs_ip.sum(axis=0)", "def calculate_concentration_Cs_seed(concentrations_Cs_seed_p):\n return np.sum(concentrations_Cs_seed_p)", "def computeCindices(self):\n\n self.surf_index_C = PUBSlib.computesurfindices(self.nsurf, self.nedge, self.ngroup, self.surf_edge, self.edge_group, self.group_m)\n self.edge_index_C = PUBSlib.computeedgeindices(self.nedge, self.ngroup, self.edge_group, self.group_m)\n self.nC = self.nvert\n self.nC += self.edge_index_C[-1,1]\n self.nC += self.surf_index_C[-1,1]\n\n if self.printInfo:\n print '# Control points =',self.nC", "def test_cnot_cascade(self):\n\n qr = QuantumRegister(10, \"qr\")\n circuit = QuantumCircuit(qr)\n circuit.cx(qr[0], qr[1])\n circuit.cx(qr[1], qr[2])\n circuit.cx(qr[2], qr[3])\n circuit.cx(qr[3], qr[4])\n circuit.cx(qr[4], qr[5])\n circuit.cx(qr[5], qr[6])\n circuit.cx(qr[6], qr[7])\n circuit.cx(qr[7], qr[8])\n circuit.cx(qr[8], qr[9])\n\n circuit.cx(qr[8], qr[9])\n circuit.cx(qr[7], qr[8])\n circuit.cx(qr[6], qr[7])\n circuit.cx(qr[5], qr[6])\n circuit.cx(qr[4], qr[5])\n circuit.cx(qr[3], qr[4])\n circuit.cx(qr[2], qr[3])\n circuit.cx(qr[1], qr[2])\n circuit.cx(qr[0], qr[1])\n\n passmanager = PassManager()\n # passmanager.append(CommutativeCancellation())\n passmanager.append(\n [CommutationAnalysis(), CommutativeCancellation(), Size(), FixedPoint(\"size\")],\n do_while=lambda property_set: not property_set[\"size_fixed_point\"],\n )\n new_circuit = passmanager.run(circuit)\n expected = QuantumCircuit(qr)\n\n self.assertEqual(expected, new_circuit)", "def c_for_benefit_score(pairs):\n \n # mapping pair outcomes to benefit\n obs_benefit_dict = {\n (0, 0): 0,\n (0, 1): -1,\n (1, 0): 1,\n (1, 1): 0,\n }\n \n # compute observed benefit for each pair\n obs_benefit = [obs_benefit_dict[(i[1], j[1])] for (i, j) in pairs]\n\n # compute average predicted benefit for each pair\n pred_benefit = [np.mean([i[0], j[0]]) for (i, j) in pairs]\n\n concordant_count, permissible_count, risk_tie_count = 0, 0, 0\n\n # iterate over pairs of pairs\n for i in range(len(pairs)):\n for j in range(i + 1, len(pairs)):\n \n # if the observed benefit is different, increment permissible count\n if obs_benefit[i] != obs_benefit[j]:\n\n # increment 
count of permissible pairs\n permissible_count = permissible_count +1\n \n # if concordant, increment count\n \n if ((pred_benefit[i] > pred_benefit[j] and obs_benefit[i] > obs_benefit[j]) or (pred_benefit[i] < pred_benefit[j] and obs_benefit[i] < obs_benefit[j])): # change to check for concordance\n \n concordant_count = concordant_count + 1\n\n # if risk tie, increment count\n if (pred_benefit[i] == pred_benefit[j]): #change to check for risk ties\n risk_tie_count = risk_tie_count + 1\n\n\n # compute c-statistic-for-benefit\n cstat = (concordant_count + (0.5 * risk_tie_count)) / permissible_count\n\n return cstat", "def read_cc(self):\r\n\r\n\t\tprint(\"-------+---------\")\r\n\t\tpkcs11 = PyKCS11.PyKCS11Lib()\r\n\t\tpkcs11.load(lib)\r\n\t\tself.slots =pkcs11.getSlotList()\r\n\t\tfor slot in self.slots:\r\n\t\t\tprint(pkcs11.getTokenInfo(slot))\r\n\t\t\r\n\t\t#slot=pkcs11.getSlotList(tokenPresent=Tru)[0]\r\n\t\tself.session=pkcs11.openSession(slot)\r\n\t\tall_attributes = list(PyKCS11.CKA.keys())\r\n\t\tall_attributes = [e for e in all_attributes if isinstance(e, int)]\r\n\t\t\r\n\t\tobj = self.session.findObjects([(PyKCS11.CKA_LABEL, 'CITIZEN AUTHENTICATION CERTIFICATE')])[0]\r\n\t\tattributes = self.session.getAttributeValue(obj, all_attributes)\r\n\t\tattributes = dict(zip(map(PyKCS11.CKA.get, all_attributes), attributes))\r\n\t\t\r\n\t\tself.certificate=x509.load_der_x509_certificate(bytes(attributes['CKA_VALUE']))\r\n\t\tcc_num = self.certificate.subject.get_attributes_for_oid(NameOID.SERIAL_NUMBER)\r\n\t\tself.private_key_cc = self.session.findObjects([(PyKCS11.CKA_CLASS, PyKCS11.CKO_PRIVATE_KEY), (PyKCS11.CKA_LABEL, 'CITIZEN AUTHENTICATION KEY')])[0]\r\n\t\tself.mechanism = PyKCS11.Mechanism(PyKCS11.CKM_SHA1_RSA_PKCS, None)", "def ccw(p1, p2, p3):\n return (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])", "def euler1232C(q):\n\n st1 = math.sin(q[0])\n ct1 = math.cos(q[0])\n st2 = math.sin(q[1])\n ct2 = math.cos(q[1])\n st3 = math.sin(q[2])\n ct3 = math.cos(q[2])\n\n C = np.identity(3)\n C[0, 0] = ct2 * ct3\n C[0, 1] = ct3 * st1 * st2 + ct1 * st3\n C[0, 2] = st1 * st3 - ct1 * ct3 * st2\n C[1, 0] = -ct2 * st3\n C[1, 1] = ct1 * ct3 - st1 * st2 * st3\n C[1, 2] = ct3 * st1 + ct1 * st2 * st3\n C[2, 0] = st2\n C[2, 1] = -ct2 * st1\n C[2, 2] = ct1 * ct2\n\n return C", "def calculate_C_p(tip_speed_ratio):\n a_min = get_induction_factor(0.0)\n a_max = get_induction_factor(tip_speed_ratio)\n\n # Calculate integral\n integral = lambda a: ((1 - a) * (1 - 2 * a) * (1 - 4 * a) / (1 - 3 * a)) ** 2\n a = np.linspace(a_min, a_max, 100000)\n da = a[1] - a[0]\n dCp = integral(a) * da\n\n Cp = np.sum(dCp) * 24.0 / tip_speed_ratio ** 2\n return Cp", "def _generate_C(self, mute=True):\n\n tstart = clock()\n\n omega = self.omega\n c = self.unit_system['c']\n self.C = np.empty((self.ny, self.nx), dtype='complex')\n\n if self.polarization == 'O':\n self.C = omega*omega/(c*c) * self.deps[2,2]\n\n else:\n S = np.real(self.eps0[0,0])\n D = np.imag(self.eps0[1,0])\n S2 = S*S\n D2 = D*D\n self.C = omega*omega/(c*c) * ( D2*self.deps[0,0] + \\\n 1j*D*S*(self.deps[1,0]-self.deps[0,1]) + S2*self.deps[1,1] ) / S2\n\n tend = clock()\n\n if not mute:\n print('Operator C generated. 
Time used: {:.3}'.format(tend-tstart),\n file=sys.stdout)", "def calculate_concentration_Cs_OA(concentrations_Cs_i):\n return np.sum(concentrations_Cs_i)", "def _calc_concentration(self):\n r_t = self.gas.gas_constant * self._temperature\n total_gas_conc = self._pressure / r_t\n conc = self._mole_fraction * total_gas_conc\n # all_conc = np.copy(conc)\n sat_conc = self.saturation_pressure / r_t\n dry_mole_fraction = np.copy(self.mole_fraction)\n dry_mole_fraction[self.id_pc] = 0.0\n dry_mole_fraction = self._calc_fraction(dry_mole_fraction)\n for i in range(self.n_species):\n if i == self.id_pc:\n conc[self.id_pc] = np.where(conc[self.id_pc] > sat_conc,\n sat_conc, conc[self.id_pc])\n else:\n try:\n conc[i] = \\\n np.where(conc[self.id_pc] > sat_conc,\n (total_gas_conc - sat_conc)\n * dry_mole_fraction[i],\n conc[i])\n except FloatingPointError:\n raise FloatingPointError\n return np.maximum(conc, 0.0)", "def _calculate_coexpression(self, significance_thresh=3):\n # 1. Calculate the PCCs\n self.log(\"Calculating Coexpression\")\n num_bytes_needed = comb(self.shape()[0], 2) * 8\n if num_bytes_needed > psutil.virtual_memory().available:\n raise MemoryError(\"Not enough RAM to calculate co-expression network\")\n # pass in a contigious array to the cython function to calculate PCCs\n pccs = PCCUP.pair_correlation(\n np.ascontiguousarray(\n # PCCUP expects floats\n self._expr.as_matrix().astype(\"float\")\n )\n )\n\n self.log(\"Applying Fisher Transform\")\n pccs[pccs >= 1.0] = 0.9999999\n pccs[pccs <= -1.0] = -0.9999999\n pccs = np.arctanh(pccs)\n gc.collect()\n\n # Do a PCC check to make sure they are not all NaNs\n if not any(np.logical_not(np.isnan(pccs))):\n raise ValueError(\n \"Not enough data is available to reliably calculate co-expression, \"\n \"please ensure you have more than 10 accessions to calculate correlation coefficient\"\n )\n\n self.log(\"Calculating Mean and STD\")\n # Sometimes, with certain datasets, the NaN mask overlap\n # completely for the two genes expression data making its PCC a nan.\n # This affects the mean and std fro the gene.\n pcc_mean = np.ma.masked_array(pccs, np.isnan(pccs)).mean()\n self._global(\"pcc_mean\", pcc_mean)\n gc.collect()\n pcc_std = np.ma.masked_array(pccs, np.isnan(pccs)).std()\n self._global(\"pcc_std\", pcc_std)\n gc.collect()\n\n # 2. Calculate Z Scores\n self.log(\"Finding adjusted scores\")\n pccs = (pccs - pcc_mean) / pcc_std\n gc.collect()\n\n # 3. Build the dataframe\n self.log(\"Build the dataframe and set the significance threshold\")\n self._global(\"significance_threshold\", significance_thresh)\n raw_coex = self._raw_coex(pccs, significance_thresh)\n del pccs\n gc.collect()\n\n # 4. Calculate Gene Distance\n self.log(\"Calculating Gene Distance\")\n raw_coex.addcol(\n self.refgen.pairwise_distance(\n gene_list=self.refgen.from_ids(self._expr.index)\n ),\n pos=1,\n name=\"distance\",\n )\n gc.collect()\n\n # 5. Cleanup\n raw_coex.flush()\n del raw_coex\n gc.collect()\n\n # 6. 
Load the new table into the object\n self.coex = self._bcolz(\"coex\", blaze=True)\n self.set_sig_edge_zscore(float(self._global(\"significance_threshold\")))\n self.log(\"Done\")\n return self", "def cone_func(self):\n # actual values\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n # design values\n i_d = self.inl[0].to_flow_design()\n o_d = self.outl[0].to_flow_design()\n\n v_i = v_mix_ph(i, T0=self.inl[0].T.val_SI)\n\n n = 1\n return (- i[0] + i_d[0] * i[1] / i_d[1] *\n np.sqrt(i_d[1] * v_mix_ph(i_d) / (i[1] * v_i)) *\n np.sqrt(abs((1 - (o[1] / i[1]) ** ((n + 1) / n)) /\n (1 - (o_d[1] / i_d[1]) ** ((n + 1) / n)))))", "def discr_calc(a, b, c):\n\n discriminant = b**2-4*a*c\n\n return(discriminant)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get facial landmarks with dlib.
def get_landmark(filepath, predictor):
    detector = dlib.get_frontal_face_detector()
    img = dlib.load_rgb_image(filepath)
    dets = detector(img, 1)
    for k, d in enumerate(dets):
        shape = predictor(img, d)
    t = list(shape.parts())
    a = []
    for tt in t:
        a.append([tt.x, tt.y])
    lm = np.array(a)
    return lm
[ "def _get_landmarks(input, show_image=False):\n if type(input) == str:\n im = cv2.imread(input)\n if im.shape[2] == 3:\n image = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n else:\n image = im\n elif isinstance(input, np.ndarray):\n im = input\n if im.shape[2] == 3:\n image = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n else:\n image = im\n shape_predictor = 'shape_predictor_68_face_landmarks.dat'\n detector = dlib.get_frontal_face_detector()\n predictor = dlib.shape_predictor(shape_predictor)\n gray = image\n # detect faces in the grayscale image\n rects = detector(gray, 1)\n if len(rects) == 0:\n # foto entera es el rectangulo\n rectangle = None\n rectangle = dlib.rectangle(0, 0, image.shape[1], image.shape[0])\n elif len(rects) == 1:\n # ok\n rectangle = rects[0]\n else:\n # Ahora se elige el más grande.\n sizes = []\n for r in rects:\n (x, y, w, h) = face_utils.rect_to_bb(r)\n sizes.append(w * h)\n rectangle = rects[np.argmax(sizes)]\n rect = rectangle\n # determine the facial landmarks for the face region, then convert the facial landmark (x, y)-coordinates to a\n # NumPy array\n shape = predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n # convert dlib's rectangle to a OpenCV-style bounding box\n # [i.e., (x, y, w, h)], then draw the face bounding box\n (x, y, w, h) = face_utils.rect_to_bb(rect)\n if show_image:\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\n # show the face number\n cv2.putText(image, \"Face\", (x - 10, y - 10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n # loop over the (x, y)-coordinates for the facial landmarks\n # and draw them on the image\n for (x, y) in shape:\n cv2.circle(image, (x, y), 1, (0, 0, 255), -1)\n # show the output image with the face detections + facial landmarks\n cv2.imshow(\"Output\", image)\n cv2.waitKey(0)\n return shape, x, y, w, h", "def get_land(content):\r\n \r\n tr_text = get_trtags(content)\r\n try:\r\n land = tr_text[163]\r\n if len(land) == 2:\r\n unit = 'acres'\r\n else:\r\n unit = 'sq. ft.'\r\n data['land'] = [land[0], land[1], unit]\r\n except:\r\n land = tr_text[161]\r\n if len(land) == 2:\r\n unit = 'acres'\r\n else:\r\n unit = 'sq. 
ft.'\r\n data['land'] = [land[0], land[1], unit]", "def showLandMarkData(self):\r\n print(\"disX = \", self.disX)\r\n print(\"disY = \", self.disY)\r\n print(\"dis = \", self.dist)\r\n print(\"yaw angle = \", self.yawAngle * 180.0 / np.pi)", "def load_landmarks(path_file):\n assert os.path.isfile(path_file), 'missing file \"%s\"' % path_file\n ext = os.path.splitext(path_file)[-1]\n if ext == '.csv':\n return load_landmarks_csv(path_file)\n elif ext == '.txt':\n return load_landmarks_txt(path_file)\n else:\n logging.error('not supported landmarks file: %s',\n os.path.basename(path_file))", "def land(doc, *point_seq):\n return polygon(doc, point_seq, '00ff00')", "def annotate_landmarks(img, landmarks, font_scale = 0.4):\n img = img.copy()\n for idx, point in enumerate(landmarks):\n pos = (point[0, 0], point[0, 1])\n cv2.putText(img, str(idx), pos,\n fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,\n fontScale=font_scale,\n color=(0, 0, 255))\n cv2.circle(img, pos, 3, color=(0, 255, 255))\n return img", "def getDataFromTxt(txt,with_landmark=True,raw=False):\r\n dirname = os.path.dirname(txt)\r\n with open(txt,'r') as fd:\r\n lines = fd.readlines()\r\n \r\n result = []\r\n for line in lines:\r\n line = line.strip()\r\n #print line\r\n components = line.split(' ')\r\n #print components\r\n img_path = join(dirname,components[0]) #file path\r\n img_path = img_path.replace('\\\\','/')\r\n #bounding box\r\n #bbox = (0,645,0,800)\r\n bbox = (0,2400,0,1935)\r\n bbox = [int(_) for _ in bbox]\r\n # landmark\r\n if not with_landmark:\r\n result.append((img_path,BBox(bbox)))\r\n continue\r\n landmark = np.zeros((19,2))\r\n for i in range(0,19):\r\n #下采样\r\n #x = int(float(components[1+2*i])/3)#这样写的原因是不能有str转化为int,但是能由str转化为float,很奇怪\r\n #y = int(float(components[1+2*i+1])/3)\r\n # 原始图像\r\n x = components[1+2*i]\r\n y = components[1+2*i+1]\r\n #print x,y\r\n rv = (y,x)\r\n landmark[i] = rv\r\n if raw == False:\r\n for i,one in enumerate(landmark):\r\n rv = ((one[0]-bbox[0])/(bbox[1]-bbox[0]),(one[1]-bbox[2])/(bbox[3]-bbox[2])) #归一化\r\n landmark[i] = rv #相对坐标\r\n #print (img_path,landmark,BBox(bbox))\r\n result.append((img_path,landmark,BBox(bbox)))\r\n return result", "def makeLandmark(param):\n\n return {\"point\": lmParamToPoint(*param),\n \"param\": param,\n \"occurrence\": 1}", "def read_lms_from_pts(path):\n\t#print (path)\n\tlines = open(path).read().splitlines()\n\tif ICCR_LMS_USED:\n\t\tlines = lines[3:69]\n\telse:\n\t\tlines = lines[3:71]\n\n\tlandmarks = []\n\tfor l in lines:\n\t\tcoords = l.split()\n\t\tlandmarks.append([float(coords[1]), float(coords[0])])\n\t\t#landmarks.append([float(coords[0]), float(coords[1])])\n\t#print (landmarks)\n\treturn landmarks", "def getLandmarkPatchAndBBox(img,landmark,N):\r\n n = N/2\r\n if landmark[1]<=n or landmark[1]>=(1935-n) or landmark[0]<=n or landmark[0]>=(2400-n):\r\n radis = N/4\r\n else:\r\n radis = N/2\r\n #landmark[0]为行数(top,bottom),landmark[1]为列数(left,right)\r\n left = (landmark[1]-radis).astype('int16')\r\n right = (landmark[1]+radis).astype('int16')\r\n top = (landmark[0]-radis).astype('int16') \r\n bottom = (landmark[0]+radis).astype('int16')\r\n patch = img[top:bottom+1,left:right+1]\r\n patch_bbox = BBox([left,right,top,bottom])\r\n return patch,patch_bbox", "def convert_mediapipe_to_numpy(landmarks) -> np.ndarray:\n return np.array([(e.x, e.y, e.z) for e in landmarks.landmark])", "def read_landmarks_for_cephalo(\n *,\n db: Session = Depends(deps.get_db),\n cephalo_id: int,\n) -> Any:\n landmarks = crud.landmark.get_landmarks_by_cephalo(db=db, 
cephalo_id=cephalo_id)\n return landmarks", "def objectifyLandmarks(esdcs):\n def callback(esdc):\n for lkey in [\"l\", \"l2\", \"f\"]:\n\n if esdc.childIsListOfWords(lkey) and not isLeafObject(esdc):\n\n esdc.setChild(lkey, ExtendedSdc(\"OBJECT\",\n f=esdc.children(lkey)))\n breadthFirstTraverse(esdcs, callback)", "def crop_landmark2(image, landmarks, part, show_crop=False):\n dims = np.load('landmark_dims.npy')\n\n if (part == \"left eyebrow\" or part == 0):\n rango = range(17, 22)\n w, h = dims[0] // 2\n elif (part == \"right eyebrow\" or part == 1):\n rango = range(22, 27)\n w, h = dims[1] // 2\n elif (part == \"nose\" or part == 2):\n rango = range(27, 36)\n w, h = dims[5] // 2\n elif (part == \"left eye\" or part == 3):\n rango = range(36, 42)\n w, h = dims[2] // 2\n elif (part == \"right eye\" or part == 4):\n rango = range(42, 48)\n w, h = dims[3] // 2\n elif (part == \"mouth\" or part == 5):\n rango = range(48, 68)\n w, h = dims[4] // 2\n\n landmarks = np.array(landmarks)\n rango = np.array(rango)\n x_max = int(landmarks[rango, 0].max())\n x_min = int(landmarks[rango, 0].min())\n y_max = int(landmarks[rango, 1].max())\n y_min = int(landmarks[rango, 1].min())\n\n X = int(np.mean((x_min, x_max)).round(0))\n Y = int(np.mean((y_min, y_max)).round(0))\n\n landmark = _crop_image(image, X, Y, w, h)\n if show_crop:\n cv2.imshow(\"Image\", landmark)\n cv2.waitKey(15000)\n # cv2.waitKey(0)\n return landmark", "def extract_hough_landmarks(image):\n origin = numpy.array(image.size, dtype=numpy.int)/2 # Centre of image.\n black_white = occupancy.black_white(image)\n lines = probabilistic_hough_line(black_white, threshold=30, line_length=50, line_gap=50) # Extract lines\n landmarks = []\n for line in lines:\n landmarks.append(Landmark([line[0] - origin[0], line[1] - origin[1]]))\n return limit_landmarks(landmarks)", "def landline(self):\n return self._landline", "def locations(term):", "def convert_mediapipe_to_numpy(landmarks) -> np.ndarray:\n return np.array([(e.x, e.y, e.z) for e in landmarks])", "def get_landcover_class(self, position) -> 'GeoData':\n assert len(position) == 2, \"Need coordinates for x and y\"\n\n if isinstance(position[0], numbers.Number) and isinstance(position[1],\n numbers.Number): # point\n return self._landcover_map.get_class(position)\n else: # position is a rectangle\n assert len(position[0]) == 2 and len(position[1]) == 2\n return self._landcover_map.get_values(position)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints a user's unfollowing.
def get_unfollowers(followers: list, following: list): print (f'Followers: \n{followers}') print (f'Following: \n{following}')
[ "def unfollow_all_non_friends(self):\n followings = set(self.bot.following)\n unfollows = [x for x in followings if x not in self.friends.list]\n print(f'\\nGoing to unfollow {len(unfollows)} \"friends\".')\n for u in unfollows:\n self.unfollow(u)", "def unfollow(self, user):\n users = []\n yield self._twt.unfollow_user(user, users.append)\n returnValue(users[0])", "def unfollow(self, user_id):\n with suppress(Exception):\n print(f\"Unfollowing {user_id}\")\n self.bot.api.unfollow(user_id)\n\n self.unfollowed.append(user_id)\n\n with suppress(StopIteration):\n to_remove = next(\n x for x in self.tmp_following.list if x.split(\",\")[0] == user_id\n )\n self.tmp_following.remove(to_remove)\n\n with suppress(ValueError):\n self.bot.following.remove(user_id)", "def unfollow_failed_unfollows(self):\n tmp_following = [u.split(\",\")[0] for u in self.tmp_following.list]\n users = set(bot.following) - set(tmp_following) - self.friends.set\n manually_followed = set(users) - self.unfollowed.set\n to_unfollow = set(users) - manually_followed\n print(f\"Going to unfollow {len(to_unfollow)} users\")\n for i, u in enumerate(to_unfollow):\n if i <= 15:\n self.unfollow(u)", "def test_unfollow_user_unauthenticated(self):\n with app.test_client() as client:\n response = client.post(\n f'/users/stop-following/{self.user1.id}', follow_redirects=True)\n html = response.get_data(as_text=True)\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Access unauthorized.\", html)", "def unfollow(self, *args, **kwargs):\n obj = self.get_object()\n obj.unfollow(self.request.user)\n return self.retrieve(*args, **kwargs)", "def unfollow():\n user = mongo.db.Users\n uuid = request.json['uuid']\n foreign_uuid = request.json['foreign_uuid']\n user.update({'uuid': uuid}, {\"$pull\": {'follow': foreign_uuid}}, True)\n user.update({'uuid': foreign_uuid}, {\"$pull\": {'follower': uuid}}, True)\n\n return jsonify({'result': \"Unfollow successful!\"})", "def test_view_user_following_unauthenticated(self):\n with app.test_client() as client:\n response = client.get(\n f'/users/{self.user1.id}/following', follow_redirects=True)\n html = response.get_data(as_text=True)\n self.assertEqual(response.status_code, 200)\n self.assertIn('Access unauthorized.', html)", "def confirm_unfollow(request, id_user):\n\n followed_user = get_object_or_404(AppUser, id=id_user)\n relation = UserFollows.objects.filter(\n user=request.user, followed_user=followed_user\n )\n relation.delete()\n return redirect(\"/community/\")", "def test_user_not_following(self):\n\n self.assertFalse(self.user1.is_following(self.user2))", "def unfollow(self, user_or_id):\n if self.is_following(user_or_id):\n self._remove_connection_with(\n user_or_id, ConnectionType.FOLLOW)\n return self", "def handle_unfollow(_, event, destination):\n LINE.log_event(\"Bot unfollowed.\", event=event, dest=destination)", "def test_is_not_following(self):\n \n self.assertFalse(self.user.is_following(self.user2))", "def user_playlist_unfollow(self, user, playlist_id):\n warnings.warn(\n \"You should use `current_user_unfollow_playlist(playlist_id)` instead\",\n DeprecationWarning,\n )\n return self.current_user_unfollow_playlist(playlist_id)", "def test_view_user_followers_unauthorized(self):\n with app.test_client() as client:\n response = client.get(\n f'/users/{self.user1.id}/followers', follow_redirects=True)\n html = response.get_data(as_text=True)\n\n self.assertEqual(response.status_code, 200)\n self.assertIn('Access unauthorized.', html)", "def 
unfollow_haters(follower_list, \n following_list, \n api, \n unfollow_at_once = 75, \n total_to_unfollow=None,\n hours_delay = 1):\n \n #used to measure how many have been unfollowed in each loop\n count_of_unfollowed_at_once=0\n \n #used to measure how many total have been unfollowed\n count_of_unfollowed = 0\n \n following_list.reverse() #Unfollow oldest bunch first \n \n # If the max number of people to unfollow isn't set, just use the total length \n # of following list as the max number to unfollow\n if total_to_unfollow == None:\n total_to_unfollow = len(following_list) \n \n for hater in following_list:\n if hater not in follower_list:\n try:#error handling for case where user can't be found\n #Only select a few at once so as not overload twitter with requests\n print(str(count_of_unfollowed+1) + \". \" + api.get_user(hater).screen_name + \" is being unfollowed\")\n api.destroy_friendship(hater)\n count_of_unfollowed_at_once += 1\n count_of_unfollowed +=1\n #test to see if I've hit the number of unfollows defined\n if count_of_unfollowed >= total_to_unfollow:\n break\n if count_of_unfollowed_at_once == unfollow_at_once:\n count_of_unfollowed_at_once = 0\n print(\"Quick pause to not overload Twitter with unfollows\")\n sleep(3600*hours_delay)\n except tweepy.error.TweepError:\n sleep(3600) #Pause for an hour to reset API limit\n \n print(\"The haters have been unfollowed\")", "def userStopToFollow(userId, idToUnfollow):\n user = getUser(userId)\n userToUnfollow = getUser(idToUnfollow)\n\n user.suscriptions.remove(idToUnfollow)\n userToUnfollow.followers.remove(userId)\n user.put()\n userToUnfollow.put()", "def list_following(self) -> list:\n return [u.screen_name for u in self.following_list]", "def test_user_not_followed(self):\n\n self.assertFalse(self.user1.is_followed_by(self.user2))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns float of the number of seconds to wait before the request limit will no longer be exceeded. Also clears out any requests older than a minute. This makes an important asssumption that your program will actually honor the block time. If you don't, you will rocket past the courtesy rate limit. This is also the most threadunsafe thing ever.
def _get_block_time_seconds(self): if self.rate_limit == 0: return 0 call_time = time.time() remove_time = call_time - 60 for idx, request in enumerate(self.request_log): if request >= remove_time: self.request_log = self.request_log[idx:] break if len(self.request_log) < self.rate_limit: return 0 return (self.request_log[-1 * self.rate_limit] + 60) - call_time
[ "def _http_lock_wait_time(self):\r\n if self._http_lock_wait_begin == 0:\r\n return 0\r\n if self._http_lock_wait_end == 0:\r\n return time.time() - self._http_lock_wait_begin\r\n return self._http_lock_wait_end - self._http_lock_wait_begin", "def apply_limit(self, elapsed_time, block_size):\n if self.limit > 0 and block_size > self.limit:\n block_size = self.limit\n if elapsed_time < 1:\n time.sleep(1 - elapsed_time)\n return block_size", "def remainingTimeToWait(self) -> int:\n ...", "def _rate_limit_sleep(self, last_body_download_time):\n current_time = round(time.time(), 2)\n diff = round(current_time - last_body_download_time, 2)\n\n if diff >= self._api_hit_rate:\n return\n\n sleep_diff = round(self._api_hit_rate - diff, 3)\n self.logger.debug(\"Sleep time is: {0}\".format(sleep_diff))\n time.sleep(sleep_diff)", "def get_remaining_time_in_millis(self):\n return self.time_limit_ms", "def _wait_for_ratelimit(self, resource: str=CORE_RESOURCE):\n ratelimit = self._get_ratelimit(resource)\n if int(ratelimit.get('remaining', '0')) < 1:\n reset = datetime.utcfromtimestamp(int(ratelimit.get('reset', '0')))\n delta = reset - datetime.utcnow()\n wait_time = int(delta.total_seconds()) + 2\n if wait_time > 0:\n __log__.info(\n 'Rate limit reached. Wait for %d sec until %s',\n wait_time, reset)\n time.sleep(wait_time)", "def _throttle(self):\n\n if (time.time() - self.last_access_time) < \\\n DEFAULT_WEB_REQUEST_SLEEP_TIME:\n time.sleep(self.sleep_time)\n self.last_access_time = time.time()", "def seconds_to_sleep(self):\n if self.next_request_timestamp is None:\n return\n sleep_seconds = self.next_request_timestamp - time.time()\n if sleep_seconds <= 0:\n return\n return sleep_seconds", "def test_limit_exceeded(silver_client, gold_client):\n for i in range(15):\n assert gold_client.get(\"/\").status_code == 200, f\"Response of the request \" \\\n f\"number {i} should be 200\"\n # wait for 0.125 as the original ruby tests waits after making request\n time.sleep(0.125)\n\n wait_interval()\n\n assert_limit_works(silver_client, limit=10)\n\n wait_until_next_minute()\n\n assert_limit_works(silver_client, limit=10)", "def ratelimit(func):\n @wraps(func)\n async def waitLimit(*args, **params):\n token = await args[0]._rl.getToken(func.__name__)\n\n response = await func(*args, **params)\n\n try:\n limits = utils.getLimits(response.headers)\n timestamp = utils.getTimestamp(response.headers)\n except:\n limits = None\n timestamp = utils.getTimestamp(None)\n\n await args[0]._rl.getBack(func.__name__, token, timestamp, limits)\n\n return response\n\n return waitLimit", "def rate_limit_threshold(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"rate_limit_threshold\")", "def _check_api_limits(gh_session, api_required=250):\n api_rates = gh_session.rate_limit()\n\n api_remaining = api_rates[\"rate\"][\"remaining\"]\n api_reset = api_rates[\"rate\"][\"reset\"]\n logger.debug(\"Rate Limit - %d requests remaining\", api_remaining)\n\n if api_remaining > api_required:\n return\n\n now_time = time.time()\n time_to_reset = int(api_reset - now_time)\n logger.warning(\"Rate Limit Depleted - Sleeping for %d seconds\", time_to_reset)\n\n while now_time < api_reset:\n time.sleep(10)\n now_time = time.time()\n\n return", "def _calc_timeout(self) -> None:\n tries = list()\n for i in range(10):\n start = time()\n self._stream.read() # Prime camera for reading.\n end = time()\n tries.append(end - start)\n self._timeout_limit = max(tries) + 1.5 # + 1.5 to handle possible lag spikes.", "def cleanThreadTimeToWait() 
-> None:\n ...", "def ControlRate(t_last_call, max_rate):\n p = 1.0 / max_rate\n t_current = time.time()\n dt = t_current - t_last_call\n\n if dt < p:\n time.sleep(p - dt)\n\n return t_current", "def check_rate_limiting():\n if not flaskbb_config[\"AUTH_RATELIMIT_ENABLED\"]:\n return None\n return limiter.check()", "def request_time(self) -> float:\n if self._finish_time is None:\n return time.time() - self._start_time\n else:\n return self._finish_time - self._start_time", "def peak_minutes_to_wait_on_disconnect(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"peak_minutes_to_wait_on_disconnect\")", "def idle(self):\n return (datetime.datetime.now() - self._last_received).total_seconds()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inserts a new timestamp into the request log, marked block_time seconds in the future.
def _insert_request_to_log(self, block_time=0): if self.rate_limit == 0: return self.request_log.append(time.time() + block_time)
[ "def time_block(self, message):\n tic = time.time()\n yield\n dt = time.time() - tic\n log = app_log.info if dt > 1 else app_log.debug\n log(\"%s in %.2f ms\", message, 1e3 * dt)", "def _get_block_time_seconds(self):\n\n if self.rate_limit == 0:\n return 0\n\n call_time = time.time()\n remove_time = call_time - 60\n\n for idx, request in enumerate(self.request_log):\n if request >= remove_time:\n self.request_log = self.request_log[idx:]\n break\n\n if len(self.request_log) < self.rate_limit:\n return 0\n return (self.request_log[-1 * self.rate_limit] + 60) - call_time", "def log_now(self):\n\t\tt = time.time()\n\t\tprint(\"logged\", time.ctime(t))\n\t\tself.history.append(t)", "def block_timestamp(self, block_num):\n block_time = Block(\n block_num,\n hive_instance=self.hive\n ).time()\n return int(time.mktime(block_time.timetuple()))", "def add_time(self, amount):\n self._time += amount", "def set_block_start_time(self, time: str) -> None:\n self._logger.debug(\"running\")\n self._block_start_time_val.setText(time)\n self._logger.debug(\"done\")", "def logSleep(self, timeSlept):\n self.slept = timeSlept/3600", "def extend(self, additional_time=0.0):\n\t\tself._last_time += additional_time", "def add_timestamp(dct):\n dct['timestamp'] = time.time() * 1000", "def time_block(self, time_block):\n allowed_values = [1, 2, 3, 4, 5, 6, 7, 8, 9] # noqa: E501\n if time_block not in allowed_values:\n raise ValueError(\n \"Invalid value for `time_block` ({0}), must be one of {1}\" # noqa: E501\n .format(time_block, allowed_values)\n )\n\n self._time_block = time_block", "def add_time(self, response_time):\n if type(response_time) != float:\n raise TypeError(\"response time must be a float\")\n self.resp_time.append([time.time(), response_time*1000.0])", "def record(self, timestamp: int) -> None:\n # Increase total hits\n self.total_hits += 1\n\n # Convert timestamp to minute, reduce number of elements to store\n minute = timestamp // 60\n\n # Get insert position to keep the hits sorted by minute\n insert_pos = bisect_left([hit[0] for hit in self.hits], minute)\n\n if insert_pos < len(self.hits) and self.hits[insert_pos][0] == minute:\n old_hit = self.hits[insert_pos][1]\n self.hits[insert_pos] = (minute, old_hit + 1)\n else:\n self.hits.insert(insert_pos, (minute, 1))", "def insert_partial(self, name, timestamp, newvalue):", "def set_timestamp(self, timestamp):\n self.timestamp = LogEntry.normalize_timestamp(timestamp)", "def block_time(self, block_num):\n return Block(\n block_num,\n hive_instance=self.hive\n ).time()", "def writeTime(self, time):\n cursor = self.connection.cursor()\n time = (time,)\n\n cursor.execute('''UPDATE conf\n set last_update = ?''', time)\n\n self.connection.commit()\n cursor.close()", "def increment_last(self, timestamp):", "def monitor_block_times(self):\n self.current_block_number = self.blockchain_client.get_block_number()\n self.current_block = self.blockchain_client.get_block_by_number(self.current_block_number, False)\n self.current_block_timestamp = int(self.current_block['timestamp'], 16)\n\n while self._run:\n self.do_heartbeat()\n sleep_time = self.sleep_time\n time.sleep(sleep_time)\n if self.blockchain_client.get_block_number() > self.current_block_number:\n # Update block time.\n next_block = self.blockchain_client.get_block_by_number(self.current_block_number + 1)\n if next_block is None:\n logger.warning(\"Got `None` while fetching block %s\", self.current_block_number + 1)\n continue\n next_block_timestamp = int(next_block['timestamp'], 16)\n 
self.block_time = next_block_timestamp - self.current_block_timestamp\n\n # Grab current block data\n self.current_block_number = self.blockchain_client.get_block_number()\n self.current_block = self.blockchain_client.get_block_by_number(self.current_block_number, False)\n self.current_block_timestamp = int(self.current_block['timestamp'], 16)\n self.logger.debug(\n \"Block Number: %s - Block Time: %s\",\n self.current_block_number,\n decimal.Decimal(str(self._block_time)).quantize(decimal.Decimal('1.00')),\n )\n elif time.time() > self.expected_next_block_time + 20 * self.block_time:\n delta = time.time() - self.expected_next_block_time\n if delta > 120 and int(delta) % 10 == 0:\n self.logger.warning(\n \"Potentially stuck at block %s - Have waited %s seconds.\",\n self.current_block_number,\n time.time() - self.current_block_timestamp,\n )\n time.sleep(1)", "def postAt(self, act, abs_time):\n assert issubclass(type(act), Ahsm)\n self.act = act\n self.interval = 0\n Framework.addTimeEventAt(self, abs_time)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the first (and only) doc in the result set, otherwise raises an exception.
def one(self): self._get() if len(self.result.get('collection', [])) != 1: raise ValueError('query did not return exactly one result') return self.result['collection'][0]
[ "def one(self):\n try:\n return self.results[0]\n except IndexError:\n return None", "def one(self, *args, **kwargs):\n bson_obj = self.find(*args, **kwargs)\n count = bson_obj.count()\n if count > 1:\n raise MultipleResultsFound(\"%s results found\" % count)\n elif count == 1:\n try:\n doc = next(bson_obj)\n except StopIteration:\n doc = None\n return doc", "def find_one(self, filter=None):\n for doc in self.find(filter, limit=1):\n return doc", "def fetch_one(self, *args, **kwargs):\n bson_obj = self.fetch(*args, **kwargs)\n count = bson_obj.count()\n if count > 1:\n raise MultipleResultsFound(\"%s results found\" % count)\n elif count == 1:\n return next(bson_obj)", "def first(self):\n try:\n row = self.cursor_strategy.fetchone()\n except BaseException as e:\n self.connection._handle_dbapi_exception(\n e, None, None, self.cursor, self.context\n )\n\n try:\n if row is not None:\n return self.process_rows([row])[0]\n else:\n return None\n finally:\n self.close()", "def fetchOne(self):\n\t\tself.result = self.cursor.fetchone()\n\t\treturn self.result", "def fetchone(self):\n try:\n row = self.cursor_strategy.fetchone()\n if row is not None:\n return self.process_rows([row])[0]\n else:\n self._soft_close()\n return None\n except BaseException as e:\n self.connection._handle_dbapi_exception(\n e, None, None, self.cursor, self.context\n )", "def find_one(self, where_dict):\n\n for document in self.documents:\n if self.check_document(document, where_dict):\n return document", "def get_one(cur, query):\n\tnummatches = cur.execute(query)\n\treturn cur.fetchone()", "def one(self, default=None, as_dict=False, as_tuple=False):\n\n # Ensure that we don't have more than one row.\n try:\n test = self[1] # force the cursor to the second rowpass\n except:\n pass\n\n if len(self) > 1:\n raise ValueError(\n \"RecordCollection contained more than one row. 
\"\n \"Expects only one row when using \"\n \"RecordCollection.one\"\n )\n\n # Try to get a record, or return default.\n if len(self) == 0: # bummer, no rows at all\n return default\n record = self[0]\n # Cast and return.\n if as_dict:\n return record.as_dict()\n elif as_tuple:\n return record.as_tuple()\n else:\n return record", "def find_one(collection, query):\n return DB.DATABASE[collection].find_one(query)", "def next(self):\n return self.__document_type(self.__wrapped_cursor.next())", "def find_doc(self, doc_type, property_name, property_value):\n try:\n self.client.connect()\n db = self.client[self.db_name]\n selector = {\n '_id': {'$gt': 0},\n 'type': doc_type,\n property_name: property_value\n }\n query = Query(db, selector=selector)\n for doc in query()['docs']:\n return doc\n return None\n except Exception:\n LOG.exception(\"Cloudant DB exception:\")\n finally:\n self.client.disconnect()", "def get_doc_by_id(cls, doc_id):\n return cls.get_index().get(doc_id=doc_id)", "def first_or_404(self):\n instance = self.first()\n if instance is None:\n abort(404)\n return instance", "def first(self):\n return self._objects[0]", "def __getitem__(self, key):\n if self.document_cache is None:\n return self.fetch_document(key, raw_results = False)\n try:\n return self.document_cache[key]\n except KeyError:\n document = self.fetch_document(key, raw_results = False)\n self.document_cache.cache(document)\n return document", "def find_one(self, where_dict):\n result = self.find(where_dict)\n return result[0] if result else None", "async def queryone(self, stmt, *args):\n result = await self.query(stmt, *args)\n if len(result) == 0:\n raise NoResultError()\n elif len(result) > 1:\n raise ValueError(\"Expectecd 1 result, got %d\" % len(result))\n return result[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add one or many new matching filters to the filter set using kwargs. These aren't what go in the actual 'filter' URL parameter. These are used to match against the db, like name=Zoulas. This can be used to pass through any query parameters that do not have their own dedicated chainable methods.
def match(self, **kwargs): for filter_name, filter_value in kwargs.iteritems(): self._match[filter_name] = filter_value return self
[ "def filter(self, **kwargs):\n\n for filter_name, filter_value in kwargs.iteritems():\n self._filters[filter_name] = filter_value\n return self", "def set_filters(filter_list):", "def filter(self, _filter: \"Filter\" = None, **kwargs) -> \"Query\":\n\n if _filter and kwargs:\n raise RuntimeError(\"Specifying both Filter and kwargs is not allowed.\")\n\n if self._executed:\n raise DatabaseError(\"Cannot filter a database query that has already been executed.\")\n else:\n if _filter is not None:\n self._filters.append(_filter)\n else:\n for field, value in kwargs.items():\n self._filters.append(Equals(field, value))\n return self", "def filter(self, **kwargs):\n return self", "def register_filter(self, *filters):\n for f in filters:\n f.controller = self\n self._filters.append(f)", "def filter(self, *args, **kwargs):\n query = R()\n for arg in args:\n if isinstance(arg, str):\n query &= R(_expr=arg)\n continue\n if isinstance(arg, R):\n query &= arg\n continue\n raise TypeError(f'arguments must be string or R not {type(arg)}')\n\n if kwargs:\n query &= R(**kwargs)\n\n return self._get_resourceset_class()(\n self._client,\n self._path,\n query=query,\n )", "def rest_framework_filters_args(self):\n raise NotImplementedError", "def get_filterset_kwargs(self, filterset_class):\n kwargs = {\n \"data\": self.request.GET or None,\n \"request\": self.request,\n }\n try:\n kwargs.update(\n {\n \"queryset\": self.get_queryset(),\n }\n )\n except ImproperlyConfigured:\n # ignore the error here if the filterset has a model defined\n # to acquire a queryset from\n if filterset_class._meta.model is None:\n msg = (\n \"'%s' does not define a 'model' and the view '%s' does \"\n \"not return a valid queryset from 'get_queryset'. You \"\n \"must fix one of them.\"\n )\n args = (filterset_class.__name__, self.__class__.__name__)\n raise ImproperlyConfigured(msg % args)\n return kwargs", "def add_new_filter(self, filter_path):", "def attach_filters(self, filters):\n precedence_dict = defaultdict(list)\n\n for f in filters:\n precedence_dict[f.precedence].append(f)\n\n for key in sorted(precedence_dict.keys()):\n self.filters.extend(precedence_dict[key])", "def filters(self, filters):\n\n for f in filters:\n self.filter(f[\"attribute_name\"], f[\"value\"], f[\"operator\"])", "def addFilters(self, filterList):\n for f in filterList:\n self.addFilter(f)", "def _update_json_request(json_request, **kwargs):\n if 'filter' in kwargs:\n filter = json_request.get('filter', [])\n filter.extend(kwargs.pop('filter'))\n json_request['filter'] = filter\n if 'exclude' in kwargs:\n exclude = json_request.get('exclude', [])\n exclude.extend(kwargs.pop('exclude'))\n json_request['exclude'] = exclude\n json_request.update( dict((k, v) for k, v in kwargs.iteritems() \n if v is not None)\n )\n return json_request", "def generic_filterset(self):\n\n def get_filterset(data=None, queryset=None, *, request=None, prefix=None):\n \"\"\"\n Apply GenericFilterSet and return the filtered queryset.\n \"\"\"\n return GenericFilterSet(data, queryset, request=request, prefix=prefix).qs\n\n return partial(get_filterset, self.request.GET, request=self.request)", "def set_default_filters(self, **filters):\n\t\tself._filters = filters\n\t\tself._store_schema()", "def apply_filters(self, request, applicable_filters):\n self.__request = request\n commaseparated_filters = {}\n nb_enfants = request.GET.get(\"n_enfants__length\")\n language = applicable_filters.pop(\"language__in\", None)\n for f in self.commaseparated_fields:\n 
commaseparated_filters[f] = applicable_filters.pop(\"%s__in\" % f, None)\n applicable_filters.pop(\"tarif__in\", None) # we remove it since processed in filters_post_sorting\n\n qs = super(SearchResource, self).apply_filters(request, applicable_filters)\n qs = qs.distinct() # for enfants__school filtering, can return duplicates\n\n if not settings.ALLOW_BASIC_PLAN_IN_SEARCH:\n qs = qs.filter(plan=self._meta.object_class.PLANS[\"premium\"])\n\n if nb_enfants:\n qs = self.filter_nb_enfants(nb_enfants, qs)\n\n if language:\n qs = self.filter_language(language, qs)\n\n for f, value in commaseparated_filters.iteritems():\n if value:\n qs = self._filter_commaseparated_field(f, value, qs)\n\n return qs", "def filter(self, filter_params):\n pass", "def get_filter_kwargs(self):\n filter_kwargs = {}\n exclude_kwargs = {}\n for param, lookup in self.QUERY_MAP.items():\n exclude_param = '%s-exclude' % param\n if param in self.request.GET:\n filter_kwargs[lookup] = self.request.GET[param]\n if exclude_param in self.request.GET:\n exclude_kwargs[lookup] = self.request.GET[exclude_param]\n return filter_kwargs, exclude_kwargs", "def addFilterSet(credentials, filter_set, **opts):\n\n return filter_set.save(credentials, new=True, **opts)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add one or many new matching filters to the filter set using kwargs.
def filter(self, **kwargs): for filter_name, filter_value in kwargs.iteritems(): self._filters[filter_name] = filter_value return self
[ "def match(self, **kwargs):\n\n for filter_name, filter_value in kwargs.iteritems():\n self._match[filter_name] = filter_value\n return self", "def addFilterSet(credentials, filter_set, **opts):\n\n return filter_set.save(credentials, new=True, **opts)", "def set_filters(filter_list):", "def register_filter(self, *filters):\n for f in filters:\n f.controller = self\n self._filters.append(f)", "def addFilters(self, filterList):\n for f in filterList:\n self.addFilter(f)", "def add_new_filter(self, filter_path):", "def add_filter(self,fltr):\n self.mutex.lock()\n self.filters.append(fltr)\n self.mutex.unlock()", "def attach_filters(self, filters):\n precedence_dict = defaultdict(list)\n\n for f in filters:\n precedence_dict[f.precedence].append(f)\n\n for key in sorted(precedence_dict.keys()):\n self.filters.extend(precedence_dict[key])", "def register_filter(self, filter, function):\n if filter in self.filters:\n self.filters[filter].append(function)\n else:\n self.filters[filter] = [ function ]", "def addListedNewFilter(self):\n selected_items = self.ui.filter_list.selectedItems()\n if len(selected_items) != 1:\n print 'Please select one filter in the list'\n else:\n filter_name = unicode( selected_items[0].text() )\n filter_class = filters_package.filter_given_name_dict[filter_name]\n new_filter = filter_class()\n if new_filter is not None:\n cur_filterNames = []\n name = new_filter.name\n for fltr in self.worker_thread.filters:\n cur_filterNames.append(fltr.name)\n i = 2\n while name in cur_filterNames:\n name = new_filter.name + ' #' + unicode(i)\n i+=1\n new_filter.setName(name)\n self.addFilterProxyItem(new_filter)\n else: print \"Warning: Could not create a filter instance\"", "def filters(self, filters):\n\n for f in filters:\n self.filter(f[\"attribute_name\"], f[\"value\"], f[\"operator\"])", "def addToFilter(self, filter: ghidra.program.util.ProgramDiffFilter) -> None:\n ...", "def aaaadd_filter(self, f, filter_cls=None):\n if filter_cls is None:\n filter_cls = type(f)\n for logger in self.find_loggers_with_filter(filter_cls):\n logger.addFilter(f)\n for handler in self.find_handlers_with_filter(filter_cls):\n handler.addFilter(f)", "def filter(self, **kwargs):\n return self", "def test_add_filter_new(self):\n self.f.add_filter(self.TEST_FILTER_KEY, self.TEST_FILTER_VALUE_1)\n self.assertIn(self.TEST_FILTER_KEY, self.f)\n self.assertEqual([self.TEST_FILTER_VALUE_1], self.f[self.TEST_FILTER_KEY])", "def add_filter(self, func):\n self.env.filters[func.__name__] = func\n return func", "def add_filter(self, name, filter):\n raise NotImplementedError(\"please mixin an environment class\")", "def filter(self, *args, **kwargs):\n query = R()\n for arg in args:\n if isinstance(arg, str):\n query &= R(_expr=arg)\n continue\n if isinstance(arg, R):\n query &= arg\n continue\n raise TypeError(f'arguments must be string or R not {type(arg)}')\n\n if kwargs:\n query &= R(**kwargs)\n\n return self._get_resourceset_class()(\n self._client,\n self._path,\n query=query,\n )", "def filter_and_update(cls, filter_args=None, updated_args=None):\n res = db.session.query(cls).filter_by(**filter_args).update(updated_args)\n db.session.commit()\n return res" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that number of keys is equal to number of values in dict
def test_dict_key_value(): my_dict = {a: a ** 2 for a in range(7)} keys_count = my_dict.keys() values_count = my_dict.values() print(keys_count) print(values_count) assert len(keys_count) == len(values_count)
[ "def number_keys(a_dictionary):\n\n return len(a_dictionary)", "def check_results_dict_dimensions(result_dict: dict):\n check_list = []\n error_message = []\n for key, value in result_dict.items():\n error_message.append(f'{key}: {\", \".join([str(item) for item in value])}\\n')\n check_list.append(len(value))\n if len(set(check_list)) > 1:\n raise ValueError(f'Result dictionary has unbalanced values: {\"; \".join(error_message)}')", "def _check_scatter_key_length(\n self, num_keys: int, value: Union[cudf.core.scalar.Scalar, ColumnBase]\n ):\n if isinstance(value, ColumnBase):\n if len(value) != num_keys:\n msg = (\n f\"Size mismatch: cannot set value \"\n f\"of size {len(value)} to indexing result of size \"\n f\"{num_keys}\"\n )\n raise ValueError(msg)", "def db_consistent(dict_of_dict):\n inner_keys_list = []\n # Build a list of list of keys\n for key in dict_of_dict:\n inner_keys = list(dict_of_dict[key].keys())\n inner_keys.sort()\n inner_keys_list.append(inner_keys)\n for i in range(1, len(inner_keys_list)):\n # If the number of keys is different.\n if len(inner_keys_list[0]) != len(inner_keys_list[i]):\n return False\n # If the keys don't match.\n for j in range(len(inner_keys_list[0])):\n if inner_keys_list[0][j] != inner_keys_list[i][j]:\n return False\n return True", "def test_if_to_dict_returns_the_accurate_number_of_keys(self):\n b = BaseModel()\n partial_expectation = {k: v for k, v in b.__dict__.items()\n if not k.startswith(\"_\")}\n self.assertEqual(len(b.to_dict()), len(partial_expectation) + 1)", "def assert_key_is_list(self, dictionary, desired_keys):\r\n running_length = None\r\n for key in desired_keys:\r\n assert type(dictionary[key]) == list, \"key-pair does not form a list!\"\r\n if running_length is None:\r\n running_length = len(dictionary[key])\r\n assert len(dictionary[key]) == running_length, \"List lengths must match.\"\r\n running_length = len(dictionary[key])", "def test_returned_dictionary_count(legal_dict_fixture):\n t = legal_dict_fixture\n print(t)\n assert len(t) == 7", "def count_values(dic):\n values = dic.values()\n check = []\n count = 0\n for i in values:\n if i not in check:\n count += 1\n check.append(i)\n return count", "def __len__(self) -> Dict[str, int]:\n return self.dict_size", "def count_duplicates(dic):\n vals = list(dic.values())\n lst_vals = []\n count = 0\n for v in vals:\n if vals.count(v) >= 2:\n if v not in lst_vals:\n lst_vals.append(v)\n return len(lst_vals)", "def test_hash(self):\n # Test dictionary behavior\n self.assertEqual(len(dict.fromkeys([self.bond1, self.bond2, self.bond3, self.bond4])), 4)\n\n # Test set behavior\n self.assertEqual(len({self.bond1, self.bond2, self.bond3, self.bond4}), 4)", "def verify_hash_table(kv, idx):\n for k, v in kv.items():\n assert k in idx and idx[k] == (v, v, v)\n assert len(idx) == len(kv)", "def test_hash(self):\n # Test dictionary behavior\n self.assertEqual(len(dict.fromkeys([self.atom1, self.atom2, self.atom3, self.atom4])), 4)\n\n # Test set behavior\n self.assertEqual(len({self.atom1, self.atom2, self.atom3, self.atom4}), 4)", "def validate_array_sizes():\n arrays_union = unique_arrays_write['unused'].union(unique_arrays_read['unused'])\n arrays_dict = {}\n for el in arrays_union:\n if el[0] in arrays_dict and el[1] != arrays_dict[el[0]]:\n error = f'Arrays {el[0]} have different sizes'\n raise TypeError(error)\n else:\n arrays_dict[el[0]] = el[1]\n return arrays_dict", "def length(self) -> int:\n count = 0\n for key in self.keys():\n if key in self:\n count += 1\n return count", 
"def assert_dict_almost_equal(self, dict1, dict2):\n self._recursion_counter += 1\n if self._recursion_counter <= self.max_recursions:\n for key, value in dict1.items():\n if isinstance(value, dict):\n self.assert_dict_almost_equal(value, dict2[key])\n elif isinstance(value, np.ndarray):\n assert_array_almost_equal(value, dict2[key], self.decimals)\n elif issparse(value):\n assert_array_almost_equal(value.todense(), dict2[key].todense(), self.decimals)\n else:\n self.assertAlmostEqual(value, dict2[key], self.decimals, self.tolerance)\n self._recursion_counter = 0\n else:\n raise RuntimeError('Recursion stopped to avoid infinite loops')", "def assertResponseKeyLengthEquals(self, response, key, value):\n response_json = json.loads(response.content)\n self.assertIn(key, response_json)\n self.assertEquals(len(response_json[key]), value)", "def _checkValues(self, data_dict):\n for key, value in self.fixed_value_dict.iteritems():\n actual_value = data_dict.setdefault(key, value)\n if actual_value != value:\n raise ValueError, '%r: expected %r, got %r' % (key, value,\n actual_value)", "def check_hashable(values):\n for value in values:\n if not isinstance(value, Integer):\n # TODO: we may want a more specific error here too.\n # TODO: add support for more types (at least symbols, chars and other types of number)\n return TrifleExceptionInstance(\n wrong_type,\n u\"You can't use %s as a hashmap key. Keys must be integers\" % value.repr()\n )\n\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use ruleset function on data and update data.
def apply_rule(self): def relative_to_absolute_coord(cur_x, cur_y): return [(cur_x + xi, cur_y + yi) for xi, yi in self.rule.indices] def coordinates_in_bounds(x, y): if min(x, y) < 0: return False if x >= self.data.shape[0]: return False if y >= self.data.shape[1]: return False return True new_data = np.zeros(self.data.shape) it = np.nditer(self.data, flags=["multi_index"]) while not it.finished: values_from_coords = [] for xi, yi in relative_to_absolute_coord(*it.multi_index): if not coordinates_in_bounds(xi, yi): values_from_coords.append(False) else: values_from_coords.append(self.data[xi, yi]) new_data[it.multi_index] = self.rule.ruleset(it[0], values_from_coords) it.iternext() self.data = new_data
[ "def PBH_RULE_update():\n\n pass", "def update_rules(self):\n print(\"Checking existing rules and subscription for (phedex) dataset %s \" %\n self.phedex_dataset)\n\n for block, sub in self.subscriptions.items():\n if 'request' not in sub:\n print(\"subscription for block %s missing 'request' field\" % block)\n sub['request'] = 'unknown'\n\n if 'group' not in sub:\n print(\"subscription for block %s missing 'group' field\" % block)\n sub['group'] = 'unknown'\n\n submd = json.dumps({'type': 'phedex_sync', 'rid': sub['request'],\n 'group': sub['group']})\n\n if block not in self.rules:\n print(\"No rule found for %s, creating one\" % block)\n self.add_rule(names=[block], rse_exp='rse='+self.rse,\n comment=submd)\n # For the moment ignoring this: ISSUE ..\n #elif submd != self.rules[block]['comments']:\n # print(\"Rule for %s has wrong comment, re-creating\" % block)\n # self.del_rule(self.rules[block]['id'])\n # self.add_rule(names=[block], rse_exp='rse='+self.rse,\n # comment=submd)\n elif self.cli.account != self.rules[block]['account']:\n print(\"Rule for %s belongs to the wrong account, modifying\" % block)\n self.update_rule(self.rules[block]['id'], {'account': self.account})\n\n for block, rule in self.rules.items():\n if block not in self.subscriptions:\n print(\"Rule for %s correspond to no subscription, deleting\" % block)\n self.del_rule(rule['id'])", "def update_set(self, dataset):\n for data in dataset:\n for hypo in self.values():\n like = self.likelihood(data, hypo)\n self.mult(hypo, like)\n return self.normalize()", "def updateDataset(self, *filter_args, **filter_kwargs):\n self.__dataset__ = self.filterRecs(*filter_args, **filter_kwargs)\n\n # Update dataset by link object data\n # self.__dataset__ = self._updateLinkDataDataset(self.__dataset__)\n return self.__dataset__", "def execute_sequence(data, rule):\n for func in rule:\n for index, value in enumerate(data):\n func_name = \"_\".join([\"fun\", func])\n # run function by name and redefine value of data element\n data[index] = getattr(functions, func_name)(value)\n return data", "def _overwriteDataWithFiltered(self):\n self.data = self.dataFiltered\n self.groupedData = self.groupedDataFiltered\n self.driftCorrectedData = self.driftCorrectedDataFiltered\n self.driftCorrectedDataUngrouped = self.driftCorrectedDataUngroupedFiltered", "def PBH_RULE_update_field():\n\n pass", "def set_rule(self, rule):\n self.rule = rule # pragma: no cover", "def update_ruleset(self, ruleset):\n return self._put(route='CompliancePolicy', payload=ruleset)", "def _update_acc_by_rules(self, mut_influence: np.ndarray):\n for rule, coeff in self.rules.items():\n acc_delta = rule(self, mut_influence) # can't call self.rule\n self._update_acc(acc_delta, coeff)", "def __process_data(self, data):\n return self.mlda.transform(data)", "def rulesetsRefreshed(self):\n pass", "def refine(self, rule):\n raise NotImplementedError('abstract method')", "def update_rule(self):\n\n rules = list_replication_rules(filters={'scope': self.scope, 'name': self.block_name})\n # rules = self.rcli.list_did_rules(scope=self.scope, name=self.block_name)\n rse_expression = 'rse=' + self.rse\n\n remove_rules = [rule for rule in rules\n if rule['account'] == self.account and rule['rse_expression'] == rse_expression]\n\n if not remove_rules and self.is_at_pnn:\n self.rule_exists = False\n if self.dry_run:\n logging.info(\"Dry run: Adding rule for dataset %s at rse %s.\", self.block_name, self.rse)\n else:\n self.add_replication_rule_with_defaults(dids=[{'scope': self.scope, 
'name': self.block_name}],\n copies=1, rse_expression=rse_expression, account=self.account)\n monitor.record_counter('cms_sync.rules_added')\n self.rule_exists = True\n elif remove_rules and not self.is_at_pnn:\n self.rule_exists = True\n if self.dry_run:\n logging.info(\"Removing rules for dataset %s at rse %s.\", self.block_name, self.rse)\n else:\n for rule in remove_rules:\n # delete_replication_rule(rule['id'], purge_replicas=False, issuer=self.account)\n delete_rule(rule_id=rule['id'], purge_replicas=True, soft=False)\n monitor.record_counter('cms_sync.rules_removed')\n self.rule_exists = False", "def computeRules(self):\n with open(self.dataFile) as fileObj:\n transactions = list(load_transactions(fileObj, delimiter=\",\"))\n\n # remove empty strings if any\n transactions_filtered = []\n a = set()\n\n for li in transactions:\n li = list(filter(None, li))\n transactions_filtered.append(li)\n a |= set(li)\n\n self.association_rules = apriori(transactions_filtered, min_support=0.01, min_confidence=0.01,\n min_lift=1.0,\n max_length=None)", "def _apply_virtual_rules(self, json_id, field_name, rule_def):\n rules = []\n rules.append(('calculated', rule_def['rules'].get('calculated', [])))\n rules.append(('derived', rule_def['rules'].get('derived', [])))\n for (rule_type, rrules) in rules:\n for rule in rrules:\n if not self._evaluate_decorators(rule):\n return False\n try:\n info = self._find_meta_metadata(json_id, field_name, rule_type, rule, rule_def)\n if rule_type == 'derived' or rule['memoize']:\n value = try_to_eval(rule['value'], self.functions, self=self.json)\n if 'json_ext' in rule_def:\n value = rule_def['json_ext']['dumps'](value)\n self._remove_none_values(value)\n else:\n value = None\n self.json.set(field_name, value, extend=True)\n self.json['__meta_metadata__.%s' % (SmartDict.main_key_pattern.sub('', field_name), )] = info\n except Exception as e:\n self.json['__meta_metadata__']['__continuable_errors__']\\\n .append('Virtual Rule CError - Unable to evaluate %s - %s' % (field_name, str(e)))\n return False\n\n if field_name not in self.json:\n self._set_default_value(json_id, field_name)\n\n return True", "def _apply(self, dataset: Dataset) -> Dataset:\n dataset = copy.deepcopy(dataset)\n\n for pattern, replacement in self.replacement_map.items():\n replaced_col = dataset.data[self.columns[0]].str.replace(\n pat=pattern, repl=replacement\n )\n if self.derived_columns is not None:\n dataset.data[self.derived_columns[0]] = replaced_col\n else:\n dataset.data[self.columns[0]] = replaced_col\n\n return dataset", "def assigned(self, uri, newRuleset):\n pass", "def refresh(self):\n log.msg(\"Starting to refresh rulesets...\")\n result = defer.Deferred()\n defer.maybeDeferred(self.dbIter).addCallback(self._refresh, result).addErrback(result.errback)\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the x/y position of the char
def get_char_position(char): i = CHAR_SET.index(char) if args.vertical: y = i % SHEET_HEIGHT x = i // SHEET_HEIGHT else: x = i % SHEET_WIDTH y = i // SHEET_WIDTH return (x, y)
[ "def get_char_coords(x, y):\n\n x = MARGIN_X + (x * (FONT_WIDTH + CHAR_SPACING_X))\n y = MARGIN_Y + (y * (FONT_HEIGHT + CHAR_SPACING_Y))\n\n return (x, y)", "def get_char(self, coord):\n\t\tassert coord.x >= 0 and coord.x < self.width, \"X Coordinate out of range\"\n\t\tassert coord.y >= 0 and coord.y < self.height, \"Y Coordinate out of range\"\n\t\treturn self.content[self.y_max - coord.y][coord.x]", "def find_char_at_pos(self, x, y):\n left = 0\n try:\n subline_no, text = self.find_subline_at_pos(y)\n except TypeError:\n return None\n i = -1\n while text:\n i += 1\n c, text = text[0], text[1:]\n if c in (FormatType.BOLD, FormatType.RESET, FormatType.UNDERLINE):\n continue\n if c == FormatType.COLOR:\n if len(text) > 0 and text[0] in \"0123456789\":\n if len(text) > 1 and text[1] in \"0123456789\":\n text = text[2:]\n i += 2\n else:\n text = text[1:]\n i += 1\n if len(text) > 1 and text[0] == \",\" and text[1] in \"0123456789\":\n if len(text) > 2 and text[2] in \"0123456789\":\n text = text[3:]\n i += 3\n else:\n text = text[2:]\n i += 2\n continue\n\n layout, (width, height) = self.get_pango_layout(c, False)\n\n if left <= x < left + width:\n return subline_no, i, c\n\n left += width\n return subline_no, i + 1, \"\"", "def pos_to_coord(pos):\n x, y = pos\n return \"%s%s\" % (string.letters[x], string.letters[y])", "def char_position(char):\n for i, row in enumerate(keys_lo):\n for j, key in enumerate(row):\n if key == char or char == keys_hi[i][j]:\n return i, j,", "def _text_position(self, size, text):\n width, height = self._font.getsize(text)\n left = (size - width) / 2.0\n # I just don't know why 3 :)\n top = (size - height) / 2.0\n return left, top", "def getindex(self, char):\n return ord(char) - 97", "def get_position(char, table):\n for row in xrange(5):\n for column in xrange(5):\n if table[row][column]==char:\n return [row, column]", "def getCharPositionInAlpha(char):\n ALPHABET = \"abcdefghijklmnopqrstuvwxyz\"\n return ALPHABET.find(char.lower()) + 1", "def pixel_to_position(self, pixel):\n x, y = pixel\n return y // LENGTH, x // LENGTH", "def index(self, char):\n return self.s.index(char, self.ofs)", "def text_to_position(self, text):\n # Check for invalid board positions\n if text[0] not in self.board[0] or text[1] not in self.board[1]:\n print(\"You have entered an invalid chess board location\\n\"\n \"Valid locations are a1 through h8\")\n return\n\n return self.board[0].index(text[0]), self.board[1].index(text[1])", "def get_char(self, char, key):\n idx = self.get_idx(char, key)\n x = self.get_col(idx)\n y = self.get_row(idx)\n\n return Char(x, y)", "def get_coords(self, cell):\n i, j = cell \n return (self.left_offset + i * self.cell_size, self.top_offset + j * self.cell_size)", "def coordinate(self):\n col = get_column_letter(self.column)\n return f\"{col}{self.row}\"", "def get_position(event):\n\tline, column = text.index('insert').split('.')\n\ts = \"line=%s column=%s\" % (line, column)\n\tprint \"Karthik\",\n\tprint s", "def getPos(self, row, col):\n return (row * self.numCols) + col", "def get_position(self):\n return self._rect.x, self._rect.y", "def _compute_position(input, index):\n line = 1\n col = 1\n eol = None # last end of line character\n for c in input[:index]:\n if c == '\\n' or c == '\\r':\n if eol is None or eol == c:\n eol = c\n line += 1\n col = 1\n else:\n # ignore second of '\\n\\r' and '\\r\\n' sequences\n eol = None\n else:\n col += 1\n return (line, col)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the x/y position of the char in pixels
def get_char_coords(x, y): x = MARGIN_X + (x * (FONT_WIDTH + CHAR_SPACING_X)) y = MARGIN_Y + (y * (FONT_HEIGHT + CHAR_SPACING_Y)) return (x, y)
[ "def get_char_position(char):\n i = CHAR_SET.index(char)\n if args.vertical:\n y = i % SHEET_HEIGHT\n x = i // SHEET_HEIGHT\n else:\n x = i % SHEET_WIDTH\n y = i // SHEET_WIDTH\n return (x, y)", "def pixel_to_position(self, pixel):\n x, y = pixel\n return y // LENGTH, x // LENGTH", "def _text_position(self, size, text):\n width, height = self._font.getsize(text)\n left = (size - width) / 2.0\n # I just don't know why 3 :)\n top = (size - height) / 2.0\n return left, top", "def get_char(self, coord):\n\t\tassert coord.x >= 0 and coord.x < self.width, \"X Coordinate out of range\"\n\t\tassert coord.y >= 0 and coord.y < self.height, \"Y Coordinate out of range\"\n\t\treturn self.content[self.y_max - coord.y][coord.x]", "def char_size_px(self) -> \"tuple[int, int]\":\n px, py = self.term_size_px\n rows, cols = self.output.get_size()\n # If we can't get the pixel size, just guess wildly\n return px // cols or 10, py // rows or 22", "def get_coords(self, cell):\n i, j = cell \n return (self.left_offset + i * self.cell_size, self.top_offset + j * self.cell_size)", "def pixel_position(self, y_pixels, x_pixels):\n\n pixel_y = int(y_pixels * (self.y / self.screen_height))\n pixel_x = int(x_pixels * (self.x / self.screen_width))\n\n return (pixel_y, pixel_x)", "def pixelcoord(x, y):\n xp = a * x + b * y + minX\n yp = d * x + e * y + minY\n return xp, yp", "def get_position(self):\n return self._rect.x, self._rect.y", "def _get_pos(self):\r\n \r\n return (self.rect.midbottom[0]-(MAP_TILE_WIDTH/2))/MAP_TILE_WIDTH, (self.rect.midbottom[1]-(MAP_TILE_HEIGHT))/MAP_TILE_HEIGHT", "def pos_to_coord(pos):\n x, y = pos\n return \"%s%s\" % (string.letters[x], string.letters[y])", "def pixel_to_position(self, pixel):\n return int(pixel[1] // self._grid_width), int(pixel[0] // self._grid_width)", "def get_tile_coordinates(tx,ty):\n\n sx = TILE_SIZE * tx + LEFT_MARGIN\n sy = MAZE_SCREEN_HEIGHT - TILE_SIZE * (ty + 1) + DOWN_MARGIN # Reverse !?!\n\n return(sx,sy)", "def find_char_at_pos(self, x, y):\n left = 0\n try:\n subline_no, text = self.find_subline_at_pos(y)\n except TypeError:\n return None\n i = -1\n while text:\n i += 1\n c, text = text[0], text[1:]\n if c in (FormatType.BOLD, FormatType.RESET, FormatType.UNDERLINE):\n continue\n if c == FormatType.COLOR:\n if len(text) > 0 and text[0] in \"0123456789\":\n if len(text) > 1 and text[1] in \"0123456789\":\n text = text[2:]\n i += 2\n else:\n text = text[1:]\n i += 1\n if len(text) > 1 and text[0] == \",\" and text[1] in \"0123456789\":\n if len(text) > 2 and text[2] in \"0123456789\":\n text = text[3:]\n i += 3\n else:\n text = text[2:]\n i += 2\n continue\n\n layout, (width, height) = self.get_pango_layout(c, False)\n\n if left <= x < left + width:\n return subline_no, i, c\n\n left += width\n return subline_no, i + 1, \"\"", "def get_icon_in_position(self, x, y):\r\n for i in self.commands:\r\n if x < self.icons_offsets[i] + self.icons_widths[i] + self.padding\\\r\n / 2:\r\n return i", "def index_at(self, x, y):\n sx, sy = self.pixel_from_screen(x, y)\n return int(sx // self.tile_width), int(sy // self.tile_height)", "def get_position_and_size(self): # XXX buffer size on windows :/\n info = CONSOLE_SCREEN_BUFFER_INFO()\n ctypes.windll.kernel32.GetConsoleScreenBufferInfo(self.handle, ctypes.byref(info))\n # print('getpos', info.dwCursorPosition.X, info.dwCursorPosition.Y, info.dwSize.X, info.dwSize.Y)\n return info.dwCursorPosition.X, info.dwCursorPosition.Y, info.dwSize.X, info.dwSize.Y", "def get_cell_coords(pt):\n\n return int(pt[0] // a), int(pt[1] // 
a), int(pt[2] // a)", "def posToPixel(self, pos):\r\n x_meter = float(pos[0])\r\n y_meter = float(pos[1])\r\n mx = self.meterToMx(x_meter - self.ulX)\r\n my = self.meterToMy(y_meter - self.ulY)\r\n return mx, my" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a symbolic expression for the Meijer Gfunction encapsulated in the class.
def expression(self): x = Symbol('x', real=True) self.expr = hyperexpand(meijerg(self.a_p, self.b_q, self._const * x)) return self.expr
[ "def to_matlab_expr(self, data_name='X', function_name_prefix='') -> str:\n raise NotImplementedError()", "def expression(self):\n return", "def Expression(self) -> _n_4_t_1:", "def f(self):\r\n return self.g()", "def vm_impl_exp(self):\n\n def vm_impl(x):\n x = x.asnumpy()\n res = vm.exp(x)\n return Tensor(res)\n\n return vm_impl", "def get_genf(self, **kwargs) -> Expr:\n if not self.verified():\n raise InvalidOperationError(\"The tiling is not verified\")\n raise NotImplementedError", "def newFunction(self):\n if not self.mesh:\n self.addMesh()\n if not self.space:\n # space takes a long time to construct\n self.space = FunctionSpace(self.mesh, 'CG', 1)\n return Function(self.space)", "def function(self):\n return self.method.function", "def generateFunction(self):\n functionName = self.char\n self.generator()\n\n while self.char is not None and functionName not in ['sin', 'cos', 'tan', 'arcsin', 'arccos', 'arctan', 'log', 'MAD', 'σ', 'Γ']:\n functionName = functionName + self.char\n self.generator()\n\n if self.char == 'h' and functionName in ['sin', 'cos', 'tan']:\n functionName = functionName + self.char\n self.generator()\n\n if functionName == 'log':\n return Token(TokenType.LOGARITHMIC, functionName)\n elif functionName == 'MAD':\n return Token(TokenType.MAD, 'MAD')\n elif functionName == 'Γ':\n return Token(TokenType.GAMMA, 'Γ')\n elif functionName == 'σ':\n return Token(TokenType.STANDARDDEVIATION, 'σ')\n\n return Token(TokenType.TRIG, functionName)", "def _function_for_graph(self, classobj, funcnm, is_static, graph):\n jargtypes, jrettype = self.types_for_graph(graph)\n funcobj = node.GraphFunction(\n self, classobj, funcnm, jargtypes, jrettype, graph, is_static)\n return funcobj", "def func_gen(name: str):\n\n def func(\n expr: Union[\n None,\n PrimExpr,\n Literal[\"inf\", \"-inf\", \"nan\"],\n int,\n float,\n ] = None,\n *,\n is_size_var: bool = False,\n ) -> PrimExpr:\n if isinstance(expr, str):\n expr = float(expr)\n return getattr(_ffi_api, name)(expr, is_size_var)\n\n return func", "def math_func_dict(self):\n return self._math_func_dict", "def f_molGas_dyn(self):\n# print self.M_gas, self.M_dyn\n return self.M_gas / self.M_dyn", "def build_graph_with_function(self):\n @tf.function\n def multiplier_function(v):\n return tf.constant(10.0, name=\"function_multiplier\") * v\n\n tf_g = tf.Graph()\n with tf_g.as_default():\n x = tf.placeholder(name=\"x\", dtype=tf.float32, shape=[])\n y = tf.placeholder(name=\"y\", dtype=tf.float32, shape=[])\n result_op = tf.add(x, y, name=\"add\")\n func_call_op = multiplier_function(result_op)\n _ = tf.identity(func_call_op, name=\"output\")\n return gde.Graph(g=tf_g)", "def obfn_gvar(self):\n\n if self.opt['gEvalY']:\n return self.Y\n else:\n return self.cnst_A(None, self.Xf) - self.cnst_c()", "def function2():\r\n x = sp.Symbol('x')\r\n y = sp.Symbol('y')\r\n # f = 0.2 * x - 20 + 3 * y ** 2\r\n f = 6*x + y**2 - 100\r\n\r\n return f", "def _get_gyre():\n function = LegacyFunctionSpecification()\n function.name = 'get_gyre'\n function.addParameter('index_of_the_star', dtype='int32',\n direction=function.IN, description=\"The index for the star. 
\")\n function.addParameter('mode_l', dtype='int32',\n direction=function.IN, description=\"L mode to find (must match that in gyre.in) \")\n function.addParameter('add_center_point', dtype='bool', direction=function.IN,\n description=\"Whether to add center point\")\n function.addParameter('keep_surface_pointt', dtype='bool', direction=function.IN,\n description=\"Whether to keep surface point\")\n function.addParameter('add_atmosphere', dtype='bool', direction=function.IN,\n description=\"Whether to add atmosphere\")\n function.addParameter('fileout', dtype='string', direction=function.IN,\n description=\"Filename to store data at each radial point\")\n function.result_type = 'int32'\n return function", "def _get_expression(self) -> \"std::string\" :\n return _core.AngleValueCommandInput__get_expression(self)", "def get_jac_lambda_function(self):\n lambda_inputs = self.__sympy_reactions + self.__sympy_species\n return sympy.utilities.lambdify(lambda_inputs, self.__full_system.jacobian(self.__sympy_species))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Property used on composite classes to find all leafobjects. Just returns [self] for a leaf (this class)
def leafObjs(self): return [self]
[ "def get_leaf(self) -> List:\n if self.is_leaf():\n return [self]\n else:\n temp = []\n for tree in self.subtrees:\n temp += tree.get_leaf()\n return temp", "def _get_leaves(self):\n if self:\n leaves = []\n for child in self:\n leaves.extend(child._get_leaves())\n return leaves\n else:\n return [self]", "def all_proper_children(self, obj):\n return self.all_children(obj)[1:]", "def subobjs(self):\n if not hasattr(self, \"_subobjs\"):\n self._subobjs = []\n return self._subobjs", "def leaf_nodes(self, result=None):\n if result is None:\n result = []\n if not self.dtrs:\n result.append(self)\n for dtr in self.dtrs:\n dtr.leaf_nodes(result)\n return result", "def get_all_node(self) -> list:\n if self.is_empty():\n return [self]\n elif self.is_leaf():\n return [self]\n else:\n temp = [self]\n for subtree in self.subtrees:\n temp += subtree.get_all_node()\n return temp", "def findRoots(self) -> list:\n\t\tresult = []\n\t\tfor skill in self:\n\t\t\tif skill.parent[0] == 'NULL':\n\t\t\t\tresult.append(skill)\n\t\treturn result", "def all_ligands(self):\n raise NotImplementedError(\"Implement in your subclass!\")", "def gather_children(self) -> List['Property']:\n result = [self]\n for child in self.children.values():\n result += child.gather_children()\n return result", "def Children(self) -> Dwf3dNavigationTreeNodeCollection:", "def get_children(self):\n\n # Check for timeouts.\n limits_exceeded(throw_error=True)\n \n # The default behavior is to count any VBA_Object attribute as\n # a child.\n if ((hasattr(self, \"_children\")) and (self._children is not None)):\n return self._children\n r = []\n for _, value in self.__dict__.iteritems():\n if (isinstance(value, VBA_Object)):\n r.append(value)\n if ((isinstance(value, list)) or\n (isinstance(value, pyparsing.ParseResults))):\n for i in value:\n if (isinstance(i, VBA_Object)):\n r.append(i)\n if (isinstance(value, dict)):\n for i in value.values():\n if (isinstance(i, VBA_Object)):\n r.append(i)\n self._children = r\n return r", "def getChildren(self) -> \"SoChildList *\":\n return _coin.SoPointLightManip_getChildren(self)", "def sublayers(self):\n return self._sublayers", "def getChildren(self):\n return self.children", "def roots(self) -> Tuple[Tier]:\n return tuple(t for t in self if not t.parent_ref)", "def getChildren(self) -> \"SoChildList *\":\n return _coin.SoDirectionalLightManip_getChildren(self)", "def _direct_superclasses(self):\n return self._directly_connected(rdflib.RDFS.subPropertyOf,\n blacklist=BLACKLIST)", "def getChildren(self) -> \"SoChildList *\":\n return _coin.SoSpotLightManip_getChildren(self)", "def _get_objs(self):\n return []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse out organism name for each genome.
def _assembly_organism_name(self, refseq_archaea_assembly_file, refseq_bacteria_assembly_file, genbank_archaea_assembly_file, genbank_bacteria_assembly_file, output_organism_name_file): fout = open(output_organism_name_file, 'w') for assembly_file in [refseq_archaea_assembly_file, refseq_bacteria_assembly_file, genbank_archaea_assembly_file, genbank_bacteria_assembly_file]: with open(assembly_file) as f: f.readline() header = f.readline().strip().split('\t') org_name_index = header.index('organism_name') for line in f: line_split = line.strip().split('\t') gid = line_split[0] if gid.startswith('GCA_'): gid = 'GB_' + gid else: gid = 'RS_' + gid org_name = line_split[org_name_index] fout.write('%s\t%s\n' % (gid, org_name)) fout.close()
[ "def parse_organism(self):\n string = self.organism\n name, host_genus = \\\n basic.parse_names_from_record_field(string)\n self._organism_name = name\n self._organism_host_genus = host_genus", "def init_name_maps(self):\n map_1 = {}\n with open(self.organisms_code_names_path) as f:\n content = f.readlines()\n content = [x.strip() for x in content]\n for code in content:\n s = code.split('\t')\n map_1[s[0]] = s[1]\n self.short_name_to_full_name_map = map_1\n\n map_2 = {}\n # tree_str = self.newick\n # tree_names = re.split('[\\s+\\n+\\\"\\'\\:\\)\\(\\,\\:\\'\\']', tree_str)\n # tree_names = list(filter(lambda x: x != \"\" and x != ';', tree_names))\n for short_name in self.short_name_to_full_name_map.keys():\n full_name = self.short_name_to_full_name_map[short_name]\n map_2[full_name] = short_name\n\n self.full_name_to_short_name_map = map_2", "def get_organism_name(gca_id):\n\n organism_name_string = \"\"\n\n if gca_id in annotations_dictionary:\n taxa_id = annotations_dictionary[gca_id]['taxa_id']\n organism_name = annotations_dictionary[gca_id]['organism_name']\n\n organism_name_string = str(taxa_id) + \"\\t\" + str(gca_id) + \"\\t\" + str(organism_name)\n\n else:\n organism_name_string = gca_id\n\n return organism_name_string", "def parse(self):\n nac = [None, [], None] # name, address, country\n\n self.translate_to_los()\n if self.check_simple_org_format():\n org_name = self.parse_simple_org()\n nac[ORGNAME] = org_name\n else:\n inetnum_sec = self.find_first_section(('inetnum',))\n if inetnum_sec:\n self.check_inetnum(inetnum_sec)\n else:\n raise UnknownWhoisFormat('Inetnum section was not found')\n\n #looking for address\n role_sec = self.find_first_section(('role',))\n if role_sec:\n self.parse_role(role_sec, nac)\n else:\n person_sec = self.find_first_section(('person',))\n if person_sec:\n self.parse_person(person_sec, nac)\n else:\n raise UnknownWhoisFormat('Role and Person sections were not found')\n\n return nac", "def format_genome(self, sformat='fasta'):\n complete_genomes = \"\"\n if not sformat == 'fasta':\n raise NotImplementedError('Other format are not implemented')\n\n for g in ['G-atp6']:#self.sequences['genes_list']:\n seq = self.sequences['sequences'].get(g, '')\n cur_header = '>{gname} {specname}'.format(\n gname=g, specname=self.sequences['species_name'])\n pos = self.sequences['gpos'].get(g)\n if pos:\n cur_header += \", {size} ({start}:{end})\".format(\n size=len(seq), start=pos[0], end=pos[1])\n complete_genomes += cur_header + \"\\n\" + seq + \"\\n\"\n\n return complete_genomes", "def get_genome(filename):\n file_open = open(filename, \"r\")\n genome = \"\"\n name = \"\"\n for line in file_open:\n if line[0:1] == \">\":\n name = line[1:].rstrip()\n else:\n genome = genome + line.rstrip()\n\n file_open.close()\n return name, genome", "def parse_occupation(self):\n pass", "def collect_all_genomes():\n\n def str2num(s,cat=False,force=True):\n \"\"\"\n Converts string to integer\n eg. ensembl92 to 92\n\n :param s: string\n :param cat: Whether to concatenate detected integers. eg. 20,23 to 2023\n :param force: If True, ignores decimal point error. \n \"\"\"\n import re \n if '.' in s and not force:\n raise ValueError(f\"A string can only be converted to integeres, found a '.' 
in {s}\")\n n=re.findall(r'\\d+',s)\n if len(n)==0:\n raise ValueError(\"No digits found in string {}\".format(s)) \n elif len(n)==1:\n return int(n[0])\n else:\n if cat:\n return int(''.join(n))\n else:\n return n\n\n from glob import glob\n from os.path import dirname,basename,exists\n import numpy as np\n import pandas as pd\n from pyensembl.species import normalize_species_name,Species\n \n # here's how I get the .cache directory eg. '/home/user/.cache/pyensembl'\n import datacache\n pyensembl_cache_dir=f\"{dirname(datacache.get_data_dir())}/pyensembl\" #FIXME if genomes are installed at other places than .cache\n\n # all the assemblies\n assemblies=[basename(p) for p in glob(f\"{pyensembl_cache_dir}/*\")]\n # dataframe that contains all the info (and can be exported as a tsv).\n dspecies=pd.DataFrame(columns=['latin name','release','synonymn','assembly'])\n # assempy to release min max dict needed as an input to create Species object\n assembly2releasesminmax={}\n # following loop populates the dataframe \n genomei=0\n for assembly in assemblies:\n releases=[basename(p) for p in glob(f\"{pyensembl_cache_dir}/{assembly}/*\")]\n for release in releases:\n releasei=str2num(release) #FIXME is realease is a float\n genome_dir=f\"{pyensembl_cache_dir}/{assembly}/{release}\"\n genome_files=glob(f\"{genome_dir}/*\")\n is_genome_installed=True if len(genome_files)>4 else False #FIXME need more than 4 (.gz) files to be strict\n if is_genome_installed:\n dspecies.loc[genomei,'assembly']=assembly\n dspecies.loc[genomei,'release']=releasei\n dspecies.loc[genomei,'synonymn']=basename(genome_files[0]).split('.')[0]\n dspecies.loc[genomei,'latin name']=normalize_species_name(dspecies.loc[genomei,'synonymn'])\n genomei+=1\n # following loop generates the Species object\n for spc in dspecies['latin name'].unique():\n assembly2releases={}\n for assembly in dspecies.loc[(dspecies['latin name']==spc),'assembly'].unique():\n d=dspecies.loc[((dspecies['latin name']==spc) & (dspecies['assembly']==assembly)),:]\n assembly2releases[assembly]=d['release'].min(),d['release'].max() #FIXME if MAX_ENSEMBL_RELEASE very important and has to be used\n Species.register(\n latin_name=spc,\n synonyms=dspecies.loc[(dspecies['latin name']==spc),'synonymn'].unique().tolist(),\n reference_assemblies=assembly2releases)\n Species.dspecies=dspecies\n return Species", "def include_morgan_in_name(self):\n\t\tself.name=self.old_name+str(self.morgan)", "def parse(self):\n nac = [None, [], None] # name, address, country\n\n self.translate_to_los()\n\n if self.check_simple_org_format():\n org_name = self.parse_arin_simple_org()\n nac[ORGNAME] = org_name\n else:\n ref_ser = self.find_referral_server()\n if ref_ser:\n server_name, port_number = ref_ser\n # raw_whois = self.receive_raw_whois(ip_address, server_name, port_number)\n whois_parser = self._manager.create_parser(self._ip_address, server_name, port_number)\n whois_parser.receive_raw_whois()\n nac = whois_parser.parse()\n else:\n self.parse_arin_org(nac)\n return nac", "def split_gene_name(gene_info):\n gene_info = gene_info.replace(\"ID=\", \"\").split()[0]\n gene_info = gene_info.split(\";\")[0]\n gene_info = gene_info.replace(\"CDS:\", \"\")\n gene_info = gene_info.split(\"Note=\")[0]\n gene_info = gene_info.split(\".\")[0]\n return gene_info.rstrip()", "def _parse_title(self):\n return self.agency + \" Meeting\"", "def parse_genre(self):\n msg(\"parsing u.genre\")\n lines = file('/'.join((self.datadir,\"u.genre\"))).read().split('\\n')\n pairs = [line.split('|') for line in 
lines if line]", "def fixGeneNames(self):\n self.name=withinQuotes(self.name)\n for ro in self.lstOfRows:\n ro.geneName=withinQuotes(ro.geneName)", "def parse_annotation_folder(genome_jgi_list, annotation_folder):\n genomes_cog_number = {}\n genomes_cog_category = {}\n genomes_product_name = {}\n genomes_pfam_number = {}\n\n description_cogs = {}\n description_pfams = {}\n\n for genome in genome_jgi_list:\n #This required the file to have the extension .info.xls\n #This can be changed later\n genome_file = annotation_folder + \"/\" + genome + \".info.xls\"\n cog_number, cog_category, product_name, pfam_number, desc_cog, desc_pfam = parse_jgi_annotation(genome_file)\n\n genomes_cog_number.update(cog_number)\n genomes_cog_category.update(cog_category)\n genomes_product_name.update(product_name)\n genomes_pfam_number.update(pfam_number)\n description_cogs.update(desc_cog)\n description_pfams.update(desc_pfam)\n\n return genomes_cog_number, genomes_cog_category, genomes_product_name, genomes_pfam_number, \\\n description_cogs, description_pfams", "def annotations(self):\n for line in self.fp:\n self.lineno += 1\n if not line or line[0] == '!':\n # This is a comment line\n continue\n try:\n # append the organism name to the line, the file.\n # Some wiggleling is necessary, because the last\n # part of the line is actually a newline and three tab\n line = line[0:-2] + self.organism_name\n yield Annotation(line)\n except TypeError as ex:\n raise SyntaxError(\"cannot parse annotation\", self.lineno)", "def get_short_organism_name(self, full_name):\n return self.full_name_to_short_name_map[full_name]", "def _get_alternative_names_adm1(self):\n names = dd(set)\n pg.cur.execute(\"\"\"\n SELECT geonameid\n FROM geonames\n WHERE feature_code IN ('ADM1', 'ADM1H', 'ADM2', 'ADM2H')\n OR geonames.geonameid IN (\n SELECT adm1.geonameid FROM adm1\n )\n \"\"\")\n for geonameid, in pg.cur.fetchall():\n pg.cur.execute(f\"\"\"SELECT name, full_name, population, country_geonameid, adm1_geonameid FROM geonames WHERE geonameid = {geonameid}\"\"\")\n res = pg.cur.fetchone()\n if res is None:\n continue\n name, full_name, population, country_geonameid, adm1_geonameid = res\n if name not in names:\n names[name] = {}\n\n geonameid_info = {\n 'type': 'adm1',\n 'abbreviations': [],\n \"toponym\": name,\n \"geonameid\": geonameid,\n \"population\": population,\n \"country_geonameid\": country_geonameid,\n \"adm1_geonameid\": adm1_geonameid\n }\n names[name][geonameid] = geonameid_info\n\n pg.cur.execute(f\"\"\"SELECT alternate_name, isolanguage, full_name FROM alternate_names WHERE geonameid = {geonameid}\"\"\")\n for name, isolanguage, full_name in pg.cur.fetchall():\n if name not in names:\n names[name] = {}\n if geonameid not in names[name]:\n names[name][geonameid] = geonameid_info\n if isolanguage == 'abbr':\n names[name][geonameid]['abbreviations'].append(full_name)\n return names", "def parse_ncbi_taxonomy(self,\n taxonomy_dir,\n refseq_archaea_assembly_file,\n refseq_bacteria_assembly_file,\n genbank_archaea_assembly_file,\n genbank_bacteria_assembly_file,\n output_prefix):\n\n # parse organism name\n self._assembly_organism_name(refseq_archaea_assembly_file,\n refseq_bacteria_assembly_file,\n genbank_archaea_assembly_file,\n genbank_bacteria_assembly_file,\n output_prefix + '_organism_names.tsv')\n\n # parse metadata file and taxonomy files\n assembly_to_tax_id = self._assembly_to_tax_id(refseq_archaea_assembly_file,\n refseq_bacteria_assembly_file,\n genbank_archaea_assembly_file,\n 
genbank_bacteria_assembly_file)\n\n node_records = self._read_nodes(\n os.path.join(taxonomy_dir, 'nodes.dmp'))\n print('Read %d node records.' % len(node_records))\n\n name_records = self._read_names(\n os.path.join(taxonomy_dir, 'names.dmp'))\n print('Read %d name records.' % len(name_records))\n\n # traverse taxonomy tree for each assembly\n taxonomy_file = output_prefix + '_unfiltered_taxonomy.tsv'\n fout = open(taxonomy_file, 'w')\n\n print('Number of assemblies: %d' % len(assembly_to_tax_id))\n for assembly_accession, tax_id in assembly_to_tax_id.items():\n # traverse taxonomy tree to the root which is 'cellular organism' for genomes,\n # 'other sequences' for plasmids, and 'unclassified sequences' for metagenomic libraries\n taxonomy = []\n cur_tax_id = tax_id\n\n if cur_tax_id not in name_records:\n print('[Warning] Assembly %s has an invalid taxid: %s' % (assembly_accession, tax_id))\n continue\n\n roots = ['cellular organisms', 'other sequences',\n 'unclassified sequences', 'Viruses', 'Viroids']\n while name_records[cur_tax_id].name_txt not in roots:\n if cur_tax_id == '1':\n print('[Error] TaxId %s reached root of taxonomy tree: %s' % (tax_id, taxonomy))\n sys.exit(-1)\n\n try:\n node_record = node_records[cur_tax_id]\n\n if node_record.rank in Taxonomy.rank_labels:\n rank_index = Taxonomy.rank_labels.index(\n node_record.rank)\n rank_prefix = Taxonomy.rank_prefixes[rank_index]\n elif node_record.rank == 'subspecies':\n rank_prefix = 'sb__'\n else:\n # unrecognized rank\n rank_prefix = 'x__'\n if node_record.rank == 'superkingdom':\n rank_prefix = 'd__'\n\n taxonomy.append(\n rank_prefix + name_records[cur_tax_id].name_txt)\n\n cur_tax_id = node_record.parent_tax_id\n except:\n print(traceback.format_exc())\n print(taxonomy)\n\n taxonomy.reverse()\n taxa_str = ';'.join(taxonomy)\n fout.write('%s\\t%s\\n' % (assembly_accession, taxa_str))\n\n fout.close()\n\n self.standardize_taxonomy(taxonomy_file,\n output_prefix + '_standardized.tsv')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine taxonomic identifier for each assembly. Returns a dictionary mapping each assembly accession to its NCBI taxonomy ID.
def _assembly_to_tax_id(self, refseq_archaea_assembly_file, refseq_bacteria_assembly_file, genbank_archaea_assembly_file, genbank_bacteria_assembly_file): d = {} for assembly_file in [refseq_archaea_assembly_file, refseq_bacteria_assembly_file, genbank_archaea_assembly_file, genbank_bacteria_assembly_file, ]: with open(assembly_file) as f: headers = f.readline().strip().split('\t') try: taxid_index = headers.index('taxid') except: # look for taxid on the next line as NCBI sometimes puts # an extra comment on the first line headers = f.readline().split('\t') taxid_index = headers.index('taxid') for line in f: line_split = line.strip().split('\t') assembly_accession = line_split[0] taxid = line_split[taxid_index] if assembly_accession in d: print('[Error] Duplicate assembly accession: %s' % assembly_accession) sys.exit(-1) d[assembly_accession] = taxid return d
[ "def get_tax_id(species):\n species = species.replace(\" \", \"+\").strip()\n search = Entrez.esearch(term = species, db = \"taxonomy\", retmode = \"xml\")\n record = Entrez.read(search)\n return record['IdList'][0]", "def getTaxid(namelist): \n accessid = []\n for i in namelist:\n name2taxid = ncbi.get_name_translator([i])\n if name2taxid == {}:\n print(\"Wrong Taxon name: \" + i + \"!\")\n exit()\n return\n else:\n accessid.append(name2taxid)\n return accessid", "def getTaxon(self):\n return self.datasets[\"taxon_id\"].keys()", "def get_ncbiid_from_tax_name(self, tax_name):\n ncbi_id = None\n if tax_name in self.spn_to_ncbiid:\n ncbi_id = self.spn_to_ncbiid[tax_name]\n else:\n try:\n tries = 15\n for i in range(tries):\n try:\n Entrez.email = self.config.email\n if tries >= 5:\n ncbi_id = Entrez.read(Entrez.esearch(db=\"taxonomy\", term=tax_name, RetMax=100))['IdList'][0]\n else:\n tax_name = \"'{}'\".format(tax_name)\n ncbi_id = Entrez.read(Entrez.esearch(db=\"taxonomy\", term=tax_name, RetMax=100))['IdList'][0]\n\n ncbi_id = int(ncbi_id)\n except (IndexError, HTTPError) as err:\n if i < tries - 1: # i is zero indexed\n continue\n else:\n raise\n break\n except (IndexError, HTTPError) as err:\n debug(\"except\")\n try:\n ncbi = NCBITaxa()\n tax_info = ncbi.get_name_translator([tax_name])\n debug(tax_info)\n if tax_info == {}:\n tax_name = \"'{}'\".format(tax_name)\n tax_info = ncbi.get_name_translator([tax_name])\n ncbi_id = int(tax_info.items()[0][1][0])\n except (IndexError, HTTPError) as err:\n sys.stderr.write(\"Taxon name does not match any name in ncbi. Check that name is written \"\n \"correctly: {}! We set it to unidentified\".format(tax_name))\n tax_name = 'unidentified'\n ncbi_id = 0\n # TODO: ADD otudict entries....\n ncbi_id = int(ncbi_id)\n assert type(ncbi_id) is int\n self.spn_to_ncbiid[tax_name] = ncbi_id\n return ncbi_id", "def get_ncbi_tax_id(handle):\n ncbi_id = None\n gb_list = handle[0][\"GBSeq_feature-table\"][0][\"GBFeature_quals\"]\n for item in gb_list:\n if item[u\"GBQualifier_name\"] == \"db_xref\":\n if item[u\"GBQualifier_value\"][:5] == \"taxon\":\n ncbi_id = int(item[u\"GBQualifier_value\"][6:])\n break\n else:\n continue\n return ncbi_id", "def _get_index_taxonomy_id(self):\n if self.__index_by_taxonomy_id is None:\n self.__index_by_taxonomy_id = \\\n self.__index_data_by_taxonomy_ensembl_special_case(self._get_species_data_dao())\n # Generic and beautiful old way of building the index\n # self.__index_data_for_property(self._get_species_data_dao(), Species.get_ncbi_taxonomy_id)\n return self.__index_by_taxonomy_id", "def generate_ancestral_taxon_id(name, rank, *, alt_taxon_id=None, taxon_ids=None):\n if taxon_ids is None:\n taxon_ids = set({})\n increment = 0\n while True:\n # TODO: make robust to imports from separate files\n anc_taxon_id = \"anc_%s\" % name\n if increment:\n anc_taxon_id += \"_%d\" % increment\n if anc_taxon_id not in taxon_ids:\n taxon_ids.add(anc_taxon_id)\n return anc_taxon_id\n increment += 1", "def _assembly_organism_name(self, refseq_archaea_assembly_file, refseq_bacteria_assembly_file,\n genbank_archaea_assembly_file, genbank_bacteria_assembly_file, output_organism_name_file):\n\n fout = open(output_organism_name_file, 'w')\n for assembly_file in [refseq_archaea_assembly_file, refseq_bacteria_assembly_file,\n genbank_archaea_assembly_file, genbank_bacteria_assembly_file]:\n with open(assembly_file) as f:\n f.readline()\n header = f.readline().strip().split('\\t')\n org_name_index = header.index('organism_name')\n\n for line in 
f:\n line_split = line.strip().split('\\t')\n\n gid = line_split[0]\n if gid.startswith('GCA_'):\n gid = 'GB_' + gid\n else:\n gid = 'RS_' + gid\n org_name = line_split[org_name_index]\n fout.write('%s\\t%s\\n' % (gid, org_name))\n fout.close()", "def get_ancestor_taxID(target_taxID, nancestorNodes, outdir):\n\n\n cmds_ancestor_taxID = [\"from ete3 import NCBITaxa; ncbi = NCBITaxa()\",\n \"ancestor_taxID = int(ncbi.get_lineage(%i)[-%i])\"%(target_taxID, nancestorNodes),\n \"print(ancestor_taxID)\"]\n\n ancestor_taxID_std = \"%s/ancestor_taxID.std\"%outdir \n run_cmd(\"python -c '%s' > %s\"%(\"; \".join(cmds_ancestor_taxID), ancestor_taxID_std), env=EnvName_ete3)\n\n ancestor_taxID = int(open(ancestor_taxID_std, \"r\").readlines()[0])\n\n return ancestor_taxID", "def get_taxon_number_dict(alignment):\n taxon_number_dict = {}\n with open(alignment) as infh:\n started = False\n taxon_num = 0\n for i in infh:\n if i.startswith('matrix') or i.startswith('\\tMATRIX'):\n started = True\n if i.startswith(';'):\n break\n \n if started and not i.startswith('matrix') and not i.startswith('\\tMATRIX'):\n taxon_num += 1\n taxon_name = i.rsplit(' ', 1)[0].strip()\n taxon_number_dict[taxon_name] = taxon_num\n\n assert taxon_number_dict != {}, \"\"\"Could not generate a dictionary of taxon\n numbers from nexus alignment file.\"\"\"\n\n return taxon_number_dict", "def lookup_taxa_by_taxon_id(es, values, template, *, return_type=\"list\"):\n taxa = []\n if return_type == \"dict\":\n taxa = {}\n res = document_by_id(\n es, [\"taxon-%s\" % value for value in values], template[\"index_name\"]\n )\n if res is not None:\n if return_type == \"list\":\n taxa = [key.replace(\"taxon-\", \"\") for key in res.keys()]\n else:\n for key, source in res.items():\n taxon_id = key.replace(\"taxon-\", \"\")\n taxa.update({taxon_id: {\"_id\": key, \"_source\": source}})\n # taxon_res = query_keyword_value_template(\n # es,\n # \"attributes_names_by_keyword_value\",\n # \"taxon_id\",\n # values,\n # index=template[\"index_name\"],\n # )\n # if taxon_res is not None:\n # for response in taxon_res[\"responses\"]:\n # if \"hits\" in response and response[\"hits\"][\"total\"][\"value\"] == 1:\n # if return_type == \"list\":\n # taxa.append(response[\"hits\"][\"hits\"][0])\n # else:\n # taxa[response[\"hits\"][\"hits\"][0][\"_source\"][\"taxon_id\"]] = response[\n # \"hits\"\n # ][\"hits\"][0]\n # elif return_type == \"list\":\n # taxa.append(None)\n return taxa", "def parse_ncbi_taxonomy(self,\n taxonomy_dir,\n refseq_archaea_assembly_file,\n refseq_bacteria_assembly_file,\n genbank_archaea_assembly_file,\n genbank_bacteria_assembly_file,\n output_prefix):\n\n # parse organism name\n self._assembly_organism_name(refseq_archaea_assembly_file,\n refseq_bacteria_assembly_file,\n genbank_archaea_assembly_file,\n genbank_bacteria_assembly_file,\n output_prefix + '_organism_names.tsv')\n\n # parse metadata file and taxonomy files\n assembly_to_tax_id = self._assembly_to_tax_id(refseq_archaea_assembly_file,\n refseq_bacteria_assembly_file,\n genbank_archaea_assembly_file,\n genbank_bacteria_assembly_file)\n\n node_records = self._read_nodes(\n os.path.join(taxonomy_dir, 'nodes.dmp'))\n print('Read %d node records.' % len(node_records))\n\n name_records = self._read_names(\n os.path.join(taxonomy_dir, 'names.dmp'))\n print('Read %d name records.' 
% len(name_records))\n\n # traverse taxonomy tree for each assembly\n taxonomy_file = output_prefix + '_unfiltered_taxonomy.tsv'\n fout = open(taxonomy_file, 'w')\n\n print('Number of assemblies: %d' % len(assembly_to_tax_id))\n for assembly_accession, tax_id in assembly_to_tax_id.items():\n # traverse taxonomy tree to the root which is 'cellular organism' for genomes,\n # 'other sequences' for plasmids, and 'unclassified sequences' for metagenomic libraries\n taxonomy = []\n cur_tax_id = tax_id\n\n if cur_tax_id not in name_records:\n print('[Warning] Assembly %s has an invalid taxid: %s' % (assembly_accession, tax_id))\n continue\n\n roots = ['cellular organisms', 'other sequences',\n 'unclassified sequences', 'Viruses', 'Viroids']\n while name_records[cur_tax_id].name_txt not in roots:\n if cur_tax_id == '1':\n print('[Error] TaxId %s reached root of taxonomy tree: %s' % (tax_id, taxonomy))\n sys.exit(-1)\n\n try:\n node_record = node_records[cur_tax_id]\n\n if node_record.rank in Taxonomy.rank_labels:\n rank_index = Taxonomy.rank_labels.index(\n node_record.rank)\n rank_prefix = Taxonomy.rank_prefixes[rank_index]\n elif node_record.rank == 'subspecies':\n rank_prefix = 'sb__'\n else:\n # unrecognized rank\n rank_prefix = 'x__'\n if node_record.rank == 'superkingdom':\n rank_prefix = 'd__'\n\n taxonomy.append(\n rank_prefix + name_records[cur_tax_id].name_txt)\n\n cur_tax_id = node_record.parent_tax_id\n except:\n print(traceback.format_exc())\n print(taxonomy)\n\n taxonomy.reverse()\n taxa_str = ';'.join(taxonomy)\n fout.write('%s\\t%s\\n' % (assembly_accession, taxa_str))\n\n fout.close()\n\n self.standardize_taxonomy(taxonomy_file,\n output_prefix + '_standardized.tsv')", "def getTaxnameFromGene(self, geneobject):\n\t\traise NotImplementedError(\"Abstract Base Class\")", "def get_taxid_from_name( in_name ):\n\n # uniref naming convention\n split_name = in_name.split( 'TaxID=' )\n\n # uniprot naming convention\n if len( split_name == 1 ):\n split_name = in_name.split( 'OX=' )\n\n return_name = None\n if len( split_name ) > 1:\n split_name = split_name[ 1 ].split()\n return_name = split_name[ 0 ]\n return return_name", "def acc_to_taxonomy(acc, conn, protein=False, verbose=False):\n\n global data\n cur = conn.cursor()\n if acc in data['acc2tax']:\n taxid = data['acc2tax'][acc]\n return taxid, data['node'][taxid], data['name'][taxid]\n\n db = \"nucl2taxid\"\n if protein:\n db = \"prot2taxid\"\n\n if \".\" in acc:\n sqlexe=f\"select tax_id from {db} where accession_version = ?\"\n else:\n sqlexe=f\"select tax_id from {db} where accession = ?\"\n\n cur.execute(sqlexe, [acc])\n res = cur.fetchone()\n if not res:\n print(f\"ERROR: No taxid for {acc}. 
Skipped\", file=sys.stderr)\n return None, None, None\n\n p = res[0]\n data['acc2tax'][acc] = p\n if verbose:\n print(f\"GI: {acc} Taxonomy: {p}\", file=sys.stderr)\n t, n = get_taxonomy(p, conn)\n return p, t, n", "def LCA(self, tax_ids):\n tax_ids = [x for x in tax_ids if x]\n if not tax_ids:\n return None\n if len(tax_ids) == 1:\n return tax_ids[0]\n lineages = self.get_lineages(tax_ids)\n if lineages == [None]:\n return None\n common_ancestors = set(lineages[0]).intersection(*lineages[1:])\n for id in lineages[0]:\n if id in common_ancestors:\n return id", "def taxonomy():\n \n taxo={}\n xls = pd.ExcelFile('sources/Taxonomy For Domain bf and sbf.XLSX') #read excel file with multiple sheets\n sheet = xls.parse(2) #Go to the sheet which is important for us\n domaine=sheet.loc[(sheet[\"Domaine\"].isnull()==False)][\"Domaine\"] #Delete NaN elements\n sn=xls.sheet_names #sheets name (1- signalisation 2-IFTE and so on)\n #creation of taxo domain\n for dom in domaine:\n dom1=dom[3:].lower()\n taxo[dom1]=[]\n if dom in sn:\n sheet=xls.parse(sn.index(dom))\n df=sheet.loc[(sheet[\"Sous bloc fonctionnel\"].isnull()==False)]\n taxo[dom1].append(blocf(df))\n return taxo", "def _generate_assembly_id(self, invoice_month):\n fake_assembly_id = \":\".join([str(self._provider_uuid), self.etag, str(invoice_month)])\n return fake_assembly_id", "def get_taxa_to_create(\n es,\n opts,\n *,\n taxonomy_name,\n taxon_ids=None,\n asm_by_taxon_id=None,\n):\n taxa_to_create = {}\n if not taxon_ids:\n return {}\n if asm_by_taxon_id is None:\n asm_by_taxon_id = {}\n taxonomy_template = taxonomy_index_template(taxonomy_name, opts)\n taxonomy_res = query_value_template(\n es,\n \"taxonomy_node_by_taxon_id\",\n taxon_ids,\n taxonomy_template[\"index_name\"],\n )\n if taxonomy_res is None:\n LOGGER.error(\n \"Could not connect to taxonomy index '%s'\",\n taxonomy_template[\"index_name\"],\n )\n sys.exit(1)\n ancestors = set()\n for taxonomy_result in taxonomy_res[\"responses\"]:\n if taxonomy_result[\"hits\"][\"total\"][\"value\"] == 1:\n source = taxonomy_result[\"hits\"][\"hits\"][0][\"_source\"]\n taxa_to_create[source[\"taxon_id\"]] = source\n for ancestor in source[\"lineage\"]:\n ancestors.add(ancestor[\"taxon_id\"])\n if source[\"taxon_id\"] in asm_by_taxon_id:\n for asm in asm_by_taxon_id[source[\"taxon_id\"]]:\n add_taxonomy_info_to_meta(asm, source)\n taxonomy_res = query_value_template(\n es,\n \"taxonomy_node_by_taxon_id\",\n list(ancestors),\n taxonomy_template[\"index_name\"],\n )\n if taxonomy_res and \"responses\" in taxonomy_res:\n for taxonomy_result in taxonomy_res[\"responses\"]:\n if taxonomy_result[\"hits\"][\"total\"][\"value\"] == 1:\n source = taxonomy_result[\"hits\"][\"hits\"][0][\"_source\"]\n taxa_to_create[source[\"taxon_id\"]] = source\n return taxa_to_create" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if species name is a valid binomial name.
def _valid_species_name(self, species_name, require_full=True, require_prefix=True): if species_name == 's__': return True, None # remove single quotes as sometimes given for # candidatus species names species_name = species_name.replace("'", "") # test for prefix if require_prefix: if not species_name.startswith('s__'): return False, 'name is missing the species prefix' # remove prefix before testing other properties test_name = species_name if test_name.startswith('s__'): test_name = test_name[3:] # test for full name if require_full: if 'candidatus' in test_name.lower(): if len(test_name.split(' ')) <= 2: return False, 'name appears to be missing the generic name' else: if len(test_name.split(' ')) <= 1: return False, 'name appears to be missing the generic name' # get putative binomial name if 'candidatus' in test_name.lower(): sp_name = ' '.join(test_name.split()[0:3]) else: sp_name = ' '.join(test_name.split()[0:2]) # check for tell-tale signs on invalid species names if sp_name[0].islower(): return False, 'first letter of name is lowercase' if sp_name.split()[-1].isupper(): return False, 'first letter of specific name is uppercase' if " bacterium" in sp_name.lower(): return False, "name contains the word 'bacterium'" if " bacteirum" in sp_name.lower(): return False, "name contains the word 'bacteirum'" if " bacteria" in sp_name.lower(): return False, "name contains the word 'bacteria'" if " archaea" in sp_name.lower(): return False, "name contains the word 'archaea'" if " archaeon" in sp_name.lower(): return False, "name contains the word 'archaeon'" if " archeaon" in sp_name.lower(): return False, "name contains the word 'archeaon'" if " archaeum" in sp_name.lower(): return False, "name contains the word 'archaeum'" if "cyanobacterium" in sp_name.lower().split()[-1]: return False, "specific name is 'cyanobacterium'" if " group" in sp_name.lower(): return False, "name contains 'group'" if " subdivision" in sp_name.lower(): return False, "name contains 'subdivision'" if " taxon" in sp_name.lower(): return False, "name contains 'taxon'" if " cluster" in sp_name.lower(): return False, "name contains 'cluster'" if " clade" in sp_name.lower(): return False, "name contains 'clade'" if " of " in sp_name.lower(): return False, "name contains 'of'" if 'sp.' in sp_name.lower(): return False, "name contains 'sp.'" if 'cf.' 
in sp_name.lower(): return False, "name contains 'cf.'" if ' endosymbiont' in sp_name.lower(): return False, "name contains 'endosymbiont'" if ' symbiont' in sp_name.lower(): return False, "name contains 'symbiont'" if ' mycovirus' in sp_name.lower(): return False, "name contains 'mycovirus'" if sp_name.lower().split()[1] == 'oral': return False, "specific name is 'oral'" if 'candidatus' in sp_name.lower() and sp_name.lower().split()[2] == 'oral': return False, "specific name is 'oral'" if '-like' in test_name.lower(): return False, "full name contains '-like'" if 'endosymbiont' in test_name.lower().split(): return False, "full name contains 'endosymbiont'" if 'symbiont' in test_name.lower().split(): return False, "full name contains 'symbiont'" if 'mycovirus' in test_name.lower().split(): return False, "full name contains 'mycovirus'" if 'phytoplasma' in test_name.split(): # note the Phytoplasma is a valid genus so we are # specifically looking for a lowercase 'p' return False, "full name contains 'phytoplasma'" # check that binomial name contains only valid characters for ch in sp_name: # *** if not ch.isalpha() and ch not in [' ', '[', ']']: return False, 'species name contains invalid character' return True, 's__' + sp_name
[ "def is_legal_bag_name(name):\n for pat in (BAGNAME04_RE, BAGNAME02_RE):\n if pat.match(name):\n return True\n return False", "def check_libname(name):\n # name = <str>\n # ch = <str>\n # return <int>|<bool>\n name = str(name)\n if not name:\n return 0\n return (name[0] in _FIRST_LETTERS and\n all(ch in _OTHER_LETTERS for ch in name[1:]))", "def is_valid_name(name):\n return isinstance(name, str) and bool(Command._name_pattern.fullmatch(name))", "def is_bank_name_valid(self, name_to_check: str):\n return True if (not len(name_to_check) > 12) and (name_to_check.isalpha()) else False", "def has_badname(name):\n return len(BAD_NAMES_REGEX.findall(name)) > 0", "def SbName_isBaseNameChar(c: 'char const') -> \"SbBool\":\n return _coin.SbName_isBaseNameChar(c)", "def normalize_species_binomial(genus, species):\n def equal_ignoring_rank(a, b):\n a = a.split('__', 1)[1]\n b = b.split('__', 1)[1]\n return a == b\n\n def equal_ignoring_polytag(a, b):\n a = a.split('__', 1)[1]\n b = b.split('__', 1)[1]\n a = a.split('_', 1)[0]\n b = b.split('_', 1)[0]\n return a == b\n\n genus_from_species, species_from_species = species.split(' ', 1)\n\n if genus_from_species == genus:\n return species\n\n genus_match = POLY_RE.match(genus)\n genus_from_species_match = POLY_RE.match(genus_from_species)\n\n if genus_from_species_match and not genus_match:\n # this can happen from name placement if the input taxonomy does not\n # correspond well in this region to the phylogeny\n return species\n\n if genus_match and genus_from_species_match:\n if equal_ignoring_rank(genus, genus_from_species):\n return species\n if not equal_ignoring_polytag(genus, genus_from_species):\n raise ValueError(\"%s, but we have %s\" % (species, genus))\n\n if genus_match:\n genus_with_poly = EXTRACT_POLY_GENUS.match(genus).groups()[0]\n genus_match_group = genus_match.groups()[0]\n genus_match_without_rank = genus_match_group.split('__', 1)[1]\n genus_without_rank = genus_from_species.split('__', 1)[1]\n\n if genus_match_without_rank not in genus_without_rank:\n # this would happen if we place a species name under an unexpected\n # genera, which is realistic. we should not create new species\n # names\n return species\n else:\n # we have a reasonable polyphyletic label\n\n return f's__{genus_with_poly} {species_from_species}'\n else:\n # we do not have a polyphyletic genus so let's move on\n return species", "def check_genus_names(taxon_name, taxon_status):\n \n try:\n assert(taxon_name[-4:] not in family_group or taxon_name[-3:] != 'ini')\n assert(len(taxon_name.split(' ')) == 1)\n assert(taxon_status == 'genus')\n print('(o) Cool! Correctly identified as a genus.')\n\n except:\n print('(x) WRONG! 
Not a genus!')", "def checkChemical(name):\n df = pd.read_csv(CPD_NAME2BRD_ID_PATH, index_col=\"CPD_NAME\")\n return name in df.index or name in df[\"BROAD_ID\"]", "def isBaseNameChar(c: 'char const') -> \"SbBool\":\n return _coin.SbName_isBaseNameChar(c)", "def is_valid_name(name):\n return isinstance(name, str) and bool(Option._name_pattern.fullmatch(name))", "def __is_valid_char_name(char):\n return char.isalpha() or char.isnumeric() or char in Project.VALID_NAME_SPECIAL_CHARS", "def IsSpecialName(self) -> bool:", "def name_sanity_check(self, name, nametype):\n reason = None\n if not name.isalnum():\n reason = \"Please choose a {} name that consists of alphanumeric characters.\\n\".format(nametype)\n if len(name) > Chat.MAXLEN_NAME:\n reason = \"The {} name can't be longer than {} characters.\\n\".format(nametype, Chat.MAXLEN_NAME)\n return reason", "def validate_name(property, name):\n if not name:\n raise Exception('Uh-oh. You forgot a name!')\n elif len(name) > 128:\n raise Exception('Uh-oh. That name is too long!')", "def is_name(word):\n letters = string.ascii_uppercase + 'Ñ'\n names = dict.fromkeys(letters, [])\n \n load_array('files/ner/per_esp_s.dat', names)\n\n if word.title():\n first_letter=word[0]\n if re.search('[ÑA-Z]', first_letter)!=None and re.compile(names[first_letter]).search(word):\n return 'PER'\n else:\n return False", "def check_varname(self, value):\n i = 0\n for name in self.longdick_names:\n if name.lower() in value.lower():\n i += 1\n if i == 0:\n raise Exception(f\"Variable name {value} is not valid in LongDick!\")", "def name_is_valid(self, name):\n if isinstance(name, str):\n return not name.endswith(('_worker', '_localCollector', '_globalCollector'))\n else:\n return False", "def validate_process_name(fullname):\n\tif not isinstance(fullname, basestring):\n\t\traise TypeError(\"Process name:%r is not a string.\" % fullname)\n\telif not _re_proc_fullname.match(fullname):\n\t\traise ValueError(\"Process name:%r is not a string of alphanumeric/underscored basenames separated by periods.\" % fullname)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Produce a standardized 7-rank taxonomy file from NCBI taxonomy strings.
def standardize_taxonomy(self, ncbi_taxonomy_file, output_consistent): fout_consistent = open(output_consistent, 'w') failed_filters = set() for line in open(ncbi_taxonomy_file): line_split = line.strip().split('\t') gid = line_split[0] taxonomy = line_split[1].split(';') if not ('d__Bacteria' in taxonomy or 'd__Archaea' in taxonomy): continue # remove unrecognized ranks (i.e., 'x__') and strain classification revised_taxonomy = [] for t in taxonomy: if not t.startswith('x__') and not t.startswith('st__') and ' family' not in t.lower() : revised_taxonomy.append(t) # create longest taxonomy string possible with canonical ranks canonical_taxonomy = {} for i, taxon in enumerate(revised_taxonomy): rank_prefix = taxon[0:3] if rank_prefix in Taxonomy.rank_prefixes: if rank_prefix == 's__': valid_name, canonical_species_name = self._valid_species_name( taxon) if valid_name: canonical_taxonomy[Taxonomy.rank_prefixes.index( rank_prefix)] = canonical_species_name else: if ('full name' in canonical_species_name and ('oral' in canonical_species_name or '-like' in canonical_species_name or 'endosymbiont' in canonical_species_name or 'symbiont' in canonical_species_name or 'mycovirus' in canonical_species_name or 'phytoplasma' in canonical_species_name)): failed_filters.add(taxon) else: canonical_taxonomy[Taxonomy.rank_prefixes.index( rank_prefix)] = taxon # fill in missing ranks where possible if canonical_taxonomy: for i in range(0, max(canonical_taxonomy.keys())): if i in canonical_taxonomy and (i + 1) not in canonical_taxonomy: canonical_taxonomy[i + 1] = Taxonomy.rank_prefixes[i + 1] cur_taxonomy = [] for i in range(0, len(Taxonomy.rank_prefixes)): if i in canonical_taxonomy: cur_taxonomy.append(canonical_taxonomy[i]) else: break # unable to correctly determine a valid taxonomy below this rank if len(cur_taxonomy) > 0: if len(cur_taxonomy) != len(Taxonomy.rank_prefixes): cur_taxonomy = cur_taxonomy + \ list(Taxonomy.rank_prefixes[len(cur_taxonomy):]) fout_consistent.write('%s\t%s\n' % (gid, ';'.join(cur_taxonomy))) fout_consistent.close() # Sanity check particular filters fout = open('failed_filters.tsv', 'w') for sp in failed_filters: fout.write(sp + '\n') fout.close() print('Genomes with a consistent taxonomy written to: %s' % output_consistent)
[ "def create_taxonomy(genome_record):\n # Get taxonomy object from NCBI\n rec = Entrez.read(Entrez.elink(db='taxonomy', dbfrom='nuccore',\n id=genome_record.annotations['gi'],\n linkname='nuccore_taxonomy'))\n tax_id = rec[0]['LinkSetDb'][0]['Link'][0]['Id']\n handle = Entrez.efetch(db='Taxonomy', id=tax_id, retmode='xml')\n records = Entrez.read(handle)\n assert len(records) == 1, \"More than one taxonomy records retrieved.\"\n record = records[0]\n record['LineageEx'].append({'Rank': records[0]['Rank'],\n 'ScientificName': records[0]['ScientificName'],\n 'TaxId': records[0]['TaxId']})\n # The lineage that NCBI returns seems already sorted, but to be safe, sort it here.\n lineage = []\n for rank in ['phylum', 'class', 'order', 'family', 'genus', 'species']:\n r = filter(lambda x: x['Rank']==rank, record['LineageEx'])\n assert len(r) <= 1\n lineage.extend(r)\n assert len(lineage) >= 1, \"Number of lineages greater than one\"\n p = None\n for item in lineage:\n p,_ = Taxonomy.objects.get_or_create(rank=item['Rank'],\n taxonomy_id=item['TaxId'],\n name=item['ScientificName'],\n parent=p)\n return p", "def make_taxon_to_sci_name(folder):\n print('...making taxon_to_sci_name dictionary...')\n archive_name = os.path.join(folder, 'taxdump.tar.gz')\n archive = tarfile.open(archive_name)\n names = archive.extractfile('names.dmp')\n taxon_to_name = {}\n \n # read file and save names from 'scientific name' lines\n line = names.readline()\n line = line.decode('utf-8')\n line = line.rstrip()\n while line:\n item = line.split('\\t')\n if item[6] == 'scientific name':\n taxon_to_name[int(item[0])] = item[2]\n line = names.readline()\n line = line.decode('utf-8')\n line = line.rstrip()\n names.close()\n \n # there may be some gi numbers that have taxon id of zero\n taxon_to_name[0] = 'Zero_taxon_number'\n return taxon_to_name", "def create_or_update_taxonomy(gbif_data):\n try:\n species_key = gbif_data['nubKey']\n except KeyError:\n species_key = gbif_data['key']\n try:\n rank = TaxonomicRank[gbif_data['rank']].name\n except KeyError:\n logger.error('No RANK')\n return None\n if 'scientificName' not in gbif_data:\n logger.error('No scientificName')\n return None\n if 'canonicalName' not in gbif_data:\n logger.error('No canonicalName')\n return None\n canonical_name = gbif_data['canonicalName']\n scientific_name = gbif_data['scientificName']\n taxa = Taxonomy.objects.filter(\n scientific_name=scientific_name,\n canonical_name=canonical_name,\n taxonomic_status=TaxonomicStatus[\n gbif_data['taxonomicStatus']].name,\n rank=rank,\n )\n if not taxa:\n taxonomy = Taxonomy.objects.create(\n scientific_name=scientific_name,\n canonical_name=canonical_name,\n taxonomic_status=TaxonomicStatus[\n gbif_data['taxonomicStatus']].name,\n rank=rank,\n )\n else:\n taxonomy = taxa[0]\n taxonomy.gbif_key = species_key\n merge_taxa_data(species_key, taxonomy)\n\n vernacular_names = get_vernacular_names(species_key)\n if vernacular_names:\n print('Found %s vernacular names' % len(\n vernacular_names['results']))\n for result in vernacular_names['results']:\n fields = {}\n if 'source' in result:\n fields['source'] = result['source']\n if 'language' in result:\n fields['language'] = result['language']\n if 'taxonKey' in result:\n fields['taxon_key'] = int(result['taxonKey'])\n try:\n vernacular_name, status = VernacularName.objects.get_or_create(\n name=result['vernacularName'],\n **fields\n )\n except VernacularName.MultipleObjectsReturned:\n vernacular_name = VernacularName.objects.filter(\n name=result['vernacularName'],\n 
**fields\n )[0]\n taxonomy.vernacular_names.add(vernacular_name)\n taxonomy.save()\n return taxonomy", "def clean_taxonomy_file(taxonomy_file):\n taxon_dir = os.path.dirname(taxonomy_file)\n output_path = \"{}/taxonomy_clean.tsv\".format(taxon_dir)\n if not os.path.exists(output_path):\n # clean taxonomy file, writes cleaned file to taxonomy_clean.tsv\n os.system('grep -a -v \"major_rank_conflict\" ' + taxonomy_file + ' | egrep -a -v \"sibling_higher\" | egrep -a -v \"varietas\" | egrep -a -v \"no rank\" | egrep -a -v \"Incertae\" | egrep -a -v \"incertae\" | egrep -a -v \"uncultured\" | egrep -a -v \"barren\" | egrep -a -v \"extinct\" | egrep -a -v \"unplaced\" | egrep -a -v \"hidden\" | egrep -a -v \"inconsistent\" | egrep -a -v \"synonym\" > {}'.format(output_path))\n assert os.path.exists(output_path)\n return output_path", "def make_uniprot_to_taxon(folder):\n print('...making scientific names and IDs to taxon dictionaries...')\n speclist = open(os.path.join(folder, 'speclist.txt'), 'r')\n sci_to_taxon = {}\n id_to_taxon = {}\n \n # read file and save names from 'scientific name' lines\n start = False\n while True:\n line = speclist.readline()\n if not line: break\n if 'Real organism codes' in line:\n start = True\n if '\"Virtual\" codes' in line:\n start = False\n if start:\n line = line.rstrip()\n line = line.lstrip()\n item = line.split('N=')\n if len(item) > 1:\n name = item[1]\n if name == 'Official (scientific) name':\n continue\n bit = item[0].split()\n spec_id = '_' + bit[0]\n taxon = bit[2][:-1]\n sci_to_taxon[name] = int(taxon)\n id_to_taxon[spec_id] = int(taxon)\n \n # close file and return dictionaries\n speclist.close()\n return sci_to_taxon, id_to_taxon", "def _taxonomy_tree_from_features(self, features):\n feature_taxons = self._features.loc[features]\n tree_data = ((i, [taxon.lstrip() for taxon in lineage.split(';')])\n for i, lineage in feature_taxons['Taxon'].items())\n return skbio.TreeNode.from_taxonomy(tree_data)", "def parse_ncbi_taxonomy(self,\n taxonomy_dir,\n refseq_archaea_assembly_file,\n refseq_bacteria_assembly_file,\n genbank_archaea_assembly_file,\n genbank_bacteria_assembly_file,\n output_prefix):\n\n # parse organism name\n self._assembly_organism_name(refseq_archaea_assembly_file,\n refseq_bacteria_assembly_file,\n genbank_archaea_assembly_file,\n genbank_bacteria_assembly_file,\n output_prefix + '_organism_names.tsv')\n\n # parse metadata file and taxonomy files\n assembly_to_tax_id = self._assembly_to_tax_id(refseq_archaea_assembly_file,\n refseq_bacteria_assembly_file,\n genbank_archaea_assembly_file,\n genbank_bacteria_assembly_file)\n\n node_records = self._read_nodes(\n os.path.join(taxonomy_dir, 'nodes.dmp'))\n print('Read %d node records.' % len(node_records))\n\n name_records = self._read_names(\n os.path.join(taxonomy_dir, 'names.dmp'))\n print('Read %d name records.' 
% len(name_records))\n\n # traverse taxonomy tree for each assembly\n taxonomy_file = output_prefix + '_unfiltered_taxonomy.tsv'\n fout = open(taxonomy_file, 'w')\n\n print('Number of assemblies: %d' % len(assembly_to_tax_id))\n for assembly_accession, tax_id in assembly_to_tax_id.items():\n # traverse taxonomy tree to the root which is 'cellular organism' for genomes,\n # 'other sequences' for plasmids, and 'unclassified sequences' for metagenomic libraries\n taxonomy = []\n cur_tax_id = tax_id\n\n if cur_tax_id not in name_records:\n print('[Warning] Assembly %s has an invalid taxid: %s' % (assembly_accession, tax_id))\n continue\n\n roots = ['cellular organisms', 'other sequences',\n 'unclassified sequences', 'Viruses', 'Viroids']\n while name_records[cur_tax_id].name_txt not in roots:\n if cur_tax_id == '1':\n print('[Error] TaxId %s reached root of taxonomy tree: %s' % (tax_id, taxonomy))\n sys.exit(-1)\n\n try:\n node_record = node_records[cur_tax_id]\n\n if node_record.rank in Taxonomy.rank_labels:\n rank_index = Taxonomy.rank_labels.index(\n node_record.rank)\n rank_prefix = Taxonomy.rank_prefixes[rank_index]\n elif node_record.rank == 'subspecies':\n rank_prefix = 'sb__'\n else:\n # unrecognized rank\n rank_prefix = 'x__'\n if node_record.rank == 'superkingdom':\n rank_prefix = 'd__'\n\n taxonomy.append(\n rank_prefix + name_records[cur_tax_id].name_txt)\n\n cur_tax_id = node_record.parent_tax_id\n except:\n print(traceback.format_exc())\n print(taxonomy)\n\n taxonomy.reverse()\n taxa_str = ';'.join(taxonomy)\n fout.write('%s\\t%s\\n' % (assembly_accession, taxa_str))\n\n fout.close()\n\n self.standardize_taxonomy(taxonomy_file,\n output_prefix + '_standardized.tsv')", "def load_taxonomy(db_prefix):\n global name_map\n name_map = {}\n global rank_map\n rank_map = {}\n global child_lists\n child_lists = defaultdict(list)\n global name_clade_map\n parent_map = {}\n #read the taxonomy .dmp to and create or dict\n if not os.path.exists(db_prefix+\"/taxonomy/name_map.json\") or \\\n not os.path.exists(db_prefix+\"/taxonomy/rank_map.json\") or \\\n not os.path.exists(db_prefix+\"/taxonomy/child_lists.json\") or \\\n not os.path.exists(db_prefix+\"/taxonomy/parent_map.json\"):\n print (\"Map files don't exist, creating json...\", file=sys.stderr)\n with gzip.open(db_prefix+\"/taxonomy/names_trimmed.dmp.gz\", 'rt') as name_file:\n for line in name_file:\n node_id, name = line.strip().split('|')\n node_id = node_id.strip()\n name = name.strip()\n name_map[node_id] = name\n with gzip.open(db_prefix+\"/taxonomy/nodes_trimmed.dmp.gz\", 'rt') as nodes_file:\n for line in nodes_file:\n node_id, parent_id, rank = line.strip().split('|')\n node_id = node_id.strip()\n parent_id = parent_id.strip()\n rank = rank.strip()\n if node_id == '1':\n parent_id = '0'\n child_lists[parent_id].append(node_id)\n rank_map[node_id] = rank\n parent_map[node_id] = parent_id\n #save our dicts as json\n with open(db_prefix+\"/taxonomy/name_map.json\",'w') as name_map_file, \\\n open(db_prefix+\"/taxonomy/rank_map.json\",'w') as rank_map_file, \\\n open(db_prefix+\"/taxonomy/child_lists.json\",'w') as child_lists_file, \\\n open(db_prefix+\"/taxonomy/parent_map.json\",'w') as parent_map_file:\n json.dump(name_map,name_map_file)\n json.dump(rank_map, rank_map_file)\n json.dump(child_lists,child_lists_file)\n json.dump(parent_map, parent_map_file)\n else: #load the json\n with open(db_prefix+\"/taxonomy/name_map.json\",'r') as name_map_file, \\\n open(db_prefix+\"/taxonomy/rank_map.json\",'r') as rank_map_file, \\\n 
open(db_prefix+\"/taxonomy/child_lists.json\",'r') as child_lists_file:\n name_map = json.load(name_map_file)\n rank_map = json.load(rank_map_file)\n child_lists = json.load(child_lists_file)\n name_clade_map = {v: k for k, v in name_map.items()}\n #return (name_map, rank_map, child_lists, name_clade_map)", "def vcf2snapp(vcf_file, output_file):\r\n\r\n fh = open(vcf_file)\r\n\r\n chroms = []\r\n\r\n for line in fh:\r\n\r\n # Skip header\r\n if line.startswith(\"##\"):\r\n pass\r\n elif line.startswith(\"#CHROM\"):\r\n # Get taxa information\r\n taxa_list = line.strip().split()\r\n nexus_data = OrderedDict((x, []) for x in taxa_list[9:])\r\n elif line.strip() != \"\":\r\n fields = line.strip().split()\r\n\r\n ref_snp = fields[3]\r\n alt_snp = fields[4]\r\n\r\n # If SNP is not bialleic, ignore\r\n if len(alt_snp) > 1:\r\n continue\r\n\r\n # Record data for each Taxon\r\n for tx in nexus_data:\r\n # Get genotype\r\n gen = fields[taxa_list.index(tx)]\r\n gen = gen.split(\":\")[0]\r\n\r\n if gen == \"./.\":\r\n nexus_data[tx].append(\"-\")\r\n elif gen == \"0/0\":\r\n nexus_data[tx].append(\"0\")\r\n elif gen == \"1/1\":\r\n nexus_data[tx].append(\"2\")\r\n elif gen == \"1/0\" or gen == \"0/1\":\r\n nexus_data[tx].append(\"1\")\r\n\r\n\r\n # Write nexus files\r\n nexus_fh = open(output_file, \"w\")\r\n\r\n # Write header\r\n ntaxa = len(nexus_data)\r\n nloci = len(nexus_data[tx])\r\n nexus_fh.write(\"#NEXUS\\nBEGIN Data;\\n\\tDIMENSIONS NTAX={} NCHAR={};\\n\\t\"\r\n r'FORMAT DATATYPE=standard SYMBOLS=\"012\" INTERLEAVE=no missing=-;'\r\n \"\\n\"\r\n \"Matrix\\n\".format(ntaxa, nloci))\r\n\r\n # Write Data\r\n for tx in nexus_data:\r\n nexus_fh.write(\"{}\\t{}\\n\".format(tx, \"\".join(nexus_data[tx])))\r\n\r\n # Write file ending\r\n nexus_fh.write(\";\\nEND;\\n\")\r\n nexus_fh.close()", "def get_ott_ids_for_rank(rank, taxonomy_file, synth_only = True):\n assert rank in ['species', 'genus', 'family', 'order', 'class']\n assert os.path.exists(taxonomy_file)\n taxon_dir = os.path.dirname(taxonomy_file)\n output_path = \"{}/{}.tsv\".format(taxon_dir, rank)\n #if not os.path.exists(output_path):\n os.system(\"\"\"cat {tf} | awk '$7 == \"{r}\"' > {op}\"\"\".format(tf=taxonomy_file, r=rank, op=output_path))\n # clean taxonomy file\n# os.system('grep -a \"' + rank + '\" ' + taxonomy_file + ' | egrep -v \"Incertae\" | egrep -v \"no rank\" | egrep -v \"major_rank_conflict\" | egrep -v \"uncultured\" | egrep -v \"barren\" | egrep -v \"extinct\" | egrep -v \"incertae\" | egrep -v \"unplaced\" | egrep -v \"hidden\" | egrep -v \"inconsistent\" | egrep -v \"synonym\" | egrep -v \"in ' + rank + '\" | egrep -v \"species\" | egrep -v \"genus\" | egrep -v \"super' + rank + '\" | egrep -v \"sub' + rank + '\" > {}'.format(output_path))\n # extract ott ids from taxonomy reduced file\n with open(output_path, \"r\") as inp:\n ott_ids = []\n for lin in inp:\n lii = lin.split('\\t')\n ott_ids.append(lii[0])\n if synth_only == True:\n nodes = ['ott' + idn for idn in ott_ids]\n resp = OT.synth_node_info(node_ids = nodes)\n if 'unknown' in resp.response_dict:\n synth_ids = set(nodes).difference(set(resp.response_dict['unknown']))\n ott_ids = [nodeid.strip('ott') for nodeid in synth_ids]\n return ott_ids", "def get_taxonomy(labels):\n labels = labels.split(',')\n num_labels = len(labels)\n # taking the root label into consideration\n num_labels += 1\n shogun_labels = dict()\n taxonomy = np.zeros(num_labels, dtype=np.int32)\n # considering the root_label node index to be 0\n taxonomy[0] = -1\n for i, label in 
enumerate(labels):\n shogun_labels[label] = i + 1\n try:\n parent_label = label[:-2]\n parent_idx = labels.index(parent_label) + 1\n taxonomy[i + 1] = parent_idx\n except ValueError:\n taxonomy[i + 1] = 0\n return shogun_labels, taxonomy", "def collapse_tax(self):\n try:\n for level in self.inputs['levels']:\n if level != 'otu':\n for x in list(self.levels['otu']):\n self.levels[level][x] = _data_bin(self.otu[x], self.n[level], level + '_' + x)\n self.write_bioms()\n except TypeError:\n logger.error(\"Could not collapse taxonomy\", exc_info=True)", "def get_tax_rank_names(names_tax_file, nodes_tax_file, tax_rank):\n tax_ids = set()\n tax_names = set()\n with open(nodes_tax_file, 'r') as nodes_tax:\n for line in nodes_tax:\n if '\\t'+tax_rank in line:\n current_node = [field.strip() for field in line.split('|')]\n tax_ids.add(current_node[0])\n with open(names_tax_file, 'r') as names_tax:\n for line in names_tax:\n current_rec = [field.strip() for field in line.split('|') if 'scientific name' in line]\n if current_rec and current_rec[0] in tax_ids:\n tax_names.add(current_rec[1])\n if not tax_names:\n sys.stderr.write('[Warning] Could not find any names corresponding to taxonomic rank \\'%s\\'. \\n' % tax_rank)\n # return sorted(list(tax_names))\n return tax_names", "def __init_taxonomy_from_lineages(\n self,\n taxonomy_series: pd.Series,\n taxonomy_notation: Optional[str],\n order_ranks: Optional[Sequence[str]],\n ) -> pd.DataFrame: # Done\n # Check if taxonomy is known and is available for parsing. Otherwise indentify_taxon_notation() will try to identify notation\n if taxonomy_notation in AVAIL_TAXONOMY_NOTATIONS:\n notation = taxonomy_notation\n else:\n # Get first lineage _sample for notation testing assuming the rest have the the same notations\n sample_taxon = taxonomy_series.iloc[0]\n # Identify notation of the lineage string\n notation = indentify_taxon_notation(sample_taxon)\n if order_ranks is not None:\n if all([rank in VALID_RANKS for rank in order_ranks]):\n target_order_ranks = order_ranks\n else:\n raise NotImplementedError\n else:\n target_order_ranks = VALID_RANKS\n if notation == \"greengenes\":\n lineages = taxonomy_series.reset_index().values.tolist()\n ordered_taxa_list = []\n ordered_indices_list = [elem[0] for elem in lineages]\n for lineage in lineages:\n tmp_lineage = jRegexGG.findall(lineage[1])\n tmp_taxa_dict = {\n elem[0]: elem[1] for elem in tmp_lineage if elem[0] in VALID_RANKS\n }\n for rank in VALID_RANKS:\n if rank not in tmp_taxa_dict.keys():\n tmp_taxa_dict.update({rank: None})\n tmp_taxa_ordered = [tmp_taxa_dict[rank] for rank in VALID_RANKS]\n ordered_taxa_list.append([None] + tmp_taxa_ordered)\n taxonomy = pd.DataFrame(\n index=ordered_indices_list,\n data=ordered_taxa_list,\n columns=[\"lineage\"] + VALID_RANKS,\n )\n return taxonomy\n elif notation == \"qiime\":\n lineages = taxonomy_series.reset_index().values.tolist()\n tmp_taxa_dict_list = []\n tmp_ranks = set()\n for lineage in lineages:\n tmp_lineage = jRegexQIIME.findall(lineage[1])\n tmp_lineage.sort(key=lambda x: x[0])\n tmp_taxa_dict = defaultdict(None)\n tmp_taxa_dict[None] = lineage[0]\n for rank, taxon in tmp_lineage:\n tmp_taxa_dict[rank] = taxon\n tmp_ranks.add(rank)\n tmp_taxa_dict_list.append(dict(tmp_taxa_dict))\n tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)\n tmp_taxonomy_df.set_index(None, inplace=True)\n tmp_taxonomy_df = tmp_taxonomy_df.loc[:, sorted(list(tmp_ranks))]\n tmp_taxonomy_df.columns = [\n rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]\n 
][::-1]\n for rank in VALID_RANKS:\n if rank not in tmp_taxonomy_df.columns:\n tmp_taxonomy_df.loc[:, rank] = None\n return tmp_taxonomy_df\n elif notation == \"silva\":\n lineages = taxonomy_series.reset_index().values.tolist()\n tmp_taxa_dict_list = []\n tmp_ranks = set()\n for lineage in lineages:\n tmp_lineage = lineage[1].split(\";\")\n tmp_taxa_dict = defaultdict(None)\n tmp_taxa_dict[None] = lineage[0]\n for rank_i, taxon in enumerate(tmp_lineage):\n rank = target_order_ranks[rank_i]\n tmp_taxa_dict[rank] = taxon\n tmp_ranks.add(rank)\n tmp_taxa_dict_list.append(dict(tmp_taxa_dict))\n tmp_taxonomy_df = pd.DataFrame.from_records(tmp_taxa_dict_list)\n tmp_taxonomy_df.set_index(None, inplace=True)\n tmp_rank_ordered = [\n rank for rank in target_order_ranks if rank in VALID_RANKS\n ]\n tmp_taxonomy_df = tmp_taxonomy_df.loc[:, tmp_rank_ordered]\n tmp_taxonomy_df.columns = [\n rank for rank in target_order_ranks[::-1][: len(tmp_ranks)]\n ][::-1]\n for rank in VALID_RANKS:\n if rank not in tmp_taxonomy_df.columns:\n tmp_taxonomy_df.loc[:, rank] = None\n return tmp_taxonomy_df\n\n else:\n raise NotImplementedError", "def taxonomy():\n \n taxo={}\n xls = pd.ExcelFile('sources/Taxonomy For Domain bf and sbf.XLSX') #read excel file with multiple sheets\n sheet = xls.parse(2) #Go to the sheet which is important for us\n domaine=sheet.loc[(sheet[\"Domaine\"].isnull()==False)][\"Domaine\"] #Delete NaN elements\n sn=xls.sheet_names #sheets name (1- signalisation 2-IFTE and so on)\n #creation of taxo domain\n for dom in domaine:\n dom1=dom[3:].lower()\n taxo[dom1]=[]\n if dom in sn:\n sheet=xls.parse(sn.index(dom))\n df=sheet.loc[(sheet[\"Sous bloc fonctionnel\"].isnull()==False)]\n taxo[dom1].append(blocf(df))\n return taxo", "def _parse_id_to_taxonomy_file(f):\n result = {}\n for line in f:\n line = line.strip()\n if line:\n try:\n identifier, taxonomy = map(strip, line.split('\\t'))\n result[identifier] = taxonomy\n except:\n print line, identifier, taxonomy\n return result", "def make_all_names_to_taxon(folder):\n print('...making all_names_to_taxon dictionary...')\n archive_name = os.path.join(folder, 'taxdump.tar.gz')\n archive = tarfile.open(archive_name)\n names = archive.extractfile('names.dmp')\n all_names_to_taxon = {}\n \n # read file and save taxonomy numbers for all names\n while True:\n## line = names.readline().rstrip()\n line = names.readline()\n line = line.decode('utf-8')\n line = line.rstrip()\n if not line: break \n item = line.split('\\t')\n name = item[2].replace('\"','')\n name = name.lstrip()\n all_names_to_taxon[name] = int(item[0])\n \n # close archive and return dictionary\n names.close()\n return all_names_to_taxon", "def load_taxonomy(path_out: str) -> Dict[int, List[int]]:\n taxonomy = {}\n path_tax = os.path.join(path_out, 'hierarchy/taxonomy.csv')\n with open(path_tax, 'r', encoding='utf8') as f:\n csv_reader = csv.reader(f, delimiter=',')\n for row in csv_reader:\n node_id = int(row[0])\n child_ids = [int(nid) for nid in row[1:6]]\n taxonomy[node_id] = child_ids\n return taxonomy", "def read_terms_auto(path):\r\n mytrie = Node()\r\n with open(path, \"r\", encoding=\"utf8\") as fp:\r\n line = fp.readlines()\r\n for i in range(1, len(line)):\r\n if line[i].strip() == \"\":\r\n continue\r\n weight, word = line[i].split('\\t')\r\n weight = int(weight)\r\n word = word.strip()\r\n mytrie.insertWord(weight, word)\r\n return mytrie" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read NCBI taxonomy information and create summary output files.
def parse_ncbi_taxonomy(self, taxonomy_dir, refseq_archaea_assembly_file, refseq_bacteria_assembly_file, genbank_archaea_assembly_file, genbank_bacteria_assembly_file, output_prefix): # parse organism name self._assembly_organism_name(refseq_archaea_assembly_file, refseq_bacteria_assembly_file, genbank_archaea_assembly_file, genbank_bacteria_assembly_file, output_prefix + '_organism_names.tsv') # parse metadata file and taxonomy files assembly_to_tax_id = self._assembly_to_tax_id(refseq_archaea_assembly_file, refseq_bacteria_assembly_file, genbank_archaea_assembly_file, genbank_bacteria_assembly_file) node_records = self._read_nodes( os.path.join(taxonomy_dir, 'nodes.dmp')) print('Read %d node records.' % len(node_records)) name_records = self._read_names( os.path.join(taxonomy_dir, 'names.dmp')) print('Read %d name records.' % len(name_records)) # traverse taxonomy tree for each assembly taxonomy_file = output_prefix + '_unfiltered_taxonomy.tsv' fout = open(taxonomy_file, 'w') print('Number of assemblies: %d' % len(assembly_to_tax_id)) for assembly_accession, tax_id in assembly_to_tax_id.items(): # traverse taxonomy tree to the root which is 'cellular organism' for genomes, # 'other sequences' for plasmids, and 'unclassified sequences' for metagenomic libraries taxonomy = [] cur_tax_id = tax_id if cur_tax_id not in name_records: print('[Warning] Assembly %s has an invalid taxid: %s' % (assembly_accession, tax_id)) continue roots = ['cellular organisms', 'other sequences', 'unclassified sequences', 'Viruses', 'Viroids'] while name_records[cur_tax_id].name_txt not in roots: if cur_tax_id == '1': print('[Error] TaxId %s reached root of taxonomy tree: %s' % (tax_id, taxonomy)) sys.exit(-1) try: node_record = node_records[cur_tax_id] if node_record.rank in Taxonomy.rank_labels: rank_index = Taxonomy.rank_labels.index( node_record.rank) rank_prefix = Taxonomy.rank_prefixes[rank_index] elif node_record.rank == 'subspecies': rank_prefix = 'sb__' else: # unrecognized rank rank_prefix = 'x__' if node_record.rank == 'superkingdom': rank_prefix = 'd__' taxonomy.append( rank_prefix + name_records[cur_tax_id].name_txt) cur_tax_id = node_record.parent_tax_id except: print(traceback.format_exc()) print(taxonomy) taxonomy.reverse() taxa_str = ';'.join(taxonomy) fout.write('%s\t%s\n' % (assembly_accession, taxa_str)) fout.close() self.standardize_taxonomy(taxonomy_file, output_prefix + '_standardized.tsv')
[ "def collapse_tax(self):\n try:\n for level in self.inputs['levels']:\n if level != 'otu':\n for x in list(self.levels['otu']):\n self.levels[level][x] = _data_bin(self.otu[x], self.n[level], level + '_' + x)\n self.write_bioms()\n except TypeError:\n logger.error(\"Could not collapse taxonomy\", exc_info=True)", "def parse(path):\n print \"Parsing file: %s\" % path\n acc2taxa = {}\n acc2ncbi = {}\n f = open(path)\n line = f.readline()\n tax = []\n while line:\n if line[0:2] == 'ID':\n ID = line.split(' ')[3].split('_')[1]\n if line[0:2] == 'OC':\n [tax.append(i.strip()) for i in line.strip().split(' ')[1].split(';')[:-1]]\n if line[0:2] == 'OX':\n ncbi = line.strip().split('NCBI_TaxID=')[1].split(';')[0]\n if line[0:2] == 'OS':\n name = line.split(' ')[1].strip()\n if line[0:2] == '//':\n # print \"Adding %s : %s\" % (ID, tax)\n tax.append(name)\n acc2taxa[ID] = tax\n acc2ncbi[ID] = ncbi\n tax = []\n line = f.readline()\n return acc2taxa, acc2ncbi", "def handleTaxon(self, filepage):\n pywikibot.output(u'Working on %s' % (filepage.title(),))\n if not filepage.exists():\n return\n\n qid = None\n taxonName = None\n\n toremove = [u' (museum specimens)', u' (taxidermied)']\n\n for category in filepage.categories():\n categoryname = category.title(with_ns=False)\n for remove in toremove:\n if categoryname.endswith(categoryname):\n categoryname = categoryname.replace(remove, u'')\n print (categoryname)\n if categoryname in self.speciescategories:\n qid = self.speciescategories.get(categoryname)\n taxonName = categoryname\n break\n\n if not qid:\n return\n\n pywikibot.output(u'Found %s based on %s' % (qid, taxonName,))\n\n mediaid = u'M%s' % (filepage.pageid,)\n if self.mediaInfoHasStatement(mediaid, u'P180'):\n return\n\n summary = u'based on Naturalis Leiden image in [[Category:%s]]' % (taxonName, )\n\n self.addClaim(mediaid, u'P180', qid, summary)", "def load_taxonomy(db_prefix):\n global name_map\n name_map = {}\n global rank_map\n rank_map = {}\n global child_lists\n child_lists = defaultdict(list)\n global name_clade_map\n parent_map = {}\n #read the taxonomy .dmp to and create or dict\n if not os.path.exists(db_prefix+\"/taxonomy/name_map.json\") or \\\n not os.path.exists(db_prefix+\"/taxonomy/rank_map.json\") or \\\n not os.path.exists(db_prefix+\"/taxonomy/child_lists.json\") or \\\n not os.path.exists(db_prefix+\"/taxonomy/parent_map.json\"):\n print (\"Map files don't exist, creating json...\", file=sys.stderr)\n with gzip.open(db_prefix+\"/taxonomy/names_trimmed.dmp.gz\", 'rt') as name_file:\n for line in name_file:\n node_id, name = line.strip().split('|')\n node_id = node_id.strip()\n name = name.strip()\n name_map[node_id] = name\n with gzip.open(db_prefix+\"/taxonomy/nodes_trimmed.dmp.gz\", 'rt') as nodes_file:\n for line in nodes_file:\n node_id, parent_id, rank = line.strip().split('|')\n node_id = node_id.strip()\n parent_id = parent_id.strip()\n rank = rank.strip()\n if node_id == '1':\n parent_id = '0'\n child_lists[parent_id].append(node_id)\n rank_map[node_id] = rank\n parent_map[node_id] = parent_id\n #save our dicts as json\n with open(db_prefix+\"/taxonomy/name_map.json\",'w') as name_map_file, \\\n open(db_prefix+\"/taxonomy/rank_map.json\",'w') as rank_map_file, \\\n open(db_prefix+\"/taxonomy/child_lists.json\",'w') as child_lists_file, \\\n open(db_prefix+\"/taxonomy/parent_map.json\",'w') as parent_map_file:\n json.dump(name_map,name_map_file)\n json.dump(rank_map, rank_map_file)\n json.dump(child_lists,child_lists_file)\n json.dump(parent_map, parent_map_file)\n 
else: #load the json\n with open(db_prefix+\"/taxonomy/name_map.json\",'r') as name_map_file, \\\n open(db_prefix+\"/taxonomy/rank_map.json\",'r') as rank_map_file, \\\n open(db_prefix+\"/taxonomy/child_lists.json\",'r') as child_lists_file:\n name_map = json.load(name_map_file)\n rank_map = json.load(rank_map_file)\n child_lists = json.load(child_lists_file)\n name_clade_map = {v: k for k, v in name_map.items()}\n #return (name_map, rank_map, child_lists, name_clade_map)", "def create_taxonomy(genome_record):\n # Get taxonomy object from NCBI\n rec = Entrez.read(Entrez.elink(db='taxonomy', dbfrom='nuccore',\n id=genome_record.annotations['gi'],\n linkname='nuccore_taxonomy'))\n tax_id = rec[0]['LinkSetDb'][0]['Link'][0]['Id']\n handle = Entrez.efetch(db='Taxonomy', id=tax_id, retmode='xml')\n records = Entrez.read(handle)\n assert len(records) == 1, \"More than one taxonomy records retrieved.\"\n record = records[0]\n record['LineageEx'].append({'Rank': records[0]['Rank'],\n 'ScientificName': records[0]['ScientificName'],\n 'TaxId': records[0]['TaxId']})\n # The lineage that NCBI returns seems already sorted, but to be safe, sort it here.\n lineage = []\n for rank in ['phylum', 'class', 'order', 'family', 'genus', 'species']:\n r = filter(lambda x: x['Rank']==rank, record['LineageEx'])\n assert len(r) <= 1\n lineage.extend(r)\n assert len(lineage) >= 1, \"Number of lineages greater than one\"\n p = None\n for item in lineage:\n p,_ = Taxonomy.objects.get_or_create(rank=item['Rank'],\n taxonomy_id=item['TaxId'],\n name=item['ScientificName'],\n parent=p)\n return p", "def process_terms(self):\n template_data = []\n in_class = \"Record-level\"\n # sequence matters in config and it starts with Record-level which we populate here ad-hoc\n class_group = {}\n class_group[\"label\"] = \"Record-level\"\n class_group[\"iri\"] = None\n class_group[\"class\"] = None\n class_group[\"definition\"] = None\n class_group[\"comments\"] = None\n class_group[\"rdf_type\"] = None\n class_group[\"terms\"] = []\n class_group[\"namespace\"] = None\n\n addedUseWithIRI = False\n for term in self.versions(): # sequence of the terms file used as order\n term_data = self.get_term_definition(term['term_iri'])\n test = term['term_iri']\n if term_data[\"rdf_type\"] == \"http://www.w3.org/2000/01/rdf-schema#Class\":\n # new class encountered\n # store previous section in template_data\n template_data.append(class_group)\n #start new class group\n class_group = term_data\n class_group[\"terms\"] = []\n in_class = term_data[\"label\"] # check on the class working in\n elif term['term_iri']=='http://purl.org/dc/terms/language':\n # Vulnerable to ordering terms in term_versions.csv, but...\n # This is the first row of dwciri terms\n # store previous section in template_data\n template_data.append(class_group)\n #start a class group for UseWithIRI\n class_group = {\"label\":\"UseWithIRI\"}\n class_group[\"terms\"] = []\n in_class = \"UseWithIRI\" # check on the class working in\n addedUseWithIRI = True\n class_group['terms'].append(term_data)\n else:\n class_group['terms'].append(term_data)\n # save the last class to template_data\n template_data.append(class_group)\n return template_data", "def kaiju_to_tax_summary_data(kaiju_output_file, output_data_table, names_tax_file, nodes_tax_file,\n rank_limit='phylum', top_tax=10, kaiju_to_table_path=KAIJU_TO_TABLE_PATH):\n sys.stderr.write(\"Generating %s summary... \\n\" % rank_limit)\n chart_data = {}\n unclass_str = 'Unclassified'\n n_unclass = 0\n # 1. 
Run `kaiju2table`\n kaiju2table_output_file = os.path.join(os.path.dirname(output_data_table), \"kaiju_summary.%s.tsv\" % rank_limit)\n kaiju2table = subprocess.Popen(\n [kaiju_to_table_path,\n \"-t\", nodes_tax_file,\n \"-n\", names_tax_file,\n \"-r\", rank_limit,\n \"-o\", kaiju2table_output_file,\n kaiju_output_file],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n sys.stderr.write(\"Creating Kaiju summary file (`kaiju2table`)... \\n\")\n kaiju2table.communicate()\n exit_status = kaiju2table.returncode\n sys.stderr.write(\"... kaiju2table finished with code: \" + str(exit_status) + '\\n')\n if exit_status != 0:\n sys.exit(exit_status)\n\n # 2. Parse output to create top tax. file\n with open(kaiju2table_output_file, 'r') as in_file:\n next(in_file)\n for line in in_file:\n current_rec = line.strip().split('\\t')\n n_sqces = int(current_rec[2])\n tax_id = current_rec[3]\n tax_name = current_rec[-1]\n if tax_id == 'NA':\n # Ignore the 'cannot be assigned to a (non-viral) [...]' record\n # Replace unclassified by the string expected by the web app\n if tax_name == 'unclassified':\n n_unclass = n_sqces\n else:\n chart_data[tax_name] = n_sqces\n\n # 3. Sort/filter chart data and create output\n with open(output_data_table, 'w') as out_file:\n top = sorted([a for a in chart_data.iteritems()], key=lambda tup: tup[1], reverse=True)[0:top_tax]\n for e in top:\n out_file.write(e[0]+'\\t'+str(e[1])+'\\n')\n if len(chart_data) > top_tax:\n other_sum = sum([a[1] for a in sorted([a for a in chart_data.iteritems()], key=lambda tup: tup[1], reverse=True)[top_tax:]])\n out_file.write('Other'+'\\t'+str(other_sum)+'\\n')\n # Add Unclassified\n if n_unclass > 0:\n out_file.write(unclass_str + '\\t' + str(n_unclass) + '\\n')\n\n # 4. Delete `kaiju2table` output\n os.remove(kaiju2table_output_file)", "def standardize_taxonomy(self, ncbi_taxonomy_file, output_consistent):\n\n fout_consistent = open(output_consistent, 'w')\n failed_filters = set()\n for line in open(ncbi_taxonomy_file):\n line_split = line.strip().split('\\t')\n\n gid = line_split[0]\n taxonomy = line_split[1].split(';')\n\n if not ('d__Bacteria' in taxonomy or 'd__Archaea' in taxonomy):\n continue\n\n # remove unrecognized ranks (i.e., 'x__') and strain classification\n revised_taxonomy = []\n for t in taxonomy:\n if not t.startswith('x__') and not t.startswith('st__') and ' family' not in t.lower() :\n revised_taxonomy.append(t)\n\n # create longest taxonomy string possible with canonical ranks\n canonical_taxonomy = {}\n for i, taxon in enumerate(revised_taxonomy):\n rank_prefix = taxon[0:3]\n if rank_prefix in Taxonomy.rank_prefixes:\n if rank_prefix == 's__':\n valid_name, canonical_species_name = self._valid_species_name(\n taxon)\n\n if valid_name:\n canonical_taxonomy[Taxonomy.rank_prefixes.index(\n rank_prefix)] = canonical_species_name\n else:\n if ('full name' in canonical_species_name and\n ('oral' in canonical_species_name\n or '-like' in canonical_species_name\n or 'endosymbiont' in canonical_species_name\n or 'symbiont' in canonical_species_name\n or 'mycovirus' in canonical_species_name\n or 'phytoplasma' in canonical_species_name)):\n failed_filters.add(taxon)\n else:\n canonical_taxonomy[Taxonomy.rank_prefixes.index(\n rank_prefix)] = taxon\n\n # fill in missing ranks where possible\n if canonical_taxonomy:\n for i in range(0, max(canonical_taxonomy.keys())):\n if i in canonical_taxonomy and (i + 1) not in canonical_taxonomy:\n canonical_taxonomy[i +\n 1] = Taxonomy.rank_prefixes[i + 1]\n\n cur_taxonomy = []\n for i in 
range(0, len(Taxonomy.rank_prefixes)):\n if i in canonical_taxonomy:\n cur_taxonomy.append(canonical_taxonomy[i])\n else:\n break # unable to correctly determine a valid taxonomy below this rank\n\n if len(cur_taxonomy) > 0:\n if len(cur_taxonomy) != len(Taxonomy.rank_prefixes):\n cur_taxonomy = cur_taxonomy + \\\n list(Taxonomy.rank_prefixes[len(cur_taxonomy):])\n fout_consistent.write('%s\\t%s\\n' %\n (gid, ';'.join(cur_taxonomy)))\n\n fout_consistent.close()\n\n # Sanity check particular filters\n fout = open('failed_filters.tsv', 'w')\n for sp in failed_filters:\n fout.write(sp + '\\n')\n fout.close()\n\n print('Genomes with a consistent taxonomy written to: %s' % output_consistent)", "def main():\n logger = logging.getLogger(__name__)\n logger.info('making final datasets from raw data')\n\n raw_path = project_dir / 'data' / 'raw'\n interim_path = project_dir / 'data' / 'interim'\n processed_path = project_dir / 'data' / 'processed'\n\n split_data(raw_path, interim_path)\n preprocess(interim_path, processed_path)\n write_most_freq_words(interim_path)", "def main(directory):\n docs = []\n for entry in entries:\n docs.append(Document(entry, path))\n\n processed = []\n\n print('Processing documents...')\n print()\n for document in docs:\n processed.append(document.pre_process())\n \n processed_counts = termCounts(processed)\n \n with open('wordCounts.txt', 'w') as file:\n file.write(json.dumps(processed_counts))\n \n return processed_counts", "def main():\n\n global args, summaryInstance\n from Bio import SeqIO\n import pysam\n import logging\n\n\n\n configureLogging('info')\n\n readArgs() # Argument parsing\n logging.info('Arguments read successfully')\n\n summaryInstance = Summary()\n\n # Generates a dictionary from concatenated .clstr file (stores in Summary instance)\n # dict1 = consensus barcode representing the cluster : non-consensus barcode in the same cluster\n # dict2 = master dictionary, concatenated from dict1\n\n readAndProcessClusters(args.input_clstr)\n\n logging.info('Cluster file processed successfully')\n\n infile = pysam.AlignmentFile(args.input_mapped_bam, 'rb')\n out = pysam.AlignmentFile(args.output_tagged_bam+'.bam', 'wb', template=infile)\n\n for read in infile.fetch(until_eof=True):\n read_bc = read.query_name.split()[0].split('_')[-1]\n consensus_seq = summaryInstance.master_barcode_dict[read_bc]\n read.set_tag('BC', str(consensus_seq),value_type='Z') # Stores as string, makes duplicate removal possible. 
Can do it as integer as well.\n read.query_name = (read.query_name + '_@BC:Z:' + str(consensus_seq))\n out.write(read)\n\n infile.close()\n out.close()\n\n logging.info('Tagging completed')", "def download_db(taxnamelist, group): \n for i in getTaxid(taxnamelist):\n Taxon = list(i.keys())[0]\n Taxonid = str(list(i.values())[0][0])\n outdir = workpath + \"_\".join(Taxon.split(\" \")) + \"/\"\n try:\n os.mkdir(outdir)\n except FileExistsError:\n print(\"Path exists: \"+ outdir)\n print(\"#############################################################\")\n print(\"Downloading complete sequence in fasta from NCBI database...\\n\" + \n \"Taxon: \" + Taxon + \"\\n\" + \n \"Taxon id: \" + Taxonid + \"\\n\" + \n \"Directory: \" + outdir)\n print(\"Executing: \" + \"ncbi-genome-download -t \" + Taxonid + \\\n \" -F fasta -l complete \" +\" -o \" + outdir + \" \" + \\\n group)\n os.system(\"ncbi-genome-download -t \" + Taxonid + \\\n \" -F fasta -l complete \" +\" -o \" + outdir + \" \" + \\\n group)\n print(\"...Taxon \" + Taxon + \" downloaded complete!\")\n print(\"Unzip and re-organizing...\")\n ungz_all_fasta(outdir)\n for i in os.walk(outdir):\n for j in i[2]:\n if j[-4:] == \".fna\":\n os.system(\"cp \" + i[0]+\"/\"+j + \" \" + outdir)\n rm_not_fasta(outdir)", "def make_all_names_to_taxon(folder):\n print('...making all_names_to_taxon dictionary...')\n archive_name = os.path.join(folder, 'taxdump.tar.gz')\n archive = tarfile.open(archive_name)\n names = archive.extractfile('names.dmp')\n all_names_to_taxon = {}\n \n # read file and save taxonomy numbers for all names\n while True:\n## line = names.readline().rstrip()\n line = names.readline()\n line = line.decode('utf-8')\n line = line.rstrip()\n if not line: break \n item = line.split('\\t')\n name = item[2].replace('\"','')\n name = name.lstrip()\n all_names_to_taxon[name] = int(item[0])\n \n # close archive and return dictionary\n names.close()\n return all_names_to_taxon", "def clean_taxonomy_file(taxonomy_file):\n taxon_dir = os.path.dirname(taxonomy_file)\n output_path = \"{}/taxonomy_clean.tsv\".format(taxon_dir)\n if not os.path.exists(output_path):\n # clean taxonomy file, writes cleaned file to taxonomy_clean.tsv\n os.system('grep -a -v \"major_rank_conflict\" ' + taxonomy_file + ' | egrep -a -v \"sibling_higher\" | egrep -a -v \"varietas\" | egrep -a -v \"no rank\" | egrep -a -v \"Incertae\" | egrep -a -v \"incertae\" | egrep -a -v \"uncultured\" | egrep -a -v \"barren\" | egrep -a -v \"extinct\" | egrep -a -v \"unplaced\" | egrep -a -v \"hidden\" | egrep -a -v \"inconsistent\" | egrep -a -v \"synonym\" > {}'.format(output_path))\n assert os.path.exists(output_path)\n return output_path", "def build_corpus(self):\n logging.info('Start')\n\n make_folder(self.file_path)\n self.gen_info_file()\n\n for term in self.search_terms:\n term_path = os.path.join(self.file_path, term)\n make_folder(term_path)\n logging.info(\"searching for %s\" % term)\n\n for year in self.dates_range:\n logging.error(\n \"Start retrieving %s in year %d\" % (term, year))\n data_path = os.path.join(term_path, str(year) + '.pickle')\n data = self.retrieve_all_in_year(term, year)\n if len(data) is not 0:\n with open(data_path, 'wb') as f:\n pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)\n\n logging.info('End')", "def indexcandsfile(candsfile, indexprefix, tags=None):\n\n nc = []\n nn = []\n nm = []\n for cc in candidates.iter_cands(candsfile):\n st = cc.state\n scanId = st.metadata.scanId\n workdir = st.prefs.workdir\n mocks = st.prefs.simulated_transient\n\n 
elastic.indexscan(inmeta=st.metadata, preferences=st.prefs,\n indexprefix=indexprefix)\n nc.append(indexcands_and_plots(cc, scanId, tags, indexprefix, workdir))\n nn.append(elastic.indexnoises(cc.state.noisefile, scanId,\n indexprefix=indexprefix))\n if mocks is not None:\n nm.append(elastic.indexmock(scanId, mocks,\n indexprefix=indexprefix))\n return nc, nn, nm", "def build_taxonomy_data(mapping):\n try:\n ensembl_species_history = EnsemblSpeciesHistory.objects.filter(\n transcripthistory__transcript=mapping.transcript\n ).latest('time_loaded')\n except EnsemblSpeciesHistory.DoesNotExist:\n raise Http404(\n (\n \"Couldn't find an ensembl species history associated to mapping {}\"\n ).format(mapping.mapping_id)\n )\n\n try:\n return {\n 'species': ensembl_species_history.species,\n 'ensemblTaxId': ensembl_species_history.ensembl_tax_id,\n 'uniprotTaxId': mapping.uniprot.uniprot_tax_id\n }\n except:\n raise Http404(\n (\n \"Couldn't find uniprot tax id as I couldn't find a uniprot entry \"\n \"associated to the mapping\"\n )\n )", "def ncbi_Species2Acc(self, species_list, *more_terms):\n\n print(\n \"\"\"\n #########################################################\\n\n ############ NCBI ncbi species to accession #############\\n\n #########################################################\\n\n \"\"\")\n\n Entrez.api_key = self._key\n Entrez.email = self._email\n\n if type(species_list) == str and species_list.endswith('.lst'):\n sp_names = []\n try:\n with open(species_list, 'r') as sp:\n for i in sp:\n i = i.strip()\n sp_names.append(i)\n except ValueError:\n return \"File Not Found\"\n elif type(species_list) == list:\n sp_names = species_list\n\n try:\n conn = sqlite3.connect(self.sqlite_db)\n cur = conn.cursor()\n except sqlite3.Error as e:\n print(e)\n return\n\n cur.execute('''CREATE TABLE IF NOT EXISTS Sp2AccIDs (rowid INT PRIMARY KEY, species TEXT, acc_id TEXT)''')\n cur.execute('''SELECT species FROM Sp2AccIDs''') # check if species exists\n existed_species = cur.fetchall()\n\n len_existed_sp = len(existed_species)\n #flattern it\n print(\"[[Summary]]\\nHave Extracted {} IDs\".format(len_existed_sp))\n\n if len_existed_sp > 0:\n existed = [i[0] for i in existed_species]\n else:\n existed = []\n\n n = len_existed_sp\n for i in range(len_existed_sp, len(sp_names)):\n\n sp = sp_names[i]\n if sp in existed:\n print(\"{}: {} existed in the database\".format(i, sp))\n continue\n \n else:\n search_term = ' AND '.join([sp+'[ORGN]', ' AND '.join(more_terms)])\n print('\\n{} Search Term: {}'.format(i, search_term))\n\n # search total count for a specific term\n try:\n handle = Entrez.esearch(db=self.ncbi_db, term = search_term)\n total_record = int(Entrez.read(handle)['Count'])\n except:\n print(\"Entrez Error\")\n\n if total_record > 0:\n try:\n handle = Entrez.esearch(db=self.ncbi_db, term = search_term, retmax = total_record, idtype = self.idtype)\n record = Entrez.read(handle)\n IDs = record['IdList']\n except:\n print(\"Entrez Error\")\n\n\n handle.close()\n\n print(\"Entrez retrieved {} Accession IDs in {} \\n\".format(total_record, sp))\n\n for i in range(len(IDs)):\n print('Saving into database')\n print(i, ' ', IDs[i], '\\n')\n cur.execute('''INSERT OR IGNORE INTO Sp2AccIDs (rowid, species, acc_id) VALUES (?,?,?)''', (n, sp, IDs[i]))\n conn.commit()\n n += 1\n \n\n elif total_record == 0:\n print(\"Entrez retrieved {} Accession IDs in {}. 
NOT FOUND!\\n\".format(total_record, sp))\n cur.execute('''INSERT OR IGNORE INTO Sp2AccIDs (rowid, species, acc_id) VALUES (?,?,?)''', (n, sp, 'NA'))\n conn.commit()\n n += 1\n \n\n time.sleep(3)\n\n cur.close()\n conn.close()\n print(\"\\nCompleted!\\n\")\n return self.track.append('P2')", "def build_index(self):\n\n root = \"..\\\\WEBPAGES_RAW\\\\4\"\n '''Loop through all the dirs, extract content in each file.'''\n\n bookkeeper = json.load(open(\"..\\\\WEBPAGES_RAW\\\\bookkeeping.json\"))\n #print bookkeeper\n for path, subdirs, files in os.walk(root):\n for current_file in files:\n '''clear term_frequency for next HTML file'''\n self.term_frequency.clear()\n '''Get the parent directory, used to map terms to \"doc_id\"'''\n parent_directory = path.split(os.path.sep)[-1]\n '''Create the doc_id using parent directory and the current file.'''\n doc_id = str(parent_directory + \"\\\\\" + current_file)\n page_key = str(parent_directory + \"/\" + current_file)\n current_path = path + \"\\\\\" + current_file\n html_source = open(current_path).read()\n tokens = self.tokenize(self.text_from_html(html_source))\n tokens = self.stem(tokens)\n tokens = self.remove_number_tokens(tokens)\n for token in tokens:\n if token in self.term_unique_count:\n self.term_unique_count[token] += 1\n else:\n self.term_unique_count[token] = 1\n self.unique_words = self.unique_words + 1\n self.document_index[token].append(doc_id)\n #print self.document_index[token]\n #print self.document_index\n self.term_frequency[token] += 1\n '''At this time, 'tokens' holds a list of every token in the current document.\n doc_id is the current document. document_index '''\n for token2 in tokens:\n if token2 not in self.doc_term_count:\n self.doc_term_count[token2] = dict()\n # print token2\n # print page_key\n self.doc_term_count[token2][doc_id] = term_data(bookkeeper[page_key], 0.0, self.term_frequency[token2])\n #self.doc_term_count[token2][doc_id].print_members()\n #print \"\"\n self.print_report()\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Whether the trigger condition is matched. This class object should pass the data along whenever it receives any data, so it always returns True.
def _is_condition(self, data): return True
[ "def _is_condition(self, data):\n ret = False\n current_charge_value = data[\"data\"][\"Charge Current\"][\"value\"]\n\n if self.pre_current_ is None:\n if self.high_current_ <= current_charge_value:\n ret = True\n self.pre_current_ = current_charge_value\n\n # If the charge current run over the limit of highest charege current,\n # returns True and run some procedure.\n elif self.pre_current_ < self.high_current_:\n if self.high_current_ <= current_charge_value:\n ret = True\n\n self.pre_current_ = current_charge_value\n\n logger.debug(\"Return {} on {} at {}\".format(\n ret, type(self).__name__, data[\"at\"]))\n\n return ret", "def _is_condition(self, data):\n ret = False\n current_voltage = data[\"data\"][\"Battery Voltage\"][\"value\"]\n\n if self.pre_voltage_ is None:\n if self.full_voltage_ <= current_voltage:\n ret = True\n self.pre_voltage_ = current_voltage\n\n # If the battery volate run over the limit of highest batery voltate,\n # returns True and run some procedure.\n elif self.pre_voltage_ < self.full_voltage_:\n if self.full_voltage_ <= current_voltage:\n ret = True\n\n self.pre_voltage_ = current_voltage\n\n logger.debug(\"Return {} on {} at {}\".format(\n ret, type(self).__name__, data[\"at\"]))\n\n return ret", "def _is_condition(self, data):\n ret = False\n current_voltage = data[\"data\"][\"Battery Voltage\"][\"value\"]\n\n if self.pre_voltage_ is None:\n if self.lowest_voltage_ > current_voltage:\n ret = True\n self.pre_voltage_ = current_voltage\n\n # If the battery volate run over the limit of lowest batery voltate,\n # returns True and run some procedure to save the battery power.\n elif self.pre_voltage_ >= self.lowest_voltage_:\n if self.lowest_voltage_ > current_voltage:\n ret = True\n\n self.pre_voltage_ = current_voltage\n\n logger.debug(\"Return {} on {} at {}\".format(\n ret, type(self).__name__, data[\"at\"]))\n\n return ret", "def match(self, packet):\n for trigger in self.triggers:\n if not trigger(packet):\n return False\n return True", "def fulfilled(self):\n return self.data is not None", "def match(self, indata: _cffi_backend.buffer) -> bool:\n raise NotImplementedError('Subclasses must implement match')", "def retrieveCondition(self):\n return True", "def _condition_met(self, event):\n if self.condition_function is None:\n return True\n return self.condition_function(event)", "def Match(self, event):\n if not self._matcher:\n return True\n\n self._decision = self._matcher.Matches(event)\n return self._decision", "def check_result(cls, data):\n return False if NO_RESULT in data else True", "def return_True():\n return True", "def triggered(self) -> bool:\n if self.type in (\n DeviceTypes.CARBON_MONOXIDE,\n DeviceTypes.ENTRY,\n DeviceTypes.GLASS_BREAK,\n DeviceTypes.LEAK,\n DeviceTypes.MOTION,\n DeviceTypes.MOTION_V2,\n DeviceTypes.SMOKE,\n DeviceTypes.TEMPERATURE,\n ):\n return (\n self._system.sensor_data[self._serial][\"status\"].get(\"triggered\")\n is True\n )\n\n if self.type == DeviceTypes.SMOKE_AND_CARBON_MONOXIDE:\n return (\n self._system.sensor_data[self._serial][\"status\"].get(\n \"coTriggered\", False\n )\n is True\n or self._system.sensor_data[self._serial][\"status\"].get(\n \"smokeTriggered\", False\n )\n is True\n )\n\n return False", "def any_matches(self) -> bool:\n ...", "def matches(self, x):\n return self.condition(x)", "def is_accepting_data(self):\n return self._is_accepting_data", "def sent_data_present(self):\r\n for socket_type in self.sent_data:\r\n if self.sent_data[socket_type]:\r\n log.debug(\"Got data to send for 
socket type : %s\" %\r\n socket_type)\r\n return True\r\n return False", "def _should_accept_regulated_payload(payload, condition):\n return not condition(payload)", "def _has_received_data(self):\n return self._bytes_received != self.bytes_received_on_connection", "def Success(self) -> bool:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if the battery voltage is getting low and drops below the lowest-voltage setting. The _run_in_condition() method runs if this method returns True.
def _is_condition(self, data): ret = False current_voltage = data["data"]["Battery Voltage"]["value"] if self.pre_voltage_ is None: if self.lowest_voltage_ > current_voltage: ret = True self.pre_voltage_ = current_voltage # If the battery volate run over the limit of lowest batery voltate, # returns True and run some procedure to save the battery power. elif self.pre_voltage_ >= self.lowest_voltage_: if self.lowest_voltage_ > current_voltage: ret = True self.pre_voltage_ = current_voltage logger.debug("Return {} on {} at {}".format( ret, type(self).__name__, data["at"])) return ret
[ "def _is_condition(self, data):\n ret = False\n current_voltage = data[\"data\"][\"Battery Voltage\"][\"value\"]\n\n if self.pre_voltage_ is None:\n if self.full_voltage_ <= current_voltage:\n ret = True\n self.pre_voltage_ = current_voltage\n\n # If the battery volate run over the limit of highest batery voltate,\n # returns True and run some procedure.\n elif self.pre_voltage_ < self.full_voltage_:\n if self.full_voltage_ <= current_voltage:\n ret = True\n\n self.pre_voltage_ = current_voltage\n\n logger.debug(\"Return {} on {} at {}\".format(\n ret, type(self).__name__, data[\"at\"]))\n\n return ret", "def within_limits(self):\n within_limit = True\n\n for lux_sensor, limit in self.lightlevel.items():\n current_lightlevel = float(self.get_state(lux_sensor))\n if current_lightlevel > limit:\n within_limit = False\n self.log('Light level beyond limit')\n break\n\n return within_limit", "def _is_condition(self, data):\n ret = False\n current_charge_value = data[\"data\"][\"Charge Current\"][\"value\"]\n\n if self.pre_current_ is None:\n if self.high_current_ <= current_charge_value:\n ret = True\n self.pre_current_ = current_charge_value\n\n # If the charge current run over the limit of highest charege current,\n # returns True and run some procedure.\n elif self.pre_current_ < self.high_current_:\n if self.high_current_ <= current_charge_value:\n ret = True\n\n self.pre_current_ = current_charge_value\n\n logger.debug(\"Return {} on {} at {}\".format(\n ret, type(self).__name__, data[\"at\"]))\n\n return ret", "def _check_low_battery(self):\n if not self._low_battery_recd and self._low_battery_state:\n self._low_battery_clear_event.call_subscribers(low_battery=False)", "def battery_level(robot: cozmo.robot.Robot):\n\tlog.info('Battery level...')\n\tlevel = robot.battery_voltage\n\tlog.info('Level is '+str(level)+'V')\n\tif level<=3.5:\n\t\tlog.warning('Level is low. Please place Cozmo on charger.')", "def stopping_condition_met(self, execution):\r\n return execution.oobs > 0", "def set_exit_lowbatt_threshold(self, **kwargs):\n value = kwargs['value']\n try:\n assert 0 < value < 1.0 and float(value) is float\n if value >= self.parent.constants.ENTER_LOW_BATTERY_MODE_THRESHOLD:\n self.parent.logger.error(\n f\"New value for Exit LB thresh must be less than current Enter LB thresh value\")\n assert False\n self.set_parameter(name=\"EXIT_LOW_BATTERY_MODE_THRESHOLD\", value=value)\n except AssertionError:\n self.parent.logger.error(f\"Incompatible value {value} for EXIT_LOW_BATTERY_MODE_THRESHOLD\")", "def check(self, currentTau):\n self.voltage += self.sumInputs\n self.sumInputs = 0\n self.refractCount -= 1\n self.voltageHistory.append(self.voltage)\n if(self.refractCount <= 0):\n self.refractCount = 0 \n if(self.voltage >= self.threshold):\n self.spikeTimes.append(currentTau)\n self.AP()\n self.voltage -= abs(self.threshold)\n self.refractCount = self.refractory\n #print(\"AP at \"+ str(currentTau) + \" at \" + self.name)\n return True\n return False", "def low_AGWRC(self, value = 0.88):\n\n if any([p.AGWRC < value for p in self.postprocessor.hspfmodel.perlnds]):\n print('Some of the PERLNDs have an AGWRC less than 0.88, ' +\n ' which is very low. 
Consider increasing these values.\\n')\n return True\n\n return False", "def examine(self):\n\n max_capacity, current_capacity = self.__get_battery_capacities()\n\n if (int(max_capacity) > 0 and int(current_capacity) > 0):\n\n current_rate = float(current_capacity) / float(max_capacity)\n\n if self.__is_charging_battery():\n if current_rate >= self.__BATTERY_UPPER_LIMIT:\n self.__send_notification(\n title='the undercharge',\n current_capacity=str(current_capacity)\n )\n else:\n if current_rate <= self.__BATTERY_LOWER_LIMIT:\n self.__send_notification(\n title='the overcharging',\n current_capacity=str(current_capacity)\n )", "def checkLimit(self):\n self.clearLatchedStatus()\n value = self.readLatchedEventStatus()\n if (int(value)&1536)<>0: \n return 3\n print(\"Positive and Negative limit switches are active\")\n elif (int(value)&512)<>0:\n return 1\n print(\"Positive limit switch is active\") \n elif (int(value)&1024)<>0:\n return 2 \n print(\"Negative limit switch is active\") \n elif int(value) == 0 or (int(value)&131072)<>0 or (int(value)&65536)<>0 or (int(value)&67108864)<>0: \n return 0\n print(\"NO limit switch is active\")", "def _quiet_enough(self):\n return self.level <= self.min_level", "def test_get_battery_voltage(self):\n vmin = ThunderBorg._BATTERY_MIN_DEFAULT\n vmax = ThunderBorg._BATTERY_MAX_DEFAULT\n voltage = self._tb.get_battery_voltage()\n msg = (\"Voltage should be in the range of {:0.02f} to {:0.02f}, \"\n \"found {:0.02f} volts\").format(vmin, vmax, voltage)\n self.assertTrue(vmin <= voltage <= vmax, msg)", "def set_voltage(self, voltage):\n assert voltage <= self.limit_voltage,\\\n \"Invalid range! {}V > limit of {}V\".format(voltage, self.limit_voltage)\n assert voltage > 0, \"Negative voltage given\"\n if voltage < self.min_voltage:\n self.logger.warning(\"Given voltage {}V < {}V minimum, setting to minimum voltage\".format(voltage,\n self.min_voltage))\n voltage = self.min_voltage\n voltage_bytes = \"{:0{}d}\".format(round(voltage * 10**self.SET_DECIMALS[\"U\"]),\n self.SET_DECIMALS[\"U\"] + 2).encode()\n self._execute(b\"VOLT\" + voltage_bytes)\n return True", "def check(self, currentTau):\n self.voltage += self.sumInputs\n self.sumInputs = 0\n self.refractCount -= 1\n self.voltageHistory.append(self.voltage)\n if(self.refractCount <= 0):\n self.refractCount = 0 \n if(self.voltage >= self.threshold or random.random()*1000<self.rateConstant):\n self.spikeTimes.append(currentTau)\n #self.AP()\n self.voltage = 0\n self.refractCount = self.refractory\n self.voltageHistory[len(self.voltageHistory)-1] = self.voltageHistory[len(self.voltageHistory)-1] +1\n #print(\"AP at \"+ str(currentTau) + \" at \" + self.name)\n return True\n return False", "def _condition_met(self, event):\n if self.condition_function is None:\n return True\n return self.condition_function(event)", "def _min_cond_to_trigger(global_step, n_epoch, min_step=-1):\n if min_step > 0 and min_step <= 10:\n if n_epoch >= min_step:\n return True\n else:\n return False\n else:\n if global_step >= min_step:\n return True\n else:\n return False", "def is_supply(self):\n return self.quantities[self.period - 1] <= 0", "def check_power(self, value):\n if value > self.PW_lim[-1] or value < self.PW_lim[0]:\n raise Exception('The device does not support the power {} dBm. \\n '\n 'The supported range is ({},{}) dBm.'.format(value, self.PW_lim[0], self.PW_lim[1]))\n self.logger.debug('The value {} for power in dBm is OK.'.format(value))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if the battery voltage is getting high and exceeds the highest-voltage setting. The _run_in_condition() method runs if this method returns True.
def _is_condition(self, data): ret = False current_voltage = data["data"]["Battery Voltage"]["value"] if self.pre_voltage_ is None: if self.full_voltage_ <= current_voltage: ret = True self.pre_voltage_ = current_voltage # If the battery volate run over the limit of highest batery voltate, # returns True and run some procedure. elif self.pre_voltage_ < self.full_voltage_: if self.full_voltage_ <= current_voltage: ret = True self.pre_voltage_ = current_voltage logger.debug("Return {} on {} at {}".format( ret, type(self).__name__, data["at"])) return ret
[ "def _is_condition(self, data):\n ret = False\n current_voltage = data[\"data\"][\"Battery Voltage\"][\"value\"]\n\n if self.pre_voltage_ is None:\n if self.lowest_voltage_ > current_voltage:\n ret = True\n self.pre_voltage_ = current_voltage\n\n # If the battery volate run over the limit of lowest batery voltate,\n # returns True and run some procedure to save the battery power.\n elif self.pre_voltage_ >= self.lowest_voltage_:\n if self.lowest_voltage_ > current_voltage:\n ret = True\n\n self.pre_voltage_ = current_voltage\n\n logger.debug(\"Return {} on {} at {}\".format(\n ret, type(self).__name__, data[\"at\"]))\n\n return ret", "def within_limits(self):\n within_limit = True\n\n for lux_sensor, limit in self.lightlevel.items():\n current_lightlevel = float(self.get_state(lux_sensor))\n if current_lightlevel > limit:\n within_limit = False\n self.log('Light level beyond limit')\n break\n\n return within_limit", "def _is_condition(self, data):\n ret = False\n current_charge_value = data[\"data\"][\"Charge Current\"][\"value\"]\n\n if self.pre_current_ is None:\n if self.high_current_ <= current_charge_value:\n ret = True\n self.pre_current_ = current_charge_value\n\n # If the charge current run over the limit of highest charege current,\n # returns True and run some procedure.\n elif self.pre_current_ < self.high_current_:\n if self.high_current_ <= current_charge_value:\n ret = True\n\n self.pre_current_ = current_charge_value\n\n logger.debug(\"Return {} on {} at {}\".format(\n ret, type(self).__name__, data[\"at\"]))\n\n return ret", "def stopping_condition_met(self, execution):\r\n return execution.oobs > 0", "def examine(self):\n\n max_capacity, current_capacity = self.__get_battery_capacities()\n\n if (int(max_capacity) > 0 and int(current_capacity) > 0):\n\n current_rate = float(current_capacity) / float(max_capacity)\n\n if self.__is_charging_battery():\n if current_rate >= self.__BATTERY_UPPER_LIMIT:\n self.__send_notification(\n title='the undercharge',\n current_capacity=str(current_capacity)\n )\n else:\n if current_rate <= self.__BATTERY_LOWER_LIMIT:\n self.__send_notification(\n title='the overcharging',\n current_capacity=str(current_capacity)\n )", "def _check_low_battery(self):\n if not self._low_battery_recd and self._low_battery_state:\n self._low_battery_clear_event.call_subscribers(low_battery=False)", "def battery_level(robot: cozmo.robot.Robot):\n\tlog.info('Battery level...')\n\tlevel = robot.battery_voltage\n\tlog.info('Level is '+str(level)+'V')\n\tif level<=3.5:\n\t\tlog.warning('Level is low. 
Please place Cozmo on charger.')", "def test_get_battery_voltage(self):\n vmin = ThunderBorg._BATTERY_MIN_DEFAULT\n vmax = ThunderBorg._BATTERY_MAX_DEFAULT\n voltage = self._tb.get_battery_voltage()\n msg = (\"Voltage should be in the range of {:0.02f} to {:0.02f}, \"\n \"found {:0.02f} volts\").format(vmin, vmax, voltage)\n self.assertTrue(vmin <= voltage <= vmax, msg)", "def check(self, currentTau):\n self.voltage += self.sumInputs\n self.sumInputs = 0\n self.refractCount -= 1\n self.voltageHistory.append(self.voltage)\n if(self.refractCount <= 0):\n self.refractCount = 0 \n if(self.voltage >= self.threshold):\n self.spikeTimes.append(currentTau)\n self.AP()\n self.voltage -= abs(self.threshold)\n self.refractCount = self.refractory\n #print(\"AP at \"+ str(currentTau) + \" at \" + self.name)\n return True\n return False", "def has_battery(self):\n return self.read_battery1 or self.read_battery2", "def checkLimit(self):\n self.clearLatchedStatus()\n value = self.readLatchedEventStatus()\n if (int(value)&1536)<>0: \n return 3\n print(\"Positive and Negative limit switches are active\")\n elif (int(value)&512)<>0:\n return 1\n print(\"Positive limit switch is active\") \n elif (int(value)&1024)<>0:\n return 2 \n print(\"Negative limit switch is active\") \n elif int(value) == 0 or (int(value)&131072)<>0 or (int(value)&65536)<>0 or (int(value)&67108864)<>0: \n return 0\n print(\"NO limit switch is active\")", "def set_exit_lowbatt_threshold(self, **kwargs):\n value = kwargs['value']\n try:\n assert 0 < value < 1.0 and float(value) is float\n if value >= self.parent.constants.ENTER_LOW_BATTERY_MODE_THRESHOLD:\n self.parent.logger.error(\n f\"New value for Exit LB thresh must be less than current Enter LB thresh value\")\n assert False\n self.set_parameter(name=\"EXIT_LOW_BATTERY_MODE_THRESHOLD\", value=value)\n except AssertionError:\n self.parent.logger.error(f\"Incompatible value {value} for EXIT_LOW_BATTERY_MODE_THRESHOLD\")", "def _quiet_enough(self):\n return self.level <= self.min_level", "def is_supply(self):\n return self.quantities[self.period - 1] <= 0", "def algorithm_should_terminate(self, config, check_cycling):\n if self.should_terminate:\n # self.primal_bound_progress[0] can only be inf or -inf.\n # If the current primal bound equals inf or -inf, we can infer there is no solution.\n if self.primal_bound == self.primal_bound_progress[0]:\n self.results.solver.termination_condition = tc.noSolution\n else:\n self.results.solver.termination_condition = tc.feasible\n return True\n return (\n self.bounds_converged()\n or self.reached_iteration_limit()\n or self.reached_time_limit()\n or self.reached_stalling_limit()\n or (check_cycling and self.iteration_cycling())\n )", "def check(self, currentTau):\n self.voltage += self.sumInputs\n self.sumInputs = 0\n self.refractCount -= 1\n self.voltageHistory.append(self.voltage)\n if(self.refractCount <= 0):\n self.refractCount = 0 \n if(self.voltage >= self.threshold or random.random()*1000<self.rateConstant):\n self.spikeTimes.append(currentTau)\n #self.AP()\n self.voltage = 0\n self.refractCount = self.refractory\n self.voltageHistory[len(self.voltageHistory)-1] = self.voltageHistory[len(self.voltageHistory)-1] +1\n #print(\"AP at \"+ str(currentTau) + \" at \" + self.name)\n return True\n return False", "def over(self, x):\n return not math.isnan(x) and x >= self.high", "def stopping_condition_is_met(self) -> bool:\n return self.iter >= self.max_iter", "def upper_limit(self, val):\n self.gf_condition(upperLimit=val)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if the charge current is getting high and exceeds the highest-current setting. The _run_in_condition() method runs if this method returns True.
def _is_condition(self, data): ret = False current_charge_value = data["data"]["Charge Current"]["value"] if self.pre_current_ is None: if self.high_current_ <= current_charge_value: ret = True self.pre_current_ = current_charge_value # If the charge current run over the limit of highest charege current, # returns True and run some procedure. elif self.pre_current_ < self.high_current_: if self.high_current_ <= current_charge_value: ret = True self.pre_current_ = current_charge_value logger.debug("Return {} on {} at {}".format( ret, type(self).__name__, data["at"])) return ret
[ "def within_limits(self):\n within_limit = True\n\n for lux_sensor, limit in self.lightlevel.items():\n current_lightlevel = float(self.get_state(lux_sensor))\n if current_lightlevel > limit:\n within_limit = False\n self.log('Light level beyond limit')\n break\n\n return within_limit", "def _is_condition(self, data):\n ret = False\n current_voltage = data[\"data\"][\"Battery Voltage\"][\"value\"]\n\n if self.pre_voltage_ is None:\n if self.full_voltage_ <= current_voltage:\n ret = True\n self.pre_voltage_ = current_voltage\n\n # If the battery volate run over the limit of highest batery voltate,\n # returns True and run some procedure.\n elif self.pre_voltage_ < self.full_voltage_:\n if self.full_voltage_ <= current_voltage:\n ret = True\n\n self.pre_voltage_ = current_voltage\n\n logger.debug(\"Return {} on {} at {}\".format(\n ret, type(self).__name__, data[\"at\"]))\n\n return ret", "def _is_condition(self, data):\n ret = False\n current_voltage = data[\"data\"][\"Battery Voltage\"][\"value\"]\n\n if self.pre_voltage_ is None:\n if self.lowest_voltage_ > current_voltage:\n ret = True\n self.pre_voltage_ = current_voltage\n\n # If the battery volate run over the limit of lowest batery voltate,\n # returns True and run some procedure to save the battery power.\n elif self.pre_voltage_ >= self.lowest_voltage_:\n if self.lowest_voltage_ > current_voltage:\n ret = True\n\n self.pre_voltage_ = current_voltage\n\n logger.debug(\"Return {} on {} at {}\".format(\n ret, type(self).__name__, data[\"at\"]))\n\n return ret", "def stopping_condition_met(self, execution):\r\n return execution.oobs > 0", "def _end_condition(self):\n return self._num_iters >= self._max_iter", "def is_supply(self):\n return self.quantities[self.period - 1] <= 0", "def stopping_condition_is_met(self) -> bool:\n return self.iter >= self.max_iter", "def should_hit(self):\n \n return self.hand.compute_bj_count() < 17", "def isHigher(self, current_card, next_card):\n if current_card < next_card:\n status = True\n elif current_card >= next_card:\n status = False\n return status", "def algorithm_should_terminate(self, config, check_cycling):\n if self.should_terminate:\n # self.primal_bound_progress[0] can only be inf or -inf.\n # If the current primal bound equals inf or -inf, we can infer there is no solution.\n if self.primal_bound == self.primal_bound_progress[0]:\n self.results.solver.termination_condition = tc.noSolution\n else:\n self.results.solver.termination_condition = tc.feasible\n return True\n return (\n self.bounds_converged()\n or self.reached_iteration_limit()\n or self.reached_time_limit()\n or self.reached_stalling_limit()\n or (check_cycling and self.iteration_cycling())\n )", "def capacity_reached(self):\n\n percentage_free, available_space = self.get_volume_capacity()\n percentage_free = int(percentage_free.replace('%', ''))\n if percentage_free >= int(self.config.safe_capacity_percentage):\n return True\n return False", "def over(self, x):\n return not math.isnan(x) and x >= self.high", "def __IsBlockingMandateBreached(self):\n if not self._blockPreDealCheck:\n self._blockPreDealCheck = False\n for limitSpecName in self._allMandateDetailDict:\n mandate = self._allMandateDetailDict.At(limitSpecName)\n if mandate.GetBehaviour()[0] == 3:\n self._blockPreDealCheck = True\n return True\n else:\n return self._blockPreDealCheck\n return False", "def is_supply(self):\n return self.quantities[0] <= 0", "def check_backtrack(self):\n differential = self.character.stats[4] - self.dungeonlevel\n if differential < 
0:\n cutoff = float(3 - differential) / float(6 - 6 * differential)\n else:\n cutoff = float(3 + 5 * differential) / float(6 + 6 * differential)\n return random.random() < cutoff", "def check_high_card(self):\n if not self.cards or not len(self.cards) == self.MAXIMUM_CARDS:\n return False\n\n # Always at least have a high card in this case.\n card_values = self.get_card_values()\n card_values.sort(reverse=True)\n self.multiple = 0\n self.rank = card_values\n\n return True", "def is_bust(self):\n for value in self.get_hand_values():\n if value <= 21:\n return False\n return True", "def _quiet_enough(self):\n return self.level <= self.min_level", "def _check_if_cut_off_time_is_reached(self):\n if self._current_sub_circuit:\n longest_duration = min([self.nodes[node].sub_circuit_time\n for node in self._current_sub_circuit.involved_nodes])\n else:\n longest_duration = 0\n\n if self.total_duration + longest_duration >= self.cut_off_time:\n if self._current_sub_circuit is not None:\n if self._current_sub_circuit.all_ran:\n self.cut_off_time_reached = True\n else:\n self._current_sub_circuit.set_cut_off_time_reached()\n if self.total_duration >= self.cut_off_time:\n self.cut_off_time_reached = True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run a sentiment analysis request on text within a passed filename.
def analyze(movie_review_filename): client = language.LanguageServiceClient() with open(movie_review_filename, 'r') as review_file: # Instantiates a plain text document. content = review_file.read() print(content) document = types.Document( content=content, type=enums.Document.Type.PLAIN_TEXT) annotations = client.analyze_sentiment(document=document) print_result(annotations)
[ "def get_sentiment(text):\n response = requests.post(settings.SENTIMENT_ANALYSIS_API, data={\n 'text': text\n })\n return response.json()", "def _extract_sentiment_from_text(self, corpus_list, doc_name_to_id_dict):\n vader = SentimentIntensityAnalyzer()\n '''\n Go through the documents and rate their sentiment\n '''\n doc_count=0\n sentiment_feature_dict=defaultdict(list)\n for doc_name, row_id in doc_name_to_id_dict.iteritems():\n logger.debug(\"Extracting sentiment from: \" + doc_name)\n doc=corpus_list[row_id]\n ''' \n doc is one document from our corpus\n '''\n sentences=doc.split(\".\")\n pos_count=0\n neg_count=0\n prev_word_was_positive=False\n prev_word_was_negative=False\n pos_neg_count=0\n count=0\n longest_run_of_positives=0\n longest_run_of_negatives=0\n run_of_positives_count=0\n run_of_negatives_count=0\n score=vader.polarity_scores(' '.join(sentences))\n compound_polarity=score['compound']\n '''\n Rate the overall polarity of the document (1 positive, 0 negative)\n '''\n if compound_polarity>0:\n compound_polarity=1\n else:\n compound_polarity=0\n\n '''\n Rate each word in the corpus for sentiment and construct the word-based\n features\n '''\n for sentence in sentences:\n words=sentence.split(\" \")\n for word in words:\n score=vader.polarity_scores(word)\n '''\n If the negative sentiment of a word is greater than the positive sentiment\n '''\n if score['pos']>abs(score['neg']):\n pos_count+=1\n if prev_word_was_negative:\n pos_neg_count+=1\n prev_word_was_negative=False\n if run_of_negatives_count>longest_run_of_negatives:\n longest_run_of_negatives=run_of_negatives_count\n run_of_negatives_count=0\n else:\n run_of_positives_count+=1\n prev_word_was_positive=True\n\n '''\n If the positive sentiment of a word is greater than the negative sentiment\n '''\n if score['pos']<abs(score['neg']):\n neg_count+=1\n if prev_word_was_positive:\n prev_word_was_positive=False\n pos_neg_count+=1\n if run_of_positives_count>longest_run_of_positives:\n longest_run_of_positives=run_of_positives_count\n run_of_negatives_count=0\n else:\n run_of_negatives_count+=1\n prev_word_was_negative=True\n count+=1\n\n sentiment_feature_dict[doc_name].append([pos_count,neg_count,pos_neg_count,longest_run_of_negatives,longest_run_of_positives,compound_polarity])\n \n return sentiment_feature_dict", "def parse_sentiment_file(self, file):\n \n file_sentiment = file['documentSentiment']\n file_entities = [x['name'] for x in file['entities']]\n file_entities = self.sentence_sep.join(file_entities)\n \n file_sentences_sentiment = [x['sentiment'] for x in file['sentences']]\n \n file_sentences_sentiment = pd.DataFrame.from_dict(\n file_sentences_sentiment, orient='columns')\n file_sentences_sentiment_df = pd.DataFrame(\n {\n 'magnitude_sum': file_sentences_sentiment['magnitude'].sum(axis=0),\n 'score_sum': file_sentences_sentiment['score'].sum(axis=0),\n 'magnitude_mean': file_sentences_sentiment['magnitude'].mean(axis=0),\n 'score_mean': file_sentences_sentiment['score'].mean(axis=0),\n 'magnitude_var': file_sentences_sentiment['magnitude'].var(axis=0),\n 'score_var': file_sentences_sentiment['score'].var(axis=0),\n }, index=[0]\n )\n \n df_sentiment = pd.DataFrame.from_dict(file_sentiment, orient='index').T\n df_sentiment = pd.concat([df_sentiment, file_sentences_sentiment_df], axis=1)\n \n df_sentiment['entities'] = file_entities\n df_sentiment = df_sentiment.add_prefix('sentiment_')\n \n return df_sentiment", "def sentiment_analysis(text):\n\n # pass text into sentiment url\n if True:\n ret = 
get_sentiment_from_url(text, sentimentURL)\n if ret is None:\n sentiment_url = None\n else:\n sentiment_url, neg_url, pos_url, neu_url = ret\n else:\n sentiment_url = None\n\n # pass text into TextBlob\n text_tb = TextBlob(text)\n\n # pass text into VADER Sentiment\n analyzer = SentimentIntensityAnalyzer()\n text_vs = analyzer.polarity_scores(text)\n\n # determine sentiment from our sources\n if sentiment_url is None:\n #threshold values\n if text_tb.sentiment.polarity < 0 and text_vs['compound'] <= -0.05:\n sentiment = \"negative\"\n elif text_tb.sentiment.polarity > 0 and text_vs['compound'] >= 0.05:\n sentiment = \"positive\"\n else:\n sentiment = \"neutral\"\n else:\n # this works if the above function executes properly\n if text_tb.sentiment.polarity < 0 and text_vs['compound'] <= -0.05 and sentiment_url == \"negative\":\n sentiment = \"negative\"\n elif text_tb.sentiment.polarity > 0 and text_vs['compound'] >= 0.05 and sentiment_url == \"positive\":\n sentiment = \"positive\"\n else:\n sentiment = \"neutral\"\n\n polarity = (text_tb.sentiment.polarity + text_vs['compound']) / 2\n\n # output sentiment polarity\n print(\"************\")\n print(\"Sentiment Polarity: \" + str(round(polarity, 3)))\n\n # output sentiment subjectivity (TextBlob)\n print(\"Sentiment Subjectivity: \" + str(round(text_tb.sentiment.subjectivity, 3)))\n\n # output sentiment\n print(\"Sentiment (url): \" + str(sentiment_url))\n print(\"Sentiment (algorithm): \" + str(sentiment))\n print(\"Overall sentiment (textblob): \", text_tb.sentiment)\n print(\"Overall sentiment (vader): \", text_vs)\n print(\"sentence was rated as \", round(text_vs['neg']*100, 3), \"% Negative\")\n print(\"sentence was rated as \", round(text_vs['neu']*100, 3), \"% Neutral\")\n print(\"sentence was rated as \", round(text_vs['pos']*100, 3), \"% Positive\")\n print(\"************\")\n\n return polarity, text_tb.sentiment.subjectivity, sentiment", "def parse_sentiment_file(self, file):\n \n file_sentiment = file['documentSentiment']\n file_entities = [x['name'] for x in file['entities']]\n file_entities = self.sentence_sep.join(file_entities)\n \n file_sentences_sentiment = [x['sentiment'] for x in file['sentences']]\n \n file_sentences_sentiment = pd.DataFrame.from_dict(\n file_sentences_sentiment, orient='columns')\n file_sentences_sentiment_df = pd.DataFrame(\n {\n 'magnitude_sum': file_sentences_sentiment['magnitude'].sum(axis=0),\n 'score_sum': file_sentences_sentiment['score'].sum(axis=0),\n }, index=[0]\n )\n \n df_sentiment = pd.DataFrame.from_dict(file_sentiment, orient='index').T\n df_sentiment = pd.concat([df_sentiment, file_sentences_sentiment_df], axis=1)\n \n df_sentiment['entities'] = file_entities\n df_sentiment = df_sentiment.add_prefix('sentiment_')\n \n return df_sentiment", "def sentiment(conn, name):\n\n curs = conn.cursor()\n text_query = f\"\"\"SELECT BodyText FROM PullRequests \n WHERE RepoName = '{name}'\"\"\"\n curs.execute(text_query)\n #Collect messages, convert to strings then replace punct\n text = pd.DataFrame(curs.fetchall(), columns=['text'])\n text['text'] = text['text'].astype(str).str.replace(\"[^\\w\\s]\",\"\")\n #Ensure none of the messages are empty\n text = text[text[\"text\"] != \"\"]\n text['text'] = text['text'].str.lower()\n text['text_lemmatized'] = text['text'].apply(lemmatize_text)\n #Generate scores, create list of compound scores, then return average\n sid = SentimentIntensityAnalyzer()\n scores = []\n for i in text[\"text_lemmatized\"]:\n score = sid.polarity_scores(i)\n 
scores.append(score)\n compounds = [x['compound'] for x in scores]\n if len(compounds) == 0:\n return \"You don't have any commit messages with body text!\"\n else:\n avg = sum(compounds)/len(compounds)\n return avg", "def get_sentiment(text):\n # check that text does not exceed API's character limit\n url = \"http://text-processing.com/api/sentiment/\"\n if len(text) < 80000:\n # query text-processing API for sentiment score\n payload = {'text': text}\n\n # make API call\n r = requests.post(url, data=payload)\n\n # load JSON from API call\n result = json.loads(r.text)\n\n # pull sentiment score\n sen_score = result['probability']['pos']\n\n time.sleep(random.randint(0,5))\n return sen_score", "def market_sentiment(raw_data):\n # TODO\n pass", "def sentiment_score(data,sent_file,response_dict,name = 'File1',threshold = 0):\n \n sentiments = {}\n\n for i,response in enumerate(data):\n sentiment = 0 \n word_count = 0\n for word in response:\n word_count+=1\n if sent_file.has_key(word.lower()):\n sentiment+=float(sent_file[word.lower()])\n sentiments[i] = [sentiment,response]\n \n #add to score as first var in response_dict\n tmp_lst = response_dict[i]\n tmp_lst.append(sentiment)\n tmp_lst.append(word_count)\n response_dict[i] = tmp_lst\n \n\n #aggregate analysis\n overall = 0\n for score in sentiments.keys():\n overall += sentiments[score][0]\n\n pos=0\n neg=0\n for key in sentiments.keys():\n if sentiments[key][0]>threshold:\n pos+=1\n elif sentiments[key][0]<-(threshold):\n neg+=1\n\n pos_ratio = pos/float(len(sentiments.keys()))\n neg_ratio = neg/float(len(sentiments.keys()))\n\n #ls.print_sent_analysis(name,overall,sentiments,pos_ratio,neg_ratio)\n\n return response_dict", "def callNLPService(text):\n google_cloud_credentials = \"./assets/Interview_Voice_google_cloud_key.json\"\n nlp_service = get_google_nlp_service(google_cloud_credentials)\n client = nlp_service.documents()\n request1 = client.analyzeEntitySentiment(body={\n \"document\": {\n \"type\": \"PLAIN_TEXT\",\n \"content\": text,\n \"language\": \"en_IN\"\n }\n })\n try:\n response = request1.execute()\n except googleapiclient.errors.HttpError as e:\n raise RequestError(e)\n except URLError as e:\n raise RequestError(\"recognition connection failed: {0}\".format(e.reason))\n entities = response[\"entities\"]\n return entities", "def analyse_text(cls, text: str) -> List[str]:\n print(\"\\nSending data to Deep AI for analysis...\\n\")\n try:\n response = requests.post(\n \"https://api.deepai.org/api/sentiment-analysis\",\n data={\n 'text': text,\n },\n headers={\n 'api-key': DEEP_API_KEY\n }\n )\n\n sentiments = response.json()['output']\n return sentiments\n except Exception:\n print(\"\\nSorry, looks like something went wrong!\")\n return []", "def main():\n\n # command line parsing\n parser = buildParser()\n args = parser.parse_args()\n\n\n # construct the tweet pro-processing object\n tweetTokenizer = TweetTokenizer()\n lPunct = list(string.punctuation)\n lStopwords = stopwords.words('english') + lPunct + ['rt', 'via', '...', '…', '\"', \"'\", '`']\n\n tweetProcessor = TwitterProcessing(tweetTokenizer, lStopwords)\n\n\n # load set of positive words\n lPosWords = []\n with open(args.posWordFile, 'r', encoding='utf-8', errors='ignore') as fPos:\n for sLine in fPos:\n lPosWords.append(sLine.strip())\n\n setPosWords = set(lPosWords)\n\n\n # load set of negative words\n lNegWords = []\n with codecs.open(args.negWordFile, 'r', encoding='utf-8', errors='ignore') as fNeg:\n for sLine in fNeg:\n lNegWords.append(sLine.strip())\n\n 
setNegWords = set(lNegWords)\n\n # compute the sentiment\n lSentiment = []\n if args.approach == 'count':\n lSentiment = countWordSentimentAnalysis(setPosWords, setNegWords, args.tweetsFile, args.print, tweetProcessor)\n elif args.approach == 'vader':\n lSentiment = vaderSentimentAnalysis(args.tweetsFile, args.print, tweetProcessor)\n\n\n # determine if we should output a time series of sentiment scores across time\n if args.ts:\n # TODO: write code to display the time series\n # we are using pandas for this, but first we need to get it into a pandas data frame structure\n series = pd.DataFrame(lSentiment, columns=['date', 'sentiment'])\n # tell pandas that the date column is the one we use for indexing (or x-axis)\n series.set_index('date', inplace=True)\n # pandas makes a guess at the type of the columns, but to make sure it doesn't get it wrong, we set the sentiment\n # column to floats\n series[['sentiment']] = series[['sentiment']].apply(pd.to_numeric)\n\n # This step is not necessary, but pandas has a neat function that allows us to group the series at different\n # resultion. The 'how=' part tells it how to group the instances. In this example, it sames we want to group\n # by day, and add up all the sentiment scores for the same day and create a new time series called 'newSeries'\n # with this day resolution\n # TODO: play with this for different resolution, '1H' is by hour, '1M' is by minute etc\n sentimentSeries = series.resample('1H').sum()\n tweetCountSeries = series.resample('1H').count()\n \n # this plots and shows the time series\n plt.figure(figsize=(6,3), dpi = 100)\n plt.plot(sentimentSeries)\n plt.plot(tweetCountSeries)\n plt.legend(['Sentiment', 'Tweet Count'], loc='upper left')\n plt.savefig('fig6.png')\n plt.show()\n plt.close()", "def sentiment(request):\n sentiment_response = False\n text_data = request.POST.get('sent_text')\n\n if not text_data:\n text_data = 'I am very happy to meet you. 
I can only imagine this is a positive sentiment.'\n\n if text_data:\n api_key = 'AIzaSyB_Ycxdg5aIuJ8HiJTJj3gfIN-i8CguAZ4'\n service = build('language', 'v1', developerKey=api_key)\n sentiment_request = service.documents().analyzeSentiment(\n body={\n 'document': {\n 'type': 'PLAIN_TEXT',\n 'content': text_data,\n },\n }\n )\n\n sentiment_response = sentiment_request.execute()\n\n\n entity_request = service.documents().analyzeEntities(\n body={\n 'document': {\n 'type': 'PLAIN_TEXT',\n 'content': text_data,\n },\n }\n )\n\n entity_response = entity_request.execute()\n\n\n\n\n\n MattUtils.write_json_to_file(r'F:\\dev\\jsonData.txt', entity_response)\n return render(request, 'SentimentAnalysis.html',\n {'sentiment_response':sentiment_response, 'text_data':text_data, 'entity_response':entity_response, 'range':range(10)})", "def entity_sentiment_text(text):\n client = language.LanguageServiceClient()\n\n if isinstance(text, six.binary_type):\n text = text.decode('utf-8')\n\n document = types.Document(\n content=text.encode('utf-8'),\n type=enums.Document.Type.PLAIN_TEXT)\n\n # Detect and send native Python encoding to receive correct word offsets.\n encoding = enums.EncodingType.UTF32\n if sys.maxunicode == 65535:\n encoding = enums.EncodingType.UTF16\n\n result = client.analyze_entity_sentiment(document, encoding)\n for entity in result.entities:\n# print('Mentions: ')\n print(u'Name: \"{}\"'.format(entity.name))\n for mention in entity.mentions:\n# print(u' Begin Offset : {}'.format(mention.text.begin_offset))\n print(u' Content : {}'.format(mention.text.content))", "def sentiment(text):\n words = pattern_split.split(text.lower())\n sentiments1 = map(lambda word: wordlist.get(word, 0), words)\n sentiments = []\n for k in sentiments1:\n\tif k != 0:\n\t\tsentiments.append(k)\n if sentiments:\n # How should you weight the individual word sentiments? \n # You could do N, sqrt(N) or 1 for example. Here I use sqrt(N)\n sentiment = float(sum(sentiments))/math.sqrt(len(sentiments))\n else:\n sentiment = 0\n print 'from function',sentiment\n return sentiment", "def detect_sentiment(text):\r\n\r\n document = language.types.Document(\r\n content=text,\r\n type=language.enums.Document.Type.PLAIN_TEXT)\r\n\r\n sentiment = client.analyze_sentiment(document).document_sentiment\r\n\r\n return sentiment.score, sentiment.magnitude", "def add_file(self, filename):\n for line in [line.rstrip().lower() for line in open(filename, errors='ignore').readlines()]:\n self.add_sentence(line)", "def sentiment_analysis(text):\n return SentimentIntensityAnalyzer().polarity_scores(skip_gutenberg_header_and_tail(text))", "def read_sentiment():\n with open('sentiment.txt', 'r') as f:\n for line in f:\n line = line.strip().split()\n if line[1]<0:\n neg_words.add(line[1])\n elif line[1]>0:\n pos_words.add(line[1])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterate over all layout entity spaces.
def __iter__(self):
        return iter(self._layout_spaces.values())
[ "def handles(self):\n for entity_space in self:\n for handle in entity_space:\n yield handle", "def delete_all_entities(self):\n # Do not delete the entity space objects itself, just remove all entities from all entity spaces.\n for entity_space in self._layout_spaces.values():\n entity_space.delete_all_entities()", "def modelspace(self) -> Iterable[DXFGraphic]:\n factory = EntityFactory()\n polyline: Optional[Polyline] = None\n for index_entry, text in self.entities_as_str(self.index_entities + 1):\n dxftype = index_entry.value\n if dxftype in SUPPORTED_DXF_TYPES:\n xtags = ExtendedTags.from_text(text)\n # do not process paperspace entities\n if xtags.noclass.get_first_value(67, 0) == 1:\n continue\n entity = factory.entity(xtags)\n if dxftype == 'SEQEND':\n if polyline is not None:\n polyline.seqend = entity\n yield polyline\n polyline = None\n # suppress all other SEQEND entities -> ATTRIB\n elif dxftype == 'VERTEX' and polyline is not None:\n # vertices without POLYLINE are DXF structure errors, but here just ignore it.\n polyline.vertices.append(entity)\n elif dxftype == 'POLYLINE':\n polyline = cast(Polyline, entity)\n else:\n # POLYLINE without SEQEND is a DXF structure error, but here just ignore it.\n # By using this add-on be sure to get valid DXF files.\n polyline = None\n yield entity", "def __iter__(self):\r\n return self.layout.__iter__()", "def multi_insert(self) -> Iterator[Insert]:\n\n def transform_attached_attrib_entities(insert, offset):\n for attrib in insert.attribs:\n attrib.dxf.insert += offset\n\n def adjust_dxf_attribs(insert, offset):\n dxf = insert.dxf\n dxf.insert += offset\n dxf.discard(\"row_count\")\n dxf.discard(\"column_count\")\n dxf.discard(\"row_spacing\")\n dxf.discard(\"column_spacing\")\n\n done = set()\n row_spacing = self.dxf.row_spacing\n col_spacing = self.dxf.column_spacing\n rotation = self.dxf.rotation\n for row in range(self.dxf.row_count):\n for col in range(self.dxf.column_count):\n # All transformations in OCS:\n offset = Vec3(col * col_spacing, row * row_spacing)\n # If any spacing is 0, yield only unique locations:\n if offset not in done:\n done.add(offset)\n if rotation: # Apply rotation to the grid.\n offset = offset.rotate_deg(rotation)\n # Do not apply scaling to the grid!\n insert = self.copy()\n adjust_dxf_attribs(insert, offset)\n transform_attached_attrib_entities(insert, offset)\n yield insert", "def iter_all_worlds(self):\n world = [None] * len(self.evidence)\n for i, w in self._iter_worlds(self.variables, world, CallbyRef(0), {}):\n yield i, w", "def _all_entity_ranges(self):\n return (itertools.chain(\n (sm.asym_unit for sm in self._all_starting_models()),\n (seg.asym_unit for seg in self._all_segments()),\n (comp for a in self._all_assemblies() for comp in a),\n (comp for f in self._all_features()\n for comp in f._all_entities_or_asyms()),\n (d.asym_unit for d in self._all_densities())))", "def __iter__(self):\n for meta_offset in range(0, 4096, 4):\n z_offset, x_offset = divmod(meta_offset // 4, 32)\n x = self.x * 32 + x_offset\n z = self.z * 32 + z_offset\n chunk_location = self.locations[meta_offset:meta_offset + 4]\n offset = chunk_location[0] * (256 ** 2) + chunk_location[1] * 256 + chunk_location[2]\n if offset == 0:\n continue\n else:\n offset -= 2\n sector_count = chunk_location[3]\n yield ChunkColumn(self.data[4096 * offset:5096 * (offset + sector_count)], x=x, z=z)", "def iterdims(self):\n return iter(self._dimensions)", "def __iter__(self):\n for y in range(0, self.__panorama_h, self.__windows_size[1]):\n for x 
in range(0, self.__panorama_w, self.__windows_size[0]):\n y_end = y + self.__windows_size[1]\n x_end = x + self.__windows_size[0]\n yield Window(x, y, x_end, y_end, self.panorama[y:y_end, x:x_end])", "def loopHierSpaces(self):\n for i in range(1,self.level+1):\n self.hSpace = [i]\n self.loopHierSpacesRec(self.dim-1,self.level-(i-1))", "def print_entities(self) -> None:\n for row in self.maze_data:\n for ent in row:\n print(ENTITY_NAME[row[ent]], end=\"\\t\")\n print(\"\\n\")", "def iter_feature(self) -> Generator[Feature, None, None]:\n for feature in self.api.get_space_iterate(\n space_id=self._info[\"id\"], limit=100\n ):\n yield feature", "def grid_cells(self) -> Iterator:\n for row in self.grid_2d:\n for cell in row:\n yield cell", "def __iter__(self) -> Generator[tuple[str, str, Types], None, None]:\n for cluster, namespaces in self._inv.items():\n for namespace, types in namespaces.items():\n yield cluster, namespace, types", "def iter_all_with_margins(self):\n col_totals = self.col_totals\n row_totals = self.row_totals\n for ri, row in iter_items(self.rows):\n rm = row_totals[ri]\n for ci in iter_keys(col_totals):\n if isinstance(row, Mapping):\n cell = row.get(ci, 0)\n else:\n cell = row[ci]\n cm = col_totals[ci]\n yield rm, cm, cell", "def grid_assets_iterator(self, grid):\n for point in grid:\n assets = self.assets_for_cell(self.job_ctxt.job_id, point.site)\n for asset in assets:\n yield point, asset", "def __iter__(self):\n for (route, (media_type, obj)) in self._map.items():\n\n if isinstance(obj, dominate.dom_tag.dom_tag):\n obj = obj.render()\n\n yield (route, (media_type, obj))", "def iter_collect_entities_per_page_graph(job_scope: JobScope) -> Generator[Dict[str, Any], None, None]:\n page_token_manager = PageTokenManager.from_job_scope(job_scope)\n with PlatformApiContext(page_token_manager.get_best_token(job_scope.ad_account_id)) as fb_ctx:\n page_root_fb_entity = fb_ctx.to_fb_model(job_scope.ad_account_id, Entity.Page)\n\n entity_type = job_scope.report_variant\n # page size reduced to avoid error:\n # \"Please reduce the amount of data you're asking for, then retry your request\"\n entities = iter_native_entities_per_page_graph(page_root_fb_entity, entity_type, page_size=30)\n\n record_id_base_data = job_scope.to_dict()\n record_id_base_data.update(entity_type=entity_type, report_variant=None)\n\n with ChunkDumpStore(job_scope, chunk_size=DEFAULT_CHUNK_SIZE) as store, ChunkDumpStore(\n job_scope,\n chunk_size=DEFAULT_CHUNK_SIZE,\n bucket_type=ColdStoreBucketType.RAW_BUCKET,\n custom_namespace=NAMESPACE_RAW,\n ) as raw_store:\n for entity in entities:\n entity_data = entity.export_all_data()\n entity_data = add_vendor_data(\n entity_data, id=generate_universal_id(entity_id=entity_data.get('id'), **record_id_base_data)\n )\n entity_data['page_id'] = job_scope.ad_account_id\n\n if entity_type == Entity.PagePostPromotable:\n # store raw version of response (just to remain consistent)\n raw_store(entity_data)\n entity_data = _augment_page_post(entity_data)\n\n # Store the individual datum, use job context for the cold\n # storage thing to divine whatever it needs from the job context\n store(entity_data)\n\n # Signal to the system the new entity\n feedback_entity_task.delay(entity_data, entity_type)\n yield entity_data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get layout entity space by key.
def __getitem__(self, key):
        return self._layout_spaces[key]
[ "def get_entity_space(self, key):\n try:\n entity_space = self._layout_spaces[key]\n except KeyError: # create new entity space; internal exception\n entity_space = EntitySpace(self._entitydb)\n self.set_entity_space(key, entity_space)\n return entity_space", "def delete_entity_space(self, key):\n entity_space = self._layout_spaces[key]\n entity_space.delete_all_entities()\n del self._layout_spaces[key]", "def get_dim(self, key):\n return self.dim.get(key, None)", "def space(self, datastore):\n return self._get('/datastores/%s/space' % base.getid(datastore),\n 'datastore')", "def get_space(self, space_id: str) -> Space:\n return [x for x in self.spaces if x.id == space_id][0]", "def __getitem__(self, key):\n return self.__grid[key]", "def _get_workspace(key):\n return _WORKSPACES.get(key, None)", "def getSpace(self, space):\n if isinstance(space, self.connection.space._ROOTOBJECTTYPE): #pylint: disable=W0212\n return space\n\n space = self._getSpaceGuid(space)\n return self.connection.space.get(space)", "def get_space(self):\n return self.space", "def component_of(self, key):\n return self.subgraph(nx.ancestors(self, key).union([key]))", "def __iter__(self):\n return iter(self._layout_spaces.values())", "def get_layout(self):\n # Executing command and parsing output\n cmd = subprocess.Popen(['setxkbmap', '-print'], stdout=subprocess.PIPE)\n cmd_out, cmd_err = cmd.communicate()\n cmd_outparsed = cmd_out.split('\\n')\n\n\t# Looking for Keyboard Layout and printing it\n for line in cmd_outparsed:\n if \"xkb_symbols\" in line: \n layout = line.split('+')[1]\n sys.stdout.write(\"%s\" % layout)", "def __get_key(self):\n buffer = self.view.buffer\n insert_mark = buffer.get_insert()\n insert_iter = buffer.get_iter_at_mark(insert_mark)\n start_iter = insert_iter.copy()\n\n self.__find_word_start(start_iter)\n key = buffer.get_text(start_iter, insert_iter)\n \n return key", "def lookup_device_by_key(self, key) -> LandscapeDevice:\n\n found = None\n\n self._coord_lock.acquire()\n try:\n if key in self._cl_children:\n found = self._cl_children[key].basedevice\n finally:\n self._coord_lock.release()\n\n return found", "def __getitem__(self, key: Union[slice, int]) -> Union[\"DatasetItemEntity\", List[\"DatasetItemEntity\"]]:\n return self._fetch(key)", "def find_physical_center_from_key(key, x0, r0):\n anchor = decode_key(key)\n return find_physical_center_from_anchor(anchor, x0, r0)", "def name(self, key):\n try:\n return self.dim[key].name\n except KeyError:\n return None", "def get_component(self, key):\n component = self.get_place(key)\n if component == None:\n component = self.get_transition(key)\n if component == None:\n component = self.get_arc(key)\n return component", "def _resolve_normal(self, key):\n keysym = self._key_to_keysym(key)\n if keysym is None:\n return None\n\n if keysym not in self.keyboard_mapping:\n return None\n\n return keysym" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterate over all handles in all entity spaces.
def handles(self):
        for entity_space in self:
            for handle in entity_space:
                yield handle
[ "def iter_item_handles(self):\n raise(NotImplementedError())", "def system_iter(self):\n for system in self.systems:\n yield self.systems[system]", "def iter_sys(self):\n names = self.sys_names()\n for name in names:\n osys = self.GetOverallSys(name)\n hsys = self.GetHistoSys(name)\n yield name, osys, hsys", "def __iter__(self):\n return iter(self._layout_spaces.values())", "def iter_feature(self) -> Generator[Feature, None, None]:\n for feature in self.api.get_space_iterate(\n space_id=self._info[\"id\"], limit=100\n ):\n yield feature", "def __iter__(self):\n return self._all_shas()", "def handles(self):\r\n l = []\r\n for i in range(self.count()):\r\n h = self.handle(i)\r\n if h:\r\n l.append(h)\r\n return l", "def __iter__(self):\r\n for shape in self.__shapes:\r\n yield shape", "def iter_context_objects(self):\n tid = current_greenlet()\n objects = self._cache.get(tid)\n if objects is None:\n if len(self._cache) > _MAX_CONTEXT_OBJECT_CACHE:\n self._cache.clear()\n objects = self._global[:]\n objects.extend(getattr(self._context, 'stack', ()))\n objects.sort(reverse=True)\n objects = [x[1] for x in objects]\n self._cache[tid] = objects\n return iter(objects)", "def modelspace(self) -> Iterable[DXFGraphic]:\n factory = EntityFactory()\n polyline: Optional[Polyline] = None\n for index_entry, text in self.entities_as_str(self.index_entities + 1):\n dxftype = index_entry.value\n if dxftype in SUPPORTED_DXF_TYPES:\n xtags = ExtendedTags.from_text(text)\n # do not process paperspace entities\n if xtags.noclass.get_first_value(67, 0) == 1:\n continue\n entity = factory.entity(xtags)\n if dxftype == 'SEQEND':\n if polyline is not None:\n polyline.seqend = entity\n yield polyline\n polyline = None\n # suppress all other SEQEND entities -> ATTRIB\n elif dxftype == 'VERTEX' and polyline is not None:\n # vertices without POLYLINE are DXF structure errors, but here just ignore it.\n polyline.vertices.append(entity)\n elif dxftype == 'POLYLINE':\n polyline = cast(Polyline, entity)\n else:\n # POLYLINE without SEQEND is a DXF structure error, but here just ignore it.\n # By using this add-on be sure to get valid DXF files.\n polyline = None\n yield entity", "def iter_item_handles(self):\n logger.debug(\"Iter item handles {}\".format(self))\n\n bucket = self.s3resource.Bucket(self.bucket)\n\n # Older dtool-s3 datasets, prior to version 0.14.0, use handles that\n # represent the relative path of the file. These did not support\n # non-ascii charcters. Since 0.14.0 dtool-s3 datasets therefore use\n # base64 encoded versions of the relative paths as handles. This means\n # that we need to work out if the handle is base64 encoded or not to\n # handle each case appropriately.\n base64_encoded = True\n sb_version = self._get_upload_storage_broker_version()\n if sb_version is not None:\n if packaging.version.parse(sb_version) < packaging.version.parse(\"0.14.0\"): # NOQA\n base64_encoded = False\n else:\n base64_encoded = False\n\n for obj in bucket.objects.filter(Prefix=self.data_key_prefix).all():\n handle = obj.get()['Metadata']['handle']\n\n # The handle is a base64 encoded version of the relpath in order\n # to deal with non-ascii chars in the relpath. 
We therefore need\n # to decode it to get the actual relpath.\n if base64_encoded:\n yield _base64_to_unicode(handle)\n else:\n yield handle", "def delete_all_entities(self):\n # Do not delete the entity space objects itself, just remove all entities from all entity spaces.\n for entity_space in self._layout_spaces.values():\n entity_space.delete_all_entities()", "def iter_dirs(self, handle):\n raise NotImplementedError()", "def __iter__(self) -> Iterator[HandlerEntry]:\n raise NotImplementedError", "def __iter__(self):\n for coreg in self.pipeline:\n yield coreg", "def __iter__( self ) :\n\n for nuclide in self.__nuclides : yield nuclide", "def __iter__(self):\n if not self._ctx:\n raise RuntimeError(\"context already destroyed\")\n idx = ffi.new(\"uint32_t *\")\n mod = lib.ly_ctx_get_module_iter(self._ctx, idx)\n while mod:\n yield Module(self, mod)\n mod = lib.ly_ctx_get_module_iter(self._ctx, idx)", "def wire_iter(self, grid):\n tr_w = self.track_id.width\n layer_id = self.layer_id\n for tr_idx in self.track_id:\n layer_name = grid.get_layer_name(layer_id, tr_idx)\n bbox = grid.get_bbox(layer_id, tr_idx, self._lower_unit, self._upper_unit,\n width=tr_w, unit_mode=True)\n yield layer_name, bbox", "def delete_all_entities(self) -> None:\n # noinspection PyTypeChecker\n for entity in list(self): # temp list, because delete modifies the base data structure of the iterator\n self.delete_entity(entity)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get entity space by key or create new entity space.
def get_entity_space(self, key):
        try:
            entity_space = self._layout_spaces[key]
        except KeyError: # create new entity space; internal exception
            entity_space = EntitySpace(self._entitydb)
            self.set_entity_space(key, entity_space)
        return entity_space
[ "def space(self, datastore):\n return self._get('/datastores/%s/space' % base.getid(datastore),\n 'datastore')", "def getSpace(self, space):\n if isinstance(space, self.connection.space._ROOTOBJECTTYPE): #pylint: disable=W0212\n return space\n\n space = self._getSpaceGuid(space)\n return self.connection.space.get(space)", "def get_space(self, space_id: str) -> Space:\n return [x for x in self.spaces if x.id == space_id][0]", "def delete_entity_space(self, key):\n entity_space = self._layout_spaces[key]\n entity_space.delete_all_entities()\n del self._layout_spaces[key]", "def get_space(\n self, space_id: str, force_refresh: bool = False\n ) -> Optional[NotionSpace]:\n space = self.get_record_data(\"space\", space_id, force_refresh)\n if space:\n return NotionSpace(self, space_id)", "def get_or_create_entity(self, entity_name):\n if entity_name not in self._entity_dict:\n self._entity_dict[entity_name] = Entity(entity_name)\n return self._entity_dict[entity_name]", "def wit_space_test(self):\n command = \"wit space create\"\n spacename = f\"synthetictest_{SITE}\"\n log.debug(f\"WITCLIUser: wit space create {spacename}\")\n with self.client.execute(['wit', 'space', 'create'], ['-space', spacename, '-noprompt', '-json', '-team', 'wit', '-size', '20480'], catch_response=True) as cli:\n if cli.failed == 0:\n spacename = f'space_{USERNAME}_{spacename}'\n self.spaces.append(spacename)\n cli.success()\n else:\n cli.failure(cli.failed, cli.error)\n return\n log.debug(f\"WITCLIUser: wit space list -space {spacename}\")\n with self.client.execute(['wit', 'space', 'list'], ['-space', spacename, '-noprompt', '-json'], catch_response=True) as cli:\n if cli.failed == 0:\n results = json.loads(cli.output)['cli_results']\n if len(results) != 1:\n cli.failure(1, f\"'wit space list -space {spacename}' should have returned 1 space\")\n return\n spacepath = results[0]['Storage Path']\n if not os.access(spacepath, os.W_OK):\n cli.failure(1, f\"Workspace path: {spacepath} not writeable\")\n return\n cli.success()\n else:\n cli.failure(cli.failed, cli.error)\n return\n\n log.debug(f\"WITCLIUser: wit space delete -space {spacename}\")\n with self.client.execute(['wit', 'space', 'delete'], ['-space', spacename, '-noprompt'], catch_response=True) as cli:\n if cli.failed == 0:\n cli.success()\n else:\n cli.failure(cli.failed, cli.error)", "def from_id(cls, space_id: str) -> \"Space\":\n api = HubApi()\n obj = cls(api)\n obj._info = api.get_space(space_id=space_id)\n return obj", "def __getitem__(self, key):\n return self._layout_spaces[key]", "def get_store(key):\n key_prefix, key_content = key.split(\":\", 1)\n if key_prefix == \"fs\":\n return StoreFs()\n elif key_prefix == \"lo\":\n return StoreLo(storage_root=EpacFramework.roots[key_content])\n else:\n raise ValueError(\"Invalid value for key: should be:\"+\n \"lo for no persistence and storage on living objects or\"+\n \"fs and a directory path for file system based storage\")", "def entity_to_instance(entity):\n key_path = entity.key().to_path()\n key = ndb.Key(flat=key_path)\n obj = key.get()\n\n return obj", "def spaces(self):\n if not self._spaces or not self._client.cache:\n self._spaces = [\n Space(x, client=self._client, team=self)\n for x in self._client.get(f\"team/{self.id}/space\")[\"spaces\"]\n ]\n return self._spaces", "def _get_workspace(key):\n return _WORKSPACES.get(key, None)", "def _get_key(self, entity_id):\n if entity_id:\n return self.client.key(self.kind, entity_id)\n return self.client.key(self.kind)", "def create_exchange_space(self, 
exchange_space=None, org_id=''):\n log.debug(\"create_exchange_space(%s, org_id=%s)\" % (exchange_space, org_id))\n self.assert_condition(exchange_space and org_id, \"Arguments not set\")\n\n #First make sure that Org with the org_id exists, otherwise bail\n org = self.clients.resource_registry.read(org_id)\n if not org:\n raise NotFound(\"Org %s does not exist\" % org_id)\n\n exchange_space_id,rev = self.clients.resource_registry.create(exchange_space)\n\n aid = self.clients.resource_registry.create_association(org_id, PRED.hasExchangeSpace, exchange_space_id)\n\n # Now do the work\n\n# if exchange_space.name == \"ioncore\":\n# # Bottom turtle initialization\n# # @TODO: what's different here\n# self.container.ex_manager.create_xs(exchange_space.name)\n# else:\n self.container.ex_manager.create_xs(exchange_space.name, use_ems=False)\n \n return exchange_space_id", "def get_object(bucket, key):\n return ObjectStore.get_object(bucket, key)", "def _getActionSpace(self, shape=\"discrete\"):\n if shape == \"multiDiscrete\":\n env_space = spaces.MultiDiscrete([2]*self.n_switch)\n elif shape == \"discrete\":\n# env_space = spaces.Discrete(len(self.net2.line.in_service))\n action_discrete = 2*(self.n_line + self.n_varloads + self.n_gen + self.n_pv + self.n_wind + self.n_storage)\n env_space = spaces.Discrete(action_discrete)\n elif shape == \"Box\":\n# dim = self.n_line #+ self.n_varloads + self.n_gen\n env_space = spaces.Box(np.array([0,0,0,0,0,0]),np.array([self.n_line-1, 1,1,1,1, self.n_gen]))\n# elif shape == \"Tuple\":\n# env_space = spaces.Tuple([\n# spaces.MultiDiscrete([2]*self.n_line),\n# spaces.Box(0,1, [1,4])])\n# elif shape == \"single\": \n# env_space = spaces.Tuple([\n# spaces.Discrete(action_length),\n# spaces.Box(0,1, [1,1])\n# ])\n return env_space", "def _stack_space(self,\n space: Union[gym.spaces.Box, gym.spaces.Dict],\n dtype: np.dtype = None) -> Any:\n if self._single_latency:\n return space\n\n # Allow sensors such as last_action_sensor to override the dtype.\n dtype = dtype or space.dtype\n\n if isinstance(space, gym.spaces.Box):\n return self._stack_space_box(space, dtype)\n elif isinstance(space, gym.spaces.Dict):\n return self._stack_space_dict(space, dtype)\n else:\n raise ValueError(f\"Space {space} is an unsupported type.\")", "def _create_ipspace(self, network_info):\n\n if not self._client.features.IPSPACES:\n return None\n\n if (network_info['network_allocations'][0]['network_type']\n not in SEGMENTED_NETWORK_TYPES):\n return client_cmode.DEFAULT_IPSPACE\n\n # NOTE(cknight): Neutron needs cDOT IP spaces because it can provide\n # overlapping IP address ranges for different subnets. That is not\n # believed to be an issue for any of Manila's other network plugins.\n ipspace_id = network_info.get('neutron_subnet_id')\n if not ipspace_id:\n return client_cmode.DEFAULT_IPSPACE\n\n ipspace_name = self._get_valid_ipspace_name(ipspace_id)\n if not self._client.ipspace_exists(ipspace_name):\n self._client.create_ipspace(ipspace_name)\n\n return ipspace_name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Store tags in associated layout entity space.
def store_tags(self, tags):
        # AC1018: if entities have no owner tag (330) (thanks Autodesk for making the owner tag not mandatory), store
        # this entities in a temporary model space with layout_key = 0
        # this will be resolved later in LayoutSpaces.repair_owner_tags()
        entity_space = self.get_entity_space(self._get_key(tags))
        entity_space.store_tags(tags)
[ "def _store_tags(self):\n file = None\n try:\n rospy.loginfo(\"--- Storeing tags ---\")\n # open the file\n file = open(self.abs_file_path, \"w\")\n\n count = 0\n # write all entries in tag_list to the file\n for (y, x) in self.tag_list:\n count = count + 1\n file.write(str(y) + \",\" + str(x) + \"\\n\")\n\n rospy.loginfo(\"--- Stored \" + str(count) + \" tags ---\")\n except IOError:\n rospy.loginfo(\"--- Storing tags to file failed ---\")\n finally:\n file.close()", "def set_tags_attr(self, tags):\n for i in self.tags.all():\n db.session.delete(i)\n db.session.commit()\n # Update with new list of tags returned from make_tags\n tags_models = Tag().create(tags)\n if tags_models:\n self.tags = tags_models", "def save_tag(self):\n self.save()", "def store_tags(self, tags_object):\n self.tags_cnt += 1\n self.tags_objects.append(Tags(\n osm_type=tags_object.osmtype,\n osm_id=tags_object.osmid,\n key=tags_object.key,\n value=tags_object.value\n ))", "def update_tags(self):\n raise NotImplementedError", "def bind_tags(self, tags):\n current_map = dict((x.name, x) for x in self.tags)\n currently_attached = set(x.name for x in self.tags)\n new_tags = set(tags)\n\n def lookup_tag(name):\n tag = Tag.query.filter_by(locale=self.locale,\n name=name).first()\n if tag is not None:\n return tag\n return Tag(name, self.locale)\n\n # delete outdated tags\n for name in currently_attached.difference(new_tags):\n self.tags.remove(current_map[name])\n\n # add new tags\n for name in new_tags.difference(currently_attached):\n self.tags.append(lookup_tag(name))", "def _set_tags(self, tags: dict[any, any]) -> None:\n\n self.set_tags(tags, inplace=True)", "def save(self, *args, **kwargs):\n super(Events, self).save(*args, **kwargs)\n self.tagged.add(self.page)", "def load_tags(self):\n\n # Connect to the database\n conn = sqlite3.connect(config.cfg['db_location'])\n crsr = conn.cursor()\n\n # Retrieve list of all tags from SQL database\n crsr.execute(\"SELECT id, tag \"\n \"FROM Tags;\")\n\n # Write tags to self.tags and define enumeration for cross-reference\n _tag_tuples = crsr.fetchall()\n self.tag_to_id = dict((tag, ident) for (ident, tag) in _tag_tuples)\n self.ls_tags = [i[1] for i in _tag_tuples]\n\n # Close connection\n crsr.close()\n conn.close()", "def save():\n global __ITEM_TAGS\n #read tags from file\n with open(tag_file(), 'w') as file_to_write:\n for listitem in __ITEM_TAGS:\n file_to_write.write('%s\\n' % listitem)", "def save_tags(article_id, tag_names=None):\n # Get tags in the correct format\n if isinstance(tag_names, str):\n tag_names = tag_names.split(\",\")\n tag_names = tuple(tag.strip() for tag in tag_names if tag != \"\")\n if not isinstance(tag_names, (list, tuple, type(None))):\n try:\n tag_names = tuple(tag_names)\n tag_names = tuple(tag for tag in tag_names if tag != \"\")\n except TypeError:\n current_app.logger.error(\"Could not convert tags to Tuple.\")\n return\n current_app.logger.debug(\"Tags given: {}\".format(tag_names))\n\n conn = engine.connect()\n\n # Remove all current tags for the given article\n delstmt = tag_map.delete().where(tag_map.c.article_id == article_id)\n conn.execute(delstmt)\n\n # If tags is None, we just wanted to delete current tag associations\n if tag_names is None or len(tag_names) == 0:\n conn.close()\n return\n\n # Insert any new tags which didn't exist before\n insstmt = tags.insert().prefix_with(\"OR IGNORE\")\n conn.execute(insstmt, [{'tag': tag} for tag in tag_names])\n\n # Now attach the tags to the articles using the map table\n selstmt = 
select([tags.c.id]).where(tags.c.tag == bindparam(\"tag_name\"))\n mapstmt = tag_map.insert({'tag_id': selstmt})\n conn.execute(mapstmt,\n [{'tag_name': tag,\n 'article_id': article_id} for tag in tag_names])", "def __setitem__(self, name, entity):\n self._entity_map[name] = entity", "def add_entity(self, entity):\n entity.universe = self\n self.entities[entity.uuid] = entity", "def update_tags(self):\n\n app = App.get_running_app()\n display_tags = self.ids['panelDisplayTags']\n display_tags.clear_widgets()\n photo = app.session.query(Photo).filter_by(id=app.target).first()\n self.photo = str(photo.id)\n if photo:\n tags = photo.tags\n if 'favorite' in tags:\n self.favorite = True\n else:\n self.favorite = False\n for tag in tags:\n display_tags.add_widget(NormalLabel(text=tag.name, size_hint_x=1))\n display_tags.add_widget(RemoveFromTagButton(to_remove=tag.name, remove_from=photo.new_full_filename(), owner=self))\n\n tag_list = self.ids['panelTags']\n tag_list.clear_widgets()\n tag_list.add_widget(TagSelectButton(type='Tag', text='favorite', target='favorite', owner=self))\n tag_list.add_widget(ShortLabel())\n for tag in app.session.query(Tag).order_by(Tag.name):\n tag_list.add_widget(TagSelectButton(type='Tag', text=tag.name, target=str(tag.id), owner=self))\n tag_list.add_widget(RemoveTagButton(to_remove=tag.name, owner=self))", "def inject_set_tag_after_insert(cls):\n\n @event.listens_for(cls, \"after_insert\")\n def set_node_tag(mapper, conn, node):\n table = node.__table__\n\n if not node.is_taggable():\n return # do nothing\n\n tag = compute_tag(node)\n\n version = __get_tagged_version(node.node_id, table, tag, conn)\n\n node._sysan[TagKeys.tag] = tag\n node._sysan[TagKeys.latest] = True\n node._sysan[TagKeys.version] = version\n\n # update tag and version\n conn.execute(\n table.update()\n .where(table.c.node_id == node.node_id)\n .values(_sysan=node._sysan)\n )", "def insert_entity(self, entity: entities.Entity) -> None:\n self.grid[entity.y: entity.y + entity.height, entity.x: entity.x + entity.width] = entity.render()", "def load():\n global __ITEM_TAGS\n #read tags from file\n if os.path.exists(tag_file()):\n with open(tag_file()) as file_to_read:\n __ITEM_TAGS = file_to_read.read().splitlines()\n else:\n #no tag file existing, create new\n __ITEM_TAGS = []\n save()", "def add_tag(self, tag: str, content):\n if tag in self:\n if type(self[tag]) == type(content):\n self[tag] = [self[tag], content]\n elif type(self[tag]) is list:\n self[tag].append(content)\n else:\n print(\"Wrong tag <{}> content\".format(tag))\n exit(-1)\n else:\n self[tag] = content", "def DeclareEntity(self, entity):\n if isinstance(entity, XMLGeneralEntity):\n self.generalEntities[entity.name] = entity\n elif isinstance(entity, XMLParameterEntity):\n self.parameterEntities[entity.name] = entity\n else:\n raise ValueError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write all entity spaces to stream. If keys is not None, write only entity spaces defined in keys.
def write(self, tagwriter, keys=None):
        layout_spaces = self._layout_spaces
        if keys is None:
            keys = set(layout_spaces.keys())
        for key in keys:
            layout_spaces[key].write(tagwriter)
[ "def delete_all_entities(self):\n # Do not delete the entity space objects itself, just remove all entities from all entity spaces.\n for entity_space in self._layout_spaces.values():\n entity_space.delete_all_entities()", "def all_spaces(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.all_spaces_with_http_info(**kwargs)\n else:\n (data) = self.all_spaces_with_http_info(**kwargs)\n return data", "def save_device_keys(self, device_keys):\n rows = []\n\n for user_id, devices_dict in device_keys.items():\n for device_id, device in devices_dict.items():\n rows.append(\n {\n \"curve_key\": device.curve25519,\n \"deleted\": device.deleted,\n \"device\": self.device_id,\n \"ed_key\": device.ed25519,\n \"user_device_id\": device_id,\n \"user_id\": user_id,\n }\n )\n\n if not rows:\n return\n\n # TODO this needs to be batched\n DeviceKeys.replace_many(rows).execute()", "def query_all_kb(self, kb: KnowledgeBase):\n for ent in tqdm.tqdm(kb.entities, total=len(kb.entities)):\n mesh_syn, dbp_syn = self.syn_enricher.get_synonyms_to_entity(ent.aliases)\n wiki_ents, definition = self.wiki_enricher.get_definition_to_entity(ent.canonical_name)\n ent.additional_details['mesh_synonyms'] = mesh_syn\n ent.additional_details['dbpedia_synonyms'] = dbp_syn\n ent.additional_details['wiki_entities'] = wiki_ents\n if len(ent.definition) < 5:\n ent.definition = definition\n\n kb.dump(kb, self.out_path)\n return", "def write_to_disk(self, clear=True, write_all=False):\n\n frames = []\n\n if self.write_status:\n frames.append(self.hksess.status_frame())\n self.write_status = False\n\n for pid, prov in self.providers.items():\n if prov.empty():\n continue\n if write_all or prov.new_frame_time():\n frames.append(prov.to_frame(self.hksess, clear=clear))\n\n self.writer.Process(frames)", "def PutAll(self, *args):\n return _snap.TStrV_PutAll(self, *args)", "def write(self, entity: List[dict]) -> None:\n path = Path(self.path)\n try:\n rindex = str(path).rindex(os.sep)\n Path(str(path)[:rindex]).mkdir(parents=True, exist_ok=True)\n except ValueError as _:\n pass\n\n if len(entity) == 0:\n detail = 'Entity is empty'\n logging.error(detail)\n return\n\n with open(path, 'w') as f:\n fieldnames = entity[0].keys()\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n\n f.write('')\n writer.writeheader()\n for row in entity:\n for key, value in self._mapping.items():\n if key in row and row[key] is not None:\n row[key] = value['func'](row[key])\n writer.writerow(row)", "def handles(self):\n for entity_space in self:\n for handle in entity_space:\n yield handle", "def PutAll(self, *args):\n return _snap.TCnComV_PutAll(self, *args)", "def insert_keys(self, keys):\n start = 0\n bulk_insert = self.bulk_insert\n keys_len = len(keys)\n select = 'SELECT ?,?'\n query = 'INSERT OR IGNORE INTO gauged_keys (namespace, `key`) '\n execute = self.cursor.execute\n while start < keys_len:\n rows = keys[start:start+bulk_insert]\n params = [param for params in rows for param in params]\n insert = (select + ' UNION ') * (len(rows) - 1) + select\n execute(query + insert, params)\n start += bulk_insert", "def delete_entity_space(self, key):\n entity_space = self._layout_spaces[key]\n entity_space.delete_all_entities()\n del self._layout_spaces[key]", "def all_spaces_with_http_info(self, **kwargs):\n\n all_params = ['fields']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = 
locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method all_spaces\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n collection_formats = {}\n\n resource_path = '/spaces'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'fields' in params:\n query_params['fields'] = params['fields']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[SpaceBase]',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def spaces(self):\n if not self._spaces or not self._client.cache:\n self._spaces = [\n Space(x, client=self._client, team=self)\n for x in self._client.get(f\"team/{self.id}/space\")[\"spaces\"]\n ]\n return self._spaces", "def PutAll(self, *args):\n return _snap.TIntFltKdV_PutAll(self, *args)", "def write(self):\n for data_object in self.data_list:\n data_object.write()", "def hsetall(self, key, mapping, ex=None):\n\n pipe = self.pipeline()\n for name, value in mapping.items():\n pipe.hset(key, name, pickle.dumps(value))\n\n if ex:\n pipe.expire(key, ex)\n\n pipe.execute()", "def write(pmids):\n\n # Writing once per each file because bugs happens otherwise (inefficient, might be changed)\n for pmid in pmids:\n filename = str(pmid)+\".entities\"\n\n if not os.path.isfile(filename):\n file = open(filename, 'w')\n entities = UTILITY.get_entities_by_pmids([pmid])\n\n for i in range(len(entities)):\n for j in range(len(entities[i])):\n file.write(entities[i][j].cid+\"\\n\")\n file.write(\"\\n\")\n\n file.close()", "def delete_all_entities(self) -> None:\n # noinspection PyTypeChecker\n for entity in list(self): # temp list, because delete modifies the base data structure of the iterator\n self.delete_entity(entity)", "def bulk_insert(self, keys):\n for k in keys:\n self.insert(k)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete entity from associated layout entity space. Type of entity has to be DXFEntity() or inherited.
def delete_entity(self, entity):
        key = self._get_key(entity.tags)
        try:
            entity_space = self._layout_spaces[key]
        except KeyError: # ignore; internal exception
            pass
        else:
            entity_space.delete_entity(entity)
[ "def delete_entity(self, entity: 'DXFEntity') -> None:\n self.entitydb.delete_entity(entity) # 1. delete from drawing database\n self.unlink_entity(entity) # 2. unlink from entity space", "def delete(self, entity):", "def delete_entity_space(self, key):\n entity_space = self._layout_spaces[key]\n entity_space.delete_all_entities()\n del self._layout_spaces[key]", "def delete_entity(self, entity):\r\n self._entity_manager.delete(entity)", "def unlink_entity(self, entity: 'DXFEntity') -> None:\n self._entity_space.delete_entity(entity)\n entity.dxf.paperspace = -1 # set invalid paper space\n if entity.supports_dxf_attrib('owner'): # R2000\n entity.dxf.owner = '0'", "def delete(self):\n failed, model, entity = self._get_model_and_entity(True, True)\n if failed: return\n entity.delete()\n self._serve({})", "def move_to_layout(self, entity: 'DXFEntity', layout: 'GenericLayoutType') -> None:\n if entity.dxf.handle in self._entity_space:\n self.unlink_entity(entity)\n layout.add_entity(entity)\n else:\n raise DXFValueError('Layout does not contain entity.')", "def delete_entity(self, entity_id):\n return self.table.delete_item(\n Key={\n self.PRIMARY_KEY: entity_id,\n }\n )", "def remove_entity(self, entity):\n self.entities.remove(entity)", "def delete_entity(entity_type_id, entity_value):\n import dialogflow_v2 as dialogflow\n entity_types_client = dialogflow.EntityTypesClient()\n\n entity_type_path = entity_types_client.entity_type_path(\n PROJECT_ID, entity_type_id)\n\n entity_types_client.batch_delete_entities(\n entity_type_path, [entity_value])", "def entityDelete(_id):\n\n\tif _id is None:\n\t\treturn ContextBroker.Entities.respond(400, {\n\t\t\t\"Response\": \"Failed\",\n\t\t\t\"Error\": \"BadRequest\",\n\t\t\t\"Description\": \"Entity ID required!\"\n\t\t})\n\tif request.args.get('type') is None:\n\t\treturn ContextBroker.Entities.respond(400, {\n\t\t\t\"Response\": \"Failed\",\n\t\t\t\"Error\": \"BadRequest\",\n\t\t\t\"Description\": \"Entity type required!\"\n\t\t})\n\treturn ContextBroker.Entities.deleteEntity(request.args.get('type'), _id)", "def DeleteEntityType(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def remove_entity(self, entity):\n del self.entities[entity.uuid]\n entity.universe = None", "def entity_removed(self, entity):\r\n\t\tpass", "def delete_entity(id):\n entity = UrlRequest.get_by_id(id)\n entity.delete()", "def remove_entity(self, x, y):\n tile = map.tiles[x][y]\n entity = tile.entity\n \n if entity is None:\n raise LogicException(\"Tried to remove entity from (%d,%d) but there was nothing there.\" % (x, y))\n\n entity.x = -1\n entity.y = -1\n entity.owner = None\n\n tile.entity = None\n self.entities.remove(entity)\n return entity", "def delete_entity_type(PROJECT_ID, entity_type_id):\n entity_types_client = dialogflow.EntityTypesClient()\n\n entity_type_path = entity_types_client.entity_type_path(\n PROJECT_ID, entity_type_id)\n\n entity_types_client.delete_entity_type(entity_type_path)", "def delete_all_entities(self):\n # Do not delete the entity space objects itself, just remove all entities from all entity spaces.\n for entity_space in self._layout_spaces.values():\n entity_space.delete_all_entities()", "def delete(self):\r\n \r\n #get all the nodes in this object\r\n nodes = self._object.nodes.nodes()\r\n \r\n #execute ecotect instruction\r\n arg_str = p2e._base._util._convert_args_to_string(\"object.delete\", self._object._eco_id)\r\n p2e._app.Exec(arg_str)\r\n \r\n #update the nodes list\r\n for i in nodes:\r\n 
p2e.model._nodes.remove(i)\r\n \r\n #update model list\r\n p2e.model._objects.remove(self._object)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete layout entity space key.
def delete_entity_space(self, key):
        entity_space = self._layout_spaces[key]
        entity_space.delete_all_entities()
        del self._layout_spaces[key]
[ "def delete_entity(self, entity):\n key = self._get_key(entity.tags)\n try:\n entity_space = self._layout_spaces[key]\n except KeyError: # ignore; internal exception\n pass\n else:\n entity_space.delete_entity(entity)", "def delete(self, entity):", "def delete_entity(self, entity: 'DXFEntity') -> None:\n self.entitydb.delete_entity(entity) # 1. delete from drawing database\n self.unlink_entity(entity) # 2. unlink from entity space", "def delete(self, key):\n del self.dict[key]", "def test_delete_sales_quick_entries_key(self):\n pass", "def delete(self, keyname):\n self.db.delete_by_name(KEY, name=keyname)", "def delete(self,key):\n\t\tdel self.form_dict[key]", "async def delete(self, key):\n _LOGGER.debug(_(\"Deleting %s from sqlite\"), key)\n\n cur = await self.client.cursor()\n await cur.execute(\"DELETE FROM {} WHERE key=?\".format(self.table), (key,))\n await self.client.commit()", "def delete(self, engine_path, name):\n path = self.vault.normalize(\"/\" + engine_path + \"/keys/\" + name)\n address = self.vault.vault_adress + \"/v1\" + path\n logging.info(\"Deleting the totp key: %s\", address)\n self.vault.requests_request(\"DELETE\", address, headers=self.vault.token_header)", "def delete_entity(self, entity_id):\n return self.table.delete_item(\n Key={\n self.PRIMARY_KEY: entity_id,\n }\n )", "def delete(self, kind: VersionedDataKind, key: str, version: int):", "def get_entity_space(self, key):\n try:\n entity_space = self._layout_spaces[key]\n except KeyError: # create new entity space; internal exception\n entity_space = EntitySpace(self._entitydb)\n self.set_entity_space(key, entity_space)\n return entity_space", "def delete_entity(self, entity):\r\n self._entity_manager.delete(entity)", "def deleteData(self):\n\t\tloopdata_keys = LoopData.query().fetch(keys_only = True)\n\t\tloopdata_entities = ndb.get_multi(loopdata_keys)\n\t\tndb.delete_multi([l.key for l in loopdata_entities])", "def test_key_deletion(self):\n pass", "def DelKey(self, *args):\n return _snap.TIntFltH_DelKey(self, *args)", "def delete(self):\n self.set_key(\"\")\n self.set_current(0, 0)\n #self.keys_list.getContext().getControl(\"btnAssign\").setEnable(True)", "def delete_key(self, table, key, topic=None, sync=True):\n t_entries = self._db.table(table)\n t_entries.remove(Query().key == key)", "def key_permission_remove(request, **kwargs):\n\n org = kwargs.get(\"org\")\n prefix = request.POST.get(\"key_prefix\")\n key = OrganizationAPIKey.objects.get(prefix=prefix)\n\n entity = request.POST.get(\"entity\")\n kperms, perms = load_entity_permissions(org, key)\n if entity in perms:\n del perms[entity]\n save_key_permissions(org, key, perms)\n\n return JsonResponse({\"status\": \"ok\"})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete all entities from all layout entity spaces.
def delete_all_entities(self):
        # Do not delete the entity space objects itself, just remove all entities from all entity spaces.
        for entity_space in self._layout_spaces.values():
            entity_space.delete_all_entities()
[ "def delete_all_entities(self) -> None:\n # noinspection PyTypeChecker\n for entity in list(self): # temp list, because delete modifies the base data structure of the iterator\n self.delete_entity(entity)", "def delete_entity_space(self, key):\n entity_space = self._layout_spaces[key]\n entity_space.delete_all_entities()\n del self._layout_spaces[key]", "def delete_all(app_name):\n all_models = all_models = fetch_models(app_name)\n for x in all_models:\n for y in x.objects.all():\n y.delete()", "def clear_smartgrid():\n for level in Level.objects.all():\n level.delete()\n for row in ColumnGrid.objects.all():\n row.delete()\n for row in Grid.objects.all():\n row.delete()", "def delete_all(self):\n self.db.delete_all(KEY)", "def delete_all(self):\n self.db_tool.session.query(Plot).delete()\n self.db_tool.session.query(Parameter).delete()\n self.db_tool.session.query(Signal).delete()\n self.db_tool.commit()", "def delete_all(self, collection):\n self.__db[collection].delete_many({})", "def delete_all(self):\n to_delete = list(self.instances.keys())\n if len(to_delete) > 0: # Only delete stuff if there's stuff to\n # delete.\n self.delete(to_delete)", "def delete_all_maps(self):\n self._delete_all_maps()", "def clear_content():\n\n print(\"\\nDeleting existing Content...\\n\")\n\n for s in FaqSection.objects.all():\n s.delete()\n for s in AboutSection.objects.all():\n s.delete()", "def clear_designer():\n for obj in DesignerLevel.objects.all():\n obj.delete()\n for obj in DesignerColumnName.objects.all():\n obj.delete()\n for obj in DesignerAction.objects.all():\n obj.delete()\n for obj in DesignerColumnGrid.objects.all():\n obj.delete()\n for obj in DesignerGrid.objects.all():\n obj.delete()", "def deleteDetectors(self):\n\t\tdetector_keys = Detector.query().fetch(keys_only = True)\n\t\tdetector_entities = ndb.get_multi(detector_keys)\n\t\tndb.delete_multi([d.key for d in detector_entities])", "def delete_all(self, obj):\n ctype = ContentType.objects.get_for_model(obj)\n self.filter(content_type=ctype, object_id=obj.pk).delete()", "def drop_all_objects(self, engine):\n self.impl.drop_all_objects(engine)", "def delete_all():\n all_data_lines = DataLine.objects.all()\n return all_data_lines.delete()", "def clear_all(self):\n data = self.Entries\n del data[:]", "def delete_all_on_layer(self):\n bpy.ops.object.select_by_layer()\n bpy.ops.object.delete(use_global=False)", "def _erase(self):\n ds9Cmd(\"regions delete all\", flush=True, frame=self.display.frame)", "def purge_all(cls):\n for x in cls.objects.filter(enabled=True):\n x.purge()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The phylip path for the MSA used in RAxML
def get_raxml_phylippath(dir):
    nick = get_msa_nickname(dir)
    return dir + "/" + ap.params["geneid"] + SEP + nick + SEP + "raxml" + SEP + "phylip"
[ "def trip_path(self):\n path = [self.fm_town.alpha]\n path += [t.alpha for t in self.via]\n path += [self.to_town.alpha]\n return '-'.join(path)", "def triangular_prism():\n return nx.read_gml(abs_path('gml/triangular_prism.gml'))", "def getPath(self):\n # print(\"I'm serious. You actually did it. Here is your path again so you can see how far you have come.\")\n return self.pathTraveled", "def path(self, sid):\n paths = IAnnotations(self.root).get(SOUPPATHS, {})\n return paths.get(sid, '/')", "def _rate_meta_path(self, mp: Dict) -> float:\n pass", "def getLinePath(self) -> Path[Stop, Link]:\n return self.line_path", "def test_get_polyline(self):\n graphic = Graphic(etree.parse(\"fat-giraffes.svg\"))\n f = open(\"pout.py\",\"w\")\n f.write(\"poly = %s\"%graphic.get_polyline())\n f.close()", "def get_raxml_fastapath(dir):\n nick = get_msa_nickname(dir)\n return dir + \"/\" + ap.params[\"geneid\"] + SEP + nick + SEP + \"raxml\" + SEP + \"fasta\"", "def arn_url_path(self):\r\n return '/' + urlparse.quote(self.arn(), '')", "def hexagonal_pyramid_prism():\n return nx.read_gml(abs_path('gml/hexagonal_pyramid_prism.gml'))", "def print_metapath(mp, rev_map, n_abbv=False, e_abbv=False, lines='multi'):\n path_list = metapath_to_list(mp, rev_map, n_abbv, e_abbv)\n print_path_list(path_list, lines)", "def _set_thepath(self):\n the_path = os.path.join(self.raspeye_path, 'timelapse')\n return the_path", "def pyramid_prism_4():\n return nx.read_gml(abs_path('gml/pyramid_prism_4.gml'))", "def __repr__(self):\n return 'Path('+repr(self._s)+')'", "def flatten_pmathml(xml):\n tag = gettag(xml)\n if tag == 'mn':\n return xml.text\n elif tag == 'mi':\n return xml.text\n elif tag == 'mrow':\n return ''.join([flatten_pmathml(y) for y in xml])\n raise Exception('[flatten_pmathml] unknown tag %s' % tag)", "def _get_lsp_path_name(self):\n return self.__lsp_path_name", "def path_to_message(self, mapdata, path):\n ### REQUIRED CREDIT\n #path is a list of grid Tuples\n #convert list of tuples to list of PoseStamped\n poseArray = [];\n #rospy.loginfo(\"NODES IN PATH: \"+str(len(path)));\n for key in path:\n rospy.loginfo(\"key[0]: \"+str(key[0])+\", key[1]:\"+str(key[1]));\n worldCoords = PathPlanner.grid_to_world(mapdata, key[0], key[1]);\n poseStamped = PoseStamped();\n poseStamped.pose.position.x = worldCoords.x;\n poseStamped.pose.position.y = worldCoords.y;\n header = Header()\n header.frame_id = \"map\"\n poseStamped.header = header;\n poseArray.append(poseStamped);\n\n pathHeader = Header();\n pathHeader.frame_id = \"map\";\n pathObject = Path();\n pathObject.header = pathHeader;\n pathObject.poses = poseArray;\n\n rospy.loginfo(\"Returning a Path message\")\n return pathObject;", "def prolunga(self, du):\n if self.t0 < self.t1:\n return TrattoPath(self.path, self.t0, self.t1 + du)\n else:\n return TrattoPath(self.path, self.t0, self.t1 - du)", "def get_xpath(self):\n return self.node.path()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The fasta path for the MSA used in RAxML
def get_raxml_fastapath(dir):
    nick = get_msa_nickname(dir)
    return dir + "/" + ap.params["geneid"] + SEP + nick + SEP + "raxml" + SEP + "fasta"
[ "def get_raxml_phylippath(dir):\n nick = get_msa_nickname(dir)\n return dir + \"/\" + ap.params[\"geneid\"] + SEP + nick + SEP + \"raxml\" + SEP + \"phylip\"", "def get_sequence(msapath, taxa):\n fin = open(msapath, \"r\")\n for l in fin.readlines():\n if l.startswith(taxa):\n tokens = l.split()\n return tokens[1]", "def dna_reference_location():\n return \"/home/user/git_private/data/reference/hs38.fa\"", "def align_path(self) -> Path:\n return self.working_directory.joinpath(f\"{self.data_source_identifier}.align.fst\")", "def mkfastadict(self,fastapath): \n\t\tseq_dict={}\n\t\n\t\tfasta_sequences = SeqIO.parse(open(fastapath),'fasta')\n\t\n\t\tfor fasta in fasta_sequences:\n\t\t\tseq_dict[fasta.id]=fasta.seq\n\t\t\n\t\treturn(seq_dict)", "def _get_adm_path(state):\n return ADMPath(\n audioProgramme=state.audioProgramme,\n audioContent=state.audioContent,\n audioObjects=state.audioObjects,\n audioPackFormats=state.audioPackFormat_path,\n audioChannelFormat=state.audioChannelFormat,\n )", "def standalone_scene_file(self):\n return self._standalone_scene_file_prefix + '.ass'", "def fasta(self, fastaFileName, model=0):\n fastaFile = open(fastaFileName,\"w\")\n fastaFile.write(\">%s Model %d \\n\" % (self.__fileName, model))\n keys = self.atoms.keys()\n keys.sort()\n resSeq = -1\n iCode = ''\n currentLine = []\n for line in keys: \n if self.atoms[line].inModel(0):\n if self.atoms[line].resSeq != resSeq or self.atoms[line].iCode != iCode:\n if len(currentLine) < 79:\n currentLine.append(aminoacids[self.atoms[line].residue])\n else:\n currentLine.append(aminoacids[self.atoms[line].residue]) \n fastaFile.write(\"%s\\n\" % ''.join(currentLine))\n currentLine = []\n resSeq = self.atoms[line].resSeq\n iCode = self.atoms[line].iCode\n fastaFile.write(\"%s\\n\" % ''.join(currentLine))\n \n fastaFile.close()", "def set_MESA_paths():\n function = LegacyFunctionSpecification()\n function.addParameter(\n 'inlist_path', dtype='string', direction=function.IN,\n description=\"Path to the inlist file.\")\n function.addParameter(\n 'mesa_dir', dtype='string', direction=function.IN,\n description=\"Path to the MESA directory.\")\n function.addParameter(\n 'mesa_data_dir', dtype='string', direction=function.IN,\n description=\"Path to the MESA data directory. 
Normally this would be mesa_dir/data\")\n function.addParameter(\n 'local_data_path', dtype='string', direction=function.IN,\n description=\"Path to the data directory.\")\n function.addParameter(\n 'gyre_in_filename', dtype='string', direction=function.IN,\n description=\"Path to the gyre.in file.\")\n function.addParameter(\n 'temp_dir', dtype='string', direction=function.IN,\n description=\"Unique per-MESA temporary folder\")\n function.result_type = 'int32'\n function.result_doc = \"\"\"\n 0 - OK\n Current value was set\n -1 - ERROR\n Directory does not exist\n \"\"\"\n return function", "def generate_mps_files(self):\n print(\"starting mps generation\")\n # setting antares options\n print(\"-- pre antares\")\n self.pre_antares()\n # launching antares\n print(\"-- launching antares\")\n antares_output_name = self.launch_antares()\n # writting things\n print(\"-- post antares\")\n lp_path = self.post_antares(antares_output_name)\n return lp_path", "def bacon_path(data, actor_id):\n \n \n return actor_to_actor_path(data, 4724, actor_id)", "def arn_url_path(self):\r\n return '/' + urlparse.quote(self.arn(), '')", "def process_fa_arman(paths, short_name):\n assert short_name == \"fa_arman\"\n language = \"fa\"\n base_input_path = os.path.join(paths[\"NERBASE\"], \"PersianNER\")\n train_input_file = os.path.join(base_input_path, \"train_fold1.txt\")\n test_input_file = os.path.join(base_input_path, \"test_fold1.txt\")\n if not os.path.exists(train_input_file) or not os.path.exists(test_input_file):\n full_corpus_file = os.path.join(base_input_path, \"ArmanPersoNERCorpus.zip\")\n if os.path.exists(full_corpus_file):\n raise FileNotFoundError(\"Please unzip the file {}\".format(full_corpus_file))\n raise FileNotFoundError(\"Cannot find the arman corpus in the expected directory: {}\".format(base_input_path))\n\n base_output_path = paths[\"NER_DATA_DIR\"]\n test_output_file = os.path.join(base_output_path, \"%s.test.bio\" % short_name)\n\n split_wikiner(base_output_path, train_input_file, prefix=short_name, train_fraction=0.8, test_section=False)\n shutil.copy2(test_input_file, test_output_file)\n convert_bio_to_json(base_output_path, base_output_path, short_name)", "def getrefseq(fastafname):\n f = open(fastafname,\"r\")\n f.readline()\n lines = []\n for l in f:\n if l=='' or l[0] == '\\n' or l[0]=='>':\n break\n else:\n lines.append(l.strip())\n return ''.join(lines)", "def align_one_fa(infilepath, outdirpath=None, program=None, aamatrix_path=None,\n conv_to_nex=None):\n #if not infilepath.rsplit('.', 1)[1] == 'fa':\n # print('\\n*** Warning: The file specified does not have the extension fa.')\n if outdirpath == None:\n outdirpath = os.path.dirname(infilepath)\n outfilename = os.path.basename(infilepath).rsplit('.', 1)[0] + '.afaa'\n outfilepath = os.path.join(outdirpath, outfilename)\n # Align with muscle with default DataPaths(main_data_dir) and aamatrix.\n align_fa(infilepath, outfilepath, aamatrix_path,\\\n program)\n\n # Optionally convert output file to nex and delete afa.\n if conv_to_nex:\n o = os.path.join(outdirpath, outfilename)\n outfilepath2 = outfilepath.rsplit('.', 1)[0] + '.nex'\n afa_to_nex(outfilepath, outfilepath2)\n os.remove(outfilepath)\n outfilepath = outfilepath2\n \n # Return path to output file.\n return outfilepath", "def walk_msa(msa, k):\n\n shannons = []\n with open(msa, 'r') as f:\n\n # inelegant hack to get MSA length\n length = len(list(SeqIO.parse(f, 'fasta'))[0]) \n \n start = 0\n while start < length:\n start, stop = walk_msa(msa, k)\n current = []\n \n 
for record in SeqIO.parse(f, 'fasta'):\n current.append(record.seq[start, stop])\n \n shannons.append(shannon(current))\n \n return shannons, length", "def getSequence(sequencePath):\n with open(sequencePath,'r') as SEQPATH:\n lines=SEQPATH.readlines()\n SEQPATH.closed\n rna=[]\n names=[]\n i=0\n while i<len(lines):\n if lines[i][0]=='>':\n names.append(lines[i].strip().replace('.','-'))\n rna.append(\"\")\n line=len(names)-1\n else:\n rna[line]+=lines[i].strip()\n i+=1\n \n if len(rna)!=len(names):\n print\"error:fasta file is not good:\", len(names),\" rna but \", len(rna),\" sequences.\"\n sys.exit(1)\n return names,rna", "def idfname(self):\n return Path(self.idf.savecopy(self.running_directory / \"in.idf\")).expand()", "def find_path(self):\n j = JeuRecherche(self.pos, self.goal.pos, distManhattan, self.dir_vecs, self.dims, self.walls)\n self.path = astar(j)\n self.current = 0\n if(self.verbose):\n print(\"Player {} moving towards Restaurant {}.\".format(self.id, self.goal.id))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the Newick-formatted string with the cladogram of ancestral nodes for the given alignment method (msaid) and model (phylomodelid)
def get_anc_cladogram(con, msaid, phylomodelid):
    cur = con.cursor()
    sql = "select newick from AncestralCladogram where unsupportedmltreeid in"
    sql += "(select id from UnsupportedMlPhylogenies where almethod=" + \
        msaid.__str__() + " and phylomodelid=" + phylomodelid.__str__() + ")"
    cur.execute(sql)
    newick = cur.fetchone()[0]
    return newick
[ "def make_cograph(tree, alist):\n #first find number of verts in cograph\n ord = 1\n for a in alist:\n ord = ord*a\n #initialize a matrix of the right size to be all 0s\n adj = np.zeros((ord, ord))\n #bubble up the tree\n #for each leaf\n leaves = get_vertices_of_depth(tree, len(alist))\n print(leaves)\n for i in range(len(leaves)):\n for j in range(len(leaves)):\n if i != j:\n #we have 2 distinct leaves find MRCA\n n1 = leaves[i]\n n2= leaves[j]\n while True:\n pari = n1.get_parent().get_id()\n parj = n2.get_parent().get_id()\n if pari == parj:\n if n1.get_parent().get_level() % 2==0: # parent is X join\n adj[i][j] = 1\n # adj[j][i] = 1\n break\n n1 = n1.get_parent()\n n2 = n2.get_parent()\n return adj", "def align():\n sh.clustalo('-i', amplified, '-o', aligned)", "def __str__(self):\n\t\tstring = \"\"\n\t\tfor aa in aa_to_codon:\n\t\t\tstring += \" Amino Acid: \" + aa + \" Count: \" + str(self.aa_count[aa]) +\"\\n\"\n\t\t\tstring += \" Codon Usage: \"\n\t\t\tfor codon in aa_to_codon[aa]:\n\t\t\t\tstring += codon + \" : \" + str(self.codon_table[codon]) + \" \"\n\t\t\tstring += \"\\n RSCU Values: \"\n\t\t\tfor codon in aa_to_codon[aa]:\n\t\t\t\tstring += codon + \" : \" + str(self.rscu_table[codon]) + \" \"\n\t\t\tstring += \"\\n\\n\"\n\n\t\treturn string", "def phyloxml(self):\n # Load Tree with addition information\n tree = newick.loads(self.data_phyloxml)[0]\n\n # Load Additional information from the database\n clades = Clade.query.all()\n id_to_clade = {c.id: c.name for c in clades}\n seq_to_species = {}\n seq_to_id = {}\n species = []\n\n for s in self.sequences.all():\n seq_to_id[s.name] = s.id\n seq_to_species[s.name] = s.species.code\n if s.species not in species:\n species.append(s.species)\n\n csep = CrossSpeciesExpressionProfile()\n csep_data = csep.get_data(*seq_to_id.values())\n\n has_heatmap = False\n heatmap_order = []\n for cd in csep_data:\n if \"profile\" in cd.keys() and \"order\" in cd[\"profile\"].keys():\n has_heatmap = True\n heatmap_order = cd[\"profile\"][\"order\"]\n break\n\n # Start constructing PhyloXML\n doc, tag, text, line = Doc().ttl()\n with tag(\"phyloxml\"):\n with tag(\"phylogeny\", rooted=\"True\"):\n # line('name', self.label)\n # line('description', \"PlaNet 2.0 PhyloXML tree\")\n Tree.__yattag_node(\n tree, tag, text, line, id_to_clade, seq_to_species, seq_to_id\n )\n\n with tag(\"graphs\"):\n if has_heatmap:\n with tag(\"graph\", type=\"heatmap\"):\n line(\"name\", \"Heatmap\")\n with tag(\"legend\", show=1):\n for label in heatmap_order:\n with tag(\"field\"):\n line(\"name\", label)\n with tag(\"gradient\"):\n line(\"name\", \"YlGnBu\")\n line(\"classes\", len(heatmap_order))\n with tag(\"data\"):\n for cd in csep_data:\n if (\n \"profile\" in cd.keys()\n and \"data\" in cd[\"profile\"].keys()\n ):\n with tag(\n \"values\", **{\"for\": str(cd[\"sequence_id\"])}\n ):\n for label in heatmap_order:\n if cd[\"profile\"][\"data\"][label] is not None:\n line(\n \"value\",\n cd[\"profile\"][\"data\"][label],\n )\n else:\n line(\"value\", \"\")\n\n with tag(\"graph\", type=\"binary\"):\n line(\"name\", \"Low Expression\")\n with tag(\"legend\", show=1):\n with tag(\"field\"):\n line(\"name\", \"Low expression\")\n line(\"color\", \"0xf03b20\")\n line(\"shape\", \"circle\")\n\n with tag(\"data\"):\n for cd in csep_data:\n if \"low_expressed\" in cd.keys():\n with tag(\"values\", **{\"for\": str(cd[\"sequence_id\"])}):\n line(\"value\", cd[\"low_expressed\"])\n\n with tag(\"graph\", type=\"multibar\"):\n line(\"name\", \"Expression Range\")\n with 
tag(\"legend\", show=1):\n with tag(\"field\"):\n line(\"name\", \"Max. Expression (TPM)\")\n line(\"color\", \"0x664977\")\n\n with tag(\"data\"):\n for cd in csep_data:\n if \"max_expression\" in cd.keys():\n with tag(\"values\", **{\"for\": str(cd[\"sequence_id\"])}):\n line(\"value\", cd[\"max_expression\"])\n\n with tag(\"taxonomies\"):\n for s in species:\n with tag(\"taxonomy\", code=s.code):\n line(\"color\", s.color.replace(\"#\", \"0x\"))\n line(\"name\", s.name)\n line(\n \"url\",\n url_for(\n \"species.species_view\", species_id=s.id, _external=True\n ),\n )\n\n for c in clades:\n with tag(\"taxonomy\", code=c.name):\n line(\"color\", \"0x000000\")\n line(\"name\", c.name)\n line(\n \"url\",\n url_for(\"clade.clade_view\", clade_id=c.id, _external=True),\n )\n\n return indent(doc.getvalue())", "def generate_ATT_from_phylesystem(aln,\n workdir,\n study_id,\n tree_id,\n phylesystem_loc='api',\n ingroup_mrca=None):\n assert isinstance(aln, datamodel.charmatrixmodel.DnaCharacterMatrix)\n for tax in aln.taxon_namespace:\n tax.label = tax.label.replace(\" \", \"_\") # Forcing all spaces to underscore UGH\n nexson = get_nexson(study_id, phylesystem_loc)\n ott_ids = get_subtree_otus(nexson,\n tree_id=tree_id,\n subtree_id=\"ingroup\",\n return_format=\"ottid\")\n if ingroup_mrca:\n if type(ingroup_mrca) == list:\n ott_ids = set(ingroup_mrca)\n ott_mrca = get_mrca_ott(ott_ids)\n else:\n ott_mrca = int(ingroup_mrca)\n else:\n ott_mrca = get_mrca_ott(ott_ids)\n newick = extract_tree(nexson,\n tree_id,\n PhyloSchema('newick',\n output_nexml2json='1.2.1',\n content=\"tree\",\n tip_label=\"ot:originalLabel\"))\n newick = newick.replace(\" \", \"_\") # UGH Very heavy handed, need to make sure happens on alignement side as well.\n tre = Tree.get(data=newick,\n schema=\"newick\",\n preserve_underscores=True,\n taxon_namespace=aln.taxon_namespace)\n # this gets the taxa that are in the subtree with all of their info - ott_id, original name,\n otus = get_subtree_otus(nexson, tree_id=tree_id)\n otu_dict = {}\n orig_lab_to_otu = {}\n treed_taxa = {}\n for otu_id in otus:\n otu_dict[otu_id] = extract_otu_nexson(nexson, otu_id)[otu_id]\n otu_dict[otu_id][\"^physcraper:status\"] = \"original\"\n otu_dict[otu_id][\"^physcraper:last_blasted\"] = \"1800/01/01\"\n orig = otu_dict[otu_id].get(u\"^ot:originalLabel\").replace(\" \", \"_\")\n orig_lab_to_otu[orig] = otu_id\n treed_taxa[orig] = otu_dict[otu_id].get(u\"^ot:ottId\")\n for tax in aln.taxon_namespace:\n try:\n tax.label = orig_lab_to_otu[tax.label].encode(\"ascii\")\n except KeyError:\n sys.stderr.write(\"{} doesn't have an otu id. It is being removed from the alignment. 
\"\n \"This may indicate a mismatch between tree and alignment\\n\".format(tax.label))\n # need to prune tree to seqs and seqs to tree...\n otu_newick = tre.as_string(schema=\"newick\")\n workdir = os.path.abspath(workdir)\n return AlignTreeTax(otu_newick, otu_dict, aln, ingroup_mrca=ott_mrca, workdir=workdir)\n # newick should be bare, but alignment should be DNACharacterMatrix", "def phyml_tree(self):\n print \"Invoking PhyML...\"\n if runs > 0 or boot > 0:\n print \"ERROR: Bootstrap and multiple runs not yet implemented for PhyML.\"\n print \"Try using RAxML.\"\n exit()\n # Output sequence to a temp FASTA file\n tempfastafile = self.indata.filebase + \"_\" + self.impname + \"_fastatmp.fasta\"\n AlignIO.write(self.indata.sequence, tempfastafile, \"fasta\")\n tempphyfile = self.indata.filebase + \"_\" + self.impname + \"_phytmp.phy\"\n AlignIO.convert(tempfastafile, \"fasta\", tempphyfile, \"phylip-relaxed\")\n\n phyml_args = {\"input\": tempphyfile, \"alpha\": \"e\"}\n phystarttreename = \"PhyML_imp\", self.impname, \"starttree.newick\"\n if self.starttree:\n Phylo.write(self.starttree, phystarttreename, \"newick\")\n phyml_args[\"input_tree\"] = phystarttreename\n\n if exlocal:\n cmdline = PhymlCommandline(cmd='./PhyML', **phyml_args)\n else:\n cmdline = PhymlCommandline(**phyml_args)\n\n print \"Commandline for PhyML: \" + str(cmdline)\n out_log, err_log = cmdline()\n if verbose:\n print err_log\n print out_log\n phytreefile = tempphyfile + \"_phyml_tree.txt\"\n self.tree = Phylo.read(phytreefile, \"newick\")\n if not verbose:\n phyml_globname = self.indata.filebase + \"_\" + self.impname + \"*\"\n phyml_glob = glob.glob(phyml_globname)\n for delfile in phyml_glob:\n os.remove(delfile)", "def build_trees(alns, trees):\n # prepare calculator and constructor\n calculator = DistanceCalculator('blosum62')\n constructor = DistanceTreeConstructor()\n for aln, tree in zip(alns, trees):\n print(aln, tree)\n processes = []\n for method in phylip_symb:\n processes.append(subprocess.Popen([\n method,\n '-auto',\n '-sequence',\n aln,\n '-outtreefile',\n tree.format(method)\n ]))\n # nj + upgma\n with open(aln) as fin:\n alnr = AlignIO.read(fin, 'fasta')\n dm = calculator.get_distance(alnr)\n Phylo.write(\n constructor.upgma(dm),\n tree.format('upgma'),\n 'newick'\n )\n Phylo.write(\n constructor.nj(dm),\n tree.format('nj'),\n 'newick'\n )\n for process in processes:\n print(process.wait())", "def write_multilevel_consensus(self, algorithm, motif):\n raise NotImplementedError()\n self.f.write(\"\"\"\nMultilevel CAGCCCTG\nconsensus T A A\nsequence T\n\"\"\")", "def cladogram(self):\n\n self.add_path('results_cladogram', '.svg')\n cmd = 'lefse_plot_cladogram.py {input_file} {cladogram} --format svg;'\n self.jobtext.append(cmd.format(input_file=self.get_file('lefse_results'),\n cladogram=self.get_file('results_cladogram')))", "def project_hyd(alnfile):\n\n alignment = AlignIO.read(open(alnfile), \"clustal\")\n\n nrow = len(alignment)\n ncol = alignment.get_alignment_length()\n alnmat = np.zeros(( nrow, ncol ))\n list_substr = []\n list_id = []\n list_ix = []\n\n for ix, record in enumerate(alignment):\n seq_aln = np.array(record.seq)\n seq_ref = \"\".join(list(seq_aln[seq_aln!='-']))\n current_record = record.id.split('_')[0]\n\n if \"dssp\" not in record.id and current_record not in list_id and current_record != \"space\":\n list_id.append(current_record)\n list_ix.append(ix)\n\n substr = structures[structures['ORF'] == current_record]['substrate'].item()\n if substr == '+':\n list_substr.append(1)\n 
elif substr == '-':\n list_substr.append(0)\n\n # get profile\n hyd = np.zeros(( len(seq_ref) ))\n \n for ax, aa in enumerate(seq_ref):\n hyd[ax] = aa_hydrophobicity[aa]\n\n hyd_aln = np.zeros(( len(seq_aln) )) * np.nan\n\n pos_aln = 0\n pos_ref = 0\n\n while (pos_aln < ncol):\n if seq_aln[pos_aln] == '-':\n pos_aln += 1\n else:\n hyd_aln[pos_aln] = hyd[pos_ref]\n pos_aln += 1\n pos_ref += 1\n\n alnmat[ix,:] = hyd_aln\n\n alnmat = alnmat[np.array(list_ix),:]\n\n alndf = pd.DataFrame(alnmat)\n alndf.insert(loc=0, column='substrate', value=list(list_substr) )\n alndf.sort_values('substrate', ascending=True, inplace=True)\n #alndf.drop('substrate', 1, inplace=True)\n alndf.to_csv(\"../data/processed/c3718_aln_hyd.txt\", sep='\\t', header=True, index=False)\n\n return alndf, alnmat", "def generate_pfam_aligned_codons(pfam_id):\n _log.info(\"Started a meta-domain based on the alignment of all '\"+pfam_id+\"' Pfam domains in the human genome\")\n start_time = time.clock()\n \n # the consensus length \n consensus_length = 0\n # the meta_domain that is to be returned\n meta_codons_per_consensus_pos = {}\n # the mapping of the protein {protein_id: {protein_posistion: consensus_position}}\n consensus_pos_per_protein = {}\n # the amount of domain occurrences found\n n_instances = 0 \n \n # retrieve the alignment\n hmmeralign_output = interpret_hmm_alignment_file(METADOMAIN_DIR+pfam_id+'/'+METADOMAIN_ALIGNMENT_FILE_NAME) \n if not len (hmmeralign_output) == 0:\n #update the consensus length\n consensus_length = len(hmmeralign_output['consensus']['sequence'])\n \n # update the number of instances\n n_instances = len(hmmeralign_output['alignments'])\n _log.debug(\"Creating the alignment of mappings for '\"+str(n_instances) +\"' '\"+pfam_id+\"' domain occurrences based on the HMM alignment to consensus and original domain sequence\")\n \n # ensure we can map consensus residues back to consensus positions\n hmmeralign_output['consensus']['aligned_sequence'] = convert_pfam_fasta_alignment_to_original_aligned_sequence(hmmeralign_output['consensus']['alignment'])\n hmmeralign_output['consensus']['mapping_consensus_alignment_to_positions'] = map_sequence_to_aligned_sequence(hmmeralign_output['consensus']['sequence'], hmmeralign_output['consensus']['aligned_sequence'])\n \n # create mappings between domain occurrences and the domain consensus sequence\n for _alignment in hmmeralign_output['alignments']:\n # retrieve current aligned domain\n \n # Create a mapping from the aligned domain sequence to the domain sequence\n aligned_sequence = convert_pfam_fasta_alignment_to_original_aligned_sequence(_alignment['alignment'])\n original_sequence = convert_pfam_fasta_alignment_to_strict_sequence(aligned_sequence)\n mapping_domain_alignment_to_sequence_positions = map_sequence_to_aligned_sequence(original_sequence, aligned_sequence)\n \n # Generate the strict sequence for this domain; leaving only residues that were aligned to the domain consensus\n strict_aligned_sequence = convert_pfam_fasta_alignment_to_strict_fasta(_alignment['alignment'])\n \n # create the mapping between the strict alignments and the original consensus sequence\n mapping_aligned_domain_to_domain_consensus = createAlignedSequenceMapping(strict_aligned_sequence, hmmeralign_output['consensus']['aligned_sequence'], False)\n \n # create a list of mapping positions that includes insertions\n mapping_positions = list(mapping_domain_alignment_to_sequence_positions.keys()) + list(set(mapping_aligned_domain_to_domain_consensus.keys()) - 
set(mapping_domain_alignment_to_sequence_positions.keys()))\n \n # Second add each aligned residue mapping\n for mapping_pos in sorted(mapping_positions):\n # retrieve the residue at the consensus position and the residue at the domain position\n consensus_domain_residue = hmmeralign_output['consensus']['aligned_sequence'][mapping_pos]\n \n if consensus_domain_residue == '-':\n # Set the default values for the insertion\n continue\n else:\n # retrieve the position in the domain consensus\n domain_consensus_pos = hmmeralign_output['consensus']['mapping_consensus_alignment_to_positions'][mapping_pos]\n \n # retrieve the position in the domain sequence\n ref_pos = mapping_domain_alignment_to_sequence_positions[mapping_pos]\n # convert the position in the domain sequence to the uniprot position and genomic position\n uniprot_pos = int(_alignment['start_pos']) + ref_pos -1\n \n # Add the consensus pos to the protein\n if not _alignment['uniprot_ac'] in consensus_pos_per_protein.keys():\n consensus_pos_per_protein[_alignment['uniprot_ac']] = {}\n if not uniprot_pos in consensus_pos_per_protein[_alignment['uniprot_ac']].keys():\n consensus_pos_per_protein[_alignment['uniprot_ac']][uniprot_pos] = []\n consensus_pos_per_protein[_alignment['uniprot_ac']][uniprot_pos].append(domain_consensus_pos) \n \n # now incorporate the alignment data into our domain model in form of mappings\n # First get the protein ids for the uniprot acs\n uniprot_acs_to_ids = ProteinRepository.retrieve_protein_id_for_multiple_protein_acs([x for x in consensus_pos_per_protein.keys()])\n protein_ids = [int(y) for y in np.unique([x for x in uniprot_acs_to_ids.values()])]\n \n # Second, get all mappings for these proteins\n protein_mappings = MappingRepository.get_mappings_for_multiple_protein_ids(protein_ids)\n \n # retrieve all transcripts mapped to these protein_ids\n gene_ids = GeneRepository.retrieve_transcript_id_for_multiple_protein_ids(protein_ids)\n \n # create all aligned codons\n meta_codons_per_consensus_pos = {}\n for uniprot_ac in consensus_pos_per_protein.keys():\n for uniprot_pos in consensus_pos_per_protein[uniprot_ac].keys():\n for domain_consensus_pos in consensus_pos_per_protein[uniprot_ac][uniprot_pos]:\n # Retrieve the mapping for the corresponding uniprot_position\n mappings_for_uniprot_pos = [x for x in protein_mappings[uniprot_acs_to_ids[uniprot_ac]] if x.uniprot_position == uniprot_pos]\n \n # Seperate the mappings per gene_id\n mapping_per_gene_id = {}\n for mapping in mappings_for_uniprot_pos:\n if not mapping.gene_id in mapping_per_gene_id.keys():\n mapping_per_gene_id[mapping.gene_id] = []\n mapping_per_gene_id[mapping.gene_id].append(mapping)\n \n for gene_id in mapping_per_gene_id.keys():\n # Obtain the mappings for this position\n mappings = mapping_per_gene_id[gene_id]\n\n try:\n # create a codon\n codon = Codon.initializeFromMapping(mappings, gene_ids[gene_id], uniprot_ac)\n \n # Add the codon to the consensus positions\n if not domain_consensus_pos in meta_codons_per_consensus_pos.keys():\n meta_codons_per_consensus_pos[domain_consensus_pos] = []\n \n meta_codons_per_consensus_pos[domain_consensus_pos].append(codon)\n except MalformedCodonException as e:\n raise MalformedMappingsForAlignedCodonsPosition(\"Encountered a malformed codon mapping for domain '\"\n +str(pfam_id)+\"' in gene '\"+str(gene_id)\n +\"', at amino_acid_position '\"+str(uniprot_pos)\n +\"':\" + str(e))\n \n time_step = time.clock()\n _log.info(\"Finished the alignment of mappings for '\"+str(n_instances) +\"' instances 
'\"+pfam_id+\"' domain occurrences in \"+str(time_step-start_time)+\" seconds\")\n return meta_codons_per_consensus_pos, consensus_length, n_instances", "def __str__ (self):\n return \"ncRNA(%s)\" % str(self.sequence)", "def anarci_output(numbered, sequences, alignment_details, outfile, sequence_id=None, domain_id=None): \n assert (sequence_id is not None) or (sequence_id is None and domain_id is None), \"If domain_id is specified, sequence_id must also be specified.\"\n for i in range(len(numbered)):\n if sequence_id is None:\n outfile.write(\"# %s\\n\"%sequences[i][0]) # print the name\n if numbered[i] is not None:\n if sequence_id is not None:\n if i != sequence_id: continue\n outfile.write(\"# ANARCI numbered\\n\")\n for j in range( len(numbered[i])): # Iterate over domains\n if domain_id is not None:\n if j != domain_id: continue\n outfile.write(\"# Domain %d of %d\\n\"%(j+1, len(numbered[i]) ))\n outfile.write(\"# Most significant HMM hit\\n\")\n outfile.write(\"#|species|chain_type|e-value|score|seqstart_index|seqend_index|\\n\")\n alignment_details[i][j][\"evalue\"] = str( alignment_details[i][j][\"evalue\"] )\n outfile.write(\"#|%s|%s|%s|%.1f|%d|%d|\\n\"%tuple( [alignment_details[i][j][field] for field in \n [\"species\",\"chain_type\",\"evalue\",\"bitscore\"]] \n +[ numbered[i][j][1], numbered[i][j][2]] ))\n \n if 'germlines' in alignment_details[i][j]:\n outfile.write('# Most sequence-identical germlines\\n')\n outfile.write('#|species|v_gene|v_identity|j_gene|j_identity|\\n')\n (species, vgene), vid =alignment_details[i][j]['germlines'].get('v_gene', [['','unknown'],0])\n if vgene is None:\n vgene, vid = 'unknown', 0\n (_,jgene), jid =alignment_details[i][j]['germlines'].get('j_gene', [['','unknown'],0])\n if jgene is None:\n jgene, jid = 'unknown', 0\n outfile.write('#|%s|%s|%.2f|%s|%.2f|\\n'%(species, vgene, vid, jgene, jid )\t)\n chain_type = chain_type_to_class[ alignment_details[i][j][\"chain_type\"] ]\n outfile.write(\"# Scheme = %s\\n\"%alignment_details[i][j][\"scheme\"])\n if len( numbered[i][j][0] ) == 0:\n outfile.write(\"# Warning: %s scheme could not be applied to this sequence.\\n\"%alignment_details[i][j][\"scheme\"])\n for (index, insertion), aa in numbered[i][j][0]:\n outfile.write( \"%s %s %s %s\\n\" % (chain_type, (\"%d\"%index).ljust(5), insertion, aa))\n outfile.write(\"//\\n\")", "def create_pdf(clf):\n\t dot_data = StringIO.StringIO() \n\t tree.export_graphviz(clf, out_file=dot_data)\n\t graph = pydot.graph_from_dot_data(dot_data.getvalue())\n\t graph.write_pdf('abalone.pdf')", "def __str__ (self):\n return \"mRNA(%s)\" % str(self.sequence)", "def make_cosmat_string(self):\n\n mat = self.matX\n\n if not mat.ready:\n return 1, '** no X-matrix to compute correlation matrix from'\n\n if not mat.cormat_ready: # then set it\n mat.set_cormat()\n\n mstr = ''\n for r in range(mat.ncols):\n for c in range(mat.ncols):\n mstr += '%6.3f ' % mat.cosmat[r,c]\n mstr += '\\n'\n\n return 0, mstr", "def adata_clonotype_network(adata_conn):\n adata = AnnData(\n var=pd.DataFrame().assign(gene_symbol=[\"CD8A\", \"CD4\"]).set_index(\"gene_symbol\"),\n X=np.array(\n [\n [3, 4, 0, 0, 3, 3, 1, 0, 2, 2, 0],\n [0, 0, 1, 1, 2, 0, 0, 0, 1, 0, 0],\n ]\n ).T,\n obs=adata_conn.obs,\n uns=adata_conn.uns,\n obsm=adata_conn.obsm,\n )\n adata.obs[\"continuous\"] = [3, 4, 0, 0, 7, 14, 1, 0, 2, 2, 0]\n ir.tl.clonotype_network(adata, sequence=\"aa\", metric=\"alignment\")\n adata.uns[\"scirpy_version\"] = \"0.7\"\n return adata", "def display_ensembl_alignment_table(compara):\n 
compara.method_species_links.Legend = (\n \"Assign the desired value from method_link_species_set_id to the\"\n \" method_clade_id argument\"\n )\n print(compara.method_species_links)\n exit(0)", "def String(self, mid):\n if mid in self.motifs.keys():\n dMotif = self.motifs[mid]\n else:\n Info(\"ID incorrect, can't find Motif ID: %s\" %mid)\n return ''\n motif_string = ['\\n']\n for itag in self.attr_list + self.tag_list:\n try:\n motif_string.append(\"%s: %s\\n\" %(itag, ' '*(10-len(itag)) + List2Str(dMotif[itag]) ))\n except KeyError:\n motif_string.append(\"%s: None\\n\" %itag)\n\n itag = 'pssm'\n for imatrix in dMotif[itag]:\n motif_string.append(\"PSSM: A C G T\\n\")\n for i in range(len(imatrix)):\n motif_string.append(\"|%6d\"%(i+1,) + \" %3.3f %3.3f %3.3f %3.3f\\n\" %tuple(imatrix[i]))\n motif_string.append(\"\\n\")\n \n print List2Str(motif_string,\"\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Provide a newick string; this method will reroot the tree based on the 'outgroup' setting.
def reroot_newick(con, newick):
    cur = con.cursor()
    dendrotree = Tree()
    dendrotree.read_from_string(newick, "newick")
    sql = "select shortname from Taxa where id in (select taxonid from GroupsTaxa where groupid in (select id from TaxaGroups where name='outgroup'))"
    cur.execute(sql)
    rrr = cur.fetchall()
    outgroup_labels = []
    for iii in rrr:
        label = re.sub("_", " ", iii[0])
        outgroup_labels.append(label.__str__())
    mrca = dendrotree.mrca(taxon_labels=outgroup_labels)
    if mrca.edge.tail_node is not None and mrca.edge.head_node is not None:
        dendrotree.reroot_at_edge(mrca.edge, update_splits=True)
    newick = dendrotree.as_string("newick")
    return newick
[ "def init_newick(self):\n with open(self.newick_path, 'r') as myfile:\n tree_str = myfile.read().replace('\\n', '')\n\n return tree_str", "def ntree_parse(matchObj, argv):\n tree = matchObj.group('tree')\n out = ''\n out += \"\\n\\\\begin{tikzpicture}[nodes={circle, draw}]\"\n out += \"\\n\\\\graph[binary tree layout, fresh nodes]{\\n\"\n # the syntax we use is the syntax used by the sub package 'graph' of TikZ\n out += tree + \"};\\n\\\\end{tikzpicture}\\n\"\n return out", "def get_outgroup(self):\n if self.outgroup is not None:\n outgroup_taxonomy = ''\n for i in self.data.seq_records:\n if self.outgroup == i.voucher_code:\n outgroup_taxonomy = '{0}_{1}'.format(i.taxonomy['genus'],\n i.taxonomy['species'])\n break\n outgroup = '\\noutgroup {0}_{1};'.format(self.outgroup,\n outgroup_taxonomy)\n else:\n outgroup = ''\n return outgroup", "def get_group(self): # real signature unknown; restored from __doc__\n return \"\"", "def group(objects, parent=\"string\", empty=bool, world=bool, relative=bool, absolute=bool, name=\"string\", useAsGroup=\"string\"):\n pass", "def test_write_tree(self):\n\n newick = '''(\n (\n a:1.000000,\n b:2.000000\n )x:3.000000,\n (\n c:4.000000,\n d:5.000000\n )y:6.000000\n)rra:0.000000;\n'''\n infile = StringIO(newick)\n tree = read_tree(infile)\n\n out = StringIO()\n tree.write(out, rootData=True)\n self.assertEqual(newick, out.getvalue())", "def nodeOutliner(string, replace=\"string\", docTag=\"string\", height=int, defineTemplate=\"string\", parent=\"string\", lastMenuChoice=\"string\", numberOfPopupMenus=bool, connectivity=\"string\", width=int, dragCallback=\"string\", showConnectedOnly=bool, highlightColor=float, annotation=\"string\", enable=bool, longNames=bool, preventOverride=bool, nodesDisplayed=bool, showNonKeyable=bool, showInputs=bool, showOutputs=bool, attrAlphaOrder=\"string\", pressHighlightsUnconnected=bool, menuCommand=\"string\", exists=bool, showPublished=bool, showNonConnectable=bool, showHidden=bool, multiSelect=bool, addObject=\"string\", niceNames=bool, enableBackground=bool, visibleChangeCommand=\"string\", visible=bool, useTemplate=\"string\", noBackground=bool, fullPathName=bool, dropCallback=\"string\", selectCommand=\"string\", popupMenuArray=bool, addCommand=\"string\", removeAll=bool, backgroundColor=float, noConnectivity=bool, manage=bool, showReadOnly=bool, menuMultiOption=bool, isObscured=bool, currentSelection=bool, remove=\"string\"):\n pass", "def gen_new_group_name(self, group, new_name):\n new_name.value = self._gen_new_group_name(group.encode(), new_name.value.encode())", "def appendGroup(self, *args):\n\n\t\tcurGroup = cmds.textScrollList('hairGroups', q=1, si=1)[0]\n\n\t\tfor x in cmds.ls(sl=1):\n\t\t\tcmds.parent(x, curGroup)\n\n\t\treturn", "def rtree_outgroup_labels(tree):\n node = None\n # add an n_leaves_under attribute\n for node in tree.postorder_node_iter():\n e = node.edge\n p = getattr(e, \"tail_node\", None)\n if p:\n p.n_leaves_under = getattr(p, \"n_leaves_under\", 0) + getattr(node, \"n_leaves_under\", 1)\n\n # find the child of the root with the largest number of descendants\n seed_node = tree.seed_node\n ch = seed_node.child_nodes()\n f = ch[0]\n f.in_biggest = False\n biggest_clade, bc_size = f, getattr(f, \"n_leaves_under\", 1)\n for nd in ch[1:]:\n nk = getattr(nd, \"n_leaves_under\", 1)\n if nd > bc_size:\n biggest_clade, bc_size = nd, nk\n nd.in_biggest = False\n # Mark the biggest clade, and accumulate out all unmarked leaf names\n biggest_clade.in_biggest = True\n outgroup_labels = []\n for node in 
tree.preorder_node_iter():\n par = node.parent_node\n if node == seed_node or par == seed_node:\n continue\n node.in_biggest = par.in_biggest\n if (not node.in_biggest) and (not node.child_nodes()):\n outgroup_labels.append(node.label)\n return outgroup_labels", "def groupTo(self, mNode='', mGrp=''):\r\n\r\n if mNode:\r\n mNode = self.convertToPyNode(mNode)\r\n else:\r\n if self.verifyMeshSelection():\r\n mNode = selected()[0]\r\n\r\n if mNode:\r\n if mGrp:\r\n mGrp = self.convertToPyNode(mGrp)\r\n if mNode.getParent():\r\n if not mNode.getParent() == mGrp.name():\r\n parent(mNode.name(), mGrp.name())\r\n else:\r\n self.logger.info('Node is already child of group')\r\n else:\r\n parent(mNode.name(), mGrp.name())\r\n else:\r\n if mNode.getParent():\r\n parent(mNode.name(), world=1)\r\n else:\r\n self.logger.info('Node is already child of world')", "def leaf_from_string(self, entry, *, parent=None):", "def write_groups(out_file, groupname):\r\n print(\"To create a single group please just enter the main group name i.e. Group Name\")\r\n print('To create a subgroup to an exisitng group, please enter /Group Name/Subgroup Name/etc/etc/')\r\n print() \r\n attributes = {}\r\n print(\"Enter attributes for\", groupname)\r\n meta = input(\"Is there a metadata file? (Y/N): \")\r\n if meta == \"Y\" or meta == \"y\":\r\n metapath = input(\"Enter metadata file path: \")\r\n with open(metapath, 'r') as metafile:\r\n for line in metafile:\r\n line = line.split('\\t')\r\n item = line[0].strip('\\n')\r\n value = line[-1].strip('\\n')\r\n if item in attributes.keys():\r\n attributes[item].append(value)\r\n else:\r\n attributes[item] = [value]\r\n else:\r\n input_attributes = input(\"Enter an attribute followed by a value. i.e. Project Name: iknowit, Date: 04-11-2019: \")\r\n for attribute in input_attributes.split(','):\r\n attribute = attribute.split(':')\r\n attributes[attribute[0].strip(' ')] = attribute[1].strip(' ')\r\n data_file = h5py.File(out_file, 'a')\r\n dset = data_file.create_group(groupname)\r\n for k, v in attributes.items():\r\n dset.attrs[k] = v", "def draw(self):\n print self.treeString()", "def replaceChild(self, *args) -> \"void\":\n return _coin.SoGroup_replaceChild(self, *args)", "def test_tree_namefunc(self):\n\n count = [0]\n\n def namefunc(name):\n count[0] += 1\n return 'name%d' % count[0]\n\n tree = treelib.read_tree(StringIO(fungi2), namefunc=namefunc)\n newick = tree.get_one_line_newick()\n expected_newick = '(((((((name1:7.061760,name2:7.061760):4.999680,name3:12.061440):5.970600,name4:18.032040):52.682400,name5:70.714260):7.220700,name6:77.934960):23.181480,((name7:78.553260,name8:78.553260):10.434960,name9:88.988220):12.128400):78.883560,(((name10:41.275620,name11:41.275980):29.632860,(name12:52.323120,name13:52.323120):18.585720):31.149540,((name14:75.615840,name15:75.615840):14.006880,name16:89.622720):12.435660)xx:77.941620);' # nopep8\n\n self.assertEqual(newick, expected_newick)\n\n def namefunc2(name):\n return 'prefix_' + name\n\n newick2 = tree.get_one_line_newick(namefunc=namefunc2)\n expected_newick2 = 
'(((((((prefix_name1:7.061760,prefix_name2:7.061760):4.999680,prefix_name3:12.061440):5.970600,prefix_name4:18.032040):52.682400,prefix_name5:70.714260):7.220700,prefix_name6:77.934960):23.181480,((prefix_name7:78.553260,prefix_name8:78.553260):10.434960,prefix_name9:88.988220):12.128400):78.883560,(((prefix_name10:41.275620,prefix_name11:41.275980):29.632860,(prefix_name12:52.323120,prefix_name13:52.323120):18.585720):31.149540,((prefix_name14:75.615840,prefix_name15:75.615840):14.006880,prefix_name16:89.622720):12.435660)xx:77.941620);' # nopep8\n\n self.assertEqual(newick2, expected_newick2)", "def get_nodegroup_by_name(self, context, cluster_id, nodegroup_name):", "def treeLister(string, vnnString=bool, docTag=\"string\", height=int, defineTemplate=\"string\", parent=\"string\", clearContents=bool, addVnnItem=\"string\", numberOfPopupMenus=bool, useTemplate=\"string\", favoritesList=bool, width=int, highlightColor=float, dragCallback=\"string\", favoritesCallback=\"string\", collapsePath=\"string\", annotation=\"string\", preventOverride=bool, popupMenuArray=bool, refreshCommand=\"string\", addFavorite=\"string\", exists=bool, resultsPathUnderCursor=bool, executeItem=\"string\", enable=bool, enableBackground=bool, selectPath=\"string\", visibleChangeCommand=\"string\", visible=bool, expandPath=\"string\", itemScript=\"string\", removeItem=\"string\", fullPathName=bool, dropCallback=\"string\", noBackground=bool, backgroundColor=float, removeFavorite=\"string\", addItem=\"string\", manage=bool, expandToDepth=int, isObscured=bool):\n pass", "def input_tree(self):\n\n if self.starttreename:\n if self.starttreename[-3:] == 'xml':\n self.starttree = Phylo.read(self.starttreename, \"phyloxml\")\n elif self.starttreename[-6:] == 'newick':\n self.starttree = Phylo.read(self.starttreename, \"newick\")\n\n print \"Generating phylogenetic tree...\"\n\n if self.treetype[-3:] == 'xml':\n self.tree = Phylo.read(self.treetype, \"phyloxml\")\n elif self.treetype[-3:] == 'nwk':\n self.tree = Phylo.read(self.treetype, \"newick\")\n elif self.treetype == 'pars':\n self.parsimony_tree()\n elif self.treetype == 'PhyML':\n self.phyml_tree()\n else:\n self.raxml_tree()\n\n self.tree.collapse_all(lambda c: c.branch_length <= 0.0)\n self.treeparents = self.all_parents(self.tree)\n for btree in self.btrees:\n btree.collapse_all(lambda c: c.branch_length <= 0.0)\n self.btreeparents.append(self.all_parents(btree))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
msapath must be a phylip file. Returns the seed sequence.
def get_sequence(msapath, taxa):
    fin = open(msapath, "r")
    for l in fin.readlines():
        if l.startswith(taxa):
            tokens = l.split()
            return tokens[1]
[ "def genRandomSequence(numDoms):\n files = ls(DATAPATH)\n f = list(open(DATAPATH + choice(files)))[1::2]\n sequence = choice(f).strip()\n sequence.translate(None, '-')\n \n starts, ends, seqs = findDomains(sequence, hmmfile)\n if len(starts) < numDoms:\n return genRandomSequence(numDoms)\n prefix = sequence[:starts[0]]\n suffix = sequence[ends[-1]:]\n if prefix == '' or suffix == '':\n return genRandomSequence(numDoms)\n linkers = []\n for i in range(len(starts)-1):\n linkers.append(sequence[ends[i]+1:starts[i+1]])\n \n middle = ''\n for _ in range(numDoms - 1):\n middle += choice(seqs) + choice(linkers)\n middle += choice(seqs)\n\n newSeq = prefix + middle + suffix\n newSeq = ''.join(newSeq.split('-'))\n\n #Deletes all lowercase letters\n newSeq = newSeq.translate(None, string.ascii_lowercase)\n #Deletes all illegal AA characters\n newSeq = newSeq.translate(None, 'BJOUXZ')\n\n return newSeq", "def genRandomSequence2(numDoms):\n files = ls(DATAPATH)\n for i in range(len(files))[::-1]:\n if '.fa' not in files[i]:\n files.pop(i)\n pool = []\n i = 0\n while i < numDoms:\n f = list(open(DATAPATH + choice(files)))[1::2]\n f = choice(f).strip()\n if len(findDomains(f, hmmfile)[0]) > 1:\n pool.append(string.translate(f, None, '-'))\n i += 1\n\n starts, ends = findDomains(pool[0], hmmfile)[:2]\n prefix = pool[0][:starts[0]]\n suffix = pool[0][ends[-1]:]\n if prefix == '' or suffix == '':\n return genRandomSequence2(numDoms)\n\n i,j = starts[0], starts[1]\n middle = fixzf(pool[0][i:j])\n\n for sequence in pool[1:]:\n starts = findDomains(sequence, hmmfile)[0]\n i, j = starts[0], starts[1]\n middle += fixzf(sequence[i:j])\n\n newSeq = prefix + middle + suffix\n newSeq = newSeq.translate(None, '-') #''.join(newSeq.split('-'))\n newSeq = newSeq.translate(None, string.ascii_lowercase)\n newSeq = newSeq.translate(None, 'BJOUXZ')\n\n return newSeq", "def get_seq(filepath):\n seqrecord = SeqIO.read(filepath, \"genbank\")\n return seqrecord.seq", "def generate_mps_files(self):\n print(\"starting mps generation\")\n # setting antares options\n print(\"-- pre antares\")\n self.pre_antares()\n # launching antares\n print(\"-- launching antares\")\n antares_output_name = self.launch_antares()\n # writting things\n print(\"-- post antares\")\n lp_path = self.post_antares(antares_output_name)\n return lp_path", "def random_seed(self):\n rand_start = np.random.randint(0, len(self.full_text)-self.seq_len)\n return self.full_text[rand_start:rand_start+self.seq_len]", "def make_default_sequence(filepath):\n\n opened_file = open_and_read_file(filepath)\n sequence = make_list(opened_file)\n\n # print(new_sequence)\n return sequence", "def getSequence(sequencePath):\n with open(sequencePath,'r') as SEQPATH:\n lines=SEQPATH.readlines()\n SEQPATH.closed\n rna=[]\n names=[]\n i=0\n while i<len(lines):\n if lines[i][0]=='>':\n names.append(lines[i].strip().replace('.','-'))\n rna.append(\"\")\n line=len(names)-1\n else:\n rna[line]+=lines[i].strip()\n i+=1\n \n if len(rna)!=len(names):\n print\"error:fasta file is not good:\", len(names),\" rna but \", len(rna),\" sequences.\"\n sys.exit(1)\n return names,rna", "def mkfastadict(self,fastapath): \n\t\tseq_dict={}\n\t\n\t\tfasta_sequences = SeqIO.parse(open(fastapath),'fasta')\n\t\n\t\tfor fasta in fasta_sequences:\n\t\t\tseq_dict[fasta.id]=fasta.seq\n\t\t\n\t\treturn(seq_dict)", "def get_seq_from_hps(self):\n mass_range, atom_range = [0, 0], [0, 0]\n reading_masses, reading_atoms = False, False\n # TODO Not too smart\n for i, line in enumerate(self.data_file):\n if 
\"Atoms\" in line:\n reading_masses = False\n mass_range[1] = i\n if reading_masses and line != '\\n' and mass_range[0] == 0:\n mass_range[0] = i\n if \"Masses\" in line:\n reading_masses = True\n if \"Bonds\" in line:\n reading_atoms = False\n atom_range[1] = i\n if reading_atoms and line != '\\n' and atom_range[0] == 0:\n atom_range[0] = i\n if \"Atoms\" in line:\n reading_atoms = True\n masses = np.genfromtxt(self.data_file[mass_range[0]:mass_range[1]])\n atoms = np.genfromtxt(self.data_file[atom_range[0]:atom_range[1]])\n\n # mass_dict = {}\n # for mass in masses:\n # for res in self.residue_dict:\n # if self.residue_dict[res][\"mass\"] == mass[1]:\n # mass_dict[int(mass[0])] = res\n # break\n\n mass_dict = {}\n for i, res in enumerate(self.residue_dict):\n mass_dict[int(i)+1] = res\n\n # for mass in masses:\n # for res in self.residue_dict:\n # if self.residue_dict[res][\"mass\"] == mass[1]:\n # mass_dict[int(mass[0])] = res\n # break\n\n seq = []\n for atom in atoms:\n seq.append(mass_dict[atom[2]])\n seq_str = ''.join(seq)\n seq_str = seq_str[:self.chain_atoms]\n seqs_avail = glob.glob(f'{self.this}md/data/sequences/*.seq')\n for seq_f in seqs_avail:\n with open(seq_f, 'r') as fin:\n txt = fin.readlines()[0]\n if seq_str.replace('I', 'L') == txt.replace('I', 'L'):\n seq_str = txt\n break\n return seq_str", "def radamsa_gen(seed_payload,res_count):\n\tsubprocess.call(\"radamsa\",\"-n \"+res_count)", "def get_map_seed(instructions):\n match = re.search(rb'\\x00.*? (\\-?[0-9]+)\\x00.*?\\.rms', instructions)\n seed = None\n if match:\n seed = int(match.group(1))\n return seed", "def sequence(self, path):\n raise NotImplementedError('{0} does not support sequences!'.format(type(self)))", "def fastagenerate(size):\n sequence = \"\"\n for i in range(0,int(size)):\n sequence += random.choice([\"A\",\"C\",\"G\",\"T\"])\n\n s = fastasequence(\"sequence\", sequence)\n print s", "def getSequencesFromFile(inputFile):\n sequences = io().readFastaFile(inputFile)\n return sequences", "def one_protein_generator(self):\n seq_title = ''\n seq_list = []\n with open(self._fasta_path, 'r') as fasta_handle:\n for _line in fasta_handle:\n if not _line:\n print('Blank line existed in fasta file')\n continue\n if _line.startswith('>'):\n if seq_title and seq_list:\n yield seq_title, ''.join(seq_list)\n seq_title = _line.strip('\\n')\n seq_list = []\n else:\n seq_list.append(_line.strip('\\n'))\n if seq_title and seq_list:\n yield seq_title, ''.join(seq_list)", "def get_alice_cds_124_seqfeature():\n seq_ftr = create_2_part_seqfeature(70374, 70902, 1, 70901, 71285, 1, \"CDS\")\n return seq_ftr", "def align_protein():\n file = [file for file in os.listdir(\"./data/download/\") if \".hmm\" in file][0]\n cmd = (\"hmmalign \" \n \"./data/download/%s \"\n \"./data/download/all_protein \"\n \"> ./data/download/aligned_prot \")%file\n print \"Aligning sequences\"\n process = os.system(cmd)\n if process:\n print cmd\n raise\n print \"Reading Alignment\"\n alignment = AlignIO.read(open(\"./data/download/aligned_prot\"), \"stockholm\")\n print \"Writing Alignment\"\n write_fasta(\"./data/download/aligned_prot\", alignment)\n sys.stdout.flush()", "def sample_root_seq(n):\n seq = \"\"\n for i in range(n):\n seq += random.sample(MLE.NUCS, 1)[0]\n return seq", "def get_alice_tmrna_169():\n seq_ftr = create_1_part_seqfeature(95923, 96358, 1, \"tmRNA\")\n return seq_ftr" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the hashtable; key = site, value = tuple of (mlstate, mlpp)
def get_site_ml(con, ancid, skip_indels=True):
    cur = con.cursor()
    sql = "select site, state, pp from AncestralStates" + ancid.__str__()
    cur.execute(sql)
    x = cur.fetchall()
    site_tuple = {}
    site_mlpp = {}
    for ii in x:
        site = int(ii[0])
        state = ii[1]
        pp = float(ii[2])
        if state == "-":
            pp = 100.0
        if site not in site_mlpp:
            site_mlpp[site] = pp
            site_tuple[site] = (state, pp)
        if pp > site_mlpp[site]:
            site_mlpp[site] = pp
            site_tuple[site] = (state, pp)

    """Indel correction:"""
    for site in site_tuple:
        found_gap = False
        if site_tuple[site][0] == "-":
            found_gap = True
            break
    if found_gap:
        if skip_indels:
            """Remove the indel site from the dictionary"""
            del site_tuple[site]
        else:
            """Correct the probability of an indel. We don't really have probs. here, so I set it to 0.0"""
            site_tuple[site] = ("-", 0.0)
    return site_tuple
[ "def build_site_dictionary(page, site):\n headers, cookies, word_count = get_data_from(page)\n return {\n \"site_name\": site,\n \"headers\": headers,\n \"cookies\": cookies,\n \"word_count\": word_count}", "def get_sites_dict(self):\n return self.sites_to_dict(self.bsites)", "def get_ml_dict(self):\r\n return self.ml_dict", "def __get_state__(self):\n\t\t## unroll all the parameters\n\t\tgates = self._gates\n\t\t\n\t\tThetas = [theta for gate in gates for theta in gate.__get_state__()['Thetas']] \n\t\tparams = [weight for gate in gates for weight in gate.__get_state__()['params']]\n\n\t\tprint \"Total number of parameters: %d \" % len(params) \n\n\t\treturn dict(Thetas=Thetas,params=params)", "def _generate_graph_state(frame):\n return {\"CT\": [], \"T\": [], \"Global\": []}", "def construct_neighbouring_sites(sites: List[PolyhedralSite]) -> Dict[int, List[PolyhedralSite]]:\n neighbours: Dict[int, List[PolyhedralSite]] = {}\n for site_i in sites:\n neighbours[site_i.index] = []\n for site_j in sites:\n if site_i is site_j:\n continue\n # 3 or more common vertices indicated a shared face.\n n_shared_vertices = len(set(site_i.vertex_indices) & set(site_j.vertex_indices))\n if n_shared_vertices >= 3:\n neighbours[site_i.index].append(site_j)\n return neighbours", "def get_sites():\n sites = {}\n sites['JUQUEEN'] = {'name': 'JUQUEEN (JSC)', 'id': 'JUQUEEN', \n 'url': \"https://hbp-unic.fz-juelich.de:7112/HBP_JUQUEEN/rest/core\" }\n sites['JURECA'] = {'name': 'JURECA (JSC)', 'id': 'JURECA',\n 'url': \"https://hbp-unic.fz-juelich.de:7112/HBP_JURECA/rest/core\" }\n sites['MARCONI'] = {'name': 'MARCONI (CINECA)', 'id': 'MARCONI',\n 'url': \"https://grid.hpc.cineca.it:9111/CINECA-MARCONI/rest/core\" }\n sites['GALILEO'] = {'name': 'GALILEO (CINECA)', 'id': 'MARCONI',\n 'url': \"https://grid.hpc.cineca.it:9111/CINECA-GALILEO/rest/core\" }\n sites['DAINT-CSCS'] = {'name': 'PIZDAINT (CSCS)', 'id': 'CSCS',\n 'url': \"https://unicoregw.cscs.ch:8080/DAINT-CSCS/rest/core\" }\n return sites", "def get_hashes(self):\n\n hashes = {}\n try:\n cursor = self.connection.cursor()\n cursor.execute(\"SELECT MultiverseID, Hash FROM Hashes\")\n for row in cursor.fetchall():\n hashes[row[0]] = row[1]\n\n return hashes\n except sqlite3.Error, e:\n print(\"Error %s:\" % e.args[0])\n sys.exit(1)", "def hash_map(self):\n return self._hash_map", "def top_local_ham(H, n, isos_012):\n h1, h2 = H\n h1 = top_localop_1site(h1, n, isos_012)\n h2 = top_localop_2site(h2, n, isos_012)\n return (h1, h2)", "def create_oo_mdp_state_dict(self):\n oo_mdp_dict = dict()\n oo_mdp_dict['oo_mdp_to_flat_map'] = dict()\n oo_mdp_dict['flat_to_oo_mdp_map'] = [[] for flat_state in range(self.nS)]\n\n i_pass_in_taxi = len(self.PREDEFINED_LOCATIONS)\n\n for taxi_y in range(self.grid_size):\n for taxi_x in range(self.grid_size):\n for idx_pass in range(len(self.PREDEFINED_LOCATIONS)):\n for idx_dest in range(len(self.PREDEFINED_LOCATIONS)):\n for in_taxi in [False, True]:\n if in_taxi:\n # all combinations of passenger locations if passenger in taxi\n state_cond = self.set_state(taxi_y, taxi_x, i_pass_in_taxi,\n idx_dest, idx_pass)\n else:\n state_cond = self.set_state(taxi_y, taxi_x, idx_pass, idx_dest)\n\n oo_mdp_s_tuple = tuple(state_cond[0])\n flat_state = self.make_classical_MDP_state()\n\n oo_mdp_dict['oo_mdp_to_flat_map'][oo_mdp_s_tuple] = flat_state\n oo_mdp_dict['flat_to_oo_mdp_map'][flat_state].append(state_cond)\n return oo_mdp_dict", "def get_switch_states_dict(self):\n num_switches = self.opendss.num_switches\n switch_state = [0, 
1]\n switch_states = list(product(switch_state, repeat=num_switches))\n ss_list = [str(x).strip('()').replace(',', '').replace(' ', '') for x in switch_states]\n ss_dict = dict(zip(ss_list, range(len(ss_list))))\n return ss_dict", "def create_cameFrom(self):\n # TODO: return a data structure that shows which node can most efficiently be reached from another,\n # for each node.\n dic={}\n return dic", "def __init__(self, state, district_list, site_list):\n\n self.state = state\n self.district_list = district_list\n self.district_dict_sites = {}\n self.district_dict = {}\n self.district_dict[\"districts\"] = self.district_list\n for site in site_list:\n for ind in range(len(district_list)):\n if in_district(site[\"lat\"], site[\"lon\"], district_list[ind]):\n district_name = district_list[ind][\"district_name\"]\n if district_name in self.district_dict_sites:\n self.district_dict_sites[district_name].append(site)\n else:\n self.district_dict_sites[district_name] = [site]", "def partial_state_dict(self):\n return {\n \"degree\": self._degree,\n \"train_count\": self._train_count,\n \"train_window\": self._train_window,\n \"train_window_size\": self._train_window_size,\n \"eval_window\": self._eval_window,\n \"eval_window_size\": self._eval_window_size,\n \"past_window\": self._past_window,\n \"traverse_threshold\": self._traverse_threshold,\n \"total_successes\": self._total_successes,\n \"reward\": self._reward,\n \"state\": self._state,\n \"dead\": self._dead,\n \"life_lost\": self._life_lost,\n }", "def pollster_predictions(poll_rows):\n pp=nesteddict()\n s={}\n for state in unique_column_values(poll_rows,\"State\"):\n for pollster in unique_column_values(poll_rows,\"Pollster\"):\n s = most_recent_poll_row(poll_rows, pollster, state)\n if s:\n key = pollster\n key1 = state \n pp[key][key1]=row_to_edge(s)\n return pp", "def hashable_state(self):\n if self._hashable_state is None:\n state = OrderedDict()\n state['name'] = self.name\n state['edges_hash'] = self.edges_hash\n state['is_log'] = self.is_log\n state['is_lin'] = self.is_lin\n state['bin_names'] = self.bin_names\n self._hashable_state = state\n return self._hashable_state", "def get_table_info():\r\n table_dictionary = {}\r\n for page in IHS_pages:\r\n print(\"Loading... {}\".format(page))\r\n searchable = get_html(IHS_pages[page])\r\n try:\r\n search_result = replace_new_lines(re.search(\"Security[\\n]([\\d\\D]*)\", searchable).group(1))\r\n search_result = search_result.split('\\n')\r\n table_dictionary[page] = gather_risks(search_result)\r\n except AttributeError:\r\n table_dictionary[page] = {}\r\n return table_dictionary", "def get_machine_info():\n return {\n 'platform': system(),\n 'hostname': gethostname(),\n 'ip_address': gethostbyname(gethostname()),\n 'mac_address': ':'.join(findall('..', '%012x' % getnode())),\n }" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a bin number for the given probability value.
def binForProb(p):
    return int(p / 0.05)
[ "def probForBin(b):\n x = float(b * 5) / float(100)\n if x == 1.00:\n return x\n return x + 0.025", "def get_bin(self, n):\n return self.bins[n]", "def bin_index(self, x):\n return int((np.log(x / self.bin_start[0]) / np.log(self.ratio)))", "def binVal( self, val ):\n return int( math.floor( ( val - self.binShift ) / self.binSize ) )", "def get_prob(self, value):\n return self.get_prob(ValueFactory.create(float(value)))", "def bin_index(self, x):\n return int((x - self.bin_start[0]) / self.step)", "def valToBin(self,x):\n ibin = np.digitize(np.array(x,ndmin=1),self._edges)-1\n return ibin", "def stochastic_binarize(x, name=None):\n\n with ops.name_scope(name, 'Binarized', [x]) as name:\n # compute the probability to get +1.\n # probs is a tensor that has the same shape as x\n probs = tf.clip_by_value((x + 1) / 2, 0, 1)\n\n # create the Bernoulli distribution\n dist = tf.distributions.Bernoulli(probs=probs)\n\n # sample from the distribution.\n # samples should have the same shape as x.\n samples = dist.sample()\n\n bin_val = samples * 2 - 1 # turn 0 to -1, 1 to 1.\n\n return bin_val, probs", "def StatsBinomialDistribution(successNumber, totalTrials, probabilityOfSuccess):\n \n probabilityOfFailure = 1 - probabilityOfSuccess\n \n probability = StatsnCk(totalTrials, successNumber) * \\\n (probabilityOfSuccess ** successNumber) * \\\n (probabilityOfFailure ** (totalTrials - successNumber))\n \n return probability", "def sampler_binned_random(value, percentile):\n softmaxed = nn.functional.softmax(value[0], dim=1)\n samples = torch.tensor(\n np.array(\n list(\n torch.utils.data.WeightedRandomSampler(\n softmaxed, 10000)))).float()\n if percentile == 10000:\n percentile -= 1\n to_return = torch.sort(samples)[0][:, percentile]\n return to_return + torch.rand(to_return.shape)*.1", "def binary(prob: float, variable: str):\n return Discrete.from_probs(\n {0: 1 - prob, 1: prob}, variables=variable\n )", "def decimal_to_binary(n: int):\n return int(\"{0:b}\".format(n))", "def pvalue_binomial(fgMatches, fgSize, bgMatches, bgSize):\n if bgSize == 0 or fgSize == 0:\n raise RuntimeError('bgSize and fgSize must not be 0 for calculation!')\n bgRate = float(bgMatches) / float(bgSize)\n pvalue = scipy.stats.binom.sf(fgMatches, fgSize, bgRate)\n return pvalue", "def bin_probability(q_ranges, epsilon):\n return rp(q_ranges[0], epsilon)*rp(q_ranges[1], epsilon)/4", "def binomial_trial(n: int, p: float) -> int:\n return sum(bernoulli_trial(p) for _ in range(n))", "def val_to_bin(edges, x):\n ibin = np.digitize(np.array(x, ndmin=1), edges) - 1\n ibin[x > edges[-1]] = -1\n return ibin", "def int_to_bin(num, length):\n return \"{:b}\".format(num).zfill(length)", "def get_binning(hi):\n return [hi.GetBinLowEdge(i) for i in range(1, hi.GetNbinsX()+2)]", "def float_to_bin(value): # For testing.\n [d] = struct.unpack(\">Q\", struct.pack(\">d\", value))\n # return '{:064b}'.format(d)\n return bitarray('{:064b}'.format(d))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the probability value for the floor of the given bin number
def probForBin(b):
    x = float(b * 5) / float(100)
    if x == 1.00:
        return x
    return x + 0.025
[ "def binForProb(p):\n return int(p / 0.05)", "def _opacity_from_bin(bin, n_bins):\n if n_bins <= 0:\n return 0.35\n ratio = bin/float(n_bins)\n if ratio < 0.2:\n return 0.6\n elif ratio < 0.3:\n return 0.85\n elif ratio < 0.5:\n return 1.0\n return 1.0", "def bin_index(self, x):\n return int((np.log(x / self.bin_start[0]) / np.log(self.ratio)))", "def bin_coef(n,r):\n return factorial(n)/ (factorial(r)* factorial(n-r))", "def bin_probability(q_ranges, epsilon):\n return rp(q_ranges[0], epsilon)*rp(q_ranges[1], epsilon)/4", "def bin_index(self, x):\n return int((x - self.bin_start[0]) / self.step)", "def calculate_p_value_for_single_bin_poisson_counting_experiment(\n n_meas,\n n_bkg_expected,\n): \n poisson_probability_sum = 0\n ctr = 0\n n = n_meas\n while ctr <= 110:\n poisson_probability_sum += (n_bkg_expected**n)/(math.factorial(n)) *np.exp(-n_bkg_expected)\n n += 1\n ctr += 1\n \n return poisson_probability_sum", "def binVal( self, val ):\n return int( math.floor( ( val - self.binShift ) / self.binSize ) )", "def get_bin(self, n):\n return self.bins[n]", "def floor(n: float) -> int:\n return (int(n//1))", "def bottomfloor(p):\n return p == 1", "def sampler_binned_random(value, percentile):\n softmaxed = nn.functional.softmax(value[0], dim=1)\n samples = torch.tensor(\n np.array(\n list(\n torch.utils.data.WeightedRandomSampler(\n softmaxed, 10000)))).float()\n if percentile == 10000:\n percentile -= 1\n to_return = torch.sort(samples)[0][:, percentile]\n return to_return + torch.rand(to_return.shape)*.1", "def _bin_func(xvals, y_weight, bins):\r\n i_sort = xvals.argsort()\r\n r_sorted = xvals[i_sort]\r\n y_weight = y_weight[i_sort] if y_weight is not None else y_weight\r\n bin_inds = np.digitize(r_sorted,\r\n bins) - 1 # -1 to assure points between 0 and step are in bin 0 (the first)\r\n yvals = np.bincount(bin_inds, weights=y_weight, minlength=len(bins))\r\n if y_weight is not None:\r\n yvals /= np.bincount(bin_inds, minlength=len(bins))\r\n return np.nan_to_num(yvals)", "def ifloor(x):\n\n return np.floor(x).astype(int)", "def frac_below_threshold(hist, threshold):\n tbin = hist.FindBin(threshold)\n return hist.Integral(1, tbin)/hist.Integral()", "def get_binning(hi):\n return [hi.GetBinLowEdge(i) for i in range(1, hi.GetNbinsX()+2)]", "def frac_above_threshold(hist, threshold):\n tbin = hist.FindBin(threshold)\n return hist.Integral(tbin, hist.GetNbinsX())/hist.Integral()", "def entropy1d(x, binrule):\n p, be = np.histogram(x, bins=binrule, density=True)\n r = be[1:] - be[:-1]\n P = p * r\n H = -(P * np.log2(P)).sum()\n\n return H, be", "def binomial_trial(n: int, p: float) -> int:\n return sum(bernoulli_trial(p) for _ in range(n))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
By default the start/end are the boundaries of the provided sequence. But if motifs were provided, then we'll refine these boundaries.
def get_boundary_sites(seq, start_motif=None, end_motif=None):
    startsite = 1
    endsite = seq.__len__()
    if start_motif is not None:
        if start_motif.__len__() > 0:
            for i in range(0, seq.__len__()):
                # print "258:", i, seq[i], start_motif[0]
                if seq[i] == start_motif[0]:
                    here = ""
                    j = i
                    while here.__len__() < start_motif.__len__() and j < seq.__len__():
                        # print "262:", j, here
                        if seq[j] != "-":
                            here += seq[j]
                        j += 1
                    if here == start_motif:
                        startsite = i + 1
                        break
    if end_motif is not None:
        if end_motif.__len__() > 0:
            for i in range(i, seq.__len__()):
                if seq[i] == end_motif[0]:
                    here = ""
                    j = i
                    while here.__len__() < end_motif.__len__() and j < seq.__len__():
                        if seq[j] != "-":
                            here += seq[j]
                        j += 1
                    if here == end_motif:
                        endsite = j
                        break
    return [startsite, endsite]
[ "def find_breakpoint_variants(my_bg, ref, supercontig, start, end,\n min_overlap=70, max_anchors=10000, max_steps=100000,\n skip_ambiguous=False, buf_len=300):\n if start >= end:\n raise RuntimeError(\"start must be < end\")\n\n # find_ranges would need to behave differently all_variants = []\n fwd_start, rev_start = 0, 0\n fwd_end, rev_end = 0, 0\n if start + buf_len >= end - buf_len:\n mid = int((end + start) / 2)\n fwd_start = start - buf_len\n fwd_end = mid\n rev_start = mid\n rev_end = end + buf_len\n else:\n fwd_start = start - buf_len\n fwd_end = start + buf_len\n rev_start = end - buf_len\n rev_end = end + buf_len\n\n ref_range = ref.make_range(supercontig, fwd_start, rev_end)\n\n # this could also be fwd = fwd_start, fwd_end, False and rev = rev_start,\n # rev_end, True\n from biograph.internal import find_anchors, assemble\n fwd = find_anchors(my_bg, ref.make_range(\n supercontig, rev_start, rev_end), True, min_overlap, max_anchors)\n rev = find_anchors(my_bg, ref.make_range(\n supercontig, fwd_start, fwd_end), False, min_overlap, max_anchors)\n\n results = assemble(\n fwd, rev, min_overlap, max_steps, skip_ambiguous, my_bg.readmap)\n ref_range = ref.make_range(supercontig, start - buf_len, end + buf_len)\n # Add in reference coverage for anchors. Start with reference coverage.\n fixed_coverage = my_bg.seq_coverage(ref_range.sequence)\n\n # Add hom-reference object (no variants)\n if not results:\n # all_variants.append(Assembly(ref_range=ref_range, variants=[],\n # coverage=fixed_coverage))\n return Assembly(ref_range=ref_range, variants=[], coverage=fixed_coverage)\n\n # Coverage entries are of the format:\n # ['scaffold', position, [25,25,26,26,26...]]\n for cov in results[1]:\n if cov[0] != ref_range.scaffold:\n continue\n\n for i in range(len(cov[2])):\n mod_pos = cov[1] + i - ref_range.start\n if 0 <= mod_pos < ref_range.size:\n fixed_coverage[mod_pos] += cov[2][i]\n\n # all_variants.append(Assembly(ref_range=ref_range,\n # variants=sorted(results[0]), coverage=fixed_coverage))\n return Assembly(ref_range=ref_range, variants=sorted(results[0]), coverage=fixed_coverage)", "def split_region_and_fix_sequences(self, start, end):\n pass", "def motif(X, pattern, *, start=1, end=None):\n \n # input handling\n X = check_input(X)\n\n ## convert motif to regex pattern ##\n # replace \"any\"\n pattern = pattern.replace('x', '.')\n\n # replace \"either or\"\n brackets = re.findall(r'\\[([A-Z]+)\\]', pattern)\n if brackets:\n for rep in brackets:\n s = re.sub(r'([A-Z])(?!$)', r'\\1|', rep)\n s = '(?:' + s + ')'\n pattern = re.sub(rep, s, pattern)\n\n # remove brackets \n pattern = pattern.replace('[', '')\n pattern = pattern.replace(']', '')\n\n # replace \"except\"\n pattern = pattern.replace('{', '[^')\n pattern = pattern.replace('}', ']')\n \n ## compute binary vector of motif presence\n arr = np.zeros((len(X),))\n for i, seq in enumerate(X):\n check_alpha(seq) # check if alphabetical \n seq = seq[start-1:end] # positional information\n present = re.findall(r'{}'.format(pattern), seq)\n if present:\n arr[i] = 1\n \n return arr", "def __init__(self, startseq, seq2mutate, maxmut):\n\n self.startseq = startseq\n self.maxmut = maxmut\n self.seq2mutate = seq2mutate\n self.forward = []\n self.backward = []\n self.aa = 0\n self.break_here = False\n self.backmutate = np.sum(np.abs(seq2mutate[:20, :] - startseq[:20, :]))/2 >= maxmut\n\n if self.backmutate:\n self.new_tobackmutate()\n self.new_tomutate()", "def _set_seq(self,sequence,start=0):\n if start+len(sequence) > self._slen: \n 
sys.stderr.write(\"Error not long enough to add\\n\")\n sys.exit()\n z = 0\n for i in xrange(start, start+len(sequence)):\n self._set_nt(sequence[z],i)\n z+=1", "def boundFinder(operon1, operon2, readsList):\n WINDOWWIDTH = 25\n\n upGene = operon1[-1]\n downGene = operon2[0]\n \n leftEdge1 = upGene.getEnd() - 200\n leftEdge2 = (upGene.getStart() + upGene.getEnd() )/ 2\n leftEdge = max(leftEdge1, leftEdge2)\n rightEdge1 = downGene.getStart() + 200\n rightEdge2 = (downGene.getStart() + downGene.getEnd()) / 2\n rightEdge = min(rightEdge1, rightEdge2)\n midPoint = (upGene.getEnd() + downGene.getStart()) / 2\n workSpace = Sequence(readsList, (leftEdge, rightEdge))\n breakPoint = workSpace.getMinReadLocation()\n\n if len(workSpace) <= WINDOWWIDTH: \n operon1.setRightBound(breakPoint)\n operon2.setLeftBound(breakPoint)\n else:\n refCV = min(upGene.getLogCV(), downGene.getLogCV())\n space1 = Sequence(readsList, (leftEdge, breakPoint))\n space2 = Sequence(readsList, (breakPoint, rightEdge))\n # Determine the turning point in the left space: \n if len(space1) <= WINDOWWIDTH:\n maxWindow1 = space1\n else:\n start1 = space1.getStart()\n maxWindow1 = Sequence(readsList, (start1, start1 + WINDOWWIDTH)) \n while start1 + WINDOWWIDTH < space1.getEnd():\n window1 = Sequence(readsList, (start1, start1 + WINDOWWIDTH))\n if maxWindow1.getLogCV() < window1.getLogCV() and \\\n window1.getReads()[0] > window1.getReads()[-1]:\n maxWindow1 = window1\n start1 += 1\n if maxWindow1.getLogCV() >= refCV * 2:\n turnPoint1 = maxWindow1.getMinReadLocation()\n else:\n turnPoint1 = -1\n \n # Determine the turning point in the right space: \n if len(space2) <= WINDOWWIDTH:\n maxWindow2 = space2\n else:\n start2 = space2.getStart()\n maxWindow2 = Sequence(readsList, (start2, start2 + WINDOWWIDTH))\n while start2 + WINDOWWIDTH < space2.getEnd():\n window2 = Sequence(readsList, (start2, start2 + WINDOWWIDTH))\n if maxWindow2.getLogCV() < window2.getLogCV() and \\\n window2.getReads()[0] < window2.getReads()[-1]:\n maxWindow2 = window2\n start2 += 1\n if maxWindow2.getLogCV() >= refCV * 2:\n turnPoint2 = maxWindow2.getRightMinReadLocation()\n else:\n turnPoint2 = -1\n \n \n # Assign turning points to each space.\n if turnPoint1 > 0 and turnPoint2 > 0:\n operon1.setRightBound(turnPoint1)\n operon1.setRightBoundPrecision(True)\n operon2.setLeftBound(turnPoint2)\n operon2.setLeftBoundPrecision(True)\n \n elif turnPoint1 > 0 and turnPoint2 < 0:\n operon1.setRightBound(turnPoint1)\n operon1.setRightBoundPrecision(True)\n operon2.setLeftBound(turnPoint1+1)\n operon2.setLeftBoundPrecision(False)\n \n elif turnPoint1 < 0 and turnPoint2 > 0:\n operon1.setRightBound(turnPoint2-1)\n operon1.setRightBoundPrecision(False)\n operon2.setLeftBound(turnPoint2)\n operon2.setLeftBoundPrecision(True)\n \n else:\n operon1.setRightBound(midPoint)\n operon1.setRightBoundPrecision(False)\n operon2.setLeftBound(midPoint+1)\n operon2.setLeftBoundPrecision(False)", "def _welch_seg_bounds(pos, l_seg, p_overlap):\n step = l_seg - p_overlap * l_seg\n starts = np.arange(pos.iloc[0], pos.iloc[-1], step)\n ends = np.arange(pos.iloc[0]+l_seg, pos.iloc[-1], step)\n ends[-1] = pos.iloc[-1]\n\n return starts, ends", "def to_mrna(seq):\n start_codon = \"AUG\"\n stop = [\"UAG\", \"UGA\", \"UAA\"]\n start_positions = []\n final_mrnas = []\n i = 0\n while i < len(seq) - 2:\n if seq[i:i+3] == start_codon: # At start codon\n start_positions.append(i)\n i += 3\n\n for pos in start_positions:\n mrna = \"\"\n i = pos\n is_orf = True\n while i < (len(seq)-2) and is_orf:\n if 
seq[i:i+3] in stop: # Stop codon reached\n is_orf = False\n final_mrnas.append(mrna)\n else:\n mrna += seq[i:i+3]\n i += 3\n\n return final_mrnas", "def _setup_frame_range(frame_range, seq):\n seq_start = seq.start_frame()\n seq_end = seq.end_frame()\n user_frame_start = seq_start\n user_frame_end = seq_end\n\n # first see if we have multiple sequences, if not proceed\n if not frame_range == \"N/A\":\n # get the user's input for frame range\n try:\n temp_start = re.search(r'\\d+', frame_range).group()\n temp_end = re.findall(r'\\d+', frame_range)[-1]\n except (ValueError, IndexError, AttributeError, TypeError) as e:\n error_msg = \"Problem with frame range {0}. Error is {1}\".format(frame_range, e)\n logger.exception(error_msg)\n return None, None, error_msg\n\n # make frame objects\n try:\n temp_path_start = seq[0].path.replace(str(seq_start), temp_start)\n user_frame_start = AniFrame(temp_path_start)\n temp_path_end = seq[-1].path.replace(str(seq_end), temp_end)\n user_frame_end = AniFrame(temp_path_end)\n except (IndexError, ValueError, AniFrameError, TypeError) as e:\n error_msg = \"Problem with frame range {0}. Error is {1}\".format(frame_range, e)\n logger.exception(error_msg)\n return None, None, error_msg\n\n logger.info(\n \"Should be AniFrame Objects : user frame start {0}, user frame end {1}\".format\n (\n user_frame_start,\n user_frame_end\n )\n )\n return user_frame_start, user_frame_end, None", "def distance_to_stop(sequence, start):\r\n\r\n counter = 0\r\n for i in list(range(start, int(len(sequence)/3))): #\r\n\r\n if codon2aa[sequence[i*3:(i+1)*3]] == \"*\": # does it handle ambiguities?\r\n break\r\n\r\n else:\r\n counter += 1\r\n continue\r\n\r\n return counter", "def adjust_intron_position(cls, tgs_read, intron):\n if tgs_read.reference_name != intron.chrom:\n return None\n if tgs_read.is_reverse ^ (intron.strand == \"-\"):\n return None\n if (tgs_read.reference_start > intron.end) or (tgs_read.reference_end < intron.start):\n return None\n new_blocks = list()\n blocks = tgs_read.blocks\n for indx, block in enumerate(blocks):\n this_block = list(block)\n if indx == (len(blocks) - 1):\n if this_block[0] < intron.start < this_block[1]:\n this_block = [this_block[0], intron.start]\n else:\n if this_block[0] < intron.start < blocks[indx + 1][0]:\n this_block = [this_block[0], intron.start]\n\n if indx == 0:\n if this_block[0] < intron.end < this_block[1]:\n this_block = [intron.end, this_block[1]]\n else:\n if blocks[indx - 1][1] < intron.end < this_block[1]:\n this_block = [intron.end, this_block[1]]\n new_blocks.append(tuple(this_block))\n tgs_read.cigar = cls.blocks2cigar(new_blocks)\n return None", "def ParseInterfaceRanges(self):\n ranges = Session.ExecCommand(\"show configuration interfaces | display set | match interface-range\")\n for line in [l.lower().strip() for l in ranges.splitlines()] :\n try:\n words = line.split(\" \")\n if \"interface-range\" in line :\n if \" member-range \" in line :\n # line is like : set interfaces interface-range WORKSTATION-IP-PHONE member-range ge-0/0/0 to ge-0/0/41\n # add ranges\n rangeName = words[3]\n fromInterfaceName = words[5]\n toInterfaceName = words[7]\n # find if already a defined range\n foundRange = next((ir for ir in self.InterfaceRanges if ir.rangeName == rangeName), None)\n if foundRange != None : \n foundRange.AddInterfaceSpan(fromInterfaceName, toInterfaceName)\n else:\n newRange = InterfaceRange(rangeName)\n newRange.AddInterfaceSpan(fromInterfaceName, toInterfaceName)\n self.InterfaceRanges.append(newRange) \n 
elif \" member \" in line :\n # line is like : set interfaces interface-range WORKSTATION-IP-PHONE member ge-0/0/0\n # add ranges\n rangeName = words[3]\n fromInterfaceName = words[5]\n toInterfaceName = words[5]\n # find if already a defined range\n foundRange = next((ir for ir in self.InterfaceRanges if ir.rangeName == rangeName), None)\n if foundRange != None : \n foundRange.AddInterfaceSpan(fromInterfaceName, toInterfaceName)\n else:\n newRange = InterfaceRange(rangeName)\n newRange.AddInterfaceSpan(fromInterfaceName, toInterfaceName)\n self.InterfaceRanges.append(newRange) \n else :\n rangeName = words[3]\n # find a defined range (should aready be in the list)\n foundRange = next((ir for ir in self.InterfaceRanges if ir.rangeName == rangeName), None)\n if foundRange != None : \n # set interface properties for ranges\n if \"interface-mode\" in line :\n # line is like : set interfaces interface-range WORKSTATION-IP-PHONE unit 0 family ethernet-switching interface-mode access\n foundRange.portMode = words[len(words) - 1] \n elif \"port-mode\" in line :\n # line is like : set interfaces interface-range WORKSTATION-IP-PHONE unit 0 family ethernet-switching interface-mode access\n foundRange.portMode = words[len(words) - 1] \n elif \"vlan members\" in line :\n # line is like : set interfaces interface-range WORKSTATION-IP-PHONE unit 0 family ethernet-switching vlan members Corp-Access\n foundRange.vlanMembers.append(words[len(words) - 1])\n else:\n raise Exception(\"Interface range name <{0}> definition is missing\".format(rangeName))\n \n except Exception as Ex:\n message = \"JunOS Router Module Error : could not parse an interface range for line <{0}>. Error is : {1} \".format(line, str(Ex))\n DebugEx.WriteLine(message) \n \n pass", "def get_start_and_end_positions(pos, ref, alt):\n\n pos = int(pos)\n\n if len(ref) == len(alt):\n start, end = pos, pos + len(alt) - 1\n\n elif len(ref) < len(alt):\n start, end = pos, pos + 1\n\n else: # len(alt) < len(ref)\n start, end = pos + 1, pos + len(ref) - len(alt)\n\n return start, end", "def fit_next_segment( start, end_st, end_heads, thresh, pixel_done,\n cr_flag_2d, data_section, mask_2d, one_by_var,\n m_by_var ):\n\n nreads, asize2, asize1 = data_section.shape \n all_pix = np.arange( asize2*asize1 ) \n\n slope, variance, resid, var_den_0 = fit_lines( data_section, mask_2d,\n end_heads, end_st, start) \n \n # calculate the differences of the residuals for each pixel\n resid_diff = resid[1:,:] - resid[:-1,:] \n\n # get the index and value for the max resid_diff for all pixels\n ind_max_per_pixel = resid_diff.argmax( axis = 0 )\n max_rd_all_pix = resid_diff.max( axis = 0 ) \n\n end_locs = end_st[ end_heads[ all_pix ]-1, all_pix ] \n l_interval = end_locs - start # fitting interval length\n \n wh_done = ( start == -1) \n l_interval[ wh_done ] = 0 # set interval lengths for done pixels to 0 \n\n # CASE 1 - interval too short to fit well, not at array end (any thresh )\n # set start to 1 beyond end of current interval\n # remove current end from end stack\n # decrement number of ends \n wh_check = np.where(( l_interval <= 2) & (end_locs != nreads-1))\n \n if( len( wh_check[0]) > 0 ):\n these_pix = wh_check[0] \n start[ these_pix ] = end_locs[ these_pix ] + 1 \n end_st[ end_heads[ these_pix ]-1, these_pix ] = 0 \n end_heads[ these_pix ] -= 1 \n\n wh_neg = ( end_heads < 0. ) \n end_heads[ wh_neg ] = 0. 
\n\n # CASE 2 - interval too short to fit well, at end of array\n # set start to -1 to designate all fitting done\n # remove current end from end stack\n # set number of ends to 0 \n # set pixel_done to True to designate all fitting done\n wh_check = np.where(( l_interval <= 2) & (end_locs == nreads-1))\n \n if( len( wh_check[0]) > 0 ):\n these_pix = wh_check[0] \n start[ these_pix ] = -1\n end_st[ end_heads[ these_pix ]-1, these_pix ] = 0 \n end_heads[ these_pix ] = 0\n pixel_done[ these_pix ] = True\n\n # CASE 3 - interval long enough, not at end of array, below thresh \n # remove current end from end stack\n # decrement number of ends \n # add slopes and variances to running sums\n wh_check = np.where(( l_interval > 2) & (end_locs != nreads-1) & \n (max_rd_all_pix <= thresh )) \n\n if( len(wh_check[0]) > 0 ): \n these_pix = wh_check[0] \n start[ these_pix ] = end_locs[ these_pix ] + 1 \n end_st[end_heads[ these_pix ]-1, these_pix ] = 0 \n end_heads[ these_pix ] -= 1\n wh_neg = ( end_heads < 0. )\n end_heads[ wh_neg ] = 0. \n \n good_pix = these_pix[ variance[these_pix] > 0. ] \n if ( len(good_pix ) > 0 ):\n one_by_var[ good_pix ] += 1.0/variance[ good_pix ]\n m_by_var[ good_pix ] += slope[ good_pix ]/variance[ good_pix ]\n\n # CASE 4 - interval long enough, at end of array, below thresh\n # set start to -1 to designate all fitting done\n # remove current end from end stack\n # set number of ends to 0 \n # add slopes and variances to running sums\n wh_check = np.where(( l_interval > 2) & (end_locs == nreads-1) & \n (max_rd_all_pix <= thresh )) \n\n if( len( wh_check[0]) > 0 ):\n these_pix = wh_check[0] \n start[ these_pix ] = -1 # all processing for this pixel is completed\n end_st[ end_heads[ these_pix ]-1, these_pix ] = 0 \n end_heads[ these_pix ] = 0\n pixel_done[ these_pix ] = True # all processing for pixel is completed\n \n good_pix = these_pix[ variance[these_pix] > 0. 
] \n if ( len(good_pix ) > 0 ):\n one_by_var[ good_pix ] += 1.0/variance[ good_pix ]\n m_by_var[ good_pix ] += slope[ good_pix ]/variance[ good_pix ]\n \n # CASE 5 - interval long enough, above thresh, at or not at end\n # set new end at location of largest difference of residuals (new CR) \n # increment number of ends \n # add location of this newly found CR to cr_flag_2d\n wh_check = np.where(( l_interval > 2) & (max_rd_all_pix > thresh )) \n if( len( wh_check[0]) > 0 ):\n these_pix = wh_check[0] \n end_st[ end_heads[ these_pix ], these_pix ] = \\\n ind_max_per_pixel[ these_pix ] \n end_heads[ these_pix ] += 1 \n cr_amp = end_st[ end_heads[these_pix]-1,these_pix] + 1 \n cr_flag_2d[[cr_amp], [these_pix] ] = 1\n\n return var_den_0", "def IRs(seq, verbose=False):\n start = str(seq[:IR_WINDOW])\n end = str(seq[-IR_WINDOW:].reverse_complement())\n\n #aln = pairwise2.align.localms(start, end, 1, -20, -5, -2)\n aln = myalign(start, end)\n\n if (aln[2] < MIN_IR_SCORE_CHANGE):\n # try a close alignment with a lower penalty- one that doesn't move\n # based on the alignment, and accepts only an exact match\n close_aln = myalign(start[:IR_WINDOW_NONCHANGE],\n end[:IR_WINDOW_NONCHANGE], mismatch_score_num=-1)\n\n if (close_aln[2] < MIN_IR_SCORE_NONCHANGE or\n close_index(start, close_aln[0]) != 0 or\n close_index(end, close_aln[1]) != 0):\n # no alignment near or far\n return False, False, 0, 0, 0\n return close_aln[0], close_aln[1], 0, 0, close_aln[2]\n\n lin, rin = close_index(start, aln[0]), -close_index(end, aln[1])\n\n return aln[0], aln[1], lin, rin, aln[2]", "def _GetStartAndEnd(match, cut_points, length_per_snippet):\n max_length = cut_points[-1] if cut_points else 0\n match_start = match.start() if match else 0\n match_end = match.end() if match else 0\n\n # Get start cut point.\n start = 0\n if match_start > .5 * length_per_snippet:\n # Get first point within 1/2 * length_per_snippet chars of term.\n for c in cut_points:\n if c >= match_start - (.5 * length_per_snippet) and c < match_start:\n start = c + 1\n break # The cut points are already sorted, so first = min.\n # If no cut points, just start 1/2 the desired length back or at 0.\n start = int(max(match_start - (.5 * length_per_snippet), start))\n\n # Get end cut point.\n # Must be after term but within desired distance of start.\n end = match_end\n # Look for last cut point in this interval\n for c in cut_points:\n if end < c <= start + length_per_snippet:\n end = c\n elif c > start + length_per_snippet:\n break # the list was sorted, so last = max.\n # If no cut points, just cut at the exact desired length or at the end,\n # whichever comes first.\n if end == match_end:\n end = max(min(max_length, start + length_per_snippet), end)\n\n # If cutting at the end, update start so we get the maximum length snippet.\n # Look for the first cut point within length_of_snippet of the end.\n if end == max_length:\n for c in cut_points:\n if end - c <= (length_per_snippet + 1) and c < start:\n start = c + 1\n break\n return TextSlice(start, end)", "def __padded_sequences(self, sequence):\n start, end = [Markov.start], [Markov.end]\n yield list(chain(start, sequence, end))\n yield list(chain(end, reversed(sequence), start))", "def test_get_position(self):\n iv1 = GenomicSegment(\"chrA\", 100, 150, \"+\")\n iv2 = GenomicSegment(\"chrA\", 150, 200, \"+\")\n iv3 = GenomicSegment(\"chrA\", 250, 350, \"+\")\n\n mask = GenomicSegment(\"chrA\", 50, 125, \"+\")\n non_overlap_mask = GenomicSegment(\"chrA\", 400, 500, \"+\")\n\n ivc1 = SegmentChain(iv1, 
iv2, iv3)\n ivc2 = SegmentChain(iv1, iv2, iv3)\n\n whole_position_list = list(range(100, 150)) + list(range(150, 200)) + list(range(250, 350))\n whole_position_set = set(whole_position_list)\n masked_set = whole_position_set - set(range(50, 125))\n\n pre_unmask_list = list(ivc1.get_position_list())\n pre_unmask_set = ivc1.get_position_set()\n pre_unmask_valid_set = ivc1.get_masked_position_set()\n\n self.assertEquals(pre_unmask_list, whole_position_list)\n self.assertEquals(pre_unmask_set, whole_position_set)\n self.assertEquals(pre_unmask_valid_set, whole_position_set)\n\n # add real mask\n ivc1.add_masks(mask)\n\n post_unmask_list = list(ivc1.get_position_list())\n post_unmask_set = ivc1.get_position_set()\n post_unmask_valid_set = ivc1.get_masked_position_set()\n\n self.assertEquals(post_unmask_list, whole_position_list)\n self.assertEquals(post_unmask_set, whole_position_set)\n self.assertEquals(post_unmask_valid_set, masked_set)\n\n # add non-overlapping mask\n pre_unmask_list = list(ivc2.get_position_list())\n pre_unmask_set = ivc2.get_position_set()\n pre_unmask_valid_set = ivc2.get_masked_position_set()\n\n self.assertEquals(pre_unmask_list, whole_position_list)\n self.assertEquals(pre_unmask_set, whole_position_set)\n self.assertEquals(pre_unmask_valid_set, whole_position_set)\n\n ivc2.add_masks(non_overlap_mask)\n post_unmask_list = list(ivc2.get_position_list())\n post_unmask_set = ivc2.get_position_set()\n post_unmask_valid_set = ivc2.get_masked_position_set()\n\n self.assertEquals(post_unmask_list, whole_position_list)\n self.assertEquals(post_unmask_set, whole_position_set)\n self.assertEquals(post_unmask_valid_set, whole_position_set)", "def _check_array_range(istart, iend, npts):\n istart = int(istart if istart<iend else iend)\n iend = int(istart if istart>iend else iend)\n\n istart = istart if istart>0 else 0\n iend = iend if iend<=npts else npts\n if istart == iend: \n # ensure that istart and iend are not the same\n if istart > 0:\n istart = istart-1\n else:\n iend = iend+1\n\n return istart, iend" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Maps the codon sequence to the aligned (may contain indels) aa seq.
def align_codon_to_aaseq(con, aaseq, codonseq):
    # ret is the returned aligned codon sequence.
    ret = ""

    """Quick sanity check: do we have exactly 3x more nucleotides than amino acids?"""
    aa_no_indels = re.sub("-", "", aaseq)
    nt_no_indels = re.sub("-", "", codonseq)

    """Remove stop codon in the nt sequence."""
    if nt_no_indels.endswith("TAG") or nt_no_indels.endswith("TAA") or nt_no_indels.endswith("TGA"):
        nt_no_indels = nt_no_indels[0: nt_no_indels.__len__() - 3]

    if float(aa_no_indels.__len__()) != float(nt_no_indels.__len__()) / 3.0:
        write_error(con, "The nt and aa sequence don't match.")
        print aa_no_indels.__len__(), codonseq.__len__()
        print aa_no_indels
        print nt_no_indels
        return None

    """Map the codons onto the aa sequence."""
    ntptr = 0
    for ii in range(0, aaseq.__len__()):
        codon = None
        if aaseq[ii] == "-":
            codon = "---"
        else:
            codon = nt_no_indels[ntptr: ntptr + 3]
            ntptr += 3
        ret += codon
    return ret
[ "def translateSequence(seq):\n aa = ''\n for i in xrange(0, len(seq), 3):\n aa += codonToAminoAcid(seq[i:i+3])\n return aa", "def align_sequences(dic):\n\n\t##Function to calculate the percentaje of identity inside the alignment function\n\tdef calculate_identity(seqA, seqB):\n\t\t\"\"\"\n\t\tReturns the precentage of identical characters between two sequences.\n\t\tAssumes the sequences are aligned.\n\t\t\"\"\"\n\t\tsa, sb, sl = seqA, seqB, len(seqA)\n\t\tmatches = [sa[i] == sb[i] for i in range(sl)]\n\t\tseq_id = (100 * sum(matches)) / sl\n\n\t\tgapless_sl = sum([1 for i in range(sl) if (sa[i] != '-' and sb[i] != '-')])\n\t\tgap_id = (100 * sum(matches)) / gapless_sl\n \n\t\treturn (seq_id, gap_id)\n\n #Return to main function\n\tppb = CaPPBuilder() #create sequence with Ca-Ca \n\tn = len(dic)\n\tmatrix = matlist.blosum62\n\tgap_open = -10.0\n\tgap_extend = -0.5\n\tbest_aln = []\n\tall_aln = []\n\n\t#Select sequences to align\n\tfor i in range(n-1):\n\t\tch1 = dic[i].get_id()\n\t\tpp1 = ppb.build_peptides(dic[i])\n\t\tid1 = \"%s-%d\" %(ch1, i)\n\t\tseq1 = pp1[0].get_sequence()\n\t\t\n\t\tfor j in range(i+1,n):\n\t\t\tch2 = dic[j].get_id()\n\t\t\tpp2 = ppb.build_peptides(dic[j])\n\t\t\tid2 = \"%s-%d\" %(ch2, j)\n\t\t\tseq2 = pp2[0].get_sequence()\t\n\n\t\t\t#Align sequence\n\t\t\talns = pairwise2.align.globalds(seq1, seq2, \n\t\t\t\t\t\t\t\t\t\t\tmatrix, gap_open, gap_extend,\n\t\t\t\t\t\t\t\t\t\t\tpenalize_end_gaps = (False, False))\n\t\t\ttop_aln = alns[0]\n\t\t\tal1, al2, score, begin,end = top_aln\n\n\t\t\t#Calculate sequence identity\n\t\t\tseq_id, g_seq_id = calculate_identity(al1, al2)\n\n\t\t\tall_aln.append((id1, id2, seq_id))\n\n\t\t\tif seq_id > 99:\n\t\t\t\tbest_aln.append((id1,id2))\n\n\treturn best_aln, all_aln", "def translate(seq):\n \n #translation table of codons to amino acids\n # _ underscores are nature's stop codons.\n table = {\n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',\n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',\n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',\n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',\n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',\n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',\n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',\n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',\n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',\n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',\n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',\n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',\n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',\n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',\n 'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_',\n 'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W',\n }\n \n #The protein is a sequence of amino acids\n protein = \"\"\n \n # Check that the length of the string is divisible by 3\n if len(seq) % 3 == 0:\n # Valid sequence - proceed\n # Loop over the sequence\n for i in range(0, len(seq), 3):\n \n # Extract a single codon (3-letter string)\n codon = seq[i:i+3]\n \n # Look up each codon (3-letter string) and store the result\n # Concatenating to generate an amino acid sequence\n protein += table[codon]\n else:\n pass\n \n\n return protein;", "def alignment(self):\n if self._alignment is None:\n if self._map is None:\n if self._columns is not None:\n self.__map_columns()\n else:\n self._map = self._align(self._graph)\n self._refine_each()\n if self._refinements:\n self._refine()\n assert self._map.shape[1] > 0, \"Alignment has no columns\"\n records = deepcopy(self._records)\n for i, record in enumerate(records):\n seq = record.seq\n aligned_seq = []\n map = 
self._map[i]\n index = 0\n for symbol in map:\n if symbol:\n aligned_seq.append(seq[index])\n index += 1\n else:\n aligned_seq.append(SPACE)\n record.seq = Seq(\"\".join(aligned_seq), GAPPED_ALPHABET)\n self._alignment = tomsa(records)\n return self._alignment", "def convert2aa(sequence):\r\n\r\n # sequence = \"\".join([x.upper() for x in sequence]) # converts lowercase to uppercase\r\n\r\n number_of_codons = len(sequence)/3\r\n aa_seq = []\r\n\r\n for nmbr in list(range(1, int(number_of_codons)+1)): # goes through each codon converting it to an aa\r\n\r\n if \"\".join([x.upper() for x in sequence])[nmbr*3-3:nmbr*3] in codon2aa:\r\n aa_seq.append(codon2aa[\"\".join([x.upper() for x in sequence])[nmbr*3-3:nmbr*3]])\r\n else:\r\n aa_seq.append(\"XXX\")\r\n\r\n return \"\".join(aa_seq)", "def __add_seq_alignment(self, alignment: MyAlign, seq: Seq):\n res = [\"\" for i in range(len(alignment.get_seqs())+1)]\n\n # create consensus from given alignments\n cons = BioSeq.create_bio_seq(\n alignment.consensus(), alignment.get_align_type())\n\n align2 = MyAlign.align_from_global_alignment(\n cons, seq, *self.pair_align_data)\n\n orig = 0\n for i in range(len(align2)):\n if align2[0, i] == \"-\":\n for k in range(len(alignment.get_seqs())):\n res[k] += \"-\"\n else:\n for k in range(len(alignment.get_seqs())):\n res[k] += alignment[k, orig]\n orig += 1\n\n res[len(alignment.get_seqs())] = align2.get_seqs()[1]\n\n return MyAlign(res, alignment.get_align_type())", "def interSequence(seq_maps):\n seq_map = {}\n \n return seq_map", "def align_contigs(scaffold, contigs_data, contigs_seq):\n\n #print \"scaffold:\", scaffold\n #print \"contigs_data:\", contigs_data\n #print \"contigs_seq:\", contigs_seq\n\n scaffold_list = list(scaffold)\n for cd in contigs_data:\n remapped_Ns = 0\n #print cd\n\n sequence = contigs_seq[cd[\"contig_id\"]]\n pos_initial = cd[\"contig_pos_initial\"]\n pos_final = cd[\"contig_pos_final\"]\n orientation = cd[\"orientation\"]\n\n if orientation == '+':\n #print \"orientacion +\"\n contig_position = len(sequence)-1\n scaffold_position = pos_initial + pos_final - 1\n while scaffold_position > pos_initial:\n if sequence[contig_position] == \"N\":\n scaffold_list[scaffold_position] = \"N\"\n remapped_Ns += 1\n contig_position -= 1\n scaffold_position -= 1\n\n elif orientation == '-':\n #print \"orientacion -\"\n contig_position = 0\n scaffold_position = pos_initial + pos_final - 1\n while scaffold_position > pos_initial: \n if sequence[contig_position] == \"N\":\n scaffold_list[scaffold_position] = \"N\"\n remapped_Ns += 1\n scaffold_position -= 1\n contig_position += 1\n\n return \"\".join(scaffold_list)", "def translate(seq):\n\n table = {\n 'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',\n 'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',\n 'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',\n 'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',\n 'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L',\n 'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',\n 'CAC': 'H', 'CAT': 'H', 'CAA': 'Q', 'CAG': 'Q',\n 'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',\n 'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V',\n 'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',\n 'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E',\n 'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G',\n 'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S',\n 'TTC': 'F', 'TTT': 'F', 'TTA': 'L', 'TTG': 'L',\n 'TAC': 'Y', 'TAT': 'Y', 'TAA': '_', 'TAG': '_',\n 'TGC': 'C', 'TGT': 'C', 'TGA': '_', 'TGG': 'W',\n }\n\n seq_len = len(seq)\n if seq_len % 3 == 0:\n 
protein = \"\"\n for i in range(0, seq_len, 3):\n codon = seq[i: i+3]\n protein += table[codon]\n return protein\n else:\n return f\"Invalid Input Sequence, len = {seq_len}\"", "def align(self, sequences):\n seqs = [copy.deepcopy(s) for s in sequences]\n c = seqs[0]\n aligned = [c]\n klass = c.__class__\n with tqdm(total=len(seqs)-1) as pbar:\n for s in seqs[1:]:\n score, traceback = c.global_align_multiple_solutions(s, self.sm, self.g)\n c, s = next(c.recover_global_align_multiple_solutions(s, traceback))\n aligned = self.update_aligned_with_gaps(aligned, c)\n aligned.append(klass(s)) # add temp alignments to the list of processed\n c = self.consensus(aligned + [s], klass)\n pbar.update()\n return c, aligned", "def genome_gen_align(genome, gen):\n return genome.get_name(), gen.get_name(), genome.local_global_align(gen, gen_poses[gen.get_name()])", "def align_nucleotides():\n nucleotides = make_dict_records(\"./data/download/all_nucleotide\")\n proteins = make_dict_records(\"./data/download/aligned_prot\")\n nuc2prot_acc = get_nuc2prot()\n aligned_nucleotides = []\n for nuc_acc, nucleotide in nucleotides.iteritems():\n protein = proteins[nuc2prot_acc[nuc_acc]]\n prot_unaligned = regex.sub(r\"[-\\.]\", \"\", str(protein.seq))\n prot_transl = str(nucleotide.seq.translate())\n #Checking if frames are right\n nuc_seq = str(nucleotide.seq)\n protein_seq = str(protein.seq)\n codons = [nuc_seq[i: i+3] for i in xrange(0, len(nuc_seq), 3)]\n aligned_seq = []\n for position in protein_seq:\n if position == \".\" or position == \"-\":\n aligned_seq += [\"---\"]\n else:\n if codons: \n codon = codons.pop(0)\n aligned_seq += codon\n else:\n aligned_seq += [\"---\"]\n nucleotide.seq = Seq(\"\".join(aligned_seq))\n aligned_nucleotides += [nucleotide]\n if not prot_transl.upper()[1:-1] in prot_unaligned.upper():\n print \"Incorrect frame!\"\n raise\n write_fasta(\"./data/download/aligned_nucleotides\", aligned_nucleotides)", "def _make_mapping_from_seq(self):\n mapping = []\n for residue in self.seq:\n mapping.append((residue, ATOM_MAP_14[residue]))\n return mapping", "def _seq_from_struct(self):\n seq = []\n ch = self.structure[0][0][4]\n fasta = ''\n for atom in self.structure[0]:\n if atom[2] == ' CA ':\n if atom[4] == ch:\n fasta += AA_code(atom[3])\n else:\n seq.append(fasta)\n ch = atom[4]\n fasta = AA_code(atom[3])\n seq.append(fasta)\n return seq", "def generate_pfam_aligned_codons(pfam_id):\n _log.info(\"Started a meta-domain based on the alignment of all '\"+pfam_id+\"' Pfam domains in the human genome\")\n start_time = time.clock()\n \n # the consensus length \n consensus_length = 0\n # the meta_domain that is to be returned\n meta_codons_per_consensus_pos = {}\n # the mapping of the protein {protein_id: {protein_posistion: consensus_position}}\n consensus_pos_per_protein = {}\n # the amount of domain occurrences found\n n_instances = 0 \n \n # retrieve the alignment\n hmmeralign_output = interpret_hmm_alignment_file(METADOMAIN_DIR+pfam_id+'/'+METADOMAIN_ALIGNMENT_FILE_NAME) \n if not len (hmmeralign_output) == 0:\n #update the consensus length\n consensus_length = len(hmmeralign_output['consensus']['sequence'])\n \n # update the number of instances\n n_instances = len(hmmeralign_output['alignments'])\n _log.debug(\"Creating the alignment of mappings for '\"+str(n_instances) +\"' '\"+pfam_id+\"' domain occurrences based on the HMM alignment to consensus and original domain sequence\")\n \n # ensure we can map consensus residues back to consensus positions\n 
hmmeralign_output['consensus']['aligned_sequence'] = convert_pfam_fasta_alignment_to_original_aligned_sequence(hmmeralign_output['consensus']['alignment'])\n hmmeralign_output['consensus']['mapping_consensus_alignment_to_positions'] = map_sequence_to_aligned_sequence(hmmeralign_output['consensus']['sequence'], hmmeralign_output['consensus']['aligned_sequence'])\n \n # create mappings between domain occurrences and the domain consensus sequence\n for _alignment in hmmeralign_output['alignments']:\n # retrieve current aligned domain\n \n # Create a mapping from the aligned domain sequence to the domain sequence\n aligned_sequence = convert_pfam_fasta_alignment_to_original_aligned_sequence(_alignment['alignment'])\n original_sequence = convert_pfam_fasta_alignment_to_strict_sequence(aligned_sequence)\n mapping_domain_alignment_to_sequence_positions = map_sequence_to_aligned_sequence(original_sequence, aligned_sequence)\n \n # Generate the strict sequence for this domain; leaving only residues that were aligned to the domain consensus\n strict_aligned_sequence = convert_pfam_fasta_alignment_to_strict_fasta(_alignment['alignment'])\n \n # create the mapping between the strict alignments and the original consensus sequence\n mapping_aligned_domain_to_domain_consensus = createAlignedSequenceMapping(strict_aligned_sequence, hmmeralign_output['consensus']['aligned_sequence'], False)\n \n # create a list of mapping positions that includes insertions\n mapping_positions = list(mapping_domain_alignment_to_sequence_positions.keys()) + list(set(mapping_aligned_domain_to_domain_consensus.keys()) - set(mapping_domain_alignment_to_sequence_positions.keys()))\n \n # Second add each aligned residue mapping\n for mapping_pos in sorted(mapping_positions):\n # retrieve the residue at the consensus position and the residue at the domain position\n consensus_domain_residue = hmmeralign_output['consensus']['aligned_sequence'][mapping_pos]\n \n if consensus_domain_residue == '-':\n # Set the default values for the insertion\n continue\n else:\n # retrieve the position in the domain consensus\n domain_consensus_pos = hmmeralign_output['consensus']['mapping_consensus_alignment_to_positions'][mapping_pos]\n \n # retrieve the position in the domain sequence\n ref_pos = mapping_domain_alignment_to_sequence_positions[mapping_pos]\n # convert the position in the domain sequence to the uniprot position and genomic position\n uniprot_pos = int(_alignment['start_pos']) + ref_pos -1\n \n # Add the consensus pos to the protein\n if not _alignment['uniprot_ac'] in consensus_pos_per_protein.keys():\n consensus_pos_per_protein[_alignment['uniprot_ac']] = {}\n if not uniprot_pos in consensus_pos_per_protein[_alignment['uniprot_ac']].keys():\n consensus_pos_per_protein[_alignment['uniprot_ac']][uniprot_pos] = []\n consensus_pos_per_protein[_alignment['uniprot_ac']][uniprot_pos].append(domain_consensus_pos) \n \n # now incorporate the alignment data into our domain model in form of mappings\n # First get the protein ids for the uniprot acs\n uniprot_acs_to_ids = ProteinRepository.retrieve_protein_id_for_multiple_protein_acs([x for x in consensus_pos_per_protein.keys()])\n protein_ids = [int(y) for y in np.unique([x for x in uniprot_acs_to_ids.values()])]\n \n # Second, get all mappings for these proteins\n protein_mappings = MappingRepository.get_mappings_for_multiple_protein_ids(protein_ids)\n \n # retrieve all transcripts mapped to these protein_ids\n gene_ids = 
GeneRepository.retrieve_transcript_id_for_multiple_protein_ids(protein_ids)\n \n # create all aligned codons\n meta_codons_per_consensus_pos = {}\n for uniprot_ac in consensus_pos_per_protein.keys():\n for uniprot_pos in consensus_pos_per_protein[uniprot_ac].keys():\n for domain_consensus_pos in consensus_pos_per_protein[uniprot_ac][uniprot_pos]:\n # Retrieve the mapping for the corresponding uniprot_position\n mappings_for_uniprot_pos = [x for x in protein_mappings[uniprot_acs_to_ids[uniprot_ac]] if x.uniprot_position == uniprot_pos]\n \n # Seperate the mappings per gene_id\n mapping_per_gene_id = {}\n for mapping in mappings_for_uniprot_pos:\n if not mapping.gene_id in mapping_per_gene_id.keys():\n mapping_per_gene_id[mapping.gene_id] = []\n mapping_per_gene_id[mapping.gene_id].append(mapping)\n \n for gene_id in mapping_per_gene_id.keys():\n # Obtain the mappings for this position\n mappings = mapping_per_gene_id[gene_id]\n\n try:\n # create a codon\n codon = Codon.initializeFromMapping(mappings, gene_ids[gene_id], uniprot_ac)\n \n # Add the codon to the consensus positions\n if not domain_consensus_pos in meta_codons_per_consensus_pos.keys():\n meta_codons_per_consensus_pos[domain_consensus_pos] = []\n \n meta_codons_per_consensus_pos[domain_consensus_pos].append(codon)\n except MalformedCodonException as e:\n raise MalformedMappingsForAlignedCodonsPosition(\"Encountered a malformed codon mapping for domain '\"\n +str(pfam_id)+\"' in gene '\"+str(gene_id)\n +\"', at amino_acid_position '\"+str(uniprot_pos)\n +\"':\" + str(e))\n \n time_step = time.clock()\n _log.info(\"Finished the alignment of mappings for '\"+str(n_instances) +\"' instances '\"+pfam_id+\"' domain occurrences in \"+str(time_step-start_time)+\" seconds\")\n return meta_codons_per_consensus_pos, consensus_length, n_instances", "def map_codon_codes(self, codon_codes):\n if codon_codes.shape[-1] != 3:\n raise ValueError(\n f\"Codons must be length 3, \"\n f\"but size of last dimension is {codon_codes.shape[-1]}\"\n )\n codon_numbers = CodonTable._to_number(codon_codes)\n aa_codes = self._codons[codon_numbers]\n return aa_codes", "def encode_DNA(seq):\n\tseq2bin_dict = {'A':[0,0], 'C':[0,1], 'G':[1,0], 'T':[1,1]}\n\treturn np.array(sum([seq2bin_dict.get(nuc) for nuc in seq], []))", "def align():\n sh.clustalo('-i', amplified, '-o', aligned)", "def _retranslate(seq):\n if len(seq[\"vdj_nt\"]) % 3 != 0:\n trunc = len(seq[\"vdj_nt\"]) % 3\n seq[\"vdj_nt\"] = seq[\"vdj_nt\"][:-trunc]\n seq[\"vdj_aa\"] = Seq(seq[\"vdj_nt\"], generic_dna).translate()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Throws an exception when the class has not been initialised beforehand; otherwise, returns the blockchain instance.
def get_instance():
    if not Blockchain.__instance__:
        raise Exception("Create your instance of blockchain with the respective properties")
    return Blockchain.__instance__
[ "def __init__(self):\n this = _coin.new_SoError()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, database, contract=None, hash_value=None, testnet=False):\n self.db = database\n self.keychain = KeyChain(self.db)\n if contract is not None:\n self.contract = contract\n elif hash_value is not None:\n try:\n file_path = self.db.HashMap().get_file(hash_value)\n if file_path is None:\n file_path = DATA_FOLDER + \"cache/\" + hexlify(hash_value)\n with open(file_path, 'r') as filename:\n self.contract = json.load(filename, object_pairs_hook=OrderedDict)\n except Exception:\n try:\n file_path = DATA_FOLDER + \"purchases/in progress/\" + hexlify(hash_value) + \".json\"\n with open(file_path, 'r') as filename:\n self.contract = json.load(filename, object_pairs_hook=OrderedDict)\n except Exception:\n self.contract = {}\n else:\n self.contract = {}\n self.log = Logger(system=self)\n\n # used when purchasing this contract\n self.testnet = testnet\n self.ws = None\n self.blockchain = None\n self.amount_funded = 0\n self.received_txs = []\n self.timeout = None\n self.is_purchase = False", "def get_blockchain_from_node(self, node: str) -> Optional[Blockchain]:\n try:\n blockchain = get_blockchain_from_node(node, self.valid_proof)\n except NodeConnectionError:\n # TODO: Handle cases of multiple errors with nodes.\n blockchain = None\n\n return blockchain", "def __init__(self):\n this = _coin.new_SoBaseKit()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def create_genesis_block():\n return Block(0, date.datetime.now(), \"010101\", {\"VIN\": 123456, \"Owner\": \"Qwertz\", \"Mileage\": 0},\n hash_a_block(0, date.datetime.now(), \"010101\", {\"VIN\": 123456, \"Owner\": \"Qwertz\", \"Mileage\": 0}))", "def __init__(self):\n this = _coin.new_SoReadError()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, genesisTarget, maxMintCoinsPerTx):\n self.genesisTarget = genesisTarget\n self.maxMintCoinsPerTx = maxMintCoinsPerTx\n # create a data structure of blocks to maintain the chain\n self.chain = []\n self.blockChain = defaultdict(list)\n genesisBlock = Block() # creating a genesis block\n genesisBlock.setTarget(genesisTarget) # set the difficulty of the genesis block\n genesisBlock.cumulativeWork = 1 # work of genesis block is 1\n self.chain.append(genesisBlock) # add genesis block to the chain\n self.root = genesisBlock\n self.blockHashMapping = defaultdict(Block) # mapping between block hash and the block\n self.blockHashMapping[self.root.getHash()] = self.root\n\n # pointer to chain tip and attribute which keeps track of maximum Work of any fork\n self.chainTip = self.root\n self.maxWork = self.root.cumulativeWork", "def getLendingPoolContractInstance(self):\n # Kovan\n lendingAddressProviderABI = 
json.loads('[{\"inputs\":[{\"internalType\":\"string\",\"name\":\"marketId\",\"type\":\"string\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newAddress\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"hasProxy\",\"type\":\"bool\"}],\"name\":\"AddressSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newAddress\",\"type\":\"address\"}],\"name\":\"ConfigurationAdminUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newAddress\",\"type\":\"address\"}],\"name\":\"EmergencyAdminUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newAddress\",\"type\":\"address\"}],\"name\":\"LendingPoolCollateralManagerUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newAddress\",\"type\":\"address\"}],\"name\":\"LendingPoolConfiguratorUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newAddress\",\"type\":\"address\"}],\"name\":\"LendingPoolUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newAddress\",\"type\":\"address\"}],\"name\":\"LendingRateOracleUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"string\",\"name\":\"newMarketId\",\"type\":\"string\"}],\"name\":\"MarketIdSet\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newAddress\",\"type\":\"address\"}],\"name\":\"PriceOracleUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newAddress\",\"type\":\"address\"}],\"name\":\"ProxyCreated\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"}],\"name\":\"getAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getEmergencyAdmin\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLendingPool\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLendingPoolCollateralManager\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLendingPoolConfigurator\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getLendingRateOracle\",\"outputs\":[{\"internalType\
":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getMarketId\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getPoolAdmin\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getPriceOracle\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"newAddress\",\"type\":\"address\"}],\"name\":\"setAddress\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"id\",\"type\":\"bytes32\"},{\"internalType\":\"address\",\"name\":\"implementationAddress\",\"type\":\"address\"}],\"name\":\"setAddressAsProxy\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"emergencyAdmin\",\"type\":\"address\"}],\"name\":\"setEmergencyAdmin\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"manager\",\"type\":\"address\"}],\"name\":\"setLendingPoolCollateralManager\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"configurator\",\"type\":\"address\"}],\"name\":\"setLendingPoolConfiguratorImpl\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"pool\",\"type\":\"address\"}],\"name\":\"setLendingPoolImpl\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"lendingRateOracle\",\"type\":\"address\"}],\"name\":\"setLendingRateOracle\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"marketId\",\"type\":\"string\"}],\"name\":\"setMarketId\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"admin\",\"type\":\"address\"}],\"name\":\"setPoolAdmin\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"priceOracle\",\"type\":\"address\"}],\"name\":\"setPriceOracle\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]')\n lendingAddressProviderAddress = self.web3Instance.toChecksumAddress(\"0x88757f2f99175387ab4c6a4b3067c77a695b0349\")\n lendingPoolAddressesProviderContractInstance = self.web3Instance.eth.contract(address=lendingAddressProviderAddress, abi=lendingAddressProviderABI)\n\n lendingPoolAddress = 
lendingPoolAddressesProviderContractInstance.functions.getLendingPool().call()\n\n lendingPoolContractInstance = self.web3Instance.eth.contract(address=lendingPoolAddress, abi=lendingPoolABI)\n\n return lendingPoolContractInstance", "def __init__(self):\n this = _coin.new_SoLightKit()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def load_chain(self):\n if os.path.exists('bc_file.txt') and \\\n os.stat('bc_file.txt').st_size != 0 and \\\n Path('bc_file.txt').is_file():\n print_debug_info(\n 'Load existing blockchain from file')\n with open('bc_file.txt', 'r') as bc_file:\n self.chain = serializer.deserialize(bc_file.read())\n else:\n # If file doesn't exist / is empty:\n # Create genesis block\n\n self.chain[Header(0, 0, 768894480, 0, 0, 0)] = []", "def __init__(self):\n this = _coin.new_SoSFRotation()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoWrapperKit()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def initialize_block(self, block_header):\n LOGGER.debug('PbftBlockPublisher::initialize_block previous_block_id=%s (%s)',_short_id(block_header.previous_block_id),block_header)\n # If the previous block ID matches our cached one, that means that we\n # have already determined that even if we initialize the requested\n # block we would not be able to claim it. So, instead of wasting time\n # doing all of the checking again, simply short-circuit the failure so\n # that the validator can go do something more useful.\n if block_header.previous_block_id == PbftBlockPublisher._previous_block_id:\n LOGGER.debug(\"PbftBlockPublisher::initialize_block block_header.previous_block_id == PbftBlockPublisher._previous_block_id TRUE\")\n return False\n PbftBlockPublisher._previous_block_id = block_header.previous_block_id\n # Using the current chain head, we need to create a state view so we\n # can create a PBFT enclave.\n if False:\n state_view = BlockWrapper.state_view_for_block(\n block_wrapper=self._block_cache.block_store.chain_head,\n state_view_factory=self._state_view_factory)\n\n pbft_settings_view = PbftSettingsView(state_view)\n LOGGER.debug(\"PbftBlockPublisher::pbft_settings_view node=%s\",pbft_settings_view.pbft_node)\n #self._node = pbft_settings_view.pbft_node\n \n consensus_state = ConsensusState.consensus_state_for_block_id(\n block_id=block_header.previous_block_id,\n block_cache=self._block_cache,\n state_view_factory=self._state_view_factory,\n consensus_state_store=self._consensus_state_store,\n node=self._node\n )\n # shift into PrePrepare state\n consensus_state.next_step()\n #consensus_state.mark_as_own()\n consensus_state.set_consensus_state_for_block_id(block_header.previous_block_id,self._consensus_state_store)\n self._block_id = block_header.previous_block_id\n #consensus_state.set_node(self._node)\n LOGGER.debug(\"PbftBlockPublisher::initialize_block GET CONSENSUS_STATE=%s for block_id=%s \",consensus_state,_short_id(block_header.previous_block_id))\n # start \n # Get our validator registry entry to see what PBFT public key\n # other validators think we are using.\n\n if _VREG_:\n validator_registry_view = ValidatorRegistryView(state_view)\n validator_info = None\n\n try:\n validator_id = block_header.signer_public_key\n validator_info = validator_registry_view.get_validator_info(validator_id=validator_id)\n except KeyError:\n pass\n\n # If we don't have a validator registry entry, then check the active\n # key. 
If we don't have one, then we need to sign up. If we do have\n # one, then our validator registry entry has not percolated through the\n # system, so nothing to to but wait.\n active_pbft_public_key = self._pbft_key_state_store.active_key\n if validator_info is None:\n if active_pbft_public_key is None:\n LOGGER.debug('PbftBlockPublisher::initialize_block No public key found, so going to register new signup information')\n self._register_signup_information(block_header=block_header)\n\n else: # Check if we need to give up on this registration attempt\n try:\n nonce = self._pbft_key_state_store[active_pbft_public_key].signup_nonce\n except (ValueError, AttributeError):\n self._pbft_key_state_store.active_key = None\n LOGGER.warning('PbftBlockPublisher::initialize_block Pbft Key State Store had inaccessible or '\n 'corrupt active key [%s] clearing '\n 'key.', active_pbft_public_key)\n return False\n LOGGER.debug('PbftBlockPublisher::initialize_block Check if we need to give up on this registration attempt')\n self._handle_registration_timeout(\n block_header=block_header,\n pbft_enclave_module=None,#pbft_enclave_module,\n state_view=state_view,\n signup_nonce=nonce,\n pbft_public_key=active_pbft_public_key\n )\n LOGGER.debug(\"PbftBlockPublisher::initialize_block validator_info NONE\")\n return True #False\n\n # Retrieve the key state corresponding to the PBFT public key in our\n # validator registry entry.\n pbft_key_state = None\n try:\n pbft_key_state = self._pbft_key_state_store[validator_info.signup_info.pbft_public_key]\n except (ValueError, KeyError):\n pass\n\n # If there is no key state associated with the PBFT public key that\n # other validators think we should be using, then we need to create\n # new signup information as we have no way whatsoever to publish\n # blocks that other validators will accept.\n LOGGER.debug(\"PbftBlockPublisher::check pbft_key_state=%s\",pbft_key_state)\n if pbft_key_state is None:\n LOGGER.debug('PbftBlockPublisher::initialize_block PBFT public key %s...%s in validator registry not found in key state store. Sign up again',\n validator_info.signup_info.pbft_public_key[:8],\n validator_info.signup_info.pbft_public_key[-8:])\n self._register_signup_information(block_header=block_header)\n\n # We need to put fake information in the key state store for the\n # PBFT public key the other validators think we are using so that\n # we don't try to keep signing up. However, we are going to mark\n # that key state store entry as being refreshed so that we will\n # never actually try to use it.\n dummy_data = b64encode(b'No sealed signup data').decode('utf-8')\n self._pbft_key_state_store[validator_info.signup_info.pbft_public_key] = PbftKeyState(\n sealed_signup_data=dummy_data,\n has_been_refreshed=True,\n signup_nonce='unknown')\n\n return False\n\n # Check the key state. If it is marked as being refreshed, then we are\n # waiting until our PBFT public key is updated in the validator\n # registry and therefore we cannot publish any blocks.\n if _VREG_ and pbft_key_state.has_been_refreshed:\n LOGGER.debug(\n 'PBFT public key %s...%s has been refreshed. 
Wait for new '\n 'key to show up in validator registry.',\n validator_info.signup_info.pbft_public_key[:8],\n validator_info.signup_info.pbft_public_key[-8:])\n\n # Check if we need to give up on this registration attempt\n self._handle_registration_timeout(\n block_header=block_header,\n pbft_enclave_module=pbft_enclave_module,\n state_view=state_view,\n signup_nonce=pbft_key_state.signup_nonce,\n pbft_public_key=active_pbft_public_key\n )\n return False\n\n # If the PBFT public key in the validator registry is not the active\n # one, then we need to switch the active key in the key state store.\n if _VREG_:\n if validator_info.signup_info.pbft_public_key != active_pbft_public_key:\n active_pbft_public_key = validator_info.signup_info.pbft_public_key\n self._pbft_key_state_store.active_key = active_pbft_public_key\n\n # Ensure that the enclave is using the appropriate keys\n try:\n signup_data = json2dict(base64.b64decode(pbft_key_state.sealed_signup_data.encode()).decode())\n unsealed_pbft_public_key = signup_data.get('pbft_public_key')\n except SystemError:\n # Signup data is unuseable\n LOGGER.error(\n 'Could not unseal signup data associated with PPK: %s..%s',\n active_pbft_public_key[:8],\n active_pbft_public_key[-8:])\n self._pbft_key_state_store.active_key = None\n return False\n LOGGER.debug(\"PbftBlockPublisher::unsealed_pbft_public_key=%s ~ %s signup_data=%s\",unsealed_pbft_public_key,active_pbft_public_key,signup_data)\n assert active_pbft_public_key == unsealed_pbft_public_key\n\n LOGGER.debug('Using PBFT public key: %s...%s',active_pbft_public_key[:8],active_pbft_public_key[-8:])\n LOGGER.debug('Unseal signup data: %s...%s',pbft_key_state.sealed_signup_data[:8],pbft_key_state.sealed_signup_data[-8:])\n \"\"\"\n LOGGER.debug(\"PbftBlockPublisher::initialize_block ADD CONSENSUS_STATE for block_id=%s\",block_header.previous_block_id)\n consensus_state = ConsensusState.consensus_state_for_block_id(\n block_id=block_header.previous_block_id,\n block_cache=self._block_cache,\n state_view_factory=self._state_view_factory,\n consensus_state_store=self._consensus_state_store,\n pbft_enclave_module=None,\n )\n \"\"\"\n #pbft_settings_view = PbftSettingsView(state_view)\n #LOGGER.debug(\"PbftBlockPublisher::pbft_settings_view node=%s\",pbft_settings_view.pbft_node)\n\n # If our signup information does not pass the freshness test, then we\n # know that other validators will reject any blocks we try to claim so\n # we need to try to sign up again.\n\n # Using the consensus state for the block upon which we want to\n # build, check to see how many blocks we have claimed on this chain\n # with this PBFT key. 
If we have hit the key block claim limit, then\n # we need to check if the key has been refreshed.\n # We need to create a wait timer for the block...this is what we\n # will check when we are asked if it is time to publish the block\n pbft_key_state = self._pbft_key_state_store[active_pbft_public_key]\n sealed_signup_data = pbft_key_state.sealed_signup_data\n\n # At this point, we know that if we are able to claim the block we are\n # initializing, we will not be prevented from doing so because of PBFT\n # policies.\n\n self._wait_timer = 20\n self._wait_timer = 20\n PbftBlockPublisher._previous_block_id = None\n block_header.consensus = b\"pbft\"\n LOGGER.debug('PbftBlockPublisher::initialize_block DONE _wait_timer=%s',self._wait_timer)\n self._block_header = block_header\n return True", "def __init__(self):\n this = _coin.new_SoMFBool()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoSFBool()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoMFRotation()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoLightModel()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoInfo()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def create_genesis_block(self) -> None:\n genesis_block = Block(0, [], time.time(), \"0\")\n genesis_block.hash = genesis_block.compute_hash()\n self.chain.append(genesis_block)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return latest block in chain
def get_latest_block(self):
    return self.chain[-1]
[ "def last_block(self) -> Block:\r\n return self.chain[-1]", "def get_last_block(self):\r\n\r\n if len(self.chain) == 0:\r\n return None\r\n return self.chain[-1]", "def latest_block(self) -> Block:\n return Block(self.latest_header(), self.chain[self.latest_header()])", "def get_previous_block(self):\n # Return previous block\n \n return self.chain[-1]", "def find_last_block(self):\n pass", "def getLastMainChainBlock(self):\n\t\tquery = 'SELECT * from blocks WHERE orphan = False ORDER BY id ASC LIMIT 1'\n\t\tself.executeQuery(query)\n\t\trawBlock = self.fetchOne()\n\t\tif rawBlock is not None:\n\t\t\tblock = Block.Block(None, rawBlock[0], rawBlock[9])\n\t\t\tblock.setBlockFromDb(rawBlock)\n\t\t\treturn block\n\t\treturn rawBlock", "def last_block_for(self, anchor):\n for block in reversed(self.blocks):\n if anchor in block.anchors:\n return block", "def update_last_block(self, last_block):\n pass", "def last_block_block_pair():\n last_block = Block.make_genesis_block()\n return last_block, Block.mine_block(last_block, DATA)", "def getLastBlock(self):\n\t\tquery = 'SELECT * from blocks ORDER BY id DESC LIMIT 1'\n\t\tself.executeQuery(query)\n\t\trawBlock = self.fetchOne()\n\t\tif rawBlock == None:\n\t\t\treturn None\n\t\tblock = Block.Block(None, rawBlock[0], rawBlock[9])\n\t\tblock.setBlockFromDb(rawBlock)\n\t\treturn block", "def latest_header(self) -> Header:\n return next(reversed(self.chain))", "def get_last_blockchain_value(self): # Last Block\n if len(self.__chain) < 1:\n return None\n return self.__chain[-1]", "def guess_latest_block():\n with latest_blocks_lock:\n return max(set(latest_blocks), key=latest_blocks.count)", "def view_last_block():\n response = {\n 'chain': [blockchain_db_manager.get_last_block()],\n 'length': 1,\n 'header': 'Last Block'\n }\n return render_template('chain.html', data=response)", "async def async_iter_latest_block(\n w3: \"Web3\", to_block: Optional[Union[BlockNumber, LatestBlockParam]] = None\n) -> AsyncIterable[BlockNumber]:\n _last = None\n\n is_bounded_range = to_block is not None and to_block != \"latest\"\n\n while True:\n latest_block = await w3.eth.block_number # type: ignore\n # type ignored b/c is_bounded_range prevents unsupported comparison\n if is_bounded_range and latest_block > to_block:\n yield None\n # No new blocks since last iteration.\n if _last is not None and _last == latest_block:\n yield None\n else:\n yield latest_block\n _last = latest_block", "def iter_latest_block(\n w3: \"Web3\", to_block: Optional[Union[BlockNumber, LatestBlockParam]] = None\n) -> Iterable[BlockNumber]:\n _last = None\n\n is_bounded_range = to_block is not None and to_block != \"latest\"\n\n while True:\n latest_block = w3.eth.block_number\n # type ignored b/c is_bounded_range prevents unsupported comparison\n if is_bounded_range and latest_block > to_block: # type: ignore\n yield None\n # No new blocks since last iteration.\n if _last is not None and _last == latest_block:\n yield None\n else:\n yield latest_block\n _last = latest_block", "def lastInChain(self,index):\n result = index\n mp = self.genParts[index]\n pdgId = mp.particle.pdgId()\n while True:\n idas = self.genParts[result].daughterIndices()\n if not idas:\n return result\n daIds = { }\n for ida in idas:\n pdgIdDa = self.genParts[ida].particle.pdgId()\n if not pdgIdDa in daIds:\n daIds[pdgIdDa] = [ ]\n daIds[pdgIdDa].append(ida)\n if len(daIds.keys())>2 or ( not pdgId in daIds ) or len(daIds[pdgId])>1:\n break\n if len(daIds.keys())==1:\n if len(daIds[pdgId])!=1:\n break\n else:\n otherIds = 
[ x for x in daIds.keys() if x != pdgId ]\n if otherIds[0]!=21 and otherIds[0] != 22:\n break\n if daIds[pdgId][0]==result:\n print \"Daughters point back to same line???\"\n break\n result = daIds[pdgId][0]\n \n return result", "def first_block_for(self, anchor):\n for block in self.blocks:\n if anchor in block.anchors:\n return block", "def find_last_block(self):\n init = self.database.status.find_one({'_id': 'height_all_tsx'})\n if (init):\n return init['value']\n else: \n return 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds transactions into the waiting list to be mined
def add_new_pending_data(self, transaction):
    self.pending_transaction.append(transaction)
[ "def _cleanup_pending_transactions(self) -> None:\n queue = self._last_update_for_transactions\n timeout = datetime.timedelta(0, self.pending_transaction_timeout)\n\n if len(queue) == 0:\n return\n\n next_date, next_item = queue[0]\n\n while datetime.datetime.now() - next_date > timeout:\n\n # remove the element from the queue\n queue.popleft()\n\n # extract dialogue label and message id\n transaction_id = next_item\n logger.debug(\"[{}]: Removing transaction: {}\".format(self.agent_name, transaction_id))\n\n # remove (safely) the associated pending proposal (if present)\n self.locks.pop(transaction_id, None)\n self.locks_as_buyer.pop(transaction_id, None)\n self.locks_as_seller.pop(transaction_id, None)\n\n # check the next transaction, if present\n if len(queue) == 0:\n break\n next_date, next_item = queue[0]", "def list_transaction(self):\n self.parsed_list_transaction = TransactionList()\n\n self.transaction()\n self.parsed_transaction.compute_hash()\n self.parsed_list_transaction.add(self.parsed_transaction)\n\n self.list_transaction_next()", "def on_new_transaction(self, transaction_id, client):\n if transaction_id is not None:\n self._pending_transactions.append(\n PendingTransaction(id=transaction_id, client=client))", "def executeTransactions(self, currentBlock: Block) -> int:\n # TODO: add some incentives for miners (cryptoeconomics)\n transactionsAdded = 0 # number of transactions added to the blockchain\n\n # for each transaction in the pending transaction list\n for pendingTransaction in self.__pendingTransactionList:\n\n # verify the signature of the transaction using the public key of the sender\n verificationResult = self.__verifySignature(pendingTransaction)\n\n if not verificationResult:\n continue # stop with the current pending transaction. 
Go to the next one\n\n # verify that the sender account balance is enough for the transaction to take place\n txOutTotalValue = 0 # total value of transaction outputs\n for txOutput in pendingTransaction.getTransactionOutputList():\n txOutTotalValue += txOutput.getValue()\n\n accountBalance = self.getAccountAvailableTotal(pendingTransaction.getSender())\n if txOutTotalValue > accountBalance: # if the balance is not enough, stop with this transaction\n continue\n\n # mine the transaction (add it to the block, add block number etc.)\n\n # add some tx inputs\n senderTxInputPool = self.__transactionInputPool.get(pendingTransaction.getSender()) # sender tx inputs\n txInputTotalValue = 0\n txInputList = list()\n i = 0\n while txInputTotalValue < txOutTotalValue:\n txInputTotalValue += senderTxInputPool[i].getValue() # increase the tx input total value\n txInputList.append(senderTxInputPool[i]) # create the tx input list\n senderTxInputPool.remove(senderTxInputPool[i]) # remove the tx input from the resources available\n i += 1\n # txInputList.append(senderTxInputPool[i]) # add one final input\n # senderTxInputPool.remove(senderTxInputPool[i])\n pendingTransaction.extendTransactionInputList(txInputList) # set the tx input list of the transaction\n\n # if there is any change, create a new tx output and set it's script (standard script)\n if txInputTotalValue > txOutTotalValue:\n changeTxOutput = TransactionOutput(txInputTotalValue-txOutTotalValue, pendingTransaction.getSender(),\n pendingTransaction.getSender())\n\n recipientPubKeyHash = TLCUtilities.getSHA256RIPEMDHash(self.getBlockchainAccount().getPublicKey())\n script = SmartContractScripts.getPayToPubKeyHashScript(recipientPubKeyHash)\n changeTxOutput.setScript(script)\n pendingTransaction.addTransactionOutput(changeTxOutput)\n\n # add the transaction to the block\n pendingTransaction.setBlockNumber(currentBlock.getBlockNumber()) # set the block number\n currentBlock.addTransaction(pendingTransaction)\n\n # add the transaction to the confirmed transactions list\n self.__confirmedTransactionList.append(pendingTransaction)\n\n # create some inputs for the input pool\n for txOutput in pendingTransaction.getTransactionOutputList():\n self.__addTransactionInputToPool(\n TransactionInput(txOutput.getValue(),\n txOutput.getRecipient(),\n pendingTransaction.getTransactionHash(),\n pendingTransaction.getTransactionOutputList().index(txOutput)\n )\n )\n\n # increase the number of transactions added\n transactionsAdded += 1\n\n if transactionsAdded > 0: # if at least one transaction is valid\n # set the __previousBlockHash property of the block\n previousBlock = self.__chain[len(self.__chain)-1]\n currentBlock.setPreviousBlockHeaderHash(previousBlock.getBlockHeaderHash())\n\n # mine the block\n nonce = self.getProofOfWork(currentBlock)\n currentBlock.setNonce(nonce) # set the nonce of the block\n\n # add the block to the chain\n self.__chain.append(currentBlock)\n\n # reset the pending transaction list\n self.__pendingTransactionList = list()\n\n return transactionsAdded", "def _fill_accounts_in_progress(self) -> str:\n while self.accounts_waiting_activation or self.accounts_to_process:\n if not self.accounts_added.is_set():\n self.accounts_added.wait()\n self.accounts_added.clear()\n while self.accounts_to_process:\n with self.lock:\n while len(self.accounts_in_progress) < self.max_buffer_size:\n if not self.accounts_to_process:\n break\n next_account_id = self.accounts_to_process.popleft()\n self.accounts_in_progress[\n next_account_id\n ] = 
AccountPostingsInProgress(last_updated=datetime.now())\n self.total_added_to_in_progress += 1\n self.idle_accounts.put(next_account_id)\n self.process_idle_accounts_queue.set()\n if self.accounts_to_process:\n self.account_complete.wait()\n self.account_complete.clear()\n return COMPLETED_THREAD", "def commit(self):\n self.jobs.extend(self.newjobs)\n self.newjobs = []", "def update_pending_transactions(self, block):\n # ----- method 1 -----\n # check committed transactions\n # \n # transactions = block.transactions\n # committed_hashes = [ transaction.hash for transaction in transactions ]\n # pending_transactions = self.pending_transactions\n # new_pending = {}\n # for committed_hash in committed_hashes:\n # if committed_hash in pending_transactions:\n # pending_transactions.pop(committed_hash)\n # ----- method 2 ----\n self._pending_transactions = {}", "def update_pend(self):\n for txid, txnw in self.pend.db:\n txnw = TxnWrapper.unserialize(SerializationBuffer(txnw))\n timestamp = txnw.timestamp\n timelock = txnw.txn.timelock\n if self.current_time * TIME_MULTIPLIER - timestamp > timelock * \\\n TIMELOCK_CONST:\n\n self.del_from_pending(txnw.txn)\n if self.verify_txn(txnw.txn, check_pend=False):\n self.write_txn_to_db(txnw.txn, timestamp)\n if VERBOSE:\n print('Transaction {} was pending and now put in '\n 'db'.format(b2hex(txid)))\n else:\n if VERBOSE:\n print('Transaction {} was pending and could not be '\n 'written to db, see reason above'.format(b2hex(\n txid)))", "def _transaction_start(self):\n self._command = []\n self._expected = 0", "def extend_orders(self, orders):\n with self._condition:\n if self._unique_orders:\n nb_added = 0\n for o in orders:\n if o in self._waiting_orders or \\\n o in self._processing_orders:\n continue\n self._waiting_orders.append(o)\n nb_added += 1\n else:\n self._waiting_orders.extend(orders)\n nb_added = len(orders)\n if self._sort_orders:\n self._waiting_orders.sort()\n self._condition.notifyAll()\n self._start(nb_threads=nb_added)", "def add_waiting(self, name: str, obj: BaseObject):\n coros = self.waiting_coros.setdefault(name, [])\n coros.append(obj)", "def sync_transactions(bank, bank_account):\n\tlast_transaction_date = frappe.db.get_value(\"Bank Account\", bank_account, \"last_integration_date\")\n\tif last_transaction_date:\n\t\tstart_date = formatdate(last_transaction_date, \"YYYY-MM-dd\")\n\telse:\n\t\tstart_date = formatdate(add_months(today(), -12), \"YYYY-MM-dd\")\n\tend_date = formatdate(today(), \"YYYY-MM-dd\")\n\n\ttry:\n\t\ttransactions = get_transactions(bank=bank, bank_account=bank_account, start_date=start_date, end_date=end_date)\n\n\t\tresult = []\n\t\tfor transaction in reversed(transactions):\n\t\t\tresult += new_bank_transaction(transaction)\n\n\t\tif result:\n\t\t\tlast_transaction_date = frappe.db.get_value('Bank Transaction', result.pop(), 'date')\n\n\t\t\tfrappe.logger().info(\"Plaid added {} new Bank Transactions from '{}' between {} and {}\".format(\n\t\t\t\tlen(result), bank_account, start_date, end_date))\n\n\t\t\tfrappe.db.set_value(\"Bank Account\", bank_account, \"last_integration_date\", last_transaction_date)\n\texcept Exception:\n\t\tfrappe.log_error(frappe.get_traceback(), _(\"Plaid transactions sync error\"))", "def mine_pending_data(self, miner_pk):\n while len(self.pending_transaction) != 0:\n transaction = self.pending_transaction[0]\n mine_block = Block(transaction, self.get_latest_block().hash)\n\n start_time = time()\n mine_block.mine_block(self.__class__.difficulty)\n end_time = time()\n \n if end_time 
- start_time < self.pow_min_time:\n self.__class__.difficulty += 1\n\n try:\n self.pending_transaction.remove(transaction)\n self.chain.append(mine_block)\n print(\"Mine time taken: \", end_time - start_time, \" | By miner: \", miner_pk)\n # TODO: Implement some form of miner reward scheme\n except:\n pass", "def submit(self):\r\n if len(self.wait_list) != 0:\r\n for i in range(len(self.wait_list)):\r\n taken_booking = Booking.Booking(self.wait_list[i][0], self.wait_list[i][1], self.wait_list[i][2])\r\n user = User.User(taken_booking.user_id)\r\n taken_booking.cancel()\r\n send_email(email=user.email, name=user.name, cancel=True, rec_id=taken_booking.rec_id,\r\n room_id=taken_booking.room_id, t_date=taken_booking.date, period=taken_booking.period,\r\n teacher=self.teacher)\r\n\r\n for i in range(len(self.date)):\r\n new_booking = Booking.Booking(self.room[i], self.date[i], self.period[i])\r\n new_booking.add_booking(0) # user ID is 0 for teachers\r\n\r\n tk.messagebox.showinfo('Success', 'Rooms are reserved successfully!')\r\n self.close_window()", "def __enqueue_in_order(self, lease):\n self.queue.enqueue_in_order(lease)", "def waiting():\n whitelist = Whitelist()\n\n print(f\"Accounts waiting for approval:\")\n for account in whitelist.accounts_waiting():\n print(f\"{account}\")", "def add_approved_deposits_to_wallet(self) -> Optional[List[Future]]:\n try:\n wallet_transactions: List[WalletTransactionsModel] = WalletTransactionsModel.query(\n WalletTransactionsModel.is_verified == True, WalletTransactionsModel.is_settled == False).fetch_async().get_result()\n print(\"approved deposits running\")\n return [self.do_send_to_client_wallet(transaction=transaction) for transaction in wallet_transactions\n if transaction.transaction_type == 'deposit']\n except RetryError as e:\n # TODO log this errors\n return None", "def add_transaction(self, transaction: Transaction) -> None:\n self._incoming_transactions.append(transaction)", "def test_save_unvoid_and_add_transactions(self):\n self.test_save_make_void_with_transactions()\n\n entry = BankSpendingEntry.objects.get()\n entry.void = False\n entry.save()\n\n Transaction.objects.create(bankspend_entry=entry, account=self.account,\n balance_delta=15)\n\n self.assertEqual(Transaction.objects.count(), 2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Mine the transactions in the pending list. Increases the difficulty if a successful mine takes less time than the set proof-of-work minimum. Miner's reward is not implemented yet; the implementation is left to the individual. For demo convenience, loops through all pending transactions in one call.
def mine_pending_data(self, miner_pk):
    while len(self.pending_transaction) != 0:
        transaction = self.pending_transaction[0]
        mine_block = Block(transaction, self.get_latest_block().hash)

        start_time = time()
        mine_block.mine_block(self.__class__.difficulty)
        end_time = time()

        if end_time - start_time < self.pow_min_time:
            self.__class__.difficulty += 1

        try:
            self.pending_transaction.remove(transaction)
            self.chain.append(mine_block)
            print("Mine time taken: ", end_time - start_time, " | By miner: ", miner_pk)
            # TODO: Implement some form of miner reward scheme
        except:
            pass
[ "def mine(self):\n if not self.unconfirmedTxs: # No txs to add?...\n return False # Then there's no need to work\n\n lastBlock = self.lastBlock # Grb the most recent block\n\n newBlock = Block(index=lastBlock.index + 1, # A new block\n txs=self.unconfirmedTxs, # Mempool data is added to block\n timestamp=time.time(),\n previousHash=lastBlock.hash)\n\n proof = self.proofOfWork(newBlock) # Find the valid hash\n # Add the new, valid, block containing txs\n self.addBlock(newBlock, proof)\n self.unconfirmedTxs = [] # Clear the mempool\n return newBlock.index # Success!", "def mine_block(self):\n last_block = self.__chain[-1]\n hashed_block = hash_block(last_block)\n proof = self.proof_of_work()\n reward_transaction = Transaction('Mining', self.hosting_node, MINING_REWARD)\n copied_transactions = self.__open_transactions[:]\n copied_transactions.append(reward_transaction)\n block = Block(len(self.__chain), hashed_block,copied_transactions, proof)\n self.__chain.append(block)\n self.__open_transactions = []\n self.save_data()\n return True", "async def mine(self, ctx):\r\n #Make sure they're in mining territory\r\n location = await AssetCreation.getLocation(self.client.pg_con, ctx.author.id)\r\n biome = location_dict[location]['Biome']\r\n if biome != 'Hills': # These are the biomes you can hunt in\r\n await ctx.reply(f'You cannot mine at {location}. Move to a hills biome.')\r\n return\r\n\r\n #Otherwise calc\r\n result = random.choices(['success', 'critical success', 'failure'], [45,35,20])\r\n if result[0] == 'success':\r\n gold = random.randint(20,100)\r\n iron = random.randint(40,80)\r\n silver = random.randint(18,25)\r\n\r\n elif result[0] == 'critical success':\r\n gold = random.randint(100, 150)\r\n iron = random.randint(80,100)\r\n silver = random.randint(30,45)\r\n\r\n else:\r\n gold = random.randint(10,30)\r\n iron = random.randint(25,35)\r\n silver = random.randint(2,8)\r\n\r\n #Modify rewards given player's class and weapontype\r\n role = await AssetCreation.getClass(self.client.pg_con, ctx.author.id)\r\n if role == 'Blacksmith':\r\n gold *= 2\r\n iron *= 2\r\n silver *= 2\r\n\r\n if await AssetCreation.check_for_map_control_bonus(self.client.pg_con, ctx.author.id):\r\n gold = int(gold * 1.5)\r\n iron = int(iron * 1.5)\r\n silver = int(silver * 1.5)\r\n\r\n item_id = await AssetCreation.getEquippedItem(self.client.pg_con, ctx.author.id)\r\n item_info = await AssetCreation.getItem(self.client.pg_con, item_id)\r\n if item_info['Type'] == 'Dagger':\r\n gold = int(gold / 2)\r\n iron = int(iron / 2)\r\n silver = int(silver / 2)\r\n elif item_info['Type'] == 'Bow' or item_info['Type'] == 'Sling':\r\n gold = int(gold / 3)\r\n iron = int(iron / 3)\r\n silver = int(silver / 3)\r\n elif item_info['Type'] == 'Trebuchet':\r\n gold = int(gold * 2)\r\n iron = int(iron * 2)\r\n silver = int(silver * 2)\r\n elif item_info['Type'] == 'Greatsword' or item_info['Type'] == 'Axe' or item_info['Type'] == 'Mace':\r\n gold = int(gold * 1.25)\r\n iron = int(iron * 1.25)\r\n silver = int(silver * 1.25)\r\n\r\n await AssetCreation.giveGold(self.client.pg_con, gold, ctx.author.id)\r\n await AssetCreation.giveMat(self.client.pg_con, 'iron', iron, ctx.author.id)\r\n await AssetCreation.giveMat(self.client.pg_con, 'silver', silver, ctx.author.id)\r\n\r\n await ctx.reply(f'Your mining expedition was a {result[0]}! 
You got `{gold}` gold, `{iron}` iron, and `{silver}` silver.')", "def executeTransactions(self, currentBlock: Block) -> int:\n # TODO: add some incentives for miners (cryptoeconomics)\n transactionsAdded = 0 # number of transactions added to the blockchain\n\n # for each transaction in the pending transaction list\n for pendingTransaction in self.__pendingTransactionList:\n\n # verify the signature of the transaction using the public key of the sender\n verificationResult = self.__verifySignature(pendingTransaction)\n\n if not verificationResult:\n continue # stop with the current pending transaction. Go to the next one\n\n # verify that the sender account balance is enough for the transaction to take place\n txOutTotalValue = 0 # total value of transaction outputs\n for txOutput in pendingTransaction.getTransactionOutputList():\n txOutTotalValue += txOutput.getValue()\n\n accountBalance = self.getAccountAvailableTotal(pendingTransaction.getSender())\n if txOutTotalValue > accountBalance: # if the balance is not enough, stop with this transaction\n continue\n\n # mine the transaction (add it to the block, add block number etc.)\n\n # add some tx inputs\n senderTxInputPool = self.__transactionInputPool.get(pendingTransaction.getSender()) # sender tx inputs\n txInputTotalValue = 0\n txInputList = list()\n i = 0\n while txInputTotalValue < txOutTotalValue:\n txInputTotalValue += senderTxInputPool[i].getValue() # increase the tx input total value\n txInputList.append(senderTxInputPool[i]) # create the tx input list\n senderTxInputPool.remove(senderTxInputPool[i]) # remove the tx input from the resources available\n i += 1\n # txInputList.append(senderTxInputPool[i]) # add one final input\n # senderTxInputPool.remove(senderTxInputPool[i])\n pendingTransaction.extendTransactionInputList(txInputList) # set the tx input list of the transaction\n\n # if there is any change, create a new tx output and set it's script (standard script)\n if txInputTotalValue > txOutTotalValue:\n changeTxOutput = TransactionOutput(txInputTotalValue-txOutTotalValue, pendingTransaction.getSender(),\n pendingTransaction.getSender())\n\n recipientPubKeyHash = TLCUtilities.getSHA256RIPEMDHash(self.getBlockchainAccount().getPublicKey())\n script = SmartContractScripts.getPayToPubKeyHashScript(recipientPubKeyHash)\n changeTxOutput.setScript(script)\n pendingTransaction.addTransactionOutput(changeTxOutput)\n\n # add the transaction to the block\n pendingTransaction.setBlockNumber(currentBlock.getBlockNumber()) # set the block number\n currentBlock.addTransaction(pendingTransaction)\n\n # add the transaction to the confirmed transactions list\n self.__confirmedTransactionList.append(pendingTransaction)\n\n # create some inputs for the input pool\n for txOutput in pendingTransaction.getTransactionOutputList():\n self.__addTransactionInputToPool(\n TransactionInput(txOutput.getValue(),\n txOutput.getRecipient(),\n pendingTransaction.getTransactionHash(),\n pendingTransaction.getTransactionOutputList().index(txOutput)\n )\n )\n\n # increase the number of transactions added\n transactionsAdded += 1\n\n if transactionsAdded > 0: # if at least one transaction is valid\n # set the __previousBlockHash property of the block\n previousBlock = self.__chain[len(self.__chain)-1]\n currentBlock.setPreviousBlockHeaderHash(previousBlock.getBlockHeaderHash())\n\n # mine the block\n nonce = self.getProofOfWork(currentBlock)\n currentBlock.setNonce(nonce) # set the nonce of the block\n\n # add the block to the chain\n 
self.__chain.append(currentBlock)\n\n # reset the pending transaction list\n self.__pendingTransactionList = list()\n\n return transactionsAdded", "async def mine_new_block():\n block = await self.create_block_async_func(Address.create_empty_account())\n if not block:\n self.input_q.put((None, {}))\n return\n mining_params = self.get_mining_param_func()\n mining_params[\"consensus_type\"] = self.consensus_type\n # handle mining simulation's timing\n if \"target_block_time\" in mining_params:\n target_block_time = mining_params[\"target_block_time\"]\n mining_params[\"target_time\"] = (\n block.header.create_time\n + self._get_block_time(block, target_block_time)\n )\n work = MiningWork(\n block.header.get_hash_for_mining(),\n block.header.height,\n block.header.difficulty,\n )\n self.work_map[work.hash] = block\n if self.process:\n self.input_q.put((work, mining_params))\n return\n\n self.process = AioProcess(\n target=self.mine_loop,\n args=(work, mining_params, self.input_q, self.output_q),\n )\n self.process.start()\n await handle_mined_block()", "def mine(self,tgt):\n self.target = tgt\n\n blockHash = self.getHash()\n ct = 1\n\n # keep changing nonce value until blockHash is less than or equal to target\n while blockHash > tgt:\n self.nonce += random.randint(1, 2**64) # pick a random integer between 0 and 2^64\n blockHash = self.getHash()\n # print(\"Try %d\" %(ct))\n ct += 1\n \n # print(\"Mined the block with nonce = %d\" %(self.nonce))", "def mine_block(self, *args, **kwargs):\n block = self.block\n self.pack_block(block, *args, **kwargs)\n\n if block.number == 0:\n return block\n\n block_reward = self.get_block_reward() + (\n len(block.uncles) * self.get_nephew_reward()\n )\n\n with self.state.state_db() as state_db:\n state_db.delta_balance(block.header.coinbase, block_reward)\n self.logger.debug(\n \"BLOCK REWARD: %s -> %s\",\n block_reward,\n block.header.coinbase,\n )\n\n for uncle in block.uncles:\n uncle_reward = self.get_uncle_reward(block.number, uncle)\n state_db.delta_balance(uncle.coinbase, uncle_reward)\n self.logger.debug(\n \"UNCLE REWARD REWARD: %s -> %s\",\n uncle_reward,\n uncle.coinbase,\n )\n\n return block", "def mine_blocks(number):\n transactions_range = randint(1, 10)\n\n for i in range(number):\n for transaction in range(transactions_range):\n blockchain_db_manager.add_transaction(sender=(str(uuid4()).replace('-', '')[:-10]),\n recipient=(str(uuid4()).replace('-', '')[:-10]),\n amount=round(random.uniform(1, 10), 2))\n blockchain_db_manager.mine_for_next_block()\n\n response = {\n 'header': 'Successfully mined {0} blocks'.format(number)\n }\n\n return render_template('landing.html', data=response)", "def mine_block():\n \n # Get details of previous block\n previous_block = blockchain.get_previous_block()\n previous_proof = previous_block['proof']\n \n # Find proof of the next block\n proof= blockchain.proof_of_work(previous_proof)\n previous_hash = blockchain.hash(previous_block)\n \n # Create new block\n block = blockchain.create_block(proof, previous_hash)\n response = {'message': 'Congratulations, you just mined a block!',\n 'index': block['index'],\n 'timestamp': block['timestamp'],\n 'proof': block['proof'],\n 'previous_hash': block['previous_hash']}\n \n return jsonify(response), 200", "async def mine_blocks(request: Request, number: int):\n transactions_range = randint(1, 10)\n pm = [\"bibi\", \"benet\", \"gantz\"]\n for i in range(number):\n for transaction in range(transactions_range):\n blockchain_db_manager.add_transaction(\n 
sender=(str(uuid4()).replace(\"-\", \"\")[:-10]),\n recipient=pm[random.randint(0, 2)],\n amount=1,\n )\n blockchain_db_manager.mine_for_next_block()\n\n response = {\"header\": \"Successfully mined {0} blocks\".format(number)}\n\n return templates.TemplateResponse(\n \"landing.html\", {\"request\": request, \"data\": response}\n )", "def generate_reward_transaction(wallet):\n output = {}\n output[wallet.address] = MINING_REWARD\n\n return Transaction(input=MINING_REWARD_INPUT, output=output)", "def hunt(self, trials=10000, sleep_time=0.1):\n num_runs = 0\n pre_arbitrage_assets = self.load_arbitrage_assets()\n time.sleep(sleep_time)\n while(num_runs < trials):\n try:\n self.update_orderbook()\n except ConnectionError as e:\n print(e + \"will suspend bot for 10 seconds\")\n time.sleep(10)\n continue\n #Search for inefficiency\n orderbook_btc = self.orderbook_btc_eth(self.orderbook)\n orderbook_eth = self.orderbook_eth_btc(self.orderbook)\n if(orderbook_btc[0][1] - (self.fee * orderbook_btc[0][1]) > self.bit_rate['btc_one'] and\n orderbook_eth[0][1] - (self.fee * orderbook_eth[0][1]) > float(self.bit_rate['askPrice'])): \n #print('found' + orderbook_btc[0][0] + orderbook_eth[0][0] + str(num_runs))\n num_runs += 1\n purchase = []\n for k in self.orderbook:\n if(list(k.keys())[0] == orderbook_btc[0][0]):\n purchase.insert(0, k)\n if(list(k.keys())[0] == orderbook_eth[0][0]):\n purchase.insert(1, k)\n btc_limit = binance_config.btc_trade_limit\n while(btc_limit > 0.001):\n if(self.determine_feasibility(orderbook_btc[0][0], orderbook_eth[0][0], purchase, btc_limit) is True):\n self.execute_trade(orderbook_btc[0][0], orderbook_eth[0][0], purchase, btc_limit)\n break\n else:\n btc_limit = btc_limit - 0.001\n num_runs += 1\n if(num_runs % 100 == 0):\n print(str(num_runs))\n post_arbitrage_assets = self.load_arbitrage_assets()\n \n #Print results\n time_delta = datetime.datetime.now().replace(microsecond=0) - pre_arbitrage_assets['datetime'] \n print('Initial: BTC:', pre_arbitrage_assets['BTC'],'ETH:', pre_arbitrage_assets['ETH'], 'BNB:', pre_arbitrage_assets['BNB'])\n print('After__: BTC:', post_arbitrage_assets['BTC'],'ETH:', post_arbitrage_assets['ETH'], 'BNB:', post_arbitrage_assets['BNB'])\n print('Diff___: BTC:', float(post_arbitrage_assets['BTC'])-float(pre_arbitrage_assets['BTC']),\n 'ETH:', float(post_arbitrage_assets['ETH'])-float(pre_arbitrage_assets['ETH']),\n 'BNB:', float(post_arbitrage_assets['BNB'])-float(pre_arbitrage_assets['BNB']),\n 'TIME:', divmod(time_delta.total_seconds(), 60))", "def issue_txs(self, Time):\r\n if MODE[self.NodeID]>0:\r\n if MODE[self.NodeID]==2:\r\n if self.BackOff:\r\n self.LastIssueTime += TAU#BETA*REP[self.NodeID]/self.Lambda\r\n while Time+STEP >= self.LastIssueTime + self.LastIssueWork/self.Lambda:\r\n self.LastIssueTime += self.LastIssueWork/self.Lambda\r\n Parents = self.select_tips()\r\n #Work = np.random.uniform(AVG_WORK[self.NodeID]-0.5, AVG_WORK[self.NodeID]+0.5)\r\n if IOT[self.NodeID]:\r\n Work = np.random.uniform(IOTLOW,IOTHIGH)\r\n else:\r\n Work = 1\r\n self.LastIssueWork = Work\r\n self.TranCounter += 1\r\n self.IssuedTrans.append(Transaction(self.LastIssueTime, Parents, self, Work, Index=self.TranCounter))\r\n elif MODE[self.NodeID]==1:\r\n if IOT[self.NodeID]:\r\n Work = np.random.uniform(IOTLOW,IOTHIGH)\r\n else:\r\n Work = 1\r\n times = np.sort(np.random.uniform(Time, Time+STEP, np.random.poisson(STEP*self.Lambda/Work)))\r\n for t in times:\r\n Parents = self.select_tips()\r\n #Work = np.random.uniform(AVG_WORK[self.NodeID]-0.5, 
AVG_WORK[self.NodeID]+0.5)\r\n self.TranCounter += 1\r\n # if self.TranCounter==170 and self.Repchange and self.NodeID==4:\r\n # print('Time',Time)\r\n # self.Repchange=False\r\n # self.IssuedTrans.append(Transaction(t, Parents, self, Work, Index=self.TranCounter, Rep_change=7, Rep_massage=True, RepTX=self, RepRX=(self.Neighbours+self.Network.Nodes[3].Neighbours)))\r\n #else:\r\n self.IssuedTrans.append(Transaction(t, Parents, self, Work, Index=self.TranCounter))\r\n else:\r\n Work = 1\r\n times = np.sort(np.random.uniform(Time, Time+STEP, np.random.poisson(STEP*self.Lambda/Work)))\r\n for t in times:\r\n Parents = self.select_tips()\r\n #Work = np.random.uniform(AVG_WORK[self.NodeID]-0.5, AVG_WORK[self.NodeID]+0.5)\r\n self.TranCounter += 1\r\n self.IssuedTrans.append(Transaction(t, Parents, self, Work, Index=self.TranCounter))\r\n \r\n # check PoW completion\r\n while self.IssuedTrans:\r\n Tran = self.IssuedTrans.pop(0)\r\n p = Packet(self, self, Tran, Tran.IssueTime, Tran.IssueTime)\r\n if MODE[self.NodeID]>2: # malicious don't consider own txs for scheduling\r\n self.add_to_ledger(self, Tran, Tran.IssueTime)\r\n else:\r\n self.add_to_inbox(p, Tran.IssueTime)", "def confirm_bitcoin_tx_local(self, hash, sender_gid): \n\n ## send transaction to local bitcond\n segments = self.segment_storage.get_by_transaction_id(hash)\n raw_tx = self.segment_storage.get_raw_tx(segments)\n\n ## pass hex string converted to bytes\n try :\n proxy1 = bitcoin.rpc.Proxy()\n raw_tx_bytes = x(raw_tx)\n tx = CMutableTransaction.stream_deserialize(BytesIO(raw_tx_bytes))\n r1 = proxy1.sendrawtransaction(tx)\n except :\n print(\"Invalid Transaction! Could not send to network.\")\n return\n\n ## try for 30 minutes to confirm the transaction\n for n in range(0, 30) :\n try :\n proxy2 = bitcoin.rpc.Proxy()\n r2 = proxy2.getrawtransaction(r1, True)\n\n ## send zero-conf message back to tx sender\n confirmations = r2.get('confirmations', 0)\n rObj = TxTennaSegment('', '', tx_hash=hash, block=confirmations)\n arg = str(sender_gid) + ' ' + rObj.serialize_to_json()\n self.do_send_private(arg)\n\n print(\"\\nSent to GID: \" + str(sender_gid) + \": Transaction \" + hash + \" added to the mempool.\")\n break \n except IndexError:\n ## tx_id not yet in the global mempool, sleep for a minute and then try again\n sleep(60)\n continue \n \n ## wait for atleast one confirmation\n for m in range(0, 30):\n sleep(60) # sleep for a minute\n try :\n proxy3= bitcoin.rpc.Proxy()\n r3 = proxy3.getrawtransaction(r1, True)\n confirmations = r3.get('confirmations', 0)\n ## keep waiting until 1 or more confirmations\n if confirmations > 0:\n break\n except :\n ## unknown RPC error, but keep trying\n traceback.print_exc()\n\n if confirmations > 0 :\n ## send confirmations message back to tx sender if confirmations > 0\n rObj = TxTennaSegment('', '', tx_hash=hash, block=confirmations)\n arg = str(sender_gid) + ' ' + rObj.serialize_to_json()\n self.do_send_private(arg)\n print(\"\\nSent to GID: \" + str(sender_gid) + \", Transaction \" + hash + \" confirmed in \" + str(confirmations) + \" blocks.\")\n else :\n print(\"\\CTransaction from GID: \" + str(sender_gid) + \", Transaction \" + hash + \" not confirmed after 30 minutes.\")", "def handle(self, *args, **options):\n number_accounts_per_node = 150\n\n nodes_list = get_nodes()\n wallets_list = get_wallets()\n for node in nodes_list:\n wallet = None\n\n for wallet_check in wallets_list:\n if wallet_check.node.id == node.id:\n wallet = wallet_check\n\n if wallet is None:\n wallet = 
new_wallet(node=node)\n\n for i in range(number_accounts_per_node):\n print(\"Created %s\" % (new_account(wallet=wallet)))\n\n all_accounts = list(get_accounts())\n funding_account = all_accounts[0]\n input(\"Please deposit funds to %s and press enter\" % funding_account.address)\n\n ## Init. PoW\n funding_account.POW = None\n funding_account.save()\n\n ## Wait for funds to clear\n while funding_account.current_balance == 0:\n sync_accounts()\n funding_account = Account.objects.filter(address=funding_account.address)[0]\n time.sleep(5)\n\n\n rpc = nano.rpc.Client(funding_account.wallet.node.URL)\n for i in range(6):\n try:\n address_nano = funding_account.address.replace(\"xrb\", \"nano\", 1)\n frontier = rpc.frontiers(account=funding_account.address, count=1)[address_nano]\n if funding_account.POW is None or not rpc.work_validate(work=funding_account.POW, hash=frontier):\n print(\"Generating PoW account %s \" % (funding_account.address))\n\n data = {\n 'hash': str(frontier),\n 'key': settings.DPOW_API_KEY\n }\n\n res = requests.post(url=settings.DPOW_ENDPOINT, json=data, timeout=15)\n funding_account.POW = res.json()['work']\n funding_account.save()\n break\n except Exception:\n if i == 5:\n print('dPoW failure account %s unlocked without PoW' % funding_account.address)\n funding_account.unlock()\n\n while not funding_account.POW:\n funding_account = Account.objects.get(address=funding_account.address)\n time.sleep(1)\n\n empty_accounts = Account.objects.filter(current_balance=0).all()\n\n #Distribute funds between accounts to open them\n amount = funding_account.current_balance / len(empty_accounts[:])\n\n random.shuffle(all_accounts) # spread opening load across nodes\n print(\"Accounts empty %s \" % (len(empty_accounts[:])))\n for account_init in all_accounts:\n # Already opened\n if account_init.current_balance > 0:\n print(\"Skipping\")\n continue\n try:\n address_nano = funding_account.address.replace(\"xrb\", \"nano\")\n frontier = rpc.frontiers(account=funding_account.address, count=1)[address_nano]\n if funding_account.POW is None or not rpc.work_validate(work=funding_account.POW, hash=frontier):\n\n data = {\n 'hash': str(frontier),\n 'key': settings.DPOW_API_KEY\n }\n\n res = requests.post(url=settings.DPOW_ENDPOINT, json=data, timeout=15)\n funding_account.POW = res.json()['work']\n funding_account.save()\n except Exception:\n if i == 5:\n print('dPoW failure account %s unlocked without PoW' % funding_account.address)\n funding_account.unlock()\n count = 0\n while not funding_account.POW and count < 5:\n funding_account = Account.objects.get(address=funding_account.address)\n count += 1\n time.sleep(1)\n\n simple_send(funding_account, account_init.address, int(amount), generate_PoW=False) ##Using send simple allows node to generate open block for us", "def proof_of_work():\n last_block = blockchain[-1]\n last_hash = hash_block(last_block)\n proof = 0\n # Try different PoW numbers and return the first valid one\n while not valid_proof(open_transactions, last_hash, proof):\n proof += 1\n return proof", "def mine_the_block(last_block, data):\n timestamp = time.time_ns()\n last_hash = last_block.hash\n difficulty = Block.adjust_mining_difficulty(last_block, timestamp)\n nonce = 0\n hash = crypto_hash(timestamp, last_hash, data, difficulty, nonce)\n\n while convert_hex_to_binary(hash)[0:difficulty] != '0' * difficulty:\n nonce += 1\n timestamp = time.time_ns()\n difficulty = Block.adjust_mining_difficulty(last_block, timestamp)\n hash = crypto_hash(timestamp, last_hash, data, 
difficulty, nonce)\n\n return Block(timestamp, last_hash, hash, data, difficulty, nonce)", "def quick_time_estimate(per_address, transactions_per_block, max_transactions):\n total_btc = per_address * max_transactions\n\n # Number of required blocks is based on the total number of transactions.\n blocks = float(max_transactions) / float(transactions_per_block)\n\n # each block takes 10 minutes (uh, on average, or rather, that's the target)\n minutes = blocks * 10\n\n # each hour takes 60 minutes\n hours = minutes / 60\n\n timefuture = datetime.datetime.now() + datetime.timedelta(minutes=minutes)\n\n return (total_btc, blocks, hours, timefuture)", "def block(max_number_of_txns, exp_time):\n blk = {'transactions':[transaction(randrange(2, max_txt_length)) for i in range(randrange(1, max_number_of_txns))], 'time':exp_time}\n return blk" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify if the blockchain is valid. Returns true if valid and false otherwise.
def verify_blockchain(self):
    for i in range(1, len(self.chain)):
        current_block = self.chain[i]
        previous_block = self.chain[i - 1]

        if current_block.previous_hash != previous_block.hash:
            return False
    return True
[ "def is_valid():\n \n # Get validity of blockchain\n is_valid = blockchain.is_chain_valid(blockchain.chain)\n \n if is_valid: response = {'message': 'The blockchain is valid!'}\n else: response = {'message': 'Error, the blockchain is invalid!'}\n\n return jsonify(response), 200", "def is_chain_valid(self, chain):\n \n # Init. variables for while loop\n previous_block = chain[0]\n block_index = 1\n \n # Check that each block in the chain is valid\n while block_index < len(chain):\n # Get corresponding block\n block = chain[block_index]\n \n # Check previous_hash\n if block['previous_hash'] != self.hash(previous_block): return False\n \n # Check proof\n # Get proofs\n previous_proof = previous_block['proof']\n proof = block['proof']\n \n # Find hash\n hash_operation = hashlib.sha256(\n str(\n proof**2 - previous_proof**2).encode()).hexdigest()\n \n # Invalidate if leading values are not '0000'\n if hash_operation[:4] != '0000': return False\n \n # Move along chain if everything is okay\n previous_block=block\n block_index+=1\n \n return True", "def verify_chain(cls, block_chain):\n for (index, block) in enumerate(block_chain):\n if index == 0:\n continue\n\n if block.previous_hash != Hasher.hash_block(block_chain[index - 1]):\n ConsoleLogger.write_log(\n 'warn',\n __name__,\n 'verify_chain',\n 'Block chain is invalid.'\n )\n\n return False\n\n if not cls.valid_proof(\n block.transactions[:-1],\n block.previous_hash,\n block.proof\n ):\n ConsoleLogger.write_log(\n 'warn',\n __name__,\n 'verify_chain',\n 'Proof of work is invalid.'\n )\n\n return False\n return True", "def verify_transaction(self, transaction):\n\t\tsender = Bee(transaction.sender, 0)\n\t\tsender.calculate_balance(self.chain, self.last_block().index + 1)\n\n\t\treturn sender.honeycomb >= int(transaction.amount)", "def verify_proof_of_work(self) -> bool:\n block_dict = copy.deepcopy(self.__dict__)\n block_dict['transactions'] = [str(tx) for tx in block_dict['transactions']]\n incoming_hash = block_dict.pop('hash') # remove hash from object to verify the rest of the contents\n verify_hash = hashlib.sha256(json.dumps(block_dict).encode()).hexdigest() # recompute hash value of contents\n return verify_hash == incoming_hash", "def validate(self):\n\t\treturn self.checksum == self.create_checksum()", "def valid_chain(self, chain):\n previous_block = chain[0]\n index = 1\n while index < len(chain):\n block = chain[index]\n if block['previous_hash'] != self.hash(previous_block):\n return False\n if not self.valid_proof(block['proof'], previous_block['proof']):\n return False\n index += 1\n previous_block = block\n return True", "def validate_chain(self):\n if not self.validate(self.chain[0], None):\n # genesis block\n return False\n for parent_idx, block in enumerate(self.chain[1:]):\n # remainder of chain\n if not self.validate(block, self.chain[parent_idx]):\n return False\n\n return True", "def valid(self):\n try:\n self.validate()\n except InvalidAddress:\n return False\n return True", "def validate_coinbase_transaction(transaction, block_height):\n # Check that there is only 1 input transaction\n if(len(transaction.tx_ins) != 1):\n return False\n if(get_transaction_id(transaction) != transaction.id):\n return False\n # Check if tx_out_index is set to current block's height\n if(transaction.tx_ins[0].tx_out_index != block_height):\n return False\n return True", "async def test_success(\n self,\n conn,\n wallet_transaction,\n ):\n is_valid = await validate_transaction(\n conn,\n transaction_id=wallet_transaction,\n )\n assert 
is_valid is True", "def validate(self, block, parent):\n if not self.check_hash(block) == block.hash_val:\n # block's stored hash matches\n return False\n\n if (block.hash_val[:self.difficulty] !=\n \"\".join([\"0\" for _ in range(self.difficulty)])):\n # block's hash has the required number of zerores\n return False\n\n if parent is not None:\n # checks for non-genesis blocks (parent required)\n if block.timestamp < parent.timestamp:\n # block must have been created after its parent\n return False\n\n if parent.hash_val != block.parent_hash:\n # block's stored hash of its parent should match the parent\n # block's hash\n # n.b. the parent's hash is verified to be valid of its stored\n # hash since it is part of the chain, thus `validate` approved\n # it before\n return False\n\n if block.index != parent.index+1:\n # block should immediately follow its parent in the chain\n return False\n\n return True", "def isValid(self) -> \"SbBool\":\n return _coin.SbByteBuffer_isValid(self)", "def isValid(self, state: 'SoState') -> \"SbBool\":\n return _coin.SoCache_isValid(self, state)", "def test_signature():\n blockchain = Blockchain()\n blockchain.read_metadata()\n blockchain.read_address_pool_data()\n blockchain.read_genesis_data()\n block = blockchain._blocks[0]\n blockchain.verify_transaction('Eric Chen', block.transactions[0])", "def validate_regular_tx(self, tx):\n sig = tx.senderSignature\n header = tx.receiver.encode() + str(tx.amount).encode()\n if tx.senderPublicKey.verify(header, sig):\n return True\n else:\n return False", "def verify_block(self, block):\n\t\tsha = hasher.sha256('a')\n\t\tsha.update(\n\t\t\t\tstr(block.block_id) +\n\t\t\t\tstr(block.miner_id) + \n\t\t\t\tstr(block.timestamp) + \n\t\t\t\tstr(block.data) + \n\t\t\t\tstr(block.previous_hash))\n\t\tverify_hashed = sha.hexdigest()\n\t\tif verify_hashed != block.hash:\n\t\t\tprint(\"Miner ({}) could not verify the previous generated block.\", self.mid)\n\t\t\treturn 0.\n\t\treturn 1.", "def valid_checksum(self):\n (ck_a, ck_b) = self.checksum()\n d = self._buf[2:-2]\n (ck_a2, ck_b2) = struct.unpack('<BB', self._buf[-2:])\n return ck_a == ck_a2 and ck_b == ck_b2", "def validate_pow(self, block):\n compareStr='0'\n for idx in range(self.difficulty - 1):\n compareStr += '0'\n return block.getHeaderHash()[:self.difficulty] == compareStr and block.previousBlockHash == self.blockchain[-1].hash" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function to print entire blockchain for demo
def print_blockchain(self):
    print()
    print("-------------")
    print("Blockchain")
    print("-------------")
    for block in self.chain:
        print("-------------")
        print('Timestamp: ', block.timestamp)
        print('Transaction: ', block.transaction.__dict__)
        print('Previous Hash: ', block.previous_hash)
        print('Hash: ', block.hash)
        print("-------------")
[ "def view_blockchain():\n response = {\n 'chain': blockchain_db_manager.get_all_blocks(),\n 'length': blockchain_db_manager.get_length(),\n 'header': 'Full chain'\n }\n return render_template('chain.html', data=response)", "async def view_blockchain(request: Request):\n response = {\n \"chain\": blockchain_db_manager.get_all_blocks(),\n \"length\": blockchain_db_manager.get_length(),\n \"header\": \"Full chain\",\n }\n return templates.TemplateResponse(\n \"chain.html\", {\"request\": request, \"data\": response}\n )", "def output(blocked):\n print('''digraph blockers {\n layout=neato;\n overlap=false;\n sep=\"+1\";''')\n for issue, blocked_info in blocked.iteritems():\n special = not blocked_info['is-blocked-by']\n if special:\n print('{}'.format(node(issue, special)))\n for out_issue in blocked_info[\"blocks\"]:\n print('{}'.format(node(out_issue)))\n print('{} -> {};'.format(node(issue), node(out_issue)))\n# for in_issue in blocked_info[\"is-blocked-by\"]:\n# print '\"{}\" -> \"{}\";'.format(in_issue, issue)\n print('}')", "def produce_display(self):\n call('clear' if os.name == 'posix' else 'cls')\n curreny = self.vending_machine.get_currency()\n input_amount = self.vending_machine.get_input_amount()\n change_coins_state = self.vending_machine.get_current_change_status()\n print('---------------------------------------------')\n print(f'[Input amount]\\t\\t{input_amount} {curreny}')\n change_coin_text = '[Change]\\t'\n not_first = False\n for change_coins in change_coins_state.items():\n if not_first:\n change_coin_text += '\\t'\n not_first = True\n change_coin_text += f'\\t{str(change_coins[0])} {curreny} \\t {change_coins[1]}\\n'\n print(change_coin_text)\n return_gate_text = '[Return gate]\\t'\n # import ipdb; ipdb.set_trace()\n return_coins = self.vending_machine.get_change_coins_dict()\n\n if return_coins:\n return_coins_list = list(return_coins.keys())\n return_coins_list.sort()\n for return_coin in return_coins_list:\n for _ in range(0, return_coins[return_coin]):\n return_gate_text += f'\\t\\t{return_coin} {curreny}\\n'\n else:\n return_gate_text += 'Empty\\n'\n print(return_gate_text)\n\n items_for_sale_text = '[Items for sale]'\n product_details_list = self.vending_machine.get_product_details_list()\n not_first = False\n for product in product_details_list:\n if not_first:\n items_for_sale_text += '\\t\\t'\n not_first = True\n items_for_sale_text += f'\\t {product[\"id\"]}. 
{product[\"name\"]} \\t ' \\\n f'{product[\"price\"]} {curreny} \\t {product[\"status\"]} \\n'\n print(items_for_sale_text)\n outlet_text = f'[Outlet]'\n items_in_outlet = self.vending_machine.get_items_in_outlet_list()\n not_first = False\n for product_id in items_in_outlet:\n if not_first:\n outlet_text += '\\t'\n not_first = True\n outlet_text += f'\\t {self.vending_machine.get_product_details(product_id)[\"name\"]} \\n'\n print(outlet_text)\n if self.error_msg:\n print(f'Error : {self.error_msg}')\n self.error_msg = None\n print('---------------------------------------------')", "def dump_trace_block(self):\n header_data = self.read_trace_block()\n s = \"SEG-D Trace Block Header contents\\n\" \"---------------------------------\\n\"\n for block in header_data:\n s = \"%s\\t%s\\n\\t%s\\n\" % (s, block, \"-\" * len(block))\n for field in header_data[block]:\n if field == \"block_length_in_bytes\":\n continue\n s = \"%s\\t\\t%s: %s\\n\" % (\n s,\n header_data[block][field][\"description\"],\n header_data[block][field][\"value\"],\n )\n s = \"%s\\n\" % s\n print(s)", "def print_results(self):\n\n tot_blocks = reduce(lambda x, y: x + y, map(lambda x: x[\"block_number\"], self.struct.partial_payoff.values()))\n tot_payoff = reduce(lambda x, y: x + y, map(lambda x: x[\"payoff\"], self.struct.partial_payoff.values()))\n\n print(\"==========\")\n for miner in self.miners:\n print(\"Miner: {}\".format(miner.name))\n print(\"Hash Power: {} ({:.2f}%)\".format(self.h[miner.name], self.h[miner.name] * 100 / self.tot_h))\n print(\"Block Number: {} ({:.2f}%)\".format(self.struct.partial_payoff[miner.name][\"block_number\"], self.struct.partial_payoff[miner.name][\"block_number\"] * 100 / tot_blocks if tot_blocks else 0))\n print(\"Payoff: {:.2f} ({:.2f}%)\".format(self.struct.partial_payoff[miner.name][\"payoff\"], self.struct.partial_payoff[miner.name][\"payoff\"] * 100 / tot_payoff if tot_payoff else 0))\n print(\"==========\")", "def show_all_keys():\n\n private_keys = keychain.get_all_private_keys()\n if len(private_keys) == 0:\n print(\"There are no saved private keys.\")\n return\n print(\"Showing all private keys:\")\n for sk, seed in private_keys:\n print(\"\")\n print(\"Fingerprint:\", sk.get_g1().get_fingerprint())\n print(\"Master public key (m):\", sk.get_g1())\n print(\"Master private key (m):\", bytes(sk).hex())\n print(\n \"Farmer public key (m/12381/8444/0/0)::\",\n master_sk_to_farmer_sk(sk).get_g1(),\n )\n print(\"Pool public key (m/12381/8444/1/0):\", master_sk_to_pool_sk(sk).get_g1())\n print(\n \"First wallet key (m/12381/8444/2/0):\",\n master_sk_to_wallet_sk(sk, uint32(0)).get_g1(),\n )\n print(\n \"First wallet address:\",\n encode_puzzle_hash(create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(0)).get_g1())),\n )\n assert seed is not None\n mnemonic = bytes_to_mnemonic(seed)\n print(\" Mnemonic seed (24 secret words):\")\n print(mnemonic)", "def fetch_blockchain():\n get_chain_address = f\"{CONNECTED_NODE_ADDRESS}/chain\"\n response = requests.get(get_chain_address)\n if response.status_code == 200:\n chain_meta = json.loads(response.content)\n chain_content = []\n # TODO\n for block in chain_meta[\"chain\"]:\n chain_content.append(block)\n\n global blocks_to_show\n blocks_to_show = sorted(chain_content, key=lambda block: block['timestamp'],reverse=True)", "def view_genesis_block():\n response = {\n 'chain': [blockchain_db_manager.get_genesis_block()],\n 'length': 1,\n 'header': 'Genesis Block'\n }\n return render_template('chain.html', data=response)", "def 
print_output(center: VaccCenter):\n center = transform_open_hours(center)\n\n output = f\"\"\"\n____________________________________________________________________________________________________\nName: \\t {center.name}\nLink: \\t {center.link}\nID: \\t {center.vacc_id}\n\nRegion: {center.region}\nAddress: {center.info['address']}\n {center.info['address_spec']}\n\nPhone: {center.info['phone']}\nEmail: {center.info['email']}\n\nNote: {center.info['note']}\n\nVaccines: {center.info['vaccines'].replace(\"'\", \"\")}\nCenter type: {center.info['add_info'].replace(\"'\", \"\")}\nDaily capacity: {center.info['capacity']}\nChange of date: {center.info['changing_date'].replace(\"'\", \"\")}\n\nOpening hours:\n_________________________________\n Open | Closed\nMonday: | {center.open_hours['monday'][0]} | {center.open_hours['monday'][1]} |\nTuesday: | {center.open_hours['tuesday'][0]} | {center.open_hours['tuesday'][1]} |\nWednesday: | {center.open_hours['wednesday'][0]} | {center.open_hours['wednesday'][1]} |\nThursday: | {center.open_hours['thursday'][0]} | {center.open_hours['thursday'][1]} |\nFriday: | {center.open_hours['friday'][0]} | {center.open_hours['friday'][1]} |\nSaturday: | {center.open_hours['saturday'][0]} | {center.open_hours['saturday'][1]} |\nSunday: | {center.open_hours['sunday'][0]} | {center.open_hours['sunday'][1]} |\n____________________________________________________________________________________________________\n \"\"\"\n print(output)", "def __str__(self):\n sc = \"\\nNo. of Blocks: {l}\\n\".format(l=len(self.chain))\n\n offset = len(str(len(self.chain)))\n for i, block in enumerate(self.chain):\n sc += \"\\tBlock {n}. {h}\\n\".format(\n n=str(i).rjust(offset), h=str(block))\n\n sc += \"\\n\"\n\n return sc", "def print_change(coinlist):\n print(\"Can you tell if I am an honest machine? 
\")\n print(\"Give out the following change: \")\n valuelist=[\"Quarter(s): \", \"Dime(s): \", \"Nickel(s): \", \"Penny(s): \"]\n for item in range(4):\n print(valuelist[item]+str(coinlist[item]))", "def print_block(msg):\n Utils.print_separator()\n Utils.print_msg(msg)\n Utils.print_separator()", "def print_balances(self):\n print('Balances: ')\n table_data = [\n ['Owner',\n 'Totally owned',\n 'Partially owned',\n 'HTLC (if secret can be provided)']]\n\n pok = list(self.pub_outs.keys())\n for i in range(len(pok)):\n table_data.append([i] + self.get_balance(pok[i],self.pub_outs))\n table = AsciiTable(table_data)\n print(table.table)\n\n print('Balances (pending): ')\n table_data = [\n ['Owner',\n 'Totally owned',\n 'Partially owned',\n 'HTLC (if secret can be provided)']]\n\n popk = list(self.pub_outs_pend.keys())\n for i in range(len(popk)):\n table_data.append([i] + self.get_balance(popk[i],\n self.pub_outs_pend))\n table = AsciiTable(table_data)\n print(table.table)", "def print(self):\n print(\"----- KVClient Info -----\")\n print(\"client id: %d\" % self.get_id())\n print(\"data:\")\n for name, data in self._data_store.items():\n print(name)\n print(data)\n print(\"-------------------------\")", "def run_show(self):\n text = json_dumps(self.result, cls=StringEncoder, indent=4)\n print text", "def print_commits(self):\n\n for commit in self.repository_mining.traverse_commits():\n print(f'Commit : {commit.hash}')\n print(f'Parents : {commit.parents}')", "def _printBin(bin_):\n print('Bin has %d items:' % len(bin_), file=sys.stderr)\n for i, hashInfo in enumerate(bin_, start=1):\n print(' Item %d:' % i, file=sys.stderr)\n for key, value in hashInfo.items():\n # The 16 below is the length of the longest key (subjectTrigPoint).\n print(' %16s: %s' % (key, value), file=sys.stderr)", "def hexdump_block(self, block_num: int) -> str:\n\n byte_offset = self.master_offset + block_num * self.block_size\n self.f.seek(byte_offset)\n block_contents = self.f.read(self.block_size)\n\n # return hexdump(block_contents)\n return \"blah hexdump\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a link to test case failure in GitHub The link generated by this method should highlight the line that caused the failure
def github_testlog_failure_link(self, test_log): try: if self._mediator.ci_environment == 'asc': # for Molecule repo of repos pattern path = "/{}/{}/tree/{}/molecule/{}/{}".format(self._repo_fork, self._repo_name, self._git_sha, self._molecule_scenario, test_log.test_file) elif self._mediator.ci_environment == 'mk8s': base_dir = 'tools/installer' # this value is specific to mk8s and can not be derived from the XML path = "/{}/{}/tree/{}/{}/{}".format(self._repo_fork, self._repo_name, self._git_sha, base_dir, test_log.test_file) failure_line_number = self._get_line_number_from_failure_output(test_log) line = failure_line_number or test_log.def_line_number or '' if line: line = "L{}".format(line) return urlunsplit((self._scheme, self._netloc, path, '', line)) except AttributeError: pass # If we ask for the failure link and can't determine it we should silently fail
[ "def github_link(self):\n if self.test_type == TestType.commit:\n test_type = 'commit'\n test_id = self.commit\n else:\n test_type = 'pull'\n test_id = self.pr_nr\n\n return \"{base}/{test_type}/{test_id}\".format(\n base=self.fork.github_url, test_type=test_type, test_id=test_id)", "def test_wronglink(self):\n article = \"wwh.google.com\"\n self.assertRaises(manage_class.not_a_link, fact_checking_score_url,article)", "def add_failure(self, test_name):\n\n self.add_message(\n \"Dependency *{dependency_name}* failed.\",\n dependency_name = test_name\n )", "def test_broken_links(self):\n # Add broken link to test dir\n self.dir.add_link(\"broken\",\"missing.txt\")\n compute_md5sums('.',output_file=self.checksum_file,relative=True)", "def fail (self):\n \n import leoGlobals as g\n \n g.app.unitTestDict[\"fail\"] = g.callerName(2)", "def addExpectedFailure(self, test, err):\n assert err[0] == _ZTodo\n exn = err[1]\n assert isinstance(exn, _ZTodo)\n super(_ZTextTestResult, self).addExpectedFailure(test, exn.exc_info)", "def ShortExplanation(self):\n return 'failed: %s' % (self.message,)", "def format_bad_entities(test_name, test_results):\n msg = f\"{test_name}\\n\"\n for failure in test_results[\"test_failures\"]:\n msg += f\"Expected {failure[1]}: {failure[2]}\\n\"\n msg += f\"Observed {failure[1]}: {failure[3]}\\n\"\n\n return msg", "def test_issue_137(self):\n i = Issue(load('issue_137'))\n self.assertEqual(\n i.html_url,\n \"https://github.com/sigmavirus24/github3.py/pull/1\")\n self.assertEqual(i.repository, (\"sigmavirus24\", \"github3.py\"))", "def test_known_issue(self):\n self.fail_ki('See BZ123456')", "def test_linting_of_markdown_documentation_with_failure(self):\n if os.environ.get(\"APPVEYOR\", None):\n self.skipTest(\"\"\"installation of mdl is too slow on appveyor\"\"\")\n\n with open(os.path.join(os.getcwd(), \"documentation.md\"),\n \"wt\") as markdown_file:\n markdown_file.write(\"Level One\\n==\\n\\n## Level Two ##\\n\")\n markdown_file.flush()\n\n self.assertThat(\"check/project/lint.py\",\n CIScriptExitsWith(1,\n self.__class__.container,\n self.__class__.util,\n extensions=[\"other\"]))", "def format_bad_response(test_name, message, resp):\n return f\"Test {test_name} - Failed: {message}. 
Response: {resp}\"", "def pull_request_build_failed(pr, build_url, failure_message, request_info):\n comments_url = pr['_links']['comments']['href']\n url = _build_url(comments_url, request_info)\n comment_body = \"\"\"Build failed: {0}\n build url: {1}\n \"\"\".format(failure_message, build_url)\n\n return _json_resp(\n requests.post(\n url,\n data=json.dumps({\n 'body': dedent(comment_body)\n })\n )\n )", "def expected_fails_comment(self, phase, status):\n if phase not in self._fails:\n return \"\"\n\n if self._fails[phase] == status:\n return EXPECTED_FAILURE_COMMENT\n else:\n return \"{}: expected {})\".format(\n UNEXPECTED_FAILURE_COMMENT_START, self._fails[phase]\n )", "def link(desc,url):\n\treturn \"<a href='\" + url + \"'>\" + desc + \"</a>\"", "def summary_failures_with_teardonw(self):\n if self.config.option.tbstyle != \"no\":\n reports = self.getreports('failed')\n if not reports:\n return\n self.write_sep(\"=\", \"FAILURES\")\n for rep in reports:\n if self.config.option.tbstyle == \"line\":\n line = self._getcrashline(rep)\n self.write_line(line)\n else:\n msg = self._getfailureheadline(rep)\n markup = {'red': True, 'bold': True}\n self.write_sep(\"_\", msg, **markup)\n self._outrep_summary(rep)\n for report in self.getreports(''):\n if report.nodeid == rep.nodeid and report.when == 'teardown':\n self.print_teardown_sections(report)", "def markdown_link(title, url):\n return \"[{}]({})\".format(title, url)", "def test_file_preview_redirect_url_for_team_icon(self):\n pass", "def test_click_random_failed_button_case_status_failed(self, browser, login, logout):\n self.open_run_test_page_for_1st_test(browser)\n run_test_page = RunTestPage(browser)\n run_test_page.click_random_failed_btn()\n status = run_test_page.get_case_status()\n assert status == '❌ Failed', \"Status should be 'Passed' for test case\"\n run_test_page.back_to_suite_btn_click()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a GitHub compare link based on the attributes of this facade This method would be used when we have a last known pass of a given test We are making an assumption that the attributes of this facade are children of upstream_fork and upstream_base GitHub docs describing the compare view
def github_diff_link(self, upstream_fork, upstream_base): try: # These variable names are the language used by GitHub base_fork = self._repo_fork base = self._git_sha head_fork = upstream_fork compare = upstream_base path = "/{}/{}/compare/{}...{}:{}".format(base_fork, self._repo_name, base, head_fork, compare) return urlunsplit((self._scheme, self._netloc, path, '', '')) except AttributeError: return 'Unknown' # If we ask for the diff link and can't determine it we will supply 'Unknown'
[ "def github_link(self):\n if self.test_type == TestType.commit:\n test_type = 'commit'\n test_id = self.commit\n else:\n test_type = 'pull'\n test_id = self.pr_nr\n\n return \"{base}/{test_type}/{test_id}\".format(\n base=self.fork.github_url, test_type=test_type, test_id=test_id)", "def get_github_compare_url(last_sha1):\n return '%s/repos/%s/%s/compare/%s...%s' % (GITHUB_API_BASE, MOZILLA_GITHUB_ACCOUNT, DEEPSPEECH_GITHUB_PROJ, last_sha1, DEEPSPEECH_GITHUB_REF)", "def add_comparison_table_step(self, **kwargs):\n kwargs.setdefault(\"attributes\", self.DEFAULT_TABLE_ATTRIBUTES)\n\n def make_comparison_tables():\n for rev1, rev2 in itertools.combinations(self._revisions, 2):\n compared_configs = []\n for config in self._configs:\n config_nick = config.nick\n compared_configs.append(\n (\"%s-%s\" % (rev1, config_nick),\n \"%s-%s\" % (rev2, config_nick),\n \"Diff (%s)\" % config_nick))\n report = CompareConfigsReport(compared_configs, **kwargs)\n outfile = os.path.join(\n self.eval_dir,\n \"%s-%s-%s-compare\" % (self.name, rev1, rev2)\n + \".\" + report.output_format)\n report(self.eval_dir, outfile)\n\n def publish_comparison_tables():\n for rev1, rev2 in itertools.combinations(self._revisions, 2):\n outfile = os.path.join(\n self.eval_dir,\n \"%s-%s-%s-compare\" % (self.name, rev1, rev2)\n + \".html\")\n subprocess.call(['publish', outfile])\n\n self.add_step(Step(\"make-comparison-tables\", make_comparison_tables))\n self.add_step(Step(\"publish-comparison-tables\", publish_comparison_tables))", "def test_get_url_on_diff_viewer(self):\n request = RequestFactory().request()\n request.resolver_match = Mock()\n request.resolver_match.url_name = 'view-diff'\n\n self.assertEqual(self.action.get_url({'request': request}),\n 'raw/')", "def test_get_url_on_diff_viewer_revision(self):\n request = RequestFactory().request()\n request.resolver_match = Mock()\n request.resolver_match.url_name = 'view-diff-revision'\n\n self.assertEqual(self.action.get_url({'request': request}),\n 'raw/')", "def href_template(self) -> str:\n return self.__href_template", "def GetDiffImageLink(self, name: str) -> str:\n assert name in self._comparison_results\n return self._comparison_results[name].local_diff_diff_image", "def team_compare():\n return render_template('teamCompare.html')", "def test_get_latest_version_link(self):\n study = factories.StudyFactory.create()\n ssv1 = factories.SourceStudyVersionFactory.create(study=study, i_version=1)\n self.assertEqual(study.get_latest_version_link(), ssv1.dbgap_link)\n ssv2 = factories.SourceStudyVersionFactory.create(study=study, i_version=2)\n self.assertEqual(study.get_latest_version_link(), ssv2.dbgap_link)", "def href(self,) -> str:\r\n raise NotImplementedError()", "def GetGivenImageLink(self, name: str) -> str:\n assert name in self._comparison_results\n return self._comparison_results[name].local_diff_given_image", "def sort_link(self, name, id):\n # XXX This ought to be in a library or something.\n sort_by, sort_reverse = self._getSortInfo()\n url = self.absolute_url() + '/manage_stats?sort_by=' + id\n newsr = 0\n if sort_by == id:\n newsr = not sort_reverse\n url = url + '&sort_reverse=' + (newsr and '1' or '0')\n return f'<a href=\"{escape(url, 1)}\">{escape(name)}</a>'", "def test_get_url_on_review_request(self):\n request = RequestFactory().request()\n request.resolver_match = Mock()\n request.resolver_match.url_name = 'review-request-detail'\n\n review_request = self.create_review_request()\n\n self.assertEqual(\n self.action.get_url({\n 'request': 
request,\n 'review_request': review_request,\n }),\n '/r/%s/diff/raw/' % review_request.display_id)", "def test_late_home_link(self):\n award = models.GivingProjectGrant.objects.get(projectapp_id=1)\n award.agreement_mailed = timezone.now() - timedelta(days = 400)\n award.save()\n\n response = self.client.get('/apply/')\n\n self.assertTemplateUsed('grants/org_home.html')\n award = models.GivingProjectGrant.objects.get(projectapp_id=1)\n self.assertContains(response, '<a href=\"/report/%d\">' % award.pk)", "def test_anchor_tag(self):\n \n link = Link.objects.get(pk=1) \n str = \"<a href='%s' target='_blank'>%s</a>\" % (link.href, link.title)\n \n self.assertEqual(link.anchor_tag(), str)", "def ui_link_process(self, process):\n return \"{}/clarity/work-details/{}\".format(process.uri.split(\"/api\")[0], process.id.split(\"-\")[1])", "def regenerate(self):\n return mark_safe(u'<a href=\"%s/fetch/\">%s - %s</a>' % (self.id, _('Fetch'), self.title))", "def webui_link(self):\n return '{0:s}/#binary/{1:s}'.format(self._cb.url, self.md5sum)", "def get_url(self):\n return '%s' % (self.review_url)", "def comparison(web):\n ctx = webutil.changectx(web.repo, web.req)\n if b'file' not in web.req.qsparams:\n raise ErrorResponse(HTTP_NOT_FOUND, b'file not given')\n path = webutil.cleanpath(web.repo, web.req.qsparams[b'file'])\n\n parsecontext = lambda v: v == b'full' and -1 or int(v)\n if b'context' in web.req.qsparams:\n context = parsecontext(web.req.qsparams[b'context'])\n else:\n context = parsecontext(web.config(b'web', b'comparisoncontext'))\n\n def filelines(f):\n if f.isbinary():\n mt = pycompat.sysbytes(\n mimetypes.guess_type(pycompat.fsdecode(f.path()))[0]\n or r'application/octet-stream'\n )\n return [_(b'(binary file %s, hash: %s)') % (mt, hex(f.filenode()))]\n return f.data().splitlines()\n\n fctx = None\n parent = ctx.p1()\n leftrev = parent.rev()\n leftnode = parent.node()\n rightrev = ctx.rev()\n rightnode = scmutil.binnode(ctx)\n if path in ctx:\n fctx = ctx[path]\n rightlines = filelines(fctx)\n if path not in parent:\n leftlines = ()\n else:\n pfctx = parent[path]\n leftlines = filelines(pfctx)\n else:\n rightlines = ()\n pfctx = ctx.p1()[path]\n leftlines = filelines(pfctx)\n\n comparison = webutil.compare(context, leftlines, rightlines)\n if fctx is not None:\n rename = webutil.renamelink(fctx)\n ctx = fctx\n else:\n rename = templateutil.mappinglist([])\n ctx = ctx\n\n return web.sendtemplate(\n b'filecomparison',\n file=path,\n symrev=webutil.symrevorshortnode(web.req, ctx),\n rename=rename,\n leftrev=leftrev,\n leftnode=hex(leftnode),\n rightrev=rightrev,\n rightnode=hex(rightnode),\n comparison=comparison,\n **pycompat.strkwargs(webutil.commonentry(web.repo, ctx))\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Attempts to pull the failure line number from failure output
def _get_line_number_from_failure_output(self, test_log): regex = re.escape(test_log.test_file) + r':(\d+)' match = re.search(regex, test_log.full_failure_output) if match: return match.group(1) else: return ''
[ "def parse_error_output(err) -> tuple:\n lines = err.split(\"\\n\")[:-1]\n line = lines[5]\n split_line = line.split(\" \")\n cursor_line = int(split_line[-3][:-1])\n cursor_column = int(split_line[-1][:-1])\n return cursor_line, cursor_column", "def __find_first_error_line(log_lines: typing.List[str]) -> typing.Optional[int]:\n\n index = None\n for index in range(len(log_lines) - 1, -1, -1):\n if (\n ('ERROR' in log_lines[index])\n or\n ('Collected exception:' in log_lines[index])\n ):\n break\n else:\n # we looked all the way back but never found an error line\n return None\n return index", "def _get_error(self, output):\n\n lines = self._get_lines(output)\n\n code = None\n text = None\n for line in lines:\n match = re.search(r'^Error\\s+number\\s+(0x[0-9a-f]+)\\s+\\((\\d+)\\)\\.\\s*$', line)\n if match:\n code = int(match.group(2))\n elif code is not None:\n text = line\n break\n\n if code is None:\n return CryptoProError('\\n'.join(lines))\n if text is None:\n text = 'Error {}'.format(code)\n\n return CryptoProError(text, code)", "def get_line_number(self):\n return self.line_number", "def _FindFileLine(outbuffer, line, fname, regex):\n match = regex.findall(outbuffer.GetLine(line))\n ifile = None\n if len(match):\n ifile = match[0][0]\n try:\n line = max(int(match[0][1]) - 1, 0)\n except (IndexError, TypeError):\n line = 0\n\n # If not an absolute path then the error is relative to the\n # script that produced this error message.\n if ifile is not None and not os.path.isabs(ifile):\n dname = os.path.split(fname)[0]\n ifile = os.path.join(dname, ifile)\n\n return (ifile, line)", "def get_num_error_failures(self):\n return self.fails", "def _check_output(text):\n # check line by line of text\n for line in text.splitlines():\n # check if error message is in the beginning of the line\n if line[1:11] in ZVM_CODES:\n # return error message\n return line[1:11], ZVM_CODES[line[1:11]]\n\n # check for general errors\n re_match = re.search(ERROR_REGEX, line)\n if re_match:\n error_code = re_match.group(1)\n error_msg = re_match.group(2)\n if error_msg is None:\n error_msg = ''\n return error_code, error_msg.strip()\n\n # return None if no error code was found\n return None", "def check_compilation_failure(output, failure_expects):\n\n fails = list(failure_expects)\n for line in output.strip().split('\\n'):\n # Check we haven't found the source printed out again...\n if EXPECT_ERR_PATTERN.search(line):\n continue\n if fails and fails[0] in line:\n fails.pop(0)\n if fails:\n raise OutputMissingError(output, fails)", "def check_failures(log_file_name, number_lines, mail_creds_file):\n try:\n last_n_status = os.popen(\"tail -{} {}\".format(number_lines, log_file_name)).read()\n last_n_status = last_n_status.split(\"\\n\")[:number_lines]\n last_n_status = list({i.split(\": \")[1] for i in last_n_status})\n\n if \"SUCCESSFUL\" not in last_n_status:\n send_failure_alert_mail(mail_creds_file)\n except Exception as error:\n print(error)\n system_exit_error(\"CheckFailureError\")", "def get_err_file_and_line(tb=None):\n if not tb:\n tb = sys.exc_info()[2]\n\n filename, lineno, _context, _line = traceback.extract_tb(tb)[-1]\n\n return filename, lineno", "def extract_line(text, line_number):\n\n text_lines = text.split(\"\\n\")\n\n if len(text_lines) > 0:\n return text_lines[line_number - 1]\n\n return False", "def parse_cobalt_step_status(output: str, step_id: str) -> str:\n status = \"NOTFOUND\"\n for line in output.split(\"\\n\"):\n fields = line.split()\n if len(fields) >= 2:\n if fields[0] == 
step_id:\n status = fields[1]\n break\n return status", "def handle_stderr(self, line):\n pass", "def line_number(self, line):\n ret_val = self._line_number(line)\n return ret_val", "def _InstallFailureType(output):\n m = INSTALL_FAILURE_REGEXP.match(output)\n if m:\n return m.groups()[0]\n return 'UNKNOWN'", "def __line__():\n import inspect\n frame = inspect.stack()[1][0]\n return inspect.getframeinfo(frame).lineno", "def report_num(self):\n try:\n local_report_num = int(self.lines[2].strip())\n except IndexError:\n local_report_num = -1\n return local_report_num", "def get_corresponding_lineno(self, lineno):\r\n for template_line, code_line in reversed(self.debug_info):\r\n if code_line <= lineno:\r\n return template_line\r\n return 1", "def _get_line_number(file_lines, pattern):\n return next(i for i, line in enumerate(file_lines) if pattern in line) + 1", "def _GetFailedOutputNodes(self, line):\n # Possible format:\n # FAILED: obj/path/to/file.o\n # FAILED: target.exe\n # FAILED: \"target with space in name\"\n failed_output_nodes = []\n\n while line:\n quote_index = line.find('\"')\n if quote_index < 0:\n sub_part = line\n remaining_part = None\n else:\n sub_part = line[:quote_index]\n match_quote_index = line.find('\"', quote_index + 1)\n if match_quote_index < 0:\n return [] # Return an empty list for unexpected format.\n failed_output_nodes.append(line[quote_index + 1:match_quote_index])\n remaining_part = line[match_quote_index + 1:]\n line = remaining_part\n\n for node in sub_part.split(' '):\n node = node.strip()\n if node:\n failed_output_nodes.append(node)\n\n return failed_output_nodes" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A helper to remove .git from the end of a string if found
def _strip_git_ending(self, path): if path.endswith('.git'): path = path[:-4] return path
[ "def remove_ext(string):\n index = string.rfind('.')\n if index == -1 or index == len(string)-1:\n print(\"Can't find extension in {}!\".format(string))\n return None\n return string[:index]", "def remove_filext(s):\n dot = s.rfind('.')\n if dot == -1: return s\n return s[:dot]", "def _strip_protocol_and_add_git(url: Optional[str]) -> Optional[str]:\n if not url:\n return None\n return url.split(\"://\")[1] + \".git\"", "def _get_repo_name_from_url(url: str) -> str:\n\n last_slash_index = url.rfind(\"/\")\n last_suffix_index = url.rfind(\".git\")\n if last_suffix_index < 0:\n last_suffix_index = len(url)\n\n if last_slash_index < 0 or last_suffix_index <= last_slash_index:\n raise Exception(\"Badly formatted url {}\".format(url))\n\n return url[last_slash_index + 1:last_suffix_index]", "def removeSuffix(name):\n\n edits = name.split('.')\n\n if len(edits) < 2:\n return name\n\n suffix = '.' + edits[-1]\n nameNoSuffix = name[:-len(suffix)]\n\n return nameNoSuffix", "def strip_revision_specifiers(path):\n at_pos = path.rfind('@')\n if at_pos != -1:\n return path[0:at_pos]\n else:\n hash_pos = path.rfind('#')\n if hash_pos != -1:\n return path[0:hash_pos]\n return path", "def _repo_name_from_url(url_decode: str):\n github_project_name = os.path.split(url_decode.path)[-1]\n return github_project_name.replace('.git', '')", "def _extr_str_wo_last_slash(string):\n indices = [m.start(0) for m in re.finditer(\"/\", string)]\n if indices:\n string = string[indices[-1]+1:]\n string = re.sub(\".json\", \"\", string)\n return string", "def trim(s, x):\n if s.endswith(x):\n s = s.rsplit(x,1)[0]\n return s", "def _strip_build_suffix_from_identifier(identifier):\n # split away official SemVer 2 build specifications if used\n if \"+\" in identifier:\n return identifier.split(\"+\", maxsplit=1)[0]\n\n # split away our custom build specification: something ending in either\n # . or - followed by three or more digits, a dot, an commit sha of four\n # or more alphanumeric characters.\n return re.sub(r\"[-\\.]n\\d{3,}\\.h\\w{4,}\\Z\", \"\", identifier)", "def git_branch_ending():\n if 'CI_COMMIT_REF_NAME' in os.environ:\n # We are inside gitlab-runner, so branches are not checkout.\n # Solution is to pick the name for them ENV variable.\n name = os.environ['CI_COMMIT_REF_NAME'].strip()\n else:\n from sh import git, ErrorReturnCode\n try:\n result = git('rev-parse', '--abbrev-ref', 'HEAD')\n except ErrorReturnCode as error:\n raise Ci3Error(\"Failed to get the name of the current git branch: %s\" % error)\n name = result.strip()\n # Cutting the non-matching prefix.\n ending = re.search('[a-zA-Z0-9_\\-]*$', name)\n if ending is None:\n raise Ci3Error(\"Name of the branch can be only of alphanumerical, \"\n \"underscore, minus\")\n return ending.group()", "def strip_zip_suffix(filename):\n if filename.endswith('.gz'):\n return filename[:-3]\n elif filename.endswith('.bz2'):\n return filename[:-4]\n else:\n return filename", "def clean_path(self, path):\n return path.replace('../', '').lstrip('/')", "def put_star_on(s: str) -> str:\n return s[::-1].replace('.', '.*.', 1)[::-1]", "def _remove_capsule_name(capsule_name, fullname):\n parts = fullname.split(\".\")\n return \".\".join(parts[1:])", "def clean_path(path):\n if sys.platform in [\"win32\", \"cygwin\", \"msys\"]:\n path_clean = re.sub(r\"[<>:|?*\\\"\\/\\\\]\", \"-\", path)\n # This checks for strings that end in ... 
or similar,\n # weird corner case that affects fewer than 0.1% of titles\n path_clean = re.sub(r\"(.)[.]\\1+$\", \"-\", path_clean)\n return path_clean\n return path", "def RemoveSlash( dirName ):\n return dirName[:-1] if dirName.endswith('/') else dirName", "def remove_extension(path):\n\n for special_ext in special_extensions:\n if path.endswith(special_ext):\n return path[:-len(special_ext)]\n\n return os.path.splitext(path)[0]", "def clean_dir_name(name: str) -> str:\n return name.replace(\"/\", \"_\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the git_sha found by this facade
def git_sha(self): return self._git_sha
[ "def repo_get_sha(self):\n raise NotImplementedError('Method repo_get_sha not implemented in root(Git*Connect) class')", "def get_commit_hash():\n return git.Repo().head.object.hexsha", "def sha(self):\n return self._commit.hexsha", "def get_git_commit_sha():\n\n return os.getenv(\"GIT_COMMIT\")", "def get_git_hash():\n try:\n with open(os.devnull, \"w\") as shutup:\n return (\n subprocess.check_output([\"git\", \"rev-parse\", \"HEAD\"], stderr=shutup)\n .decode(\"utf-8\")\n .strip(\"\\n\")\n )\n except subprocess.CalledProcessError:\n return \"not_a_git_repo\"", "def get_git():\n try:\n from subprocess import Popen, PIPE\n\n gitproc = Popen(['git', 'show-ref'], stdout=PIPE)\n (stdout, stderr) = gitproc.communicate()\n\n gitproc = Popen(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], stdout=PIPE)\n (branch, stderr) = gitproc.communicate()\n branch = branch.split('\\n')[0]\n for row in stdout.split('\\n'):\n if row.find(branch) != -1:\n hash = row.split()[0]\n break\n except:\n hash = None\n branch = None\n return hash, branch", "def get_repo_git_commit_hash(repo_path):\n import subprocess\n\n githash = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd = repo_path).strip()\n # Typecast to fix python3 TypeError (Object of type bytes is not JSON serializable)\n # subprocess.check_output() returns a byte string\n githash = str(githash)\n\n return githash", "def get_commit_sha(self, project, branch):\n\n try:\n commit_sha = subprocess.check_output(\n [self.repo_bin, 'forall', project, '-c',\n f'git show-ref --hash $REPO_REMOTE/{branch}'],\n cwd=self.product_dir, stderr=subprocess.STDOUT\n ).decode()\n except subprocess.CalledProcessError as exc:\n print(f'The \"repo forall\" command failed: {exc.output}')\n sys.exit(1)\n\n return commit_sha.strip()", "def get_git():\n hash=None\n try:\n from subprocess import Popen, PIPE\n\n gitproc = Popen(['git', 'show-ref'], stdout=PIPE, encoding='utf8')\n (stdout, stderr) = gitproc.communicate()\n\n gitproc = Popen(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], stdout=PIPE, encoding='utf8')\n (branch, stderr) = gitproc.communicate()\n branch = branch.split('\\n')[0]\n for row in stdout.split('\\n'):\n if row.find(branch) != -1:\n hash = row.split()[0]\n break\n except:\n hash = None\n branch = None\n return hash, branch", "def get_head_sha(self, path): # type: (str) -> str\n command = [\n 'git',\n 'rev-parse',\n 'HEAD',\n ]\n abspath = os.path.abspath(path)\n self.logger.debug('Using dir ' + abspath)\n self.logger.debug('Executing ' + ' '.join(command))\n output = subprocess.check_output(command, cwd=abspath)\n sha = output.decode('UTF-8').strip()\n assert len(sha) == 40, 'Invalid SHA: \"%s\"' % sha\n return sha", "def get_head_sha():\n from sh import git, ErrorReturnCode\n try:\n result = git('rev-parse', 'HEAD')\n except ErrorReturnCode as error:\n raise Ci3Error(\"Failed to get SHA1 of the local git HEAD: %s\" % error)\n return result.strip()", "def get_git_revision_hash():\n try:\n # We are not interested in gits complaints (stderr > /dev/null)\n git_hash = subprocess.check_output(\n [\"git\", \"rev-parse\", \"HEAD\"], stderr=subprocess.DEVNULL, encoding=\"utf8\"\n )\n # ie. 
\"git\" was not found\n # should we return a more generic meta hash here?\n # like \"undefined\"?\n except FileNotFoundError:\n git_hash = \"git_not_available\"\n except subprocess.CalledProcessError:\n # Ditto\n git_hash = \"no_repository\"\n return git_hash.rstrip()", "def sha(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sha\")", "def _GetHash(self):\n assert self._hooks_project, \"Must have hooks to calculate their hash.\"\n\n # We will use the work_git object rather than just calling\n # GetRevisionId(). That gives us a hash of the latest checked in version\n # of the files that the user will actually be executing. Specifically,\n # GetRevisionId() doesn't appear to change even if a user checks out a\n # different version of the hooks repo (via git checkout) nor if a user\n # commits their own revs.\n #\n # NOTE: Local (non-committed) changes will not be factored into this\n # hash. I think this is OK, since we're really only worried about\n # warning the user about upstream changes.\n return self._hooks_project.work_git.rev_parse(HEAD)", "def test_repo_get_single_commit_by_sha(self):\n pass", "def get_git_revision():\n try:\n import git\n except ImportError:\n return UNKNOWN\n try:\n path = os.path.dirname(__file__)\n gitrepo = git.Git(path)\n return gitrepo.rev_list(\"HEAD\").splitlines()[0]\n except git.GitCommandError:\n return UNKNOWN", "def get_last_hash(self):\n last_commit_hash = subprocess.check_output(\n ['git', 'rev-parse', 'HEAD'],\n universal_newlines=True, cwd=self._destination\n )\n return last_commit_hash.strip()", "def get_commit(sha):\n commit = get_from_github(f'repos/streamlit/streamlit/commits/{sha}')\n return commit", "def _get_latest_file_blob_sha(self):\n blob_sha = None\n dir_contents = self.github_repo.get_dir_contents(self.GITHUB_DIR)\n for content_file in dir_contents:\n if content_file.name == self.github_file:\n blob_sha = content_file.sha\n break\n return blob_sha", "def latest_hash() -> str:\n ret = subprocess.run([\"git\", \"rev-parse\", \"HEAD\"], capture_output=True, check=True)\n assert ret.returncode == 0, \"Failed to get latest commit hash.\"\n commit_hash = ret.stdout.decode(\"utf-8\").strip()\n return commit_hash" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inexact Augmented Lagrange Multiplier
def inexact_augmented_lagrange_multiplier(X, lmbda=.01, tol=1e-3, maxiter=100, verbose=True): Y = X norm_two = norm(Y.ravel(), 2) norm_inf = norm(Y.ravel(), np.inf) / lmbda dual_norm = np.max([norm_two, norm_inf]) Y = Y / dual_norm A = np.zeros(Y.shape) E = np.zeros(Y.shape) dnorm = norm(X, 'fro') mu = 1.25 / norm_two rho = 1.5 sv = 10. n = Y.shape[0] itr = 0 while True: Eraw = X - A + (1 / mu) * Y Eupdate = np.maximum(Eraw - lmbda / mu, 0) + np.minimum(Eraw + lmbda / mu, 0) U, S, V = svd(X - Eupdate + (1 / mu) * Y, full_matrices=False) svp = (S > 1 / mu).shape[0] if svp < sv: sv = np.min([svp + 1, n]) else: sv = np.min([svp + round(.05 * n), n]) Aupdate = np.dot(np.dot(U[:, :svp], np.diag(S[:svp] - 1 / mu)), V[:svp, :]) A = Aupdate E = Eupdate Z = X - A - E Y = Y + mu * Z mu = np.min([mu * rho, mu * 1e7]) itr += 1 if ((norm(Z, 'fro') / dnorm) < tol) or (itr >= maxiter): break if verbose: print("Finished at iteration %d" % (itr)) return A, E
[ "def GN_integral(b2, Lspan, a_dB, gam, f_ch, rs, roll_off, power, Nch, model_param):\n alpha_lin = a_dB / 20.0 / np.log10(np.e) # Conversion in linear units 1/km\n min_FWM_inv = np.power(10, model_param['min_FWM_inv'] / 10) # Conversion in linear units\n n_grid = model_param['n_grid']\n n_grid_min = model_param['n_grid_min']\n f_array = model_param['f_array']\n fmax = (f_ch[-1] - (rs[-1] / 2)) - (f_ch[0] - (rs[0] / 2)) # Get frequency limit\n f2eval = np.max(np.diff(f_ch))\n Bopt = f2eval * Nch # Overall optical bandwidth [THz]\n min_step = f2eval / n_grid # Minimum integration step\n max_step = f2eval / n_grid_min # Maximum integration step\n f_dense_start = np.abs(\n np.sqrt(np.power(alpha_lin, 2) / (4 * np.power(np.pi, 4) * b2 * b2) * (min_FWM_inv - 1)) / f2eval)\n f_ind_eval = 0\n GNLI = np.full(f_array.size, np.nan) # Pre-allocate results\n for f in f_array: # Loop over f\n f_dense_low = f - f_dense_start\n f_dense_up = f + f_dense_start\n if f_dense_low < -fmax:\n f_dense_low = -fmax\n if f_dense_low == 0.0:\n f_dense_low = -min_step\n if f_dense_up == 0.0:\n f_dense_up = min_step\n if f_dense_up > fmax:\n f_dense_up = fmax\n f_dense_width = np.abs(f_dense_up - f_dense_low)\n n_grid_dense = np.ceil(f_dense_width / min_step)\n df = f_dense_width / n_grid_dense\n # Get non-uniformly spaced f1 array\n f1_array = get_freqarray(f, Bopt, fmax, max_step, f_dense_low, f_dense_up, df)\n G1 = raised_cosine_comb(f1_array, rs, roll_off, f_ch, power) # Get corresponding spectrum\n Gpart = np.zeros(f1_array.size) # Pre-allocate partial result for inner integral\n f_ind = 0\n for f1 in f1_array: # Loop over f1\n if f1 != f:\n f_lim = np.sqrt(np.power(alpha_lin, 2) / (4 * np.power(np.pi, 4) * b2 * b2) * (min_FWM_inv - 1)) / (\n f1 - f) + f\n f2_dense_up = np.maximum(f_lim, -f_lim)\n f2_dense_low = np.minimum(f_lim, -f_lim)\n if f2_dense_low == 0:\n f2_dense_low = -min_step\n if f2_dense_up == 0:\n f2_dense_up = min_step\n if f2_dense_low < -fmax:\n f2_dense_low = -fmax\n if f2_dense_up > fmax:\n f2_dense_up = fmax\n else:\n f2_dense_up = fmax\n f2_dense_low = -fmax\n f2_dense_width = np.abs(f2_dense_up - f2_dense_low)\n n2_grid_dense = np.ceil(f2_dense_width / min_step)\n df2 = f2_dense_width / n2_grid_dense\n # Get non-uniformly spaced f2 array\n f2_array = get_freqarray(f, Bopt, fmax, max_step, f2_dense_low, f2_dense_up, df2)\n f2_array = f2_array[f2_array >= f1] # Do not consider points below the bisector of quadrants I and III\n if f2_array.size > 0:\n G2 = raised_cosine_comb(f2_array, rs, roll_off, f_ch, power) # Get spectrum there\n f3_array = f1 + f2_array - f # Compute f3\n G3 = raised_cosine_comb(f3_array, rs, roll_off, f_ch, power) # Get spectrum over f3\n G = G2 * G3 * G1[f_ind]\n if np.count_nonzero(G):\n FWM_eff = fwm_eff(alpha_lin, Lspan, b2, (f1 - f) * (f2_array - f)) # Compute FWM efficiency\n Gpart[f_ind] = 2 * np.trapz(FWM_eff * G, f2_array) # Compute inner integral\n f_ind += 1\n # Compute outer integral. 
Nominal span loss already compensated\n GNLI[f_ind_eval] = 16 / 27 * gam * gam * np.trapz(Gpart, f1_array)\n f_ind_eval += 1 # Next frequency\n return GNLI # Return GNLI array in W/THz", "def lrm_analytic(lg):\n # Unpack parameters\n γ, ρ, σ, σ_c, μ_c= lg.γ, lg.ρ, lg.σ, lg.σ_c, lg.μ_c \n\n t = σ**2 / (1 - ρ)**2\n return np.exp(μ_c + (1 - γ) * 0.5 * (σ_c**2 + t))", "def exponential_decay_correction(ysig, tau: float, amp: float,\n sampling_rate: float=1):\n\n # alpha ~1/8 is like averaging 8 samples, sets the timescale for averaging\n # larger alphas break the approximation of the low pass filter\n # numerical instability occurs if alpha > .03\n alpha = 1 - np.exp(-1/(sampling_rate*tau*(1+amp)))\n # the coefficient k (and the filter transfer function)\n # depend on the sign of amp\n\n if amp >= 0.0:\n k = amp/(1+amp-alpha)\n # compensating overshoot by a low-pass filter\n # correction filter y[n] = (1-k)*x[n] + k*u[n] = x[n] + k*(u[n]-x[n])\n # where u[n] = u[n-1] + alpha*(x[n] - u[n-1])\n a = [(1-k + k*alpha), -(1-k)*(1-alpha)]\n else:\n k = -amp/(1+amp)/(1-alpha)\n # compensating low-pass by an overshoot\n # correction filter y[n] = (1+k)*x[n] - k*u[n] = x[n] - k*(u[n]-x[n])\n # where u[n] = u[n-1] + alpha*(x[n] - u[n-1])\n a = [(1 + k - k*alpha), -(1+k)*(1-alpha)]\n\n # while the denominator stays the same\n b = [1, -(1-alpha)]\n # if alpha > 0.03 the filter can be unstable.\n\n # hint: to get the inverse just use the filter with (a, b, ysig)\n filtered_signal = signal.lfilter(a, b, ysig)\n return filtered_signal", "def _calIntegralMat(self):\n self.integralMat = self.mat.astype('float64').cumsum(axis=1).cumsum(axis=0)", "def ReloadAdaptiveInfectionRate(self):\n\n self.CalculateAlpha()\n\n # Set aliases\n x, y = self.BaseInfectionRate, self.Alpha\n\n # Define lambda\n self.AdaptiveInfectionRate = lambda n: x+(1-x)*(1-1/(1+y*(n-1)))\n\n return", "def max_lag():\n\treturn 180", "def x_max_lag(self):\n return self._scala.xregMaxLag()", "def ins_to_ann(interest_rate):\n\n return np.expm1(interest_rate)", "def intervaluate(x, y, implication_operator=_imp.reichenbach_implication):\n res = np.zeros((list(x.shape) + [2]))\n res[..., 0] = 1 - implication_operator(x, y)\n res[..., 1] = 1 - implication_operator(x, y) + y\n return res", "def g_acc(alt):\r\n g = (G*Me)/((Re + alt)**2)\r\n return g", "def _alpha(self, interval):\n return 1 - math.exp(-interval / self.period)", "def update_voltage(self, step_number, timestep, artificial_stimulus):\n \"\"\"\n update voltage function:\n based on previous 100 neurons, the sigmoid center is adjusted\n \"\"\"\n prev_step_number = step_number - 1\n prev_v = self.voltage_history[prev_step_number] if self.output_history[prev_step_number] == 0 else\\\n self.param_dict[\"refractory_voltage\"]\n v_ext = 0\n for p in self.pre:\n v_ext += p.get_input(step_number, timestep)\n exponent = np.exp(-1 * timestep / self.param_dict[\"time_constant\"])\n self.voltage_history[step_number] = exponent * prev_v + (1 - exponent) * v_ext + artificial_stimulus\n p_val = np.random.random()\n comp = self.param_dict[\"sigmoid_slope\"]\n adjusted_sigmoid = self.calcium_inhibition_value(step_number, timestep) + self.param_dict[\"sigmoid_center\"]\n\n if comp != 0:\n sc = 1 / (1 + np.exp(-1 * (self.voltage_history[step_number] - adjusted_sigmoid) / comp))\n self.output_history[step_number] = sc > p_val\n else:\n self.output_history[step_number] = self.voltage_history[step_number] > adjusted_sigmoid", "def precomputed_aug_experiment(\n clf,\n auged_featurized_x_train,\n 
auged_featurized_y_train,\n auged_featurized_x_train_to_source_idxs,\n auged_featurized_x_test,\n auged_featurized_y_test,\n auged_featurized_x_test_to_source_idxs,\n aug_iter,\n train_idxs_scores,\n n_aug_sample_points,\n update_scores=False,\n weight_aug_samples=False,\n use_loss=False,\n stratified_sampling_x_train_ks=None,\n):\n influence_acc = []\n aug_iter_idxs = []\n original_mask_train = auged_featurized_x_train_to_source_idxs < 0\n original_x_train = auged_featurized_x_train[original_mask_train]\n original_y_train = auged_featurized_y_train[original_mask_train]\n auged_x_train = np.copy(original_x_train)\n auged_y_train = np.copy(original_y_train)\n n_aug_sample_points = set(n_aug_sample_points)\n if weight_aug_samples:\n sample_weight = np.ones(len(original_x_train))\n else:\n sample_weight = None\n if stratified_sampling_x_train_ks is not None:\n aug_idxs = stratified_sampling_to_aug_idxs(\n train_idxs_scores,\n aug_iter,\n stratified_sampling_x_train_ks,\n )\n else:\n aug_idxs = np.array(list(aug_iter(train_idxs_scores))).flatten()\n assert len(np.unique(aug_idxs)) == len(aug_idxs)\n already_auged = set()\n while len(already_auged) < len(original_x_train):\n assert len(train_idxs_scores) == len(original_x_train)\n next_idxs = [idx for idx in aug_idxs if idx not in already_auged]\n idx = next_idxs[0]\n already_auged.add(idx)\n aug_mask = auged_featurized_x_train_to_source_idxs == idx\n x_aug_ = auged_featurized_x_train[aug_mask]\n auged_x_train = np.concatenate(\n [\n auged_x_train,\n x_aug_,\n ],\n axis=0)\n y_aug_ = auged_featurized_y_train[aug_mask]\n auged_y_train = np.concatenate(\n [\n auged_y_train,\n y_aug_,\n ],\n axis=0)\n if weight_aug_samples:\n # We downweight all points from the original train point\n rescale_weight = 1.0 / (len(x_aug_) + 1)\n weight_aug_ = np.full(len(x_aug_), rescale_weight)\n sample_weight = np.concatenate([\n sample_weight,\n weight_aug_,\n ],\n axis=0)\n sample_weight[idx] = rescale_weight\n if len(already_auged) in n_aug_sample_points:\n fit_params = {\"logistic_reg__sample_weight\": sample_weight}\n clf.fit(auged_x_train, auged_y_train, **fit_params)\n aug_train_poisoned_acc = clf.score(\n auged_featurized_x_test,\n auged_featurized_y_test)\n influence_acc.append(aug_train_poisoned_acc)\n aug_iter_idxs.append(idx)\n if update_scores:\n if isinstance(clf, sklearn.model_selection.GridSearchCV):\n if use_loss:\n train_idxs_scores = (clf\n .best_estimator_\n .named_steps[\"logistic_reg\"]\n .log_losses(L2_alpha=0.0))\n else:\n train_idxs_scores = (clf\n .best_estimator_\n .named_steps[\"logistic_reg\"]\n .LOO_influence())\n else:\n if use_loss:\n train_idxs_scores = (clf\n .named_steps[\"logistic_reg\"]\n .log_losses(L2_alpha=0.0))\n else:\n train_idxs_scores = (clf\n .named_steps[\"logistic_reg\"]\n .LOO_influence())\n train_idxs_scores = train_idxs_scores[:len(original_x_train)]\n if stratified_sampling_x_train_ks is not None:\n aug_idxs = stratified_sampling_to_aug_idxs(\n train_idxs_scores,\n aug_iter,\n stratified_sampling_x_train_ks,\n )\n else:\n aug_idxs = np.array(\n list(aug_iter(train_idxs_scores))\n ).flatten()\n return influence_acc, aug_iter_idxs", "def update_voltage(self, step_number, timestep, artificial_stimulus):\n \"\"\"\n update voltage function:\n based on previous 100 neurons, the sigmoid center is adjusted\n \"\"\"\n prev_step_number = step_number - 1\n prev_v = self.voltage_history[prev_step_number] if self.output_history[prev_step_number] == 0 else\\\n self.param_dict[\"refractory_voltage\"]\n v_ext = 0\n for p in 
self.pre:\n v_ext += p.get_input(step_number, timestep)\n\n v_ext += self.get_ca_self_inhib(step_number, timestep)\n\n exponent = np.exp(-1 * timestep / self.param_dict[\"time_constant\"])\n self.voltage_history[step_number] = exponent * prev_v + (1 - exponent) * v_ext + artificial_stimulus\n p_val = np.random.random()\n comp = self.param_dict[\"sigmoid_slope\"]\n if comp != 0:\n sc = 1 / (1 + np.exp(-1 * (self.voltage_history[step_number] - self.param_dict[\"sigmoid_center\"]) / comp))\n\n self.output_history[step_number] = sc > p_val\n else:\n self.output_history[step_number] = self.voltage_history[step_number] > self.param_dict[\"sigmoid_center\"]", "def l1l2_regularization(data, labels, mu, tau, beta=None, kmax=100000,\n tolerance=1e-5, return_iterations=False,\n adaptive=False):\n n, d = data.shape\n\n # beta starts from 0 and we assume also that the previous value is 0\n if beta is None:\n beta = np.zeros(d)\n else:\n beta = beta.ravel()\n\n # Useful quantities\n X = data\n Y = labels.ravel()\n\n if n > d:\n XTY = np.dot(X.T, Y)\n\n # First iteration with standard sigma\n sigma = _sigma(data, mu)\n if sigma < np.finfo(float).eps: # is zero...\n return np.zeros(d), 0\n\n mu_s = mu / sigma\n tau_s = tau / (2.0 * sigma)\n nsigma = n * sigma\n\n # Starting conditions\n aux_beta = beta\n t = 1.\n\n # for k in xrange(kmax):\n for k in range(kmax): # CHELSEA EDIT!!!\n # Pre-calculated \"heavy\" computation\n if n > d:\n precalc = XTY - np.dot(X.T, np.dot(X, aux_beta))\n else:\n precalc = np.dot(X.T, Y - np.dot(X, aux_beta))\n\n # Soft-Thresholding\n value = (precalc / nsigma) + ((1.0 - mu_s) * aux_beta)\n beta_next = np.sign(value) * np.clip(np.abs(value) - tau_s, 0, np.inf)\n\n ######## Adaptive step size #######################################\n if adaptive:\n beta_diff = (aux_beta - beta_next)\n\n # Only if there is an increment of the solution\n # we can calculate the adaptive step-size\n if np.any(beta_diff):\n # grad_diff = np.dot(XTn, np.dot(X, beta_diff))\n # num = np.dot(beta_diff, grad_diff)\n tmp = np.dot(X, beta_diff) # <-- adaptive-step-size drawback\n num = np.dot(tmp, tmp) / n\n\n sigma = (num / np.dot(beta_diff, beta_diff))\n mu_s = mu / sigma\n tau_s = tau / (2.0*sigma)\n nsigma = n * sigma\n\n # Soft-Thresholding\n value = (precalc / nsigma) + ((1.0 - mu_s) * aux_beta)\n beta_next = np.sign(value) * np.clip(np.abs(value) - tau_s, 0, np.inf)\n\n ######## FISTA ####################################################\n beta_diff = (beta_next - beta)\n t_next = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t*t))\n aux_beta = beta_next + ((t - 1.0)/t_next)*beta_diff\n\n # Convergence values\n max_diff = np.abs(beta_diff).max()\n max_coef = np.abs(beta_next).max()\n\n # Values update\n t = t_next\n beta = beta_next\n\n # Stopping rule (exit even if beta_next contains only zeros)\n if max_coef == 0.0 or (max_diff / max_coef) <= tolerance: break\n\n if return_iterations:\n return beta.reshape(-1, 1), k+1\n return beta.reshape(-1, 1)", "def _lcalc(self):\n # this will require using Rubinstein's L-calc\n raise NotImplementedError", "def integrand(t, N, h, a, eizw, L):", "def _prox_tvl1(\n input_img,\n l1_ratio=0.05,\n weight=50,\n dgap_tol=5.0e-5,\n x_tol=None,\n max_iter=200,\n check_gap_frequency=4,\n fista=True,\n init=None,\n):\n dtype= input_img.dtype\n weight = float(weight)\n input_img_flat = input_img.reshape(-1)\n input_img_norm = np.dot(input_img_flat, input_img_flat)\n if not input_img.dtype.kind == \"f\":\n input_img = input_img.astype(dtype)\n shape = [len(input_img.shape) + 1] + 
list(input_img.shape)\n grad_im = np.zeros(shape).astype(dtype)\n grad_aux = np.zeros(shape).astype(dtype)\n t = 1.0\n i = 0\n lipschitz_constant = 1.1 * (\n 4 * input_img.ndim * (1 - l1_ratio) ** 2 + l1_ratio**2\n )\n\n # negated_output is the negated primal variable in the optimization\n # loop\n negated_output = -input_img if init is None else -init\n\n dgap = np.inf\n\n # A boolean to control if we are going to do a fista step\n fista_step = fista\n\n while i < max_iter:\n grad_tmp = _gradient_id(negated_output, l1_ratio=l1_ratio)\n grad_tmp *= 1.0 / (lipschitz_constant * weight)\n grad_aux += grad_tmp\n grad_tmp = _projector_on_tvl1_dual(grad_aux, l1_ratio)\n\n # Careful, in the next few lines, grad_tmp and grad_aux are a\n # view on the same array, as _projector_on_tvl1_dual returns a view\n # on the input array\n t_new = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t * t))\n t_factor = (t - 1.0) / t_new\n if fista_step:\n grad_aux = (1 + t_factor) * grad_tmp - t_factor * grad_im\n else:\n grad_aux = grad_tmp\n grad_im = grad_tmp\n t = t_new\n gap = weight * _div_id(grad_aux, l1_ratio=l1_ratio)\n\n # Compute the primal variable\n negated_output = gap - input_img\n if (i % check_gap_frequency) == 0:\n if x_tol is None:\n old_dgap = dgap\n dgap = _dual_gap_prox_tvl1(\n input_img_norm,\n -negated_output,\n gap,\n weight,\n l1_ratio=l1_ratio,\n )\n if dgap < dgap_tol:\n break\n if old_dgap < dgap:\n # M-FISTA strategy: switch to an ISTA to have\n # monotone convergence\n fista_step = False\n elif fista:\n fista_step = True\n i += 1\n\n # Compute the primal variable, however, here we must use the ista\n # value, not the fista one\n output = input_img - weight * _div_id(grad_im, l1_ratio=l1_ratio)\n return output, dict(converged=(i < max_iter))", "def priorLikelihood(self, step):", "def mi(x, maxlag = 50):\n \n assert (type(maxlag) is int) and (maxlag > 0)\n # initialize variables\n binrule=\"fd\"\n x = (x-np.mean(x))/np.std(x,ddof=1)\n n = len(x)\n lags = np.arange(0, maxlag, dtype=\"int\")\n mi = np.zeros(len(lags))\n # loop over lags and get MI\n for i, lag in enumerate(lags):\n # extract lagged data\n y1 = x[:n - lag].copy()\n y2 = x[lag:].copy()\n # use np.histogram to get individual entropies\n H1, be1 = entropy1d(y1, binrule)\n H2, be2 = entropy1d(y2, binrule)\n H12, _, _ = entropy2d(y1, y2, [be1, be2])\n # use the entropies to estimate MI\n mi[i] = H1 + H2 - H12\n\n return mi, lags" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if item is missing (not cloned)
def _is_missing(self, item): dst = '{}/{}'.format(self._data_list[item], item.split()[0]) if os.path.exists(dst): # it is bare repo who knows return 'maybe' return True
[ "def _missing(self, album):\n item_mbids = [x.mb_trackid for x in album.items()]\n if len(list(album.items())) < album.albumtotal:\n # fetch missing items\n # TODO: Implement caching that without breaking other stuff\n album_info = hooks.album_for_mbid(album.mb_albumid)\n for track_info in getattr(album_info, 'tracks', []):\n if track_info.track_id not in item_mbids:\n item = _item(track_info, album_info, album.id)\n self._log.debug('track {0} in album {1}',\n track_info.track_id, album_info.album_id)\n yield item", "def NO_EXISTING_ITEM():\r\n ###TODO must be query db or get request\r\n return {\r\n \"item_id\":\"100\", \r\n }", "def has_item(self, usage_key):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def is_singleton(item):\n return isinstance(item, Item) and not item.album_id", "def is_excluded_item(target):", "def __ne__(self, playable_item):\r\n return self.content != playable_item.content", "def no_item_selected(self) -> bool:\n return self.selected_item is None or self.selected_item == -1", "def test_bad_add_to_cart_no_inventory(self, avail):\n itemgroup = ItemGroup.objects.get(pk=1)\n item = itemgroup.item_set.first()\n\n # Set all items in the set to an unavailbility condition\n item.availability = avail\n item.save()\n\n with self.assertRaises(ValidationError):\n manager.add_to_cart(self.cart, item.id)\n\n # Our cart was empty, so it shouldn't create a key-value pair\n self.assertNotIn(item.id, self.cart)", "def ismissing(data):\n mm = missing_marker(data)\n if mm == NotImplemented:\n arr = np.empty(data.shape, dtype=bool)\n arr.fill(False)\n return arr\n else:\n if isinstance(data, la.larry):\n x = data.x\n else:\n x = data \n if mm != mm:\n return np.isnan(x)\n else:\n return x == [mm]", "def test_bad_checkout_item_unavailable(self, avail):\n item = Item.objects.get(pk=1)\n # Oh no, the item we want isn't available\n item.availability = avail\n item.save()\n\n items = Item.objects.all()\n\n with self.assertRaises(ValidationError):\n manager.checkout_items(items, self.due_date, self.user)", "def is_missing(obj):\n return getattr(obj, \"moya_missing\", False)", "def __ne__(self, playable_item):\n if not isinstance(playable_item, MusicServiceItem):\n return True\n return self.content != playable_item.content", "def is_item_owned(self, item):\n if item in self.items:\n return True\n return False", "def __undefinedNamesContain(self, name):\n return len(self.unList.findItems(name, Qt.MatchExactly)) > 0", "def _item_exists(self, item: Item) -> bool:\n return self._build_item_path(item).is_file()", "def __bool__(self):\n return not hasattr(self, 'missing')", "def empty(self, exc=[]):\n attrs = self.get_own_attrs()\n return not set(attrs.keys()).difference(set(exc))", "def test_delitem_missing(self):\n # Get a reference out here to make sure we don't get an exception\n # from an unexpected place\n data_values = self.record.data_values\n with self.assertRaises(KeyError):\n del data_values['no_such_key']", "def isempty(self):\r\n return not self._values" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize a AddressBase. If the source is not None, then object is initialized from values of the source object.
def __init__(self, source=None): self.address_list = list(map(Address, source.address_list)) if source else []
[ "def test_BridgeAddressBase_init(self):\n self.assertIsNone(self.bab._address)\n self.assertIsNone(self.bab._fingerprint)", "def __init__(self, strict=True, **kwargs):\n # Only common fields are allowed to be set directly.\n unknown_fields = set(kwargs).difference(self.BASE_FIELD_IDS)\n if unknown_fields:\n raise KeyError(\n \"{!r} fields are not allowed to be set freely.\".format(\n unknown_fields))\n\n # Normalized field's IDs and values of the address are stored here.\n self._fields = dict.fromkeys(self.BASE_FIELD_IDS)\n\n # Load provided fields.\n for field_id, field_value in kwargs.items():\n self[field_id] = field_value\n\n # Normalize addresses fields.\n self.normalize(strict=strict)", "def init(self):\n if not valid_ovsdb_addr(self.ovsdb_addr):\n raise ValueError('Invalid OVSDB address: %s' % self.ovsdb_addr)\n if self.br_name is None:\n self.br_name = self._get_bridge_name()", "def __init__(self, address1=None, address2=None, city=None, country=None, fax=None, phone=None, postal_code=None, state_or_province=None):\n self.swagger_types = {\n 'address1': 'str',\n 'address2': 'str',\n 'city': 'str',\n 'country': 'str',\n 'fax': 'str',\n 'phone': 'str',\n 'postal_code': 'str',\n 'state_or_province': 'str'\n }\n\n self.attribute_map = {\n 'address1': 'address1',\n 'address2': 'address2',\n 'city': 'city',\n 'country': 'country',\n 'fax': 'fax',\n 'phone': 'phone',\n 'postal_code': 'postalCode',\n 'state_or_province': 'stateOrProvince'\n }\n\n self._address1 = address1\n self._address2 = address2\n self._city = city\n self._country = country\n self._fax = fax\n self._phone = phone\n self._postal_code = postal_code\n self._state_or_province = state_or_province", "def __init__(self, source, arch, sink):\n\n if isinstance(source, UHDBase):\n source = source.uhd\n if isinstance(sink, UHDBase):\n sink = sink.uhd\n\n if source and not isinstance(source, tuple):\n source = (source, 0)\n if arch and not isinstance(arch, tuple):\n arch = (arch, 0)\n if sink and not isinstance(sink, tuple):\n sink = (sink, 0)\n\n\n self._source = source\n self._arch = arch\n self._sink = sink\n\n self._state = APath.PENDING", "def __init__(self, address, available, last_used):\n\n self.address = address\n self.available = available\n self.last_used = last_used\n\n pass", "def test_set_address_with_constructor(self, mws_credentials):\n address = {\n \"name\": \"Roland Deschain\",\n \"address_1\": \"500 Summat Cully Lane\",\n \"city\": \"Gilead\",\n }\n inbound_constructed = InboundShipments(**mws_credentials, from_address=address)\n expected = {\n \"ShipFromAddress.Name\": \"Roland Deschain\",\n \"ShipFromAddress.AddressLine1\": \"500 Summat Cully Lane\",\n \"ShipFromAddress.AddressLine2\": None,\n \"ShipFromAddress.City\": \"Gilead\",\n \"ShipFromAddress.DistrictOrCounty\": None,\n \"ShipFromAddress.StateOrProvinceCode\": None,\n \"ShipFromAddress.PostalCode\": None,\n \"ShipFromAddress.CountryCode\": \"US\",\n }\n assert inbound_constructed.from_address == expected", "def __init__(self, addr, addr_index=None, label=None):\n if addr_index:\n if not isinstance(addr_index, int) or not addr_index > 0:\n raise ValueError(\n \"SubAddress index must be an integer bigger than 0\"\n )\n\n super().__init__(addr, label)\n self.index = addr_index", "def __init__(self, xarray_obj):\n super(GeoBase, self).__init__(xarray_obj)\n self._geometry = None", "def setUp(self):\n # Setting and validating `from_address` is already covered by\n # `SetShipFromAddressTestCase`. 
We don't need to re-test that logic:\n # we just need to set the address on the instance, which can be done\n # after the class is instantiated, by calling `set_ship_from_address`.\n super().setUp()\n\n self.addr = {\n \"name\": \"Roland Deschain\",\n \"address_1\": \"500 Summat Cully Lane\",\n \"city\": \"Gilead\",\n \"country\": \"Mid-World\",\n }\n self.api.set_ship_from_address(self.addr)", "def __init__(self, source_field, target_field):\n self.source_field = source_field\n self.target_field = target_field", "def __init__(self, rawdata=None, header=None, rtes=None):\n self.header = None\n self.rtes = []\n\n if rawdata:\n self._init_from_net(rawdata)\n elif header and rtes:\n self._init_from_self(header, rtes)\n else:\n raise ValueError(\"Invalid data to initialize a RIPv2 packet\")", "def address_obj(self):\n if not self._address_obj:\n self.address()\n return self._address_obj", "def __init__(self, address=None, x=0., y=0., identifier=\"\"):\n Point.__init__(self, address=address, x=x, y=y, identifier=identifier)", "def __init__(self, address: int, *, mask: int = 0, extended: bool = False):\n self.address = address\n self.mask = mask\n self.extended = extended", "def __init__(self, address: int, dp: int = 0) -> None:\n self._nominal_address = address\n self._dp = dp", "def __init__(self):\n this = _coin.new_SoTextureCoordinateBinding()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def init_range(init_address):\r\n new_range = init_address.copy()\r\n new_range[\"house_numbers\"] = [new_range.pop(\"house_nbr\")]\r\n new_range[\"points\"] = [new_range.pop(\"shape@\").firstPoint]\r\n return new_range", "def __init__(self, addrSet: ghidra.program.model.address.AddressSetView, dataType: ghidra.program.model.data.DataType):\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge the list of addresses from acquisition with our own.
def _merge_address_list(self, acquisition): address_list = self.address_list[:] for addendum in acquisition.get_address_list(): for address in address_list: equi = address.is_equivalent(addendum) if equi == IDENTICAL: break elif equi == EQUAL: address.merge(addendum) break else: self.address_list.append(addendum)
[ "def nextAddresses(self) -> List[ghidra.program.model.address.Address]:\n ...", "def __init__(self, source=None):\n self.address_list = list(map(Address, source.address_list)) if source else []", "def merge(self, acquisition):\n # TODO what to do with sort and display?\n self._merge_privacy(acquisition)\n self._merge_surname_list(acquisition)\n self._merge_note_list(acquisition)\n self._merge_citation_list(acquisition)", "def merge(self, acquisition):\n self._merge_privacy(acquisition)\n self._merge_tag_list(acquisition)", "def _getAddresses(self, *ues):\n return [self._s1_util.get_ip(ue.ue_id) for ue in ues]", "def address(cls):\n for address in ADDRESS_LIST.values():\n entry = models.Address(**address)\n db.session.add(entry)", "def importAddresses(self):\n self.addressTable = read_csv(self.addressFilePath, header = 0)", "def get_from_addresses(self):\n addresses = self.get_all_addr_header('Resent-From')\n if addresses:\n for address in addresses:\n yield address\n else:\n for key in FROM_HEADERS:\n for address in self.get_all_addr_header(key):\n yield address", "def getVolatileAddresses(self) -> ghidra.program.model.address.AddressSetView:\n ...", "def extract_addresses(elem):\n address_dict_all = list()\n wos_id = extract_wos_id(elem)\n addresses = elem.findall('./static_data/fullrecord_metadata/addresses/address_name')\n for address in addresses:\n address_dict = dict()\n address_spec = address.find('address_spec')\n addr_no = address_spec.attrib.get('addr_no', '')\n for tag in ['city', 'state', 'country', 'zip', 'full_address']:\n if address_spec.find(tag) is not None:\n address_dict[tag] = address_spec.find(tag).text\n else:\n address_dict[tag] = ''\n if address_spec.find('organizations') is not None:\n organizations = '; '.join([oraginization.text for oraginization in address_spec.find('organizations')])\n else:\n organizations = ''\n if address_spec.find('suborganizations') is not None:\n suborganizations = '; '.join([s.text for s in address_spec.find('suborganizations')])\n else:\n suborganizations = ''\n address_dict.update({'wos_id': wos_id,\n 'addr_no': addr_no,\n 'organizations': organizations,\n 'suborganizations': suborganizations})\n address_dict_all.append(address_dict)\n return address_dict_all", "def _address_scraper(self, soup: bs4.element) -> None:\n if soup.find('div', attrs={'class': 'loc'}).find('a'):\n location = soup.find('div', attrs={'class': 'loc'}).find('a').text.split(', ')\n self._city = location[1] if len(location) > 1 else location[0]\n self._apartment_info_dict['address'] = location[0] if len(location) > 1 else None\n if '›' in self._city: # for scrap yerevan from location like that Վայոց Ձոր › Եղեգնաձոր\n self._city = self._city.split('› ')[-1]\n if 'Երևան' in location[0]: # for scrap yerevan from location like that Երևան › Քանաքեռ Զեյթուն\n self._city = 'Երևան'\n self._apartment_info_dict['address'] = location[0].split('› ')[-1]\n else:\n self._apartment_info_dict['address'] = None", "def _clear_address(self):\n for part_addr in [\n \"street\",\n \"house\",\n \"slash\",\n \"letter\",\n \"corpus\",\n \"building\",\n \"room\",\n \"hotel\",\n \"num_address_type\",\n \"region\",\n \"area\",\n \"location\",\n \"place\",\n ]:\n setattr(self, part_addr, \"\")", "def addresses(self):\n addrs = {u.recieved_raw['ingress-address']\n for u in self.all_joined_units}\n return list(sorted(addrs))", "def read_adresses(self):\n\n with open(\"storage/adresses.csv\") as addresses:\n\n csv_reader = csv.reader(addresses)\n for row in csv_reader:\n\n yield row", "def 
_update_addresses(device, address_data, is_management=False):\n ipaddress_ids = []\n for ip in address_data:\n try:\n ipaddress = IPAddress.objects.get(address=ip)\n except IPAddress.DoesNotExist:\n ipaddress = IPAddress(address=ip)\n ipaddress.device = device\n ipaddress.is_management = is_management\n ipaddress.save(update_last_seen=False)\n ipaddress_ids.append(ipaddress.id)\n # Disconnect the rest of addresses from this device\n for ipaddress in IPAddress.objects.filter(\n device=device,\n is_management=is_management,\n ).exclude(id__in=ipaddress_ids):\n ipaddress.device = None\n ipaddress.save(update_last_seen=False)", "def aggregateAddresses(self, address1, address2):\n\t\tquery = 'SELECT * from addresses where id = %s or id = %s'\n\t\tself.executeQuery(query, (address1, address2))\n\t\tresult = self.fetchAll()\n\t\tif result == None:\n\t\t\tself.insertCluster()\n\t\t\tself.insertAddress(Address.Address(address1, self.lastId))\n\t\t\tself.insertAddress(Address.Address(address2, self.lastId))\n\t\telif len(result) > 1:\n\t\t\treturn\n\t\telif result[0] == address1:\n\t\t\tself.insertAddress(Address.Address(address2, self.lastId))\n\t\telse:\n\t\t\tself.insertAddress(Address.Address(address1, self.lastId))\n\t\tself.commit()", "def _action_import_adresses(self, cr, uid, data, context):\n logger = netsvc.Logger()\n error_report = [u'Error report']\n add_obj = pooler.get_pool(cr.dbname).get('res.partner')\n add_ids = add_obj.search(cr, uid, [])\n addresses = add_obj.browse(cr, uid, add_ids)\n phone_fields = ['phone', 'fax', 'mobile']\n for add in addresses:\n vals = {}\n vals['partner_id'] = add.partner_id.id\n vals['email'] = add.email\n vals['phone'] = add.phone\n vals['fax'] = add.fax\n vals['mobile'] = add.mobile\n vals['name'] = add.firstname\n vals['street'] = add.street\n vals['street2'] = add.street2\n vals['city'] = add.city\n # Validating the mail\n if add.email :\n if re.match(\n \"^.+\\\\@(\\\\[?)[a-zA-Z0-9\\\\-\\\\.]+\\\\.([a-zA-Z]{2,3}|[0-9]{1,3})(\\\\]?)$\", add.email) is None or\\\n re.search(u\"[éèàêöüäï&]\", add.email) is not None:\n msg=u'Addresse %s for partner %s has email that is invalid %s'%(\n unicode(vals['firstname']) + ' '+unicode(vals['lastname']),\n add.partner_id.name,\n unicode(add.email)\n )\n logger.notifyChannel('ldap export', netsvc.LOG_INFO, msg)\n error_report.append(msg)\n vals['email'] = False\n # Validating the Phone\n for key in phone_fields :\n if not unicode(vals[key]).startswith('+') or unicode(vals[key]).find(\"\\n\") != -1\\\n or re.search(u\"[éèàêöüä#&]\", unicode(vals[key])) is not None:\n vals[key] = False\n msg = u'Addresse %s for partner %s has %s that is invalid '%(\n unicode(vals['firstname']) + ' '+unicode(vals['lastname']),\n add.partner_id.name,\n key\n )\n logger.notifyChannel('ldap export', netsvc.LOG_INFO, msg)\n error_report.append(msg)\n # Validating the CN\n if not add.lastname and add.firstname:\n msg = u'!!! 
Addresse %s for partner %s has no last name and first name that is valid partner name was used'%(\n unicode(add.id),\n add.partner_id.name,\n )\n logger.notifyChannel('ldap export', netsvc.LOG_INFO, msg)\n error_report.append(msg)\n # We save to LDAP\n add.write(vals, {'init_mode':True})\n #we by pass the encoding errors\n map(lambda x: unicodedata.normalize(\"NFKD\",x).encode('ascii','ignore'), error_report)\n error_report = \"\\n\".join(error_report)\n logger.notifyChannel(\"MY TOPIC\", netsvc.LOG_ERROR, error_report)\n try:\n data= base64.encodestring(error_report.encode())\n except Exception, e:\n data= base64.encodestring(\"Could not generate report file. Please look in the log for details\")\n\n return {'errors': data}", "def addresses(self) -> typing.List[str]:\n return typing.cast(\n typing.List[str],\n self._properties.get(\"addresses\"),\n )", "def get_to_addresses(self):\n addresses = self.get_all_addr_header('Resent-To')\n addresses.extend(self.get_all_addr_header('Resent-Cc'))\n if addresses:\n for address in addresses:\n yield address\n else:\n for key in TO_HEADERS:\n for address in self.get_all_addr_header(key):\n yield address" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Access the parameters of the layer.

Returns
-------
Tuple[mygrad.Tensor]
    The slope of the PReLU unit.
def parameters(self):
        return (self.slope,)
[ "def LMLgrad(self):\n return _core.CGPkronSum_LMLgrad(self)", "def LMLgrad(self):\n return _core.CGPSum_LMLgrad(self)", "def LMLgrad_X(self):\n return _core.CGPbase_LMLgrad_X(self)", "def gradient(self):\n gx, gy = np.gradient(self.zz)\n return gx, gy", "def GetGradient(self, x):\n return _handle.OperatorHandle_GetGradient(self, x)", "def LMLgrad(self, *args):\n return _core.CGPbase_LMLgrad(self, *args)", "def grad_ReLU(self):\n grad = np.zeros(self.x.shape)\n grad[self.x <= 0] = 0\n grad[self.x > 0] = 1\n return grad", "def LMLgrad(self):\n return _core.CGPkronecker_LMLgrad(self)", "def _get_slope(self):\n return self._slope", "def requires_grad(self):\n return self.param_info.requires_grad", "def updateParameterGradient(self):\n\n self.gradient += gpu.sum(self.to_port.getDelta(), 0)", "def compute_gradients(self,loss):\n\t\tgrads = T.grad(loss, self.rnn.params)\n\t\treturn zip(self.rnn.params, grads)", "def grad(self, u):\n l = self.ldis\n\n ur = eldot(l.Dr, u)\n us = eldot(l.Ds, u)\n ux = self.rx*ur + self.sx*us\n uy = self.ry*ur + self.sy*us\n return ux, uy", "def _gradient(self, _x, _y):\n ### YOUR CODE HERE\n dl_dwx = self.softmax(_x) - _y\n dl_dx = np.matmul(_x.reshape(self.n_features,1), dl_dwx.reshape(1,self.k))\n _g = dl_dx\n return _g\n ### END YOUR CODE", "def _grad(self, coeffs: np.ndarray, out: np.ndarray) -> np.ndarray:\n if out is None:\n out = self.__grad\n\n n_samples = self.get_info(\"n_samples\")\n labels = self.labels\n tosses = self.tosses\n features = self.features\n bias, intercept, beta = self.__get_params(coeffs)\n prob = (labels / tosses).reshape(n_samples, 1)\n z = bias + intercept + features.dot(beta)\n # TODO: overflow proof logistic\n s = 1 / (1 + np.exp(-z))\n delta = tosses * (s - prob).reshape(n_samples, 1)\n # Gradient with respect to the bias\n out[0] = delta.mean()\n # Gradient with respect to the intercept\n out[1:n_samples + 1] = delta.reshape(n_samples, ) / n_samples\n # Gradient with respect to the beta\n out[n_samples + 1:] = (delta * features).mean(axis=0)\n return out", "def get_vectorized_parameter_updates(self, with_bias=True):\n params = self.get_forward_parameter_list(with_bias)\n return torch.cat([p.grad.view(-1).detach() for p in params])", "def get_layer_output_grad(model, inputs, outputs, layer=-1):\r\n grads = model.optimizer.get_gradients(model.total_loss, model.layers[layer].output)\r\n symb_inputs = (model._feed_inputs + model._feed_targets + model._feed_sample_weights)\r\n f = K.function(symb_inputs, grads)\r\n x, y, sample_weight = model._standardize_user_data(inputs, outputs)\r\n output_grad = f(x + y + sample_weight)\r\n return output_grad", "def _get_grad(self, module):\n if module.__class__.__name__ == 'Conv2d':\n # n_filters * (in_c * kw * kh)\n grad = module.weight.grad.data.view(module.weight.grad.data.size(0), -1) \n else:\n grad = module.weight.grad.data\n if module.bias is not None:\n grad = torch.cat([grad, module.bias.grad.data.view(-1, 1)], 1)\n return grad", "def grad_input(self, x):\n # Compute the gradient of the mean function.\n d_kernel = self.kernel.grad_input(x, self.X)\n d_mean = d_kernel.T.dot(self.alpha)\n # Compute the gradient of the standard deviation function. It is\n # absolutely crucial to note that the predict method returns the\n # variance, not the standard deviation, of the prediction.\n sd = np.sqrt(self.predict(x)[1])\n K_cross = self.kernel.cov(x, self.X)\n M = spla.cho_solve((self.L, True), K_cross.T).ravel()\n d_sd = -d_kernel.T.dot(M) / sd\n return d_mean, d_sd" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that get_metrics goes ok with an empty index
def test_get_default_metrics_empty(tmpdir):
    config = DEFAULT_CONFIG
    tmppath = pathlib.Path(tmpdir) / ".wily"
    config.cache_path = str(tmppath)
    tmppath.mkdir()
    (tmppath / "git").mkdir()
    with open(tmppath / "git" / "index.json", "w+") as f:
        f.write("[]")
    metrics = cache.get_default_metrics(config)
    assert metrics == []
[ "def test_get_metrics(self):\n pass", "def test_metrics_are_zero(self):\n verifier = MetricVerifier(self.impalad_test_service)\n verifier.verify_metrics_are_zero()", "def testGetEmptyStats(self):\n print(\"--------------\")\n print(\"Test getStats with no elements\")\n expected = (\n \"[]\"\n )\n print(expected)\n result = self.ac.getStats()\n print(result)\n self.assertEqual(result, expected)\n print(\"test success\")", "def test_index_out_of_range(self):\n job_set = self._jm.run([self._qc], backend=self.fake_api_backend)\n result_manager = job_set.results()\n with self.assertRaises(IBMQJobManagerJobNotFound):\n result_manager.get_counts(1)", "def test_empty(self):\n self.assert_tensor_equal([], index_tensor([]))", "def test_get_route_metrics_no_matches(db_mock):\n\n date = datetime.utcnow().replace(minute=0, second=0, microsecond=0)\n before_date = (date + timedelta(hours=-1))\n\n populate_mock_db(db_mock, date)\n\n args = {\n 'limit': 10,\n 'start_time': before_date,\n 'path': 'tracks/some_hash',\n 'query_string': 'with_users=WRONG',\n 'exact': False\n }\n metrics = get_route_metrics(args)\n\n assert not metrics", "def test_get_route_metrics_non_exact(db_mock):\n\n date = datetime.utcnow().replace(minute=0, second=0, microsecond=0)\n before_date = (date + timedelta(hours=-1))\n\n populate_mock_db(db_mock, date)\n\n args = {\n 'limit': 10,\n 'start_time': before_date,\n 'path': 'tracks/some_hash',\n 'exact': False\n }\n metrics = get_route_metrics(args)\n\n assert len(metrics) == 1\n assert metrics[0]['count'] == 11\n assert metrics[0]['unique_count'] == 2", "def test_index_requires_index_value(self):\n self.assertRaises(\n usage.UsageError, self.make_counter, index_field=\"foo\")", "def testAccessEmptyTable(self):\n results = [(idx,) for idx in self.manager.snimpyEmptyDescr]\n self.assertEqual(results, [])", "def test_index_view_with_no_walks(self):\n response = self.client.get(reverse('rate_my_walk:index'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'There are no walks present.')\n self.assertQuerysetEqual(response.context['enjoyment'], [])\n self.assertQuerysetEqual(response.context['recent'], [])", "def test_query_result_number_index(mock_get):\n resp = do_query(mock_get)\n df = resp[0].to_df()\n assert_is_not_none(df)", "def test_metric_default_return():\n\n from foreshadow.metrics import MetricWrapper\n\n def test(X):\n raise Exception\n\n metric_wrapper = MetricWrapper(test, 0)\n assert 0 == metric_wrapper.calculate([1, 2, 3])", "async def test_missing_uncovered_branches(self):\n json = {\"component\": {\"measures\": []}}\n response = await self.collect(get_request_json_return_value=json)\n self.assert_measurement(\n response,\n value=\"0\",\n total=\"100\",\n landing_url=self.metric_landing_url.format(\"uncovered_conditions\"),\n )", "async def stats(\n self,\n *,\n index: t.Optional[t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]] = None,\n metric: t.Optional[\n t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]\n ] = None,\n completion_fields: t.Optional[\n t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]\n ] = None,\n error_trace: t.Optional[bool] = None,\n expand_wildcards: t.Optional[\n t.Union[\n t.Union[\"t.Literal['all', 'closed', 'hidden', 'none', 'open']\", str],\n t.Union[\n t.List[\n t.Union[\n \"t.Literal['all', 'closed', 'hidden', 'none', 'open']\", str\n ]\n ],\n t.Tuple[\n t.Union[\n \"t.Literal['all', 'closed', 'hidden', 'none', 'open']\", str\n ],\n ...,\n ],\n ],\n ]\n ] = None,\n fielddata_fields: 
t.Optional[\n t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]\n ] = None,\n fields: t.Optional[\n t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]\n ] = None,\n filter_path: t.Optional[\n t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]\n ] = None,\n forbid_closed_indices: t.Optional[bool] = None,\n groups: t.Optional[\n t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]\n ] = None,\n human: t.Optional[bool] = None,\n include_segment_file_sizes: t.Optional[bool] = None,\n include_unloaded_segments: t.Optional[bool] = None,\n level: t.Optional[\n t.Union[\"t.Literal['cluster', 'indices', 'shards']\", str]\n ] = None,\n pretty: t.Optional[bool] = None,\n ) -> ObjectApiResponse[t.Any]:\n if index not in SKIP_IN_PATH and metric not in SKIP_IN_PATH:\n __path = f\"/{_quote(index)}/_stats/{_quote(metric)}\"\n elif index not in SKIP_IN_PATH:\n __path = f\"/{_quote(index)}/_stats\"\n elif metric not in SKIP_IN_PATH:\n __path = f\"/_stats/{_quote(metric)}\"\n else:\n __path = \"/_stats\"\n __query: t.Dict[str, t.Any] = {}\n if completion_fields is not None:\n __query[\"completion_fields\"] = completion_fields\n if error_trace is not None:\n __query[\"error_trace\"] = error_trace\n if expand_wildcards is not None:\n __query[\"expand_wildcards\"] = expand_wildcards\n if fielddata_fields is not None:\n __query[\"fielddata_fields\"] = fielddata_fields\n if fields is not None:\n __query[\"fields\"] = fields\n if filter_path is not None:\n __query[\"filter_path\"] = filter_path\n if forbid_closed_indices is not None:\n __query[\"forbid_closed_indices\"] = forbid_closed_indices\n if groups is not None:\n __query[\"groups\"] = groups\n if human is not None:\n __query[\"human\"] = human\n if include_segment_file_sizes is not None:\n __query[\"include_segment_file_sizes\"] = include_segment_file_sizes\n if include_unloaded_segments is not None:\n __query[\"include_unloaded_segments\"] = include_unloaded_segments\n if level is not None:\n __query[\"level\"] = level\n if pretty is not None:\n __query[\"pretty\"] = pretty\n __headers = {\"accept\": \"application/json\"}\n return await self.perform_request( # type: ignore[return-value]\n \"GET\", __path, params=__query, headers=__headers\n )", "def test_index_view_with_no_articles(self):\n\t\tresponse = self.client.get(reverse('succinctly:index'))\n\t\tself.assertEqual(response.status_code, 200)\n\t\tself.assertContains(response, \"No summaries are available.\")\n\t\tself.assertQuerysetEqual(response.context['article_list'], [])", "def test_add_empty_measure(self):\n measure_results = {\n 'performance_met': 0,\n 'performance_not_met': 0,\n 'eligible_population_exclusion': 0,\n 'eligible_population_exception': 0,\n 'eligible_population': 0\n }\n\n self.measurement_set.add_measure(\n measure_number='042',\n measure_results=measure_results,\n )\n\n assert self.measurement_set.is_empty()", "def list_metric_no_option(self):\n url = '/metrics'\n resp, body = self.get(url)\n return resp, body", "def test_index_view_with_no_items(self):\n\t\tresponse = self.client.get('/')\n\t\tself.assertEqual(response.status_code, 200)\n\t\tself.assertContains(response, \"Nothing to do!\")\n\t\tself.assertQuerysetEqual(response.context[\"items\"], [])", "def test_index_of_coincidence_none():\n with pytest.raises(ValueError):\n frequency.index_of_coincidence()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks whether the user has permission for this report.
def check_permission(self, user):
        return user.has_perms(self.permissions_required)
[ "def authorized_for_reports(self):\n if self.userobject is None:\n return False\n return self.userobject.may_run_reports or self.userobject.superuser", "def is_user_granted_access(self, context):\n\n # Does user not have VIEW permissions?\n if not context['has_view_permission']:\n return False\n\n # Additional requirements if a DataProject requires teams.\n if self.project.has_teams:\n\n # Make sure the user has a Participant record.\n if self.participant is None:\n return False\n\n # Make sure the user is on a team.\n if self.participant.team is None:\n return False\n\n # Make sure the team leader has accepted this user onto their team.\n if not self.participant.team_approved:\n return False\n\n # Make sure the team has been approved by administrators.\n if not self.participant.team.status == 'Active':\n return False\n\n # If no issues, then the user been granted access.\n return True", "def can_access(self, user):\r\n # Sanity check (this should normally be ensured by the caller).\r\n if user.site_id != self.site_id:\r\n return False\r\n return (self.open_access_granted()\r\n or self.permissions().get(user.id) != None)", "def isUserAllowed(self):\n security = getSecurityManager()\n portal = getToolByName(self, 'portal_url').getPortalObject()\n return security.checkPermission(permissions.USE_LINK_MANAGEMENT,\n portal)", "def check_user_perm(self, user, doc, write = False):\n\n try:\n perm = self.user_perms.get(doc = doc, user = user)\n if not write:\n return True\n else:\n return perm.write and write\n except ObjectDoesNotExist:\n return False", "def access_granted(self):\n\t\treturn self.status in self.GRANTED_STATUSES", "def has_permission(self, request, view):\n answer = view.get_object()\n return (\n request.user.is_authenticated\n and answer.referral.units.filter(members__id=request.user.id).exists()\n )", "def show_to_user_if_has_permission(self, item_dict):\n condition_check_path = item_dict.get('render_for_user_when_has_permission')\n if condition_check_path is None:\n return True\n return self.request.user.has_perm(condition_check_path)", "def check_permission(self):\n if self.committee and self.committee.premium:\n if self.premium_but_free():\n return True\n\n # must be authenticated\n if not current_user.is_authenticated:\n return False\n\n if not current_user.is_confirmed():\n return False\n\n # check subscription\n return current_user.subscribed_to_committee(self.committee)\n\n return True", "def has_data_access_permission(user):\n if not user.is_authenticated():\n return False\n elif user.has_perm('monitor.data_access'):\n return True\n raise PermissionDenied('%s does not have monitor.data_access permission.' 
% user)", "def has_permission(self, request, view):\n try:\n visitor = request.user.visitormodel\n return True\n except UserModel.visitormodel.RelatedObjectDoesNotExist:\n return False", "def view(self, user, project, *args):\n if user.is_anonymous or user.is_client:\n return False\n\n if user.is_administrator:\n return True\n\n if user.is_manager:\n return False\n\n if user.is_advisor:\n return Project.objects.filter(pk=project.pk).accessible_by(user).exists()\n\n return self.admin_permission(user, project, *args)", "def has_access(self, user):\n with TRN:\n # if admin or superuser, just return true\n if user.level in {'superuser', 'admin'}:\n return True\n\n return self._id in Analysis.get_by_status('public') | \\\n user.private_analyses | user.shared_analyses", "def has_perm(self, user_obj, perm, obj=None):\n return permissions.utils.has_permission(obj, user_obj, perm)", "def has_permission(self, request, view):\n user_filter = self._get_user_filter(request)\n if not user_filter:\n # no user filters are present in the token to limit access\n return True\n\n username_param = _get_username_param(request)\n allowed = user_filter == username_param\n if not allowed:\n log.warning(\n u\"Permission JwtHasUserFilterForRequestedUser: user_filter %s doesn't match username %s.\",\n user_filter,\n username_param,\n )\n return allowed", "def has_permission(self, request, view):\n\n if not request.user.is_authenticated:\n return False\n\n if request.method == 'GET':\n if hasattr(request.user, 'profile') or hasattr(request.user, 'driver_profile') or hasattr(request.user,\n 'shop_profile'):\n return True\n\n if request.method == 'POST':\n if hasattr(request.user, 'profile'):\n return True\n\n if request.method == 'PATCH':\n if hasattr(request.user, 'driver_profile'):\n return True\n\n return False", "def has_permission(self, permission):\n \n if self.user is None:\n return False\n if permission is None:\n return True\n if isinstance(permission, basestring):\n permission = Permission.find_label(permission)\n return permission.possessed_by(self.user, self.subaccount)", "def can_request_assistance(user):\n return _is_in_acl(user, 'authorized')", "def has_permission(self, request, view):\n try:\n Membership.objects.get(\n user=request.user,\n bucket=view.bucket,\n is_active=True\n )\n except Membership.DoesNotExist:\n return False\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }