Columns: query (string, 9 to 9.05k characters), document (string, 10 to 222k characters), negatives (sequence of 19 to 20 strings), metadata (dict)
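Each row pairs a docstring-style query with one positive code passage (document) and a list of hard-negative code passages; the metadata marks the (query, document, negatives) triplet objective. A minimal loading sketch, assuming the rows are published as a Hugging Face dataset; the dataset path below is a placeholder, not the real identifier:

from datasets import load_dataset

# "user/code-retrieval-triplets" is a hypothetical placeholder path.
ds = load_dataset("user/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])           # docstring-style query text
print(row["document"])        # the positive code passage
print(len(row["negatives"]))  # 19 to 20 hard-negative code passages
print(row["metadata"])        # {"objective": {"triplet": [["query", "document", "negatives"]]}}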
r""" >>> t = Textile() >>> t.table('(rowclass). |one|two|three|\n|a|b|c|') '\t\n\t\t\n\t\t\tone\n\t\t\ttwo\n\t\t\tthree\n\t\t\n\t\t\n\t\t\ta\n\t\t\tb\n\t\t\tc\n\t\t\n\t\n\n'
def table(self, text):
    text = text + "\n\n"
    pattern = re.compile(r'^(?:table(_?%(s)s%(a)s%(c)s)\. ?\n)?^(%(a)s%(c)s\.? ?\|.*\|)\n\n'
                         % {'s': self.table_span_re, 'a': self.align_re, 'c': self.c},
                         re.S | re.M | re.U)
    return pattern.sub(self.fTable, text)
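A brief usage sketch for the passage above; it assumes the `Textile` class is importable from the `textile` package, which this row does not state explicitly:

from textile import Textile  # assumed import path

t = Textile()
# "(rowclass)." sets a class on the first row; each |...| cell becomes a <td>.
html = t.table('(rowclass). |one|two|three|\n|a|b|c|')
print(html)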
[ "def test_multi_line(style):\n row = ['Row One\\nColumn One', 'Two', 'Three']\n table = BaseTable([row])\n actual = [tuple(i) for i in table.gen_row_lines(row, style, [10, 3, 5], 2)]\n expected = [\n ('|', ' Row One ', '|', ' Two ', '|', ' Three ', '|'),\n ('|', ' Column One ', '|', ' ', '|', ' ', '|'),\n ]\n assert actual == expected", "def split_tables(text):\n tables = RE_TABLE.findall(text)\n text = RE_TABLE.sub(\"\", text)\n return text, tables", "def tabulate(self, separator=' ', filler=None):\n\n if filler is None:\n filler = separator\n\n split_regex = re.compile(separator)\n\n def do_split(line):\n return (cell for cell in split_regex.split(line) if cell)\n\n # \"Transpose\" the list of lines into a list of columns\n table_cols = list(itertools.zip_longest(\n *(do_split(line) for line in self._line_list),\n fillvalue=''\n ))\n\n # Compute the width of each column according to the longest cell in it\n table_cols_width = (\n # +1 to keep at least one filler string between columns\n max(len(cell) for cell in col) + 1\n for col in table_cols\n )\n\n # Reformat all cells to fit the width of their column\n table_cols = [\n [\n '{cell:{filler}<{w}}'.format(cell=cell, filler=filler, w=width)\n for cell in col\n ]\n for width, col in zip(table_cols_width, table_cols)\n ]\n # Transpose back the columns to lines\n table_lines = (\n ''.join(cells).rstrip(filler)\n for cells in zip(*table_cols)\n )\n\n return MLString(lines=table_lines)", "def test_text_outside_table(self) -> None:\n assert OUTPUT.body[3] == [\n [\n [\n \"Text outside table\",\n \"Reference footnote 1----footnote1----\",\n \"Reference footnote 2----footnote2----\",\n \"Reference endnote 1----endnote1----\",\n \"Reference endnote 2----endnote2----\",\n \"\",\n \"----media/image2.jpg----\",\n ]\n ]\n ]", "def test_single_line(style):\n row = ['Row One Column One', 'Two', 'Three']\n table = BaseTable([row])\n actual = [tuple(i) for i in table.gen_row_lines(row, style, [18, 3, 5], 1)]\n expected = [\n ('|', ' Row One Column One ', '|', ' Two ', '|', ' Three ', '|'),\n ]\n assert actual == expected", "def test_two_rows_equal_size(self):\n tab = tabl.Tabl()\n string = tab.to_table([['ab', 'c', 'def'], ['gh', 'i', 'jkl']])\n self.assertEqual('+--+-+---+\\n' + \\\n '|ab|c|def|\\n' + \\\n '+--+-+---+\\n' + \\\n '|gh|i|jkl|\\n' + \\\n '+--+-+---+\\n', string)", "def _enclose_in_table (text):\n\treturn tag_with_contents (\n\t\t'table',\n\t\ttext,\n\t\tclass_='revi_formtable',\n\t)", "def textile(text, head_offset=0, html_type='xhtml', auto_link=False,\r\n encoding=None, output=None):\r\n return Textile(auto_link=auto_link).textile(text, head_offset=head_offset,\r\n html_type=html_type)", "def test_two_rows_first_longer(self):\n tab = tabl.Tabl()\n string = tab.to_table([['abb', 'c', 'def'], ['gh', 'i', 'jkl']])\n self.assertEqual('+---+-+---+\\n' + \\\n '|abb|c|def|\\n' + \\\n '+---+-+---+\\n' + \\\n '|gh |i|jkl|\\n' + \\\n '+---+-+---+\\n', string)", "def test_custom_hor_split(self):\n tab = tabl.Tabl()\n tab.set_hor('~')\n string = tab.to_table([['a']])\n self.assertEqual('+~+\\n' + \\\n '|a|\\n' + \\\n '+~+\\n', string)", "def TableExtract(self):\n\n Regex = r\"\\\\begin\\{table\\}.*?\\\\end\\{table\\}\" # no closing brace on purpose -- this is so that table* is included\n self.TableRegex = re.compile(Regex, re.VERBOSE|re.DOTALL)\n Regex = r\"\\\\begin\\{table\\*\\}.*?\\\\end\\{table\\*}\"\n self.TableStarRegex = re.compile(Regex, re.VERBOSE|re.DOTALL)\n\n TableExtracted = self.TableRegex.findall(self.ParsedText) + 
self.TableStarRegex.findall(self.ParsedText)\n\n for TableText in TableExtracted:\n ThisUID = self.GenerateUID()\n self.ParsedTables[ThisUID] = Table(TableText, ThisUID)", "def _read_torchtext_tabular(cls, input_file):\n return open_split(input_file, lower_case=False)", "def render_text(table, data):\n print(render(renderers.FancyRenderer, table, data))", "def create_table_element(self):\n element = etree.Element('table')\n element.text = '\\n'\n element.tail = '\\n'\n return element", "def test_biolink_tsv(self):\n\n def filtr(s: str) -> str:\n return s.replace(\"\\r\\n\", \"\\n\")\n\n self.single_file_generator(\"tsv\", CsvGenerator, format=\"tsv\", filtr=filtr)", "def test_make_quoted_table(): # ***Incomplete test\n ##########################\n # Arrange.\n intablepath = \"intablepath\"\n outtablepath = \"outtablepath\"\n\n ##########################\n # Act.\n #x = make_quoted_table(intablepath,\n #\t\touttablepath)\n\n ##########################\n # Assert.\n assert True == True # ***Temporary.", "def array_html_block_table(self, arr):\n\n (suppress, suppress_thresh) = self._get_suppress()\n\n st_tab = \"style='border: 2px solid black;'\"\n st_tr = \"style='border: 1px dotted; padding: 2px;'\"\n st_th = \"style='border: 1px dotted; padding: 2px; text-align: center;'\"\n st_tdval = \"style='border: 1px dotted; padding: 2px; text-align: right;'\"\n spc = arr.space\n if len(spc.ket_set):\n ket_indices = list(spc.ket_space().index_iter())\n else:\n ket_indices = [None]\n if len(spc.bra_set):\n bra_indices = list(spc.bra_space().index_iter())\n else:\n bra_indices = [None]\n fmt = spc.base_field.latex_formatter(arr.nparray.flatten(), dollar_if_tex=True)\n\n ht = ''\n\n if self.use_latex_label_in_html:\n ht += '$'+spc._latex_()+'$'\n else:\n # FIXME - here, and elsewhere, use unicode symbols '&#x27e8;' and '&#x27e9;'\n # for html.\n ht += spc._html_()+'<br>'\n\n ht += \"<table style='margin: 0px 0px;'>\\n\"\n\n if spc.ket_set:\n ht += \"<colgroup \"+st_tab+\"></colgroup>\\n\"\n if len(spc.bra_set):\n colgrp_size = spc.bra_space().shape[-1]\n for i in range(spc.bra_space().dim() // colgrp_size):\n ht += (\"<colgroup span=%d \"+st_tab+\"></colgroup>\\n\") % colgrp_size\n else:\n ht += \"<colgroup \"+st_tab+\"></colgroup>\\n\"\n\n if spc.bra_set:\n ht += \"<tbody \"+st_tab+\">\\n\"\n ht += '<tr '+st_tr+'>'\n if spc.ket_set:\n ht += '<td '+st_th+'> </td>'\n\n for b_idx in bra_indices:\n ht += '<td '+st_th+'><nobr>'\n\n #if self.use_latex_label_in_html:\n # ht += r'$\\scriptsize{\\left< '\n # ht += ','.join([str(x) for x in b_idx]) # FIXME - latex label for indices?\n # ht += r' \\right|}$'\n #else:\n ht += '&#x27e8;'+(','.join(['<tt>'+str(x)+'</tt>' for x in b_idx]))+'|'\n\n ht += '</nobr></td>'\n\n ht += '</tr>\\n'\n ht += '</tbody>\\n'\n\n last_k = None\n for k_idx in ket_indices:\n if k_idx is not None and len(k_idx) > 1 and k_idx[-2] != last_k:\n if last_k is not None:\n ht += '</tbody>\\n'\n ht += \"<tbody \"+st_tab+\">\\n\"\n last_k = k_idx[-2]\n ht += '<tr '+st_tr+'>'\n if spc.ket_set:\n ht += '<td '+st_th+'><nobr>'\n\n #if self.use_latex_label_in_html:\n # ht += r'$\\scriptsize{\\left| '\n # ht += ','.join([str(x) for x in k_idx]) # FIXME - latex label for indices?\n # ht += r' \\right>}$'\n #else:\n ht += '|'+(','.join(['<tt>'+str(x)+'</tt>' for x in k_idx]))+'&#x27e9;'\n\n ht += '</nobr></td>'\n for b_idx in bra_indices:\n if k_idx is None and b_idx is None:\n assert 0\n elif k_idx is None:\n idx = b_idx\n elif b_idx is None:\n idx = k_idx\n else:\n idx = k_idx + b_idx\n v = 
arr[idx]\n if suppress and spc.base_field.eval_suppress_small(v, suppress_thresh):\n if self.zero_color_html != '':\n vs = \"<font color='\"+self.zero_color_html+\"'>0</font>\"\n else:\n vs = \"0\"\n else:\n vs = \"<nobr><tt>\"+fmt(v)+\"</tt></nobr>\"\n ht += '<td '+st_tdval+'>'+vs+'</td>'\n ht += '</tr>\\n'\n ht += '</tbody>\\n'\n ht += '</table>\\n'\n\n return ht", "def make_table(self, a, b, adesc=None, bdesc=None, context=5):\n adesc = six.ensure_text(adesc) or ''\n bdesc = six.ensure_text(bdesc) or ''\n diff = difflib._mdiff(a, b, context=context)\n lines = [self._make_line(d) for d in diff]\n return h.really_unicode(\n self.table_tmpl % (adesc, bdesc, '\\n'.join(lines)))", "def table(self, header, body):\r\n return (\r\n '<table%s>\\n<thead>%s</thead>\\n'\r\n '<tbody>\\n%s</tbody>\\n</table>\\n'\r\n ) % (self._table, header, body)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> t = Textile()
>>> t.fBlock("bq", "", None, "", "Hello BlockQuote")
('\\t<blockquote>\\n', '\\t\\t<p>', 'Hello BlockQuote', '</p>', '\\n\\t</blockquote>')
def fBlock(self, tag, atts, ext, cite, content):
    atts = self.pba(atts)
    o1 = o2 = c2 = c1 = ''

    m = re.search(r'fn(\d+)', tag)
    if m:
        tag = 'p'
        if m.group(1) in self.fn:
            fnid = self.fn[m.group(1)]
        else:
            fnid = m.group(1)
        atts = atts + ' id="fn%s"' % fnid
        if atts.find('class=') < 0:
            atts = atts + ' class="footnote"'
        content = ('<sup>%s</sup>' % m.group(1)) + content

    if tag == 'bq':
        cite = self.checkRefs(cite)
        if cite:
            cite = ' cite="%s"' % cite
        else:
            cite = ''
        o1 = "\t<blockquote%s%s>\n" % (cite, atts)
        o2 = "\t\t<p%s>" % atts
        c2 = "</p>"
        c1 = "\n\t</blockquote>"

    elif tag == 'bc':
        o1 = "<pre%s>" % atts
        o2 = "<code%s>" % atts
        c2 = "</code>"
        c1 = "</pre>"
        content = self.shelve(self.encode_html(content.rstrip("\n") + "\n"))

    elif tag == 'notextile':
        content = self.shelve(content)
        o1 = o2 = ''
        c1 = c2 = ''

    elif tag == 'pre':
        content = self.shelve(self.encode_html(content.rstrip("\n") + "\n"))
        o1 = "<pre%s>" % atts
        o2 = c2 = ''
        c1 = '</pre>'

    else:
        o2 = "\t<%s%s>" % (tag, atts)
        c2 = "</%s>" % tag

    content = self.graf(content)

    return o1, o2, content, c2, c1
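A short sketch of calling the method above directly, assuming the same `textile` package import as before:

from textile import Textile  # assumed import path

t = Textile()
o1, o2, content, c2, c1 = t.fBlock("bq", "", None, "", "Hello BlockQuote")
# Concatenating the pieces yields the rendered block quote.
print(o1 + o2 + content + c2 + c1)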
[ "def makeNewBlock(self):\n\n block = textlayout.Block(\n width=self._propertyToPoints(\"width\"),\n lineHeight=self._propertyToPoints(\"line_height\"),\n marginTop=self._propertyToPoints(\"margin_top\"),\n marginBottom=self._propertyToPoints(\"margin_bottom\"),\n textAlign=self._property(\"text_align\"),\n maxLines=self._propertyToInt(\"max_lines\"),\n ellipsify=self._propertyToBool(\"ellipsify\")\n )\n\n return block", "def function_from_block(block):\n return Function(block.fields.get('Function', None),\n block.fields.get('Purpose', None), block.fields.get('Inputs', None),\n block.fields.get('Outputs', None))", "def test_nested_three_block_nl_block_nl_block_no_bq1():\n\n # Arrange\n source_markdown = \"\"\">\n> >\n > > list\n> > > item\"\"\"\n expected_tokens = [\n \"[block-quote(1,1)::>]\",\n \"[BLANK(1,2):]\",\n \"[block-quote(2,1)::> >\\n > > ]\",\n \"[BLANK(2,4):]\",\n \"[para(3,7):]\",\n \"[text(3,7):list:]\",\n \"[end-para:::True]\",\n \"[block-quote(4,1)::> > > ]\",\n \"[para(4,7):]\",\n \"[text(4,7):item:]\",\n \"[end-para:::True]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n ]\n expected_gfm = \"\"\"<blockquote>\n<blockquote>\n<p>list</p>\n<blockquote>\n<p>item</p>\n</blockquote>\n</blockquote>\n</blockquote>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens)", "def test_nested_three_block_block_block():\n\n # Arrange\n source_markdown = \"\"\"> > > list\n> > > item\"\"\"\n expected_tokens = [\n \"[block-quote(1,1)::]\",\n \"[block-quote(1,3)::]\",\n \"[block-quote(1,5)::> > > \\n> > > ]\",\n \"[para(1,7):\\n]\",\n \"[text(1,7):list\\nitem::\\n]\",\n \"[end-para:::True]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n ]\n expected_gfm = \"\"\"<blockquote>\n<blockquote>\n<blockquote>\n<p>list\nitem</p>\n</blockquote>\n</blockquote>\n</blockquote>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens)", "def test_block_comment_whitespace_signature(self):\n \n inp = '2_3_block_comment.txt'\n self.run_single_file_case(inp)", "def codeblock(text):\n import textwrap # this is a slow import, do it lazy\n return textwrap.dedent(text).strip('\\n')", "def f_blocks(self, f_blocks):\n \n self._f_blocks = f_blocks", "def executeblock(self, block):\r\n \r\n block_text = \"\\n\\n\" + block.string\r\n line_number = block.start_row\r\n #self.options._update_loose(block.options)\r\n out_value = \"\"\r\n \r\n # This import should not be needed, but it works around a very\r\n # strange bug I encountered once.\r\n import cStringIO\r\n # create file-like string to capture output\r\n code_out = cStringIO.StringIO()\r\n code_err = cStringIO.StringIO()\r\n \r\n captured_exception = None\r\n # capture output and errors\r\n sys.stdout = code_out\r\n sys.stderr = code_err\r\n try:\r\n exec block_text in self.namespace\r\n except Exception, captured_exception:\r\n if isinstance(captured_exception, KeyboardInterrupt):\r\n raise captured_exception\r\n print >> sys.stderr, format_exc() \r\n \r\n # restore stdout and stderr\r\n sys.stdout = sys.__stdout__\r\n sys.stderr = sys.__stderr__\r\n \r\n out_value = code_out.getvalue()\r\n error_value = code_err.getvalue()\r\n \r\n code_out.close()\r\n code_err.close()\r\n\r\n if captured_exception: \r\n print >> sys.stderr, \"Error in executing script on block starting line \", line_number ,\": \" \r\n print >> sys.stderr, error_value\r\n self.namespace = globals()\r\n 
self.namespace.update(locals())\r\n\r\n if out_value and not self.options.noecho:\r\n if self.options.outfilename == \"-\" :\r\n print >> sys.stderr, out_value\r\n else:\r\n print out_value\r\n if self.myshow:\r\n self.current_figure_list = self.myshow.figure_list[\r\n len(self.total_figure_list):]\r\n self.total_figure_list = self.myshow.figure_list\r\n\r\n #if self.options.silent:\r\n # error_value = \"\"\r\n \r\n return (block.start_row, block.string, out_value, error_value, \r\n self.current_figure_list)", "def make_block(text, blocksize=60, spaces=False, newlines=False):\n if not spaces:\n # Simple block by chars.\n return (text[i:i + blocksize] for i in range(0, len(text), blocksize))\n if newlines:\n # Preserve newlines\n lines = []\n for line in text.split('\\n'):\n lines.extend(make_block(line, blocksize=blocksize, spaces=True))\n return lines\n\n # Wrap on spaces (ignores newlines)..\n words = text.split()\n lines = []\n curline = ''\n for word in words:\n possibleline = ' '.join((curline, word)) if curline else word\n\n if len(possibleline) > blocksize:\n lines.append(curline)\n curline = word\n else:\n curline = possibleline\n if curline:\n lines.append(curline)\n return lines", "def format_string_block(self):\n if self._trim:\n return self._form_trimmed_format_string()\n else:\n return self._form_format_string()", "def testBlock(self):\n\t\t\n\t\tbo = DBSBlock(self.logger, self.dbi, self.owner)\n\t\tbinput = {'block_name': u'/QCD_BCtoMu_Pt20/Summer08_IDEAL_V9_v1/GEN-SIM-RAW#f930d82a-f72b-4f9e-8351-8a3cb0c43b79', 'file_count': u'100', \n\t\t\t 'origin_site': u'cmssrm.fnal.gov', 'last_modification_date': u'1263231733', \n\t\t\t 'create_by': u'/DC=org/DC=doegrids/OU=People/CN=Ajit Kumar Mohapatra 867118', 'block_size': u'228054411650', \n\t\t\t 'open_for_writing': 1, 'last_modified_by': u'anzar@cmssrv49.fnal.gov', 'creation_date': u'1228050132'}\n\t\tbo.insertBlock(binput)", "def test_already_formatted_block_comment(self):\n \n inp = '2_5_block_comment.txt'\n self.run_single_file_case(inp)", "def executeblock(self, block):\n \n block_text = \"\\n\\n\" + block.string\n line_number = block.start_row\n #self.options._update_loose(block.options)\n out_value = \"\"\n \n # This import should not be needed, but it works around a very\n # strange bug I encountered once.\n import cStringIO\n # create file-like string to capture output\n code_out = cStringIO.StringIO()\n code_err = cStringIO.StringIO()\n \n captured_exception = None\n # capture output and errors\n sys.stdout = code_out\n sys.stderr = code_err\n try:\n exec block_text in self.namespace\n except Exception, captured_exception:\n if isinstance(captured_exception, KeyboardInterrupt):\n raise captured_exception\n print >> sys.stderr, format_exc() \n \n # restore stdout and stderr\n sys.stdout = sys.__stdout__\n sys.stderr = sys.__stderr__\n \n out_value = code_out.getvalue()\n error_value = code_err.getvalue()\n \n code_out.close()\n code_err.close()\n\n if captured_exception: \n print >> sys.stderr, \"Error in executing script on block starting line \", line_number ,\": \" \n print >> sys.stderr, error_value\n self.namespace = globals()\n self.namespace.update(locals())\n\n if out_value and not self.options.noecho:\n if self.options.outfilename == \"-\" :\n print >> sys.stderr, out_value\n else:\n print out_value\n if self.myshow:\n self.current_figure_list = self.myshow.figure_list[\n len(self.total_figure_list):]\n self.total_figure_list = self.myshow.figure_list\n\n #if self.options.silent:\n # error_value = \"\"\n \n return 
(block.start_row, block.string, out_value, error_value, \n self.current_figure_list)", "def test_inline_in_block():\r\n source = '<div>Hello, <em>World</em>!\\n<p>Lipsum.</p></div>'\r\n expected = [\r\n ('div', 'Block', [\r\n ('div', 'AnonBlock', [\r\n ('div', 'Line', [\r\n ('div', 'Text', 'Hello, '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'World')]),\r\n ('div', 'Text', '!\\n')])]),\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lipsum.')])])])]\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n assert_tree(box, expected)\r\n\r\n source = '<div><p>Lipsum.</p>Hello, <em>World</em>!\\n</div>'\r\n expected = [\r\n ('div', 'Block', [\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lipsum.')])]),\r\n ('div', 'AnonBlock', [\r\n ('div', 'Line', [\r\n ('div', 'Text', 'Hello, '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'World')]),\r\n ('div', 'Text', '!\\n')])])])]\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n assert_tree(box, expected)\r\n\r\n # Absolutes are left in the lines to get their static position later.\r\n source = '''<p>Hello <em style=\"position:absolute;\r\n display: block\">World</em>!</p>'''\r\n expected = [\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Hello '),\r\n ('em', 'Block', [\r\n ('em', 'Line', [\r\n ('em', 'Text', 'World')])]),\r\n ('p', 'Text', '!')])])]\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n assert_tree(box, expected)\r\n box = build.block_in_inline(box)\r\n assert_tree(box, expected)\r\n\r\n # Floats are pull to the top of their containing blocks\r\n source = '<p>Hello <em style=\"float: left\">World</em>!</p>'\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n box = build.block_in_inline(box)\r\n assert_tree(box, [\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Hello '),\r\n ('em', 'Block', [\r\n ('em', 'Line', [\r\n ('em', 'Text', 'World')])]),\r\n ('p', 'Text', '!')])])])", "def code_block(name, arguments, options, content, lineno,\r\n content_offset, block_text, state, state_machine):\r\n language = arguments[0]\r\n highlighter = get_highlighter(language)\r\n if highlighter is None:\r\n error = state_machine.reporter.error(\r\n 'The \"%s\" directive does not support language \"%s\".' % (name, language),\r\n nodes.literal_block(block_text, block_text), line=lineno)\r\n\r\n if not content:\r\n error = state_machine.reporter.error(\r\n 'The \"%s\" block is empty; content required.' 
% (name),\r\n nodes.literal_block(block_text, block_text), line=lineno)\r\n return [error]\r\n\r\n include_text = highlighter(\"\\n\".join(content))\r\n html = '<div class=\"syntax %s\">\\n%s\\n</div>\\n' % (language, include_text)\r\n raw = nodes.raw('',html, format='html')\r\n return [raw]", "def block(self, dataset: 'Dataset', function_: Callable = None, property_: str = None,\n block: Block = None, block_black_list: BlockBlackList = None, base_on: Block = None):\n block = BlockGenerator._block_args_check(function_, property_, block)\n return block", "def buildBlock(self, b):\n \"\"\"\n s = self.style\n colClass = self.getColClass(s.colWidth)\n b.block(self)\n b.div(class_=colClass, marginright=s.columnMarginRight, width=s.colWidth,\n marginleft=s.columnMarginLeft, margintop=s.columnMarginTop,\n paddingleft=s.columnPaddingLeft, float=s.columnFloat,\n display=s.columnDisplay,\n media=(\n \tMedia(width=s.columnWidthMobile,\n\t\t\t\tdisplay=s.columnDisplayMobile,\n float=s.columnFloatMobile,\n marginleft=s.columnMarginLeftMobile,\n marginright=s.columnMarginRightMobile,\n paddingleft=s.columnPaddingLeftMobile,\n paddingright=s.columnPaddingRightMobile,),\n ))\n \"\"\"\n self.buildColumn(b)\n \"\"\"\n b._div(comment=colClass)\n b._block(self)\n \"\"\"", "def test_nested_three_block_max_block_max_block_max_empty_no_bq2():\n\n # Arrange\n source_markdown = \"\"\" > > >\n > > list\"\"\"\n expected_tokens = [\n \"[block-quote(1,4): : > \\n > ]\",\n \"[block-quote(1,9):: > > ]\",\n \"[block-quote(1,14):: > > >]\",\n \"[BLANK(1,15):]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n \"[icode-block(2,10): :]\",\n \"[text(2,10):\\a>\\a&gt;\\a list: ]\",\n \"[end-icode-block:::True]\",\n \"[end-block-quote:::True]\",\n ]\n expected_gfm = \"\"\"<blockquote>\n<blockquote>\n<blockquote>\n</blockquote>\n</blockquote>\n<pre><code> &gt; list\n</code></pre>\n</blockquote>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens)", "def render_table(self, block):\n before = '<table>\\n<tr>\\n<td>'\n end = '</td>\\n</tr>\\n</table>'\n content = [\"</td>\\n<td>\".join(row) for row in block.data]\n content = \"</td>\\n</tr>\\n<tr>\\n<td>\".join(content)\n block.data = before + content + end\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> t = Textile()
>>> t.glyphs("apostrophe's")
'apostrophe&#8217;s'
>>> t.glyphs("back in '88")
'back in &#8217;88'
>>> t.glyphs('foo ...')
'foo &#8230;'
>>> t.glyphs('--')
'&#8212;'
>>> t.glyphs('FooBar[tm]')
'FooBar&#8482;'
>>> t.glyphs("Cat's Cradle by Vonnegut")
'Cat&#8217;s Cradle by Vonnegut'
def glyphs(self, text):
    # fix: hackish
    text = re.sub(r'"\Z', '\" ', text)

    glyph_search = (
        # apostrophe's
        re.compile(r"(\w)\'(\w)"),
        # back in '88
        re.compile(r'(\s)\'(\d+\w?)\b(?!\')'),
        # single closing
        re.compile(r'(\S)\'(?=\s|' + self.pnct + '|<|$)'),
        # single opening
        re.compile(r'\'/'),
        # double closing
        re.compile(r'(\S)\"(?=\s|' + self.pnct + '|<|$)'),
        # double opening
        re.compile(r'"'),
        # 3+ uppercase acronym
        re.compile(r'\b([A-Z][A-Z0-9]{2,})\b(?:[(]([^)]*)[)])'),
        # 3+ uppercase
        re.compile(r'\b([A-Z][A-Z\'\-]+[A-Z])(?=[\s.,\)>])'),
        # ellipsis
        re.compile(r'\b(\s{0,1})?\.{3}'),
        # em dash
        re.compile(r'(\s?)--(\s?)'),
        # en dash
        re.compile(r'\s-(?:\s|$)'),
        # dimension sign
        re.compile(r'(\d+)( ?)x( ?)(?=\d+)'),
        # trademark
        re.compile(r'\b ?[([]TM[])]', re.I),
        # registered
        re.compile(r'\b ?[([]R[])]', re.I),
        # copyright
        re.compile(r'\b ?[([]C[])]', re.I),
    )

    glyph_replace = [x % dict(self.glyph_defaults) for x in (
        r'\1%(txt_apostrophe)s\2',            # apostrophe's
        r'\1%(txt_apostrophe)s\2',            # back in '88
        r'\1%(txt_quote_single_close)s',      # single closing
        r'%(txt_quote_single_open)s',         # single opening
        r'\1%(txt_quote_double_close)s',      # double closing
        r'%(txt_quote_double_open)s',         # double opening
        r'<acronym title="\2">\1</acronym>',  # 3+ uppercase acronym
        r'<span class="caps">\1</span>',      # 3+ uppercase
        r'\1%(txt_ellipsis)s',                # ellipsis
        r'\1%(txt_emdash)s\2',                # em dash
        r' %(txt_endash)s ',                  # en dash
        r'\1\2%(txt_dimension)s\3',           # dimension sign
        r'%(txt_trademark)s',                 # trademark
        r'%(txt_registered)s',                # registered
        r'%(txt_copyright)s',                 # copyright
    )]

    result = []
    for line in re.compile(r'(<.*?>)', re.U).split(text):
        if not re.search(r'<.*>', line):
            for s, r in zip(glyph_search, glyph_replace):
                line = s.sub(r, line)
        result.append(line)
    return ''.join(result)
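A short sketch exercising the replacements above, assuming the same `textile` package import; the expected outputs follow the row's doctest:

from textile import Textile  # assumed import path

t = Textile()
print(t.glyphs("back in '88"))  # back in &#8217;88
print(t.glyphs('foo ...'))      # foo &#8230;
print(t.glyphs('FooBar[tm]'))   # FooBar&#8482;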
[ "def get_glyphs(self, text):\n glyph_renderer = None\n glyphs = [] # glyphs that are committed.\n for c in get_grapheme_clusters(str(text)):\n # Get the glyph for 'c'. Hide tabs (Windows and Linux render\n # boxes)\n if c == '\\t':\n c = ' '\n if c not in self.glyphs:\n if not glyph_renderer:\n glyph_renderer = self.glyph_renderer_class(self)\n self.glyphs[c] = glyph_renderer.render(c)\n glyphs.append(self.glyphs[c])\n return glyphs", "def SoGlyph_getGlyph(*args) -> \"SoGlyph const *\":\n return _coin.SoGlyph_getGlyph(*args)", "def load_glyphs(self): \n self.glyphs = {}\n for id, glyph in self.font_spec[\"char\"].iteritems(): \n g = Glyph(**glyph)\n if id<256:\n self.glyphs[chr(id)] = g", "def get_glyphs(fig):\n return [x for x in fig.renderers if isinstance(x, GlyphRenderer)]", "def textCurves(string, font=\"string\", text=\"string\", name=\"string\", object=bool):\n pass", "def getGlyph(self, char):\n return FontGlyph(char, self, self.cairoContext)", "def add_glyphs(self, directory):\n space = self.font.createMappedChar(ord(\" \"))\n space.width = 500\n\n for k in self.config[\"glyphs\"]:\n # Create character glyph\n g = self.font.createMappedChar(k)\n self.unicode_mapping.setdefault(k, g.glyphname)\n # Get outlines\n src = \"{}/{}.svg\".format(k, k)\n src = directory + os.sep + src\n g.importOutlines(src, (\"removeoverlap\", \"correctdir\"))\n g.removeOverlap()", "def _get_text_glyphs(\n font, text,\n direction, line_direction, base_direction,\n missing='raise'\n ):\n if isinstance(text, str) and direction not in ('top-to-bottom', 'bottom-to-top'):\n # reshape Arabic glyphs to contextual forms\n try:\n text = reshape(text)\n except ImportError as e:\n # check common Arabic range - is there anything to reshape?\n if any(ord(_c) in range(0x600, 0x700) for _c in text):\n logging.warning(e)\n # put characters in visual order instead of logical\n if direction == 'normal':\n # decide direction based on bidi algorithm\n base_dir = {\n 'left-to-right': 'L',\n 'right-to-left': 'R'\n }[base_direction]\n text = get_display(text, base_dir=base_dir)\n lines = text.splitlines()\n if direction in ('right-to-left', 'bottom-to-top'):\n # reverse glyph order for rendering\n lines = tuple(_row[::-1] for _row in lines)\n if line_direction in ('right-to-left', 'bottom-to-top'):\n # reverse line order for rendering\n lines = lines[::-1]\n return tuple(\n tuple(_iter_labels(font, _line, missing))\n for _line in lines\n )", "def _pdfmark_unicode(string):\n try:\n string.encode('ascii')\n except UnicodeEncodeError:\n b = codecs.BOM_UTF16_BE + string.encode('utf-16-be')\n return '<{}>'.format(''.join('{:02X}'.format(byte) for byte in b))\n else:\n # escape special characters\n for a, b in [('\\\\', '\\\\\\\\'), ('(', '\\\\('), (')', '\\\\)'),\n ('\\n', '\\\\n'), ('\\t', '\\\\t')]:\n string = string.replace(a, b)\n return '({})'.format(string)", "def unicode(ctx, text):\n text = ' '.join(text)\n final_text = ''\n for char in text:\n final_text += f\"U+{ord(char):06x} {char} {unicodedata.name(char)}\\n\"\n chat(ctx).send_file(final_text.encode('utf8'), filename='UnicodeAnalysis.txt', title='Unicode', filetype='txt')", "def named_entities_codec(text):\r\n \r\n if isinstance(text, (UnicodeEncodeError, UnicodeTranslateError)):\r\n s = []\r\n for c in text.object[text.start:text.end]:\r\n if ord(c) in codepoint2name:\r\n s.append(u'&%s;' % codepoint2name[ord(c)])\r\n else:\r\n s.append(u'&#%s;' % ord(c))\r\n return ''.join(s), text.end\r\n else:\r\n raise TypeError(\"Can't handle %s\" % text.__name__)", "def 
test_non_varying_glyphs_bug356():\n actual_path = get_temp_file_path()\n font_path = get_input_path('bug356.otf')\n stderr_path = runner(CMD + ['-s', '-e', '-a', '-o', 'cff',\n '-f', font_path, actual_path])\n expected_path = get_expected_path('bug356.txt')\n assert differ([expected_path, stderr_path, '-l', '1'])", "def get_glyph_as_png(fontfile, cp):\n xcp = '%04X' % cp\n fn = \"/tmp/U+%s.png\" % xcp\n tfn = \"/tmp/U+%s.tmp.png\" % xcp\n subprocess.call([\n \"convert\", \"-background\", \"none\", \"-gravity\", \"center\",\n \"-size\", \"16x16\", \"-fill\", \"black\", \"-font\", fontfile,\n \"-pointsize\", \"16\", \"label:\"+unichr(cp).encode(\"UTF-8\"), tfn])\n subprocess.call([\"pngcrush\", \"-q\", \"-rem\", \"alla\", tfn, fn])\n os.unlink(tfn)\n with open(fn) as fo:\n result = b64encode(fo.read())\n os.unlink(fn)\n return result", "def fromFonttoolsGlyph(klass,font,glyphname):\n glyphset = font.getGlyphSet()\n from beziers.utils.pens import BezierPathCreatingPen\n pen = BezierPathCreatingPen(glyphset)\n glyph = font.getGlyphSet()[glyphname]\n glyph.draw(pen)\n return pen.paths", "def copy_character_glyphs(self, chars):\n if not self.should_copy_character_glyphs:\n return\n print(\" ...copying %d character glyphs...\" % (len(chars)))\n\n for char in chars:\n self.liga_font.selection.none()\n self.liga_font.selection.select(char)\n self.liga_font.copy()\n self.font.selection.none()\n self.font.selection.select(char)\n self.font.paste()\n self.correct_character_width(self.font[ord(char_dict[char])])", "def set_font_name(text):\r\n try:\r\n text.encode(encoding='utf-8').decode('ascii')\r\n except UnicodeDecodeError:\r\n return \"ARIALUNI.TTF\"\r\n return \"impact.ttf\"", "def text_image(self, char):\r\n if char in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ':\r\n _index = ord(char)\r\n if _index >= 97:\r\n _index -= 97\r\n else:\r\n _index -= 38\r\n else:\r\n _index = 26\r\n\r\n return alpha_image[_index]", "def check_font_chars(ttf, charset):\n #chars = chain.from_iterable([y + (Unicode[y[0]],) for y in x.cmap.items()] for x in ttf[\"cmap\"].tables)\n try:\n chars_int=set()\n for table in ttf['cmap'].tables:\n for k,v in table.cmap.items():\n chars_int.add(k)\n\n unsupported_chars = []\n supported_chars = []\n for c in charset:\n if ord(c) not in chars_int:\n unsupported_chars.append(c)\n else:\n supported_chars.append(c)\n\n ttf.close()\n return unsupported_chars, supported_chars\n except:\n return False", "def font(obj):\n return match(obj, font_matchers)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Capture and store URL references in self.urlrefs.

>>> t = Textile()
def getRefs(self, text):
    pattern = re.compile(r'(?:(?<=^)|(?<=\s))\[(.+)\]((?:http(?:s?):\/\/|\/)\S+)(?=\s|$)', re.U)
    text = pattern.sub(self.refs, text)
    return text
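A usage sketch for the method above; it assumes the companion `refs` callback (not shown in this row) records each [name]url pair in `self.urlrefs` and removes it from the text, as the query's description indicates:

from textile import Textile  # assumed import path

t = Textile()
text = t.getRefs("read the docs at [Example]http://example.com today")
print(t.urlrefs)  # expected (assumed): {'Example': 'http://example.com'}
print(text)       # the [Example]http://example.com reference should no longer appear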
[ "def RefExtract(self):\n Regex = r\"\\\\ref\\{.*?\\}\"\n self.RefRegex = re.compile(Regex, re.VERBOSE|re.DOTALL)\n\n RefExtracted = self.RefRegex.findall(self.ParsedText)\n\n for Reference in RefExtracted:\n ThisUID = self.GenerateUID()\n self.ParsedRef[ThisUID] = Ref(Reference, ThisUID)", "def add_reference(self, uri, text):\n el = SubElement(self.get_element_person(), 'ref')\n el.set('target', uri)\n el.text = text\n return el", "def _reference(self):\n\t\tpass", "def migrate_url_to_reference_url(connection):\n migration_user_id = migrator.get_migration_user_id(connection)\n doc_ids = connection.execute(\n text(\"SELECT d.id FROM documents d WHERE d.kind='URL'\")).fetchall()\n\n doc_ids = [d.id for d in doc_ids]\n utils.add_to_objects_without_revisions_bulk(connection, doc_ids, \"Document\")\n\n sql = \"\"\"\n UPDATE documents SET\n kind='REFERENCE_URL',\n modified_by_id=:modified_by_id,\n updated_at=NOW()\n WHERE kind='URL'\n \"\"\"\n connection.execute(text(sql),\n modified_by_id=migration_user_id)\n\n connection.execute(text(\"\"\"\n ALTER TABLE documents MODIFY\n kind enum('FILE','REFERENCE_URL') NOT NULL DEFAULT 'REFERENCE_URL';\n \"\"\"))", "def listLinks(self):\n\t\tfor _id in self.refs:\n\t\t\tref = self.refs[_id]\n\t\t\tprint(ref.path)", "def get_referenced_objects(self):\n refs = set()\n for tile_uuid in self.list_tiles():\n tile = self.get_tile(tile_uuid)\n uuid = tile.data.get(\"uuid\", None)\n if uuid is not None:\n refs |= set([uuidToObject(uuid)])\n if IListTile.providedBy(tile):\n uuids = tile.data.get(\"uuids\", [])\n if uuids is None:\n continue\n for uuid in uuids:\n refs |= set([uuidToObject(uuid)])\n elif IRichTextTile.providedBy(tile):\n value = tile.data.get(\"text\")\n if value is None:\n continue\n value = value.raw\n links = extractLinks(value)\n refs |= getObjectsFromLinks(self, links)\n return refs", "def upload_reference(self, task, file_object, filename):\n import shutil\n\n ############################################################\n # ORIGINAL\n ############################################################\n file_path = os.path.join(os.path.join(task.absolute_path), self.reference_path)\n\n # upload it\n reference_file_full_path = self.upload_file(file_object, file_path, filename)\n\n reference_file_file_name = os.path.basename(reference_file_full_path)\n reference_file_base_name = os.path.splitext(reference_file_file_name)[0]\n\n # create a Link instance and return it.\n # use a Repository relative path\n repo = task.project.repository\n\n from stalker import Repository, Link\n\n assert isinstance(repo, Repository)\n relative_full_path = repo.make_relative(reference_file_full_path)\n\n link = Link(full_path=relative_full_path, original_filename=filename)\n\n # create a thumbnail for the given reference\n # don't forget that the first thumbnail is the Web viewable version\n # and the second thumbnail is the thumbnail\n\n ############################################################\n # WEB VERSION\n ############################################################\n web_version_temp_full_path = self.generate_media_for_web(\n reference_file_full_path\n )\n web_version_extension = os.path.splitext(web_version_temp_full_path)[-1]\n\n web_version_file_name = \"%s%s\" % (\n reference_file_base_name,\n web_version_extension,\n )\n web_version_full_path = os.path.join(\n os.path.dirname(reference_file_full_path), \"ForWeb\", web_version_file_name\n )\n web_version_repo_relative_full_path = repo.make_relative(web_version_full_path)\n web_version_link = Link(\n 
full_path=web_version_repo_relative_full_path,\n original_filename=web_version_file_name,\n )\n\n # move it to repository\n try:\n os.makedirs(os.path.dirname(web_version_full_path))\n except OSError: # path exists\n pass\n shutil.move(web_version_temp_full_path, web_version_full_path)\n\n ############################################################\n # THUMBNAIL\n ############################################################\n # finally generate a Thumbnail\n thumbnail_temp_full_path = self.generate_thumbnail(reference_file_full_path)\n thumbnail_extension = os.path.splitext(thumbnail_temp_full_path)[-1]\n thumbnail_file_name = \"%s%s\" % (reference_file_base_name, thumbnail_extension)\n\n thumbnail_full_path = os.path.join(\n os.path.dirname(reference_file_full_path), \"Thumbnail\", thumbnail_file_name\n )\n thumbnail_repo_relative_full_path = repo.make_relative(thumbnail_full_path)\n thumbnail_link = Link(\n full_path=thumbnail_repo_relative_full_path,\n original_filename=thumbnail_file_name,\n )\n\n # move it to repository\n try:\n os.makedirs(os.path.dirname(thumbnail_full_path))\n except OSError: # path exists\n pass\n shutil.move(thumbnail_temp_full_path, thumbnail_full_path)\n\n ############################################################\n # LINK Objects\n ############################################################\n # link them\n # assign it as a reference to the given task\n task.references.append(link)\n link.thumbnail = web_version_link\n web_version_link.thumbnail = thumbnail_link\n\n return link", "def _process_biblio_ref(self, item):\n a_ref = nodes.Reference()\n logger = logging.getLogger(self.__class__.__name__)\n\n a_ref.set_name(item[\"name\"])\n a_ref.set_title(item.get(\"title\", \"\"))\n a_ref.set_organization(item.get(\"organization\", \"\"))\n a_ref.set_category(item.get(\"category\", \"\"))\n a_ref.set_date(item.get(\"date\", \"\"))\n\n logger.debug(\"Processing Bibliography Reference: \\\"{}\\\"\".format(a_ref.get_name()))\n\n if \"hyperlink\" in item:\n if isinstance(item[\"hyperlink\"], list):\n for hyperlink_item in item[\"hyperlink\"]:\n a_ref.add_hyperlink(hyperlink_item)\n logger.debug(\"- Adding Hyperlink: \\\"{}\\\"\".format(hyperlink_item))\n else:\n a_ref.add_hyperlink(item[\"hyperlink\"])\n logger.debug(\"- Adding Hyperlink: \\\"{}\\\"\".format(item[\"hyperlink\"]))\n\n return a_ref", "def text_ref(self, ref):\n self._text_ref(ref)", "def update_reference(self, index, uri, text):\n el = self.xpath('./person/ref')[index]\n assert el.tag == 'ref' #check sanity\n el.set('target', uri)\n el.text = text\n return el", "def create_link(self):\n #Access filepath created from capture method\n file_path = App.get_running_app().root.ids.camera_screen.filepath\n #create FileShare object and assign the filepath parameter as file_path and upload it\n #to the web using the api key parameter of FileShare class\n filesharer = FileShare(filepath = file_path)\n #extract url using share method\n #add self. 
to url so it can be access by other methods in the ImageScreen class\n self.url = filesharer.share()\n #set text of Label widget to url\n self.ids.link.text = self.url", "def references(self):\n out = []\n fields = 'position id doi title authors sourcetitle publicationyear '\\\n 'volume issue first last text fulltext'\n ref = namedtuple('Reference', fields)\n items = self._tail.get('bibliography', {}).get('reference', [])\n if not isinstance(items, list):\n items = [items]\n for item in items:\n info = item['ref-info']\n volisspag = info.get('ref-volisspag', {})\n try:\n auth = info['ref-authors']['author']\n if not isinstance(auth, list):\n auth = [auth]\n authors = [', '.join([d['ce:surname'], d['ce:initials']])\n for d in auth]\n except KeyError: # No authors given\n authors = None\n ids = info['refd-itemidlist']['itemid']\n if not isinstance(ids, list):\n ids = [ids]\n try:\n doi = [d['$'] for d in ids if d['@idtype'] == 'DOI'][0]\n except IndexError:\n doi = None\n new = ref(position=item.get('@id'),\n id=[d['$'] for d in ids if d['@idtype'] == 'SGR'][0],\n doi=doi, authors=authors,\n title=info.get('ref-title', {}).get('ref-titletext'),\n sourcetitle=info.get('ref-sourcetitle'),\n publicationyear=info.get('ref-publicationyear', {}).get('@first'),\n volume=volisspag.get('voliss', {}).get('@volume'),\n issue=volisspag.get('voliss', {}).get('@issue'),\n first=volisspag.get('pagerange', {}).get('@first'),\n last=volisspag.get('pagerange', {}).get('@last'),\n text=info.get('ref-text'),\n fulltext=item.get('ref-fulltext'))\n out.append(new)\n return out or None", "def __init__(self, url):\n\n self._url = url", "def _ref_path(self, name):\n assert name.startswith('refs/')\n return posixpath.join(self._url, name)", "def __init__(self, fullurl):\n self.fullurl = fullurl\n self.urls = []\n self.last_modified = ''", "def add_url(\n self, value, tags=[], context={}, description=\"\", source=\"API\", **kwargs\n ):\n return self.__observable_add(\n value,\n type_obs=\"Url\",\n tags=tags,\n context=context,\n description=description,\n source=source,\n **kwargs\n )", "def test_pathurl_argument_is_working_properly(self):\n f = File(pathurl='shot2')\n self.assertEqual('file://localhost/shot2', f.pathurl)", "def __handle_link_reference_definition_token(\n cls, output_html, next_token, transform_state\n ):\n _ = (transform_state, next_token)\n\n return output_html", "def _put_link(self, url, height, txt):\n self.set_text_color(0, 0, 255)\n self._set_style(\"U\", True)\n self.write(height, txt, url)\n self._set_style(\"U\", False)\n self.set_text_color(0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> t = Textile()
>>> t.span(r"hello %(bob)span *strong* and **bold**% goodbye")
'hello <span class="bob">span <strong>strong</strong> and <b>bold</b></span> goodbye'
def span(self, text):
    qtags = (r'\*\*', r'\*', r'\?\?', r'\-', r'__', r'_', r'%', r'\+', r'~', r'\^')
    pnct = ".,\"'?!;:("
    for qtag in qtags:
        pattern = re.compile(r"""
            (?:^|(?<=[\s>%(pnct)s])|([\[{]))
            (%(qtag)s)(?!%(qtag)s)
            (%(c)s)
            (?::\(([^)]+?)\))?
            ([^\s%(qtag)s]+|\S[^%(qtag)s\n]*[^\s%(qtag)s\n])
            ([%(pnct)s]*)
            %(qtag)s
            (?:$|([\]}])|(?=%(selfpnct)s{1,2}|\s))
        """ % {'qtag': qtag, 'c': self.c, 'pnct': pnct,
               'selfpnct': self.pnct}, re.X)
        text = pattern.sub(self.fSpan, text)
    return text
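A usage sketch; the mapping of `*`, `**`, and `%(...)` to `<strong>`, `<b>`, and `<span class="...">` comes from the companion `fSpan` callback, which this row does not include:

from textile import Textile  # assumed import path

t = Textile()
print(t.span(r"hello %(bob)span *strong* and **bold**% goodbye"))
# expected: hello <span class="bob">span <strong>strong</strong> and <b>bold</b></span> goodbye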
[ "def _span_word(tag: Callable, text: Callable, word: str, score: float,\n colormap: Callable):\n bg = colormap(score)\n style = \"color:\" + _get_rgb(bg) + \";font-weight:bold;background-color: \" \\\n \"#ffffff\"\n with tag(\"span\", style=style):\n text(\" \" + word + \" \")\n text(\" \")", "def tags_to_spans(tag_sequence: List[str], tag_regex: Pattern) -> List[TypedStringSpan]:\n\n spans: Set[Tuple[str, Tuple[int, int]]] = set()\n span_start = 0\n span_end = 0\n active_tag = None\n for index, string_tag in enumerate(tag_sequence):\n # entity label should math {S,B,I,E}-Entity:Property template\n m = tag_regex.match(string_tag)\n if m is not None:\n span_tag = m.groupdict()['span_tag']\n entity = m.groupdict()['entity']\n if span_tag == 'B':\n # entering new span\n if active_tag is not None:\n spans.add((active_tag, (span_start, span_end)))\n active_tag = entity\n span_start = index\n span_end = index\n elif span_tag == 'S':\n # entity with one token\n if active_tag is not None:\n # add existing span\n spans.add((active_tag, (span_start, span_end)))\n # also add current one-token entity\n active_tag, span_start, span_end = entity, index, index\n spans.add((active_tag, (span_start, span_end)))\n active_tag = None\n elif span_tag == 'E':\n # end of span\n if active_tag == entity:\n # finish current span\n span_end = index\n spans.add((active_tag, (span_start, span_end)))\n else:\n # unexpected: just make span with one token\n if active_tag is not None:\n # add existing span\n spans.add((active_tag, (span_start, span_end)))\n # also add current entity\n active_tag, span_start, span_end = entity, index, index\n spans.add((active_tag, (span_start, span_end)))\n active_tag = None\n elif span_tag == 'I':\n if active_tag == entity:\n # inside span\n span_end += 1\n else:\n # unexpected: assume that this is begin of another span\n if active_tag is not None:\n spans.add((active_tag, (span_start, span_end)))\n active_tag = entity\n span_start = index\n span_end = index\n else:\n assert False, \"Unexpected case\"\n else:\n # The span has ended\n if active_tag is not None:\n spans.add((active_tag, (span_end, span_end)))\n active_tag = None\n\n # Last token might have been a part of a valid span.\n if active_tag is not None:\n spans.add((active_tag, (span_start, span_end)))\n\n return list(spans)", "def span_to_whitespace(html_string, span):\n start = \"<span class=\\\"%s\\\">\" % span\n stop = \"</span>\"\n while True:\n try:\n s = html_string.index(start)\n f = html_string.index(stop, s) + 7\n except ValueError:\n # No more occurances of this span exist in the file.\n break\n\n strip = html_string[s:f]\n stripped = strip_tags(strip)\n chars = whitespacegen(get_spacing(stripped, \"times new roman\"))\n html_string = html_string.replace(strip, chars)\n return html_string", "def iter_fexpr_spans(s: str) -> Iterator[Tuple[int, int]]:\n stack: List[int] = [] # our curly paren stack\n i = 0\n while i < len(s):\n if s[i] == \"{\":\n # if we're in a string part of the f-string, ignore escaped curly braces\n if not stack and i + 1 < len(s) and s[i + 1] == \"{\":\n i += 2\n continue\n stack.append(i)\n i += 1\n continue\n\n if s[i] == \"}\":\n if not stack:\n i += 1\n continue\n j = stack.pop()\n # we've made it back out of the expression! 
yield the span\n if not stack:\n yield (j, i + 1)\n i += 1\n continue\n\n # if we're in an expression part of the f-string, fast forward through strings\n # note that backslashes are not legal in the expression portion of f-strings\n if stack:\n delim = None\n if s[i : i + 3] in (\"'''\", '\"\"\"'):\n delim = s[i : i + 3]\n elif s[i] in (\"'\", '\"'):\n delim = s[i]\n if delim:\n i += len(delim)\n while i < len(s) and s[i : i + len(delim)] != delim:\n i += 1\n i += len(delim)\n continue\n i += 1", "def makespan(sol={}):\n return sol[\"makespan\"]", "def lex_span(self, name=None):\n if name:\n return self.get(name+self._span_suffix)\n else:\n return self._lex_span", "def make_text(text: str) -> SubAnnotation:\n return SubAnnotation(\"text\", text)", "def highlight(text, phrase, hilighter='<strong class=\"hilight\">\\\\1</strong>'):\n if not phrase or not text:\n return text\n return re.sub(re.compile('(%s)' % re.escape(phrase)), hilighter, text, re.I)", "def pos_to_span(pos: List[str]) -> Span:\n (start, path) = pos[0].split(\":\")\n (end, _) = pos[-1].split(\":\")\n return Span(int(start), int(end), path)", "def bio_to_spans(text: List[str], tags: List[str]) -> List[Tuple[int, int, str]]:\n pointer = 0\n starts = []\n for (\n i,\n t,\n ) in enumerate(tags):\n if t.startswith(\"B-\"):\n starts.append((i, pointer))\n pointer += len(text[i]) + 1\n\n spans = []\n for s_i, s_char in starts:\n label_str = tags[s_i][2:]\n e = 0\n e_char = len(text[s_i + e])\n while len(tags) > s_i + e + 1 and tags[s_i + e + 1].startswith(\"I-\"):\n e += 1\n e_char += 1 + len(text[s_i + e])\n spans.append((s_char, s_char + e_char, label_str))\n return spans", "def text_before_after(txt: str, span: Tuple[int, int], nb_words: int) -> Tuple[str, int, int]:\n start, end = span\n before_txt = txt[:start]\n span_txt = txt[start:end]\n after_txt = txt[end:]\n\n before_txt = ' '.join(before_txt.split(' ')[-nb_words:])\n after_txt = ' '.join(after_txt.split(' ')[:nb_words])\n\n total_txt = ''.join([before_txt, span_txt, after_txt])\n return total_txt, len(before_txt), len(after_txt)", "def helper_fn_sub(text, *params):\n return Sub(\n text.format(*[\"${\" + param.title + \"}\" for param in params]),\n {\n param.title: Ref(param)\n for param in params\n }\n )", "def convert_span(span):\n p = span.getparent()\n\n style = span.get('style')\n if style is None:\n return\n\n builders = []\n if 'bold' in style:\n builders.append(builder.STRONG)\n if 'italic' in style:\n builders.append(builder.EM)\n\n if builders:\n children = []\n if span.text is not None:\n children.append(span.text)\n for c in span.getchildren():\n children.append(c)\n if c.tail is not None and c.tail.strip():\n # Have to wrap the tail text in a span tag,\n # or else it won't get added.\n children.append(builder.SPAN(c.tail))\n\n # Recursively apply the builders.\n el = builders[0](*children)\n for b in builders[1:]:\n el = b(el)\n\n # Replace the old element with the new one.\n p.replace(span, el)", "def markup_text(text, pos=None, trg_pos=None, start_pos=None):\n positions_and_markers = []\n if pos is not None: positions_and_markers.append(( pos, '<|>'))\n if trg_pos is not None: positions_and_markers.append(( trg_pos, '<+>'))\n if start_pos is not None: positions_and_markers.append((start_pos, '<$>'))\n positions_and_markers.sort()\n\n m_text = \"\"\n m_pos = 0\n for position, marker in positions_and_markers:\n m_text += text[m_pos:position] + marker\n m_pos = position\n m_text += text[m_pos:]\n return m_text", "def makeTextCell(table, span, widths, 
heights, use_headers):\n width = getTotalSpanWidth(span, widths)\n height = getTotalSpanHeight(span, heights)\n text_row = span[0][0]\n text_column = span[0][1]\n text = table[text_row][text_column]\n\n lines = text.split(\"\\n\")\n for i in range(len(lines)):\n width_difference = width - len(lines[i])\n lines[i] = lines[i] + lineBreak(width_difference, \" \")\n\n height_difference = height - len(lines)\n empty_lines = []\n for i in range(0, height_difference):\n empty_lines.append(lineBreak(width, \" \"))\n lines.extend(empty_lines)\n\n output = [\"+\" + lineBreak(width, \"-\") + \"+\"]\n for i in range(0, height):\n output.append(\"|\" + lines[i] + \"|\")\n\n if use_headers and span[0][0] == 0:\n symbol = \"=\"\n else:\n symbol = \"-\"\n output.append(\"+\" + lineBreak(width, symbol) + \"+\")\n\n text = \"\\n\".join(output)\n row_count = getSpanRowCount(span)\n column_count = getSpanColumnCount(span)\n cell = Cell(text, text_row, text_column, row_count, column_count)\n\n return cell", "def styleText(self, start: Any, end: Any) -> None:", "def to_spans(tags: List[str], tokens: List[str], probs: List[float]) -> List[Span]:\n assert len(tags) == len(tokens) == len(probs)\n spans = []\n idx = 0\n while idx < len(tags):\n if tags[idx].startswith(\"B-\"):\n label = tags[idx].split(\"-\")[1]\n span = Span(label, idx, idx, tokens[idx], probs[idx])\n idx += 1\n while idx < len(tags) and tags[idx].startswith(\"I-\"):\n span.end += 1\n span.text += tokens[idx]\n span.confidence += probs[idx]\n idx += 1\n span.confidence /= (span.end - span.start + 1)\n spans.append(span)\n else:\n idx += 1\n\n return spans", "def markupSeq(seq, ulPosList, boldPosList, annots = {}):\n annotStarts = {}\n annotEnds = defaultdict(set)\n for (start, end), aDict in annots.iteritems():\n annotStarts[start] = aDict\n aDict[\"end\"] = end\n\n ulStarts = set([x[0] for x in ulPosList])\n ulEnds = set([x[1] for x in ulPosList])\n boldStarts = set([x[0] for x in boldPosList])\n boldEnds = set([x[1] for x in boldPosList])\n ret = []\n openAnnots = defaultdict(int) # current number of open spans, per cssString\n openTags = set()\n for i, nucl in enumerate(seq):\n if i in annotEnds:\n for tagStr in annotEnds[i]:\n if tagStr in openAnnots:\n openAnnots[tagStr]-=1\n if openAnnots[tagStr]==0:\n ret.append(\"</span>\")\n del openAnnots[tagStr]\n\n if i in annotStarts:\n aDict = annotStarts[i]\n cssParts = []\n for key, val in aDict[\"css\"].iteritems():\n cssParts.append(\"%s:%s\" % (key, val))\n cssStr = \";\".join(cssParts)\n tagStr = \"<span style='%s'>\" % cssStr\n if not tagStr in openAnnots:\n ret.append(tagStr)\n openAnnots[tagStr]+=1\n annotEnds[aDict[\"end\"]].add(tagStr)\n\n if i in ulStarts:\n ret.append(\"<u>\")\n openTags.add(\"u\")\n if i in ulEnds:\n ret.append(\"</u>\")\n if \"u\" in openTags:\n openTags.remove(\"u\")\n if i in boldStarts:\n ret.append(\"<b>\")\n openTags.add(\"b\")\n if i in boldEnds:\n ret.append(\"</b>\")\n if \"strong\" in openTags:\n openTags.remove(\"b\")\n ret.append(nucl)\n if (i+1) % 80==0:\n ret.append(\"<br>\")\n for tag in openTags:\n ret.append(\"</%s>\" % tag)\n return \"\".join(ret)\n #return seq[:start]+\"<u>\"+seq[start:end]+\"</u>\"+seq[end:]", "def highlight_sentence(sent, target, tag_s=\"<span class='target-highlight'>\", tag_e=\"</span>\"):\n\n # case insensitive sub, replaces original casing\n # sent_ = re.sub(target, \"%s%s%s\" % (tag_s, target, tag_e), sent, flags=re.IGNORECASE)\n\n # Case insensitive detection, case-preserving substitution.\n sent_ = re.sub(r\"(?=%s)\" % 
target, tag_s, sent, flags=re.IGNORECASE)\n sent_ = re.sub(r\"(?<=%s)\" % target, tag_e, sent_, flags=re.IGNORECASE)\n return sent_" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply Textile to a block of text.
def textile(text, head_offset=0, html_type='xhtml', auto_link=False,
            encoding=None, output=None):
    return Textile(auto_link=auto_link).textile(text, head_offset=head_offset,
                                                html_type=html_type)
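A sketch of the module-level entry point above, using standard Textile markup; the input string is illustrative only:

import textile  # assumed module name

source = 'h1. Heading\n\nA paragraph with *strong* text and a "link":http://example.com page.'
print(textile.textile(source, html_type='xhtml'))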
[ "def __call__(self, text):\n for unit in self.units:\n text = unit.transform(text)\n return text", "def highlightBlock(self, text):\r\n self.highlight_function(text)", "def apply_to_fig_text(fig: mpl.figure.Figure, fn: Callable[[str], str]):\n for text in fig.findobj(match=plt.Text):\n text.set_text(fn(text.get_text()))", "def text(self, new_text):\n if isinstance(new_text, str):\n self._text = list(new_text[row * self._columns:self._columns] for row in range(self._rows))\n self._update()\n elif isinstance(new_text, list):\n self._text = [''] * self._rows\n for i in range(min(self._rows, len(new_text))):\n self._text[i] = new_text[i][:self._columns]\n self._update()\n else:\n self.text = str(new_text)", "def pushText(self, text):\n for line in text.splitlines():\n self.pushLine(line)", "def write_on_canvas (self, text, line = 0):\n for ind, char in enumerate(text):\n if ind >= self.canvas.size().x:\n return\n self.canvas.__setitem__(coord.Coord(ind, line), char)", "def get_text_blocks(self, method=Ocr.DILATION, params=None):\n if len(self.textBlocks) != 0:\n raise ValueError('self.textLines already achieved!')\n\n block_boxes = []\n blocks = []\n if method == Ocr.DILATION:\n block_boxes = self._get_text_block_by_dilation(params)\n else:\n raise ValueError('Invalid method in get_text_blocks: ' + str(method))\n\n for block_box in block_boxes:\n crop_img = self.img[block_box.y: block_box.y + block_box.h, block_box.x: block_box.x + block_box.w]\n crop_bin_img = self.bin_img[block_box.y: block_box.y + block_box.h, block_box.x: block_box.x + block_box.w]\n blocks.append(TextBlock(crop_img, crop_bin_img, block_box))\n\n if TESTING:\n text_image_copy = cv2.cvtColor(self.img, cv2.COLOR_GRAY2BGR)\n for l in block_boxes:\n cv2.rectangle(text_image_copy, (l.x, l.y), (l.x + l.w, l.y + l.h), (0, 255, 0), 1)\n cv2.imshow('test_blocks', text_image_copy)\n cv2.waitKey(0)\n\n # Assign text block inside:\n self.textBlocks = blocks", "def format_contents(self, text, col_text=COL_TEXT, col_frame=COL_FRAME):\n\n contents = []\n lines = text.splitlines()\n\n for line in lines:\n if len(line) < 1:\n line = ' ' * (self.INNER)\n else:\n line = self.center_text(line, self.INNER)\n\n contents.append(f\"{col_frame}{FRAME['boxV']}{col_text}{line}{col_frame}{FRAME['boxV']}\")\n\n return contents", "def wrap(self, text, width=None, **kwargs):\n width = self.width if width is None else width\n lines = []\n for line in text.splitlines():\n lines.extend(\n (_linewrap for _linewrap in SequenceTextWrapper(\n width=width, term=self, **kwargs).wrap(text))\n if line.strip() else (u'',))\n\n return lines", "def process_text(self, live_post, message_text):\n\n text = message_text[\"text\"]\n len_text = len(text)\n entities = message_text[\"entities\"]\n\n # Process the entities in reversed order to be able to edit the text in place.\n for entity in reversed(entities):\n url = \"\"\n start = entity[\"offset\"]\n end = start + entity[\"length\"]\n\n if entity[\"type\"] == \"url\":\n url = description = text[start:end]\n\n if is_embed(url):\n # Check if this can match an embed block, if so no conversion happens.\n # It matches an embed block if it has a line in the text for itself.\n if end == len_text or text[end] == \"\\n\":\n if start == 0 or text[start - 1] == \"\\n\":\n # This is an embed block, skip to the next entity\n continue\n\n if entity[\"type\"] == \"text_link\":\n url = entity[\"url\"]\n description = text[start:end]\n\n if url:\n url = format_url(url)\n link = f'<a href=\"{url}\">{description}</a>'\n text = 
text[:start] + link + text[end:]\n\n return super().process_text(live_post=live_post, message_text=text)", "def apply_cipher(func):\n text = args.in_file.read()\n changed_text = func(text)\n args.out_file.write(changed_text)", "def _prep_text(self, text):\n self.text_image = self.font.render(text, True, self.text_color, self.button_color)\n self.text_image_rect = self.text_image.get_rect()\n self.text_image_rect.center = self.rect.center", "def add_text(self, text, *args, **kwargs):\n # Pull down some kwargs.\n section_name = kwargs.pop('section', None)\n\n # Actually do the formatting.\n para, sp = self._preformat_text(text, *args, **kwargs)\n\n # Select the appropriate list to update\n if section_name is None:\n relevant_list = self.story\n else:\n relevant_list = self.sections[section_name]\n\n # Add the new content to list.\n relevant_list.append(para)\n relevant_list.append(sp)\n return", "def textManip(visible=bool):\n pass", "def add_text(self, text):\n if text.startswith(nl):\n text = text[1:]\n\n cls = ''\n prefix = ''\n is_code = False\n is_output = False\n interp_line = False\n after_blank = False # state 'after blank line'\n blank = False\n bullets = 0\n code_indent = 0\n output_indent = 0\n\n for line in text.split(nl):\n sline = line.strip()\n if sline.startswith('#'):\n continue\n\n # handle <ul> <li> ...\n if sline == '*':\n bullets = 1\n elif bullets == 1 and sline.startswith('*'):\n bullets = 2\n elif bullets == 2 and not sline.startswith('*'):\n bullets = 0\n self.commands.append( dict(cmd=\"text\", arg=\"</ul>\", indent=indent, cls=cls, prefix=prefix) )\n\n line = line.rstrip()\n blank = bool(not line)\n indent = len(line) - len(line.lstrip()) + 1\n\n if interp_typecmd and line.strip().startswith(\">>>\"):\n self.commands.append(dict(cmd=\"type\", arg=None))\n cls = \"code\"\n prefix = escape(\">>>\") + nbsp\n is_code = True\n interp_line = True\n # interp.prompt, space, 1 level of block indent\n code_indent = indent + 3+1\n output_indent = code_indent - 4\n\n # blank line; next line at code indent: still code; ELSE reset code\n # non-blank line; next line at code indent - 4: output\n\n # shorter indent than code should be means end of code block; ignore blank lines\n if not interp_line and indent < code_indent and not blank:\n is_code = False; cls = ''\n\n if not interp_line and after_blank and indent != code_indent and not blank:\n is_code = False; cls = ''\n\n if indent==output_indent and not interp_line:\n is_output = True; cls = \"output\"\n\n if is_output and indent < output_indent:\n is_output = False; cls = ''\n\n # ugly hack: force bigger indent on lines of code except for interp lines\n if is_code and not interp_line:\n indent += 4\n\n line = line.lstrip(\"> \")\n arg = escape(line)\n arg = arg.replace(space, nbsp).replace(\"--\", \"&mdash;\")\n if is_code or is_output:\n for name, fn, tag in images:\n arg = arg.replace(name+\"png\", fn)\n arg = arg.replace(fn, tag)\n\n if bullets == 1:\n self.commands.append( dict(cmd=\"text\", arg=\"<ul>\", indent=indent, cls=cls, prefix=prefix) )\n elif bullets == 2:\n arg = \"<li>%s</li>\" % arg.lstrip('*')\n self.commands.append( dict(cmd=\"text\", arg=arg, indent=indent, cls=cls, prefix=prefix) )\n else:\n self.commands.append( dict(cmd=\"text\", arg=arg, indent=indent, cls=cls, prefix=prefix) )\n\n prefix = ''\n interp_line = False\n after_blank = bool(not line.strip())", "def texCutContext(*args, **kwargs):\n\n pass", "def replace_text_in_cell(cell, text):\n for paragraph in cell.paragraphs[1:]:\n 
delete_paragraph(paragraph)\n cell.paragraphs[0].runs[0].text = text\n del cell.paragraphs[0].runs[1:]", "def styleText(self, start: Any, end: Any) -> None:", "def wrap_text(surface, text, size=20, color=WHITE_SMOKE, rect=(), aa=True):\n font = pygame.font.Font(STYLE, size)\n frame = pygame.Rect(rect)\n y = frame.top\n line_spacing = -2\n font_height = font.size(\"Tg\")[1]\n\n while text:\n i = 1\n # Determines if the row of text will be outside our area.\n if y + font_height > frame.bottom:\n break\n # Determine maximum width of line.\n while font.size(text[:i])[0] < frame.width and i < len(text):\n i += 1\n # If text is wrapped, then adjust the wrap to the last word.\n if i < len(text):\n i = text.rfind(\" \", 0, i) + 1\n # Blit.\n image = font.render(text[:i], aa, color).convert_alpha()\n surface.blit(image, (frame.left, y))\n y += font_height + line_spacing\n text = text[i:]\n return text" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Do batched inference on rays, processed in chunks of `chunk` rays at a time.
def batched_inference(models, embeddings, rays, N_samples, N_importance, use_disp, chunk, white_back): B = rays.shape[0] chunk = 1024*32 results = defaultdict(list) for i in range(0, B, chunk): rendered_ray_chunks = \ render_rays(models, embeddings, rays[i:i+chunk], N_samples, use_disp, 0, 0, N_importance, chunk, dataset.white_back, test_time=True) for k, v in rendered_ray_chunks.items(): results[k] += [v] for k, v in results.items(): results[k] = torch.cat(v, 0) return results
[ "def batched_inference(models,\n coverage_models,\n embeddings,\n rays,\n N_samples,\n N_importance,\n use_disp,\n chunk,\n point_transform_func=None,\n topk=0):\n B = rays.shape[0]\n results = defaultdict(list)\n for i in range(0, B, chunk):\n rendered_ray_chunks = \\\n render_rays(models,\n coverage_models,\n embeddings,\n rays[i:i+chunk],\n N_samples,\n use_disp,\n 0,\n 0,\n N_importance,\n chunk,\n dataset.white_back,\n test_time=True,\n point_transform_func=point_transform_func,\n topk=topk)\n\n for k, v in rendered_ray_chunks.items():\n results[k] += [v.cpu()]\n\n for k, v in results.items():\n results[k] = torch.cat(v, 0)\n return results", "def batchify_rays(num_frames, rays_flat, chunk=1024 * 32, **kwargs):\n all_ret = {}\n for i in range(0, rays_flat.shape[0], chunk):\n ret = render_rays(num_frames, rays_flat[i : i + chunk], **kwargs)\n for k in ret:\n if k not in all_ret:\n all_ret[k] = []\n all_ret[k].append(ret[k])\n\n all_ret = {k : torch.cat(all_ret[k], 0) for k in all_ret}\n return all_ret", "def _batch_inference(self, batched_inputs):\n outputs = []\n inputs = []\n for idx, input in zip(count(), batched_inputs):\n inputs.append(input)\n if len(inputs) == self.batch_size or idx == len(batched_inputs) - 1:\n outputs.extend(self.model.inference(inputs, do_postprocess=False))\n inputs = []\n return outputs", "def _perform_batch_inference(self, input_data, output_data, **kwargs):\n batch_strategy = kwargs[\"BatchStrategy\"]\n max_payload = int(kwargs[\"MaxPayloadInMB\"])\n data_source, batch_provider = self._prepare_data_transformation(input_data, batch_strategy)\n\n # Output settings\n accept = output_data[\"Accept\"] if \"Accept\" in output_data else None\n\n working_dir = self._get_working_directory()\n dataset_dir = data_source.get_root_dir()\n\n for fn in data_source.get_file_list():\n\n relative_path = os.path.dirname(os.path.relpath(fn, dataset_dir))\n filename = os.path.basename(fn)\n copy_directory_structure(working_dir, relative_path)\n destination_path = os.path.join(working_dir, relative_path, filename + \".out\")\n\n with open(destination_path, \"wb\") as f:\n for item in batch_provider.pad(fn, max_payload):\n # call the container and add the result to inference.\n response = self.local_session.sagemaker_runtime_client.invoke_endpoint(\n item, \"\", input_data[\"ContentType\"], accept\n )\n\n response_body = response[\"Body\"]\n data = response_body.read()\n response_body.close()\n f.write(data)\n if \"AssembleWith\" in output_data and output_data[\"AssembleWith\"] == \"Line\":\n f.write(b\"\\n\")\n\n move_to_destination(working_dir, output_data[\"S3OutputPath\"], self.name, self.local_session)\n self.container.stop_serving()", "def inference(tasks, name, convnet_model, convnet_weight_path, input_patch_size,\n output_patch_size, output_patch_overlap, output_crop_margin, patch_num,\n num_output_channels, dtype, framework, batch_size, bump, mask_output_chunk,\n mask_myelin_threshold, input_chunk_name, output_chunk_name):\n with Inferencer(\n convnet_model,\n convnet_weight_path,\n input_patch_size=input_patch_size,\n output_patch_size=output_patch_size,\n num_output_channels=num_output_channels,\n output_patch_overlap=output_patch_overlap,\n output_crop_margin=output_crop_margin,\n patch_num=patch_num,\n framework=framework,\n dtype=dtype,\n batch_size=batch_size,\n bump=bump,\n mask_output_chunk=mask_output_chunk,\n mask_myelin_threshold=mask_myelin_threshold,\n dry_run=state['dry_run'],\n verbose=state['verbose']) as inferencer:\n \n state['operators'][name] = 
inferencer \n\n for task in tasks:\n handle_task_skip(task, name)\n if not task['skip']:\n if 'log' not in task:\n task['log'] = {'timer': {}}\n start = time()\n\n task[output_chunk_name] = state['operators'][name](\n task[input_chunk_name])\n\n task['log']['timer'][name] = time() - start\n task['log']['compute_device'] = state[\n 'operators'][name].compute_device\n yield task", "def inference(self):\n result = run_swift_benchmark(name=self.benchmark_name, variety='inference', backend='eager')\n self.report_benchmark(**result)", "def chunk_apply(transform, chunks, out: ndarray) -> None:\n for chunk, dims in chunks:\n (rstart, rend), (cstart, cend) = dims\n out[rstart: rend, cstart: cend] = transform(chunk)", "def _process_block(block, visited, inferred_types, backend):\n for op in block.instructions:\n log_debug('handle op {}'.format(op))\n if isinstance(op, instructions.PrimOp):\n if not all(_is_determined(inferred_types[var]) for var in op.vars_in):\n continue\n types_in = [inferred_types[var] for var in op.vars_in]\n # Offer type hints for cases where we need to type non-Tensor literals.\n preferred_types_out = instructions.pattern_map(\n lambda var: inferred_types[var], op.vars_out)\n with _type_inferring():\n objs_out = backend.run_on_dummies(\n op.function, _add_incompatible_batch_dim(types_in))\n types_out = _strip_batch_dim(instructions.pattern_map2(\n lambda tp, val: type_of_pattern(val, backend, preferred_type=tp),\n preferred_types_out, objs_out, leaf_type=instructions.Type))\n _merge_vars(op.vars_out, types_out, inferred_types, backend,\n log_message='update PrimOp vars_out')\n elif isinstance(op, instructions.FunctionCallOp):\n if not all(_is_determined(inferred_types[var]) for var in op.vars_in):\n continue\n # First, bind op.vars_in to op.function.vars_in.\n types_in = [inferred_types[var].tensors for var in op.vars_in]\n _merge_vars(op.function.vars_in, types_in, inferred_types, backend,\n log_message='init function vars_in')\n # Execute type inference.\n types_out = op.function.type_inference(types_in)\n for leaf in instructions.pattern_traverse(\n types_out, leaf_type=instructions.TensorType):\n if not isinstance(leaf, instructions.TensorType):\n msg = ('Expected function output type to be '\n 'a nested list or tuple of TensorType, found {}.').format(leaf)\n raise TypeError(msg)\n # To help with typing recursive base-case return literals, we seed\n # return_vars types before stepping into the function.\n _merge_vars(op.function.vars_out, types_out, inferred_types, backend,\n log_message='update function vars_out')\n # Finally, update op.vars_out with the results of type inference.\n _merge_vars(op.vars_out, types_out, inferred_types, backend,\n log_message='update FunctionCall vars_out')\n # Step into function. 
Note: it will only be visited once, if recursive.\n _process_graph(op.function.graph, visited, inferred_types, backend)\n # No need to process block.terminator, because all the information\n # that carries about types is already carried by the variable names\n # being the same across blocks", "def batch_mvcnn_voxel_traversal_with_ray_marching(\n M,\n D,\n N,\n F,\n H,\n W,\n padding,\n bbox,\n grid_shape,\n sampling_scheme\n):\n # Set the paths to the files that will be used to construct the cuda kernel\n file_paths = [\n \"ray_tracing.cu\",\n \"utils.cu\",\n \"planes_voxels_mapping.cu\",\n \"feature_similarities.cu\",\n \"sampling_schemes.cu\"\n ]\n\n cu_source_code = parse_cu_files_to_string(file_paths)\n\n tpl = Template(cu_source_code + \"\"\"\n __global__ void batch_mvcnn_planes_voxels_with_ray_marching(\n int n_rays,\n int * ray_idxs,\n float * features,\n float * P,\n float * P_inv,\n float * camera_center,\n float * voxel_grid,\n int * ray_voxel_indices,\n int * ray_voxel_count,\n float * S_new\n ) {\n // Compute the thread\n int r = threadIdx.x + blockDim.x * blockIdx.x;\n if (r >= n_rays)\n return;\n\n // Estimate the ray_start and ray_end for the current pixel\n float ray_start[3], ray_end[3];\n $sampling_scheme(\n ray_idxs[r],\n P_inv,\n camera_center,\n ray_start,\n ray_end\n );\n\n // Compute the similarities between features\n float S[$depth_planes];\n compute_similarities_per_ray(\n features,\n P,\n ray_start,\n ray_end,\n S\n );\n\n // Estimate the ray_voxel_indices and the ray_voxel_count\n voxel_traversal(\n ray_start,\n ray_end,\n ray_voxel_indices + r*$max_voxels*3,\n ray_voxel_count + r\n );\n\n // Map the depth planes to voxel centers\n planes_voxels_mapping(\n voxel_grid,\n ray_voxel_indices + 3*$max_voxels*r,\n ray_voxel_count + r,\n ray_start,\n ray_end,\n S,\n S_new + $max_voxels*r\n );\n }\n \"\"\")\n\n mod = SourceModule(tpl.substitute(\n max_voxels=M,\n depth_planes=D,\n n_views=N,\n padding=padding,\n features_dimensions=F,\n width=W,\n height=H,\n grid_x=grid_shape[0],\n grid_y=grid_shape[1],\n grid_z=grid_shape[2],\n bbox_min_x=bbox[0],\n bbox_min_y=bbox[1],\n bbox_min_z=bbox[2],\n bbox_max_x=bbox[3],\n bbox_max_y=bbox[4],\n bbox_max_z=bbox[5],\n sampling_scheme=sampling_scheme\n ))\n cuda_fp = mod.get_function(\"batch_mvcnn_planes_voxels_with_ray_marching\")\n cuda_fp.prepare(\"i\" + \"P\"*9)\n\n @all_arrays_to_gpu\n def fp(\n ray_idxs,\n features,\n P,\n P_inv,\n camera_center,\n voxel_grid,\n ray_voxel_indices,\n ray_voxel_count,\n S_new,\n threads=2048\n ):\n # Assert everything is the right size, shape and dtype\n assert S_new.shape[1] == M\n assert len(ray_voxel_count.shape) == 1\n assert np.float32 == S_new.dtype\n assert np.int32 == ray_voxel_count.dtype\n\n # Determine the grid and block arguments\n n_rays = len(S_new)\n blocks = n_rays / threads + int(n_rays % threads != 0)\n\n cuda_fp.prepared_call(\n (threads, 1),\n (blocks, 1, 1),\n np.int32(n_rays),\n ray_idxs.gpudata,\n features.gpudata,\n P.gpudata,\n P_inv.gpudata,\n camera_center.gpudata,\n voxel_grid.gpudata,\n ray_voxel_indices.gpudata,\n ray_voxel_count.gpudata,\n S_new.gpudata\n )\n\n return fp", "def launch_inference(self):\n\n self.logger.info('Beginning to submit inference tasks')\n # Make a folder for the models\n model_folder = self.output_dir.joinpath('models')\n model_folder.mkdir(exist_ok=True)\n \n # Submit the chunks to the workflow engine\n for mid in range(len(self.mpnns)):\n # Get a model that is ready for inference\n model = self.ready_models.get()\n \n # Convert it to a 
pickle-able message\n model_msg = MPNNMessage(model)\n \n # Proxy it once, to be used by all inference tasks\n model_msg_proxy = ps.store.get_store(self.ps_names['infer']).proxy(model_msg, key=f'model-{mid}-{self.inference_batch}')\n \n # Run inference with all segements available\n for cid, (chunk, chunk_msg) in enumerate(zip(self.inference_chunks, self.inference_proxies)):\n self.queues.send_inputs([model_msg_proxy], chunk_msg,\n topic='infer', method='evaluate_mpnn',\n keep_inputs=False,\n task_info={'chunk_id': cid, 'chunk_size': len(chunk), 'model_id': mid})\n self.logger.info('Finished submitting molecules for inference')", "def inference_function(smiles, model_dir, **other_cols):\n # Launch the process pool if this is the first invocation\n # Note: The pool will stay alive until the host process dies\n # OK for HPC (host dies when job completes) but be very careful\n # running this function on persistent servers.\n global pool, models, feat\n import os\n core_count = len(os.sched_getaffinity(0))\n # I use the affinity rather than `os.cpu_count()` to work with aprun's\n # protocol for specifying the affinity of each MPI PE and all its\n # child processes (including those spawned by multiprocessing)\n if pool is None:\n from multiprocessing import Pool\n pool = Pool(core_count)\n\n # Measure the start time and record host name\n from datetime import datetime\n from platform import node\n start_time = datetime.utcnow().isoformat()\n hostname = node()\n\n # Load models\n from glob import glob\n import pickle as pkl\n if models is None:\n model_files = glob(os.path.join(model_dir, '*.pkl'))\n models = {}\n for path in model_files:\n with open(path, 'rb') as fp:\n models[os.path.basename(path)[:-4]] = pkl.load(fp)\n\n # Remove the first step from the pipeline (feature generation from the SMILES)\n feat = None\n for m in models.values():\n feat = m.steps.pop(0)[1]\n\n # Compute features in parallel\n import numpy as np\n n_splits = min(core_count * 4, len(smiles))\n smiles_chunks = np.array_split(smiles, n_splits)\n feature_chunks = pool.map(feat.transform, smiles_chunks)\n\n # Pull in the inference function and run it\n from functools import partial\n result = {'smiles': smiles}\n for name, model in models.items():\n func = partial(run_inference, model)\n result[name] = np.concatenate(pool.map(func, feature_chunks))\n result.update(other_cols)\n\n # Measure the end time\n end_time = datetime.utcnow().isoformat()\n return {\n 'start': start_time,\n 'result': result,\n 'end': end_time,\n 'core_count': core_count,\n 'hostname': hostname\n }", "def _forward_inference(\n self, batch: MultiAgentBatch, **kwargs\n ) -> Union[Mapping[str, Any], Dict[ModuleID, Mapping[str, Any]]]:\n return self._run_forward_pass(\"forward_inference\", batch, **kwargs)", "def run(self, data_batch):\n return self.sess.run(self.predictions, feed_dict={self.data_ph:data_batch})", "def pre_batch(self, batch_size):\n self.results = []", "def infer_with_multiple_checkpoints(self):\n device_ids = self.config['testing']['gpus']\n device = torch.device(\"cuda:{0:}\".format(device_ids[0]))\n\n if(self.inferer is None):\n infer_cfg = self.config['testing']\n infer_cfg['class_num'] = self.config['network']['class_num']\n self.inferer = Inferer(infer_cfg)\n ckpt_names = self.config['testing']['ckpt_name']\n infer_time_list = []\n with torch.no_grad():\n for data in self.test_loader:\n images = self.convert_tensor_type(data['image'])\n images = images.to(device)\n \n # for debug\n # for i in range(images.shape[0]):\n # image_i = 
images[i][0]\n # label_i = images[i][0]\n # image_name = \"temp/{0:}_image.nii.gz\".format(names[0])\n # label_name = \"temp/{0:}_label.nii.gz\".format(names[0])\n # save_nd_array_as_image(image_i, image_name, reference_name = None)\n # save_nd_array_as_image(label_i, label_name, reference_name = None)\n # continue\n start_time = time.time()\n predict_list = []\n for ckpt_name in ckpt_names:\n checkpoint = torch.load(ckpt_name, map_location = device)\n self.net.load_state_dict(checkpoint['model_state_dict'])\n \n pred = self.inferer.run(self.net, images)\n # convert tensor to numpy\n if(isinstance(pred, (tuple, list))):\n pred = [item.cpu().numpy() for item in pred]\n else:\n pred = pred.cpu().numpy()\n predict_list.append(pred)\n pred = np.mean(predict_list, axis=0)\n data['predict'] = pred\n # inverse transform\n for transform in self.transform_list[::-1]:\n if (transform.inverse):\n data = transform.inverse_transform_for_prediction(data) \n \n infer_time = time.time() - start_time\n infer_time_list.append(infer_time)\n self.save_outputs(data)\n infer_time_list = np.asarray(infer_time_list)\n time_avg, time_std = infer_time_list.mean(), infer_time_list.std()\n logging.info(\"testing time {0:} +/- {1:}\".format(time_avg, time_std))", "def present_batch(self, memory, minibatch_size):", "def batch_load(obj, factor=2):\n if isinstance(obj, xr.DataArray):\n dataset = obj._to_temp_dataset()\n else:\n dataset = obj\n\n # result = xr.full_like(obj, np.nan).load()\n computed = []\n for label, chunk in split_blocks(dataset, factor=factor):\n print(f\"computing {label}\")\n computed.append(chunk.compute())\n result = xr.combine_by_coords(computed)\n\n if isinstance(obj, xr.DataArray):\n result = obj._from_temp_dataset(result)\n\n return result", "def do_inference_v2(context, bindings, inputs, outputs, stream):\n # Transfer input data to the GPU.\n [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]\n # Run inference.\n context.execute_async_v2(bindings=bindings,\n stream_handle=stream.handle)\n # Transfer predictions back from the GPU.\n [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]\n # Synchronize the stream\n stream.synchronize()\n # Return only the host outputs.\n return [out.host for out in outputs]", "def test_run_inception_multicall(self):\n for batch_size in (7, 3, 2):\n img = tf.ones([batch_size, 299, 299, 3])\n _run_with_mock(gan_metrics.run_inception, img)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the current fitter to the list of saved fits used for AIC testing
def add_fitter(self): # Check to make sure the fit being added has the some number of # observations of the previous fit if len(self._fit_snapshot_dict) > 0: k = list(self._fit_snapshot_dict.keys())[0] other_num_obs = self._fit_snapshot_dict[k].fit_num_obs current_num_obs = self._fit.fitter.fit_num_obs if current_num_obs != other_num_obs: err = "AIC (and related) tests are only valid for fits using identical input data." QW.QMessageBox.warning(self, "warning", err, QW.QMessageBox.Ok) return if not self._fit.fitter.fit_success: err = "Fit must be performed and successful before it can be added to the AIC list." QW.QMessageBox.warning(self, "warning", err, QW.QMessageBox.Ok) return text, ok = QW.QInputDialog.getText(self, 'Save Fitter', 'Enter Name:') # save deepcopy of fitter if ok: self._fit_snapshot_dict[text] = copy.deepcopy(self._fit.fitter) self._fitter_select.addItem(text) self._fit.event_logger.emit("Fitter {} saved to AIC list.".format(text),"info")
[ "def test_added_to_list(*args, **kwargs):\n if (not loaded_from_fixture(kwargs)):\n update_unit_test_infos(kwargs[\"instance\"].test_list)", "def test_user_current_list_starred(self):\n pass", "def current_hitter(self, current_hitter):\n\n self._current_hitter = current_hitter", "def test_list_added_to_cycle(*args, **kwargs):\n if (not loaded_from_fixture(kwargs)):\n update_unit_test_infos(kwargs[\"instance\"].test_list)", "def test_add_current_list_to_history():\r\n\r\n participants = initialise_participants_dictionary()\r\n add_current_list_to_history(participants)\r\n\r\n history = get_history_list(participants)\r\n\r\n assert len(history) == 1", "def add_flower(self, **kwargs):\n f = Flower(self.my_manager, **kwargs)\n self.flower_list.append(f)\n self.write_list_to_file()\n return f", "def add_filter(self,fltr):\n self.mutex.lock()\n self.filters.append(fltr)\n self.mutex.unlock()", "def auto_add_to_list(self, phrase, list_slug, count=100, result_type=\"recent\"):\n \n result = self.search_tweets(phrase, count, result_type)\n \n for tweet in result[\"statuses\"]:\n try:\n if tweet[\"user\"][\"screen_name\"] == self.BOT_CONFIG[\"TWITTER_HANDLE\"]:\n continue\n \n result = self.TWITTER_CONNECTION.lists.members.create(owner_screen_name=self.BOT_CONFIG[\"TWITTER_HANDLE\"],\n slug=list_slug,\n screen_name=tweet[\"user\"][\"screen_name\"])\n print(\"User %s added to the list %s\" % (tweet[\"user\"][\"screen_name\"], list_slug), file=sys.stdout)\n except TwitterHTTPError as api_error:\n print(api_error)", "def test_set_current_list_to():\r\n\r\n participants = initialise_participants_dictionary()\r\n set_current_list_to([0, 1, 2, 3, 4, 5], participants)\r\n current = get_current_list(participants)\r\n assert current == [0, 1, 2, 3, 4, 5]", "def test_user_list_starred(self):\n pass", "def extend(self, list):", "def addPlaylistFeatures(self, f, playlists, target):\r\n for playlist in playlists:\r\n self.addFeatures(f, playlist, target, playlist[\"weight\"])", "def test_list_saved(*args, **kwargs):\n if not loaded_from_fixture(kwargs):\n update_unit_test_infos(kwargs[\"instance\"])", "def addFeatures(self, f, playlist, target, weight):\r\n #sp = getSP() # update Spotify authorization\r\n tracks = playlist[\"tracks\"]\r\n songs = getSongs(tracks)\r\n # ids = getSongIDs(tracks, songs)\r\n self.addFeaturesSongs(f, songs, target, weight)", "def test_sitter_creation(self):\n self.assertTrue(self.sitter.start_time == self.sitter_start_time)\n self.assertTrue(self.sitter.default_rate == self.family.default_rate)", "def test_experiment_list_extend():\n # Create a minimal ExperimentList instance.\n expts = ExperimentList([Experiment()])\n # Try to extend it.\n expts.extend(expts)", "def add_bet(self, bet):\n\n self.bets.append(bet)\n self.users.append(bet.user)", "def add(self):\n self.ballList.append(Ball())\n self.count(True)", "def __init__(self):\n this = _coin.new_SoAuditorList()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints a message prompting the user to enter one of the commands and returns the uppercase input entered by the user. >>> display_prompt()
def display_prompt() -> str: user_input = input("\nL)oad image S)ave-as \n" + "2)-tone 3)tone X)treme contrast T)int sepia P)osterize \n" + "E)dge detect I)mproved edge detect V)ertical flip H)orizontal flip \n" + "Q)uit \n" + ": " ) user_input = user_input.upper() return user_input
[ "def userInput(prompt: str = \"\") -> str:\n return input(str).lower()", "def prompt(self):\n\t\t_globals._console.write(f'{self.prompt_str} ')", "def read_user_response(self, prompt: str=None) -> str:\n ch = userinput.read_response(prompt)\n return ch.lower()", "def step_see_prompt(context):\n context.cli.expect('wharfee> ')", "def prompt_for_action():\n while True:\n print()\n print(\"What would you like to do?\")\n print()\n print(\" A = add an item to the inventory.\")\n print(\" R = remove an item from the inventory.\")\n print(\" C = generate a report of the current inventory levels.\")\n print(\" O = generate a report of the inventory items to re-order.\")\n print(\" Q = quit.\")\n print()\n action = input(\"> \").strip().upper()\n if action == \"A\": return \"ADD\"\n elif action == \"R\": return \"REMOVE\"\n elif action == \"C\": return \"INVENTORY_REPORT\"\n elif action == \"O\": return \"REORDER_REPORT\"\n elif action == \"Q\": return \"QUIT\"\n else:\n print(\"Unknown action!\")", "def eval_prompt(self, input):\n return input", "def get_user_select(user_select):\n prompt = \"\\nChoose an action:\\n\\n\"\n for (key, val) in user_select.items():\n prompt += f\"{key} - {val}\\n\"\n prompt += \">>> \"\n\n return prompt", "def prompt_alg():\n algorithms = {0: 'random', 1: 'greedy', 2: 'greedy2', 3: 'hillclimber', 4: 'simulated_annealing'}\n print(\"What algorithm should be used (type INFO to get description of algorithms)\")\n print(''.join(['{0}{1}'.format(str(key) + ': ', value + ' ') for key, value in algorithms.items()]), end=' ')\n user_in = input('\\n> ')\n command(user_in)\n try:\n user_in = int(user_in)\n except ValueError:\n print('Invalid number, choose one from list below')\n return prompt_alg()\n if user_in not in algorithms:\n print('Invalid algorithm, choose one from list below')\n return prompt_alg()\n else:\n return algorithms[user_in]", "def prompt_subcommand():\n mapped = OrderedDict([\n (\"clinical\", \"Upload clinical EEG data\"),\n (\"imaging\", \"Upload imaging data\"),\n (\"host\", \"Transfer EEG data from the host PC\"),\n (\"experiment\", \"Upload all experimental data\")\n ])\n completer = WordCompleter([value for _, value in list(mapped.items())])\n cmd = ''\n while cmd not in SUBCOMMANDS:\n res = prompt(\"Action: \", completer=completer)\n for key in mapped:\n if res == mapped[key]:\n cmd = key\n return cmd", "def promptText(message):\n choice = \"\"\n while not choice:\n choice = input(message+\" [text] \")\n try:\n str(choice)\n except:\n print(\"ERROR: Input not recognized. Choose text\\n\")\n choice = \"\"\n return choice", "def user_prompt():\r\n valid_response = False\r\n while not valid_response:\r\n choice = input(\"Hello! What would you like today? (espresso/latte/cappuccino): \").lower()\r\n if choice == \"espresso\" or choice == \"latte\" or choice == \"cappuccino\" or choice == \"off\":\r\n return choice\r\n elif choice == \"report\":\r\n # TODO: 3. 
Print report of all coffee machine resources if \"report\" is entered\r\n print_resources()\r\n else:\r\n print(\"Invalid response.\")", "def prompt_for_value(message_text):\n\n sys.stdout.write(f\"{message_text}: \")\n sys.stdout.flush()\n return sys.stdin.readline().rstrip()", "def prompt(self, message):\n raise NotImplementedError()", "def get_user_choice():\n\n return input('Your choice: ')", "def after_prompt():\n sys.stdout.write(AFTER_PROMPT)", "def _get_user_input(prompt):\n\n _inp = ''\n while not _inp:\n _inp = input(prompt)\n\n return _inp", "def ask_input(room_details):\n prompt = \"> \"\n next = raw_input(prompt)\n\n if isinstance(room_details, dict) and next in room_details:\n print room_details[next]\n return \"\"\n elif CMD_DESCRIBE == next:\n print \"\"\"\ndescribe - To give an account in words of (someone or something),\nincluding all the relevant characteristics, qualities, or events\n\"\"\"\n return \"\"\n\n return next", "def vqa_prompt(self, question, answer=None) -> str:", "def interact( self ): \n\t\ttext = \"\"\n\t\treadline.set_completer( self._completeline )\n\t\treadline.parse_and_bind(\"tab: complete\")\n\t\twhile self._keeprunning:\n\t\t\ttry:\n\t\t\t\t\n\t\t\t\twhile( text != \"quit\" ):\t\n\t\t\t\t\ttext = input( \"# \" )\n\t\t\t\t\ttext = text.strip()\n\t\t\t\t\t# skips dem newlines\n\t\t\t\t\tif( text != \"\" ):\n\t\t\t\t\t\tif text.startswith( \"#\" ):\n\t\t\t\t\t\t\tprint( self._command_hash( text ) )\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttext_lower = text.lower()\n\t\t\t\t\t\t\toper = text_lower.split()[0]\n\t\t\t\t\t\t\tif( oper in self.plugins ):\n\t\t\t\t\t\t\t\tif( '_handle_text' in dir( self.plugins[oper] ) ):\n\t\t\t\t\t\t\t\t\tprint( self.plugins[oper]._handle_text( text ) )\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tprint( \"{} module doesn't have handle_text\".format( oper ) )\n\t\t\texcept:\n\t\t\t\tprint( \"Something failed. Let's try not to do that again. Displaying traceback...\" )\n\t\t\t\tprint( \"#\" * 40 )\n\t\t\t\t\t\n\t\t\t\t#traceback.print_stack()\n\t\t\t\t#traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)\n\t\t\t\ttraceback.print_exc()\n\t\t\t\tprint( \"#\" * 40 )\n\t\t\t\t#TODO add fault logging to Andy rebootskis\n\t\t\tfinally:\n\t\t\t\tif( text == \"quit\" ):\n\t\t\t\t\tself._keeprunning = False\n\t\t\t\tself._save_before_shutdown()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies the filter selected by user_input to the image and returns the filtered image. >>> apply_filter(input_entered, final_image)
def apply_filter(user_input: str, image: Image) -> Image: if user_input == "2": filtered_image = two_tone(image, 'yellow', 'cyan') elif user_input == "3": filtered_image = three_tone(image, 'yellow', 'magenta', 'cyan') elif user_input == "X": filtered_image = extreme_contrast(image) elif user_input == "T": filtered_image = sepia(image) elif user_input == "P": filtered_image = posterize(image) elif user_input == "E": thresh = int(input("Threshold? ")) filtered_image = detect_edges(image, thresh) elif user_input == "I": thresh = int(input("Threshold? ")) filtered_image = detect_edges_better(image, thresh) elif user_input == "V": filtered_image = flip_vertical(image) elif user_input == "H": filtered_image = flip_horizontal(image) show(filtered_image) return filtered_image
[ "def run(self):\n filtered_image = self.filter_function(source=self.noisy_image, shape=self.shape, sigma=self.sigma)\n\n # Emit finished signal to end the thread\n self.finished.emit(filtered_image, self.combo_id)", "def spatially_filter_image(*args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def execute(self, inputs, update = 0, last = 0):\t\t\t \n\t\tif not lib.ProcessingFilter.ProcessingFilter.execute(self, inputs):\n\t\t\treturn None\n\t\t\n\t\timage = self.getInput(1)\n\t\torigType = image.GetScalarType()\n\t\tcast1 = vtk.vtkImageCast()\n\t\tcast1.SetInput(image)\n\t\tcast1.SetOutputScalarTypeToShort()\n\t\t# vtkImageCityBlockDistance requires short input\n\t\tself.vtkfilter.SetInput(cast1.GetOutput())\n\t\t\n\t\tdata = self.vtkfilter.GetOutput()\n\t\tif self.parameters[\"CastToOriginal\"]:\n\t\t\tcast = vtk.vtkImageCast()\n\t\t\tcast.SetInput(self.vtkfilter.GetOutput())\n\t\t\tcast.SetOutputScalarType(origType)\n\t\t\tcast.SetClampOverflow(1)\n\t\t\tdata = cast.GetOutput()\n\t\t\n\t\tif update:\n\t\t\tself.vtkfilter.Update()\n\t\t\n\t\treturn data", "def two_image_filter( image1, image2 ):\n pass", "def filter(self, stack) -> None:\n low_pass = partial(self.low_pass, sigma=self.sigma)\n stack.image.apply(low_pass, is_volume=self.is_volume, verbose=self.verbose)\n\n # apply to aux dict too:\n for auxiliary_image in stack.auxiliary_images.values():\n auxiliary_image.apply(low_pass, is_volume=self.is_volume)", "def my_imfilter(image, filter):\n\n assert filter.shape[0] % 2 == 1\n assert filter.shape[1] % 2 == 1\n\n ############################\n ### TODO: YOUR CODE HERE ###\n\n filtered_image = np.zeros_like(image)\n filter_xcoord = int((filter.shape[0] - 1)/2)\n filter_ycoord = int((filter.shape[1] - 1)/2)\n\n padded_input = np.pad(image, [(filter_xcoord, filter_xcoord), (filter_ycoord,filter_ycoord), (0, 0)], 'reflect')\n \n for layer in range(image.shape[2]):\n for row in range(image.shape[0]):\n for col in range(image.shape[1]):\n x = padded_input[row:row + filter.shape[0], col:col+filter.shape[1], layer]\n filtered_image[row, col, layer] = np.sum(np.multiply(filter, x))\n\n ### END OF STUDENT CODE ####\n ############################\n\n return filtered_image", "def apply_filter(image):\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n kernel = np.ones((5, 5), np.float32) / 15\n filtered = cv2.filter2D(gray, -1, kernel)\n \n return filtered", "def processFilter(self, pInputData):\n return _almathswig.DigitalFilter_processFilter(self, pInputData)", "def onApplyButton(self):\n print('Apply Button Pressed...............')\n try:\n\n # Compute output\n self.logic.process(self.ui.inputSelector.currentNode(), self.ui.outputSelector.currentNode(),\n self.ui.imageThresholdSliderWidget.value, self.ui.invertOutputCheckBox.checked)\n\n # Compute inverted output (if needed)\n if self.ui.invertedOutputSelector.currentNode():\n # If additional output volume is selected then result with inverted threshold is written there\n self.logic.process(self.ui.inputSelector.currentNode(), self.ui.invertedOutputSelector.currentNode(),\n self.ui.imageThresholdSliderWidget.value, not self.ui.invertOutputCheckBox.checked, showResult=False)\n\n except Exception as e:\n slicer.util.errorDisplay(\"Failed to compute results: \"+str(e))\n import traceback\n traceback.print_exc()", "def filter_right(self, event):\n if self.mode.get() == 1: #Only in captured mode\n next_filter = self.filter.get() + 1\n if next_filter>self.filter_no:\n next_filter = 1\n self.filter.set(next_filter)\n else: 
#In detected mode\n self.filter.set(3)\n\n self.update_image(0)", "def processImage(image, medianKernel=5, bilateralKernel=17, bilateralColor=9):\n image = cv2.medianBlur(image, medianKernel)\n image = cv2.bilateralFilter(image, bilateralKernel, bilateralColor, 200)\n return scharr(image)", "def output_image_processing(input, smoothing: int, threshold: float):\n blur = cv2.GaussianBlur(input, (smoothing, smoothing), 0)\n other, img = cv2.threshold(blur, threshold, 1, cv2.THRESH_BINARY)\n return img", "def applyImageProcessing(self):\n if self.contrast_adjusted == True or self.brightness_adjusted == True:\n contrast = self.contrast_spinbox.value()\n brightness = self.brightness_spinbox.value()\n self.cv_image = cv2.convertScaleAbs(\n self.cv_image, self.processed_cv_image, contrast, brightness)\n if self.image_smoothing_checked == True:\n kernel = np.ones((5, 5), np.float32) / 25\n self.cv_image = cv2.filter2D(self.cv_image, -1, kernel)\n if self.edge_detection_checked == True:\n self.cv_image = cv2.Canny(self.cv_image, 100, 200)\n self.convertCVToQImage(self.cv_image)\n\n # Repaint the updated image on the label # ? Why this need update?\n self.image_label.repaint()", "def save_filter_result(self, source: np.ndarray, combo_id: str):\n print(\"Filter Processing Finished\")\n self.display_image(source=source, widget=self.processedImages[combo_id])", "def getRidgeFilteredImage(self, _img, out=...) -> out:\n ...", "def filter_image(self):\n\t\t#TODO for now simply retrieves image\n\t\tmsg = Int32()\n\t\tmsg.data=0\n\t\tself.feedback_pub.publish(msg)", "def create_hybrid_image(image1, image2, filter):\n\n assert image1.shape[0] == image2.shape[0]\n assert image1.shape[1] == image2.shape[1]\n assert image1.shape[2] == image2.shape[2]\n\n ############################\n ### TODO: YOUR CODE HERE ###\n\n low_frequencies = my_imfilter(image1, filter)\n high_frequencies = image2 - my_imfilter(image2, filter)\n\n hybrid_image = low_frequencies + (high_frequencies)\n np.clip(hybrid_image, 0, 1, out=hybrid_image)\n\n ### END OF STUDENT CODE ####\n ############################\n\n return low_frequencies, high_frequencies, hybrid_image", "def post_process(ps_volume, inv_depth, accum_count):\n\n mask = np.ones(ps_volume.shape[1:], dtype=np.bool)\n inv_depth_image = np.zeros(ps_volume.shape[1:], dtype=np.float64)\n \"\"\" YOUR CODE STARTS HERE \"\"\"\n inv_depth_image = compute_depths(ps_volume, inv_depth)\n mask = inv_depth_image <= np.mean(\n inv_depth_image) + (2.5 * np.std(inv_depth_image))\n inv_depth_image = scipy.ndimage.gaussian_filter(inv_depth_image, 2)\n\n print(mask)\n \"\"\" YOUR CODE ENDS HERE \"\"\"\n\n return inv_depth_image, mask", "def __rendering(x, y, filtered_image, source_image, radius, sigma):\n sum_of_filtered_intensity = 0.0\n sum_of_coefficient = 0.0\n\n for i in range(x - radius, x + radius + 1):\n for j in range(y - radius, y + radius + 1):\n gaussian = gaussian_function(distance(x, y, i, j), sigma)\n sum_of_filtered_intensity += gaussian * source_image[i][j]\n sum_of_coefficient += gaussian\n\n filtered_intensity = sum_of_filtered_intensity / sum_of_coefficient\n\n filtered_image[x][y] = int(round(filtered_intensity))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an image loaded by the user. >>> load()
def load() -> Image: image = load_image(choose_file()) show(image) return image
[ "def get_image():\r\n\r\n file = choose_file()\r\n \r\n if file == \"\":\r\n sys.exit(\"File Open cancelled, exiting program\")\r\n img = load_image(file)\r\n\r\n return img", "def load_image(file):\n return Image.open(os.path.abspath(file))", "def get_image():\n\n # Pop up a dialogue box to select a file\n\n file = choose_file()\n \n # Exit the program if the Cancel button is clicked.\n if file == \"\":\n sys.exit(\"File Open cancelled, exiting program\")\n \n # Open the file containing the image and load it\n img = load_image(file)\n \n return img", "def load(image_path, access='random'):\n\n return pyvips.Image.new_from_file(image_path, access=access)", "def loadImage(filename,extension=None):\n return PImage(pyglet.image.load(filename))", "def open_image(filename):\n return Image.open(filename)", "def load_image(image_name): \n\n image = pygame.image.load(os.path.join(IMAGES_DIR, image_name)).convert()\n return image", "def user32_LoadImage(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"hinst\", \"lpszName\", \"uType\", \"cxDesired\", \"cyDesired\", \"fuLoad\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def get_image(self, name, pil=False):\n image = Image.open(BytesIO(self.get_file(name).read()))\n if pil:\n return image\n return to_tensor(image)", "def load_img(path):\n if pil_image is None:\n raise ImportError('Could not import PIL.Image. '\n 'The use of `load_img` requires PIL.')\n with open(path, 'rb') as f:\n img = pil_image.open(io.BytesIO(f.read()))\n if img.mode not in ('L', 'I;16', 'I'):\n img = img.convert('L')\n return img", "def load_image(filepath: Path | str, *, backend: ImageLoadingBackend = \"opencv\") -> RawImage:\n if backend == \"opencv\":\n if isinstance(filepath, Path):\n # cv2 can only read string filepaths\n filepath = str(filepath)\n image = cv2.imread(filepath) # type: ignore\n if image is None:\n raise OSError(f\"Image-file could not be read from location '{filepath}'\")\n return cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # type: ignore\n return Image.open(filepath)", "def load_img(path: str) -> np.ndarray:\n return np.array(Image.open(path))", "def image_loader(image_path):\n image = Image.open(image_path)\n image = image.convert('RGB')\n image = loader(image).float()\n #print(image.shape)\n image = Variable(image, requires_grad=False)\n image = image.unsqueeze(0) \n return image.cuda() #assumes using GPU", "def get_img_obj(file_path):\n try:\n img = Image.open(file_path)\n except IOError as ioe:\n print ioe, \", skipping:\", file_path\n return False\n if img.format:\n return img\n return False", "def load_image_UI(self):\n path = get_filepath_UI()\n if path:\n self.load_image(path)", "def get_img():\n args = get_argument()\n img_path = args[\"image\"]\n img = cv2.imread(img_path)\n return img", "def load_image(self,path):\n try:\n image = PIL.Image.open(path)\n if image.mode != \"RGB\":\n image = image.convert(\"RGB\")\n except IOError:\n self.message_handler.handle_error(\"FILE_NOT_LOADED\")\n (self.width, self.height) = image.size\n if self.width>self.max_width or self.height>self.max_height:\n self.message_handler.handle_error(\"IMAGE_TOO_BIG\")\n else:\n self.clear_image(self.width,self.height)\n self.pixels = [piedit.colors.rgb_to_hex(rgb) for rgb in image.getdata()]\n self.draw_program_table()\n self.set_current_file(path)\n self.set_changes_made(False)\n self.set_window_title(os.path.basename(path))", "def readImage(self, *args):\r\n return _osgDB.Input_readImage(self, *args)", 
"def read_image(self, path: str) -> Image:\n raise NotImplementedError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if the user_input is a command for a filter. Otherwise, it returns False. >>> is_command_filter(commands, input_entered) True
def is_command_filter(list_commands: list, user_input: str) -> bool: num_filters = 9 length_list = len(list_commands) for i in range((length_list - num_filters), length_list): if user_input == list_commands[i]: return True return False
[ "def is_valid(list_commands: list, user_input: str) -> bool:\r\n for command in list_commands:\r\n if user_input == command:\r\n return True\r\n print(\"No such command\")\r\n return False", "async def filter_command(self, command: commands.Command) -> bool:\n\n try:\n return await command.can_run(self.context)\n except commands.CommandError:\n return False", "def is_input(self, *args) -> \"bool\":\n return _ida_pro.channel_redir_t_is_input(self, *args)", "def is_command(string, tokens=None):\n if not string:\n return False\n\n if is_comment(string):\n return False\n\n if is_quoted(string):\n return False\n\n if is_script(string):\n return False\n return True", "def is_input(self):\r\n return self.delegate.IsInput()", "def _is_command_char(self, char):\n return (char >= 'A' and char <= 'Z') or (char >= 'a' and char <= 'z') or (char == '@')", "def is_input_acceptable(self, user_input, actions):\n print \"uppercase:\", actions, user_input, actions[0]\n is_standard_action = user_input in actions\n# @TODO Should not be using [0]! Need a more elegant approach.\n is_move_action = user_input in actions[0] and len(user_input) == 1\n if is_standard_action or is_move_action:\n return True\n else:\n return False", "def addCommandInput(self, *args) -> \"bool\" :\n return _core.TableCommandInput_addCommandInput(self, *args)", "def __eq__(self, cmd):\r\n try:\r\n # first assume input is a command (the most common case)\r\n return cmd.key in self._matchset\r\n except AttributeError:\r\n # probably got a string\r\n return cmd in self._matchset", "def command_is_known( self, command_name ):\n\t\tfor cmd in self.__commands.commands:\n\t\t\tif cmd.name == command_name:\n\t\t\t\treturn True\n\t\treturn False", "def evaluate_filter(self, *msg_in):\n out = True\n if len(msg_in) == 1:\n msg_in = msg_in[0]\n if self.filter and (not self.is_eof(msg_in)):\n out = self.filter(msg_in)\n assert(isinstance(out, bool))\n return out", "def _contains_shell_cmds(self, cell):\n source = cell[\"source\"]\n\n if source[0].startswith(\"%\"):\n return True\n\n return any(line.startswith(\"!\") for line in source)", "def is_source_stdin(self):\n return self.source == STDIN", "def is_available_command(event, command):\n event_handler = get_event_handler_for_type(event, event_type=command)\n is_available = event_handler is not None\n return is_available", "def validate_command(flag, flag_input):\n is_valid = False\n if flag in validators:\n if validators[flag](flag_input):\n is_valid = True\n else:\n print(f\"Invalid flag argument '{flag_input}' for flag '{flag}'\")\n else:\n print(f\"Invalid flag '{flag}'\")\n return is_valid", "def is_destination_stdin(self):\n return self.destination == STDIN", "def _has_rights(self, command):\n active = False\n registered = False\n if self.session['user'] is None:\n if command not in Config.COMMANDS_UNKNOWN:\n return False\n return True\n else:\n if command not in Config.COMMANDS_USER:\n return False\n return True", "def is_valid_cmd(dcmd):\n\n # try translating it as a key\n if None in translate_key(dcmd):\n # not a key, so return whether or not it is a recognized command\n return dcmd in COMMANDS\n else:\n return True", "def _has_input_prompt(self, lines):\n if isinstance(lines, list):\n return any(line for line in lines\n if line.startswith(self.prompt_first))\n else:\n return (lines.startswith(self.prompt_first) or\n lines.startswith(self.prompt_next))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if the user_input is in the valid list of commands. Otherwise, it returns False. >>> is_valid(commands, input_entered) True
def is_valid(list_commands: list, user_input: str) -> bool: for command in list_commands: if user_input == command: return True print("No such command") return False
[ "def is_command_filter(list_commands: list, user_input: str) -> bool:\r\n num_filters = 9\r\n length_list = len(list_commands)\r\n for i in range((length_list - num_filters), length_list):\r\n if user_input == list_commands[i]:\r\n return True\r\n return False", "def is_valid_cmd(dcmd):\n\n # try translating it as a key\n if None in translate_key(dcmd):\n # not a key, so return whether or not it is a recognized command\n return dcmd in COMMANDS\n else:\n return True", "def validate_command(flag, flag_input):\n is_valid = False\n if flag in validators:\n if validators[flag](flag_input):\n is_valid = True\n else:\n print(f\"Invalid flag argument '{flag_input}' for flag '{flag}'\")\n else:\n print(f\"Invalid flag '{flag}'\")\n return is_valid", "def __is_move_input_valid(self, move_input):\r\n direction_set = {'u', 'd', 'l', 'r'}\r\n car_name_set = {'Y', 'B', 'O', 'W', 'G', 'R'}\r\n \r\n # check if input match the pattern \"_,_\" -\r\n if len(move_input) == 3 or move_input[1] == \",\":\r\n \r\n # check if car name is valid and movekey is valid -\r\n if move_input[0] in car_name_set \\\r\n and move_input[2] in direction_set:\r\n \r\n # input is valid\r\n return True\r\n else:\r\n # input is not valid\r\n return False", "def is_input_acceptable(self, user_input, actions):\n print \"uppercase:\", actions, user_input, actions[0]\n is_standard_action = user_input in actions\n# @TODO Should not be using [0]! Need a more elegant approach.\n is_move_action = user_input in actions[0] and len(user_input) == 1\n if is_standard_action or is_move_action:\n return True\n else:\n return False", "def _has_input_prompt(self, lines):\n if isinstance(lines, list):\n return any(line for line in lines\n if line.startswith(self.prompt_first))\n else:\n return (lines.startswith(self.prompt_first) or\n lines.startswith(self.prompt_next))", "def keyboard_valid(self):\n\n if globals.KEYBOARD_STATE['TARGET'] == []:\n return True\n\n for note in globals.KEYBOARD_STATE['TARGET']:\n if note not in globals.KEYBOARD_STATE['RIGHT']:\n return False\n\n if len(globals.KEYBOARD_STATE['WRONG']) >= 2:\n return False\n\n return True", "def check_repeated_inputs(self, user_input):\n input_status = user_input in self.inputs_asked\n\n # append if never asked\n if input_status == False:\n self.inputs_asked.append(user_input)\n\n return input_status", "def check_input(input):\n\n\tinput1=' '.join(input.split())\n\tinput2=input1.split('\"')\n\tcmd=input2[0].strip()\n\tresult = []\n\tif cmd not in ['a', 'c', 'r', 'g', '']:\n\t\tprnterror(\"Error: <\", cmd, \"> is not a valid command.\\n\",\"invalid input\")\n\tif cmd == 'a' or cmd == 'c' or cmd == 'r':\n\t\tif len(input2) != 3:\n\t\t\tprnterror(\"Error: Name of the street is not specified or specified without double quotation.\\n\",\"Invalid Input\")\n\t\tname=input2[1]\n\t\tif name == '':\n\t\t\tprnterror(\"Error: Name of the street can not be empty.\\n\",\"Invalid Input\")\n\t\t\traise IndexError\n\t\tpoints= input2[2].strip()\n\t\tresult.append(cmd)\n\t\tresult.append(name)\n\t\tresult.append(points)\n\telif cmd == 'g':\n\t\tif len(input2) != 1:\n\t\t\tprnterror(\"Error: Street name or coordinates is/are specified for command 'g'.\\n\",\"Invalid Input\")\n\t\t\traise IndexError\n\t\tresult.append(cmd)\n\treturn result", "def is_command_allowed( self, command, hostname = None, options = {}, flavor = None ):\n\t\tif not hostname:\n\t\t\thostname = ucr[ 'hostname' ]\n\n\t\t# first check the group rules. 
If the group policy allows the\n\t\t# command there is no need to check the user policy\n\t\treturn self._is_allowed( filter( lambda x: x.fromUser == False, self.acls ), command, hostname, options, flavor ) or \\\n\t\t\t self._is_allowed( filter( lambda x: x.fromUser == True, self.acls ), command, hostname, options, flavor )", "def command_is_known( self, command_name ):\n\t\tfor cmd in self.__commands.commands:\n\t\t\tif cmd.name == command_name:\n\t\t\t\treturn True\n\t\treturn False", "def addCommandInput(self, *args) -> \"bool\" :\n return _core.TableCommandInput_addCommandInput(self, *args)", "def is_valid(self):\n for s in set(self._dna_string):\n if s not in self.dna_nucleotides:\n return False\n return True", "def is_command(string, tokens=None):\n if not string:\n return False\n\n if is_comment(string):\n return False\n\n if is_quoted(string):\n return False\n\n if is_script(string):\n return False\n return True", "def is_input_invalid(user_input):\n if user_input == 1 or user_input == 2:\n return False\n else:\n return True", "def has(self, cmd_name: str, qubits: Union[int, Iterable[int]]) -> bool:\n qubits = _to_qubit_tuple(qubits)\n if cmd_name in self._cmd_dict:\n\n if qubits in self._cmd_dict[cmd_name]:\n return True\n\n return False", "def _has_rights(self, command):\n active = False\n registered = False\n if self.session['user'] is None:\n if command not in Config.COMMANDS_UNKNOWN:\n return False\n return True\n else:\n if command not in Config.COMMANDS_USER:\n return False\n return True", "def command_addition_check(command, addition):\r\n # tested in Task5_unittest\r\n # for those the addition are integers or empty\r\n if command in [\"INSERT\", \"PRINT\", \"QUIT\", \"HELP\", \"DELETE\"]:\r\n if command in [\"QUIT\", \"HELP\"]:\r\n # these command cannot have addition range\r\n if len(addition) > 0:\r\n print(\"?\") # error responding\r\n return False\r\n if command in [\"QUIT\", \"HELP\", \"PRINT\", \"DELETE\"]:\r\n # these command may or must have no addition e.g. PRINT (print the whole txt)\r\n if len(addition) == 0:\r\n return True\r\n if command == \"INSERT\":\r\n # insert must have one and only one given line number\r\n if len(addition) == 0:\r\n raise ValueError\r\n # if command have addition len 1, check 1. if the additional is overwhelmed\r\n if command in [\"INSERT\", \"DELETE\"]:\r\n if len(addition) > 1:\r\n print(\"?\") # error responding\r\n return False\r\n # 2. check the range is valid\r\n try:\r\n for i in range(len(addition)):\r\n addition[i] = int(addition[i])\r\n # PRINT is a different case\r\n if command == \"PRINT\":\r\n # print is able to have only one given line number\r\n # PRINT is the only command can have two additional commands, and if it is overwhelmed is checked here\r\n if len(addition) == 1:\r\n return True\r\n if len(addition) > 2:\r\n a = int('sss')\r\n return True\r\n except:\r\n print(\"?\") # error responding\r\n return False\r\n # if the addition range is a string\r\n else:\r\n if (len(addition) == 0) or (len(addition) > 1):\r\n print(\"?\") # error responding\r\n return False\r\n return True", "def validate_input(inp: str):\n valid = True\n inp = inp.lower()\n\n alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',\n 'v', 'w', 'x', 'y', 'z', 'ä', 'ö', 'ü', 'ß']\n\n # check if there even is an input\n if inp == '':\n valid = False\n\n # check for every letter if it's in the alphabet\n for letter in inp:\n if letter not in alphabet:\n valid = False\n\n return valid, inp" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the given Jacobian approximation satisfies the secant conditions for the last `npoints` points.
def _check_secant(self, jac_cls, npoints=1, **kw): jac = jac_cls(**kw) jac.setup(self.xs[0], self.fs[0], None) for j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])): jac.update(x, f) for k in range(min(npoints, j+1)): dx = self.xs[j-k+1] - self.xs[j-k] df = self.fs[j-k+1] - self.fs[j-k] assert_(np.allclose(dx, jac.solve(df))) # Check that the `npoints` secant bound is strict if j >= npoints: dx = self.xs[j-npoints+1] - self.xs[j-npoints] df = self.fs[j-npoints+1] - self.fs[j-npoints] assert_(not np.allclose(dx, jac.solve(df)))
[ "def bsplinederivfunc(knots, points, nderiv):\n m, n, degree, dim = get_lengths_and_degree(knots, points)\n def c(t):\n pt = np.zeros(dim)\n for i in range(n+1):\n basis = bsplinebasis_deriv(knots, i, degree, nderiv)(t)\n pt += map(lambda x: x * basis, points[i])\n return pt\n return c", "def jacobian_information(self):\n has_jacobian = True\n jacobian_free_solvers = [\"lm-scipy-no-jac\"]\n return has_jacobian, jacobian_free_solvers", "def test_is_feasible_checks_feasibility(self, points):\n for x, kwargs in itertools.product(points, self.projection_kwargs):\n projection = self.Projection(**kwargs)\n y = projection(x)\n \n assert self.is_feasible(x, **kwargs) == projection.is_feasible(x)\n assert self.is_feasible(y, **kwargs) == projection.is_feasible(y)", "def check_stability_pwa(self, eps=1e-3):\n \n if not self.is_state_feedback:\n raise ETCError('Output feedback not yet implemented.')\n \n n = self.plant.nx\n A = {}\n A[1] = np.block([\n [self.Ad + self.Bd, np.zeros((n,n))],\n [np.eye(n), np.zeros((n,n))]\n ])\n A[2] = np.block([\n [self.Ad, self.Bd],\n [np.zeros((n,n)), np.eye(n)]\n ])\n Q = self.Qbar\n \n # CVX variables\n alpha = {(i,j): cvx.Variable(pos=True) for i in range(1,3) \n for j in range(1,3)}\n beta = {(i,j): cvx.Variable(pos=True) for i in range(1,3) \n for j in range(1,3)}\n kappa = {i: cvx.Variable(pos=True) for i in range(1,3)}\n P = {i: cvx.Variable((2*n, 2*n), PSD=True) for i in range(1,3)}\n \n # CVX constraints : make a function of the externally defined lbd\n def make_constraints(lbd):\n con = []\n for i in range(1,3):\n for j in range(1,3):\n con.append(lbd*P[i] - A[i].T @ P[j] @ A[i]\n + ((-1)**i)*alpha[(i,j)]*Q\n + ((-1)**j)*beta[(i,j)]*(A[i].T @ Q @ A[i])\n >> 0) # Eq. (1))\n con.append(P[i] + (-1)**i * kappa[i]* Q # Eq. (2)\n >> _LMIS_SMALL_IDENTITY_FACTOR*np.eye(2*n))\n return con\n \n # Start bisection algorithm: get extreme points\n a = 0\n b = 1\n \n # For b = 1, if GES then it must be feasible\n con = make_constraints(b)\n prob = cvx.Problem(cvx.Minimize(0), con)\n prob.solve()\n if 'infeasible' in prob.status:\n return 1, None\n Pout = (p.value for p in P)\n \n # For a = 0, if it is feasible then this is a deadbeat controller.\n # Can't be better then this\n con = make_constraints(a)\n prob = cvx.Problem(cvx.Minimize(0), con)\n prob.solve()\n if 'optimal' in prob.status:\n return 0, (p.value for p in P)\n \n # Now we should have b = 1 feasible and a = 0 infeasible. 
Start\n # bisection algorithm\n while b-a > eps:\n c = (a+b)/2\n con = make_constraints(c)\n prob = cvx.Problem(cvx.Minimize(0), con)\n prob.solve()\n if 'optimal' in prob.status:\n b = c\n Pout = (p.value for p in P) # Store output P matrices\n elif 'infeasible' in prob.status:\n a = c\n else:\n warnings.warn(f'{prob.status}: TOL is {b-a}')\n break\n \n return -np.log(b)/2/self.h, Pout", "def test_scipy_eval(self):\n for method in ['2-point',\n '3-point',\n 'cs']:\n hes = Scipy(self.cost_func.problem, self.jacobian)\n hes.method = method\n eval_result = hes.eval(params=self.params)\n self.assertTrue(np.isclose(self.actual_hessian, eval_result).all())", "def checkpts(pts):\n\tnewPts = []\n\tfor i, sp in enumerate(pts):\n\t\tprint \"%d\\t%f\\t%f\\t%f\" % (i + 1, sp.get(X_AXIS), sp.get(Y_AXIS), sp.get(Z_AXIS))\n\t\t_stg.moveTo(sp)\n\t\tres = jop.showConfirmDialog(MainFrame, \"Update this point?\", \"Stage point validation\", jop.YES_NO_OPTION)\n\t\tif res == jop.YES_OPTION:\n\t\t\tnewPts.append(position())\n\t\telse:\n\t\t\tnewPts.append(sp)\n\treturn newPts", "def check_points(self, points):\r\n for point in points:\r\n if (point > self.spec_lines.lines[0]\r\n or point < self.spec_lines.lines[-1]):\r\n print(\"Point {} out of zone 3\".format(self.x_pt))\r\n elif (point > self.spec_lines.lines[1]\r\n or point < self.spec_lines.lines[-2]):\r\n print(\"Point {} out of zone 2\".format(self.x_pt))\r\n elif (point > self.spec_lines.lines[2]\r\n or point < self.spec_lines.lines[-3]):\r\n# print(\"out of zone 1\")\r\n pass\r\n else:\r\n pass", "def is_saddle_point(xy_point, coeff_mat):\n dx2, dy2, dxdy = 0, 0, 0\n x, y = xy_point\n\n order = coeff_mat.shape[0]\n for i in range(order): # x index\n for j in range(order): # y index\n if i > 1:\n dx2 += coeff_mat[i, j] * i * (i - 1) * x**(i - 2) * y**j\n if j > 1:\n dy2 += coeff_mat[i, j] * x**i * j * (j - 1) * y**(j - 2)\n if i > 0 and j > 0:\n dxdy += coeff_mat[i, j] * i * x**(i - 1) * j * y**(j - 1)\n\n if dx2 * dy2 - dxdy**2 < 0:\n logger.info(f'Found saddle point at r1 = {x:.3f}, r2 = {y:.3f} Å')\n return True\n\n else:\n return False", "def test_scipy_eval(self):\n for method in ['2-point',\n '3-point',\n 'cs']:\n hes = Scipy(self.cost_func.problem, self.cost_func.jacobian)\n hes.method = method\n self.cost_func.hessian = hes\n eval_result = self.cost_func.hes_cost(params=self.params)\n self.assertTrue(np.isclose(self.actual, eval_result).all())", "def check_points(self) -> None:\n if self.points >= 100: \n self.biscuits = True", "def solve(self) -> bool:\n if self.unsolvable:\n return False\n\n points = self.get_all_points()\n self.ready = True\n try:\n for point in points:\n point.calculate()\n except UnsolvableError:\n self.unsolvable = True\n return False\n\n if len([point.value for point in points if not point.has_value]) > 0:\n self.propose_most_restricted_point_fill(points)\n\n # Shows poor result times\n # self.propose_lines_fill()\n\n else:\n self.solved = True\n return True", "def verify(self):\n for i in self.coords:\n if np.abs(6*i-int(6*i))>0.1: return False\n if np.abs(self.coords[2]+self.coords[0]+self.coords[1]) > 0.1: return False\n return True", "def is_inside(self,points,save=True):\n \n if points.ndim==1:\n points = np.array([points]) \n \n assert points.shape[1] == 3, \"input point must be x,y,z\"\n\n n = points.shape[0]\n \n ## set up points to check\n vpoints = vtk.vtkPoints()\n map(vpoints.InsertNextPoint,points)\n \n checkPoints = vtk.vtkPolyData()\n checkPoints.SetPoints(vpoints)\n \n ## set up point checking object\n 
pointChecker = vtk.vtkSelectEnclosedPoints()\n pointChecker.SetInputData(checkPoints)\n pointChecker.SetSurfaceData(self.Surf)\n pointChecker.Update()\n \n ## check the status for each point\n inout = []\n inPoints0 = vtk.vtkPoints()\n outPoints0 = vtk.vtkPoints()\n \n for i in range(checkPoints.GetNumberOfPoints()):\n inout.append(pointChecker.IsInside(i))\n # print i, inout[-1]\n if inout[-1]:\n inPoints0.InsertNextPoint(vpoints.GetPoint(i))\n else:\n outPoints0.InsertNextPoint(vpoints.GetPoint(i))\n \n if save:\n self._vpoints = vpoints\n self._inPoints = vtk.vtkPolyData()\n self._inPoints.SetPoints(inPoints0)\n self._outPoints = vtk.vtkPolyData()\n self._outPoints.SetPoints(outPoints0)\n \n self._pointchecked = True\n \n if n==1:\n return inout[0]\n else:\n return np.array(inout)", "def valid_point_check(self, points):\n\n collision = np.zeros(len(points), dtype=bool)\n for obs in self.obs_list:\n collision = np.logical_or(collision, obs.points_in_obstacle(points))\n return np.logical_not(collision)", "def point_is_on_frontier(point, cell_points):\n if (point[0] + 1, point[1]) not in cell_points or (point[0], point[1] + 1) not in cell_points \\\n or (point[0] - 1, point[1]) not in cell_points or (point[0], point[1] - 1) not in cell_points:\n return True\n else:\n return False", "def evaluate_jacobian_eq(self, out=None):\n pass", "def check_rf(outline, npoints):\n outline = safe_outline(outline)\n grid = complexgrid(outline, npoints)\n grid = inner_grid_points(grid, outline)\n errors = search_grid(outline, grid)\n idx = np.argmin(errors)\n center = grid[idx]\n return RFresult(center, errors[idx], grid, errors)", "def bsplinefunc(knots, points):\n m, n, degree, dim = get_lengths_and_degree(knots, points)\n try:\n dummy = points[0][0]\n pts = points\n except IndexError:\n pts = [[i] for i in points]\n def c(t):\n pt = np.zeros(dim)\n for i in range(n+1):\n basis = bsplinebasis(knots, i, degree)(t)\n pt += map(lambda x: x * basis, pts[i])\n return pt\n return c", "def test_second_derivative():\n\t# Testing the second derivative at x = 0,1,2,3,4\n\tactual = np.array([1.47625,6.96536,10.20501, 25.82899, 10.90807])\n\t# Testing impementation\n\tdef exponential():\n\t\tt = np.linspace(0,4,5)\n\t\tex = np.vectorize(np.exp)\n\t\tex = ex(t)\n\t\treturn ex\n\ttrial = np.dot(ac.second_derivative(0,4,5),exponential())\n\t# Debug message\n\tprint(\"Should be: \",actual,\" but returned this \",trial)\n\tfor m in range(5):\n\t\tnose.tools.assert_almost_equal(actual[m],trial[m],4)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method establishing the linked list by creating a chain of integer Node objects.
def __init__(self, list_nodes):
    self.starter_node = Node(list_nodes[0])
    current_node = self.starter_node
    for val in list_nodes[1:]:
        current_node.link = Node(val)
        current_node = current_node.link
[ "def linked_list_constructor(node_list: List[int]) -> ListNode:\n head = ListNode()\n curr = head\n for i in range(len(node_list)):\n curr.next = ListNode(node_list[i])\n curr = curr.next\n return head.next", "def node_chain(self, node_id: int) -> List[Node]:\n pass", "def __init__(self, n):\n self.n = n\n self.parent = [x for x in range(n)]", "def challenge_sample_2():\n ll = LinkedList()\n ll.append(5)\n ll.append(9)\n ll.append(4)\n return ll", "def challenge_sample_1():\n ll = LinkedList()\n ll.append(1)\n ll.append(3)\n ll.append(2)\n return ll", "def create_closedList(self):\n # EXAMPLE: return a data structure suitable to hold the set of nodes already evaluated\n return []", "def generate_from_list(tree):\n node = Node(tree[0], tree[1])\n\n for number in range(1, node.num_children + 1):\n tree, child = Node.generate_from_list(tree[2:])\n node.children.append(child)\n\n node.metadata = [int(x) for x in tree[2: 2 + node.num_metadata]]\n\n tree = tree[node.num_metadata:]\n\n return tree, node", "def __init__(self):\n this = _coin.new_SoNodeKitListPart()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, num_parts: int):\n self.cycle = list(SlNode(k) for k in range(num_parts))\n sl2 = self.cycle[-1]\n for sl1 in self.cycle:\n sl2.next = sl1\n sl2 = sl1", "def new_list_nodes(nodes):\n if not type(nodes) is list:\n if ',' in nodes:\n nodes = nodes.split(',')\n elif '-' in nodes:\n nodes = nodes.strip(\"[]\").split('-')\n nodes = range(int(nodes[0]), int(nodes[1])+1)\n else:\n nodes = [nodes]\n\n print(list(nodes))\n return [\"{:02}\".format(int(node)) for node in nodes]", "def iter_list_int_data(scope='class'):\n return SinglyLinkedList(range(10))", "def _build_tree(numbers: List[int]) -> Node:\n tree = Node()\n tree.load(numbers)\n return tree", "def setup_linked_list(items: List[object]) -> LinkedList:\n ll = LinkedList()\n for item in items:\n ll.append(item)\n return ll", "def __init__(self, data):\n self.data = data\n self.childs = LinkedList()", "def __init__(self, *args):\n this = _coin.new_SoNodeList(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def create_linked_list(input_list):\n head = None\n current = None\n for value in input_list:\n if head is None:\n head = Node(value)\n current = head\n else:\n current.next = Node(value)\n current = current.next\n \n return head", "def construct_adjacency_lists(self):\n\n self.root = self.data[\"root\"]\n self.vertices = [node[\"id\"] for node in self.data[\"nodes\"]]\n\n for edge in self.data[\"edges\"]:\n _from = edge[\"from\"]\n _to = edge[\"to\"]\n\n if _from not in self.adj:\n self.adj[_from] = []\n\n self.adj[_from].append(_to)", "def __init__(self, *args):\n this = _coin.new_SoChildList(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def offset_node_lists(node_lists):\n cumsums = cumsum(node_lists)\n for list_ix in range(len(node_lists)):\n for node in node_lists[list_ix]:\n offset = cumsums[list_ix - 1] if list_ix > 0 else 0\n node.id = node.id + offset\n node.parent_id = node.parent_id + offset \\\n if node.parent_id >= 0 \\\n else -1\n return node_lists" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
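The constructor in the row above presumes a Node helper exposing data and link attributes, which the dataset row does not include. A minimal sketch of that assumption, with the class name LinkedList (also an assumption) and a traversal of the resulting chain:

class Node:
    # assumed helper: one cell holding an integer and a link to the next cell
    def __init__(self, data):
        self.data = data
        self.link = None

class LinkedList:
    def __init__(self, list_nodes):
        self.starter_node = Node(list_nodes[0])
        current_node = self.starter_node
        for val in list_nodes[1:]:
            current_node.link = Node(val)
            current_node = current_node.link

ll = LinkedList([1, 2, 3])
node = ll.starter_node
while node is not None:
    print(node.data)  # prints 1, then 2, then 3
    node = node.link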
Insert a new node with the given data into the linked list, just after the node holding the searched data.
def inser_node_after(self, data_chosen: int, new_node: int):
    current_node = self.starter_node
    while current_node is not None:
        # search for the node holding the searched data
        if current_node.data == data_chosen:
            break
        current_node = current_node.link
    new_data = Node(new_node)
    new_data.link = current_node.link  # the new node takes over the current node's link
    current_node.link = new_data       # the current node now links to the new node
[ "def insert_after_node(self, key, data):\n node = ListNode(data)\n p = self.head\n while p is not None:\n if p.data == key:\n node.next = p.next\n p.next = node\n p = p.next", "def add_after_node(self, key, data):\n cur = self.head\n while cur:\n if cur.data == key:\n if cur.next is None:\n self.append(data)\n return\n new_node = Node(data)\n new_node.next = cur.next\n cur.next.prev = new_node\n cur.next = new_node\n new_node.prev = cur\n return\n else:\n cur = cur.next", "def insert_after(self, prev_node, data):\n node = DLLNode(data)\n node.prev = prev_node\n node.next = prev_node.next\n prev_node.next = node\n if node.next is None:\n return\n node.next.prev = node", "def insert_end(self, data):\n node = ListNode(data)\n if self.head:\n p = self.head\n while p.next is not None:\n p = p.next\n p.next = node\n else:\n self.head = node", "def add_before_node(self, key, data):\n cur = self.head\n while cur:\n if cur.data == key:\n if cur.next is None:\n self.append(data)\n return\n new_node = Node(data)\n cur.prev.next = new_node\n new_node.prev = cur.prev\n cur.prev = new_node\n new_node.next = cur\n return\n else:\n cur = cur.next", "def add_to_back(self, data):\r\n\t\tif self._head is None:\r\n\t\t\tself._head = Node(data, self._head)\r\n\t\telse:\r\n\t\t\tprobe = self._head\r\n\t\t\twhile probe.next is not None:\r\n\t\t\t\tprobe = probe.next\r\n\t\t\tprobe.next = Node(data)", "def insert_before(self, key, data):\n node = ListNode(data)\n p = self.head\n while p.next is not None:\n if p.next.data == key:\n node.next = p.next\n p.next = node\n p = p.next", "def insert_after(self, node, value):\n if node is None: # If you specify to insert a data node after an empty node, do nothing\n return\n\n new_node = Node(value)\n new_node.next_node = node.next\n node.next = new_node", "def insert_end(self, data):\n\n new_node = Node(data)\n\n # handle empty list case\n if self.head == None:\n self.head = new_node\n self.head.next_node = new_node\n self.end = new_node\n return\n\n # handle non-empty list case\n if self.end != None:\n self.end.next_node = new_node\n new_node.next_node = self.head\n self.end = new_node\n return", "def insert(self, prev, data):\n node = self.__search(prev)\n if not node:\n raise IndexError\n\n new_node = LinkedListElement(data)\n new_node.next = node.next\n node.next = new_node\n\n if self.back == node:\n self.back = new_node", "def append_at_head(self, data):\n\n node_obj = Node(data)\n\n if not self.head:\n self.head = node_obj\n else:\n node_obj.next = self.head\n self.head = node_obj", "def prepend(self, data: Any) -> None:\n current_node = self.head\n\n new_node = Node(data)\n new_node.next_ptr = new_node\n\n if current_node:\n while current_node.next_ptr != self.head:\n current_node = current_node.next_ptr\n\n current_node.next_ptr = new_node\n new_node.next_ptr = self.head\n\n self.head = new_node\n self.length += 1", "def insert(self, data, index):\n if index == 0:\n self.add(data)\n\n if index > 0:\n new = Node(data)\n\n position = index\n current = self.head\n\n while position > 1:\n current = current.next_node\n position -= 1\n prev = current\n next_node = current.next\n\n prev.next_node = new\n new.next_node = next_node", "def insert_after(self, val, newVal):\n current = self.head\n while current._next:\n if current.val == val:\n new_node = Node(newVal, current._next)\n current._next = new_node\n self._size += 1\n return\n current = current._next", "def append_left(self, data=Any):\n node = Node(data=data, next_node=self.head)\n self.head = node", "def 
insert(self, data):\n # check if node exists in the tree already\n if self.search(data) is None:\n self.root = self._insert(self.root, data)\n self.size += 1", "def insert(self, data, place):\n new_node = LinkedListNode(data)\n the_place = self.find(place)\n\n if the_place == self.head:\n new_second = self.head\n new_node.next = new_second\n self.head = new_node\n\n\n else:\n new_second = the_place\n same_prev = the_place.prev\n\n same_prev.next = new_node\n new_second.prev = new_node\n new_node.next = new_second\n new_node.prev = same_prev", "def insert_after(self, val, newVal):\n current = self.head\n while current:\n if current.val == val:\n position = current._next\n current._next = Node(newVal)\n current._next._next = position\n self._size += 1\n break\n current = current._next", "def insert(self, idx, data):\n if idx == 0:\n self.push(data)\n else:\n i = 0\n node = self.head\n while(i+1 < idx and node.next):\n i += 1\n node = node.next\n self.insert_after(node, data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete the nodes whose value equals data_chosen.
def delete_node(self, data_chosen):
    if self.starter_node is None:
        print("No node to be deleted")
        return
    # drop matching nodes sitting at the head of the list first
    while self.starter_node is not None and self.starter_node.data == data_chosen:
        self.starter_node = self.starter_node.link
    # then unlink any matching node by skipping over it from its predecessor
    node_del = self.starter_node
    while node_del is not None and node_del.link is not None:
        if node_del.link.data == data_chosen:
            node_del.link = node_del.link.link
        else:
            node_del = node_del.link
[ "def test_delete_decision_tree_using_delete(self):\n pass", "def del_nodes(self, val):\n if val not in self.keys():\n raise ValueError(\"There is no value to delete.\")\n for node in self.values():\n if val in node:\n del node[val]\n del self[val]", "def delete(self):\n\t\t[ n.delete() for n in self.nodes ]", "def test_delete_node_using_delete(self):\n pass", "def test_removeDeleted(self):\n tree = DndParser('((a:3,(b:2,(c:1,d:1):1):1):2,(e:3,f:3):2);',\n constructor=TreeNode)\n result_not_deleted = deepcopy(tree)\n tree.removeDeleted(lambda x: x.Name in [])\n self.assertEqual(str(tree),str(result_not_deleted))\n deleted = set(['b','d','e','f'])\n result_tree = DndParser('((a:3,((c:1):1):1):2);',constructor=TreeNode)\n is_deleted = lambda x: x.Name in deleted\n tree.removeDeleted(is_deleted)\n self.assertEqual(str(tree),str(result_tree))", "def delete_node(self, node, branch, all_nodes, prevs):\n all_nodes.remove(node)\n if branch:\n new_node = node.true_branch\n else:\n new_node = node.false_branch\n if node == self.root:\n self.root = new_node\n del prevs[new_node]\n else:\n prevs[new_node].remove((node, branch))\n for prev, br in prevs[node]:\n prevs[new_node].append((prev, br))\n if br:\n prev.true_branch = new_node\n else:\n prev.false_branch = new_node", "def delete(node_tree):\n\n if FLAVOR_ID in node_tree:\n node_tree.nodes.remove(node_tree.nodes[_AWHITE_MIX_NODE])\n del node_tree[FLAVOR_ID]", "def nodeDelete(self):\n selNodes = self.currentSelectionModel().selectedNodes()\n if not selNodes or self.model.root in selNodes:\n return\n # gather next selected node in increasing order of desirability\n nextSel = [node.parent for node in selNodes]\n undo.ChildListUndo(self.model.undoList, nextSel)\n nextSel.extend([node.prevSibling() for node in selNodes])\n nextSel.extend([node.nextSibling() for node in selNodes])\n while not nextSel[-1] or nextSel[-1] in selNodes:\n del nextSel[-1]\n for node in selNodes:\n node.delete()\n self.currentSelectionModel().selectNode(nextSel[-1], False)\n self.updateAll()", "def delete_data(self, data_id, var_name, phase):", "def delete_node(self, node_id) -> Node:", "def delete(self, value):\n currentNode = self._head\n prevNode = None\n while currentNode != None and currentNode.data != value:\n prevNode = currentNode\n currentNode = currentNode.next\n if currentNode == None:\n return\n else: # that means currentNode == value\n if prevNode == None: # that means the first item is deleted\n self._head = currentNode.next\n else:\n prevNode.setNextNode(currentNode.next)\n self._size -= 1", "def remove_er_variable(self):\n # loop over selected items in the tree\n idx = self.view.selectedIndexes()[0]\n # get the selected item from the index\n selected_item = idx.model().itemFromIndex(idx)\n selected_text = selected_item.text()\n # get the parent of the selected item\n parent = selected_item.parent()\n # don't allow the last ER variable to be deleted\n if parent.rowCount() == 1:\n return\n # remove the row in the EcosystemRespiration section\n parent.removeRow(selected_item.row())\n # get the NetEcosystemExchange section\n for i in range(self.model.rowCount()):\n section = self.model.item(i)\n if section.text() == \"NetEcosystemExchange\":\n break\n done_it = False\n for i in range(section.rowCount()):\n for j in range(section.child(i).rowCount()):\n if section.child(i).child(j, 1).text() == selected_text:\n section.removeRow(i)\n done_it = True\n break\n if done_it:\n break\n # get the GrossPrimaryProductivity section\n for i in range(self.model.rowCount()):\n 
section = self.model.item(i)\n if section.text() == \"GrossPrimaryProductivity\":\n break\n done_it = False\n for i in range(section.rowCount()):\n for j in range(section.child(i).rowCount()):\n if section.child(i).child(j, 1).text() == selected_text:\n section.removeRow(i)\n done_it = True\n break\n if done_it:\n break\n self.update_tab_text()", "def deleteNode(self, value):\n current = self.head\n if self.head:\n if current.value == value:\n self.head = current.nextNode\n return\n while current.nextNode:\n if current.nextNode.value ==value:\n if current.nextNode.nextNode:\n current.nextNode = current.nextNode.nextNode\n else:\n current.nextNode = None \n break\n current= current.nextNode", "def delete(root, value):\n node = search(root,value)\n #search for the node\n if node:\n if node.l_child==None:\n transplant(root,node,node.r_child)\n elif node.r_child==None:\n transplant(root,node,node.l_child)\n else:\n successor = tree_minimum(node.r_child) #define the successor as the minimum node in right subtree\n if successor.parent != node:\n transplant(root,successor,successor.r_child) #transplant the successor to the root\n successor.r_child = node.r_child\n successor.r_child.parent = successor\n transplant(root,node,successor)\n successor.l_child = node.l_child\n successor.l_child.parent = successor\n return root\n else:\n return root", "def _remove_tree_node(data, path):\n if not path or not data:\n return\n path_head, *path_tail = path\n if path_head not in data:\n return\n if not path_tail:\n del data[path_head]\n else:\n _remove_tree_node(data[path_head], path_tail)", "def delete(self):\n\t\ttask = self._get_select_item()\n\t\ttask.delete()\n\t\tself._tree()", "def delete_entry(self, *args):\n if len(self.value) > 1 and self.recycle_view_class_pool.selected:\n label = self.recycle_view_class_pool.selected[\"text\"]\n idx = self.imagenet_labels[label]\n self.value.remove(idx)\n self.set_value()", "def test_deleting_elements(self, value):\n ds = DatasetList(value)\n del value[0]\n del ds[0]\n assert ds == value", "def delete_node(self, element_del):\n element = self.first_node\n if element.data != element_del:\n while element.link.data != element_del:\n element = element.link\n element.link = element.link.link\n else:\n element.data = element.link.data\n element.link = element.link.link\n print(\"deletion display\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
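Taken together, the three linked-list rows above sketch a small singly linked list API. A hedged usage example, reusing the assumed Node and LinkedList names from the previous sketch and assuming inser_node_after and delete_node are methods of that same class:

ll = LinkedList([1, 2, 3])
ll.inser_node_after(2, 99)  # chain becomes 1 -> 2 -> 99 -> 3
ll.delete_node(99)          # chain is back to 1 -> 2 -> 3

values = []
node = ll.starter_node
while node is not None:
    values.append(node.data)
    node = node.link
print(values)               # [1, 2, 3]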
Given the string representation of the rucksack, return two sets of its contents
def split_rucksack(input: str):
    return (set(input[:len(input)//2]), set(input[len(input)//2:]))
[ "def get_subsets(string: str) -> Set:\n strings = set()\n str_len = len(string) + 1\n [strings.add(string[start:stop]) for start in range(str_len) for stop in range(str_len) if stop > start]\n return strings", "def unpack_subreddits(subreddits_str):\n return set(subreddits_str.split('+'))", "def bags(self, rep_str, dset_str):\n base_path = Path(__file__).parent\n if rep_str in str(LoadBags.__accepted_reps).strip('[]'):\n if dset_str == 'QM9':\n pkl_path = (base_path / 'data/qm9_bags.pkl').resolve()\n with open(pkl_path, 'rb') as f:\n dset_bags = pickle.load(f)\n else:\n accept_dests = str(LoadBags.__accepted_reps).strip('[]')\n raise NotImplementedError(\n 'Dataset \\'{}\\' is unsupported. Accepted datasets are {} .'.format(dset_str, accept_dests))\n\n if rep_str == 'BoB':\n dbags = dset_bags[0]\n elif rep_str == 'BAT':\n dbags = dset_bags[1]\n elif rep_str == 'JustBonds':\n dbags = dset_bags[2]\n else:\n accept_reps = str(LoadBags.__accepted_reps).strip('[]')\n raise NotImplementedError(\n 'Representation \\'{}\\' is unsupported. Accepted representations are {} .'.format(rep_str, accept_reps))\n\n self.bags = dbags[0]\n self.bag_sizes = dbags[1]\n\n return self.bags, self.bag_sizes", "def parse_set(string):\n string = string.strip()\n if string:\n return set(string.split(\",\"))\n else:\n return set()", "def kruising(soort1, soort2):\n list = []\n new1 = splitsing(soort1)[0] + splitsing(soort2)[1]\n new2 = splitsing(soort2)[0] + splitsing(soort1)[1]\n list.append(new1)\n list.append(new2)\n return tuple(list)", "def subsets(s):\n # YOUR CODE HERE\n # Got the idea from https://coderbyte.com/algorithm/print-all-subsets-given-set\n # total num of sets\n set_num = int(pow(2, len(s)))\n # convert set to list for easy indexing\n m = list(s)\n\n for i in range(0, set_num):\n # new list to yield\n k = []\n\n # convert to binary so that a 1=add, 0=ignore\n t = \"{0:b}\".format(i)\n\n # pad it according to length of set\n while len(t) < len(s):\n t = '0' + t\n\n # iterate over binary to match 1's\n for j in range(0, len(t)):\n if t[j] == '1':\n k.append(m[j])\n yield k", "def test_subsets(self):\n t = self.t \n self.assertEqual(t.subsets(), frozenset(\n [frozenset('HG'), frozenset('RM')]))", "def parse_bag(text: str) -> Bag:\n text = \"1 \" + text # Add 1 to parent bag to make pattern consistent\n m = re.findall(r\"(\\d+) (\\w+ \\w+) bags?\", text)\n _, color = m.pop(0) # First match is the parent bag\n contents = {color: int(quantity) for quantity, color in m}\n return (color, contents)", "def rle(string):\n return [(p.ilen(g), k) for k, g in it.groupby(string)]", "def _parse_bands(input_str):\n \n result = []\n \n def parse_minmax(subparts, index):\n try:\n if subparts[index] in (\"min\", \"max\"):\n return subparts[index]\n else:\n return float(subparts[index])\n except IndexError:\n return None\n except ValueError:\n raise argparse.ArgumentTypeError(\"Wrong format of band subset.\")\n \n for part in input_str.split(\",\"):\n subparts = part.split(\":\")\n \n if len(subparts) < 1 or len(subparts) > 3:\n raise argparse.ArgumentTypeError(\"Wrong format of band.\")\n \n number = int(subparts[0])\n dmin = parse_minmax(subparts, 1)\n dmax = parse_minmax(subparts, 2)\n result.append((number, dmin, dmax))\n \n return result", "def get_hand_subsets(hand):\n letters = [c for c in hand for i in range(hand[c])]\n hand_subsets = ()\n for i in reversed(range(1, len(letters)+1)):\n for tup in set(itertools.combinations(letters, i)):\n hand_subsets += (''.join(sorted(tup)), )\n return hand_subsets", "def 
decode_set_control_contents(content_string):\n\n # Oddly, if the set is empty, there are sometimes spurious spaces in\n # field entry. This may be browser madness. Handle it specially.\n if string.strip(content_string) == \"\":\n return []\n return string.split(content_string, \",\")", "def test_get_smallest_set_of_smallest_rings(self):\n\n m1 = Molecule(smiles='C12CCC1C3CC2CC3')\n sssr1 = m1.get_smallest_set_of_smallest_rings()\n sssr1_sizes = sorted([len(ring) for ring in sssr1])\n sssr1_sizes_expected = [4, 5, 5]\n self.assertEqual(sssr1_sizes, sssr1_sizes_expected)\n\n m2 = Molecule(smiles='C1(CC2)C(CC3)CC3C2C1')\n sssr2 = m2.get_smallest_set_of_smallest_rings()\n sssr2_sizes = sorted([len(ring) for ring in sssr2])\n sssr2_sizes_expected = [5, 5, 6]\n self.assertEqual(sssr2_sizes, sssr2_sizes_expected)\n\n m3 = Molecule(smiles='C1(CC2)C2C(CCCC3)C3C1')\n sssr3 = m3.get_smallest_set_of_smallest_rings()\n sssr3_sizes = sorted([len(ring) for ring in sssr3])\n sssr3_sizes_expected = [4, 5, 6]\n self.assertEqual(sssr3_sizes, sssr3_sizes_expected)\n\n m4 = Molecule(smiles='C12=CC=CC=C1C3=C2C=CC=C3')\n sssr4 = m4.get_smallest_set_of_smallest_rings()\n sssr4_sizes = sorted([len(ring) for ring in sssr4])\n sssr4_sizes_expected = [4, 6, 6]\n self.assertEqual(sssr4_sizes, sssr4_sizes_expected)\n\n m5 = Molecule(smiles='C12=CC=CC=C1CC3=C(C=CC=C3)C2')\n sssr5 = m5.get_smallest_set_of_smallest_rings()\n sssr5_sizes = sorted([len(ring) for ring in sssr5])\n sssr5_sizes_expected = [6, 6, 6]\n self.assertEqual(sssr5_sizes, sssr5_sizes_expected)", "def subs_listset(string_list, sub_len):\r\n return [set(subs(x, sub_len)) for x in string_list]", "def getSACKs(self, outtype=DATA_FLOAT, path=PATH_BACKWARD, rel=RELATIVE_LASTACK):\n\t\t\n\t\tsackpacks = []\n\t\t\n\t\tif path == PATH_BACKWARD:\n\t\t\tpkts = self.backward\n\t\telse:\n\t\t\tpkts = self.forward\n\t\t\n\t\tfor ts, p in pkts:\n\t\t\tif outtype == DATA_FLOAT:\n\t\t\t\tts = ts-self.origin\n\t\t\t\tts = dfToFloat(ts)\n\t\t\t\n\t\t\topts = TCPOptions(p.opts)\n\t\t\t\n\t\t\tif dpkt.tcp.TCP_OPT_SACK in opts:\n\t\t\t\tsegs = opts.get(dpkt.tcp.TCP_OPT_SACK)\n\t\t\t\tif rel == RELATIVE_LASTACK:\n\t\t\t\t\tnsegs = []\n\t\t\t\t\tfor s in segs:\n\t\t\t\t\t\tnsegs.append(s-p.ack)\n\t\t\t\t\t#print \"ts,ack\", ts, p.ack\n\t\t\t\t\t#print \"oldsegs\", segs\n\t\t\t\t\t#print \"newsegs\", nsegs\n\t\t\t\t\tsegs = tuple(nsegs)\n\t\t\t\t\n\t\t\t\tsackpacks.append( (ts, p.ack, segs) )\n\t\t\n\t\treturn sackpacks", "def __get_subsets(itemset):\n subsets = []\n length = len(itemset)\n for x in xrange(length):\n subset = itemset[:x] + itemset[x+1:]\n subsets.append(subset)\n return subsets", "def splitElems(self,set):\n set = unique(set)\n split = []\n n = 0\n for e in self.celems[1:]:\n i = set.searchsorted(e)\n split.append(set[n:i])\n n = i\n\n return split,[ asarray(s) - ofs for s,ofs in zip(split,self.celems) ]", "def parse_input(data: Iterator[str]) -> tuple[set[tuple[int, int]], Iterator[Fold]]:\n coords = (line.strip().split(',')\n for line in takewhile(lambda line: line != '\\n', data))\n instructs = (line.strip().split(' ')[-1].split('=') for line in data)\n return (set((int(x), int(y)) for x, y in coords),\n (Fold(Direction(d), int(v)) for d, v in instructs))", "def parse_bag_string(bag_string):\n bags = [] # make an empty array of bags\n matches = bag_string_prog.findall(bag_string)\n if not matches:\n raise ValueError(f\"{bag_string} does not have a regex match\")\n\n for match in matches:\n if \"no other\" not in match[1]: #disqualify that piece of regex 
that's matching a no other bag string\n bags.append(Bag(match[1], int(match[0]) if match[0] else 0))\n\n return bags # return the array of bags" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
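The split_rucksack row reads like the Advent of Code 2022 day 3 rucksack puzzle. A short worked example of the split, plus the set intersection the two halves are typically fed into (the intersection step is an assumption, not part of the row):

left, right = split_rucksack("vJrwpWtwJgWrhcsFMMfFFhFp")
print(left & right)  # {'p'}, the item type present in both compartments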
Initialize card with territory and symbol
def __init__(self, territory, symbol):
    self.territory = territory
    self.symbol = symbol
[ "def __init__(self, pos, card=None):\n self.pos = pos\n self.card = card", "def __init__(self, cards=[]):\n\n # decide which kind of header it belongs to\n try:\n if cards[0].key == 'SIMPLE':\n if 'GROUPS' in cards._keylist and cards['GROUPS'].value == True:\n self._hdutype = GroupsHDU\n elif cards[0].value == True:\n self._hdutype = PrimaryHDU\n else:\n self._hdutype = _ValidHDU\n elif cards[0].key == 'XTENSION':\n xtension = cards[0].value.rstrip()\n if xtension == 'TABLE':\n self._hdutype = TableHDU\n elif xtension == 'IMAGE':\n self._hdutype = ImageHDU\n elif xtension in ('BINTABLE', 'A3DTABLE'):\n self._hdutype = BinTableHDU\n else:\n self._hdutype = _ExtensionHDU\n else:\n self._hdutype = _ValidHDU\n except:\n self._hdutype = _CorruptedHDU\n\n # populate the cardlist\n self.ascard = CardList(cards)", "def __initializeBoard(self):\n self.cities = self.generateCities() # {id : City}\n self.infectionDeck = self.generateInfectionDeck() # [InfectionCard]\n self.playerDeck = self.generatePlayerDeck() # [PlayerCard]\n # start players at ATLANTA\n ## !!!!! just test with player 1 at the moment!\n #self.players[1].location = (\"ATLANTA\")\n self.cities[\"ATLANTA\"].researchStation = 1\n #self.cities[\"LAGOS\"].researchStation = 1\n # just for testing on game page\n #self.cities[\"SEOUL\"].researchStation = 1\n self.infectCitiesStage()\n self.distributeHand()\n #now need to place epidemic cards ( has to be done after hand has been delt)\n self.placeEpidemicCards()\n #self.setRoles()\n self.setStartingLocation()\n self.initialized = 1", "def __init__(self, s: ghidra.program.model.symbol.Symbol, row: int, charOffset: int):\n ...", "def __init__(self, deck, zoneType=None):\r\n self.deck = deck\r\n Zone.__init__(self, deck, zoneType=zoneType)", "def _init_csc_(self):\n self.csc = CurrencySymbolConverter(self.r)", "def create_cards():\n return {\n \"A\": (0, 11),\n \"2\": 2,\n \"3\": 3,\n \"4\": 4,\n \"5\": 5,\n \"6\": 6,\n \"7\": 7,\n \"8\": 8,\n \"9\": 9,\n \"10\": 10,\n \"J\": 10,\n \"Q\": 10,\n \"K\": 10\n }", "def __init__(self, suit, nb):\n self.suit = suit\n self.rank = nb\n if nb in [11, 12, 13]:\n # assigns the rank and value for jack, queen and king\n self.rank = Card.heads[nb-10]\n self.value = 10\n elif nb == 1:\n # assigns the rank and value for ace\n self.rank = Card.heads[nb-1]\n self.value = 11\n else:\n # assigns the value for all other cards\n self.value = nb", "def __init__(self, who, card, nominated_player=None, nominated_card=None):\n self.player = who\n self.card = card\n self.nominated_player = nominated_player\n self.nominated_card = nominated_card", "def initialise(self):\n\n self.__initialise_chase_mode()\n self.__initialise_frightened_mode()\n Character.initialise(self)", "def __init__(\n self,\n symbol,\n order_type,\n quantity,\n direction\n ):\n self.symbol = symbol\n self.order_type = order_type\n self.quantity = quantity\n self.direction = direction", "def add_card(cls, card, icard=0, comment=''):\n istart = 1 + icard * 4\n sid = integer(card, istart, 'sid')\n ring = integer(card, istart + 1, 'ring' + str(icard))\n phi = double(card, istart + 2, 'phi' + str(icard))\n temperature = double(card, istart + 3, 'T' + str(icard))\n\n return TEMPAX(sid, ring, phi, temperature, comment=comment)", "def __init__(self, numLet, suit):\n\n if type(numLet) == int:\n if numLet < 2 or numLet > 14: raise Exception('Card number must be between 2 and 14 (inclusive).')\n self._numLet = numLet\n for k in self.nEnum:\n if self.nEnum[k] == numLet: self._numLet = k\n elif type(numLet) == 
str:\n if numLet.upper() not in self.nEnum: raise Exception(\"Card letter must be \\'T\\', \\'J\\', \\'Q\\', \\'K\\', or \\'A\\'.\")\n self._numLet = numLet.upper()\n else: raise Exception('Card number/letter must be number or string.')\n\n if suit.lower() not in self.suits: raise Exception(\"Invalid suit. Valid suits are \\'c\\', \\'d\\', \\'s\\', and \\'h\\'.\")\n self._suit = suit.lower()", "def __init__(self, deck):\n # self._name = name\n self._deck = deck", "def initiate_deck(self):\n for suit in self.suits:\n for i in range(1, 14):\n new_card = Card(i, suit)\n self.cards.append(new_card)", "def add_card(cls, card, comment=''):\n mid = integer(card, 1, 'mid')\n e1_table = integer_or_blank(card, 2, 'T(E1)')\n e2_table = integer_or_blank(card, 3, 'T(E2)')\n nu12_table = integer_or_blank(card, 4, 'T(Nu12)')\n g12_table = integer_or_blank(card, 5, 'T(G12)')\n g1z_table = integer_or_blank(card, 6, 'T(G1z)')\n g2z_table = integer_or_blank(card, 7, 'T(G2z)')\n rho_table = integer_or_blank(card, 8, 'T(Rho)')\n a1_table = integer_or_blank(card, 9, 'T(A1)')\n a2_table = integer_or_blank(card, 10, 'T(A2)')\n\n xt_table = integer_or_blank(card, 12, 'T(Xt)')\n xc_table = integer_or_blank(card, 13, 'T(Xc)')\n yt_table = integer_or_blank(card, 14, 'T(Yt)')\n yc_table = integer_or_blank(card, 15, 'T(Yc)')\n s_table = integer_or_blank(card, 16, 'T(S)')\n ge_table = integer_or_blank(card, 17, 'T(GE)')\n f12_table = integer_or_blank(card, 18, 'T(F12)')\n\n assert len(card) <= 19, 'len(MATT8 card) = %i\\ncard=%s' % (len(card), card)\n return MATT8(mid, e1_table, e2_table, nu12_table, g12_table,\n g1z_table, g2z_table, rho_table,\n a1_table, a2_table, xt_table,\n xc_table, yt_table, yc_table,\n s_table, ge_table, f12_table,\n comment=comment)", "def __init__(self, chips=0):\n self.chips = chips\n self.cards = []\n self.splitCards = []\n self.aces = 0\n self.splitAces = 0", "def test_init():\n c1 = card.Card(0, 12)\n c2 = card.Card(1, 10)\n c3 = card.Card(2, 9)\n c4 = card.Card(0, 1)\n \n # Initialize deck and start game.\n deck = [c1, c2, c3, c4]\n game = lab09.Blackjack(deck)\n \n cornell.assert_equals([c1, c2], game.playerHand)\n cornell.assert_equals([c3], game.dealerHand)\n cornell.assert_equals([c4], deck) # check that cards were removed\n \n deck = card.full_deck() # non-shuffled deck\n game = lab09.Blackjack(deck)\n c1 = card.Card(0, 1)\n c2 = card.Card(0, 2)\n c3 = card.Card(0, 3)\n c4 = card.Card(0, 4)\n \n cornell.assert_equals([c1, c2], game.playerHand)\n cornell.assert_equals([c3], game.dealerHand)\n \n # check that right cards were removed\n cornell.assert_equals(card.full_deck()[3:], deck)\n \n print('The __init__ tests passed')", "def null_card():\n return Card(0,0,0,False)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Serialize to bytes. Normally, subclasses should override Packet._serialize.
def _to_bytes(self) -> bytes:
    x = self._serialize()
    return pack_varint(self.id) + pack_varint(len(x)) + x
[ "def serialize(self, packet):\n\t\tbuffer = bytearray()\n\t\tscratch = 0\n\t\tscratch_bits = 0\n\t\tfor index, data in enumerate(packet._raw):\n\t\t\tfield = packet.FIELDS[index]\n\t\t\tbits_remaining = field.size\n\n\t\t\twhile True:\n\t\t\t\tbits_used = min(8 - scratch_bits, bits_remaining)\n\n\t\t\t\tdata_to_copy = data >> (bits_remaining - bits_used)\n\t\t\t\tbits_remaining -= bits_used\n\n\t\t\t\t# Copy the bits to the scratch byte.\n\t\t\t\tscratch <<= bits_used\n\t\t\t\tscratch |= data_to_copy & ((1 << bits_used) - 1)\n\t\t\t\tscratch_bits += bits_used\n\n\t\t\t\t# Flush the scratch byte to the buffer.\n\t\t\t\tif scratch_bits == 8:\n\t\t\t\t\tbuffer.append(scratch)\n\t\t\t\t\tscratch = 0\n\t\t\t\t\tscratch_bits = 0\n\n\t\t\t\tif bits_remaining == 0:\n\t\t\t\t\tbreak\n\n\t\t# Shift over remaining bits and pad.\n\t\tif scratch_bits > 0:\n\t\t\tbuffer.append(scratch << (8 - scratch_bits))\n\t\t\t\n\t\treturn buffer", "def __bytes__(self):\n\n return bytes(self._data)", "def to_bytes(self) -> bytes:\n data: list = [self.version, self.head_id, self.tail_id,\n self.available_head_id_of_virtual_step, self.available_head_id_of_deposit,\n self.expires_of_virtual_step, self.expires_of_deposit]\n return MsgPackForDB.dumps(data)", "def get_packet(self) -> bytes:\n return self.header + self.payload", "def bytes(self):\n return bytes(self.b)", "def convert_scapy_packet_to_bytes(packet):\n if six.PY2:\n return str(packet)\n else:\n return bytes(packet)", "def encode(self) -> bytes:\n \n pass", "def encode(self) -> bytes:\n options = self._encode_options()\n self.header_len = (len(options) + 16)\n flags = self._encode_flags()\n # B 1 H 2 L 4\n # src_port dest_port seq_num ack_num flags unused checksum\n head = struct.pack('!HHLLBBH', self.src_port, self.dest_port, self.seq_num, self.ack_num, flags, 0, 0)\n segment = bytearray(head)\n\n if self.options:\n segment.extend(options)\n\n segment.extend(self.payload)\n checksum = RDTSegment.calc_checksum(segment)\n segment[14] = checksum >> 8\n segment[15] = checksum & 0xFF\n return bytes(segment)", "def encode(self):\n return self.format.pack(self.pdu_type, self.reserved1, self.pdu_length, self.reserved2)", "def protobuf_dumps(message):\n message.SerializeToString()", "def to_raw(self):\n return f\"{self.packet_type}{self.token}{self.timestamp}{self.payload_to_raw()}\"", "def _encode(o):\n return pickle.dumps(o, pickle.HIGHEST_PROTOCOL)", "def save_to_packet(self):\n\n array = self.blocks.tostring()\n array += pack_nibbles(self.metadata)\n array += pack_nibbles(self.blocklight)\n array += pack_nibbles(self.skylight)\n packet = make_packet(\"chunk\", x=self.x * 16, y=0, z=self.z * 16,\n x_size=15, y_size=127, z_size=15, data=array)\n return packet", "def _encode_(self, val):\n return pickle.dumps(val, protocol=-1)", "def encode_to_bytes(self, obj):\n old_fp = self.fp\n self.fp = fp = BytesIO()\n self.encode(obj)\n self.fp = old_fp\n return fp.getvalue()", "def serialize(self):\n return bytes(BlockchainEncoder().encode(self.chain), \"utf-8\")", "def to_knx(self) -> bytes:\n return int.to_bytes(self.raw, 2, \"big\")", "def _dumpPickle(self, data):\r\n \r\n return codecs.encode(cPickle.dumps(data,protocol=cPickle.HIGHEST_PROTOCOL), \"base64\").decode()", "def serialize(self, buff):\n try:\n length = len(self.messages)\n buff.write(_struct_I.pack(length))\n for val1 in self.messages:\n _x = val1.type\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n 
_x = val1.msg\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.metas)\n buff.write(_struct_I.pack(length))\n for val1 in self.metas:\n length = len(val1.pairs)\n buff.write(_struct_I.pack(length))\n for val2 in val1.pairs:\n _x = val2.first\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.second\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
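The _to_bytes row leans on a pack_varint helper that the dataset row does not include. A hedged sketch of one common choice, an unsigned little-endian base-128 (LEB128-style) varint; this specific encoding is an assumption:

def pack_varint(value: int) -> bytes:
    # encode a non-negative int as a little-endian base-128 varint (assumed encoding)
    out = bytearray()
    while True:
        byte = value & 0x7F
        value >>= 7
        if value:
            out.append(byte | 0x80)  # high bit set: more bytes follow
        else:
            out.append(byte)
            return bytes(out)

print(pack_varint(300).hex())  # 'ac02'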
Deserialize from bytes. Subclasses should override Packet._deserialize.
def _from_bytes(input: bytes) -> "Packet":
    id, payload = unpack_varint(input)
    length, payload = unpack_varint(payload)
    return Packet.get_packet_by_id(id)._deserialize(payload)
[ "def deserialize(self, packet, packet_bytes):\n\t\tinstance = packet()\n\t\tcurrent_byte = 0\n\t\tcurrent_byte_offset = 0\n\t\tfor index, field in enumerate(packet.FIELDS):\n\n\t\t\tbits_required = field.size\n\t\t\tscratch = 0\n\t\t\tscratch_bits = 0\n\t\t\twhile True:\n\t\t\t\tbits_used = min(8 - current_byte_offset, bits_required)\n\t\t\t\tdata_available = packet_bytes[current_byte] & ((1 << (8 - current_byte_offset)) - 1)\n\t\t\t\tdata_to_copy = data_available >> (8 - bits_used - current_byte_offset)\n\n\t\t\t\tcurrent_byte_offset += bits_used\n\t\t\t\tbits_required -= bits_used\n\n\t\t\t\t# Copy the data into the scratch byte.\n\t\t\t\tscratch <<= bits_used\n\t\t\t\tscratch |= data_to_copy\n\t\t\t\tscratch_bits += bits_used\n\n\t\t\t\t# Choose the next byte from the buffer.\n\t\t\t\tif current_byte_offset == 8:\n\t\t\t\t\tcurrent_byte += 1\n\t\t\t\t\tcurrent_byte_offset = 0\n\n\t\t\t\tif bits_required == 0:\n\t\t\t\t\tinstance._raw[index] = scratch\n\t\t\t\t\tbreak\n\n\t\treturn instance", "def deserialize(self, str):\n if python3:\n codecs.lookup_error(\"rosmsg\").msg_type = self._type\n try:\n end = 0\n start = end\n end += 4\n (self.data,) = _get_struct_I().unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) # most likely buffer underfill", "def deserialize(self, str):\n try:\n end = 0\n start = end\n end += 8\n (self.ret,) = _struct_q.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.msg = str[start:end].decode('utf-8')\n else:\n self.msg = str[start:end]\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def decode(cls, data: bytes):\n\n try:\n msg_id = struct.unpack('>b', data[4:5])[0]\n if msg_id == cls.ID:\n bitfield = data[5:]\n bitfield = BitArray(bitfield)\n return cls(bitfield)\n except:\n pass\n return None", "def deserialize(data, cls):\r\n data = array.array('B', data)\r\n obj, offset = BinaryFormatter.deserialize_object(data, 0, cls)\r\n return obj", "def deserialize_ip_desc(serialized):\n proto_wrapper = RedisState()\n proto_wrapper.ParseFromString(serialized)\n serialized_proto = proto_wrapper.serialized_msg\n proto = IPDesc()\n proto.ParseFromString(serialized_proto)\n desc = _ip_desc_from_proto(proto)\n return desc", "def deserialize(partition_bytes):\n return cloudpickle.loads(partition_bytes)", "def decode(cls, data):\n raise NotImplementedError()", "def deserialize(self, str):", "def decode_payload(cls, payload: bytes) -> MsgGenericPayload:\n pass", "def decode(self, bytes):\n\t\tif bytes[0] == 0x0c:\n\t\t\tlength = bytes[1];\n\t\t\treturn bytes[2:length + 2].decode(\"UTF-8\");\n\t\telse:\n\t\t\traise Exception(\"Not an UTF8 string\");", "def from_bytes(self, bytes_data: bytes) -> \"Config\":\n return self.from_str(bytes_data.decode(\"utf8\"))", "def deserialize(self, serialized_byte_stream):\n json_str_obj = serialized_byte_stream.decode()\n data_dict = jsonpickle.decode(json_str_obj)\n\n msg = Message(data=data_dict['data'])\n msg.set_header(data_dict['header'])\n\n return msg", "def decode(record_bytes):\n (timestamp, keysize, valuesize) = decode_metadata(record_bytes[:METADATA_SIZE])\n data_str = record_bytes[METADATA_SIZE:].decode(ENCODING)\n key = data_str[:keysize]\n value = data_str[keysize:]\n return Record(timestamp, keysize, valuesize, key, value)", "def unmarshal(cls, public_key_bytes):", "def decode(self, data):\n\t\traise 
NotImplementedError()", "def decode(cls, stream):\n _, reserved, item_length, max_num_ops_invoked, \\\n max_num_ops_performed = cls.item_format.unpack(stream.read(8))\n return cls(reserved=reserved, item_length=item_length,\n max_num_ops_invoked=max_num_ops_invoked,\n max_num_ops_performed=max_num_ops_performed)", "def parse_packet(self, data):\n if data.find(self.packet_prefix) != 0:\n raise Exception('Malformed packet')\n\n first_line_length = data.find(b'\\n')\n if first_line_length == -1:\n raise Exception('Malformed packet')\n\n response_type = data[len(self.packet_prefix):first_line_length].decode()\n response_data = data[first_line_length + 1:].decode()\n return response_type, response_data", "def fromPacket(package):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
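The matching decode direction. unpack_varint is also not shown in the rows, so this sketch assumes the same base-128 encoding and the convention, implied by the _from_bytes body above, that it returns the decoded value together with the remaining bytes:

def unpack_varint(buf: bytes):
    # decode one varint from the front of buf; return (value, rest_of_buf); assumed encoding
    value = 0
    shift = 0
    for i, byte in enumerate(buf):
        value |= (byte & 0x7F) << shift
        if not byte & 0x80:
            return value, buf[i + 1:]
        shift += 7
    raise ValueError("truncated varint")

print(unpack_varint(b"\xac\x02rest"))  # (300, b'rest')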
Concatenates multiple boxes together
def concatenate(cls, boxes, axis=0):
    if len(boxes) == 0:
        raise ValueError('need at least one box to concatenate')
    if axis != 0:
        raise ValueError('can only concatenate along axis=0')
    format = boxes[0].format
    datas = [_view(b.toformat(format).data, -1, 4) for b in boxes]
    newdata = _cat(datas, axis=0)
    new = cls(newdata, format)
    return new
[ "def concatenate(boxes_list:List[Boxes], fields:Collection[str]=None) -> Boxes:\n if not boxes_list:\n if fields is None:\n fields = []\n return empty(*fields)\n\n if fields is None:\n # Get fields common to all sub-boxes\n common_fields = set.intersection( *[set(x.get_fields()) for x in boxes_list] )\n else:\n common_fields = fields\n\n coords = np.concatenate([x.get() for x in boxes_list], axis=0)\n new_fields = dict()\n for f in common_fields:\n new_fields[f] = np.concatenate([x.get_field(f) for x in boxes_list], axis=0)\n return Boxes(coords, **new_fields)", "def boxs(board):\n boxes = []\n for grouped in group(board, 3):\n triple = [group(row, 3) for row in grouped]\n zipped = list(zip(*triple))\n rows = [flatten(row) for row in zipped]\n boxes.extend(rows)\n return boxes", "def get_boxes(rows, cols):\n return [s + t for s in rows for t in cols]", "def placeGreedily(self,boxes):\n\t\t\n\t\t#find a better way to map the array\n\t\t#used to keep track of the boxes that are placed\n\t\t_boxes = []\n\t\tfor i in range(len(boxes)):\n\t\t\t_boxes.append((i,boxes[i]))\n\t\t\t\n\t\t\t\n\t\tfor x in range(self.w):\n\t\t\tfor y in range(self.l):\n\t\t\t\tfor b in _boxes:\n\t\t\t\t\tbox_width = b[1][0]\n\t\t\t\t\tbox_length = b[1][1]\n\t\t\t\t\tbox_id = b[0]+1 #need to make sure that the id is not zero\n\t\t\t\t\tif self.placeBox(x,y,box_width,box_length,box_id):\n\t\t\t\t\t\t_boxes.remove(b)\n\t\t\t\t\telif self.placeBox(x,y,box_length,box_width,box_id):\n\t\t\t\t\t\t_boxes.remove(b)\n\t\t\n\t\t#if all the boxes are placed we are done\n\t\tif len(_boxes) == 0:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def concatenate(cubes):\n cubes = equalise_all(cubes)\n cube_list = iris.cube.CubeList(cubes)\n cube = cube_list.concatenate_cube()\n return cube", "def merge_boxes(\n boxes: Sequence[Box],\n) -> List[Box]:\n labels = np.asarray([b.label for b in boxes])\n\n coords = np.asarray([(b.x0, b.x1, b.y0, b.y1) for b in boxes])\n\n # Key that determines if two boxes can be merged, initialized from the box labels\n merge_keys = np.unique(labels, return_inverse=True)[1]\n\n # For each page\n while True:\n adj = np.zeros((len(boxes), len(boxes)), dtype=bool)\n\n # Split boxes between those that belong to a label (and could be merged),\n # and those that do not belong to that label and will prevent the mergers\n for key in np.unique(merge_keys):\n key_filter = merge_keys == key\n\n x0, x1, y0, y1 = coords[key_filter].T\n obs_x0, obs_x1, obs_y0, obs_y1 = coords[~key_filter].T\n\n A = (slice(None), None, None)\n B = (None, slice(None), None)\n\n # Find the bbox of the hypothetical merged boxes\n merged_x0 = np.minimum(x0[A], x0[B])\n merged_x1 = np.maximum(x1[A], x1[B])\n merged_y0 = np.minimum(y0[A], y0[B])\n merged_y1 = np.maximum(y1[A], y1[B])\n\n # And detect if it overlaps existing box of a different label\n dx = np.minimum(merged_x1, obs_x1) - np.maximum(merged_x0, obs_x0)\n dy = np.minimum(merged_y1, obs_y1) - np.maximum(merged_y0, obs_y0)\n merged_overlap_with_other = (dx > 0) & (dy > 0)\n no_box_inbetween = (~merged_overlap_with_other).all(-1)\n\n # Update the adjacency matrix to 1 if two boxes can be merged\n # (ie no box of a different label lie inbetween)\n adj_indices = np.flatnonzero(key_filter)\n adj[adj_indices[:, None], adj_indices[None, :]] = no_box_inbetween\n\n # Build the cliques of boxes that can be merged\n cliques = nx.find_cliques(nx.from_numpy_array(adj))\n\n # These cliques of mergeable boxes can be overlapping: think of a cross\n # like this=\n # *** --- ***\n # --- --- 
---\n # *** --- ***\n # for which the two (-) labelled cliques would be the two axis of the cross\n # For each box, we change its label to its first clique number, so the cross\n # looks like this (symbols between the 2 figures don't map to the same indices)\n # *** --- ***\n # ooo ooo ooo\n # *** --- ***\n # and rerun the above process until there is no conflict\n\n conflicting_cliques = False\n seen = set()\n for clique_idx, clique_box_indices in enumerate(cliques):\n for box_idx in clique_box_indices:\n if box_idx in seen:\n # print(\"Already seen\", box_idx)\n conflicting_cliques = True\n else:\n seen.add(box_idx)\n merge_keys[box_idx] = clique_idx\n\n if not conflicting_cliques:\n break\n\n x0, x1, y0, y1 = coords.T.reshape((4, -1))\n\n # Finally, compute the bbox of the sets of mergeable boxes (same `key`)\n merged_boxes = []\n for group_key in dict.fromkeys(merge_keys):\n indices = [i for i, key in enumerate(merge_keys) if group_key == key]\n first_box = boxes[indices[0]]\n merged_boxes.append(\n first_box.evolve(\n x0=min(x0[i] for i in indices),\n y0=min(y0[i] for i in indices),\n x1=max(x1[i] for i in indices),\n y1=max(y1[i] for i in indices),\n )\n )\n\n return merged_boxes", "def correct_boxes(height, width, boxes, aug_type='rotate', **kwargs):\n result = []\n boxes = np.asarray(boxes)\n\n w0 = (width - 0.5) / 2.0\n h0 = (height - 0.5) / 2.0\n for box in boxes:\n x1, y1, x2, y2, class_id = box\n rela_x0 = (x1 + x2) / float(width) / 2\n rela_y0 = (y1 + y2) / float(height) / 2\n rela_w0 = np.abs(x1 - x2) / float(width)\n rela_h0 = np.abs(y1 - y2) / float(height)\n\n if aug_type == 'rotate':\n '''\n as normal, formula for Coordinate point rotation is :\n x_new = (x - w0) * np.cos(angel) - (y - h0) * np.sin(angel) + w0\n y_new = (x - w0) * np.sin(angel) + (y - h0) * np.cos(angel) + h0\n but in our case, the first quadrant should be changed into the forth quadrant in morphology fields.\n '''\n\n angel = kwargs.get('angel', 0)\n angel = angel * 2 * np.pi / 360\n\n fxy = lambda x, y: [(x - w0) * np.cos(angel) - (-y - -h0) * np.sin(angel) + w0,\n -((x - w0) * np.sin(angel) + (-y - -h0) * np.cos(angel) + -h0)]\n\n x11, y11 = fxy(x1, y1)\n x22, y22 = fxy(x2, y2)\n x33, y33 = fxy(x2, y1)\n x44, y44 = fxy(x1, y2)\n\n new_x1 = np.round(np.min([x11, x22, x33, x44])).astype(int)\n new_x2 = np.round(np.max([x11, x22, x33, x44])).astype(int)\n new_y1 = np.round(np.min([y11, y22, y33, y44])).astype(int)\n new_y2 = np.round(np.max([y11, y22, y33, y44])).astype(int)\n\n new_x1 = np.max([0, new_x1])\n new_x2 = np.min([width, new_x2])\n new_y1 = np.max([0, new_y1])\n new_y2 = np.min([height, new_y2])\n\n result.append([new_x1, new_y1, new_x2, new_y2, class_id])\n\n elif aug_type == 'flip':\n if kwargs.get('flip_code', 1) == 1:\n new_x1 = width - x2\n new_x2 = width - x1\n new_y1 = y1\n new_y2 = y2\n elif kwargs.get('flip_code', 0) == 0:\n new_y1 = height - y2\n new_y2 = height - y1\n new_x1 = x1\n new_x2 = x2\n elif kwargs.get('flip_code', -1) == -1:\n new_x1 = width - x2\n new_x2 = width - x1\n new_y1 = height - y2\n new_y2 = height - y1\n result.append([new_x1, new_y1, new_x2, new_y2, class_id])\n\n elif aug_type == 'resize':\n new_h, new_w = kwargs.get('new_h'), kwargs.get('new_w')\n bg_h, bg_w = kwargs.get('bg_h'), kwargs.get('bg_w')\n\n dh = (bg_h - new_h) / 2.0\n dw = (bg_w - new_w) / 2.0\n\n abs_new_x0 = new_w * rela_x0\n abs_new_y0 = new_h * rela_y0\n abs_new_w0 = new_w * rela_w0\n abs_new_h0 = new_h * rela_h0\n\n if dh >= 0 and dw >= 0:\n new_x1 = abs_new_x0 - abs_new_w0 / 2.0 + dw\n 
new_x2 = abs_new_x0 + abs_new_w0 / 2.0 + dw\n new_y1 = abs_new_y0 - abs_new_h0 / 2.0 + dh\n new_y2 = abs_new_y0 + abs_new_h0 / 2.0 + dh\n new_x1 = np.max([dw, new_x1])\n new_x2 = np.min([dw + new_w, new_x2])\n new_y1 = np.max([dh, new_y1])\n new_y2 = np.min([dh + new_h, new_y2])\n\n elif dh < 0 and dw >= 0:\n new_x1 = abs_new_x0 - abs_new_w0 / 2.0 + dw\n new_x2 = abs_new_x0 + abs_new_w0 / 2.0 + dw\n new_y1 = abs_new_y0 + dh - abs_new_h0 / 2.0\n new_y2 = new_y1 + abs_new_h0\n new_y1 = np.max([dh, new_y1])\n new_y2 = np.min([dh + new_h, new_y2])\n\n elif dh >= 0 and dw < 0:\n new_x1 = abs_new_x0 + dw - abs_new_w0 / 2.0\n new_x2 = new_x1 + abs_new_w0\n new_y1 = abs_new_y0 - abs_new_h0 / 2.0 + dh\n new_y2 = abs_new_y0 + abs_new_h0 / 2.0 + dh\n new_x1 = np.max([dw, new_x1])\n new_x2 = np.min([dw + new_w, new_x2])\n\n else:\n new_x1 = abs_new_x0 + dw - abs_new_w0 / 2.0\n new_x2 = new_x1 + abs_new_w0\n new_y1 = abs_new_y0 + dh - abs_new_h0 / 2.0\n new_y2 = new_y1 + abs_new_h0\n\n new_x1 = np.max([0, new_x1])\n new_x2 = np.min([new_x2, bg_w - 1])\n new_y1 = np.max([0, new_y1])\n new_y2 = np.min([new_y2, bg_h - 1])\n if new_x1 >= bg_w or new_y1 >= bg_h:\n continue\n result.append([new_x1, new_y1, new_x2, new_y2, class_id])\n\n return np.asarray(result, dtype=int)", "def concatenate(boxlists, fields=None):\n if not isinstance(boxlists, list):\n raise ValueError(\"boxlists should be a list\")\n if not boxlists:\n raise ValueError(\"boxlists should have nonzero length\")\n for boxlist in boxlists:\n if not isinstance(boxlist, np_box_list.BoxList):\n raise ValueError(\n \"all elements of boxlists should be BoxList objects\"\n )\n concatenated = np_box_list.BoxList(\n np.vstack([boxlist.get() for boxlist in boxlists])\n )\n if fields is None:\n fields = boxlists[0].get_extra_fields()\n for field in fields:\n first_field_shape = boxlists[0].get_field(field).shape\n first_field_shape = first_field_shape[1:]\n for boxlist in boxlists:\n if not boxlist.has_field(field):\n raise ValueError(\"boxlist must contain all requested fields\")\n field_shape = boxlist.get_field(field).shape\n field_shape = field_shape[1:]\n if field_shape != first_field_shape:\n raise ValueError(\n \"field %s must have same shape for all boxlists \"\n \"except for the 0th dimension.\" % field\n )\n concatenated_field = np.concatenate(\n [boxlist.get_field(field) for boxlist in boxlists], axis=0\n )\n concatenated.add_field(field, concatenated_field)\n return concatenated", "def box(N):\n print()\n for i in range(N):\n for j in range(N):\n print('*', end='')\n print()", "def stack_boxes_doit(self):\n df = self.cat_galtbl\n Nstack = self.Nstack\n\n N2 = int(Nstack/2)\n assert Nstack%2 == 0, 'Nstack should be an even integer.'\n\n dfcols = list(df.columns.values)\n extra_vals = df[[c for c in dfcols if c not in ['x','y','z']]] # get df of just extra values\n\n idx_omag = np.floor(np.log10(np.max(df.index.values))) # order of magnitude of max df.index.value\n nblist = [] # list to hold DataFrames for each new box\n nLin = np.linspace(-N2,N2-1,Nstack)*self.cat_Lbox # array of ints -N2 to N2-1, defines deltas in each direction\n deltax,deltay,deltaz = np.meshgrid(nLin,nLin,nLin) # holds deltas for each new box\n for b in range(Nstack**3): # for each new box\n boxdf = extra_vals.copy(deep=True)\n dx,dy,dz = deltax.flat[b], deltay.flat[b], deltaz.flat[b]\n boxdf['x'] = df.x + dx\n boxdf['y'] = df.y + dy\n boxdf['z'] = df.z + dz\n\n idx_offset = np.int32(b*10**(idx_omag+1)) # add this to the indices to ensure unique indices for each galaxy\n 
boxdf.index = boxdf.index + idx_offset\n\n nblist.append(boxdf)\n\n newdf = pd.concat(nblist, ignore_index=False)\n # index of original galaxy is retained => each index is repeated Nstack^3 times.\n\n return newdf", "def draw_box(img, boxes):\n box = ImageDraw.Draw(img)\n for i in range(boxes.shape[0]):\n data = list(boxes[i])\n shape = [data[0], data[1], data[2], data[3]]\n box.rectangle(shape, outline =\"#02d5fa\", width=3)\n return img", "def expand_boxes(self, boxes, scale):\n w_half = (boxes[:, 2] - boxes[:, 0]) * .5\n h_half = (boxes[:, 3] - boxes[:, 1]) * .5\n x_c = (boxes[:, 2] + boxes[:, 0]) * .5\n y_c = (boxes[:, 3] + boxes[:, 1]) * .5\n\n w_half *= scale\n h_half *= scale\n\n boxes_exp = np.zeros(boxes.shape)\n boxes_exp[:, 0] = x_c - w_half\n boxes_exp[:, 2] = x_c + w_half\n boxes_exp[:, 1] = y_c - h_half\n boxes_exp[:, 3] = y_c + h_half\n\n return boxes_exp", "def change_box_order(boxes, order):\n assert order in {\"xyxy2xywh\", \"xywh2xyxy\"}\n concat_fn = torch.cat if isinstance(boxes, torch.Tensor) else np.concatenate\n\n a = boxes[:, :2]\n b = boxes[:, 2:]\n if order == \"xyxy2xywh\":\n return concat_fn([(a + b) / 2, b - a], 1)\n return concat_fn([a - b / 2, a + b / 2], 1)", "def prepare_boxlist(self, boxes, scores, image_shape):\n boxes = boxes.reshape(-1, 7)\n scores = scores.reshape(-1)\n boxlist = RBoxList(boxes, image_shape, mode=\"xywha\")\n boxlist.add_field(\"scores\", scores)\n return boxlist", "def get_boxes_all_list(self):\n return reduce(lambda x,y: x+y, self.box_locations.values(), [])", "def show_hitboxes(self):\n for bp in self.body_parts:\n color = (255, 0, 0, 255) if bp.slot - 100 < 0 else (0, 0, 255, 255)\n self.master.add(box.Box(bp.shape, color))", "def make_arms():\n f1 = box(pos=(-22,2.5,58), axis=(1,0,0),\n length=35, width=5, height=2, color=color.green)\n f2 = box(pos=(22,2.5,58), axis=(1,0,0),\n length=35, width=5, height=2, color=color.green)\n list_of_arms = [ f1, f2 ]\n return list_of_arms", "def serialize(box_list):\r\n return [\r\n (box.element_tag,\r\n ('Anon' if (box.style.anonymous and\r\n type(box) not in (boxes.TextBox, boxes.LineBox))\r\n else '') + type(box).__name__[:-3],\r\n # All concrete boxes are either text, replaced, column or parent.\r\n (box.text if isinstance(box, boxes.TextBox)\r\n else '<replaced>' if isinstance(box, boxes.ReplacedBox)\r\n else serialize(getattr(box, 'column_groups', ()) + box.children)))\r\n for box in box_list]", "def gather(boxlist, indices, fields=None):\n if indices.size:\n if np.amax(indices) >= boxlist.num_boxes() or np.amin(indices) < 0:\n raise ValueError(\"indices are out of valid range.\")\n subboxlist = np_box_list.BoxList(boxlist.get()[indices, :])\n if fields is None:\n fields = boxlist.get_extra_fields()\n for field in fields:\n extra_field_data = boxlist.get_field(field)\n subboxlist.add_field(field, extra_field_data[indices, ...])\n return subboxlist" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
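The concatenate row looks like the classmethod of a kwimage-style Boxes container. A hedged usage sketch, assuming the kwimage package is installed and that its Boxes(data, format) constructor and Boxes.concatenate classmethod behave as shown in the row:

import numpy as np
import kwimage

a = kwimage.Boxes(np.array([[0, 0, 10, 10]]), 'xywh')
b = kwimage.Boxes(np.array([[5, 5, 20, 20], [1, 2, 3, 4]]), 'xywh')
both = kwimage.Boxes.concatenate([a, b])
print(both.data.shape)  # (3, 4): all three boxes stacked in a single container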
is the backend fueled by numpy?
def is_numpy(self):
    return isinstance(self.data, np.ndarray)
[ "def has_numpy(self):\r\n self._check_numpy()\r\n return self._found_numpy", "def is_numpy_type(x):\n return type(x).__module__ == np.__name__", "def is_numpy_array(x):\n return _is_numpy(x)", "def _is_numpy_array(obj: object) -> bool:\n return _as_numpy_array(obj) is not None", "def is_numpy(self):\r\n return not torch.is_tensor(self.sample_points)", "def _is_ndarray(value):\n # TODO(tomhennigan) Support __array_interface__ too.\n return hasattr(value, \"__array__\") and not (\n isinstance(value, ops.Tensor)\n or isinstance(value, resource_variable_ops.BaseResourceVariable)\n or hasattr(value, \"_should_act_as_resource_variable\")\n\n # For legacy reasons we do not automatically promote Numpy strings.\n or isinstance(value, np.str_)\n # NumPy dtypes have __array__ as unbound methods.\n or isinstance(value, type)\n # CompositeTensors should be flattened instead.\n or isinstance(value, composite_tensor.CompositeTensor))", "def IsArray(self) -> bool:", "def _verify_np_symbol(op_name, func_name, sym):\n from .numpy._symbol import _Symbol as np_symbol\n if not isinstance(sym, np_symbol):\n raise TypeError('Operator `{}` registered in backend is known as `{}` in Python. '\n 'This is a numpy operator which can only accept '\n 'MXNet numpy ndarrays, while received a legacy ndarray. '\n 'Please ensure that you have activated numpy semantics by calling '\n '`npx.set_np()` in your code. If you still see this error with numpy '\n 'semantics activated, please call `as_np_ndarray()` upon the legacy '\n 'ndarray to convert it to an MXNet numpy ndarray, and then feed the '\n 'converted array to this operator.'\n .format(op_name, func_name))", "def is_dtype_numpy(dtype):\n is_torch = is_dtype_tensor(dtype)\n is_num = dtype in (int, float, complex)\n if hasattr(dtype, \"__module__\"):\n is_numpy = dtype.__module__ == \"numpy\"\n else:\n is_numpy = False\n return (is_num or is_numpy) and not is_torch", "def _is_npy_target(self):\n return self.args_parser.output_file_type == 'npy'", "def _verify_legacy_symbol(op_name, func_name, sym):\n from .numpy._symbol import _Symbol as np_symbol\n if isinstance(sym, np_symbol):\n raise TypeError('Operator `{}` registered in backend is known as `{}` in Python. '\n 'This is a legacy operator which can only accept '\n 'legacy ndarrays, while received an MXNet numpy ndarray. '\n 'Please call `as_nd_ndarray()` upon the numpy ndarray to '\n 'convert it to a legacy ndarray, and then feed the converted '\n 'array to this operator.'\n .format(op_name, func_name))", "def _numpy_array(self, arr):\n if arr.__class__.__name__ == 'HOOMDGPUArray':\n return arr.get()\n else:\n return arr", "def is_any_array(a):\n return is_sparse(a) or isinstance(a, np.ndarray)", "def is_nd(M):\n return isinstance(M, np.ndarray)", "def __isNumeric(self, arr):\n try:\n return arr.dtype.kind in 'biufc'\n except AttributeError:\n return False", "def _is_arraylike(input_array):\n return (hasattr(input_array, '__len__') or\n hasattr(input_array, 'shape') or\n hasattr(input_array, '__array__'))", "def is_interactive_backend():\n return 'ipympl' in mpl.get_backend()", "def has_shared_memory(self):\n if self.obj is self.arr:\n return True\n if not isinstance(self.obj, np.ndarray):\n return False\n obj_attr = wrap.array_attrs(self.obj)\n return obj_attr[0] == self.arr_attr[0]", "def _is_atleast_1d_numpy_array(data):\n return NUMPY and isinstance(data, numpy.ndarray) and data.ndim > 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns the kwarray.ArrayAPI implementation for the data
def _impl(self):
    return kwarray.ArrayAPI.coerce(self.data)
[ "def get_array(self): # real signature unknown; restored from __doc__\n pass", "def array(self) -> ArrayLike:\n # error: \"SingleDataManager\" has no attribute \"arrays\"; maybe \"array\"\n return self.arrays[0] # type: ignore[attr-defined]", "def array(self):\n raise NotImplementedError", "def __arrow_array__(self, type=None):\n return self.data", "def __array_interface__(self) -> dict:\n shape = bufferprotocol.getshape(self.__sexp__._cdata)\n data = openrlib.ffi.buffer(self._R_GET_PTR(self.__sexp__._cdata))\n strides = bufferprotocol.getstrides(self.__sexp__._cdata,\n shape,\n self._R_SIZEOF_ELT)\n return {'shape': shape,\n 'typestr': self._NP_TYPESTR,\n 'strides': strides,\n 'data': data,\n 'version': 3}", "def __array__(self):\n return self.numpy()", "def get_array(self):\n return self._raw_data", "def array_data(self):\r\n self.INC_array=[]\r\n self.local_IEN_array=[]\r\n self.KV_xi_array=[]\r\n self.KV_eta_array=[]\r\n \r\n if self.dimension==1:\r\n \r\n self.INC_array.append(self.INC(0))\r\n self.local_IEN_array.append(self.local_IEN(0))\r\n self.KV_xi_array.append(self.knot_vector(self.number_elements,self.order,self.mp))\r\n \r\n elif self.dimension==2:\r\n \r\n for pnum in np.arange(len(self.num_bases)):\r\n \r\n self.INC_array.append(self.INC(pnum))\r\n self.local_IEN_array.append(self.local_IEN(pnum))\r\n self.KV_xi_array.append(self.knot_vector(self.number_elements[pnum,0],self.order[pnum,0],self.mp[pnum,0]))\r\n self.KV_eta_array.append(self.knot_vector(self.number_elements[pnum,1],self.order[pnum,1],self.mp[pnum,1]))", "def getArray(wpdi, field, name):\n # Point Data\n if field == 0:\n arr = wpdi.PointData[name]\n # Cell Data:\n elif field == 1:\n arr = wpdi.CellData[name]\n # Field Data:\n elif field == 2:\n arr = wpdi.FieldData[name]\n # Row Data:\n elif field == 6:\n arr = wpdi.RowData[name]\n else:\n raise Exception('Field association not defined. 
Try inputing Point, Cell, Field, or Row data.')\n return arr", "def __from_arrow__(self, data):\n return self.construct_array_type()(data)", "def getdata(self):\n return Array._from_apply(\"wf.maskedarray.getdata\", self)", "def _numpy_array(self, arr):\n if arr.__class__.__name__ == 'HOOMDGPUArray':\n return arr.get()\n else:\n return arr", "def document_array_cls(self) -> Type[DocumentArray]:\n return self._document_array_cls", "def do_array(self, parent=None, ident=0):\n # TC_ARRAY classDesc newHandle (int)<size> values[size]\n log_debug(\"[array]\", ident)\n _, classdesc = self._read_and_exec_opcode(\n ident=ident + 1,\n expect=(\n TerminalCode.TC_CLASSDESC,\n TerminalCode.TC_PROXYCLASSDESC,\n TerminalCode.TC_NULL,\n TerminalCode.TC_REFERENCE,\n ),\n )\n\n array = JavaArray(classdesc)\n\n self._add_reference(array, ident)\n\n (size,) = self._readStruct(\">i\")\n log_debug(\"size: {0}\".format(size), ident)\n\n array_type_code = TypeCode(ord(classdesc.name[0]))\n assert array_type_code == TypeCode.TYPE_ARRAY\n type_code = TypeCode(ord(classdesc.name[1]))\n\n if type_code in (TypeCode.TYPE_OBJECT, TypeCode.TYPE_ARRAY):\n for _ in range(size):\n _, res = self._read_and_exec_opcode(ident=ident + 1)\n log_debug(\"Object value: {0}\".format(res), ident)\n array.append(res)\n elif type_code == TypeCode.TYPE_BYTE:\n array = JavaByteArray(self.object_stream.read(size), classdesc)\n elif self.use_numpy_arrays and numpy is not None:\n array = numpy.fromfile(\n self.object_stream,\n dtype=NUMPY_TYPE_MAP[type_code],\n count=size,\n )\n else:\n for _ in range(size):\n res = self._read_value(type_code, ident)\n log_debug(\"Native value: {0}\".format(repr(res)), ident)\n array.append(res)\n\n return array", "def array_ctypes(self):\n return None", "def asArray( cls, value, typeCode=None ):\n return value", "def GetArray(self, idx):\n self.Proxy.UpdatePipeline()\n if not self.GetFieldData().GetArrayInformation(idx):\n return None\n if isinstance(idx, str):\n return ArrayInformation(self.Proxy, self, idx)\n elif idx >= len(self) or idx < 0:\n raise IndexError\n return ArrayInformation(self.Proxy, self, self.GetFieldData().GetArrayInformation(idx).GetName())", "def _subarray(self, fieldtype, name):\n address = self._buffer.fieldaddress(name)\n A = _rawffi.Array(fieldtype._ffishape_)\n return A.fromaddress(address, 1)", "def getData(self) :\n\n # If we haven't executed, execute.\n if not self.isExecuted :\n self.execute()\n\n # Return the array.\n return self.data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Passthrough method to view or reshape
def view(self, *shape):
    data_ = _view(self.data, *shape)
    return self.__class__(data_, self.format)
[ "def getViewMatrix( self):", "def viewManip(bottomLeft=bool, drawCompass=bool, size=\"string\", bottomRight=bool, zoomToFitScene=bool, compassAngle=float, visible=bool, topRight=bool, restoreCenter=bool, dragSnap=bool, levelCamera=bool, minOpacity=float, topLeft=bool, fitToView=bool):\n pass", "def test_Image_view():\n im = galsim.ImageI(25,25, wcs=galsim.AffineTransform(0.23,0.01,-0.02,0.22,\n galsim.PositionI(13,13)))\n im._fill(17)\n assert im.wcs == galsim.AffineTransform(0.23,0.01,-0.02,0.22, galsim.PositionI(13,13))\n assert im.bounds == galsim.BoundsI(1,25,1,25)\n assert im(11,19) == 17 # I'll keep editing this pixel to new values.\n do_pickle(im)\n\n # Test view with no arguments\n imv = im.view()\n assert imv.wcs == im.wcs\n assert imv.bounds == im.bounds\n imv.setValue(11,19, 20)\n assert imv(11,19) == 20\n assert im(11,19) == 20\n do_pickle(im)\n do_pickle(imv)\n\n # Test view with new origin\n imv = im.view(origin=(0,0))\n assert im.wcs == galsim.AffineTransform(0.23,0.01,-0.02,0.22, galsim.PositionI(13,13))\n assert imv.wcs == galsim.AffineTransform(0.23,0.01,-0.02,0.22, galsim.PositionI(12,12))\n assert im.bounds == galsim.BoundsI(1,25,1,25)\n assert imv.bounds == galsim.BoundsI(0,24,0,24)\n imv.setValue(10,18, 30)\n assert imv(10,18) == 30\n assert im(11,19) == 30\n imv2 = im.view()\n imv2.setOrigin(0,0)\n assert imv.bounds == imv2.bounds\n assert imv.wcs == imv2.wcs\n do_pickle(imv)\n do_pickle(imv2)\n\n # Test view with new center\n imv = im.view(center=(0,0))\n assert im.wcs == galsim.AffineTransform(0.23,0.01,-0.02,0.22, galsim.PositionI(13,13))\n assert imv.wcs == galsim.AffineTransform(0.23,0.01,-0.02,0.22, galsim.PositionI(0,0))\n assert im.bounds == galsim.BoundsI(1,25,1,25)\n assert imv.bounds == galsim.BoundsI(-12,12,-12,12)\n imv.setValue(-2,6, 40)\n assert imv(-2,6) == 40\n assert im(11,19) == 40\n imv2 = im.view()\n imv2.setCenter(0,0)\n assert imv.bounds == imv2.bounds\n assert imv.wcs == imv2.wcs\n with assert_raises(galsim.GalSimError):\n imv.scale # scale is invalid if wcs is not a PixelScale\n do_pickle(imv)\n do_pickle(imv2)\n\n # Test view with new scale\n imv = im.view(scale=0.17)\n assert im.wcs == galsim.AffineTransform(0.23,0.01,-0.02,0.22, galsim.PositionI(13,13))\n assert imv.wcs == galsim.PixelScale(0.17)\n assert imv.bounds == im.bounds\n imv.setValue(11,19, 50)\n assert imv(11,19) == 50\n assert im(11,19) == 50\n imv2 = im.view()\n with assert_raises(galsim.GalSimError):\n imv2.scale = 0.17 # Invalid if wcs is not PixelScale\n imv2.wcs = None\n imv2.scale = 0.17\n assert imv.bounds == imv2.bounds\n assert imv.wcs == imv2.wcs\n do_pickle(imv)\n do_pickle(imv2)\n\n # Test view with new wcs\n imv = im.view(wcs=galsim.JacobianWCS(0., 0.23, -0.23, 0.))\n assert im.wcs == galsim.AffineTransform(0.23,0.01,-0.02,0.22, galsim.PositionI(13,13))\n assert imv.wcs == galsim.JacobianWCS(0., 0.23, -0.23, 0.)\n assert imv.bounds == im.bounds\n imv.setValue(11,19, 60)\n assert imv(11,19) == 60\n assert im(11,19) == 60\n imv2 = im.view()\n imv2.wcs = galsim.JacobianWCS(0.,0.23,-0.23,0.)\n assert imv.bounds == imv2.bounds\n assert imv.wcs == imv2.wcs\n do_pickle(imv)\n do_pickle(imv2)\n\n # Go back to original value for that pixel and make sure all are still equal to 17\n im.setValue(11,19, 17)\n assert im.array.min() == 17\n assert im.array.max() == 17\n\n assert_raises(TypeError, im.view, origin=(0,0), center=(0,0))\n assert_raises(TypeError, im.view, scale=0.3, wcs=galsim.JacobianWCS(1.1, 0.1, 0.1, 1.))\n assert_raises(TypeError, im.view, 
scale=galsim.PixelScale(0.3))\n assert_raises(TypeError, im.view, wcs=0.3)", "def reshape_output(self, output, batch_size, set_size):\n\n output_sizes = output.size()\n # print('output_sizes:',output_sizes)\n reshaped = output.view(batch_size, set_size, *output_sizes[1:])\n return reshaped", "def genIm(self, size=[512, 512], views=[[0, -1, 0]], \n background=[1.0, 1.0, 1.0], projection=True,\n shading=True, mag=10, out='im', fh='test.tiff', \n zoom=1.0, az = 0, el=0,crop=False, cam=None):\n if not hasattr(self, 'actor'):\n self.addActor()\n # Generate a renderer window\n win = vtkRenWin()\n win.OffScreenRenderingOn()\n # Set the number of viewports\n win.setnumViewports(len(views))\n # Set the background colour\n win.setBackground(background)\n # Set camera projection \n win.setProjection(projection)\n win.SetSize(size[0], size[1])\n win.Modified()\n win.OffScreenRenderingOn()\n \n for i, view in enumerate(views):\n# win.addAxes([self.actor,], color=[0.0, 0.0, 0.0], viewport=i)\n win.setView(view, i)\n# win.setProjection(projection, viewport=i)\n win.renderActors([self.actor,], zoom=zoom)\n win.rens[0].GetActiveCamera().Azimuth(az)\n win.rens[0].GetActiveCamera().Elevation(el)\n if cam is not None:\n win.rens[0].SetActiveCamera(cam)\n win.Render()\n if out == 'im':\n im = win.getImage()\n if crop is True:\n mask = np.all(im == 1, axis=2)\n mask = ~np.all(mask, axis=1)\n im = im[mask, :, :]\n mask = np.all(im == 1, axis=2)\n mask = ~np.all(mask, axis=0)\n im = im[:, mask, :]\n return im, win\n elif out == 'fh':\n win.getScreenshot(fh, mag=mag)\n return", "def multi_view(fig, vert_coords, faces, morphometry_data):\n mesh_in_central_position = bv.brain_morphometry_view(fig, vert_coords, faces, morphometry_data)\n x, y, z = st.coords_a2s(vert_coords)\n\n # Create lateral view\n x1, y1, z1 = st.rotate_3D_coordinates_around_axes(x, y, z, st.deg2rad(90), 0, 0);\n mayavi_mesh_m1 = mlab.triangular_mesh(x1, y1, z1, faces, scalars=morphometry_data, color=(1, 0, 0))\n dt._print_mlab_view()\n\n x2, y2, z2 = st.rotate_3D_coordinates_around_axes(x, y, z, st.deg2rad(90), 0, 0);\n x2, y2, z2 = st.scale_3D_coordinates(x2, y2, z2, 1.5)\n # = rotate_3D_coordinates_around_axes(x, y, z, rotx, roty, rotz)\n # = scale_3D_coordinates(x, y, z, x_scale_factor, y_scale_factor=None, z_scale_factor=None)\n # = mirror_3D_coordinates_at_axis(x, y, z, axis, mirror_at_axis_coordinate=None)\n # = point_mirror_3D_coordinates(x, y, z, point_x, point_y, point_z):\n x2, y2, z2 = st.translate_3D_coordinates_along_axes(x, y, z, 200, 0, 0)\n mayavi_mesh_m2 = mlab.triangular_mesh(x2, y2, z2, faces, scalars=morphometry_data, color=(0, 0, 1))\n dt._print_mlab_view()\n meshes = [mayavi_mesh_m1, mayavi_mesh_m2]\n return meshes", "def asSubdSurfaceTransformed(*args, **kwargs):\n \n pass", "def reshape(self, cn, rows=None): # real signature unknown; restored from __doc__\n pass", "def shape(self):\n if hasattr(self.view, 'shape'):\n print(self.view.shape)\n else:\n print(\"No data view set.\")", "def verticalFieldOfView(*args, **kwargs):\n \n pass", "def render(view=False, preview=False):", "def from_view(cls, view):\n return cls(view.position, view.image, view.original)", "def to_prototypes_view(self, var_buffer: np.ndarray) -> np.ndarray:\n raise NotImplementedError(\"You should implement this!\")", "def _reshape(self, x: torch.FloatTensor) -> torch.FloatTensor:\n new_shape = x.size()[:-1] + (self.n_graph, self.hidden_size)\n x = x.view(*new_shape)\n\n return x.permute(0, 2, 1, 3)", "def reshape(self, dims: tuple[int, ...]) -> 
Series:", "def draw(self, view):\n super().draw()", "def _view_2d(\n self,\n figure_id=None,\n new_figure=False,\n image_view=True,\n render_lines=True,\n line_colour=\"r\",\n line_style=\"-\",\n line_width=1.0,\n render_markers=True,\n marker_style=\"o\",\n marker_size=5,\n marker_face_colour=\"k\",\n marker_edge_colour=\"k\",\n marker_edge_width=1.0,\n render_numbering=False,\n numbers_horizontal_align=\"center\",\n numbers_vertical_align=\"bottom\",\n numbers_font_name=\"sans-serif\",\n numbers_font_size=10,\n numbers_font_style=\"normal\",\n numbers_font_weight=\"normal\",\n numbers_font_colour=\"k\",\n render_axes=True,\n axes_font_name=\"sans-serif\",\n axes_font_size=10,\n axes_font_style=\"normal\",\n axes_font_weight=\"normal\",\n axes_x_limits=None,\n axes_y_limits=None,\n axes_x_ticks=None,\n axes_y_ticks=None,\n figure_size=(7, 7),\n label=None,\n **kwargs,\n ):\n import warnings\n\n warnings.warn(\n Warning(\n \"2D Viewing of Coloured TriMeshes is not \"\n \"supported, falling back to TriMesh viewing.\"\n )\n )\n return TriMesh._view_2d(\n self,\n figure_id=figure_id,\n new_figure=new_figure,\n image_view=image_view,\n render_lines=render_lines,\n line_colour=line_colour,\n line_style=line_style,\n line_width=line_width,\n render_markers=render_markers,\n marker_style=marker_style,\n marker_size=marker_size,\n marker_face_colour=marker_face_colour,\n marker_edge_colour=marker_edge_colour,\n marker_edge_width=marker_edge_width,\n render_numbering=render_numbering,\n numbers_horizontal_align=numbers_horizontal_align,\n numbers_vertical_align=numbers_vertical_align,\n numbers_font_name=numbers_font_name,\n numbers_font_size=numbers_font_size,\n numbers_font_style=numbers_font_style,\n numbers_font_weight=numbers_font_weight,\n numbers_font_colour=numbers_font_colour,\n render_axes=render_axes,\n axes_font_name=axes_font_name,\n axes_font_size=axes_font_size,\n axes_font_style=axes_font_style,\n axes_font_weight=axes_font_weight,\n axes_x_limits=axes_x_limits,\n axes_y_limits=axes_y_limits,\n axes_x_ticks=axes_x_ticks,\n axes_y_ticks=axes_y_ticks,\n figure_size=figure_size,\n label=label,\n )", "def _rescale(self, samp, **kwargs):\n \"\"\"\n Here is where the subclass where overwrite rescale method\n \"\"\"\n return samp", "def horizontalFieldOfView(*args, **kwargs):\n \n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Registers all connecting variables associated with this component to the gekko model. kwargs
def register_connecting(self, **kwargs):
    # Aliases
    a = self.aqua
    MV = self.m.MV
    SV = self.m.Var
    FV = self.m.FV

    # Initial Conditions
    T0 = kwargs.get('T0', 25)
    I0 = kwargs.get('I0', 5e6)
    N0 = kwargs.get('N0', 0)
    ppb0 = kwargs.get('ppb0', 1)
    # beds = kwargs.get('beds', [(0, 30)])
    # wG0 = kwargs.get('wG0', 1/3)
    # wS0 = kwargs.get('wS0', 2/3)
    # w0 = ppb0 * len(beds) * (wG0 + wS0)
    w0 = 0  # weird gekko bug, instability if not set to 0

    # Register connecting
    a.register_connecting('T', MV, T0)
    a.register_connecting('I', MV, I0)
    a.register_connecting('N', MV, N0)
    a.register_connecting('ppb', FV, ppb0)
    # a.ppb = self.m.Param(value=ppb0)

    a.register_connecting('w', SV, w0, lb=0)
    a.register_connecting('dNup', SV, 0, lb=0)
[ "def save_kwargs(self, kwargs: dict) -> None:\n d = kwargs.copy()\n d[\"eps\"] = self.eps\n d[\"torch_dtype\"] = self.torch_dtype\n d[\"importance_sampler\"] = self.importance_nested_sampler\n save_to_json(d, os.path.join(self.output, \"config.json\"))", "def __init__(self):\r\n self.glacierModelDict = {}\r\n self.nxGraphDict = {}\r\n self.model_results ={}", "def add_variables(self, new_variables):\n super().add_variables(new_variables)\n self.model_mapping = get_model_mapping(self, self.joint_model)", "def _set_up_model(self) -> None:\n self._add_vars_x_i_in_theta_i()\n self._add_vars_z_i()\n self._add_linear_cons()\n self._add_objective()", "def solver_kwargs(self, value):\n self._solver_kwargs = value", "def __init__(self):\n self.G = nx.MultiDiGraph()\n self.registry = {}\n # self.load_biothings()\n self.all_edges_info = self.G.edges(data=True)\n self.all_labels = {d[-1]['label'] for d in self.all_edges_info}\n self.all_inputs = {d[-1]['input_type'] for d in self.all_edges_info}\n self.all_outputs = {d[-1]['output_type'] for d in self.all_edges_info}", "def init_gm(self):\n self.data_x = self.tik_instance.Tensor(self.y_grad_dtype,\n [self.y_grad_gm_size],\n name=\"data_x\",\n scope=tik.scope_gm)\n self.data_y_grad = self.tik_instance.Tensor(self.y_grad_dtype,\n [self.y_grad_gm_size],\n name=\"data_y_grad\",\n scope=tik.scope_gm)\n self.data_weight = self.tik_instance.Tensor(self.weight_dtype,\n [self.weight_gm_size],\n name=\"data_weight\",\n scope=tik.scope_gm)\n self.data_target = self.tik_instance.Tensor(self.target_dtype,\n [self.target_gm_size],\n name=\"data_target\",\n scope=tik.scope_gm)\n self.data_total_weight = self.tik_instance.Tensor(\n self.x_dtype, [self.data_total_weight_size],\n name=\"data_total_weight\", scope=tik.scope_gm)\n self.output = self.tik_instance.Tensor(self.x_dtype,\n [self.output_gm_size],\n name=\"output\",\n scope=tik.scope_gm)", "def _handle_kwargs(self, **kwargs):\n self.__dict__.update(kwargs)", "def _init_kwargs(self, kwargs, kws):\n for k in kws:\n if k in kwargs:\n setattr(self, k, kwargs[k])", "def _setup(self):\n self.graph_convolution_1 = GCNConv(self.number_of_features, self.args.first_gcn_dimensions)\n self.graph_convolution_2 = GCNConv(self.args.first_gcn_dimensions, self.args.second_gcn_dimensions)\n self.fully_connected_1 = torch.nn.Linear(self.args.second_gcn_dimensions, self.args.first_dense_neurons)\n self.fully_connected_2 = torch.nn.Linear(self.args.first_dense_neurons, self.args.second_dense_neurons)", "def set_opts(self, **kwargs):\n self._opts.update(kwargs)\n for kwarg in kwargs:\n if kwarg == 'x0':\n self.x0 = kwargs[kwarg]\n self._changed[kwarg] = True", "def set_params(self, **kwargs):\n _api.warn_external(\n \"'set_params()' not defined for locator of type \" +\n str(type(self)))", "def dnn_registerLayer(*args, **kwargs):\n ...", "def bind(self):\n\n # binds important computation engines\n self.nm.bind(self.beads, self.ensemble)\n self.forces.bind(self.beads, self.cell, self.flist)\n self.ensemble.bind(self.beads, self.nm, self.cell, self.forces, self.prng)\n self.init.init_stage2(self)\n\n # binds output management objects\n self.properties.bind(self)\n self.trajs.bind(self)\n for o in self.outputs:\n o.bind(self)\n\n self.chk = CheckpointOutput(\"RESTART\", 1, True, 0)\n self.chk.bind(self)\n\n # registers the softexit routine\n softexit.register(self.softexit)", "def create_connections(self):\n \n self.connect(self.cancel_button, SIGNAL('clicked()'), self.close_dialog) \n self.connect(self.import_button, 
SIGNAL('clicked()'), self.import_alembic_dialog)\n self.combo_box.currentIndexChanged.connect(self.on_comboBox_changed)\n self.sequence_list_wdg.currentItemChanged.connect(self.on_sequenceList_changed)\n self.shots_list_wdg.currentItemChanged.connect(self.on_shotList_changed)", "def new_comm_kwargs(cls, *args, **kwargs):\n kwargs.setdefault('address', 'address')\n return args, kwargs", "def __init__(self):\r\n super().__init__()\r\n self._coords = dict()\r\n self._vertexLabels = dict()", "def inject_network_info(self, *args, **kwargs):\n pass", "def populate_model(self, **kwargs):\n T = self._opts['T']\n nu = self._dims['u']\n if self._changed['T']: # first time def or change of horizon\n # make sure to delete old optimization variables if they exist\n self._removeOld()\n # define optimization variables for input\n u = {}\n for i in range(nu):\n for t in range(T):\n u[t, i] = self._model.addVar(vtype=GRB.CONTINUOUS,\n name='u[{},{}]'.format(t, i))\n # update the model so it knows the variables\n self._model.update()\n # add control constraints\n umin, umax = self._opts['umin'], self._opts['umax']\n has_umin, has_umax = ~np.isnan(umin), ~np.isnan(umax)\n for i in range(nu):\n for t in np.arange(has_umin.shape[0])[has_umin[:, i]]:\n u[t, i].setAttr(GRB.Attr.LB, umin[t, i])\n for t in np.arange(has_umax.shape[0])[has_umax[:, i]]:\n u[t, i].setAttr(GRB.Attr.UB, umax[t, i])\n self._model.update()\n # indicate that model is up to date\n for name in ['T', 'umin', 'umax']:\n self._changed[name] = False\n # make variables accessible as object variables\n self.u = u\n else:\n # change input constraints\n if self._changed['umin']:\n umin = self._opts['umin']\n for i in range(nu):\n for t in range(T):\n self.u[t, i].setAttr(GRB.Attr.LB, umin[t, i])\n self._changed['umin'] = False\n if self._changed['umax']:\n umax = self._opts['umax']\n for i in range(nu):\n for t in range(T):\n self.u[t, i].setAttr(GRB.Attr.UB, umax[t, i])\n self._changed['umax'] = False\n # finally update and include all changes\n self._model.update()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Registers all equations and intermediates associated with this component to the gekko model. kwargs
def register_equations(self, **kwargs):
    # -------
    # Aliases
    # -------
    m = self.m
    a = self.aqua

    # ----------
    # Parameters
    # ----------
    # Growing bed definition
    beds = kwargs.get('beds', [(0, 30)])

    # --------------------
    # Connecting Variables
    # --------------------
    T = a.T
    I = a.I  # noqa
    N = a.N
    ppb = a.ppb
    w = a.w
    dNup = a.dNup

    # ---------------------------
    # Equations and Intermediates
    # ---------------------------
    time = m.SV(value=0)
    m.timevar = time
    m.Equation(time.dt() == 1)

    bed_models = [
        (PlantBed(), plant_day, harvest_day)
        for plant_day, harvest_day in beds
    ]
    bed_vars = [
        bed.register_equations(
            m, plant_day, harvest_day, time, T, I, N, **kwargs
        )
        for bed, plant_day, harvest_day in bed_models
    ]

    m.Equation(w == ppb * sum([var[0] for var in bed_vars]))
    m.Equation(dNup == ppb * sum([var[1] for var in bed_vars]))
[ "def _set_up_model(self) -> None:\n self._add_vars_x_i_in_theta_i()\n self._add_vars_z_i()\n self._add_linear_cons()\n self._add_objective()", "def setup(self):\n # Convert initial params into matlab array\n self.initial_params_mat = matlab.double(list(self.initial_params))\n self.eng.workspace['x_data'] = matlab.double(self.data_x.tolist())\n self.eng.workspace['y_data'] = matlab.double(self.data_y.tolist())\n\n self.eng.evalc('global e_data')\n if self.data_e is not None:\n self.eng.workspace['e_data'] = matlab.double(self.data_e.tolist())\n else:\n self.eng.workspace['e_data'] = matlab.double([])\n\n eval_path = os.path.join(os.path.dirname(__file__),\n 'matlab_curve_controller')\n self.eng.addpath(eval_path)\n\n if self.value_ranges is not None:\n lb, ub = zip(*self.value_ranges)\n self.eng.workspace['lower_bounds'] = matlab.double(lb)\n self.eng.workspace['upper_bounds'] = matlab.double(ub)\n else:\n # if no bounds are set, then pass empty arrays to\n # fitoptions function\n self.eng.workspace['lower_bounds'] = matlab.double([])\n self.eng.workspace['upper_bounds'] = matlab.double([])\n\n params = self.problem.param_names\n self.eng.workspace['init_params'] = self.initial_params_mat\n self.eng.evalc(\"opts = fitoptions('StartPoint', init_params,\"\n \"'Method', 'NonLinearLeastSquares',\"\n f\"'Algorithm', '{self.minimizer}',\"\n \"'Lower', lower_bounds,\"\n \"'Upper', upper_bounds)\")\n\n self.eng.evalc(f\"ft = fittype(@({', '.join(params)}, x, y)\"\n f\"double(eval_r(x, y, {', '.join(params)}))',\"\n \"'options', opts, 'independent', {'x', 'y'},\"\n \"'dependent', 'z')\")", "def __init__(self):\r\n self.glacierModelDict = {}\r\n self.nxGraphDict = {}\r\n self.model_results ={}", "def register_connecting(self, **kwargs):\n # Aliases\n a = self.aqua\n MV = self.m.MV\n SV = self.m.Var\n FV = self.m.FV\n\n # Initial Conditions\n T0 = kwargs.get('T0', 25)\n I0 = kwargs.get('I0', 5e6)\n N0 = kwargs.get('N0', 0)\n ppb0 = kwargs.get('ppb0', 1)\n # beds = kwargs.get('beds', [(0, 30)])\n # wG0 = kwargs.get('wG0', 1/3)\n # wS0 = kwargs.get('wS0', 2/3)\n # w0 = ppb0 * len(beds) * (wG0 + wS0)\n w0 = 0 # weird gekko bug, instability if not set to 0\n\n # Register connecting\n a.register_connecting('T', MV, T0)\n a.register_connecting('I', MV, I0)\n a.register_connecting('N', MV, N0)\n a.register_connecting('ppb', FV, ppb0)\n # a.ppb = self.m.Param(value=ppb0)\n\n a.register_connecting('w', SV, w0, lb=0)\n a.register_connecting('dNup', SV, 0, lb=0)", "def populate_model(self, **kwargs):\n T = self._opts['T']\n nu = self._dims['u']\n if self._changed['T']: # first time def or change of horizon\n # make sure to delete old optimization variables if they exist\n self._removeOld()\n # define optimization variables for input\n u = {}\n for i in range(nu):\n for t in range(T):\n u[t, i] = self._model.addVar(vtype=GRB.CONTINUOUS,\n name='u[{},{}]'.format(t, i))\n # update the model so it knows the variables\n self._model.update()\n # add control constraints\n umin, umax = self._opts['umin'], self._opts['umax']\n has_umin, has_umax = ~np.isnan(umin), ~np.isnan(umax)\n for i in range(nu):\n for t in np.arange(has_umin.shape[0])[has_umin[:, i]]:\n u[t, i].setAttr(GRB.Attr.LB, umin[t, i])\n for t in np.arange(has_umax.shape[0])[has_umax[:, i]]:\n u[t, i].setAttr(GRB.Attr.UB, umax[t, i])\n self._model.update()\n # indicate that model is up to date\n for name in ['T', 'umin', 'umax']:\n self._changed[name] = False\n # make variables accessible as object variables\n self.u = u\n else:\n # change input 
constraints\n if self._changed['umin']:\n umin = self._opts['umin']\n for i in range(nu):\n for t in range(T):\n self.u[t, i].setAttr(GRB.Attr.LB, umin[t, i])\n self._changed['umin'] = False\n if self._changed['umax']:\n umax = self._opts['umax']\n for i in range(nu):\n for t in range(T):\n self.u[t, i].setAttr(GRB.Attr.UB, umax[t, i])\n self._changed['umax'] = False\n # finally update and include all changes\n self._model.update()", "def init_gm(self):\n self.data_x = self.tik_instance.Tensor(self.y_grad_dtype,\n [self.y_grad_gm_size],\n name=\"data_x\",\n scope=tik.scope_gm)\n self.data_y_grad = self.tik_instance.Tensor(self.y_grad_dtype,\n [self.y_grad_gm_size],\n name=\"data_y_grad\",\n scope=tik.scope_gm)\n self.data_weight = self.tik_instance.Tensor(self.weight_dtype,\n [self.weight_gm_size],\n name=\"data_weight\",\n scope=tik.scope_gm)\n self.data_target = self.tik_instance.Tensor(self.target_dtype,\n [self.target_gm_size],\n name=\"data_target\",\n scope=tik.scope_gm)\n self.data_total_weight = self.tik_instance.Tensor(\n self.x_dtype, [self.data_total_weight_size],\n name=\"data_total_weight\", scope=tik.scope_gm)\n self.output = self.tik_instance.Tensor(self.x_dtype,\n [self.output_gm_size],\n name=\"output\",\n scope=tik.scope_gm)", "def initialize_computational_space(self, **kwargs):\n x_start = kwargs.pop('x_start', 0)\n x_end = kwargs.pop('x_end', 1)\n x_step = kwargs.pop('x_step', 0.1)\n\n y_start = kwargs.pop('y_start', 0)\n y_end = kwargs.pop('y_end', 1)\n z_start = kwargs.pop('z_start', 0)\n z_end = kwargs.pop('z_end', 1)\n yz_step = kwargs.pop('yz_step', 0.1)\n\n self.x_range = np.arange(x_start, x_end + x_step, x_step)\n self.y_range = np.arange(y_start, y_end + yz_step, yz_step)\n self.z_range = np.arange(z_start, z_end + yz_step, yz_step)", "def bind(self):\n\n # binds important computation engines\n self.nm.bind(self.beads, self.ensemble)\n self.forces.bind(self.beads, self.cell, self.flist)\n self.ensemble.bind(self.beads, self.nm, self.cell, self.forces, self.prng)\n self.init.init_stage2(self)\n\n # binds output management objects\n self.properties.bind(self)\n self.trajs.bind(self)\n for o in self.outputs:\n o.bind(self)\n\n self.chk = CheckpointOutput(\"RESTART\", 1, True, 0)\n self.chk.bind(self)\n\n # registers the softexit routine\n softexit.register(self.softexit)", "def gemmEquations(self, node, makeEquations): \n nodeName = node.output[0]\n \n # Get inputs\n inputName1, inputName2, inputName3 = node.input\n shape1 = self.shapeMap[inputName1]\n shape2 = self.shapeMap[inputName2]\n shape3 = self.shapeMap[inputName3]\n input1 = self.varMap[inputName1]\n input2 = self.constantMap[inputName2]\n input3 = self.constantMap[inputName3]\n \n self.shapeMap[nodeName] = self.shapeMap[inputName3]\n if makeEquations:\n \n # Pad shape if needed\n if len(shape1) == 1:\n shape1 = [1] + shape1\n input1 = input1.reshape(shape1)\n elif shape1[1] == 1:\n shape1 = shape1[::-1]\n input1 = input1.reshape(shape1)\n if len(shape3) == 1:\n shape3 = [1] + shape3\n input3 = input3.reshape(shape3)\n if shape1[0] != shape3[0]:\n shape3 = shape3[::-1]\n input3 = input3.reshape(shape3)\n\n # Assume that first input is variables, second is Matrix for MatMul, and third is bias addition\n assert shape1[-1] == shape2[0]\n assert shape1[0] == shape3[0]\n assert shape2[1] == shape3[1]\n\n # Create new variables\n self.shapeMap[nodeName] = self.shapeMap[node.input[2]]\n outputVariables = self.makeNewVariables(nodeName)\n outputVariables = outputVariables.reshape(shape3)\n # Generate equations\n 
for i in range(shape1[0]):\n for j in range(shape2[1]):\n e = MarabouUtils.Equation()\n for k in range(shape1[1]):\n e.addAddend(input2[k][j], input1[i][k])\n\n # Put output variable as the last addend last\n e.addAddend(-1, outputVariables[i][j])\n e.setScalar(-input3[i][j])\n self.addEquation(e)", "def __init__(self, model, **kwargs):\n super(QuadraticUtility, self).__init__(model)\n self._opts = {}\n self._dims = {'u': 1}\n self._changed = {'T': True}", "def geometry_optimization(self):\n input = self.sample_qe_inputs\n input[\"control_params\"][\"calculation\"] = \"'vc-relax'\"\n return input", "def gate(self, *args, **kwargs):\n for i in self.fcmdict:\n self.fcmdict[i].gate(*args, **kwargs)\n return self", "def calc_inputs(self):\n \n # Initialize input_dict\n input_dict = {}\n\n # Add subset inputs\n for subset in self.subsets:\n subset.calc_inputs(input_dict)\n\n # Remove unused subset inputs\n del input_dict['transform']\n del input_dict['system']\n\n # Add calculation-specific inputs\n input_dict['distance'] = self.displacementdistance\n input_dict['symprec'] = self.symmetryprecision\n input_dict['strainrange'] = self.strainrange\n input_dict['numstrains'] = self.numstrains\n input_dict['a_mult'] = self.system_mods.a_mults[1] - self.system_mods.a_mults[0]\n input_dict['b_mult'] = self.system_mods.b_mults[1] - self.system_mods.b_mults[0]\n input_dict['c_mult'] = self.system_mods.c_mults[1] - self.system_mods.c_mults[0]\n\n # Return input_dict\n return input_dict", "def geodesic_equation(self):\n raise NotImplementedError(\n 'The geodesic equation tensor is not implemented.')", "def populate_model(self, v=None, **kwargs):\n T = self._opts['T']\n nu, nx = self._dims['u'], self._dims['x']\n if self._changed['T']: # first time def or change of horizon\n # make sure to delete old optimization variables if they exist\n self._removeOld()\n # define optimization variables for input and state\n u, x = {}, {}\n for i in range(nu):\n for t in range(T):\n u[t, i] = self._model.addVar(vtype=GRB.CONTINUOUS,\n name='u[{},{}]'.format(t, i))\n for i in range(nx):\n for t in range(T+1):\n x[t, i] = self._model.addVar(vtype=GRB.CONTINUOUS,\n name='x[{},{}]'.format(t, i))\n # update the model so it knows the variables\n self._model.update()\n # add control constraints\n umin, umax = self._opts['umin'], self._opts['umax']\n has_umin, has_umax = ~np.isnan(umin), ~np.isnan(umax)\n for i in range(nu):\n for t in np.arange(has_umin.shape[0])[has_umin[:, i]]:\n u[t, i].setAttr(GRB.Attr.LB, umin[t, i])\n for t in np.arange(has_umax.shape[0])[has_umax[:, i]]:\n u[t, i].setAttr(GRB.Attr.UB, umax[t, i])\n # update intitial state, if provided\n if 'x0' in kwargs:\n self.x0 = kwargs['x0']\n # add constraint on initial state\n self.x0con = {}\n for i in range(nx):\n self.x0con[0, i] = self._model.addConstr(\n lhs=x[0, i], sense=GRB.EQUAL, rhs=self.x0[i],\n name='dyn[0,{}]'.format(i))\n # add system dynamics\n A, B = self._mats['A'], self._mats['B']\n if ('E' in self._mats):\n w = np.inner(v, self._mats['E'])\n else:\n w = np.zeros((T, nx))\n # dynamic evolution of state and output\n self.dyncon = {}\n for t in range(T):\n for i in range(nx):\n # put w on RHS to speed up constraint updates\n self.dyncon[t, i] = self._model.addConstr(\n lhs=(x[t+1, i] - quicksum([A[i, k] * x[t, k]\n for k in range(nx)]) -\n quicksum([B[i, k] * u[t, k] for k in range(nu)])),\n sense=GRB.EQUAL, rhs=w[t, i],\n name='dyn[{},{}]'.format(t+1, i))\n self._model.update()\n # add state constraints\n xmin, xmax = self._opts['xmin'], 
self._opts['xmax']\n has_xmin, has_xmax = ~np.isnan(xmin), ~np.isnan(xmax)\n for i in range(nx):\n for t in np.arange(has_xmin.shape[0])[has_xmin[:, i]]:\n x[t+1, i].setAttr(GRB.Attr.LB, xmin[t, i])\n for t in np.arange(has_xmax.shape[0])[has_xmax[:, i]]:\n x[t+1, i].setAttr(GRB.Attr.UB, xmax[t, i])\n self._model.update()\n # indicate that model is up to date\n for name in ['T', 'x0', 'umin', 'umax', 'xmin', 'xmax', 'v']:\n self._changed[name] = False\n # make variables accessible as object variables\n self.u, self.x, self.v = u, x, v\n else:\n # change input constraints\n if self._changed['umin']:\n umin = self._opts['umin']\n for i in range(nu):\n for t in range(T):\n self.u[t, i].setAttr(GRB.Attr.LB, umin[t, i])\n self._changed['umin'] = False\n if self._changed['umax']:\n umax = self._opts['umax']\n for i in range(nu):\n for t in range(T):\n self.u[t, i].setAttr(GRB.Attr.UB, umax[t, i])\n self._changed['umax'] = False\n # change state constraints\n if self._changed['xmin']:\n xmin = self._opts['xmin']\n # xmin[np.isnan(xmin)] = - np.Inf\n for i in range(nx):\n for t in range(T):\n self.x[t+1, i].setAttr(GRB.Attr.LB, xmin[t, i])\n self._changed['xmin'] = False\n if self._changed['xmax']:\n xmax = self._opts['xmax']\n # xmax[np.isnan(xmax)] = np.Inf\n for i in range(nx):\n for t in range(T):\n self.x[t+1, i].setAttr(GRB.Attr.UB, xmax[t, i])\n self._changed['xmax'] = False\n # change initial state\n if self._changed['x0']:\n for i in range(nx):\n self._model.getConstrByName('dyn[0,{}]'.format(i)).setAttr(\n GRB.Attr.RHS, self.x0[i])\n self._changed['x0'] = False\n # change effect of disturbance vector on dynamics (if any)\n if v is not None:\n if not np.all(v == self.v):\n self.v = v\n w = np.inner(v, self._mats['E'])\n for i in range(nx):\n for t in range(T):\n self._model.getConstrByName(\n 'dyn[{},{}]'.format(t+1, i)).setAttr(\n GRB.Attr.RHS, w[t, i])\n # finally update and include all changes\n self._model.update()", "def __init_extra__(self):\n M = self.realization_of().M()\n category = self.realization_of()._category\n # This changes Monomial into Hazewinkel Lambda\n M.module_morphism(self._from_Monomial_on_basis,\n codomain = self, category = category\n ).register_as_coercion()\n # This changes Hazewinkel Lambda into Monomial\n self.module_morphism(self._to_Monomial_on_basis,\n codomain = M, category = category\n ).register_as_coercion()\n\n # cache for the coordinates of the elements\n # of the monomial basis with respect to the HWL basis\n self._M_to_self_cache = {}\n # cache for the coordinates of the elements\n # of the HWL basis with respect to the monomial basis\n self._M_from_self_cache = {}\n # cache for transition matrices which contain the coordinates of\n # the elements of the monomial basis with respect to the HWL basis\n self._M_transition_matrices = {}\n # cache for transition matrices which contain the coordinates of\n # the elements of the HWL basis with respect to the monomial basis\n self._M_inverse_transition_matrices = {}", "def _addInputs(self):\n\n self.inputs = {}\n inputs = _getInputs(self.region)\n for name, spec in inputs.items():\n description = _getDescription(spec)\n elementCount = _getElementCount(spec)\n if elementCount > 0:\n # Skipping this check because it might be expensive.\n if True: # self.region.getSchema().isInputConnected(name):\n\n if elementCount == 1:\n self.add_trait(name, Float(label=name, desc=description))\n else:\n self.add_trait(name, List(label=name, desc=description))\n # Add traits to display the max outputs as well\n 
self.add_trait(name+\"_maxIndices\", Str(label=name+\"_maxIndices\", desc=description))\n self.add_trait(name+\"_maxValues\", Str(label=name+\"_maxValues\", desc=description))\n self.add_trait(name+\"_numNonZeros\", Str(label=name+\"_numNonZeros\", desc=description))\n # Store the Spec in the dictionary of inputs\n self.inputs[name] = spec\n if self.pcaBasis:\n # Add another trait for the projected input\n self.add_trait('PCA', List())\n self.add_trait('PCA_diff_with_previous_iteration', List())", "def __init__(self, model, A, B, **kwargs):\n super(LinearSystem, self).__init__(model)\n self._opts = {}\n self._mats = {'A': np.asarray(A), 'B': np.asarray(B)}\n if 'C' in kwargs:\n self._mats['C'] = np.asarray(kwargs['C'])\n else:\n self._mats['C'] = np.eye(self._mats['A'].shape[1])\n self._dims = {'x': self._mats['A'].shape[1],\n 'u': self._mats['B'].shape[1],\n 'y': self._mats['C'].shape[0]}\n if 'D' in kwargs:\n self._mats['D'] = np.asarray(kwargs['D'])\n else:\n self._mats['D'] = np.zeros((self._dims['y'], self._dims['u']))\n if 'E' in kwargs:\n self._mats['E'] = np.asarray(kwargs['E'])\n self._dims['v'] = self._mats['E'].shape[1]\n if 'x0' in kwargs:\n self.x0 = kwargs['x0']\n self._changed = {'T': True}", "def set_regparams(kwargs):\n\n # Initialize regularization parameter values\n w,b = 0,0\n\n # Get new regularization parameters from keywords\n temp_kwargs = kwargs.copy()\n for name,value in temp_kwargs.iteritems():\n w = kwargs.pop(\"reg_param_w\",0)\n b = kwargs.pop(\"reg_param_b\",0)\n if kwargs:\n raise TypeError(\"Unknown keyword arguments to error function\")\n\n # Return regularization parameters\n return (w,b)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new HIT on AMT.
def create_hit(hit_options):
    options = settings.AMT_DEFAULT_HIT_OPTIONS
    options.update(hit_options)
    scheme = 'https' if options['use_https'] else 'http'

    from interface import AMT_INTERFACE
    path = AMT_INTERFACE.get_assignment_url()
    url = (scheme + '://' + settings.PUBLIC_DNS + ':8002' + path
           if settings.HAVE_PUBLIC_IP
           else scheme + '://' + settings.AMT_CALLBACK_HOST + path)

    question = ExternalQuestion(
        external_url=url,
        frame_height=options['frame_height'])

    conn = get_amt_connection(options['sandbox'])
    try:
        create_response = conn.create_hit(
            question=question,
            title=options['title'],
            description=options['description'],
            reward=Price(amount=options['reward']),
            duration=timedelta(minutes=options['duration']),
            max_assignments=options['num_responses'],
            approval_delay=0)
    except MTurkRequestError:
        logger.debug(traceback.format_exc())
        raise AMTException(
            """
            Could not reach Amazon Mechanical Turk.
            Check that you are using https mode, and defined a valid assignment.
            Details of the exception have been logged to the ampcrowd server.
            """
        )
    return create_response[0].HITId
[ "def create_hit(**kwargs):\n response = objective_turk.client().create_hit(**kwargs)\n logger.debug(response)\n #pylint: disable=protected-access\n return objective_turk.Hit._new_from_response(response['HIT'])", "def create_hit_with_hit_type(**kwargs):\n if 'HITTypeId' not in kwargs:\n raise ValueError('missing required argument HITTypeId')\n elif 'Question' not in kwargs:\n raise ValueError('missing required argument Question')\n elif 'MaxAssignments' not in kwargs:\n raise ValueError('missing required argument MaxAssignments')\n\n hit_type = kwargs['HITTypeId']\n logger.info(\n 'creating HIT using HITTypeId %s. Title, Description, Reward, and Keywords from calling script will be ignored.',\n hit_type,\n )\n response = objective_turk.client().create_hit_with_hit_type(**kwargs)\n logger.debug(response)\n #pylint: disable=protected-access\n return objective_turk.Hit._new_from_response(response['HIT'])", "def create_hit_with_hit_type(HITTypeId=None, MaxAssignments=None, LifetimeInSeconds=None, Question=None, RequesterAnnotation=None, UniqueRequestToken=None, AssignmentReviewPolicy=None, HITReviewPolicy=None, HITLayoutId=None, HITLayoutParameters=None):\n pass", "def create_hit(self, **kwargs):\n try:\n # print(self.client)\n response = self.client.create_hit(**kwargs)\n return response\n except ClientError as e:\n print(e)\n return None", "def create_hit_with_hit_type(\n client: MTurkClient,\n frame_height: int,\n page_url: str,\n hit_type_id: str,\n num_assignments: int = 1,\n lifetime_in_seconds: int = 60 * 60 * 24 * 31,\n) -> Tuple[str, str, Dict[str, Any]]:\n page_url = page_url.replace(\"&\", \"&amp;\")\n amazon_ext_url = (\n \"http://mechanicalturk.amazonaws.com/\"\n \"AWSMechanicalTurkDataSchemas/2006-07-14/ExternalQuestion.xsd\"\n )\n question_data_structure = (\n '<ExternalQuestion xmlns=\"{}\">'\n \"<ExternalURL>{}</ExternalURL>\" # noqa: E131\n \"<FrameHeight>{}</FrameHeight>\"\n \"</ExternalQuestion>\"\n \"\".format(amazon_ext_url, page_url, frame_height)\n )\n\n is_sandbox = client_is_sandbox(client)\n\n # Create the HIT\n response = client.create_hit_with_hit_type(\n HITTypeId=hit_type_id,\n MaxAssignments=num_assignments,\n LifetimeInSeconds=lifetime_in_seconds,\n Question=question_data_structure,\n )\n\n # The response included several fields that will be helpful later\n hit_type_id = response[\"HIT\"][\"HITTypeId\"]\n hit_id = response[\"HIT\"][\"HITId\"]\n\n # Construct the hit URL\n url_target = \"workersandbox\"\n if not is_sandbox:\n url_target = \"www\"\n hit_link = \"https://{}.mturk.com/mturk/preview?groupId={}\".format(url_target, hit_type_id)\n return hit_link, hit_id, response", "def postHITs(num_hits, hit_type, mt_conn):\n \n protos = getIncompleteProtos(hit_type)\n \n if len(protos) <= 0:\n print(\"Warning: Retainer starting without any types of HIT to post\")\n return\n proto = protos[0]\n \n qual_arr = []\n if proto.worker_locale != '' and proto.worker_locale is not None:\n qual_arr.append(LocaleRequirement('EqualTo', proto.worker_locale))\n if proto.approval_rating > 0:\n qual_arr.append(PercentAssignmentsApprovedRequirement('GreaterThan', proto.approval_rating))\n \n quals = Qualifications(qual_arr)\n \n eh = ExternalHit(title=proto.title,\n description=proto.description,\n keywords=proto.keywords,\n url=proto.url,\n frame_height=1200,\n reward_as_usd_float=float(proto.reward),\n assignment_duration=proto.assignment_duration,\n lifetime=proto.lifetime,\n max_assignments=proto.max_assignments,\n qualifications=quals,\n 
auto_approval_delay=proto.auto_approval_delay) # 1 day\n \n \n\n for i in range(num_hits):\n try:\n turk_hit = eh.post(mt_conn)\n print \"Posted HIT ID \" + turk_hit.HITId\n \n django_hit = Hit(proto_id = proto.id,\n hit_id=turk_hit.HITId,\n hit_type_id = proto.hit_type_id, \n title=proto.title, \n description=proto.description, \n keywords=proto.keywords, \n url=proto.url, \n reward=proto.reward, \n assignment_duration=proto.assignment_duration, \n lifetime=proto.lifetime,\n max_assignments=proto.max_assignments,\n auto_approval_delay=proto.auto_approval_delay,\n worker_locale=proto.worker_locale,\n approval_rating=proto.approval_rating,\n retainertime=proto.retainertime)\n django_hit.save()\n \n except Exception, e:\n print \"Got exception posting HIT:\\n\" + str(e)", "def create_attempt(self):\n data = {'exam_id': self.course_data.exam_id, 'start_clock': True}\n response = self.client.post(self.attempt_api_path, data=data, headers=self.post_headers)\n response_data = json.loads(response.text)\n self.attempt_id = response_data.get('exam_attempt_id')", "def launch_hit(self, template_location, input_data, reward=0,\n frame_height=9000, title=None, description=None,\n keywords=None, duration=900, max_assignments=1,\n country='US', hits_approved=10000, lifetime=604800,\n percent_approved=95):\n if self.sandbox:\n percent_approved = 0\n hits_approved = 0\n hit_properties = {'Title': title,\n 'Description': description,\n 'Keywords': keywords,\n 'MaxAssignments': max_assignments,\n 'LifetimeInSeconds': lifetime,\n 'AssignmentDurationInSeconds': duration,\n 'QualificationRequirements': [\n {\n 'QualificationTypeId': '00000000000000000040',\n 'Comparator': 'GreaterThanOrEqualTo',\n 'IntegerValues': [hits_approved]\n },\n {\n 'QualificationTypeId': '00000000000000000071',\n 'Comparator': 'EqualTo',\n 'LocaleValues': [\n {'Country': country},\n ],\n },\n {\n 'QualificationTypeId': '000000000000000000L0',\n 'Comparator': 'GreaterThanOrEqualTo',\n 'IntegerValues': [percent_approved],\n }\n ],\n 'Reward': str(reward)}\n\n # Setup HTML Question.\n env = self.get_jinja_env()\n template = env.get_template(template_location)\n template_params = {'input': json.dumps(input_data)}\n html = template.render(template_params)\n html_question = self.create_html_question(html, frame_height)\n\n hit_properties['Question'] = html_question\n\n hit = self.mtc.create_hit(**hit_properties)\n return hit", "def newTrialHit(self, **attrlinks):\n return TrialHit(self, **attrlinks)", "def Create(self, request):\n test_plan = mtt_messages.Convert(\n request, ndb_models.TestPlan, from_cls=mtt_messages.TestPlan)\n _ValidateTestPlan(test_plan)\n test_plan.key = None\n test_plan.put()\n test_scheduler.ScheduleTestPlanCronJob(test_plan.key.id())\n return mtt_messages.Convert(test_plan, mtt_messages.TestPlan)", "def generatecampaign(id):\n create_campaign_hits(id)\n flash(\"Mechanical Turk campaign created!\")\n return redirect(url_for('listcampaigns'))", "def newExperimentHit(self, **attrlinks):\n return ExperimentHit(self, **attrlinks)", "def create_meeting(self, incident):\n\n z = Zoom().create(incident.report, incident.report)\n meeting = self.create(\n incident=incident, weblink=z[\"weblink\"], challenge=z[\"challenge\"]\n )\n\n return meeting", "def create(request):\n\ttry:\n\t\tjson_obj = commonHttp.get_json(request.body)\n\n\t\t# Check request json\n\t\treq_attrs = [\n\t\t\texpectedAttr[\"DESC\"],\n\t\t\texpectedAttr[\"TYPE\"],\n\t\t\texpectedAttr[\"LOC\"],\n\t\t\t]\n\n\t\tcommonHttp.check_keys(json_obj, 
req_attrs)\n\n\t\tnew_location = None\n\t\t\n\t\tif json_obj.get(expectedAttr[\"LOC\"]):\n\t\t\tnew_location = create_location(json_obj.get(expectedAttr[\"LOC\"]))\n\n\t\tactivation_time = json_obj.get(expectedAttr[\"ACT_TIME\"])\n\t\tdeactivation_time = json_obj.get(expectedAttr[\"DEACT_TIME\"])\n\n\t\tnew_incident = Incident(\n\t\t\tactivation_time=activation_time,\n\t\t\tdeactivation_time=deactivation_time,\n\t\t\tdescription=json_obj[expectedAttr[\"DESC\"]],\n\t\t\tincident_type=json_obj[expectedAttr[\"TYPE\"]],\n\t\t\tlocation=new_location\n\t\t\t)\n\n\t\tcommonHttp.save_model_obj(new_incident)\n\n\t\tresponse = JsonResponse({\n\t\t\t\"id\" : new_incident.id,\n\t\t\t\"success\" : True\n\t\t\t})\n\n\t\treturn response\n\n\texcept commonHttp.HttpBadRequestException as e:\n\t\treturn HttpResponseBadRequest(e.reason_phrase)", "def create_compensation_hit_with_hit_type(\n client: MTurkClient,\n reason: str,\n hit_type_id: str,\n num_assignments: int = 1,\n) -> Tuple[str, str, Dict[str, Any]]:\n amazon_ext_url = (\n \"http://mechanicalturk.amazonaws.com/\"\n \"AWSMechanicalTurkDataSchemas/2017-11-06/QuestionForm.xsd\"\n )\n question_data_structure = (\n f'<QuestionForm xmlns=\"{amazon_ext_url}\">'\n \"<Question>\"\n \"<QuestionIdentifier>workerid</QuestionIdentifier>\"\n \"<DisplayName>Confirm Worker ID</DisplayName>\"\n \"<IsRequired>true</IsRequired>\"\n \"<QuestionContent>\"\n f\"<Text>This compensation task was launched for the following reason: {reason}... Enter Worker ID to submit</Text>\"\n \"</QuestionContent>\"\n \"<AnswerSpecification>\"\n \"<FreeTextAnswer>\"\n \"<Constraints>\"\n '<Length minLength=\"2\" />'\n '<AnswerFormatRegex regex=\"\\S\" errorText=\"The content cannot be blank.\"/>'\n \"</Constraints>\"\n \"</FreeTextAnswer>\"\n \"</AnswerSpecification>\"\n \"</Question>\"\n \"</QuestionForm>\"\n )\n\n is_sandbox = client_is_sandbox(client)\n\n # Creates a compensation HIT to be completed in the next month\n response = client.create_hit_with_hit_type(\n HITTypeId=hit_type_id,\n MaxAssignments=num_assignments,\n LifetimeInSeconds=60 * 60 * 24 * 31,\n Question=question_data_structure,\n )\n\n # The response included several fields that will be helpful later\n hit_type_id = response[\"HIT\"][\"HITTypeId\"]\n hit_id = response[\"HIT\"][\"HITId\"]\n\n # Construct the hit URL\n url_target = \"workersandbox\"\n if not is_sandbox:\n url_target = \"www\"\n hit_link = \"https://{}.mturk.com/mturk/preview?groupId={}\".format(url_target, hit_type_id)\n return hit_link, hit_id, response", "def createExamMethod(request):\n try:\n courseId = ndb.Key(urlsafe=getattr(request, 'courseId'))\n course = courseId.get()\n if course is None:\n raise Exception(\"Invalid courseId\")\n except Exception, E:\n print str(E)\n return Response(response=1, description=str(E))\n try:\n uploaderId = ndb.Key(urlsafe=getattr(request, 'uploaderId'))\n uploader = uploaderId.get()\n if uploader is None:\n raise Exception(\"Invalid uploaderId\")\n except Exception, E:\n print str(E)\n return Response(response=1, description=str(E))\n\n newExam = Exam()\n # storing details\n setattr(newExam, 'examTitle', getattr(request, 'examTitle'))\n setattr(newExam, 'examDesc', getattr(request, 'examDesc'))\n setattr(newExam, 'dueDate', getattr(request, 'dueDate'))\n setattr(newExam, 'dueTime', getattr(request, 'dueTime'))\n setattr(newExam, 'urlList', getattr(request, 'urlList'))\n\n dateUploaded = str(datetime.datetime.now() + datetime.timedelta(hours=5, minutes=30))\n setattr(newExam, 'uploaderId', uploaderId)\n 
setattr(newExam, 'courseId', courseId)\n setattr(newExam, 'dateUploaded', dateUploaded)\n examId = newExam.put()\n\n # adding examId to course.examIds\n course.examIds.append(examId)\n course.put()\n\n # deleting from memcache\n memcache.delete(courseId.urlsafe())\n\n # sending notification\n title = course.courseName\n notificationText = \"New Exam added!\"\n createNotification(course.studentIds, 'Campus Connect',\n notificationText, 'exam',\n examId.urlsafe(), courseId.urlsafe())\n sendNotification(topicName=courseId.urlsafe(), id=examId.urlsafe(), title=title,\n text=notificationText, type='exam')\n return Response(response=0, description=\"OK\", key=examId.urlsafe())", "def create_request(self):\n date_time = datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')\n present_time = date_time[0:-3] + 'Z'\n # Using the web service post() method to create request\n response = requests.post(url=bid_url, headers={'Authorization': self.api_key}, json={\n \"type\": self.bid_type.get(),\n \"initiatorId\": self.current_user.id,\n \"dateCreated\": present_time,\n \"subjectId\": Subject().get_id_by_name(self.subject.get()),\n \"additionalInfo\": {\"competency\": self.competency.get(), \"hours_per_week\": self.hours_per_session.get(),\n \"sessions_per_week\": self.sessions_per_week.get(),\n \"rate_per_session\": self.rate_per_session.get()}\n }\n )\n json_data = response.json()\n # Destroying current window and jumping to next screen by calling the main() method from the NewRequestDetails \n # class\n self.window.destroy()\n NewRequestDetails(json_data).main()", "def create_tap_flow(self, **attrs):\n return self._create(_tap_flow.TapFlow, **attrs)", "def create_for(self, contest):\n return self.create(contest.site_sport, contest.start, contest.end)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replace self.old_str by self.new_str. text arg from visit_file method of SearchInFiles class.
def word_matched(self, full_path, text):
    self.for_replace.append(full_path)
    if not self.only_print:
        old_str, new_str = self.search_word, self.new_str
        text = text.replace(old_str, new_str)
        open(full_path, 'w').write(text)
        print(f"Word => {old_str}\nReplaced by => {new_str}\nIn File: {full_path}")
        print('-' * 100)
[ "def ireplace(text, old, new):\n assert(isinstance(text, str) and isinstance(old, str))\n use_string_format = '%s' in new\n\n old_len = len(old)\n to_replace = []\n for match in iter_find(text.lower(), old.lower()):\n match = text[match:match+old_len]\n if match not in to_replace:\n if use_string_format:\n to_replace.append((match, new % match))\n else:\n to_replace.append((match, new))\n for rule in to_replace:\n text = text.replace(*rule)\n return text", "def rewrite(self, hgvs_string):\n pass", "def ReplaceStringInFile(findRe, repStr, filePath):\n tempName = filePath+'~'\n input = open(filePath)\n output = open(tempName, 'w')\n s = input.read()\n output.write(findRe.sub(repStr, s))\n output.close()\n input.close()\n UpdateFile(filePath, tempName)\n print('Notice: replaced deprecated copyright notice in %s' % filePath)", "def replace_text_in_file(file_name, old, new, isRegExp=False):\n # Read source file, store the lines and update the content of the lines\n modified = False\n with open(file_name, mode=\"r\", encoding=\"utf-8-sig\") as f_sou:\n lines = list()\n for lin in f_sou:\n if isRegExp:\n lin1 = re.sub(old, new, lin)\n else:\n lin1 = lin.replace(old, new)\n lines.append(lin1)\n if lin1 != lin:\n modified = True\n # Write the lines to the new file\n if modified:\n with open(file_name, mode=\"w\", encoding=\"utf-8\") as f_des:\n f_des.writelines(lines)", "def edit_text(self, old_text, new_text):\n span = self.__find_span_by_text(text=old_text)\n assert span is not None, \"Failed to find element with text \" + old_text\n x, y = self.chrome.get_absolute_center(span)\n pyautogui.click(x, y, clicks=3, interval=0.1)\n sleep(1)\n pyautogui.doubleClick(x, y)\n sleep(1)\n pyautogui.typewrite(new_text, interval=0.25)\n sleep(1)\n pyautogui.press('enter')\n sleep(1)\n Log.info('Replace \"{0}\" with \"{1}\".'.format(old_text, new_text))", "def _replace(self, str_to_delete='', str_to_insert='', pre_str=None, post_str=None, pre_ind=None, post_ind=None):\n\n\t\tif str_to_delete:\n\t\t\tpos = self.source.find(str_to_delete)\n\t\t\tif pos == -1:\n\t\t\t\tprint(\"Can't find str_to_delete. Source not replaced.\")\n\t\t\t\treturn\n\t\t\tself.source = self.source.replace(str_to_delete, str_to_insert)\n\t\telse:\n\t\t\tif pre_str is not None:\n\t\t\t\tpos = self.source.find(pre_str)\n\t\t\t\tif pos == -1:\n\t\t\t\t\tprint(\"Can't find pre_str. Source not replaced.\")\n\t\t\t\t\treturn\n\t\t\t\tpre_ind = pos + len(pre_str)\n\t\t\telif pre_ind is None:\n\t\t\t\tpre_ind = len(self.source)\n\n\t\t\tif post_str is not None:\n\t\t\t\tpos = self.source[pre_ind:].find(post_str)\n\t\t\t\tif pos == -1:\n\t\t\t\t\tprint(\"Can't find post_str. 
Source not replaced.\")\n\t\t\t\t\treturn\n\t\t\t\tpost_ind = pre_ind + pos\n\t\t\telif post_ind is None:\n\t\t\t\tpost_ind = len(self.source)\n\t\t\t\n\t\t\tself.source = self.source[:pre_ind] + str_to_insert + self.source[post_ind:]", "def update_refstring(self, string):\n self.refstring = string\n self._lines = []\n self._contains_index = None\n self.changed = True\n\n #The only other references that become out of date are the contains\n #and preamble attributes which are determined by the parsers.\n #Assuming we did everything right with the rt update, we should be\n #able to just use the new contains index to update those.\n icontains = self.contains_index\n ichar = self.charindex(icontains, 0)\n self.preamble = string[:ichar]\n self.contains = string[ichar + 9:]", "def replace_items(infile, old_word, new_word):\n if not os.path.isfile(infile):\n print(\"Error on replace_word, not a regular file: \" + infile)\n sys.exit(1)\n\n f1 = open(infile, 'r').read()\n f2 = open(infile, 'w')\n m = f1.replace(old_word, new_word)\n f2.write(m)", "def update(self, index, new_char):\n assert index < len(self._file_content)\n old_file = deepcopy(self._file_content)\n self._file_content[index] = new_char\n Logger.log_str('File is updated from: \\n{} \\nto \\n{}'.\n format(\"\".join(val for val in old_file), \"\".join(val for val in self._file_content)))", "def replace(self, replaceWord): #$NON-NLS-1$\r", "def _change_all_plain(self, s: str) -> tuple[int, str]:\n find, change = self.find_text, self.change_text\n # #1166: s0 and find0 aren't affected by ignore-case.\n s0 = s\n find0 = self.replace_back_slashes(find)\n if self.ignore_case:\n s = s0.lower()\n find = find0.lower()\n count, prev_i, result = 0, 0, []\n while True:\n progress = prev_i\n # #1166: Scan using s and find.\n i = s.find(find, prev_i)\n if i == -1:\n break\n # #1166: Replace using s0 & change.\n count += 1\n result.append(s0[prev_i:i])\n result.append(change)\n prev_i = max(prev_i + 1, i + len(find)) # 2021/01/08 (!)\n assert prev_i > progress, prev_i\n # #1166: Complete the result using s0.\n result.append(s0[prev_i:])\n return count, ''.join(result)", "def replace_line(self,file,search,replace):\n for line in fileinput.input(file, inplace=1):\n if search in line:\n line = line.replace(search,replace)\n sys.stdout.write(line)", "def replace(text,what,with_what,start=0,stop=None,\n\n SearchObject=TextSearch,join=join,joinlist=joinlist,tag=tag,\n string_replace=string.replace,type=type,\n StringType=types.StringType):\n if type(what) is not TextSearchType:\n so = SearchObject(what)\n else:\n so = what\n what = so.match\n if stop is None:\n if start == 0 and len(what) < 2:\n return string_replace(text,what,with_what)\n stop = len(text)\n t = ((text,sWordStart,so,+2),\n # Found something, replace and continue searching\n (with_what,Skip+AppendTagobj,len(what),-1,-1),\n # Rest of text\n (text,Move,ToEOF)\n )\n found,taglist,last = tag(text,t,start,stop)\n if not found:\n return text\n return join(taglist)", "def _replace(self, replaceWord, replaceAll=False): #@UnusedVariable #$NON-NLS-1$\r\n pass", "def text(self, new_text):\n if isinstance(new_text, str):\n self._text = list(new_text[row * self._columns:self._columns] for row in range(self._rows))\n self._update()\n elif isinstance(new_text, list):\n self._text = [''] * self._rows\n for i in range(min(self._rows, len(new_text))):\n self._text[i] = new_text[i][:self._columns]\n self._update()\n else:\n self.text = str(new_text)", "def update_task(new_text):\n content = read_content()\n 
task_index = find_task_lineindex(content)\n\n content[task_index] = new_text + \"\\n\"\n\n with open(\"index.html\", \"w\") as html:\n html.writelines(content)", "def replacements(in_string,old_substrings,new_substrings):\n for (old,new) in zip(old_substrings,new_substrings):\n in_string = in_string.replace(old, new)\n return in_string", "def replace_from_file():\n if len(sys.argv) < 3:\n _usage_replace_from_file()\n sys.exit(1)\n\n fromto_file = sys.argv[1]\n f = open(fromto_file, 'r')\n fromto_lines = f.readlines()\n f.close()\n\n filenames = wildcard_notation(sys.argv[2:])\n\n for filename in filenames:\n f = open(filename, 'r')\n text = f.read()\n f.close()\n replacements = False\n for line in fromto_lines:\n if line.startswith('#'):\n continue\n words = line.split()\n if len(words) == 2:\n from_text, to_text = words\n\n if from_text in text:\n backup_filename = filename + '.old~~'\n shutil.copy(filename, backup_filename)\n print 'replacing %s by %s in' % (from_text, to_text), filename\n text = text.replace(from_text, to_text)\n replacements = True\n if replacements:\n f = open(filename, 'w')\n f.write(text)\n f.close()", "def tidy_texfile(texfile, tex_changes, new_texfile=None):\n\n thetex = open(texfile, 'r').read()\n \n # Add strings\n if 'addstrs' in tex_changes:\n alladdstrs = [a + '\\n' for a in tex_changes['addstrs']]\n thetex = alladdstrs + thetex\n\n # Replace strings\n if 'replacestrs' in tex_changes:\n for r in tex_changes['replacestrs']: thetex = thetex.replace(r[0],r[1])\n \n open(new_texfile,'w').writelines(thetex)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
list the books of a given user by uid
def list_user_books(self,uid,start,end): sqls="SELECT a.bid,isbn10,isbn13,title,subtitle,author,translators,publisher,pubdate,price,pages,update_time,create_time,quantity,\ series,keywords,summary,b.status \ FROM "+TABLE_USERBOOK+" a RIGHT JOIN "+TABLE_BOOK+" b ON a.bid=b.bid WHERE a.uid=%d " % uid if end: sqls+= " LIMIT %d,%d" % (start,end) logger.debug(sqls) result= db.query(sqls) books=[] if result: for r in result: book = self.compose_book(r) books.append(book) return books
[ "def get_listed_books(self, user_num):\n c = self.db.cursor()\n c.execute(\"\"\"\n SELECT \n B.title AS Title, \n B.ISBN AS ISBN, \n B.author AS Author, \n UB.points AS Points,\n CQ.qualityDescription AS Quality,\n UB.id AS id,\n B.coverImageUrl AS Cover \n FROM \n UserBooks UB \n INNER JOIN \n Books B on UB.bookId = B.id \n INNER JOIN \n CopyQualities CQ ON UB.copyQualityId = CQ.id \n WHERE \n userId = ?\n AND\n UB.available == 1\n \"\"\",\n (user_num,))\n rows = c.fetchall()\n self.db.commit()\n return rows", "def getBorrowedBooksByUserID(self, lmsUserID):\n self.cursor.execute(\n \"SELECT * FROM BookBorrowed WHERE LmsUserID = %s AND status = 'borrowed'\", (lmsUserID,))\n res = self.cursor.fetchall()\n return res", "def books(self):\n return Book.objects.filter(author=self.id)", "def list_books(self):\n return [common.JsonObject.make_from_dict(input_dict=book) for book in self.find(find_filter={\"portion_class\": \"book\"})]", "def get_available_copies(self, book_id, user_num):\n c = self.db.cursor()\n try:\n c.execute(\"\"\"\n SELECT\n UserBooks.id AS userBooksId,\n UserBooks.userId AS userId,\n UserBooks.points AS points,\n CAST \n ((julianday('now') - \n julianday(UserBooks.dateCreated)) \n AS INTEGER) AS timeHere,\n Books.title AS title,\n Books.author AS author,\n Books.coverImageUrl AS coverImageUrl,\n Users.username AS username,\n CopyQualities.qualityDescription AS qualityDescription\n FROM\n UserBooks INNER JOIN \n Books on UserBooks.bookId = Books.id INNER JOIN\n CopyQualities on UserBooks.copyQualityId = \n CopyQualities.id INNER JOIN\n Users on UserBooks.userId = Users.id\n WHERE\n Books.id = ? AND\n UserBooks.available = 1 AND\n UserBooks.userId != ?\n \"\"\",\n (book_id, user_num))\n rows = c.fetchall()\n log.info(f\"Fetched all available books for Book {book_id} that are not owned by {user_num}\")\n except sqlite3.Error as e:\n log.error(f\"Error fetching available books for Book {book_id} that are not owned by{user_num} -- {e}\")\n raise Exception\n return rows", "def get_books(self, id):\n books = []\n\n book = Book(self.db)\n for book_id in self.db.cursor().execute('SELECT book_id FROM book_author WHERE author_id = ' + str(id)):\n books.append(book.get_book(book_id[0]))\n\n return books", "def books():\n\n return render_template('books/index.html', books=current_user.books)", "async def get_books():\n return books", "def getfriends(uid):\n\tprint \"Get %s 's friend list\" % str(uid)\n\tdict1 = {}\n\tClient = client.Client.instance()\n\tif Client._init ==0:\n\t\tClient.init()\n\tusers = Client.get_friend_bilateral(uid).users\n\tfor user in users:\n\t\tno = user['id']\n\t\tuname = user['screen_name']\n\t\tdict1[no] = uname\n\treturn dict1", "def print_book_list():\n print_all_books()", "def test_list_has_all_books_for_user(dummy_request, db_session, one_user):\n db_session.add(one_user)\n\n data = {\n 'email': one_user.email,\n 'password': 'password',\n }\n dummy_request.GET = data\n books = _list_books(dummy_request, one_user)\n assert len(books) == len(one_user.books)", "def list_books(request):\n # return HttpResponse(request.user.username)\n\n books = Book.objects.exclude(\n date_reviewed__isnull=True).prefetch_related('authors')\n\n context = {\n 'books': books,\n }\n return render(request, 'books/list.html', context)", "def get_userBooksID(self, user_num):\n c = self.db.cursor()\n c.execute(\"\"\"SELECT id FROM UserBooks WHERE userId=?\"\"\", (user_num,))\n rows = c.fetchall()\n self.db.commit()\n return rows", "def books():\n all_books = DB.titles()\n body = 
[u'<h1>My Bookshelf</h1>', '<ul>']\n item_template = u'<li><a href=\"/book/{id}\">{title}</a></li>'\n for book in all_books:\n body.append(item_template.format(**book))\n body.append(u'</ul>')\n return u'\\n'.join(body)", "def books_by_author(): \r\n\t\r\n with AbstractDatabaseConnection('library.db') as conn:\r\n cursor = conn.cursor()\r\n cursor.execute(\"\"\"\r\n SELECT b.title\r\n FROM author as a, books as b\r\n\t\tWHERE a.author_id = b.author_id AND a.name = '%s'\r\n \"\"\" % request.args.get('name'))\r\n\r\n result = cursor.fetchall()\r\n return build_result(result, 200)", "def view_books(request):\n\n if request.user.is_authenticated() and request.user.is_superuser:\n books = models.Book.objects.all()\n else:\n books = models.Book.objects.filter(hidden=False)\n\n books_list = books.extra(select={'lower_title': 'lower(title)'}).order_by('lower_title')\n\n paginator = Paginator(books_list, 50) \n\n try:\n page = int(request.GET.get('page', '1'))\n except ValueError:\n page = 1\n\n try:\n books = paginator.page(page)\n except (EmptyPage, InvalidPage):\n books = paginator.page(paginator.num_pages)\n\n latest_books = models.Book.objects.filter(hidden=False).order_by('-created')[:5]\n\n import datetime\n # show active books in last 30 days\n now = datetime.datetime.now()-datetime.timedelta(30)\n\n from django.db.models import Count\n\n latest_active = [models.Book.objects.get(id=b['book']) for b in models.BookHistory.objects.filter(modified__gte = now, book__hidden=False).values('book').annotate(Count('book')).order_by(\"-book__count\")[:5]]\n \n return render_to_response('portal/books.html', {\"request\": request, \n \"title\": \"Booki books\", \n \"books\": books,\n \"page\": page, \n \"latest_books\": latest_books,\n \"latest_active\": latest_active\n })", "def get_books_liked_by_user(user_id, user_book_rating, min_rating):\n user2_book_rating = user_book_rating[user_book_rating[\"user_id\"]==user_id]\n books_liked = user2_book_rating[user2_book_rating[\"rating\"] >= min_rating][\"book_id\"].to_list()\n return books_liked", "def author(request, pk):\n books = Book.objects.filter(author=pk)\n return render(request, 'book_list.html', {'books': books})", "async def get_users():\n session: Session = Session()\n count_table = session.query(\n BorrowingUserTable.user_key,\n functions.count(\n BorrowingUserTable.key).label(\"borrowed_books\")\n ).filter(\n BorrowingUserTable.return_date == None\n ).group_by(\n BorrowingUserTable.user_key\n ).subquery()\n ret = session.query(\n UserTable,\n functions.coalesce(\n count_table.c.borrowed_books, 0\n ).label(\"borrowed_books\")\n ).outerjoin(\n count_table,\n UserTable.key == count_table.c.user_key\n ).order_by(\n UserTable.lastname,\n UserTable.firstname,\n UserTable.classname\n ).all()\n logger.info(ret)\n return ret" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get book by isbn from douban and insert it into local db
def insert_book(self,isbn,uid=None): try: if not uid: uid=1 book = self.get_book_byisbn(isbn) if book and book.id: #check if it's already in user book list? sqls="select 1 FROM %s WHERE `uid`=%d and `bid`=%d" %(TABLE_USERBOOK,uid,book.id) result=db.query(sqls) if result: logger.debug(("already exist:",isbn)) return else: self.add_userbook(uid,book.id) else: book = self.get_book_byisbn_fromremote(isbn) if book : t=db.transaction() bid = self.create_book(book) if bid: self.add_userbook(uid,bid) else: logger.warn(('failed to get bid:', bid)) t.commit() else: logger.warn(('book not returned:%s' % isbn)) except Exception,e: logger.error(e)
[ "def select_book(self, isbn):\n return self.cur.execute('SELECT * FROM books WHERE isbn=?', (isbn,)).fetchone()", "def add_ISBN(self, isbn, category, title, author, genre, views = 0):\n \n \n isbn = ISBN(ISBN=isbn,category=category,title=title,author=author,genre=genre)\n isbn.save()\n return isbn", "def post(self, isbn):\n book = db.session.query(Book).filter(Book.isbn == isbn).first()\n if book is not None:\n if not book.available:\n return {\"Message\": \"Book is not available for renting\"}, 423\n\n # Retrieve user_id of the user who sent the request\n current_user = get_jwt_identity()\n user = db.session.query(User).filter(User.username == current_user).first()\n user.books.append(book)\n book.available = False\n db.session.commit()\n\n return {'message': 'You have borrowed book with isbn{}'.format(isbn)}, 200\n\n else:\n return {'Message': 'Book with that isbn is not available'}, 404", "def store_book(connect, cursor, book_info):\n cursor.execute(\"\"\"SELECT * from book WHERE id = (?)\"\"\", (book_info.get('book_id'),))\n results = cursor.fetchall()\n results = [dict(ix) for ix in results]\n connect.commit()\n if len(results) > 0:\n return\n\n similar_books = book_info.get('similar_books')\n similar_books_list = \"\"\n for i, each_book_info in enumerate(similar_books, 1):\n similar_books_list += str(i) + \". \" + each_book_info.get('bookname') \\\n + \": \" + each_book_info.get('url')\n similar_books_list += \"\\n\"\n\n cursor.execute(\"\"\"insert into book values (?,?,?,?,?,?,?,?,?,?,?)\"\"\", (\n book_info.get('book_name'),\n book_info.get('book_url'),\n book_info.get('book_id'),\n book_info.get('book_ISBN'),\n book_info.get('book_author_url'),\n book_info.get('author_name'),\n book_info.get('book_rating'),\n book_info.get('rating_count'),\n book_info.get('review_count'),\n book_info.get('image_url'),\n similar_books_list\n ))\n connect.commit()\n print(\"There are \" + str(get_book_table_size(connect, cursor)) + \" books in boot_tb table\")", "def order_add_book_isbn(request):\n if isbn.isValid(isbn.isbn_strip(request.POST['ISBN'])):\n # try:\n book = Book.get_book(isbn.isbn_strip(request.POST['ISBN']))\n if not book:\n raise Http404('No book with that ISBN found')\n order_add_book(request, book)\n return order_render_as_response(request)\n else:\n # this ASIN isn't well-formatted, so return 400-bad-request error message\n return HttpResponseBadRequest()", "def get_books_by_ISBN(self, ISBN):\n c = self.db.cursor()\n try:\n c.execute(\"\"\"SELECT\n title,\n author,\n ISBN,\n externalLink,\n Users.username as listingUser,\n CopyQualities.qualityDescription as copyQuality,\n CAST ((julianday('now') - julianday(UserBooks.dateCreated)) AS INTEGER) AS timeHere,\n UserBooks.points as pointsNeeded,\n UserBooks.id as userBooksId,\n UserBooks.userId AS userId,\n Books.id AS booksId,\n IFNULL(coverImageUrl, '/static/images/book.png') AS coverImageUrl\n FROM Books\n INNER JOIN UserBooks\n on Books.id = UserBooks.bookId\n INNER JOIN CopyQualities\n on UserBooks.copyQualityId = CopyQualities.id\n INNER JOIN Users\n on UserBooks.userId = Users.id\n WHERE\n ISBN = ? 
AND\n UserBooks.available == 1\n ORDER BY\n UserBooks.dateCreated\n \"\"\",\n (ISBN,))\n isbn_match = c.fetchall()\n log.info(\"BSDB: Get_Books_By_ISBN (local) Results\")\n self.print_results(isbn_match)\n return isbn_match\n except sqlite3.Error as e:\n log.error(e)\n return {}", "def by_isbn(api_key, isbn):\n return Query(api_key, isbn=isbn)", "def _process_book(link):\n # download and parse book info\n data = DOWNER.download(link)\n dom = dhtmlparser.parseString(\n utils.handle_encodnig(data)\n )\n dhtmlparser.makeDoubleLinked(dom)\n\n # some books are without price in expected elements, this will try to get\n # it from elsewhere\n price = None\n try:\n price = _strip_content(zapi.get_price(dom))\n except UserWarning:\n price = dom.find(\"p\", {\"class\": \"vaseCena\"})\n\n if price:\n price = price[0].getContent().replace(\"&nbsp;\", \" \")\n price = filter(lambda x: x.isdigit(), price.strip())\n\n if price:\n price = price[0] + \"kč\"\n else:\n price = \"-1\"\n else:\n price = \"-1\"\n\n # required informations\n pub = Publication(\n title=_strip_content(zapi.get_title(dom)),\n authors=_parse_authors(zapi.get_author(dom)),\n price=price,\n publisher=_strip_content(zapi.get_publisher(dom))\n )\n\n # optional informations\n pub.optionals.URL = link\n pub.optionals.pages = _strip_content(zapi.get_pages(dom))\n pub.optionals.pub_date = _strip_content(zapi.get_pub_date(dom))\n pub.optionals.ISBN = _strip_content(zapi.get_ISBN(dom))\n pub.optionals.binding = _strip_content(zapi.get_binding(dom))\n\n # post checks\n if pub.title.startswith(\"E-kniha:\"):\n pub.title = pub.title.replace(\"E-kniha:\", \"\", 1).strip()\n pub.optionals.is_ebook = True\n\n if pub.optionals.ISBN:\n if \" \" in pub.optionals.ISBN:\n pub.optionals.ISBN = pub.optionals.ISBN.split(\" \")[0]\n\n if \"(\" in pub.optionals.ISBN:\n pub.optionals.ISBN = pub.optionals.ISBN.split(\"(\")[0]\n\n return pub", "def add_book_to_db(book: dict) -> None:\n if \"title\" in book:\n title = request.form['title']\n else:\n title = \"\"\n\n if \"authors\" in book:\n authors = \";\\n\".join(request.form['authors'].split(';'))\n else:\n authors = \"\"\n\n if \"publishedDate\" in book:\n published_date = request.form['publishedDate']\n else:\n published_date = \"\"\n\n if \"\" in book:\n industry_identifiers = request.form['industryIdentifiers']\n single_identifiers = industry_identifiers.split(';')\n industry_identifiers = \";\\n\".join([f\"{i.split(',')[0]}({i.split(',')[1]})\\n\" for i in single_identifiers])\n else:\n industry_identifiers = \"\"\n\n page_count = request.form['pageCount']\n links = \";\\n\".join(request.form['links'].split(','))\n languages = \";\\n\".join(request.form['languages'].split(','))\n\n book = Book(title=title,\n authors=authors,\n publishedDate=published_date,\n industryIdentifiers=industry_identifiers,\n pageCount=page_count,\n imageLinks=links,\n language=languages\n )\n\n DATABASE.session.add(book)\n DATABASE.session.commit()", "def lookup_Book_by_ID(self, Book_id):\n command = u\"\"\"self.cur.execute(\"SELECT * FROM Book WHERE Book_id = %s\")\"\"\" % Book_id\n #print command\n exec(command)\n data = self.cur.fetchone()\n if data == None:\n return False\n data = list(data)\n data = self.change_str_from_mysql(data)\n return data", "def lookup_Book_by_ID(self, Book_id):\n command = u\"\"\"self.cur.execute(\"SELECT * FROM Book WHERE Book_id = %s\")\"\"\" % Book_id\n #print command\n exec(command)\n data = self.cur.fetchone()\n data = list(data)\n data = self.change_str_from_mysql(data)\n return data", "def 
update_insert_books_tb(self, book_dic):\r\n book_dic = remove_empty_string(book_dic)\r\n book_id = book_dic['book_id']\r\n if self.is_book_exist(book_dic):\r\n # If book_id already exist in table, then update the book\r\n self.update_books_tb(book_dic, book_id)\r\n print(f'Book with id {book_id} is updated')\r\n else:\r\n # Book does not exist, then insert\r\n self.insert_books_tb(book_dic)\r\n print(f'Book with id {book_id} is created')", "def select_book_dict(self, isbn):\n tpl = self.cur.execute('SELECT * FROM books WHERE isbn=?',(isbn,)).fetchone()\n book = {}\n for i, column in enumerate(self.column_names):\n book[column] = tpl[i]\n return book", "def add_Book(self, Book_info):\n Book_info = self.change_str_to_mysql(Book_info)\n Book_info = tuple(Book_info)\n command = u\"\"\"self.cur.execute(\"INSERT INTO Book VALUES('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')\")\"\"\" % Book_info\n #print command\n exec(command)", "def load_books():\n\n print \"Book!\"\n\n # open the csv file and unpack it\n # with open(\"/home/vagrant/src/best_books/data/bestbooks.csv\") as general:\n\n # creating relative path, base upon the _file_ Python global.\n # it makes the code to be more portable and easier to work with\n filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"data\", \"bestbooks.csv\")\n print \"Loading filename: %s\" % filename\n with open(filename) as general:\n reader = csv.reader(general)\n\n #unpacking each row in the file and looping over it.\n #appending each title to the titles list\n\n for award, year, genre, title, author, author2, author3 in reader:\n\n # if title == \"English Passengers\" and \"Hilary Mantel\" in [author, author2, author3]:\n # pdb.set_trace()\n # The date is in the file as year string;\n # we need to convert it to an actual datetime object.\n year = int(year)\n author = author.strip()\n award = award.strip()\n\n #create book object\n #first, we'll check if this current book title we already have in the book table\n #if we don't, then we have to create a book object\n #add it to session and commit it to the database\n #using func.lower helps to compare data without case sensitivity\n book = Book.query.filter(func.lower(Book.title) == func.lower(title)).first()\n if not book:\n book = Book(title=title)\n db.session.add(book)\n db.session.commit()\n\n #create award object\n book_award = Award.query.filter(func.lower(Award.name) == func.lower(award)).first()\n if not book_award:\n book_award = Award(name=award)\n db.session.add(book_award)\n db.session.commit()\n\n #create book award object\n get_book_award = BookAward.query.filter(BookAward.year == year,\n BookAward.book_id == book.book_id,\n BookAward.award_id == book_award.award_id).first()\n if not get_book_award:\n books_awards = BookAward(book_id=book.book_id,\n award_id=book_award.award_id,\n year=year)\n db.session.add(books_awards)\n db.session.commit()\n\n #create genre object\n if genre:\n new_genre = Genre.query.filter(func.lower(Genre.genre) == func.lower(genre)).first()\n if not new_genre:\n new_genre = Genre(genre=genre)\n db.session.add(new_genre)\n db.session.commit()\n\n #create book genre object\n get_book_genre = BookGenre.query.filter(BookGenre.book_id == book.book_id,\n BookGenre.genre_id == new_genre.genre_id).first()\n if not get_book_genre:\n books_genres = BookGenre(book_id=book.book_id,\n genre_id=new_genre.genre_id)\n db.session.add(books_genres)\n db.session.commit()\n\n #create first author object\n this_author = Author.query.filter(func.lower(Author.name) == 
func.lower(author)).first()\n if not this_author:\n this_author = Author(name=author)\n db.session.add(this_author)\n db.session.commit()\n\n #create book author object for the first author\n get_book_author = BookAuthor.query.filter(BookAuthor.book_id == book.book_id,\n BookAuthor.author_id == this_author.author_id).first()\n if not get_book_author:\n books_authors = BookAuthor(book_id=book.book_id,\n author_id=this_author.author_id)\n db.session.add(books_authors)\n db.session.commit()\n\n\n # need to check if the book has a second author\n # if it does then we will check if this author is in the database\n # if it doesn't then we'll create a new author object,\n # add it to session and commit to the database.\n if author2:\n new_author2 = Author.query.filter(func.lower(Author.name) == func.lower(author2)).first()\n if not new_author2:\n new_author2 = Author(name=author2)\n db.session.add(new_author2)\n db.session.commit()\n\n # once we added this author to our database author table\n # we can create a books author connection object to the books authors table\n books_authors = BookAuthor(book_id=book.book_id,\n author_id=new_author2.author_id)\n\n # if we have this author in our database authors table, then\n # we have to check if we have this author book assossiation in our\n # books authors table.\n # if we don't, then we'll create this assossiation object in the\n # books authors table\n else:\n get_book_author2 = BookAuthor.query.filter(BookAuthor.book_id == book.book_id,\n BookAuthor.author_id == new_author2.author_id).first()\n if not get_book_author2:\n books_authors = BookAuthor(book_id=book.book_id,\n author_id=new_author2.author_id)\n db.session.add(books_authors)\n db.session.commit()\n\n # need to check if the book has a third author\n # if it does then we will check if this author is in the database\n # if it doesn't then we'll create a new author object,\n # add it to session and commit to the database\n if author3:\n new_author3 = Author.query.filter(func.lower(Author.name) == func.lower(author3)).first()\n if not new_author3:\n new_author3 = Author(name=author3)\n db.session.add(new_author3)\n db.session.commit()\n\n # once we added this author to our database author table\n # we can create a books author connection object to the books authors table\n books_authors = BookAuthor(book_id=book.book_id,\n author_id=new_author3.author_id)\n\n # if we have this author in our database authors table, then\n # we have to check if we have this author book assossiation in our\n # books authors table.\n # if we don't, then we'll create this assossiation object in the\n # books authors table\n else:\n get_book_author3 = BookAuthor.query.filter(BookAuthor.book_id == book.book_id,\n BookAuthor.author_id == new_author3.author_id).first()\n if not get_book_author3:\n books_authors = BookAuthor(book_id=book.book_id,\n author_id=new_author3.author_id)\n db.session.add(books_authors)\n db.session.commit()", "def insert_book(author: str, others: str, o_authors: str, lang: int, title: str, o_title: str, trans_title: str,\r\n place: str, publisher: str, year: str, pages: str, script: int, _type: int, notes: str,\r\n republished: int = 0) -> int:\r\n\r\n for para in ((o_authors, \"o_authors\"), (author, \"author\"), (others, \"other_authors\"), (title, \"title\"),\r\n (o_title, \"o_title\"), (trans_title, \"rans_title\"), (place, \"places\"), (publisher, \"publishers\"),\r\n (year, \"year\"), (pages, \"pages\"), (notes, \"notes\")):\r\n assert type(para[0]) is str, \"Parameter is not of type 
STRING: {} - {}\".format(para[1], para[0])\r\n\r\n for para in ((lang, \"lang\"), (script, \"script\"), (_type, \"_type\")):\r\n assert type(para[0]) is int, \"Parameter is not of type INT: {} - {}\".format(para[1], para[0])\r\n\r\n with sql.connect(\"knjige.sqlite\") as conn:\r\n c = conn.cursor()\r\n c.execute(\r\n \"INSERT INTO books(author, others, lang, title, o_title, trans_title, places, publishers, year, pages, \"\r\n \"script, type, o_authors, republished, notes) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);\",\r\n (author, others, lang, title, o_title, trans_title, place, publisher, year, pages, script, _type,\r\n o_authors, republished, notes))\r\n rowid = c.lastrowid\r\n c.close()\r\n conn.commit()\r\n return rowid", "def search_by_ISBN():\n\n isbn = request.form[\"choice\"]\n\n # Search DB with ISBN only to pull book page with only selected isbn\n try:\n result = db.execute(\"SELECT DISTINCT * FROM books WHERE isbn LIKE :isbn\", {\"isbn\":(\"%\"+isbn+\"%\")}).fetchall()\n\n except exc.IntegrityError as e:\n flash(\"Unable to find anything.\")\n return render_template(\"error.html\")\n \n # Pull user reviews for selected isbn\n try:\n reviews = db.execute(\"SELECT * FROM reviews WHERE isbn=:isbn\", {\"isbn\":isbn}).fetchall()\n \n except:\n flash(\"Unable to find anything.\")\n return render_template(\"error.html\")\n\n # Pull GoodReads data for selected isbn\n try:\n data = urlopen(\"https://www.goodreads.com/book/review_counts.json?isbns=%s&key=%s\" % (isbn, key))\n data = json.loads(data.read())\n book_data = data['books']\n\n except:\n flash(\"Something went wrong.\")\n return render_template(\"error.html\")\n \n return render_template(\"book.html\", data=result, reviews=reviews, goodreads = book_data)", "def test_store_indb(self):\n self.assertTrue(book_scraper.store_to_db())", "def isbn(self):\n return self._book_dict[\"isbn\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
remove book from user book list
def remove_userbook(self,uid,bid): sqls="DELETE FROM %s WHERE `uid`=%d and `bid`=%d" %(TABLE_USERBOOK,uid,bid) db.query(sqls)
[ "def remove_book(self, book: Book):\n self.books.remove(book)", "def remove_book(self, in_title, in_author):\n title=in_title.lower()\n author=in_author.lower()\n if title and not title.isspace() and author and not author.isspace():\n for book in self.booklist:\n if book.title==title and book.author==author and book.status==\"avalible\":\n self.booklist.remove(book)\n return(\"The book is now deleted\")\n elif book.title==title and book.author==author and book.status==\"borrowed\":\n return(\"The book must be retured back, can therefor not be removed.\")\n else:\n return(\"Book not found.\")\n else:\n return \"Fill in title AND author\"", "def remove_book(self):\n \n try:\n self.clr_scr()\n serial_no=input(\"Enter serial number of book:\\t\\t\") #enter serial_no of book you want to delete.\n Library.library.pop(serial_no,\"No such item to delete\")\n print(\"\\n\\n\")\n print('****************Book removed successfuly from library database.*********************')\n time.sleep(1)\n return self.main_menu()\n \n except Exception as msg:\n print(\"ERROR------->>>>>>\",msg)", "def RemoveBook(self, title):\n stored_title = book.Book.TransformTitle(title)\n if stored_title in self.__books:\n stored_book = self.__books[stored_title]\n thickness = stored_book.GetThickness()\n del self.__books[stored_title]\n self._IncreaseCapacity(thickness)\n else:\n raise RuntimeError(\"Removal failed: Book not found in shelf.\")", "def order_remove_book(request, book_pk):\n try:\n book = get_object_or_404(Book, pk=book_pk)\n if book.order == request.session['order']:\n book.delete()\n else:\n raise Exception(\"Tried to remove a book from the current order that wasn't in the current order\")\n except KeyError:\n logging.info(\"Tried to remove a book from the current order, but there isn't a current order\")\n raise\n\n return order_render_as_response(request)", "def on_booklist_delete_clicked(self, obj):\n store, the_iter = self.blist.get_selected()\n if not the_iter:\n return\n data = self.blist.get_data(the_iter, [0])\n self.booklist.delete_book(cuni(data[0]))\n self.blist.remove(the_iter)\n self.unsaved_changes = True\n self.top.run()", "def remove_book(request, groupid):\n\n if not request.GET.has_key(\"book\"):\n return pages.ErrorPage(request, \"500.html\")\n\n book = models.Book.objects.get(url_title=request.GET[\"book\"])\n book.group = None\n\n try:\n book.save()\n except:\n transaction.rollback()\n else:\n transaction.commit()\n\n return HttpResponseRedirect(reverse(\"view_group\", args=[groupid]))", "def remove_reservation(self, r_id=None, user=None, book=None):\n try: # if the reservation exists remove it\n if user is not None:\n reservation = self.get(reserved_by=user, book=book)\n reservation = self.get(pk=r_id)\n reservation.delete()\n except Reservation.DoesNotExist: # else die quetly\n pass", "def delete_book_from_library(book_name):\n book_name = book_name.title()\n file.pop(book_name)\n with open('personal_library.json', 'w') as f:\n json.dump(file, f)", "def removeBooking(self, idNum):\n booking_id = (idNum, )\n self.cursor.execute(\"DELETE FROM bookings WHERE id=?\", booking_id)", "def removeEvent(self, bookID):\n try:\n listEvents = self.service.events().list(calendarId = \"primary\", q = bookID).execute()\n getEvents = listEvents.get(\"items\", [])\n for event in getEvents:\n eventId = event['id']\n event = self.service.events().delete(calendarId='primary', eventId= eventId).execute()\n print('Event removed from calendar')\n except:\n print(\"Doesn't exist on calendar\")\n pass", "def 
remove(uid):", "def del_bookmark(book_name):\n book_path = os.path.join(BOOKMARK_DIRECTORY, book_name)\n if os.path.isfile(book_path):\n os.remove(book_path)\n if '/' in book_name:\n book_sub = book_name.split('/')[:-1]\n if len(book_sub) > 0:\n for n in range(len(book_sub)):\n cs = ''.join(sd + '/' for sd in book_sub[:len(book_sub)-n])\n subpath = os.path.join(BOOKMARK_DIRECTORY, cs)\n if len(os.listdir(subpath)) < 1:\n os.rmdir(subpath)", "def remove(self, isbn):\n if isbn in self.isbns:\n if self.isbns[isbn] == 1:\n del self.isbns[isbn]\n else:\n self.isbns[isbn] -= 1", "def delete_book(name):\n if check_book_in_library(name):\n delete_book_from_library(name)\n msg = f\"Deleted {name} from the library\"\n else:\n msg = (\"\" if name == \"\" else f\"{name} was not found in the Library\")\n click.echo(msg)", "def delete_book(book_id):\n if len(MyLibrary.books) <= book_id or book_id < 0:\n abort(404)\n book = [MyLibrary.books[book_id]]\n MyLibrary.DeleteBook(book)\n return jsonify({'result': True})", "def delete_User(self):\n User.user_lst.remove(self)", "def cancel_remove(self, event=None):\r\n try:\r\n cur_sel_vals = list(self.select_item().split(', '))\r\n cur_sel_vals[8] = cur_sel_vals[8].rstrip('\\n')\r\n except IndexError:\r\n return\r\n ans = askyesno('M.Y. Hotel', f'Do you really want to remove {cur_sel_vals[1]}\\'s booking ?')\r\n if ans < 1:\r\n return\r\n cursor.execute(f'DELETE FROM HOTEL_INFO WHERE BOOK_ID=\"{cur_sel_vals[0]}\" and NAME=\"{cur_sel_vals[1]}\" and'\r\n f' ROOM_NO=\"{cur_sel_vals[2]}\"'\r\n f' and DATE_OF_CIN=\"{cur_sel_vals[3]}\" and DATE_OF_COUT=\"{cur_sel_vals[4]}\" and ROOM_TYPE='\r\n f'\"{cur_sel_vals[5]}\" and '\r\n f'PH_NO=\"{cur_sel_vals[6]}\" and PRICE=\"{cur_sel_vals[7]}\" and PAID_THRU=\"{cur_sel_vals[8]}\"')\r\n connection.commit()", "def remove(self, user: Optional[str] = None):\n raise NotImplementedError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a published pipeline by name and version
def get_published_pipeline(ws,name,version): published_pipelines = PublishedPipeline.list(ws) for pipe in published_pipelines: p_name = pipe.name p_version = pipe.version if(p_name == name and p_version is not None and p_version==version): return pipe else: return None
[ "def get_pipeline(self, pipeline_name):\n return self.find('*/pipeline[@name=\"%s\"]' % pipeline_name)", "def get_pipeline(self, project, pipeline_id, pipeline_version=None):\n route_values = {}\n if project is not None:\n route_values['project'] = self._serialize.url('project', project, 'str')\n if pipeline_id is not None:\n route_values['pipelineId'] = self._serialize.url('pipeline_id', pipeline_id, 'int')\n query_parameters = {}\n if pipeline_version is not None:\n query_parameters['pipelineVersion'] = self._serialize.query('pipeline_version', pipeline_version, 'int')\n response = self._send(http_method='GET',\n location_id='28e1305e-2afe-47bf-abaf-cbb0e6a91988',\n version='6.0-preview.1',\n route_values=route_values,\n query_parameters=query_parameters)\n return self._deserialize('Pipeline', response)", "def load_pipeline(name):\r\n pipeline = _load(name, get_pipelines_paths())\r\n if pipeline is None:\r\n raise ValueError(\"Unknown pipeline: {}\".format(name))\r\n\r\n return pipeline", "def get_pipeline(pipeline_file, pipeline_name):\n logger.info('starting to extract pipeline ({}) from {}'.format(pipeline_name, pipeline_file))\n try:\n with open(pipeline_file) as file:\n pipeline = json.load(file)\n logger.info('file {} opened'.format(pipeline_file))\n except Exception as e:\n logger.error('could not extract pipeline ({}) from {}'.format(pipeline_name, pipeline_file))\n logger.error(e)\n return\n pipeline = pipeline['pipelines'][pipeline_name]\n logger.info('pipeline loaded')\n return pipeline", "def get_pipeline():", "def get_pipeline_definition(pipeline_config: Dict[str, Any], pipeline_name: Optional[str] = None) -> Dict[str, Any]:\n if pipeline_name is None:\n if len(pipeline_config[\"pipelines\"]) != 1:\n raise PipelineConfigError(\"The YAML contains multiple pipelines. Please specify the pipeline name to load.\")\n return pipeline_config[\"pipelines\"][0]\n\n matching_pipelines = [p for p in pipeline_config[\"pipelines\"] if p[\"name\"] == pipeline_name]\n\n if len(matching_pipelines) == 1:\n return matching_pipelines[0]\n\n if not matching_pipelines:\n raise PipelineConfigError(\n f\"Cannot find any pipeline with name '{pipeline_name}' declared in the YAML file. \"\n f\"Existing pipelines: {[p['name'] for p in pipeline_config['pipelines']]}\"\n )\n raise PipelineConfigError(\n f\"There's more than one pipeline called '{pipeline_name}' in the YAML file. 
\"\n \"Please give the two pipelines different names.\"\n )", "def get_existing_pipeline(self, name: str) -> bool:\r\n for p in self.list_pipelines():\r\n if p and p.get('Name', \"\") == name:\r\n self.pipeline_id = p.get(\"Id\")\r\n return True\r\n return False", "def upload_pipeline_version(\n self,\n pipeline_package_path: str,\n pipeline_version_name: str,\n pipeline_id: Optional[str] = None,\n pipeline_name: Optional[str] = None,\n description: Optional[str] = None,\n ) -> kfp_server_api.V2beta1PipelineVersion:\n\n if all([pipeline_id, pipeline_name\n ]) or not any([pipeline_id, pipeline_name]):\n raise ValueError('Either pipeline_id or pipeline_name is required.')\n\n if pipeline_name:\n pipeline_id = self.get_pipeline_id(pipeline_name)\n kwargs = dict(\n name=pipeline_version_name,\n pipelineid=pipeline_id,\n )\n\n if description:\n kwargs['description'] = description\n\n response = self._upload_api.upload_pipeline_version(\n pipeline_package_path, **kwargs)\n\n link = f'{self._get_url_prefix()}/#/pipelines/details/{response.pipeline_id}/version/{response.pipeline_version_id}'\n if self._is_ipython():\n import IPython\n html = f'<a href=\"{link}\" target=\"_blank\" >Pipeline details</a>.'\n IPython.display.display(IPython.display.HTML(html))\n else:\n print(f'Pipeline details: {link}')\n\n return response", "def get_stage(cls, name):\n return cls.pipeline_stages[name][0]", "def get_pipeline(pickle_file=DEFAULT_FILE, root='.', prot=DEFAULT_PROT):\n if os.path.isfile(pickle_file):\n return restore_pipeline(pickle_file)\n else:\n pipeline = Pipeline(pickle_file=os.path.abspath(str(pickle_file)),\n root=os.path.abspath(str(root)),\n prot=int(prot))\n pipeline.save()\n return pipeline", "def get_pipeline_id(self, name: str) -> Optional[str]:\n pipeline_filter = json.dumps({\n 'predicates': [{\n 'operation': _FILTER_OPERATIONS['EQUALS'],\n 'key': 'display_name',\n 'stringValue': name,\n }]\n })\n result = self._pipelines_api.list_pipelines(filter=pipeline_filter)\n if result.pipelines is None:\n return None\n if len(result.pipelines) == 1:\n return result.pipelines[0].pipeline_id\n elif len(result.pipelines) > 1:\n raise ValueError(\n f'Multiple pipelines with the name: {name} found, the name needs to be unique.'\n )\n return None", "def get_pipeline_definition(pipeline_name, working_dir):\n logger.debug(\"starting\")\n\n pipeline_path = pypyr.moduleloader.get_pipeline_path(\n pipeline_name=pipeline_name,\n working_directory=working_dir)\n\n logger.debug(f\"Trying to open pipeline at path {pipeline_path}\")\n try:\n with open(pipeline_path) as yaml_file:\n yaml_loader = yaml.YAML(typ='safe', pure=True)\n pipeline_definition = yaml_loader.load(yaml_file)\n logger.debug(\n f\"found {len(pipeline_definition)} stages in pipeline.\")\n except FileNotFoundError:\n logger.error(\n \"The pipeline doesn't exist. 
Looking for a file here: \"\n f\"{pipeline_name}.yaml in the /pipelines sub directory.\")\n raise\n\n logger.debug(\"pipeline definition loaded\")\n\n logger.debug(\"done\")\n return pipeline_definition", "def async_get_pipeline(hass: HomeAssistant, pipeline_id: str | None = None) -> Pipeline:\n pipeline_data: PipelineData = hass.data[DOMAIN]\n\n if pipeline_id is None:\n # A pipeline was not specified, use the preferred one\n pipeline_id = pipeline_data.pipeline_store.async_get_preferred_item()\n\n pipeline = pipeline_data.pipeline_store.data.get(pipeline_id)\n\n # If invalid pipeline ID was specified\n if pipeline is None:\n raise PipelineNotFound(\n \"pipeline_not_found\", f\"Pipeline {pipeline_id} not found\"\n )\n\n return pipeline", "def get_pipeline_version(\n self,\n pipeline_id: str,\n pipeline_version_id: str,\n ) -> kfp_server_api.V2beta1PipelineVersion:\n return self._pipelines_api.get_pipeline_version(\n pipeline_id=pipeline_id,\n pipeline_version_id=pipeline_version_id,\n )", "def get(identifier: str) -> FeaturePipeline:\n if identifier not in _registry.keys():\n raise KeyError(f'Identifier {identifier} is not associated with any `FeaturePipeline`.')\n return _registry[identifier]", "def update(self) -> str:\n if not self.exists():\n fatal(f\"Pipeline {self.name} does not exist\")\n else:\n L.info(f\"Updating {self.name} pipeline\")\n\n res = requests.patch(f\"{API}/{self.slug}\", json=self.pipeline)\n res.raise_for_status()\n return f\"Updated {self.name}!\"", "def _create_pipeline(self, pipeline_name: str,\n components: List[BaseComponent],\n beam_pipeline_args: Optional[List[str]] = None):\n return tfx_pipeline.Pipeline(\n pipeline_name=pipeline_name,\n pipeline_root=self._pipeline_root(pipeline_name),\n components=components,\n enable_cache=True,\n beam_pipeline_args=beam_pipeline_args,\n )", "def _create_pipeline(\n self,\n pipeline_name: Text,\n pipeline_components: List[base_node.BaseNode],\n beam_pipeline_args: List[Text] = None) -> tfx_pipeline.Pipeline:\n return tfx_pipeline.Pipeline(\n pipeline_name=pipeline_name,\n pipeline_root=self._pipeline_root(pipeline_name),\n components=pipeline_components,\n beam_pipeline_args=beam_pipeline_args)", "def fetch_pipeline_by_id(cls, id):\n return (\n Session.query(PipelineStore)\n .filter(PipelineStore.id == id)\n .one()\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
numpy compatible implementation of heaviside function
def npHeaviside(x): return np.piecewise(x, [x<0, x==0, x>0], [lambda arg: 0.0, lambda arg: 0.5, lambda arg: 1.0])
[ "def test_heaviside(self):\n self.assertEqual(m.heaviside(-1), 0)\n self.assertEqual(m.heaviside(0), 0.5)\n self.assertEqual(m.heaviside(1), 1)\n self.assertEqual(m.heaviside(-np.inf), 0)\n self.assertTrue(np.isnan(m.heaviside(np.nan)))\n self.assertEqual(m.heaviside(np.inf), 1)\n \n np.testing.assert_array_equal(m.heaviside(np.array([-1, 0, 1])),\n np.array([0, 0.5, 1]))\n \n a = np.array([-1, 0, 1])\n m.heaviside(a, out=a)\n np.testing.assert_array_equal(a, np.array([0, 0, 1]))\n\n a = np.array([-1, 0, 1], np.double)\n m.heaviside(a, out=a)\n np.testing.assert_array_equal(a, np.array([0, 0.5, 1]))", "def heaviside_derivative(x):\n return np.zeros(x.shape)", "def heaviside(x):\n return (x >= 0) * 1", "def check_hessian(f, hess_analytical, x0, delta = 1e-5, verbose = True):\n hessian_analytical = np.array(hess_analytical)\n hessian_num = hessian_numerical(f, x0, delta)\n if verbose:\n print('check_hessian: hessian_analytical = ', hessian_analytical)\n print('check_hessian: hessian_num = ', hessian_num)\n print('check_hessian: hessian difference = ', \n hessian_analytical - hessian_num)\n \n return np.sqrt(np.sum((hessian_analytical - hessian_num) ** 2))", "def linear_heaviside(x, A, b):\n x_var_diag = tf.matrix_diag_part(x.var)\n mu = x.mean / (tf.sqrt(x_var_diag) + EPSILON)\n \n def heaviside_covariance(x):\n mu1 = tf.expand_dims(mu, 2)\n mu2 = tf.transpose(mu1, [0,2,1])\n\n s11s22 = tf.expand_dims(x_var_diag, axis=2) * tf.expand_dims(x_var_diag, axis=1)\n rho = x.var / (tf.sqrt(s11s22))# + EPSILON)\n rho = tf.clip_by_value(rho, -1/(1+EPSILON), 1/(1+EPSILON))\n\n return bu.heavy_g(rho, mu1, mu2)\n \n z_mean = bu.gaussian_cdf(mu)\n y_mean = tf.matmul(z_mean, A.mean) + b.mean\n z_cov = heaviside_covariance(x)\n y_cov = linear_covariance(z_mean, z_cov, A, b)\n return gv.GaussianVar(y_mean, y_cov)", "def npDirac(x, h):\n return npHeaviside(x)*npHeaviside(h-x)*1.0/h", "def h( self , x , u , t ):\n \n #y = np.zeros(self.p) # Output vector\n \n y = self.sys.h( x , self.sys.ubar , t )\n \n return y", "def default_hessian(self, x, f):\r\n n = len(x)\r\n G = zeros((n,n))\r\n h = 1e-3\r\n \r\n for i in range(n):\r\n for j in range(n):\r\n\r\n G[i,j] = (f(x + h*self._basisvec(n,(i,j),(1,1))) - f(x + h*self._basisvec(n,(i,j), (1,-1)))\r\n - f(x + h*self._basisvec(n,(i,j),(-1,1))) + f(x + h*self._basisvec(n,(i,j),(-1,-1))))/(4*h**2)\r\n G = (G + G.T)/2\r\n return linalg.inv(G)", "def hhi(a: np.ndarray, axis: int) -> np.ndarray:\n return np.square(a/a.sum(axis=axis)).sum(axis=axis)", "def ev_H(H, vr):\n N = H.shape[0]\n x = np.linspace(0, 1, N)\n v = np.zeros(N)\n v[(x > (2 / 3))] = vr\n\n op = H + v * np.eye(N)\n return op", "def evaluate_H_at_hx(self, hx = 0.):\n\t\treturn self.hamiltonian_cont(time = hx)", "def hessian(self, var, bayesianOptimizer):\n bayesianOptimizer.raiseAnError(NotImplementedError,'Hessian is not yet developed for this acqusition function')", "def get_Hv():\n \n vn = np.zeros((nx,ny+1)) \n vs = np.zeros((nx,ny+1))\n ve = np.zeros((nx,ny+1))\n vw = np.zeros((nx,ny+1))\n ue = np.zeros((nx,ny+1))\n uw = np.zeros((nx,ny+1))\n τyyn = np.zeros((nx,ny+1))\n τyys = np.zeros((nx,ny+1))\n τyxe = np.zeros((nx,ny+1))\n τyxw = np.zeros((nx,ny+1))\n Hv = np.zeros((nx,ny+1))\n \n j = np.arange(1,ny) # v-cell centers in domain interior\n \n vn[:,j] = (v[:,j+1] + v[:,j])/2\n vs[:,j] = (v[:,j] + v[:,j-1])/2\n \n i = np.arange(0,nx-1)\n ve[IJ(i,j)] = (v[IJ(i+1,j)] + v[IJ(i,j)])/2\n ve[nx-1,j] = vbc_r\n i = np.arange(1,nx)\n vw[IJ(i,j)] = (v[IJ(i,j)] + v[IJ(i-1,j)])/2\n vw[0,j] = vbc_l\n \n i = 
np.arange(0,nx)\n ue[IJ(i,j)] = (u[IJ(i+1,j-1)] + u[IJ(i+1,j)])/2\n uw[IJ(i,j)] = (u[IJ(i,j-1)] + u[IJ(i,j)]) /2\n \n τyyn[:,j] = -2*ν*(v[:,j+1] - v[:,j]) /Δy\n τyys[:,j] = -2*ν*(v[:,j] - v[:,j-1])/Δy\n \n i = np.arange(0,nx-1)\n τyxe[IJ(i,j)] = -ν*(v[IJ(i+1,j)]-v[IJ(i,j)])/Δx - ν*(u[IJ(i+1,j)]-u[IJ(i+1,j-1)])/Δy\n τyxe[nx-1,j] = -ν*(vbc_r-v[nx-1,j])/(Δx/2) - ν*(u[nx,j]-u[nx,j-1])/Δy \n \n i = np.arange(1,nx)\n τyxw[IJ(i,j)] = -ν*(v[IJ(i,j)]-v[IJ(i-1,j)])/Δx - ν*(u[IJ(i,j)]-u[IJ(i,j-1)])/Δy\n τyxw[0,j] = -ν*(v[0,j]-vbc_l)/(Δx/2) - ν*(u[0,j]-u[0,j-1])/Δy\n \n Hv[:,j] = -((vn[:,j]*vn[:,j] - vs[:,j]*vs[:,j])/Δy + (ve[:,j]*ue[:,j] - vw[:,j]*uw[:,j])/Δx) \\\n -((τyyn[:,j] - τyys[:,j])/Δy + (τyxe[:,j] - τyxw[:,j])/Δx)\n \n return Hv", "def calc_hessian_at(self, x: np.ndarray) -> np.ndarray:\n return hessian_approximation(self.f, x)", "def hessenberg_ev(H):\n m, n = H.shape\n assert(m == n)\n assert(np.linalg.norm(H[np.tril_indices(m, -2)]) < 1.0e-6)\n ee, V = np.linalg.eig(H)\n return ee, V", "def evaluate_fgh(self, args, fixed=None, fgh=2):\n args_ = [arg if arg.__class__ in native_types else value(arg) for arg in args]\n # Note: this is passed-by-reference, and the args_ list may be\n # changed by _evaluate (e.g., for PythonCallbackFunction).\n # Remember the original length of the list.\n N = len(args_)\n f, g, h = self._evaluate(args_, fixed, fgh)\n # Guarantee the return value behavior documented in the docstring\n if fgh == 2:\n n = N - 1\n if len(h) - 1 != n + n * (n + 1) // 2:\n raise RuntimeError(\n f\"External function '{self.name}' returned an invalid \"\n f\"Hessian matrix (expected {n + n*(n+1)//2 + 1}, \"\n f\"received {len(h)})\"\n )\n else:\n h = None\n if fgh >= 1:\n if len(g) != N:\n raise RuntimeError(\n f\"External function '{self.name}' returned an invalid \"\n f\"derivative vector (expected {N}, \"\n f\"received {len(g)})\"\n )\n else:\n g = None\n # Note: the ASL does not require clients to honor the fixed flag\n # (allowing them to return non-0 values for the derivative with\n # respect to a fixed numeric value). We will allow clients to\n # be similarly lazy and enforce the fixed flag here.\n if fixed is not None:\n if fgh >= 1:\n for i, v in enumerate(fixed):\n if not v:\n continue\n g[i] = 0\n if fgh >= 2:\n for i, v in enumerate(fixed):\n if not v:\n continue\n for j in range(N):\n if i <= j:\n h[i + (j * (j + 1)) // 2] = 0\n else:\n h[j + (i * (i + 1)) // 2] = 0\n return f, g, h", "def getHessian(fgradient):\n def hess(x):\n return evaluateHessian(fgradient,x)\n return hess", "def h_vector(self):\n from sage.arith.all import binomial\n d = self.dimension()\n f = self.f_vector() # indexed starting at 0, since it's a Python list\n h = []\n for j in range(0, d + 2):\n s = 0\n for i in range(-1, j):\n s += (-1)**(j-i-1) * binomial(d-i, j-i-1) * f[i+1]\n h.append(s)\n return h", "def _h_function(self,h):\n return self.contribution * np.exp(-1.0 * h / self.a)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
numpy compatible implementation of dirac delta. This implementation represents a discrete version of dirac with width h and height 1/h. The area under the dirac delta is equal to 1.
def npDirac(x, h): return npHeaviside(x)*npHeaviside(h-x)*1.0/h
[ "def derivative(a,b,n):\n dx = (b-a)/(n-1)\n D=(1/2)*(np.eye(n+1,n+1,1)-np.eye(n+1,n+1,-1))\n D[0][0] = -1\n D[-1][-1] = 1\n D[0][1] = 1\n D[-1][-2] = -1\n D = D/(dx)\n return D", "def derivative(data, dt):\n\tdata = np.insert(data, 0, data[0])\n\tdata = np.diff(data/dt)\n\treturn data", "def d_dphi(grid, arr):\n neg_2h = np.roll(arr, 2, axis=1)\n pos_2h = np.roll(arr, -2, axis=1)\n neg_h = np.roll(arr, 1, axis=1)\n pos_h = np.roll(arr, -1, axis=1)\n h = grid.dphi\n return (neg_2h - 8*neg_h + 8*pos_h - pos_2h) / (12 * h)", "def diameter(H, A):\n\tD = 1329*(10**(-H/5))/np.sqrt(A)\n\treturn D", "def diameter(self):\n\t\treturn self.r * 2", "def _dnedx(self, x, dx=0.01):\n assert len(x) == self._plasma.grid.dimension\n\n x = np.array(x, dtype=float)\n dx = np.array(dx, dtype=float)\n if (dx.ndim == 0):\n assert dx > 0\n dx = np.zeros_like(x) + dx\n else:\n assert dx.ndims == self._plasma.grid.dimension\n assert np.all(dx > 0)\n\n # before calculating derivatives, we need to identify the near boundary\n # points, where center derivative can not be used, one side derivative\n # must be used instead\n dx_plus = np.copy(dx)\n dx_minus = np.copy(dx)\n ne_plus = np.empty_like(x)\n ne_minus = np.empty_like(x)\n for i,d in enumerate(dx):\n try:\n coords = np.copy(x)\n coords[i] += dx[i]\n ne_plus[i] = self._plasma.get_ne(coords, eq_only=self._eq_only,\n time=self._time)\n except ValueError:\n dx_plus[i] = 0\n ne_plus[i] = self._plasma.get_ne(x, eq_only=self._eq_only,\n time=self._time)\n try:\n coords = np.copy(x)\n coords[i] -= dx[i]\n ne_minus[i] = self._plasma.get_ne(coords,eq_only=self._eq_only,\n time=self._time)\n except ValueError:\n dx_minus[i] = 0\n ne_minus[i] = self._plasma.get_ne(x,eq_only=self._eq_only,\n time=self._time)\n\n # Every direction must have at least one side within plasma region\n assert np.all(dx_plus+dx_minus > 0)\n return (ne_plus - ne_minus)/(dx_plus + dx_minus)", "def fourPtCenteredDiff(x,y):\n #calculate dydx by center differencing using array slices\n dydx = np.zeros(y.shape,float) #we know it will be this size\n dydx[2:-2] = (y[0:-4] -8*y[1:-3] + 8*y[3:-1] - y[4:])/(12*(x[2:-2] - x[1:-3])) #center difference\n dydx[0] = (y[1]-y[0])/(x[1]-x[0]) #forward difference\n dydx[1] = (y[2]-y[1])/(x[2]-x[1])\n dydx[-1] = (y[-1] - y[-2])/(x[-1] - x[-2]) #backward difference\n dydx[-2] = (y[-2] - y[-3])/(x[-2] - x[-3])\n return dydx", "def derivative(\n self, t: float, state: np.ndarray, u: np.ndarray) -> np.ndarray:\n pass", "def derivative(f, h = 1e-5):\n \n return lambda x_n: (f(x_n + h / 2) - f(x_n - h / 2)) / h", "def calculate_diameter(self) -> qty.Length:\n # given: friction loss and flow rate\n rho = self._fluid.density()\n mu = self._fluid.kinematic_viscosity()\n pi = math.pi\n dpf = self._dp_fric\n V = self._flow_rate\n l = self._length\n f = 0.03\n i = 0\n di: float = 0.0\n while i < self._max_iterations:\n di = (f * l / dpf * rho * 8.0 / (pi ** 2.0) * V ** 2.0) ** (1.0 / 5.0)\n A = pi * di ** 2.0 / 4.0\n v = V / A\n re = reynolds_number(v, di, mu)\n rel_pipe_rough = self._rough / di\n f_new = darcy_friction_factor(re, rel_pipe_rough)\n if abs(f_new - f) <= 1.0e-5:\n break\n else:\n f = f_new\n i += 1\n if i == self._max_iterations:\n raise OverflowError('too many iterations. 
no solution found')\n self._cross_section.diameter = qty.Length(di)\n return qty.Length(di)", "def _calc_psi_deriv(self):\n try:\n self.bkg['psi'].mean()\n except:\n self.build_bkg()\n \n # psi = self.eqdsk.psi\n # self.dpsidR = np.zeros((self.eqdsk.nzbox, self.eqdsk.nrbox))\n # self.dpsidZ = np.zeros((self.eqdsk.nzbox, self.eqdsk.nrbox))\n psi = self.bkg['psi']\n self.dpsidR = np.zeros((self.nz, self.nR))\n self.dpsidZ = np.zeros((self.nz, self.nR)) \n deriv = np.gradient(psi)\n # Note np.gradient gives y\n # derivative first, then x derivative\n ddR = deriv[1]\n ddZ = deriv[0]\n # dRdi = np.asarray(1.0)/np.gradient(self.R_eqd)\n # dRdi = np.tile(dRdi, [self.eqdsk.nzbox,1])\n # dZdi = np.asarray(1.0)/np.gradient(self.Z_eqd)\n # dZdi = np.tile(dZdi, [self.eqdsk.nrbox,1])\n # dZdi = np.transpose(dZdi)\n dRdi = np.asarray(1.0)/np.gradient(self.bkg['R'])\n dRdi = np.tile(dRdi, [self.nz,1])\n dZdi = np.asarray(1.0)/np.gradient(self.bkg['z'])\n dZdi = np.tile(dZdi, [self.nR,1])\n dZdi = np.transpose(dZdi)\n #print(\"shape ddR:\",np.shape(ddR),'shape dRdi:', np.shape(dRdi))\n #print('shape ddZ:',np.shape(ddZ),'shape dZdi:', np.shape(dZdi))\n \n self.dpsidR[:, :] = ddR*dRdi\n self.dpsidZ[:, :] = ddZ*dZdi", "def derivative(x, f, h=1e-3):\n return (f(x + h) - f(x - h)) / (2 * h)", "def derivative(x):\n return x * (1 - x)", "def dshape_deta(xi, eta):\n dN_deta = np.array([-(0.25 - 0.25*xi)*(1 - eta) + (0.25*xi - 0.25)*(-eta - xi - 1),\n -(1 - eta) * (0.25 * xi + 0.25) + (-0.25 * xi - 0.25) * (-eta + xi - 1),\n (eta + 1) * (0.25 * xi + 0.25) + (0.25 * xi + 0.25) * (eta + xi - 1),\n (0.25 - 0.25 * xi) * (eta + 1) + (0.25 - 0.25 * xi) * (eta - xi - 1),\n -0.5 * (1 - xi) * (xi + 1),\n (1 - eta) * (0.5 * xi + 0.5) + (-eta - 1) * (0.5 * xi + 0.5),\n 0.5 * (1 - xi) * (xi + 1),\n (0.5 - 0.5 * xi) * (1 - eta) + (0.5 - 0.5 * xi) * (-eta - 1), ]) # vector\n return dN_deta", "def dshape_deta(xi, eta):\n lm = 1 - xi - eta\n dN_deta = np.array([1 - 4*lm,\n -4*xi,\n 0,\n 4*xi,\n -1 + 4*eta,\n 4*(lm - eta)])\n return dN_deta", "def derivative(x, dim):\n dx = x.narrow(dim, 1, x.size(dim)-1) - x.narrow(dim, 0, x.size(dim)-1)\n return torch.cat((dx, dx.narrow(dim, -1, 1)), dim=dim)", "def derivative(self, variable):\n\tui = variable*index_expression[::] + \\\n\t index_expression[2::] + index_expression[...]\n\tli = variable*index_expression[::] + \\\n\t index_expression[:-2:] + index_expression[...]\n\td_values = 0.5*(self.values[ui]-self.values[li])/self.spacing[variable]\n\tdiffaxis = self.axes[variable]\n\tdiffaxis = 0.5*(diffaxis[2:]+diffaxis[:-2])\n\td_axes = self.axes[:variable]+[diffaxis]+self.axes[variable+1:]\n\td_default = None\n\tif self.default is not None:\n\t d_default = Numeric.zeros(self.rank*(3,), Numeric.Float)\n\treturn self._constructor(d_axes, d_values, d_default, 0)", "def __frac_diff(x: list[float], d: float) -> list[float]:\n\n def next_pow2(n):\n # we assume that the input will always be n > 1,\n # so this brief calculation should be fine\n return (n - 1).bit_length()\n\n n_points = len(x)\n fft_len = 2 ** next_pow2(2 * n_points - 1)\n prod_ids = np.arange(1, n_points)\n frac_diff_coefs = np.append([1], np.cumprod((prod_ids - d - 1) / prod_ids))\n dx = ifft(fft(x, fft_len) * fft(frac_diff_coefs, fft_len))\n return np.real(dx[0:n_points])", "def compute_delta(self, capteur):\n self.delta_hsv = lib.mod.compute_modulation(capteur, self.coef_matrice)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function computes, up to the degree N, the coefficients of the fourier series representation of the function f, which is periodic on the interval [start,end].
def coeff(f, start, end, N): return coeff_fft(f, start, end, N)
[ "def fourier_series(self, x, f, n=0):\n # Make the parameter objects for all the terms\n a0, *cos_a = parameters(','.join(['a{}'.format(i) for i in range(0, n + 1)]))\n sin_b = parameters(','.join(['b{}'.format(i) for i in range(1, n + 1)]))\n\n # Construct the series\n series = a0 + sum(ai * cos(i * f * x) + bi * sin(i * f * x)\n for i, (ai, bi) in enumerate(zip(cos_a, sin_b), start=1))\n\n return series", "def fourier_coefficients(X, Y, T, nrange):\n # N = len(X)\n # T = np.linspace(0.0, 1.0, N)\n dt = T[1] - T[0]\n # The array storing cofficients\n Cr = np.zeros(2*nrange + 1, dtype=float) # real part\n Cc = np.zeros(2*nrange + 1, dtype=float) # complex part\n\n for n in range(-nrange, nrange+1):\n s = np.sum(np.exp(-2*np.pi*1j*n*T)*(X + Y*1j)*dt)\n Cr[n+nrange] = s.real\n Cc[n+nrange] = s.imag\n\n return Cr, Cc", "def chebyshev_coeffs(f, n):\n #chevy extremizers\n chubby_extrema = np.cos((np.pi * np.arange(n*2)) / n)\n #funciton evaluated at chev. extremizers\n samples = f(chubby_extrema)\n #fft cooeficients\n coeffs = np.real(fft(samples))[:n+1] / n\n #turn fft coeefecinets into cheb. coefficients\n coeffs[0] /= 2\n coeffs[n] /= 2\n return coeffs", "def fourier(self):\n n = self.nsamples()\n yC = self.vpp / self.adcrange\n xC = 1.e-6\n yF = yC * (2.0/n) * np.abs(fft(self.bulk)[:n//2])\n xF = xC * fftfreq(n,(self.Dt)*1.e-9)[:n//2] \n return xF,yF", "def fourier_series(x, *a):\n output = 0\n output += a[0]/2\n w = a[1]\n for n in range(2, len(a), 2):\n n_ = n/2\n val1 = a[n]\n val2 = a[n+1]\n output += val1*np.sin(n_*x*w)\n output += val2*np.cos(n_*x*w)\n return output", "def _fourier_series_helper(self, N, L, scale_function):\n from sage.all import pi, sin, cos, srange\n x = self.default_variable()\n a0 = self.fourier_series_cosine_coefficient(0,L)\n result = a0/2 + sum([(self.fourier_series_cosine_coefficient(n,L)*cos(n*pi*x/L) +\n self.fourier_series_sine_coefficient(n,L)*sin(n*pi*x/L))*\n scale_function(n)\n for n in srange(1,N)])\n return result.expand()", "def fourier_series(a, b, N, T, x):\n # numpy matrix version of code below\n a = a[:N+1]\n b = b[:N+1]\n\n \"\"\"\n y = np.zeros(x.shape)\n for k in range(N+1):\n kk = k * 2 * np.pi / T\n y += (b[k] * np.sin(kk*x) + a[k] * np.cos(kk*x))\n \"\"\"\n k = np.arange(N+1)\n kk = k * 2 * np.pi / T\n y = np.sum(b * np.sin(np.outer(x, kk)) + a * np.cos(np.outer(x, kk)), axis=1)\n return y", "def fourier_series():\r\n # Define fourier variables and sine\r\n a0,a1,a2,a3,a4,a5,a6 = parameters(','.join(['a{}'.format(i) for i in range(0, 7)]))\r\n return a0*(1+sin(-1.571)) \\\r\n + a1*(1+sin(1*x+1.571)) \\\r\n + a2*(1+sin(2*x-1.571)) \\\r\n + a3*(1+sin(3*x+1.571)) \\\r\n + a4*(1+sin(4*x-1.571)) \\\r\n + a5*(1+sin(5*x+1.571)) \\\r\n + a6*(1+sin(6*x-1.571))", "def fbessel_coeffs(f, N, order=0):\n import numpy as np\n import scipy.integrate as si\n import scipy.special as ss\n nx = len(f)\n x = np.linspace(0.0, 1.0, nx)\n zeros = ss.jn_zeros(order, N)\n a = np.zeros(N)\n for i in range(N):\n a[i] = ( 2.0 / ss.jn(order + 1, zeros[i])**2\n * si.simps(x * f * ss.jn(order, zeros[i] * x), x) )\n return a", "def fourier_series_cosine_coefficient(self,n,L):\n from sage.all import cos, pi\n x = var('x')\n result = sum([(f(x)*cos(pi*x*n/L)/L).integrate(x, a, b)\n for (a,b), f in self.list()])\n if is_Expression(result):\n return result.simplify_trig()\n return result", "def fourier_series(dates, period, series_order):\n # Fourier Detrend\n # periods, order, start_shift, and scaling (multi or univariate)\n # then just subtract\n\n # convert to days since 
epoch\n dates = pd.date_range(\"2020-01-01\", \"2022-01-01\", freq=\"D\")\n t = np.array(\n (dates - datetime.datetime(1970, 1, 1)).total_seconds().astype(float)\n ) / (3600 * 24.0)\n result = np.column_stack(\n [\n fun((2.0 * (i + 1) * np.pi * t / period))\n for i in range(series_order)\n for fun in (np.sin, np.cos)\n ]\n )", "def fourier_series_sine_coefficient(self,n,L):\n from sage.all import sin, pi\n x = var('x')\n result = sum([(f(x)*sin(pi*x*n/L)/L).integrate(x, a, b)\n for (a,b), f in self.list()])\n if is_Expression(result):\n return result.simplify_trig()\n return result", "def _atten_coeffs(t, f):\n # Based on the code from Besson, et.al.\n # but simplified significantly since w1=0\n\n t_C = t - scipy.constants.zero_Celsius\n w0 = np.log(1e-4)\n # w1 = 0\n w2 = np.log(3.16)\n\n b0 = -6.7489 + t_C * (0.026709 - 8.84e-4 * t_C)\n b1 = -6.2212 - t_C * (0.070927 + 1.773e-3 * t_C)\n b2 = -4.0947 - t_C * (0.002213 + 3.32e-4 * t_C)\n\n if isinstance(t, np.ndarray) and isinstance(f, np.ndarray):\n # t and f are both arrays, so return 2-D array of coefficients\n # where each row is a single t and each column is a single f.\n a = np.broadcast_to(b1[:,np.newaxis], (len(t), len(f)))\n b = np.zeros((len(t),len(f)))\n # Use numpy slicing to calculate different values for b when\n # f<1e9 and f>=1e9. Transpose b0, b1, b2 into column vectors\n # so numpy multiplies properly\n b[:,f<1e9] += (b0[:,np.newaxis] - b1[:,np.newaxis]) / w0\n b[:,f>=1e9] += (b2[:,np.newaxis] - b1[:,np.newaxis]) / w2\n\n elif isinstance(f, np.ndarray):\n # t is a scalar, so return an array of coefficients based\n # on the frequencies\n a = np.full(len(f), b1)\n b = np.zeros(len(f))\n # Again use numpy slicing to differentiate f<1e9 and f>=1e9\n b[f<1e9] += (b0 - b1) / w0\n b[f>=1e9] += (b2 - b1) / w2\n\n # Past this point, f must be a scalar\n # Then an array or single coefficient is returned based on the type of t\n elif f < 1e9:\n a = b1\n b = (b0 - b1) / w0\n else:\n a = b1\n b = (b2 - b1) / w2\n\n return a, b", "def get_freqs(Fs, n):\n\n return np.linspace(0, Fs / 2, int(n / 2 + 1))", "def piecewise_polynomial_coefficients_in_half_interval(func, n_intervals, polynomial_order):\n intervals = dyadic_intervals_in_half_interval(n_intervals)\n coefficients = zeros((polynomial_order + 1, n_intervals))\n for i in range(n_intervals):\n a, b = intervals[i]\n coefficients[:, i] = optimal_polynomial_coefficients(func, polynomial_order, a, b) if a != b else func(b)\n return coefficients", "def fourier_series_partial_sum_hann(self,N,L):\n from sage.all import cos, pi\n return self._fourier_series_helper(N, L, lambda n: (1+cos(pi*n/N))/2)", "def fourier_frequencies(self):\n return jnp.reciprocal(self.fourier_periods)", "def fourier_series_partial_sum_filtered(self,N,L,F):\n return self._fourier_series_helper(N, L, lambda n: F[n])", "def fftfreq(n, dtype=torch.float, device=torch.device(\"cpu\")):\n return (torch.arange(n, dtype=dtype, device=device) + n // 2) % n - n // 2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function evaluates the Fourier series of degree N with the coefficient vectors a and b and the period length T at the points in the array x.
def fourier_series(a, b, N, T, x): # numpy matrix version of code below a = a[:N+1] b = b[:N+1] """ y = np.zeros(x.shape) for k in range(N+1): kk = k * 2 * np.pi / T y += (b[k] * np.sin(kk*x) + a[k] * np.cos(kk*x)) """ k = np.arange(N+1) kk = k * 2 * np.pi / T y = np.sum(b * np.sin(np.outer(x, kk)) + a * np.cos(np.outer(x, kk)), axis=1) return y
[ "def fourier_series(x, *a):\n output = 0\n output += a[0]/2\n w = a[1]\n for n in range(2, len(a), 2):\n n_ = n/2\n val1 = a[n]\n val2 = a[n+1]\n output += val1*np.sin(n_*x*w)\n output += val2*np.cos(n_*x*w)\n return output", "def fourier_series(self, x, f, n=0):\n # Make the parameter objects for all the terms\n a0, *cos_a = parameters(','.join(['a{}'.format(i) for i in range(0, n + 1)]))\n sin_b = parameters(','.join(['b{}'.format(i) for i in range(1, n + 1)]))\n\n # Construct the series\n series = a0 + sum(ai * cos(i * f * x) + bi * sin(i * f * x)\n for i, (ai, bi) in enumerate(zip(cos_a, sin_b), start=1))\n\n return series", "def fourier_series():\r\n # Define fourier variables and sine\r\n a0,a1,a2,a3,a4,a5,a6 = parameters(','.join(['a{}'.format(i) for i in range(0, 7)]))\r\n return a0*(1+sin(-1.571)) \\\r\n + a1*(1+sin(1*x+1.571)) \\\r\n + a2*(1+sin(2*x-1.571)) \\\r\n + a3*(1+sin(3*x+1.571)) \\\r\n + a4*(1+sin(4*x-1.571)) \\\r\n + a5*(1+sin(5*x+1.571)) \\\r\n + a6*(1+sin(6*x-1.571))", "def fourier_series(dates, period, series_order):\n # Fourier Detrend\n # periods, order, start_shift, and scaling (multi or univariate)\n # then just subtract\n\n # convert to days since epoch\n dates = pd.date_range(\"2020-01-01\", \"2022-01-01\", freq=\"D\")\n t = np.array(\n (dates - datetime.datetime(1970, 1, 1)).total_seconds().astype(float)\n ) / (3600 * 24.0)\n result = np.column_stack(\n [\n fun((2.0 * (i + 1) * np.pi * t / period))\n for i in range(series_order)\n for fun in (np.sin, np.cos)\n ]\n )", "def fourier(self):\n n = self.nsamples()\n yC = self.vpp / self.adcrange\n xC = 1.e-6\n yF = yC * (2.0/n) * np.abs(fft(self.bulk)[:n//2])\n xF = xC * fftfreq(n,(self.Dt)*1.e-9)[:n//2] \n return xF,yF", "def fourier_coefficients(X, Y, T, nrange):\n # N = len(X)\n # T = np.linspace(0.0, 1.0, N)\n dt = T[1] - T[0]\n # The array storing cofficients\n Cr = np.zeros(2*nrange + 1, dtype=float) # real part\n Cc = np.zeros(2*nrange + 1, dtype=float) # complex part\n\n for n in range(-nrange, nrange+1):\n s = np.sum(np.exp(-2*np.pi*1j*n*T)*(X + Y*1j)*dt)\n Cr[n+nrange] = s.real\n Cc[n+nrange] = s.imag\n\n return Cr, Cc", "def DFT(x):\n N = len(x)\n n = np.arange(N)\n k = n.reshape((N, 1))\n M = np.exp(-2j * np.pi * k * n / N)\n return np.dot(M, x)", "def Df(x, t):\n#######\n if type(x) <> 'numpy.ndarray': x = numpy.array(x) # convert to numpy array\n N = x.shape[0] # length of the original array \n df = [] # initial derivative empyy list\n for k in range(N): # loop for calculation \n if k == 0: # first point case\n dx = x[k + 1] - x[k]\n dt = t[k + 1] - t[k]\n elif k == N - 1: # last point case\n dx = x[k] - x[k - 1]\n dt = t[k] - t[k - 1]\n else: # remaining cases\n dx = x[k + 1] - x[k - 1]\n dt = t[k + 1] - t[k - 1] \n df.append(dx/dt) # add point to the list\n return numpy.array(df)", "def _fourier_series_helper(self, N, L, scale_function):\n from sage.all import pi, sin, cos, srange\n x = self.default_variable()\n a0 = self.fourier_series_cosine_coefficient(0,L)\n result = a0/2 + sum([(self.fourier_series_cosine_coefficient(n,L)*cos(n*pi*x/L) +\n self.fourier_series_sine_coefficient(n,L)*sin(n*pi*x/L))*\n scale_function(n)\n for n in srange(1,N)])\n return result.expand()", "def DTFT(ns, signal, omega):\n dtft = np.zeros(omega.shape, dtype=complex)\n for n, x in zip(ns, signal):\n dtft += x * np.exp(-1j * omega * n)\n\n return dtft", "def fourier_series_value(self,x,L):\n xnew = x - int(RR(x/(2*L)))*2*L\n endpts = self.end_points()\n if xnew == endpts[0] or xnew == endpts[-1]:\n return 
(self.functions()[0](endpts[0]) + self.functions()[-1](endpts[-1]))/2\n else:\n return self(xnew)", "def fourier(numThetas, cosTheta, filtIntens):\r\n \r\n # //\r\n #// We will interpret theta/(theta/2) with respect to the local surface normal of the star to\r\n #// be the spatial domain \"x\" coordinate - this is INDEPENDENT of the distance to, and linear\r\n #// radius of, the star! :-)\r\n\r\n pi = math.pi #//a handy enough wee quantity\r\n halfPi = pi / 2.0\r\n\r\n #//number of sample points in full intensity profile I(theta), theta = -pi/2 to pi/2 RAD:\r\n numX0 = 2 * numThetas - 1\r\n\r\n #//We have as input the itnesity half-profile I(cos(theta)), cos(theta) = 1 to 0\r\n #//create the doubled root-intensity profile sqrt(I(theta/halfPi)), theta/halfPi = -1 to 1\r\n #//this approach assumes the real (cosine) and imaginary (sine) components are in phase\r\n rootIntens2 = [0.0 for i in range(numX0)]\r\n x0 = [0.0 for i in range(numX0)]\r\n\r\n #var normIntens;\r\n #//negative x domain of doubled profile:\r\n j = 0\r\n for i in range(numThetas-1, 0, -1):\r\n x0[j] = -1.0*math.acos(cosTheta[1][i]) / halfPi\r\n normIntens = filtIntens[i] / filtIntens[0] #//normalize\r\n rootIntens2[j] = math.sqrt(normIntens)\r\n #//console.log(\"i \" + i + \" cosTheta \" + cosTheta[1][i] + \" filtIntens \" + filtIntens[i] + \" normIntens \" + normIntens\r\n #// + \" j \" + j + \" x0 \" + x0[j] + \" rootIntens2 \" + rootIntens2[j] );\r\n j+=1\r\n #}\r\n #//positive x domain of doubled profile:\r\n for i in range(numThetas, numX0):\r\n j = i - (numThetas-1)\r\n x0[i] = math.acos(cosTheta[1][j]) / halfPi\r\n normIntens = filtIntens[j] / filtIntens[0] #//normalize\r\n #//rootIntens2[i] = Math.sqrt(normIntens);\r\n rootIntens2[i] = normIntens\r\n #//console.log(\"j \" + j + \" cosTheta \" + cosTheta[1][j] + \" filtIntens \" + filtIntens[j] + \" normIntens \" + normIntens\r\n #// + \" i \" + i + \" x0 \" + x0[i] + \" rootIntens2 \" + rootIntens2[i] );\r\n #}\r\n\r\n #//create the uniformly sampled spatial domain (\"x\") and the complementary\r\n #//spatial frequecy domain \"k\" domain\r\n #//\r\n #//We're interpreting theta/halfPi with respect to local surface normal at surface\r\n #//of star as the spatial domain, \"x\"\r\n minX = -2.0\r\n maxX = 1.0\r\n numX = 100 #//(is also \"numK\" - ??)\r\n deltaX = (maxX - minX) / numX\r\n\r\n #//Complentary limits in \"k\" domain; k = 2pi/lambda (radians)\r\n #// - lowest k value corresponds to one half spatial wavelength (lambda) = 2 (ie. 1.0 - (-1.0)):\r\n maxLambda = 2.0 * 2.0\r\n #// stupid?? 
var minK = 2.0 * pi / maxLambda; //(I know, I know, but let's keep this easy for the human reader)\r\n #// - highest k value has to do with number of points sampling x: Try Nyquist sampling rate of\r\n #// two x points per lambda\r\n minLambda = 8.0 * 2.0 * deltaX\r\n maxK = 2.0 * pi / minLambda #//\"right-going\" waves\r\n minK = -1.0 * maxK #//\"left-going\" waves\r\n deltaK = (maxK - minK) / numX\r\n #// console.log(\"maxK \" + maxK + \" minK \" + minK + \" deltaK \" + deltaK);\r\n\r\n\r\n x = [0.0 for i in range(numX)]\r\n k = [0.0 for i in range(numX)]\r\n\r\n #var ii;\r\n for i in range(numX):\r\n ii = 1.0 * i\r\n x[i] = minX + ii*deltaX\r\n k[i] = minK + ii*deltaK\r\n #// console.log(\"i \" + i + \" x \" + x[i] + \" k \" + k[i]);\r\n \r\n\r\n #//Interpolate the rootIntens2(theta/halfpi) signal onto uniform spatial sampling:\r\n #//doesn't work: var rootIntens3 = interpolV(rootIntens2, x0, x);\r\n rootIntens3 = [0.0 for i in range(numX)]\r\n\r\n for i in range(numX):\r\n rootIntens3[i] = ToolBox.interpol(x0, rootIntens2, x[i])\r\n #//console.log(\"i \" + i + \" x \" + x[i] + \" rootIntens3 \" + rootIntens3[i]);\r\n \r\n\r\n #//returned variable ft:\r\n #// Row 0: wavenumber, spatial frequency, k (radians)\r\n #// Row 1: cosine transform (real component)\r\n #// Row 2: sine transform (imaginary component\r\n ft = [ [ 0.0 for i in range(numX-1) ] for j in range(3) ]\r\n\r\n #var argument, rootFt;\r\n #//numXFloat = 1.0 * numX;\r\n #//Outer loop is over the elements of vector holding the power at each frequency \"k\"\r\n for ik in range(numX-1):\r\n #//initialize ft\r\n ft[0][ik] = k[ik]\r\n rootFtCos = 0.0\r\n rootFtSin = 0.0\r\n ft[1][ik] = 0.0\r\n ft[2][ik] = 0.0\r\n #//Inner llop is cumulative summation over spatial positions \"x\" - the Fourier cosine and sine series\r\n for ix in range(numX-1):\r\n #//ixFloat = 1.0 * ix;\r\n argument = -1.0 * k[ik] * x[ix]\r\n #//console.log(\"ik \" + ik + \" ix \" + ix + \" argument \" + argument + \" x \" + x[ix] + \" rootIntens3 \" + rootIntens3[ix]);\r\n #// cosine series:\r\n rootFtCos = rootFtCos + rootIntens3[ix] * math.cos(argument)\r\n #// sine series:\r\n rootFtSin = rootFtSin + rootIntens3[ix] * math.sin(argument);\r\n #} //ix loop\r\n ft[1][ik] = rootFtCos #// * rootFtCos; //Power\r\n ft[2][ik] = rootFtSin #// * rootFtSin;\r\n #//console.log(\"ik \" + ik + \" k \" + k[ik] + \" ft[1] \" + ft[1][ik]);\r\n #} //ik loop\r\n\r\n return ft", "def fourier_series_sine_coefficient(self,n,L):\n from sage.all import sin, pi\n x = var('x')\n result = sum([(f(x)*sin(pi*x*n/L)/L).integrate(x, a, b)\n for (a,b), f in self.list()])\n if is_Expression(result):\n return result.simplify_trig()\n return result", "def fourier_series_cosine_coefficient(self,n,L):\n from sage.all import cos, pi\n x = var('x')\n result = sum([(f(x)*cos(pi*x*n/L)/L).integrate(x, a, b)\n for (a,b), f in self.list()])\n if is_Expression(result):\n return result.simplify_trig()\n return result", "def taylor(x,f,i,n):\n #total = 0\n #dx = x[i] - x[i-1]\n #for j in range(n):\n # mat = np.linalg.matrix_power(np.matrix(ac.derivative(x[0],x[-1],n)),j) @ f\n # total = total + (mat[i-1]*(dx**j))\n def tay(e):\n total = 0\n for j in range(n):\n mat = np.linalg.matrix_power(np.matrix(ac.derivative(x[0],x[-1],n)),j) @ f\n total = total + (mat[i]*((x[e]-x[i])**j)/np.factorial(j))\n return total\n fx = np.vectorize(tay)\n return (x,fx(x))", "def fft(z, t):\n Y = numpy.fft.fft(z)\n #Y[0] = 0.5*Y[0]\n N = len(Y)\n D = (t[-1] - t[0]) / (len(t) - 1)\n return Y[:N/2+1]/float(N/2), 
numpy.arange(N/2+1) / (N * D)", "def fourier_expansion(t_grid, period, order):\n fourier_term_matrix = [\n trig_fun((2.0 * np.pi * (k + 1) * np.array(t_grid) / period))\n for k in range(order)\n for trig_fun in (np.sin, np.cos)\n ]\n return np.stack(fourier_term_matrix, axis=1)", "def dft(ts):\n#-------------------------------------------------------------------------------\n\n n = len(ts)\n ddn = 2*math.pi/n\n sin = math.sin\n cos = math.cos\n\n awr = []; awi = []\n\n for j in range(n):\n wr = wi = 0\n ddnj = ddn*j\n\n for k in range(n):\n dd = ddnj*k\n v = ts[k]\n wr += v * cos(dd)\n wi += v * sin(dd)\n\n awr.append(wr)\n awi.append(wi)\n\n return awr, awi", "def trapez(func, a, b, n):\n width = float(b - a)/n\n x_i = np.linspace(a, b, n + 1) # Erzeugung eines Arrays linker\n f_i = (func(x_i[:-1]) + func(x_i[1:]))/2 # und rechter Grenzen, Fktwerte\n return sum(f_i) * width # zwischen beiden gemittelt" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a string of a specific number of emojis based on the passed-in number. >>> make_haiku_line(5)
def make_haiku_line(num): make_line = True line = [] while make_line: syllable = random.choice(EMOJI_LIST) line.append(emoji.emojize(syllable, use_aliases=True)) if len(line) >= num: make_line = False for emoticon in line: emoji.emojize(emoticon) return line
[ "def label_from_number(number):\n row_label_chars = []\n while number > 0:\n modulo = (number - 1) % 26\n row_label_chars.insert(0, chr(65 + modulo))\n number = (number - modulo) / 26\n return ''.join(row_label_chars)", "def repeat_string(word, number):\n\n a_number = int(number)\n #Sets the number entry as a integer\n print word * a_number\n #Prints the string repeated a number of times equal to the value of a_number", "def generate_n_chars(n,char):\n try:\n n=int(n)\n char=str(char)\n except:\n raise TypeError(\"Integer and a char/string needed.\")\n return \"\".join(char for x in range(n))", "def generate_number_lines(number_of_lines=6, start=0, end=20):\n lines = [r'\\documentclass[letterpaper]{article}',\n r'\\usepackage{geometry}',\n r'\\geometry{landscape,a4paper,total={170mm,257mm},left=10mm,right=10mm,top=30mm}',\n r'\\usepackage{tikz}',\n r'\\usepackage{amsmath}',\n r'\\usetikzlibrary{arrows}',\n r'\\begin{document}',\n r'\\pagenumbering{gobble}',\n r'\\begin{LARGE}',\n r'']\n\n numbers = ','.join([str(x) for x in range(start, end + 1)])\n for _ in range(number_of_lines):\n lines.append(r'')\n lines.append(r'{\\Large $-$}')\n lines.append(r'\\begin{tikzpicture}')\n lines.append(r'\\draw[latex-latex, thick] ' + '({},0) -- ({},0) ;'.format(start - 1, end + 1))\n lines.append(r'\\foreach \\x in {' + numbers + '}')\n lines.append(r'\\draw[shift={(\\x,0)},color=black, thick] (0pt,3pt) -- (0pt,-3pt);')\n lines.append(r'\\foreach \\x in {' + numbers + '}')\n lines.append(r'\\draw[shift={(\\x,0)},color=black, thick] (0pt,0pt) -- (0pt,-3pt) node[below] ')\n lines.append(r'{\\textbf{\\x}};')\n lines.append(r'\\end{tikzpicture}')\n lines.append(r'{\\Large $+$}')\n lines.append(r'\\\\')\n lines.append(r'\\vspace*{50px}')\n lines.append(r'')\n\n lines.append(r'\\end{LARGE}')\n lines.append(r'\\end{document}')\n\n return '\\n'.join(lines)", "def create_str_domino(number):\r\n if number == 0:\r\n line_1 = ' '\r\n line_2 = ' '\r\n line_3 = ' '\r\n elif number == 1:\r\n line_1 = ' '\r\n line_2 = ' * '\r\n line_3 = ' '\r\n elif number == 2:\r\n line_1 = '* '\r\n line_2 = ' '\r\n line_3 = ' *'\r\n elif number == 3:\r\n line_1 = '* '\r\n line_2 = ' * '\r\n line_3 = ' *'\r\n elif number == 4:\r\n line_1 = '* *'\r\n line_2 = ' '\r\n line_3 = '* *'\r\n elif number == 5:\r\n line_1 = '* *'\r\n line_2 = ' * '\r\n line_3 = '* *'\r\n else:\r\n line_1 = '* *'\r\n line_2 = '* *'\r\n line_3 = '* *'\r\n return line_1, line_2, line_3", "def repeat(s: str, n: int) -> str:\n return s*n", "def line_namer(i):\n r = []\n if (((i + 1) // 2) + 1) < 10:\n r = [\"{} |\".format(((i + 1) // 2) + 1)]\n else:\n r = [\"{}|\".format(((i + 1) // 2) + 1)]\n return r", "def int_to_7char_str(i):\n #the pins always have 7 digits\n pin = str(i)\n l = len(pin)\n if (l < 7):\n zeros = \"\"\n for j in range(7-l):\n zeros += \"0\"\n pin = zeros + pin\n return pin", "def create_pyramid(height, line_gen):\n return \"\\n\".join([line_gen(i + 1, height) for i in range(height)])", "def create_chessboard(size: int=8):\r\n num_pieces = int(size / 2)\r\n num_lines = size\r\n\r\n for i in range(0, num_lines):\r\n if i % 2 == 0:\r\n print(num_pieces * '{}{}'.format(WHITE, BLACK))\r\n else:\r\n print(num_pieces * '{}{}'.format(BLACK, WHITE))", "def to_chinese(self, number):\n chinese_numeral_dict = {\n '0': '零',\n '1': '一',\n '2': '二',\n '3': '三',\n '4': '四',\n '5': '五',\n '6': '六',\n '7': '七',\n '8': '八',\n '9': '九'\n }\n chinese_unit_map = [('', '十', '百', '千'),\n ('万', '十万', '百万', '千万'),\n ('亿', '十亿', '百亿', '千亿'),\n ('兆', '十兆', '百兆', '千兆'),\n 
('吉', '十吉', '百吉', '千吉')]\n chinese_unit_sep = ['万', '亿', '兆', '吉']\n reversed_n_string = reversed(str(number))\n result_lst = []\n unit = 0\n for integer in reversed_n_string:\n if integer is not '0':\n result_lst.append(chinese_unit_map[unit // 4][unit % 4])\n result_lst.append(chinese_numeral_dict[integer])\n unit += 1\n else:\n if result_lst and result_lst[-1] != '零':\n result_lst.append('零')\n unit += 1\n\n result_lst.reverse()\n if result_lst[-1] is '零':\n result_lst.pop()\n result_lst = list(''.join(result_lst))\n for unit_sep in chinese_unit_sep:\n flag = result_lst.count(unit_sep)\n while flag > 1:\n result_lst.pop(result_lst.index(unit_sep))\n flag -= 1\n return ''.join(result_lst)", "def add_line_numbers(source: str) -> str:\n return \"\\n\".join(f\"{n: <4}{line}\" for (n, line) in enumerate(source.split(\"\\n\"), 1))", "def mostra_linha(tamanho):\n print('-' * tamanho)", "def _str(i: int) -> str:\n if i < 0 or i > 999:\n raise ValueError(\"0 <= i <= 999\")\n if 0 <= i <= 9:\n s = \"__\" + str(i)\n elif 10 <= i <= 99:\n s = \"_\" + str(i)\n else:\n s = str(i)\n return s", "def rangoli(size: int) -> str:\n assert size <= len(string.ascii_lowercase)\n\n letters = string.ascii_lowercase[:size]\n mid_row = size\n num_rows = size * 2 - 1\n\n rangoli_lines = []\n\n for row in range(1, num_rows + 1):\n distance_from_mid = abs(mid_row - row)\n row_letters_right = letters[distance_from_mid:]\n # letters on the left are same as all but first on right reversed\n row_letters = (\n list(reversed(row_letters_right[1:])) + list(row_letters_right)\n )\n # pad row letters to size using `-`\n padding = [\"-\"] * (size - len(row_letters_right))\n if padding:\n row_letters = padding + row_letters + padding\n row_str = \"-\".join(row_letters)\n rangoli_lines.append(row_str)\n\n return \"\\n\".join(rangoli_lines)", "def create_big_katakana_headline(self):\n name_of_game = \"\\u30AB\\u30BF\\u30AB\\u30CA\\u306E\\u30B2\\u30FC\\u30E0\"\n headline_label = Label(\n self._root,\n text=name_of_game,\n font=self.fonts.big_katakana_headline_font)\n\n headline_label.place(x=100, y=250)", "def cap_at_n(lines, n=2000):\n text = \"\"\n for line in lines:\n if len(text) + len(line) + 1 < n - 3:\n text += line + \"\\n\"\n else:\n text += \"...\"\n break\n return text", "def letters(n):\n phrase = ''\n # special case for one thousand\n if n == 1000:\n phrase = 'oneThousand'\n\n # handle the hundreds\n if 100 <= n < 1000:\n hundreds = n//100\n phrase += mappings[hundreds] + 'Hundred'\n # the phrase now looks like 'threehundred'\n n = n - hundreds*100\n # add an 'and' to allow for 'threehundredandtwentyfour'\n if n != 0:\n phrase += 'And'\n if 20 <= n < 100:\n tens = n//10\n phrase += mappings[tens*10]\n n -= tens*10\n if 10 <= n < 20:\n phrase += mappings[n]\n n = 0\n if 1 <= n < 10:\n phrase += mappings[n]\n return len(phrase)", "def get_emoji(i):\n if i < 0 or i >= len(map_id_to_emoji):\n raise KeyError('Invalid Emoji ID')\n return map_id_to_emoji[i]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that the function returns ValueError on an invalid email.
def testInvalidEmail(self): with self.assertRaises(ValueError): melange_db.email_validator(None, 'invalid_email_address')
[ "def test_email_parsing_fail():\n\n assert_raises(exceptions.InvalidEmail, email.validate, \"userexample.com\")\n assert_raises(exceptions.InvalidEmail, email.validate, \"user@examplecom\")\n assert_raises(exceptions.InvalidEmail, email.validate, \"userexamplecom\")", "def test_malformedEmailAddress(self):\n return self.specifyBogusEmail('hello, world!')", "def _validate_email(ctx, param, value):\n if not is_valid_email(value):\n click.secho(\"ERROR: Invalid email format\", fg=\"red\")\n sys.exit(1)\n return value", "def validate_email(property, email):\n if not email:\n raise Exception('Uh-oh. You forgot an email!')\n elif len(email) > 128:\n raise Exception('Uh-oh. That email is too long!')\n elif not re.match(r'[^@]+@[^@]+\\.[^@]+', email):\n raise Exception('%s is not a valid email address.' % email)", "def validate_email_addr(email, err_obj, err_key):\n\n if not email:\n err_obj[err_key] = 'Email Address Required'\n return\n\n if not re.match('[^@]+@[^@]+\\.[^@]+', email):\n err_obj[err_key] = 'Invalid Email Address'\n return\n\n return True", "def test_validation_incorrect_email(self, schema):\n\n data = {\n 'email': 'very@incorrect_email.com',\n 'password': 'password'\n }\n\n errors = schema.validate(data)\n assert errors\n assert 'email' in errors", "def test_error_email_address(self):\n \n message = \"begin ims1.0\\nmsg_type request\\nmsg_id ex005\\ne-mail foo.bar@to_to\\ntime 1999/04/01 to 1999/05/01\\nsta_list FI001,UK001\\narr rms2.0\\nstop\"\n \n parser = IMSParser()\n \n try:\n parser.parse(message)\n self.fail(\"should launch an exception\")\n except ParsingError, p_err:\n self.assertEqual(p_err.message,\"Error[line=4,pos=7]: Next keyword should be an email address but instead was 'foo.bar@to_to' (keyword type ID).\")\n self.assertEqual(p_err.suggestion,'The email address might be missing or is malformated')", "def test_wrong_email(default_app, invalid_email):\n app = default_app\n # login user\n authenticate(app)\n\n res = app.get(\"/email/change\")\n form = res.form\n form[\"email\"] = invalid_email\n res = form.submit()\n assert \"Error! Incorrect e-mail format\" in res", "def test_bad_email(self):\n self.register_user(data=self.user)\n self.user_login[\"user\"][\"email\"] = \"mail.com\"\n response = self.user_login_req(data=self.user_login)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data['errors']['error'][0],\n \"Incorrect email or password.\")", "def test_emailservice_validation():\n\n emailService = EmailService()\n _test(\"emailService.validate_email_address('marzi@dtu.dk')\",\n (bool(emailService.validate_email_address('marzi@dtu.dk'))))\n _test(\"emailService.validate_email_address('marzi@dtu.dk')\",\n (bool(emailService.validate_email_address('marzi@dtu.dk'))))\n _test(\"not emailService.validate_email_address('@gmail.com')\",\n (bool(not emailService.validate_email_address('@gmail.com'))))\n _test(\"not emailService.validate_email_address('test@gmail')\",\n (bool(not emailService.validate_email_address('test@gmail'))))", "def test_register_invalid_email(self):\n self.reg_data['email'] = 'wrong'\n self.register(msg=\"Invalid Email. 
Enter valid email to register\", code=400)", "def test_validate_missing_email(self, schema):\n\n data = {\n 'password': 'password',\n 'password_confirmation': \"password\"\n }\n\n errors = schema.validate(data)\n assert errors\n assert errors['email']", "def testEmailPatternMatchesValidEmailAddresses(self):\n pattern = regex.REGEXES_AND_ERRORS_DICTIONARY['emailValidationPattern']\n self._patternMatchHelper(pattern, POTENTIAL_EMAIL_ADDRESSES)", "def email_validator(email):\n\n # regex for validating an email\n regex = \"^[a-z0-9]+[\\._]?[a-z0-9]+[@]\\w+[.]\\w{2,3}$\"\n\n # validate email\n if(re.search(regex, email)):\n return True\n else:\n return False", "def test_validate_user_raises_error_for_email_not_in_database(dummy_request):\n data = {\n 'email': FAKE.email(),\n 'password': 'password'\n }\n with pytest.raises(HTTPForbidden):\n validate_user(dummy_request.dbsession, data)", "def test_create_user_with_invalid_email(self):\n response = self.create_post_request({\n 'username': 'mosteel',\n 'password': 'test',\n 'first_name': 'Clark',\n 'last_name': 'Kent',\n 'email': 'someone',\n })\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(response.data['email'], [\n 'Enter a valid email address.'\n ])", "def test_eval_bad_input_msg_failure(self):\n\n def test_func():\n raise ValueError(\"test func\")\n\n with pytest.raises(AssertionError) as aerr:\n testing.eval_bad_input(test_func, ValueError, \"testing function\")\n\n assert str(aerr).find(\"unexpected error message\") >= 0\n return", "def valid_email(x: str) -> bool:\n if isinstance(x, str) and re.match(EMAIL_PATTERN, x):\n return True\n else:\n return False", "def test_user_signup_fail_no_email(self):\n\n with self.assertRaises(TypeError):\n invalidEmailUser = User.signup(username=\"invalidEmailUser\",\n password=\"HASHED_PASSWORD\",\n image_url=\"\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether a correct dict is returned for a db model.
def testForDBModel(self): class Books(db.Model): item_freq = db.StringProperty() freq = db.IntegerProperty() details = db.TextProperty() released = db.BooleanProperty() entity = Books() entity.item_freq = '5' entity.freq = 4 entity.details = 'Test Entity' entity.released = True entity.put() expected_dict = {'freq': 4, 'item_freq': '5', 'details': 'Test Entity', 'released': True} self.assertEqual(melange_db.toDict(entity), expected_dict)
[ "def test_if_to_dict_returns_dict(self):\n b = BaseModel()\n self.assertTrue(type(b.to_dict()) is dict)", "def test_model_read_as_dict(self):\n tm = TestModel.create(count=8, text='123456789', a_bool=True)\n column_dict = {\n 'id': tm.id,\n 'count': tm.count,\n 'text': tm.text,\n 'a_bool': tm.a_bool,\n }\n self.assertEqual(sorted(tm.keys()), sorted(column_dict.keys()))\n\n self.assertSetEqual(set(tm.values()), set(column_dict.values()))\n self.assertEqual(\n sorted(tm.items(), key=itemgetter(0)),\n sorted(column_dict.items(), key=itemgetter(0)))\n self.assertEqual(len(tm), len(column_dict))\n for column_id in column_dict.keys():\n self.assertEqual(tm[column_id], column_dict[column_id])\n\n tm['count'] = 6\n self.assertEqual(tm.count, 6)", "def test_dict(self):\n dummy = self.dummy\n test_dict = dummy.to_dict()\n self.assertTrue(\"__class__\" in test_dict)\n self.assertIsInstance(test_dict[\"__class__\"], str)\n self.assertTrue(\"id\" in test_dict)\n self.assertIsInstance(test_dict[\"id\"], str)\n self.assertTrue(\"created_at\" in test_dict)\n self.assertIsInstance(test_dict[\"created_at\"], str)\n self.assertTrue(\"updated_at\" in test_dict)\n self.assertIsInstance(test_dict[\"updated_at\"], str)\n dummy.test = 10\n test_dict = dummy.to_dict()\n self.assertTrue(\"test\" in test_dict)\n dummy.save()", "def _looks_like_database(obj):\n return (isinstance(obj, _Backend) or\n all(hasattr(obj, attr) for attr in\n ('find', 'all_items', 'delete', 'save'))\n )", "def test_type_objects(self):\n self.assertEqual(type(storage.all()), dict)", "def test_to_dict_not_dunder_dict(self):\n bm = BaseModel()\n self.assertNotEqual(bm.to_dict(), bm.__dict__)", "def _check_model(self, model, model_types, name):\n if not isinstance(model, dict):\n raise web.HTTPError(400, \"Invalid JSON data: %r\" % model)\n if not set(model).issubset(set(model_types)):\n raise web.HTTPError(400, \"Invalid JSON keys: %r\" % model)\n for key, value in model.items():\n if not isinstance(value, model_types[key]):\n raise web.HTTPError(\n 400,\n \"%s.%s must be %s, not: %r\"\n % (name, key, model_types[key], type(value)),\n )", "def test_dict_of_primitive():\n\n @model\n class Foo:\n names: Dict[str, bool]\n\n assert Foo.from_server({'names': {'a': True, 'b': False}}) == Foo(names={'a': True, 'b': False})", "def test_when_kwargs_passed_is_empty(self):\n my_dict = {}\n b = BaseModel(**my_dict)\n self.assertTrue(type(b.id) is str)\n self.assertTrue(type(b.created_at) is datetime)\n self.assertTrue(type(b.updated_at) is datetime)", "def test_get(self):\n correct_fields = {\n \"features\": self.features,\n \"num_features\": self.num_features,\n \"target\": self.target,\n \"method\": self.method,\n \"num_examples\": self.num_examples,\n }\n\n print(self.model)\n for field, ans in correct_fields.items():\n self.assertEqual(self.model._get(field), ans, \"{} failed\".format(field))", "def check_db_entry(self):\n raise NotImplementedError", "def is_dict(obj):\n return isinstance(obj, dict)", "def test_db_map(self):\n class WildDBNames(Model):\n\n id = columns.UUID(primary_key=True, default=lambda:uuid4())\n content = columns.Text(db_field='words_and_whatnot')\n numbers = columns.Integer(db_field='integers_etc')\n\n db_map = WildDBNames._db_map\n self.assertEquals(db_map['words_and_whatnot'], 'content')\n self.assertEquals(db_map['integers_etc'], 'numbers')", "def test_all_field_opts_model(self, all_field_opts):\n for field in all_field_opts:\n api_keys = field.keys()\n # Tests if API and model have same number of keys\n assert 
len(self.model_keys) == len(api_keys)\n # Tests if the API and model keys and value types are equal\n for key in api_keys:\n assert key in self.model_keys\n assert type(field[key]) in field_opt_model[key]", "def test_model_prediction(self):\n self.assertTrue(type(self.pred) is dict)", "def test_review_does_dict(self):\n\n dict4 = self.review2.to_dict()\n newDict = Review(**dict4)\n\n self.assertEqual(newDict.id, self.review2.id)\n self.assertIsNot(newDict, self.review2)\n self.assertIsInstance(newDict.created_at, datetime)\n self.assertIsInstance(newDict.updated_at, datetime)", "def test_for_creating_instance_and_to_dict(self):\n b2 = BaseModel()\n b2.name = \"Holberton\"\n b2.my_number = 89\n b3 = b2.to_dict()\n self.assertEqual(type(b3), dict)\n self.assertTrue('__class__' in b3)\n self.assertTrue('id' in b3)\n self.assertTrue('created_at' in b3)\n self.assertTrue('updated_at' in b3)\n self.assertTrue('name' in b3)\n self.assertTrue('my_number' in b3)\n\n b4 = BaseModel(**b3)\n self.assertEqual(b2.id, b4.id)\n self.assertEqual(b2.created_at, b4.created_at)\n self.assertEqual(b2.updated_at, b4.updated_at)\n self.assertEqual(b2.name, b4.name)\n self.assertEqual(b2.my_number, b4.my_number)\n self.assertNotEqual(b2, b4)", "def test_if_to_dict_returns_class_dunder_method(self):\n b = BaseModel()\n self.assertTrue(\"__class__\" in b.to_dict())", "def test_when_kwargs_passed_is_more_than_default(self):\n my_dict = {\"id\": uuid4(), \"created_at\": datetime.utcnow().isoformat(),\n \"updated_at\": datetime.utcnow().isoformat(),\n \"name\": \"Firdaus\"}\n b = BaseModel(**my_dict)\n self.assertTrue(hasattr(b, \"name\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print a list of n hits from Google
def get_n_results(query, n): from pprint import pprint gs = GoogleSearch(query) for hit in gs.get_results(n): pprint(hit) print
[ "def google_search(search_term, num_results=1):\r\n results = []\r\n for url in googlesearch.search(search_term, start=1, stop=1+num_results, num=1):\r\n results.append(url)\r\n return results", "def get_google_results(search):\r\n \r\n url='http://www.google.com/search?q='\r\n address = url + \"+\".join(search.split())\r\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5)\\\r\n AppleWebKit/537.36 (KHTML, like Gecko) Cafari/537.36'}\r\n #Start HTML Session\r\n session=HTMLSession()\r\n r=session.get(address, headers=headers)\r\n\r\n #Render address in HTML and gets Soup\r\n r.html.render()\r\n soup = BeautifulSoup(r.html.html, 'lxml')\r\n \r\n #Extract Results\r\n result = soup.find(id='result-stats')\r\n num_results = re.search(r'(\\d.*)\\s', result.contents[0]).group(1)\r\n print(f\"Total Google Results for '{search}': {num_results}\")\r\n return num_results", "def scrape_google(dom):\n results = []\n filtered = []\n searches = [\"100\", \"200\", \"300\", \"400\", \"500\"]\n data = \"\"\n urllib._urlopener = AppURLopener()\n user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'\n headers = {'User-Agent': user_agent, }\n #opener.addheaders = [('User-Agent','Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)')]\n for n in searches:\n url = \"http://google.com/search?hl=en&lr=&ie=UTF-8&q=%2B\" + dom + \"&start=\" + n + \"&sa=N&filter=0&num=100\"\n try:\n sock = urllib.urlopen(url)\n data += sock.read()\n sock.close()\n except AttributeError:\n request = urllib.request.Request(url, None, headers)\n response = urllib.request.urlopen(request)\n data += str(response.read())\n results.extend(unique(re.findall(\"href=\\\"htt\\w{1,2}:\\/\\/([^:?]*[a-b0-9]*[^:?]*\\.\" + dom + \")\\/\", data)))\n # Make sure we are only getting the host\n for f in results:\n filtered.extend(re.findall(\"^([a-z.0-9^]*\" + dom + \")\", f))\n time.sleep(2)\n return unique(filtered)", "def print_search_results(results):\n\n for index, result in enumerate(results['results']):\n\n print('[{num}] {title} ({year})'.format(\n num = colored(index+1, 'green'),\n title = colored(result['title'], 'yellow'),\n year = result['release_date'].split('-')[0])\n )\n print(result['overview'][:80] + '...')", "def google_index(self):\n user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36'\n headers = { 'User-Agent' : user_agent}\n query = {'q': 'info:' + self._url}\n google = \"https://www.google.com/search?\" + urlencode(query)\n data = requests.get(google, headers=headers)\n data.encoding = 'ISO-8859-1'\n soup = BeautifulSoup(str(data.content), 'html.parser')\n try:\n check = soup.find(id=\"rso\").find(\"div\").find(\"div\").find(\"h3\").find(\"a\")\n href = check['href']\n return 0\n except AttributeError:\n return 2", "def _collect_from_google(q, start=0, stop=20, save_dir='./'):\n import math\n import os\n import requests\n from pyquery import PyQuery as pq\n\n # check the directory exists.\n if not os.path.exists('{0}/{1}'.format(save_dir, q)):\n os.makedirs('{0}/{1}'.format(save_dir, q))\n\n url = 'https://www.google.com/search'\n params = {\n 'q': q,\n 'tbm': 'isch',\n 'tbs': 'itp:face'\n }\n\n # crawl links of images\n links = []\n for n in range(start, stop, 20):\n params['start'] = n\n\n response = requests.get(url=url, params=params, timeout=_TIMEOUT)\n html = pq(response.text)\n\n # parse links of images\n for item in html('#ires tr a img').items():\n 
links.append(item.attr('src'))\n\n # save images\n a = int(math.log10(stop)) + 1\n save_count = 0\n if len(links) > 0:\n for (i, link) in enumerate(links):\n try:\n img = requests.get(url=link, timeout=_TIMEOUT).content\n except:\n continue\n\n file_name = str(i).zfill(a)\n with open('{0}/{1}/google_{2}.jpg'.format(save_dir, q, file_name), 'wb') as f:\n f.write(img)\n save_count += 1\n\n print('Number of images saved is : {}'.format(save_count))", "def googlesearch(request):\n\n if request.method == 'POST':\n q = request.POST.get(u'q','')\n elif request.method == 'GET':\n q = request.GET.get(u'q','')\n\n\n if request.GET.has_key('start'):\n start = request.GET.get(u'start','')\n start = int(start)\n else:\n start = 0\n \n\n if q:\n r = search('site:www.visualspace.nl '+q, rsz='large',hl='nl',lr='lang_nl', start=start)\n results = r['results']\n if results:\n currentPageIndex = r['cursor']['currentPageIndex']\n estimatedResultCount = r['cursor']['estimatedResultCount']\n pages = len(r['cursor']['pages'])\n \n moreResultsUrl = r['cursor']['moreResultsUrl']\n \n if pages > 1:\n if start:\n prev = (currentPageIndex-1)*8\n \n if int(currentPageIndex)+1 < pages: \n next = (currentPageIndex+1)*8\n \n if int(estimatedResultCount) < start+8 :\n end = estimatedResultCount\n elif int(estimatedResultCount) > start+8:\n end = start+8\n else:\n end = 64\n \n # logging.debug(\"%s - %s van circa %s resultaten\" % (start+1, end, estimatedResultCount))\n # logging.debug(\"hee ik heb results: %s\" % r)\n # logging.debug(\"currentPageIndex: %s\" % r['cursor']['currentPageIndex'])\n # logging.debug(\"estimatedResultCount: %s\" % r['cursor']['estimatedResultCount'])\n # logging.debug(\"pages: %s\" % r['cursor']['pages'])\n # logging.debug(\"len: %s\" % len(r['cursor']['pages']))\n \n \n c = RequestContext(request, locals())\n\n return render_to_response('googlesearch.html', c)", "def main(url):\n words = fetch_words(url )\n print_items(words)", "def get_google_search_links(message):\n error = validate_command(message, [\"limit\"])\n if error:\n return error\n\n # First store the searched item in DB\n chat = storage_models.ChatHistory(discord_member_id=message.author_id, search_data=message.content)\n chat.insert()\n # google search\n limit = message.flags.get(\"limit\", 5)\n search_results = [url for url in search(message.content, stop=limit)]\n print(f\"Fetched {len(search_results)} results.\")\n return \"\\n\".join(search_results) if search_results else \"No data fetched.\"", "def print_results(results):", "def display_knn(query, knn_ids_array):\n print(\"Query string:\\n\", query,\"\\n\")\n print (\"Ten nearest neighbours:\\n\")\n for v in ids.tolist():\n print(reflist[v])", "def hashtags_distribution(data):\n\n tags_count = {}\n tags_to_objectid = {}\n hashtag_counts = []\n\n n = len(data['results'])\n avg = 0\n\n for row in data['results']:\n num_tags = len(row['tags'])\n\n if num_tags not in tags_to_objectid:\n tags_to_objectid[num_tags] = []\n tags_count[num_tags] = 0\n\n tags_to_objectid[num_tags].append(row['objectId'])\n tags_count[num_tags] += 1\n\n avg += num_tags\n hashtag_counts.append(num_tags)\n\n for k, v in tags_count.items():\n print \"%d hashtags: %d rows\" % (k, v)\n\n # compute average\n avg = avg / n\n\n sorted(hashtag_counts)\n\n print \"Total rows: %d\" % n\n print \"Average # of hashtags: %d\" % avg\n print \"Median # of hashtags: %d\" % median(hashtag_counts)", "def print_search(self, retrieved_docs, print_count=9999, book_num=None): \n i = 0\n if book_num is None:\n for book in 
retrieved_docs:\n for sentence in retrieved_docs[book]:\n if len(self.sentences[book][sentence].split()) < 40:\n print(\"book:\", book, \"sentence:\", sentence, self.sentences[book][sentence])\n if i > print_count:\n break\n i += 1\n else:\n if retrieved_docs.get(book_num) is not None:\n for sentence in retrieved_docs[book_num]:\n print(\"book:\", book_num, \"sentence:\", sentence, self.sentences[book_num][sentence])\n if i > print_count:\n break\n i += 1", "def get_jokes():\n\n dad_jokes = ''\n\n icanhasdadjoke_url = 'https://icanhazdadjoke.com/search'\n headers = {'User-Agent': 'https://github.com/alvarezcindy', \n 'Accept': 'text/plain'}\n\n # Calls 4 pages of results\n for n in range(1,5):\n params = {'current_page': n,\n 'limit': '30'}\n\n jokes = requests.get(icanhasdadjoke_url, headers=headers, params=params)\n dad_jokes += jokes.text\n \n return dad_jokes", "def popular_authors():\n\n results = fetch_query(\n \"\"\"select author_slug.name, count(log.path)\n from author_slug, log\n where log.path = '/article/' || author_slug.slug\n and log.status = '200 OK'\n group by author_slug.name\n order by count(log.path) desc;\"\"\"\n )\n print('\\n\\n' + \"Authors listed by popularity as defined by \"\n \"total article views:\" + '\\n')\n for item in results:\n print(item[0] + \": \" + str(\"{:,}\".format(item[1])) + \" views\")", "def print_results(self):\n\n print(\"TOP CONTACTED DOMAINS\")\n print(\"Num. Requests\\tDomain\")\n sorted_domains = sorted(\n self.networking_counters[\"domains\"].items(), key=lambda x: -x[1]\n )\n for domain_tuple in sorted_domains:\n print(f\"{domain_tuple[1]:>12}\\t{domain_tuple[0]:>5}\")\n print(\"TOP CONTACTED IPs\")\n print(\"Num. Requests\\tIP\")\n sorted_ips = sorted(\n self.networking_counters[\"domains\"].items(), key=lambda x: -x[1]\n )\n for ip_tuple in sorted_ips:\n print(f\"{ip_tuple[1]:>12}\\t{ip_tuple[0]:>12}\")\n print(\"TOP CONTACTED URLs\")\n print(\"Num. 
Requests\\tURL\")\n sorted_urls = sorted(\n self.networking_counters[\"domains\"].items(), key=lambda x: -x[1]\n )\n for url_tuple in sorted_urls:\n print(f\"{url_tuple[1]:>12}\\t{url_tuple[0]:>12}\")\n\n print(\"\\nNETWORK INFRASTRUCTURE\")\n for file_network in self.networking_infrastructure.items():\n contacted_addresses = file_network[1].values()\n if any(contacted_addresses):\n print(f\"File Hash: {file_network[0]}\")\n for network_inf in file_network[1].items():\n if network_inf[1]:\n print(f\"\\t{network_inf[0]}\")\n for address in network_inf[1]:\n if address[\"type\"] in (\"domain\", \"ip_address\"):\n print(f'\\t\\t{address[\"id\"]}')\n else:\n print(f'\\t\\t{address[\"context_attributes\"][\"url\"]}')", "def print_word_count(url):\n\twc = {}\n\twith urlopen(url) as story:\n\t\tfor line in story:\n\t\t\tline_words = line.decode('utf-8').split()\n\t\t\tfor word in line_words:\n\t\t\t\twc.setdefault(word, 0)\n\t\t\t\twc[word] += 1\n\t\n\tmost_used = 0\n\tmost_used_word = ''\n\tfor word_key in wc:\n\t\tif(wc[word_key] > most_used):\n\t\t\tmost_used = wc[word_key]\n\t\t\tmost_used_word = word_key\n\n\tprint('{} is used {} times'.format(most_used_word, wc[most_used_word]))", "def google_results_urls(search_term, number_results, language_code, site):\n\t# make sure the inputs have the right types\n\tassert isinstance(search_term, str), 'Search term must be a string'\n\tassert isinstance(number_results, int), 'Number of results must be an integer'\n\tassert isinstance(site, str), 'Site must be a string'\n\n\tquery_string = search_term.replace(' ', '+') + ' site:' + site\n\n\t# get google results (tbm: nws means google news only)\n\tpayload = {'tbm': 'nws', 'q': query_string, 'num': number_results, 'hl': language_code}\n\n\tresponse = requests.get('https://www.google.com/search', params=payload, headers=USER_AGENT)\n\n\tsoup = BeautifulSoup(response.text, 'html.parser')\n\n\t# only grab HTTP(S) links\n\turl_regex = \"^https?://\"\n\tlinks = [link.get('href') for link in soup.findAll('a', attrs={'href': re.compile(url_regex)})]\n\t# see if they're from the right site\n\treturn [l for l in links if (l.startswith(\"https://www.\" + site) or l.startswith(\"http://www.\" + site))]", "def get_occurrences():" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine if sentiment is positive, negative, or neutral. Algorithm to figure out if sentiment is positive, negative, or neutral; uses sentiment polarity from TextBlob, VADER Sentiment, and sentiment from the text-processing URL.
def sentiment_analysis(text): # pass text into sentiment url if True: ret = get_sentiment_from_url(text, sentimentURL) if ret is None: sentiment_url = None else: sentiment_url, neg_url, pos_url, neu_url = ret else: sentiment_url = None # pass text into TextBlob text_tb = TextBlob(text) # pass text into VADER Sentiment analyzer = SentimentIntensityAnalyzer() text_vs = analyzer.polarity_scores(text) # determine sentiment from our sources if sentiment_url is None: #threshold values if text_tb.sentiment.polarity < 0 and text_vs['compound'] <= -0.05: sentiment = "negative" elif text_tb.sentiment.polarity > 0 and text_vs['compound'] >= 0.05: sentiment = "positive" else: sentiment = "neutral" else: # this works if the above function executes properly if text_tb.sentiment.polarity < 0 and text_vs['compound'] <= -0.05 and sentiment_url == "negative": sentiment = "negative" elif text_tb.sentiment.polarity > 0 and text_vs['compound'] >= 0.05 and sentiment_url == "positive": sentiment = "positive" else: sentiment = "neutral" polarity = (text_tb.sentiment.polarity + text_vs['compound']) / 2 # output sentiment polarity print("************") print("Sentiment Polarity: " + str(round(polarity, 3))) # output sentiment subjectivity (TextBlob) print("Sentiment Subjectivity: " + str(round(text_tb.sentiment.subjectivity, 3))) # output sentiment print("Sentiment (url): " + str(sentiment_url)) print("Sentiment (algorithm): " + str(sentiment)) print("Overall sentiment (textblob): ", text_tb.sentiment) print("Overall sentiment (vader): ", text_vs) print("sentence was rated as ", round(text_vs['neg']*100, 3), "% Negative") print("sentence was rated as ", round(text_vs['neu']*100, 3), "% Neutral") print("sentence was rated as ", round(text_vs['pos']*100, 3), "% Positive") print("************") return polarity, text_tb.sentiment.subjectivity, sentiment
[ "def sentiment_analysis_by_text(self,tweet):\n blob = TextBlob(tweet['text'].decode('ascii', errors=\"replace\"))\n sentiment_polarity = blob.sentiment.polarity\n if sentiment_polarity < 0:\n sentiment = self.NEGATIVE\n elif sentiment_polarity <= 0.25:\n sentiment = self.NEUTRAL\n else:\n sentiment = self.POSITIVE\n tweet['sentiments'] = sentiment", "def extract_sentiment(text):\n text = TextBlob(text)\n return text.sentiment.polarity", "def compute_sentiment_for_tweet(tweet):\n text = polyglot.text.Text(tweet.lower(), hint_language_code='it')\n scores = [word.polarity for word in text.words if word.polarity != 0]\n return np.mean(scores) if scores else 0.0", "def calculate_sentiment(positive_words,negative_words,tweet_text):\n\tpos = 0\n\tneg = 0\n\tfor x in tweet_text:\n\t\tif np.any(positive_words==x):\n\t\t\tpos+=1\n\t\telif np.any(negative_words==x):\n\t\t\tneg+=1\n\treturn(pos,neg)", "def sentiment_analysis(text):\n return SentimentIntensityAnalyzer().polarity_scores(skip_gutenberg_header_and_tail(text))", "def print_polarity_from_input(quest, text):\n if quest == 'naive':\n blob = Naive_Analysis(text).sentiment\n return blob\n #this will be: Sentiment(classification='pos', p_pos=0.5702702702702702, p_neg=0.4297297297297299)\n else:\n blob = TextBlob(text).sentiment\n return blob.polarity", "def classify(tweet):\n score = 0\n for token in nltk_tokenizer.tokenize(tweet):\n if token in dict:\n wordInfos = pandas.DataFrame(dict[token]).ix[:,0] #if multiple descriptions for different POS tags, just use the first one for simplicity\n polarity = wordInfos['POLARITY']\n if polarity == 'negative':\n score += -1.0\n elif polarity == 'positive':\n score += 1.0\n return 'negative' if score<0 else 'positive' if score>0 else 'neutral'", "def polarity(text):\n \n vader_analyzer = SentimentIntensityAnalyzer()\n return (vader_analyzer.polarity_scores(text))", "def perform_sentimental_analysis():\r\n\r\n # Initialize variables\r\n results = []\r\n positive_sentiments_count = 0\r\n negative_sentiments_count = 0\r\n neutral_sentiments_count = 0\r\n\r\n tesla_processed_content_df = pd.read_csv('tesla_processed_content.csv')\r\n print(tesla_processed_content_df)\r\n\r\n # Compute compound polarity scores\r\n for line in tesla_processed_content_df.processed_content:\r\n\r\n pol_score = sia.polarity_scores(line)\r\n pol_score['text'] = line\r\n\r\n results.append(pol_score)\r\n\r\n\r\n # Read dataframe and setup compound score based on polarity scores\r\n results_df = pd.DataFrame.from_records(results)\r\n results_df.head()\r\n results_df['score'] = 0\r\n results_df.loc[results_df['compound'] > 0, 'score'] = 1\r\n results_df.loc[results_df['compound'] < -0.2, 'score'] = -1\r\n results_df.head()\r\n df2 = results_df[['text', 'score', 'compound']]\r\n print(df2)\r\n df2.to_csv('tesla_sentiment_analysis.csv', mode='a', encoding='utf-8', index=False)\r\n\r\n # Compute count of positive, negative and neutral sentiments\r\n df_positive = results_df[results_df.score == 1]\r\n positive_sentiments_count = positive_sentiments_count + df_positive.score.count()\r\n\r\n df_neutral = results_df[results_df.score == 0]\r\n neutral_sentiments_count = neutral_sentiments_count + df_neutral.score.count()\r\n\r\n df_negative = results_df[results_df.score == -1]\r\n negative_sentiments_count = negative_sentiments_count + df_negative.score.count()\r\n\r\n print(\"Positive Sentiments Count: \", positive_sentiments_count)\r\n print(\"Neutral Sentiments Count: \", neutral_sentiments_count)\r\n print(\"Negative Sentiments Count: 
\", negative_sentiments_count)\r\n\r\n input_content_count = positive_sentiments_count + negative_sentiments_count + neutral_sentiments_count\r\n\r\n # Compute percentage of positive, negative and neutral sentiments\r\n positive_sentiments_percentage = (positive_sentiments_count / input_content_count) * 100\r\n negative_sentiments_percentage = (negative_sentiments_count / input_content_count) * 100\r\n neutral_sentiments_percentage = (neutral_sentiments_count / input_content_count) * 100\r\n print(\"Positive Sentiments Percentage: \", round(positive_sentiments_percentage, 2))\r\n print(\"Neutral Sentiments Percentage: \", round(neutral_sentiments_percentage, 2))\r\n print(\"Negative Sentiments Percentage: \", round(negative_sentiments_percentage, 2))\r\n\r\n # Conclude Results\r\n if positive_sentiments_percentage > negative_sentiments_percentage:\r\n print(\r\n 'Positive sentiments percentage is more than Negative sentiments percentage based on the content analysed '\r\n 'on cnbc, so one should buy (i.e. invest) stocks of Tesla')\r\n else:\r\n print(\r\n 'Negative sentiments percentage is more than Positive sentiments percentage based on the content analysed '\r\n 'on cnbc, so one should sell (i.e. not invest) stocks of Tesla')", "def sentiment(text):\n words = pattern_split.split(text.lower())\n sentiments1 = map(lambda word: wordlist.get(word, 0), words)\n sentiments = []\n for k in sentiments1:\n\tif k != 0:\n\t\tsentiments.append(k)\n if sentiments:\n # How should you weight the individual word sentiments? \n # You could do N, sqrt(N) or 1 for example. Here I use sqrt(N)\n sentiment = float(sum(sentiments))/math.sqrt(len(sentiments))\n else:\n sentiment = 0\n print 'from function',sentiment\n return sentiment", "def sentiment(tweet, language):\r\n\tsentiment=0\r\n\ttext=Text(tweet, hint_language_code = language)\r\n\tfor w in text.words:\r\n\t\tsentiment+=w.polarity\r\n\treturn sentiment", "def _extract_sentiment_from_text(self, corpus_list, doc_name_to_id_dict):\n vader = SentimentIntensityAnalyzer()\n '''\n Go through the documents and rate their sentiment\n '''\n doc_count=0\n sentiment_feature_dict=defaultdict(list)\n for doc_name, row_id in doc_name_to_id_dict.iteritems():\n logger.debug(\"Extracting sentiment from: \" + doc_name)\n doc=corpus_list[row_id]\n ''' \n doc is one document from our corpus\n '''\n sentences=doc.split(\".\")\n pos_count=0\n neg_count=0\n prev_word_was_positive=False\n prev_word_was_negative=False\n pos_neg_count=0\n count=0\n longest_run_of_positives=0\n longest_run_of_negatives=0\n run_of_positives_count=0\n run_of_negatives_count=0\n score=vader.polarity_scores(' '.join(sentences))\n compound_polarity=score['compound']\n '''\n Rate the overall polarity of the document (1 positive, 0 negative)\n '''\n if compound_polarity>0:\n compound_polarity=1\n else:\n compound_polarity=0\n\n '''\n Rate each word in the corpus for sentiment and construct the word-based\n features\n '''\n for sentence in sentences:\n words=sentence.split(\" \")\n for word in words:\n score=vader.polarity_scores(word)\n '''\n If the negative sentiment of a word is greater than the positive sentiment\n '''\n if score['pos']>abs(score['neg']):\n pos_count+=1\n if prev_word_was_negative:\n pos_neg_count+=1\n prev_word_was_negative=False\n if run_of_negatives_count>longest_run_of_negatives:\n longest_run_of_negatives=run_of_negatives_count\n run_of_negatives_count=0\n else:\n run_of_positives_count+=1\n prev_word_was_positive=True\n\n '''\n If the positive sentiment of a word is greater 
than the negative sentiment\n '''\n if score['pos']<abs(score['neg']):\n neg_count+=1\n if prev_word_was_positive:\n prev_word_was_positive=False\n pos_neg_count+=1\n if run_of_positives_count>longest_run_of_positives:\n longest_run_of_positives=run_of_positives_count\n run_of_negatives_count=0\n else:\n run_of_negatives_count+=1\n prev_word_was_negative=True\n count+=1\n\n sentiment_feature_dict[doc_name].append([pos_count,neg_count,pos_neg_count,longest_run_of_negatives,longest_run_of_positives,compound_polarity])\n \n return sentiment_feature_dict", "def market_sentiment(raw_data):\n # TODO\n pass", "def my_sentiment_analyzer(documents):\r\n # Create a SentimentIntensityAnalyzer object.\r\n sid_obj = SentimentIntensityAnalyzer()\r\n preds = np.zeros(len(documents))\r\n\r\n for i, doc in enumerate(documents):\r\n sentiment_dict = sid_obj.polarity_scores(doc)\r\n\r\n if not sentiment_dict['neg'] > 0.3:\r\n if sentiment_dict['pos']-sentiment_dict['neg'] > 0:\r\n preds[i] = 1\r\n if not sentiment_dict['pos'] > 0.3:\r\n if sentiment_dict['pos']-sentiment_dict['neg'] <= 0:\r\n preds[i] = 0\r\n return preds", "def analyze(self, text):\n # split sentences into words\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n \n score = 0\n \n for word in tokens:\n if word.lower() in self.positives:\n score += 1\n elif word.lower() in self.negatives:\n score -= 1\n \n return score", "def detect_sentiment(text):\r\n\r\n document = language.types.Document(\r\n content=text,\r\n type=language.enums.Document.Type.PLAIN_TEXT)\r\n\r\n sentiment = client.analyze_sentiment(document).document_sentiment\r\n\r\n return sentiment.score, sentiment.magnitude", "def test_neutral_only(self):\n my_sentiment = sentiment_analyzer.get_sentiment(\"The sky is blue.\")\n self.assertTrue((my_sentiment[\"Overall_Sentiment\"] == 'Neutral') &\n (my_sentiment[\"Positive_Sentences\"] == 0) &\n (my_sentiment[\"Negative_Sentences\"] == 0) &\n (my_sentiment[\"Neutral_Sentences\"] == 1) &\n (my_sentiment[\"Total_Sentences\"] == 1))", "def __get_tweet_polarity(self, tweet):\n analysis = TextBlob(self.__normalize_tweet(tweet))\n return analysis.sentiment.polarity", "def sentiment_analysis(self,tweet):\n tweet['emoticons'] = []\n tweet['sentiments'] = []\n self.sentiment_analysis_by_emoticons(tweet)\n if ((len(tweet['sentiments']) == 0) or (tweet['sentiments'] == self.NEUTRAL) or (tweet['sentiments'] == self.CONFUSED)):\n self.sentiment_analysis_by_text(tweet)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Coordinates (x,y,z) of all nodes
def node_coordinates(self): return self._nc
[ "def get_node_coordinates(self):\n xn = self._centers_to_nodes(self.x)\n yn = self._centers_to_nodes(self.y)\n gn = Grid2D(x=xn, y=yn)\n return gn.xy", "def get_coords(self):\n\t\treturn self.x, self.y, self.z", "def mesh_node_coords(self) -> np.ndarray:\n return self._mesh_node_coords", "def coords(self, height, width):\n self.asArtist(height, width)\n result = []\n for node in self.postorder(include_self=True):\n result.append([node.Name, id(node), node.x2, node.y2] + [map(id, node.Children)])\n return result", "def position(self):\n\t\t\n\t\tx_all,y_all,z_all = list(),list(),list()\n\t\tfor ant in self.antennas:\n\t\t\tx,y,z = ant.position\n\t\t\tx_all.append(x)\n\t\t\ty_all.append(y)\n\t\t\tz_all.append(z)\n\t\t\n\t\treturn (x_all,y_all,z_all)", "def coordinates(self):\n\n nAtoms = len(self.atoms)\n coordinates = np.zeros((nAtoms,3))\n for i in range(nAtoms):\n coordinates[i,:] = self.atoms[i].coordinates()\n return coordinates", "def _get_node_coords(self, horizontal_grid):\n dual_area_cube = horizontal_grid.extract_cube(\n NameConstraint(var_name='dual_area'))\n node_lat = dual_area_cube.coord(var_name='vlat')\n node_lon = dual_area_cube.coord(var_name='vlon')\n\n # Fix metadata\n node_lat.bounds = None\n node_lon.bounds = None\n node_lat.var_name = 'nlat'\n node_lon.var_name = 'nlon'\n node_lat.standard_name = 'latitude'\n node_lon.standard_name = 'longitude'\n node_lat.long_name = 'node latitude'\n node_lon.long_name = 'node longitude'\n node_lat.convert_units('degrees_north')\n node_lon.convert_units('degrees_east')\n\n # Convert longitude to [0, 360]\n self._set_range_in_0_360(node_lon)\n\n return (node_lat, node_lon)", "def get_coords(self):\n c = self.mesh.coordinates()\n return c[:,0],c[:,1]", "def _get_coordinates(self, node):\n return [\n round(float(node.attrib['xmin']), 3),\n round(float(node.attrib['ymax']), 3),\n round(float(node.attrib['xmax']), 3),\n round(float(node.attrib['ymin']), 3)\n ]", "def getListOfNodePositions(self):\n return self.mesh.node_positions[:,0:2]", "def getNodeXY(id):\n for n in nodes:\n if n[0] == id:\n return (n[2], n[3])", "def get_coords(self):\n return self.x1, self.y1, self.x2, self.y2", "def atom_coords(self):\n coords = np.array([atom.coords for atom in self.atoms])\n return coords", "def coords(self):\n atom1_coords = self.atoms[0].coords\n atom2_coords = self.atoms[1].coords\n return (atom1_coords, atom2_coords)", "def xy(self):\n return self._x, self._y", "def getCoords(self):\n\n ensemble = self._ensemble\n if ensemble._confs is None:\n return None\n indices = ensemble._indices\n index = self._index\n if indices is None:\n coords = ensemble._confs[index].copy()\n which = ensemble._weights[index].flatten()==0\n coords[which] = ensemble._coords[which]\n else:\n coords = ensemble._confs[index, indices].copy()\n which = ensemble._weights[index, indices].flatten()==0\n coords[which] = ensemble._coords[indices][which]\n return coords", "def vertex_coordinates(self): \n v1,v2 = self.__vertices\n return [v1.coordinate(), v2.coordinate()]", "def _edge_coords_3d_iter(edges):\n for a, b in edges:\n yield (a.coord + tuple([int(a.floor)]), b.coord + tuple([int(b.floor)]))", "def get_cvert_coords_x(cell_ID, nodes, cells):\r\n coord_x = [nodes[i-1].nx for i in cells[cell_ID].itr_nodes]\r\n\r\n return coord_x", "def getCoords(self): # real signature unknown; restored from __doc__\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Provides a unique list of boundary codes.
def boundary_codes(self): return [code for code in self.valid_codes if code > 0]
[ "def get_boundary_list(self):\n # TODO MAYBE: store boundaries in separate list (?)\n return [self[ii] for ii in range(self.n_obstacles) if self[ii].is_boubndary]", "def get_boundary_coords():\n coords = []\n for x in range(calib.M_SIZE_X):\n for y in range(calib.M_SIZE_Y):\n if cfg.get_chip_coords(x,y)[0] == -1:\n coords.append((x,y))\n \n return coords", "def mbr_identifiers(self):\n return []", "def license_codes(legalcodes):\n return sorted(set(lc[\"license_code\"] for lc in legalcodes))", "def get_chr_names (self):\n l = set(self.regions.keys())\n return l", "def getcodes(data):\n print(\"working on codes...\")\n codelist = []\n for i in range(len(data)):\n if data[i][23] not in codelist:\n codelist.append(data[i][23])\n if data[i][26] not in codelist:\n codelist.append(data[i][26])\n return codelist", "def non_gap_bases(baselist):\n\treturn [b for b in baselist if b not in gaps]", "def valid_bridge_ids(bridge_data: List[list]) -> List[int]:\n valid_ids = []\n for i in range(len(bridge_data)):\n valid_ids.append(i + 1)\n return valid_ids", "def get_bnd_numbering(self):\n\n self.logger.debug(\"Getting boundary numbering\")\n nptfr = self.get_bnd_npoin()\n nbor = np.zeros((nptfr), dtype=np.int32)\n self.error = HermesFile._hermes.get_bnd_numbering(\\\n self.fformat,\n self.my_id,\n self.typ_bnd_elem,\n nbor,\n nptfr)\n # Switching to Python Numbering\n nbor -= 1\n\n return nbor", "def get_codes_list(df_data):\n codes_list = df_data['Code'].tolist()\n codes_list.remove('OAVG')\n codes_list.append('OED')\n return codes_list", "def detect_coupled_bases(self, basis_list):\n #Create list of states to filter down\n states = numpy.array(range(self.nstates))\n #Find the Intersections\n for basis in basis_list:\n states = numpy.intersect1d(self.coupled_states[basis], states) #Find the state indicies which overlap with where the basis is fully coupled\n return state_index\n #Remove states where all other basis functions are not 0", "def get_participating_regions(self):\n # For first cut just returning east and west\n return [\"east\", \"west\"]", "def get_authentic_bbs(bv):\n good_bbs = list()\n\n total_symbols = get_symbols(bv)\n # filter symbols to those that can be referenced in code\n total_symbols = [\n s for s in total_symbols if s.type.name in\n ['ImportedFunctionSymbol', 'FunctionSymbol', 'DataSymbol']\n ]\n\n # code refs from strings\n for s in bv.strings:\n good_bbs.extend(get_code_ref_bbs(bv, s.start))\n\n # code refs from symbols\n for sym in total_symbols:\n good_bbs.extend(get_code_ref_bbs(bv, sym.address))\n\n # filter out duplicate bb\n return set(good_bbs)", "def find_domain_boundary(self):\n boundary_edges = []\n boundary_edge_inds = []\n boundary_neighborhoods = []\n boundary_neighborhood_inds = []\n\n fs = self.perturbation_currents[0]\n\n edge_list = self.G_pruned.edges()\n\n for i, e in enumerate(edge_list):\n bord = self.bordering_cycles[tuple(sorted(e))]\n\n if len(bord) != 2:\n #print \"# bordering cycles:\", len(bord), \"!?\"\n continue\n\n c1, c2 = self.cycles[bord[0]], self.cycles[bord[1]]\n f1, f2 = fs[bord[0]], fs[bord[1]]\n \n if np.sign(c1.orientation()*f1) \\\n != np.sign(c2.orientation()*f2):\n boundary_edges.append(e)\n boundary_edge_inds.append(i)\n\n local_neighborhood_1 = [g for g in c1.edges\n if tuple(sorted(g)) != e]\n local_neighborhood_2 = [g for g in c2.edges\n if tuple(sorted(g)) != e]\n\n neighs = local_neighborhood_1 + local_neighborhood_2\n neigh_inds = [edge_list.index(tuple(sorted(n)))\n for n in neighs]\n \n 
boundary_neighborhoods.append(neighs)\n boundary_neighborhood_inds.append(neigh_inds)\n\n # edge tuples\n self.domain_boundary = boundary_edges\n\n # indices of the edges in the edge list\n self.domain_boundary_inds = boundary_edge_inds\n \n # edges that are in the same cycle as the boundary edge\n self.boundary_neighborhoods = boundary_neighborhoods\n self.boundary_neighborhood_inds = boundary_neighborhood_inds", "def getCprMask(self) -> List[int]:\n ...", "def get_grain_boundaries(self) -> List[\"GrainBoundary\"]:\n return list(self)", "def get_valid_cids(ibs):\n # FIXME: configids need reworking\n chip_config_rowid = ibs.get_chip_config_rowid()\n cid_list = ibs.db.get_all_rowids_where(FEATURE_TABLE, 'config_rowid=?', (chip_config_rowid,))\n return cid_list", "def boundary_defs(self):\n if self._boundary_defs is None:\n Nbdry=self.n_boundaries\n\n bdefs=np.zeros(Nbdry, self.boundary_dtype)\n for i in range(Nbdry):\n bdefs['id'][i]=\"boundary %d\"%(i+1)\n bdefs['name']=bdefs['id']\n if self.boundary_scheme=='id':\n bdefs['type']=bdefs['id']\n elif self.boundary_scheme in ['element','grouped']:\n self.log.warning(\"This is outdated, and may be incorrect for DFM output with out-of-order exchanges\")\n bc_segs=self.bc_segs() \n self.infer_2d_elements()\n if self.boundary_scheme=='element':\n bdefs['type']=[\"element %d\"%( self.seg_to_2d_element[seg] )\n for seg in bc_segs]\n elif self.boundary_scheme=='grouped':\n bc_groups=self.group_boundary_elements()\n bdefs['type']=[ bc_groups['name'][self.seg_to_2d_element[seg]] \n for seg in bc_segs]\n elif self.boundary_scheme == 'lgrouped':\n bc_lgroups=self.group_boundary_links()\n if 0:\n # old way, assume the order of things we return here should\n # follow the order of they appearin pointers\n bc_exchs=np.nonzero(self.pointers[:,0]<0)[0]\n self.infer_2d_links()\n bdefs['type']=[ bc_lgroups['name'][self.exch_to_2d_link['link'][exch]] \n for exch in bc_exchs]\n else:\n self.log.info(\"Slowly setting boundary info\")\n for bdry0 in range(Nbdry):\n exchs=np.nonzero(-self.pointers[:,0] == bdry0+1)[0]\n assert len(exchs)==1 # may be relaxable.\n exch=exchs[0]\n # 2018-11-29: getting some '0' types.\n # this is because exch_link is mapping to a non-boundary\n # link.\n exch_link=self.exch_to_2d_link['link'][exch]\n bdefs['type'][bdry0] = bc_lgroups['name'][exch_link]\n self.log.info(\"Done setting boundary info\")\n else:\n raise ValueError(\"Boundary scheme is bad: %s\"%self.boundary_scheme)\n self._boundary_defs=bdefs\n return self._boundary_defs", "def locate_restriction_sites(self):\r\n result = []\r\n for i in range(len(self.sq)):\r\n for j in range(4, 13, 1):\r\n if i+j > len(self.sq):\r\n break\r\n else:\r\n og_sq = self.sq[i: i+j]\r\n list = []\r\n x = -1\r\n while x >= -len(og_sq):\r\n if og_sq[x] == \"A\":\r\n list.append(\"T\")\r\n elif og_sq[x] == \"G\":\r\n list.append(\"C\")\r\n elif og_sq[x] == \"T\":\r\n list.append(\"A\")\r\n elif og_sq[x] == \"C\":\r\n list.append(\"G\")\r\n x -= 1\r\n comple_sq = \"\"\r\n for k in list:\r\n\r\n comple_sq += k\r\n if (og_sq) == (comple_sq):\r\n result.append((i+1, j))\r\n return result", "def getNoGoLanduseTypes():\n\n noGoLanduse = [1, 2]\n return noGoLanduse" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Are coordinates geographical (LONG/LAT)?
def is_geo(self): return self._projstr == "LONG/LAT"
[ "def is_geo(self) -> bool:\n return self._projstr == \"LONG/LAT\"", "def locn_is_latlong():\n s = read_command(\"g.region\", flags='pu')\n kv = parse_key_val(s, ':')\n if kv['projection'].split(' ')[0] == '3':\n return True\n else:\n return False", "def test_validate_coordinates():\n lat_less_than = (-91.00, 1.0)\n lat_more_than = (91.00, 1.0)\n lon_less_than = (1.00, -181.0)\n lon_more_than = (1.00, 181.0)\n\n assert validate_coordinates(lat_less_than) == [lat_less_than, \"latitude less than -90\"]\n assert validate_coordinates(lat_more_than) == [lat_more_than, \"latitude greater than 90\"]\n assert validate_coordinates(lon_less_than) == [lon_less_than, \"longitude less than -180\"]\n assert validate_coordinates(lon_more_than) == [lon_more_than, \"longitude greater than 180\"]", "def verifyLatLon(lat:float, lon:float) -> bool:\n return verifyLatitude(lat) and verifyLongitude(lon)", "def is_geocoded(self):\n return (self._lat, self._lon) != (None, None)", "def has_coordinates(geometry):\n try:\n return 'coordinates' in geometry\n except (AttributeError, TypeError):\n return False", "def test_get_coords_list_valid(self):\n coupon = COUPON_FACTORY.create_coupon()\n coords_list = coupon.get_location_coords_list()\n self.assertAlmostEqual(int(float(coords_list[0][0])), -73)\n self.assertAlmostEqual(int(float(coords_list[0][1])), 41)", "def are_coordinates_in_city(coordinates, cities):\n if 'latitudeInDegrees' in coordinates:\n coordinates['y'] = coordinates[\"latitudeInDegrees\"]\n coordinates['x'] = coordinates[\"longitudeInDegrees\"]\n\n lat = coordinates['y']\n long = coordinates['x']\n\n location = gis_utils.reverse_geocode_addr([long, lat])\n if location['address']['City'] in cities and \\\n location['address']['Region'] == 'Massachusetts':\n return True\n\n return False", "def coordinate_checker(self, a, b):\n self.assertAlmostEqual(a[\"lat\"], b[\"lat\"], 3)\n self.assertAlmostEqual(a[\"lng\"], b[\"lng\"], 3)", "def verify_location(coordinates):\n\n coordinates.columns = coordinates.columns.str.lower()\n\n # Verify that all air quality locations are (roughly) within Oregon\n for ind in coordinates.index:\n\n lat = coordinates.at[ind, 'latitude']\n long = coordinates.at[ind, 'longitude']\n\n if (40 <= lat <= 47) and (-125 <= long <= -115):\n pass\n else:\n warnings.warn(f\"Coordinate not in Oregon at index: {ind}\")", "def is_location_in_city(address, coordinates):\n if address:\n return is_address_in_city(address)\n if coordinates:\n return are_coordinates_in_city(coordinates, gis_utils.NEIGHBORHOODS)\n\n return True", "def test_geographical_coordinates_with_valid_address(self):\n valid_address = \"576 Natoma St., San Francisco CA\"\n geo_coords = GeographicalCoordinates(valid_address)\n\n self.assertNotEqual(geo_coords.latitude, 0.0)\n self.assertNotEqual(geo_coords.longitude, 0.0)\n self.assertEqual(geo_coords.status, 'OK')", "def artistLocationGeo(self):\n try:\n lat = float(self.locationGraph.objects(self.locationURI, self.latPredicate).next())\n lon = float(self.locationGraph.objects(self.locationURI, self.longPredicate).next())\n print \"Latitude is\", lat\n print \"Longitude is\", lon\n return lat, lon\n except StopIteration: # If generator is empty\n print \"No geodata!\"\n except AttributeError: # If locationURI hasn't been defined\n print \"LocationURI not defined!\"", "def has_location(state: State) -> bool:\n return (\n isinstance(state, State)\n and isinstance(state.attributes.get(ATTR_LATITUDE), float)\n and isinstance(state.attributes.get(ATTR_LONGITUDE), float)\n )", 
"def _check_area(self):\n (lat_max, lon_min, lat_min, lon_max) = self.area\n if not (\n -90 <= lat_max <= 90\n and -90 <= lat_min <= 90\n and -180 <= lon_min <= 180\n and -180 <= lon_max <= 180\n and lat_max > lat_min\n and lon_max != lon_min\n ):\n raise ValueError(\n \"Provide coordinates as lat_max lon_min lat_min lon_max. \"\n \"Latitude must be in range -180,+180 and \"\n \"longitude must be in range -90,+90.\"\n )", "def coordinate_length_ok(latitude, longitude):\n if len(str(latitude)) > 6 and len(str(longitude)) > 6:\n return True\n return False", "def lonlat2xy(lon,lat):\n from pylab import meshgrid,cos,pi\n r = 6371.e3\n #lon = lon-lon[0]\n if lon.ndim == 1:\n lon,lat = meshgrid(lon,lat)\n x = 2*pi*r*cos(lat*pi/180.)*lon/360.\n y = 2*pi*r*lat/360.\n return x,y", "def test_2d(self):\n x = geo_uri(\"geo:40.685922,-111.853206;crs=wgs84\")\n self.assertEqual('wgs84', x.crs)\n self.assertAlmostEqual(40.685922, x.lattitude, places=6)\n self.assertAlmostEqual(-111.853206, x.longitude, places=6)\n self.assertIsNone(x.altitude)\n self.assertEqual(\"geo:40.685922,-111.853206;crs=wgs84\", str(x))", "def is_in_box(self, lat, lng):\n is_between_horizontal = self.right >= lat >= self.left\n is_between_vertical = self.top >= lng >= self.bottom\n coord_is_in_box = is_between_horizontal and is_between_vertical\n print('IsInBox ({},{})? {}'.format(lat, lng, coord_is_in_box))\n return coord_is_in_box", "def verifyLongitude(lon:float) -> bool:\n return (-180 <= lon <= 180)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The maximum number of nodes for an element
def max_nodes_per_element(self): maxnodes = 0 for local_nodes in self.element_table: n = len(local_nodes) if n > maxnodes: maxnodes = n return maxnodes
[ "def number_of_nodes():\n return 3", "def max_component_size(self) -> int:\n nodes_len = len(self.nodes)\n stack: List[int] = []\n visited = [False]*nodes_len\n max_size = 0\n\n for u in range(nodes_len):\n if visited[u]:\n continue\n\n size = 1\n visited[u] = True\n stack.append(u)\n while stack:\n v = stack.pop()\n v_edges = self.edges[v]\n\n for k in range(nodes_len):\n if not v_edges[k]: # Not a neighbour\n continue\n if visited[k]:\n continue\n\n size += 1\n visited[k] = True\n stack.append(k)\n\n if size > max_size:\n max_size = size\n\n return max_size", "def max_items(self) -> ConfigNodePropertyInteger:\n return self._max_items", "def _max_cardinality_node(G, choices, wanna_connect):\n max_number = -1\n for x in choices:\n number = len([y for y in G[x] if y in wanna_connect])\n if number > max_number:\n max_number = number\n max_cardinality_node = x\n return max_cardinality_node", "def num_nodes(self):\n return self.numNodes.value", "def max_n_edges(self):\n m = 0\n for n in self.nodes:\n k = self.edges_connected(n)\n print(k)\n if k > m:\n m = k\n return k", "def getNumOfNodes(self):\n return self.__num_of_nodes", "def number_of_nodes(self):\n\t\treturn number_of_nodes(self.network)", "def number_of_nodes(self) -> int:\n return pulumi.get(self, \"number_of_nodes\")", "def get_num_of_children(self):\n\n return len(self.children)", "def number_of_nodes(self):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.object.node\", \r\n self._object._eco_id)\r\n val = p2e._app.Request(arg_str)\r\n return p2e._base._util._convert_str_to_type(val, int)", "def _get_max_item_count(self):\n return 7 - len(self.constants)", "def n_elements(self, f: Feature) -> int:\n return self.features[id(f)].n_elements()", "def countMaxOSMNodesForStation(self):\r\n for st in self.UICstlist:\r\n if self.max_OSM_nodes_for_station < len(st.OSMref) :\r\n self.max_OSM_nodes_for_station = len(st.OSMref)", "def max_n_elements(self):\n ty = self._header.eieio_type\n return (UDP_MESSAGE_MAX_SIZE - self._header.size) // (\n ty.key_bytes + ty.payload_bytes)", "def __len__(self):\n num_of_nodes = 0\n cur_node = self.head\n while cur_node:\n num_of_nodes += 1\n cur_node = cur_node.next\n return num_of_nodes", "def NoOfSpanningTreeRoots(self):\n return self._get_attribute('noOfSpanningTreeRoots')", "def maximum_element_size_for_length(length):\r\n\r\n return (2 ** (7 * length)) - 2", "def max_size(self):\n return self.info_sliced.largest_intermediate", "def num_node_labels(self):\n # TODO: change to unique as what we did in graph.py\n return max([gen.num_node_labels for gen in self.generators])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Type is either mesh or Dfsu2D (2 horizontal dimensions)
def is_2d(self): return self._type <= 0
[ "def is_p2dg(mesh, dimension):\n\n return (mesh.continuity == -1 and\n mesh.shape.dimension == dimension and\n mesh.shape.type == 'lagrangian' and\n mesh.shape.degree == 2)", "def is2D(self, unit: 'int const'=0) -> \"SbBool\":\n return _coin.SoMultiTextureCoordinateElement_is2D(self, unit)", "def is_p2(mesh, dimension):\n\n return (mesh.continuity > -1 and\n mesh.shape.dimension == dimension and\n mesh.shape.type == 'lagrangian' and\n mesh.shape.degree == 2)", "def FieldType(self) -> _n_2_t_4:", "def isMesh(self, node):\r\n node = self.convertToPyNode(node)\r\n if nodeType(node) == 'transform':\r\n if nodeType(node.getShape()) == 'mesh':\r\n return 1\r\n else:\r\n return 0", "def itkStochasticFractalDimensionImageFilterIUL2IUL2_cast(*args):\n return _itkStochasticFractalDimensionImageFilterPython.itkStochasticFractalDimensionImageFilterIUL2IUL2_cast(*args)", "def testField1TSSetFieldNoProfileSBTPerGeoTypes(self):\n fname=\"Pyfile78.med\"\n coords=DataArrayDouble([-0.3,-0.3,0., 0.2,-0.3,0., 0.7,-0.3,0., -0.3,0.2,0., 0.2,0.2,0., 0.7,0.2,0., -0.3,0.7,0., 0.2,0.7,0., 0.7,0.7,0. ],9,3)\n targetConn=[0,3,4,1, 1,4,2, 4,5,2, 6,7,4,3, 7,8,5,4];\n m0=MEDCouplingUMesh(\"mesh\",3) ; m0.setCoords(coords)\n m0.allocateCells()\n for elt in [[0,1,2,3],[1,2,3,4],[2,3,4,5],[3,4,5,6],[4,5,6,7],[5,6,7,8]]:#6\n m0.insertNextCell(NORM_TETRA4,elt)\n pass\n for elt in [[0,1,2,3,4],[1,2,3,4,5],[2,3,4,5,6],[3,4,5,6,7],[4,5,6,7,8]]:#5\n m0.insertNextCell(NORM_PYRA5,elt)\n pass\n for elt in [[0,1,2,3,4,5],[1,2,3,4,5,6],[2,3,4,5,6,7],[3,4,5,6,7,8]]:#4\n m0.insertNextCell(NORM_PENTA6,elt)\n pass\n m0.checkCoherency2()\n m1=MEDCouplingUMesh(); m1.setName(\"mesh\")\n m1.setMeshDimension(2);\n m1.allocateCells(5);\n m1.insertNextCell(NORM_TRI3,3,targetConn[4:7]);\n m1.insertNextCell(NORM_TRI3,3,targetConn[7:10]);\n m1.insertNextCell(NORM_QUAD4,4,targetConn[0:4]);\n m1.insertNextCell(NORM_QUAD4,4,targetConn[10:14]);\n m1.insertNextCell(NORM_QUAD4,4,targetConn[14:18]);\n m1.setCoords(coords);\n m3=MEDCouplingUMesh(\"mesh\",0) ; m3.setCoords(coords)\n m3.allocateCells()\n m3.insertNextCell(NORM_POINT1,[2])\n m3.insertNextCell(NORM_POINT1,[3])\n m3.insertNextCell(NORM_POINT1,[4])\n m3.insertNextCell(NORM_POINT1,[5])\n #\n mm=MEDFileUMesh()\n mm.setMeshAtLevel(0,m0)\n mm.setMeshAtLevel(-1,m1)\n mm.setMeshAtLevel(-3,m3)\n mm.write(fname,2)\n #### The file is written only with one mesh and no fields. 
Let's put a field on it geo types per geo types.\n mm=MEDFileMesh.New(fname)\n fs=MEDFileFields()\n fmts=MEDFileFieldMultiTS()\n f1ts=MEDFileField1TS()\n for lev in mm.getNonEmptyLevels():\n for gt in mm.getGeoTypesAtLevel(lev):\n p0=mm.getDirectUndergroundSingleGeoTypeMesh(gt)\n f=MEDCouplingFieldDouble(ON_CELLS) ; f.setMesh(p0)\n arr=DataArrayDouble(f.getNumberOfTuplesExpected()) ; arr.iota()\n f.setArray(arr) ; f.setName(\"f0\")\n f1ts.setFieldNoProfileSBT(f)\n pass\n pass\n self.assertEqual(mm.getNonEmptyLevels(),(0,-1,-3))\n for lev in [0,-1,-3]:\n mm.getDirectUndergroundSingleGeoTypeMeshes(lev) # please let this line, it is for the test to emulate that\n pass\n fmts.pushBackTimeStep(f1ts)\n fs.pushField(fmts)\n fs.write(fname,0)\n del fs,fmts,f1ts\n #### The file contains now one mesh and one cell field with all cells wathever their level ang type fetched.\n fs=MEDFileFields(fname)\n self.assertEqual(len(fs),1)\n self.assertEqual(len(fs[0]),1)\n f1ts=fs[0][0]\n self.assertEqual(f1ts.getFieldSplitedByType(),[(0,[(0,(0,4),'','')]),(3,[(0,(4,6),'','')]),(4,[(0,(6,9),'','')]),(14,[(0,(9,15),'','')]),(15,[(0,(15,20),'','')]),(16,[(0,(20,24),'','')])])\n self.assertTrue(f1ts.getUndergroundDataArray().isEqual(DataArrayDouble([0,1,2,3,0,1,0,1,2,0,1,2,3,4,5,0,1,2,3,4,0,1,2,3]),1e-12))\n pass", "def _faceDivStencilz(self):\n if self.dim == 1 or self.dim == 2:\n return None\n elif self.dim == 3:\n Dz = kron3(ddx(self.nCz), speye(self.nCy), speye(self.nCx))\n return Dz", "def TypeHandle(self) -> _n_2_t_12:", "def dtype(self):\n return self.mesh.dtype", "def isTwoComponents(self):\r\n if self.format in [\"slinear\",\"slogarithmic\",\"scomplex\",\r\n \"smith\",\"sadmittance\",\"plinear\",\r\n \"plogarithmic\",\"polar\"]:\r\n return True\r\n else:\r\n return False", "def itkStochasticFractalDimensionImageFilterIF2IUL2_cast(*args):\n return _itkStochasticFractalDimensionImageFilterPython.itkStochasticFractalDimensionImageFilterIF2IUL2_cast(*args)", "def convert_to_2d(self, A: np.ndarray = None, d1: float = 400, d2: float= 400) -> np.ndarray :\n # set A to the Identity matrix, if you haven't been given another one explicitly.\n if A is None:\n A: np.ndarray = np.identity(4,dtype=np.float)\n\n # faces is an N x 3 x 4 matrix built from the imported mesh.\n faces: np.ndarray = self.apply_transform(A)\n #------------------------------------------------------------------------\n # TODO: you write this. Build a perspective converter (3 x 4) matrix, and multiply it by each of the transformed\n # vectors in the list, \"faces\" generated just before this comment. (Base this on the apply_transform() method I wrote.) Don't forget to\n # divide by w at the end. 
For each point, return just the normalized (x, y), not (x, y, 1) or (wx, wy, w).\n\n\n\n\n #------------------------------------------------------------------------", "def _get_dsl_fmap_shape_nc1hwc0():\n valid_shape = ConvParam.fusion_para.get(\"valid_shape\")\n if valid_shape:\n fmap_shape_nc1hwc0 = tuple(shape_to_list(valid_shape))\n else:\n fmap_shape_nc1hwc0 = tuple(shape_to_list(data.shape))\n return fmap_shape_nc1hwc0", "def _conv2d_fusion_para(inputs, outputs):\r\n input_memory_type = inputs.get(\"addr_type\") \\\r\n if \"addr_type\" in inputs else 0\r\n output_memory_type = outputs.get(\"addr_type\") \\\r\n if \"addr_type\" in outputs else 0\r\n valid_shape = inputs.get(\"valid_shape\") \\\r\n if \"valid_shape\" in inputs else ()\r\n slice_offset = inputs.get(\"slice_offset\") \\\r\n if \"slice_offset\" in inputs else ()\r\n l1_fusion_type = inputs.get(\"L1_fusion_type\") \\\r\n if \"L1_fusion_type\" in inputs else -1\r\n\r\n fmap_l1_addr_flag = inputs.get(\"L1_addr_flag\", \"nothing\")\r\n fmap_l1_valid_size = inputs.get(\"L1_valid_size\", -1)\r\n\r\n l1_fusion_enable_flag = get_L1_info(\"L1_fusion_enabled\")\r\n if not l1_fusion_enable_flag:\r\n l1_fusion_type = -1\r\n\r\n valid_shape = _shape_to_list(valid_shape)\r\n slice_offset = _shape_to_list(slice_offset)\r\n\r\n l2_fusion_enable_flag = get_L1_info(\"L2_fusion_enabled\")\r\n\r\n if not l2_fusion_enable_flag and (not l1_fusion_enable_flag):\r\n input_memory_type = 0\r\n output_memory_type = 0\r\n valid_shape = []\r\n slice_offset = []\r\n l1_fusion_type = -1\r\n\r\n if input_memory_type not in (0, 1, 2):\r\n err_man.raise_err_input_mem_type(\"conv2d\", input_memory_type)\r\n if output_memory_type not in (0, 1, 2):\r\n err_man.raise_err_output_mem_type(\"conv2d\", output_memory_type)\r\n if valid_shape and not slice_offset:\r\n err_man.raise_err_specific_user(\"conv2d\", \"if valid_shape exists \"\\\r\n + \"slice_offset can not be []\")\r\n\r\n fusion_para = {\"input_memory_type\": input_memory_type,\r\n \"output_memory_type\": output_memory_type,\r\n \"valid_shape\": valid_shape, \"slice_offset\": slice_offset, \\\r\n \"l1_fusion_type\": l1_fusion_type, \\\r\n \"fmap_l1_addr_flag\": fmap_l1_addr_flag, \\\r\n \"fmap_l1_valid_size\": fmap_l1_valid_size}\r\n\r\n return fusion_para", "def itkStochasticFractalDimensionImageFilterIUC2IUL2_cast(*args):\n return _itkStochasticFractalDimensionImageFilterPython.itkStochasticFractalDimensionImageFilterIUC2IUL2_cast(*args)", "def itkStochasticFractalDimensionImageFilterIUL2IUC2_cast(*args):\n return _itkStochasticFractalDimensionImageFilterPython.itkStochasticFractalDimensionImageFilterIUL2IUC2_cast(*args)", "def surface(degreeU=int, degreeV=int, formU=\"string\", worldSpace=bool, knotU=float, objectSpace=bool, formV=\"string\", point=\"string\", knotV=float, pointWeight=\"string\", name=\"string\"):\n pass", "def convert_to_2d(self):\n if self._2d is None:\n self._2d = numpy.zeros((16, 16), numpy.uint32)\n if self._dimension is BiomesShape.Shape3D and self._3d is not None:\n # convert from 3D\n self._2d[:, :] = numpy.kron(\n numpy.reshape(self._3d[:, 0, :], (4, 4)), numpy.ones((4, 4))\n )\n self._dimension = BiomesShape.Shape2D", "def itkStochasticFractalDimensionImageFilterID2IUL2_cast(*args):\n return _itkStochasticFractalDimensionImageFilterPython.itkStochasticFractalDimensionImageFilterID2IUL2_cast(*args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Does the mesh consist of triangles only?
def is_tri_only(self): return self.max_nodes_per_element == 3 or self.max_nodes_per_element == 6
[ "def test_vertex_only(self):\n\n v = g.random((1000, 3))\n v[g.np.floor(g.random(90) * len(v)).astype(int)] = v[0]\n\n mesh = g.trimesh.Trimesh(v)\n\n assert len(mesh.vertices) < 950\n assert len(mesh.vertices) > 900", "def using_triangle():\r\n return HAS_TRIANGLE", "def is_triangle(self):\n return (self.p3.x - self.p1.x) * (self.p2.y - self.p1.y) != (\n self.p3.y - self.p1.y) * (self.p2.x - self.p1.x)", "def isMesh(self, node):\r\n node = self.convertToPyNode(node)\r\n if nodeType(node) == 'transform':\r\n if nodeType(node.getShape()) == 'mesh':\r\n return 1\r\n else:\r\n return 0", "def boolean_difference_mesh_mesh(A, B):\n pass", "def is_triangle_exist(a, b, c):\n if a + b > c and a + c > b and b + c > a:\n return True\n else:\n return False", "def is_triangle(centers_of_mass):\n l1 = np.linalg.norm(centers_of_mass[1] - centers_of_mass[0])\n l2 = np.linalg.norm(centers_of_mass[2] - centers_of_mass[0])\n l3 = np.linalg.norm(centers_of_mass[2] - centers_of_mass[1])\n return (l1 < l2 + l3) and (l2 < l1 + l3) and (l3 < l1 + l2)", "def test_mesh():\n mesh = pv.Plane()\n assert not point_cloud(mesh)", "def check_properties(mesh):\n has_triangle_normals = mesh.has_triangle_normals()\n has_vertex_normals = mesh.has_vertex_normals()\n has_texture = mesh.has_textures()\n edge_manifold = mesh.is_edge_manifold(allow_boundary_edges=True)\n edge_manifold_boundary = mesh.is_edge_manifold(allow_boundary_edges=False)\n vertex_manifold = mesh.is_vertex_manifold()\n self_intersecting = mesh.is_self_intersecting()\n watertight = mesh.is_watertight()\n orientable = mesh.is_orientable()\n _trimesh = util.o3d_mesh_to_trimesh(mesh)\n convex = trimesh.convex.is_convex(_trimesh)\n\n print(f\" no vertices: {len(mesh.vertices)}\")\n print(f\" no triangles: {len(mesh.triangles)}\")\n print(f\" dims (x, y, z): {dimensions(mesh)}\")\n print(f\" has triangle normals: {has_triangle_normals}\")\n print(f\" has vertex normals: {has_vertex_normals}\")\n print(f\" has textures: {has_texture}\")\n print(f\" edge_manifold: {edge_manifold}\")\n print(f\" edge_manifold_boundary: {edge_manifold_boundary}\")\n print(f\" vertex_manifold: {vertex_manifold}\")\n print(f\" self_intersecting: {self_intersecting}\")\n print(f\" watertight: {watertight}\")\n print(f\" orientable: {orientable}\")\n print(f\" convex: {convex}\")\n print(f\" components: {_trimesh.body_count}\")", "def boolean_intersection_mesh_mesh(A, B):\n pass", "def triangular(self):\n return self.kernel.size == 0", "def areTriangleStripGenerated(self) -> \"SbBool\":\n return _coin.SoReorganizeAction_areTriangleStripGenerated(self)", "def checkDegenerateFaces(self):\n print(\"Checking mesh for degenerate faces...\")\n\n for face in self.faces:\n\n seenPos = set()\n vList = []\n for v in face.adjVerts():\n pos = tuple(v.pos.tolist()) # need it as a hashable type\n if pos in seenPos:\n raise ValueError(\"ERROR: Degenerate mesh face has repeated vertices at position: \" + str(pos))\n else:\n seenPos.add(pos)\n vList.append(v.pos)\n\n # Check for triangular faces with colinear vertices (don't catch other such errors for now)\n if(len(vList) == 3):\n v1 = vList[1] - vList[0]\n v2 = vList[2]-vList[0]\n area = norm(cross(v1, v2))\n if area < 0.0000000001*max((norm(v1),norm(v2))):\n raise ValueError(\"ERROR: Degenerate mesh face has triangle composed of 3 colinear points: \\\n \" + str(vList))\n\n\n print(\" ...test passed\")", "def is3DTextCountedAsTriangles(self) -> \"SbBool\":\n return _coin.SoGetPrimitiveCountAction_is3DTextCountedAsTriangles(self)", "def is_triangle(a, 
b, c):\n sides = [a, b, c]\n sides.sort()\n\n # side lengths may not be negative or zero, check the smallest side\n if sides[0] <= 0:\n return False\n\n # sum of the two smaller sides must be larger than the longest side\n if sides[0] + sides[1] > sides[2]:\n return True\n\n return False", "def is_right_triangle(vertices):\n assert len(vertices) == 3, \"not a triangle: %s\" % vertices\n xaxis = 0\n yaxis = 0\n # see how many pairs share a yaxis\n if vertices[0][0] - vertices[1][0] == 0:\n yaxis += 1\n if vertices[1][0] - vertices[2][0] == 0:\n yaxis += 1\n if vertices[2][0] - vertices[0][0] == 0:\n yaxis += 1\n # see how many pairs share a xaxis\n if vertices[0][1] - vertices[1][1] == 0:\n xaxis += 1\n if vertices[1][1] - vertices[2][1] == 0:\n xaxis += 1\n if vertices[2][1] - vertices[0][1] == 0:\n xaxis += 1\n \n # iff there is one pair on the same xaxis and one pair on the same yaxis \n return xaxis==1 and yaxis==1", "def valid_triangle(sides):\n for permutation in permutations(sides):\n if (permutation[0] + permutation[1]) <= permutation[2]:\n return False\n return True", "def testequilateraltriangles(self):\n self.assertEqual(classify_triangle(1, 1, 1), 'Equilateral', '1,1,1 should be equilateral')", "def regular(self):\n degs = {len([e for e in self._edges if x in e])\n for x in self._vertices}\n return len(degs) == 1", "def _validate(self):\n # check that element connectivity contains integers\n # NOTE: this is neccessary for some plotting functionality\n if not np.issubdtype(self.t[0, 0], np.signedinteger):\n msg = (\"Mesh._validate(): Element connectivity \"\n \"must consist of integers.\")\n raise Exception(msg)\n # check that vertex matrix has \"correct\" size\n if self.p.shape[0] > 3:\n msg = (\"Mesh._validate(): We do not allow meshes \"\n \"embedded into larger than 3-dimensional \"\n \"Euclidean space! Please check that \"\n \"the given vertex matrix is of size Ndim x Nvertices.\")\n raise Exception(msg)\n # check that element connectivity matrix has correct size\n nvertices = {'line': 2, 'tri': 3, 'quad': 4, 'tet': 4, 'hex': 8}\n if self.t.shape[0] != nvertices[self.refdom]:\n msg = (\"Mesh._validate(): The given connectivity \"\n \"matrix has wrong shape!\")\n raise Exception(msg)\n # check that there are no duplicate points\n tmp = np.ascontiguousarray(self.p.T)\n if self.p.shape[1] != np.unique(tmp.view([('', tmp.dtype)]\n * tmp.shape[1])).shape[0]:\n msg = \"Mesh._validate(): Mesh contains duplicate vertices.\"\n warnings.warn(msg)\n # check that all points are at least in some element\n if len(np.setdiff1d(np.arange(self.p.shape[1]), np.unique(self.t))) > 0:\n msg = (\"Mesh._validate(): Mesh contains a vertex \"\n \"not belonging to any element.\")\n raise Exception(msg)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
export elements to new geometry
def elements_to_geometry(self, elements, node_layers="all"): elements = np.sort(elements) # make sure elements are sorted! # extract information for selected elements node_ids, elem_tbl = self._get_nodes_and_table_for_elements( elements, node_layers=node_layers ) node_coords = self.node_coordinates[node_ids] codes = self.codes[node_ids] # create new geometry geom = _UnstructuredGeometry() geom._set_nodes( node_coords, codes=codes, node_ids=node_ids, projection_string=self.projection_string, ) geom._set_elements(elem_tbl, self.element_ids[elements]) geom._reindex() geom._type = self._type # if not self.is_2d: # original file was 3d layers_used = self.layer_ids[elements] unique_layer_ids = np.unique(layers_used) n_layers = len(unique_layer_ids) if ( self._type == UnstructuredType.Dfsu3DSigma or self._type == UnstructuredType.Dfsu3DSigmaZ ) and n_layers == 1: # If source is 3d, but output only has 1 layer # then change type to 2d geom._type = UnstructuredType.Dfsu2D geom._n_layers = None if node_layers == "all": print( "Warning: Only 1 layer in new geometry (hence 2d), but you have kept both top and bottom nodes! Hint: use node_layers='top' or 'bottom'" ) else: geom._type = self._type geom._n_layers = n_layers lowest_sigma = self.n_layers - self.n_sigma_layers + 1 geom._n_sigma = sum(unique_layer_ids >= lowest_sigma) # If source is sigma-z but output only has sigma layers # then change type accordingly if ( self._type == UnstructuredType.DfsuVerticalProfileSigmaZ or self._type == UnstructuredType.Dfsu3DSigmaZ ) and n_layers == geom._n_sigma: geom._type = UnstructuredType(self._type.value - 1) geom._top_elems = geom._get_top_elements_from_coordinates() return geom
[ "def deformGeometry(self):\r\n cmds.modelEditor('modelPanel4', e=True, nurbsCurves=True, polymeshes=True)\r\n #DeformLibrary.matchCircleDirectionTest(self.LocatorGrp)\r\n DeformLibrary.deformGeoToImage(self.LocatorGrp, self.ResolutionList)\r\n DeformLibrary.cleanUpforEdit2(self.LocatorGrp, self.Objects, self.relationshipList, self.mesh)\r\n self.Objects = []\r\n self.relationshipList = []", "def exportVtk(self, filename):\n print(\"Exporting results to '%s'...\" % filename)\n\n # --- Create points and polygon definitions from our node network\n points = self.outputData.coords.tolist()\n\n # --- Make sure topology is VTK-compatible; i.e.: 0-based\n #polygons = (self.outputData.edof-1).tolist()\n topo = np.zeros([self.outputData.edof.shape[0], 3], dtype=int)\n for i in range(self.outputData.edof.shape[0]):\n topo[i, 0] = self.outputData.edof[i,1]/2 - 1\n topo[i, 1] = self.outputData.edof[i, 3] / 2 - 1\n topo[i, 2] = self.outputData.edof[i, 5] / 2 - 1\n\n polygons = (topo).tolist()\n\n # --- Specify both vector and scalar data for each element\n #pointData = vtk.PointData(vtk.Scalars(self.outputData.a.tolist(), name=\"Displacement\"))\n #cellData = vtk.CellData(vtk.Scalars(max(self.outputData.stress), name=\"maxvmstress\"),\\\n # vtk.Vectors(self.outputData.stress, \"stress\"))\n cellData = vtk.CellData(vtk.Scalars(self.outputData.stress, name=\"Von Mises\"))\n\n # --- Create the structure of the element network\n structure = vtk.PolyData(points=points, polygons=polygons)\n\n # --- Store everything in a vtk instance\n #vtkData = vtk.VtkData(structure, pointData, cellData)\n vtkData = vtk.VtkData(structure, cellData)\n\n # --- Save the data to the specified file\n vtkData.tofile(filename, \"ascii\")", "def WriteGeometry(self, *args):\n return _TopTools.TopTools_ShapeSet_WriteGeometry(self, *args)", "def to_xml_element(self):\n\n element = super().to_xml_element()\n element.set(\"type\", \"projection\")\n\n subelement = ET.SubElement(element, \"camera_position\")\n subelement.text = ' '.join(map(str, self._camera_position))\n\n subelement = ET.SubElement(element, \"look_at\")\n subelement.text = ' '.join(map(str, self._look_at))\n\n subelement = ET.SubElement(element, \"wireframe_thickness\")\n subelement.text = str(self._wireframe_thickness)\n\n subelement = ET.SubElement(element, \"wireframe_color\")\n color = self._wireframe_color\n if isinstance(color, str):\n color = _SVG_COLORS[color.lower()]\n subelement.text = ' '.join(str(x) for x in color)\n\n if self._wireframe_domains:\n id_list = [x.id for x in self._wireframe_domains]\n subelement = ET.SubElement(element, \"wireframe_ids\")\n subelement.text = ' '.join([str(x) for x in id_list])\n\n # note that this differs from the slice plot colors\n # in that \"xs\" must also be specified\n if self._colors:\n for domain, color in sorted(self._colors.items(),\n key=lambda x: x[0].id):\n subelement = ET.SubElement(element, \"color\")\n subelement.set(\"id\", str(domain.id))\n if isinstance(color, str):\n color = _SVG_COLORS[color.lower()]\n subelement.set(\"rgb\", ' '.join(str(x) for x in color))\n subelement.set(\"xs\", str(self._xs[domain]))\n\n subelement = ET.SubElement(element, \"horizontal_field_of_view\")\n subelement.text = str(self._horizontal_field_of_view)\n\n # do not need to write if orthographic_width == 0.0\n if self._orthographic_width > 0.0:\n subelement = ET.SubElement(element, \"orthographic_width\")\n subelement.text = str(self._orthographic_width)\n\n return element", "def write_nodes_gexf(self, out_file):\n viz_color_shape 
= {'standard' : (42, 55, 235, \"disc\"), 'spheroplast':(255, 255, 0, \"square\"),\n 'curved': (41, 235, 3, \"triangle\"), 'filament': (211, 3, 235, \"diamond\")}\n count = 0\n for key, lst in self.nodeWithTypes.items():\n for elt in lst:\n r, g, b, shape = viz_color_shape[key]\n out_file.write(\" <node id=\\\"%s\\\" label=\\\"%s\\\" >\\n\" % (getNodeLetter(count), key))\n out_file.write(' <viz:color r=\"%d\" g=\"%d\" b=\"%d\" />\\n' % (r, g, b))\n out_file.write(' <viz:position x=\"%f\" y=\"%f\" z=\"0.0\" />\\n' % (elt[0], elt[1]))\n out_file.write(' <viz:shape value=\"%s\" />\\n' % shape)\n out_file.write(' <viz:size value=\"10\"/>\\n')\n out_file.write(\" </node>\\n\")\n count += 1\n out_file.write(\" <node id=\\\"SURFACE\\\" label=\\\"surfaceGhost\\\">\\n\")\n out_file.write(' <viz:color r=\"135\" g=\"135\" b=\"135\" />\\n')\n out_file.write(' <viz:position x=\"0.0\" y=\"0.0\" z=\"0.0\" />\\n')\n out_file.write(' <viz:shape value=\"disc\" />\\n')\n out_file.write(' <viz:size value=\"0.01\"/>\\n')\n out_file.write(\" </node>\\n\")", "def write_edges_gexf(self, out_file):\n count = 0\n print(\"Here are all the nodes\", self.nodes)\n for i in range(len(self.edges)):\n for j in range(len(self.edges[i])):\n if self.edges[i][j] != 0 and i != j:\n out_file.write(\" <edge id = \\\"%d\\\" source=\\\"%s\\\" target=\\\"%s\\\" weight=\\\"%f\\\">\\n\" % (count, getNodeLetter(i), getNodeLetter(j), self.edges[i][j]))\n out_file.write(\" <attvalues>\\n\")\n out_file.write(\" <attvalue for=\\\"0\\\" value=\\\"%s\\\" />\\n\" % self.get_edge_type(i, j))\n out_file.write(\" <attvalue for=\\\"1\\\" value=\\\"%f\\\" />\\n\" % self.get_edge_dist(i, j))\n out_file.write(\" <attvalue for=\\\"2\\\" value=\\\"%f\\\" />\\n\" % self.get_edge_ohms(i, j))\n out_file.write(\" </attvalues>\\n\")\n out_file.write(' <viz:color r=\"255\" g=\"0\" b=\"0\" />\\n')\n out_file.write(' <viz:thickness value=\"5\" />\\n')\n out_file.write(' <viz:shape value=\"solid\" />\\n')\n out_file.write(\" </edge>\\n\")\n count += 1\n for node in self.edgeWithTypes['celltosurface']:\n for loc in self.edgeWithTypes['celltosurface'][node]:\n print(\"Here is the node:\", node)\n out_file.write(\" <edge id = \\\"%d\\\" source=\\\"%s\\\" target=\\\"SURFACE\\\" weight=\\\"%f\\\">\\n\" % (count, getNodeLetter(self.nodes.index([round(x, 6) for x in node])), self.weight(node, loc)))\n out_file.write(\" <attvalues>\\n\")\n out_file.write(\" <attvalue for=\\\"0\\\" value=\\\"celltosurface\\\" />\\n\")\n out_file.write(\" <attvalue for=\\\"1\\\" value=\\\"%f\\\" />\\n\" % self.distance(node, loc))\n out_file.write(\" <attvalue for=\\\"2\\\" value=\\\"%f\\\" />\\n\" % self.get_edge_ohms(node, loc))\n out_file.write(\" </attvalues>\\n\")\n out_file.write(' <viz:color r=\"235\" g=\"111\" b=\"3\" />\\n')\n out_file.write(' <viz:thickness value=\"0.05\" />\\n')\n out_file.write(' <viz:shape value=\"solid\" />\\n')\n out_file.write(\" </edge>\\n\")\n count += 1", "def DumpGeometry(self, *args):\n return _TopTools.TopTools_ShapeSet_DumpGeometry(self, *args)", "def save_obj(self, filename):\n verts_3d = np.concatenate((self.verts_2d, np.ones_like(self.verts_2d[:, :1])), 1)\n mesh = trimesh.Trimesh(vertices=verts_3d, faces=self.faces, process=False)\n trimesh.exchange.export.export_mesh(mesh, filename)", "def rebuild(self, selected_elems, elem_attr=None):\n new_points_map = dict()\n new_points_index = 0\n for elem in selected_elems:\n for n in elem:\n if not n in new_points_map:\n new_points_map[n] = new_points_index\n new_points_index += 1\n\n new_points_ref = 
np.zeros(len(new_points_map),dtype=int)\n for k,v in new_points_map.items():\n new_points_ref[v] = k\n\n new_points = np.zeros([len(new_points_ref),3],dtype=float)\n if self.nodes.num_attrs > 0:\n new_node_attrs = np.zeros([len(new_points_ref),self.nodes.num_attrs],dtype=float)\n if self.nodes.has_boundary_markers > 0:\n new_node_boundary_markers = np.zeros(len(new_points_ref),dtype=int)\n for i,pos in enumerate(new_points_ref):\n new_points[i] = self.nodes.points[pos]\n if self.nodes.num_attrs > 0:\n new_node_attrs[i,:] = self.nodes.attrs[pos,:]\n if self.nodes.has_boundary_markers > 0:\n new_node_boundary_markers[i] = self.nodes.boundary_markers[pos]\n\n new_elems = np.zeros_like(selected_elems)\n for i,elem in enumerate(selected_elems):\n a, b, c, d = elem\n new_elems[i] = new_points_map[a],new_points_map[b],new_points_map[c],new_points_map[d]\n\n new_faces = elems_to_faces(new_elems)\n\n obj2 = TetgenObject()\n\n obj2.elems.elems = new_elems\n if elem_attr is not None:\n obj2.elems.attrs = np.zeros_like(elem_attr)\n obj2.elems.attrs[:] = elem_attr\n obj2.elems.num_attrs = len(elem_attr[0])\n obj2.elems.num_nodes = 4\n obj2.elems.num_elems = len(new_elems)\n\n obj2.faces.faces = new_faces\n obj2.faces.num_faces = len(new_faces)\n\n obj2.nodes.points = new_points\n obj2.nodes.num_points = len(new_points)\n obj2.nodes.dim = 3\n if self.nodes.num_attrs > 0:\n obj2.nodes.attrs = new_node_attrs\n obj2.nodes.num_attrs = len(new_node_attrs[0])\n if self.nodes.has_boundary_markers > 0:\n obj2.nodes.boundary_markers = new_node_boundary_markers\n obj2.nodes.has_boundary_markers = 1\n\n return obj2", "def writexz(edges, bounds, filename, scale, space):\n #start = time.clock()\n file = open(filename, 'wb')\n inkscapeheader(file)\n figdata(file, edges, 'xz', bounds, scale, space)\n inkscapefooter(file)\n file.close()\n print 'Successfully exported ', Blender.sys.basename(filename)# + seconds", "def exportar_shape():\n archivo = GTFS_DIR + 'shapes_tmp.txt'\n print(Fore.GREEN + \"AVISO:\" + Fore.RESET +\n \"Exportando las geometrías a \" + archivo)\n direxists(GTFS_DIR)\n os.system('/usr/bin/sqlite3 -header -csv \"/var/tmp/gtfs.sqlite\" \"SELECT shape_id, shape_pt_lat, shape_pt_lon, shape_pt_sequence, shape_dist_traveled FROM shapes_csv ORDER BY shape_id, shape_pt_sequence;\" > \"' + archivo + '\"')\n return", "def export_to_xml(self, bc, axially_finite, plotzs=(0.0,), entropy=0,\n\t particles=1000, batches=10, inactive=5):\n\t\tgeom = self.get_openmc_geometry(bc, axially_finite)\n\t\tgeom.export_to_xml()\n\t\tself.export_key_pickle()\n\t\t# plots\n\t\tif len(plotzs):\n\t\t\tlots = openmc.Plots()\n\t\t\tfor z in plotzs:\n\t\t\t\tfor scheme in (\"cell\", \"material\"):\n\t\t\t\t\tp = openmc.Plot()\n\t\t\t\t\tp.name = \"Plot_{:.3f}_{}s\".format(z, scheme)\n\t\t\t\t\tp.filename = p.name\n\t\t\t\t\tp.basis = 'xy'\n\t\t\t\t\tp.color_by = scheme\n\t\t\t\t\tif scheme == \"material\":\n\t\t\t\t\t\tp.colors = self.material_lib.color_mapping\n\t\t\t\t\tp.origin = (0, 0, z)\n\t\t\t\t\t#p.width = [self.plot_width, self.plot_width]\n\t\t\t\t\tp.width = self.width\n\t\t\t\t\tlots.append(p)\n\t\t\tlots.export_to_xml()\n\t\t# essential settings\n\t\ts = openmc.Settings()\n\t\ts.particles = particles\n\t\ts.batches = batches\n\t\ts.inactive = inactive\n\t\tif entropy:\n\t\t\traise NotImplementedError(\"Shannon Entropy\")\n\t\t\temesh = openmc.Mesh.from_rect_lattice(self, division=entropy)\n\t\t\ts.entropy_mesh = emesh\n\t\ts.export_to_xml()\n\t\t# materials\n\t\tmats = 
geom.root_universe.get_all_materials().values()\n\t\topenmc.Materials(mats).export_to_xml()", "def to_2d_geometry(self):\n if self._n_layers is None:\n return self\n\n # extract information for selected elements\n elem_ids = self.bottom_elements\n node_ids, elem_tbl = self._get_nodes_and_table_for_elements(\n elem_ids, node_layers=\"bottom\"\n )\n node_coords = self.node_coordinates[node_ids]\n codes = self.codes[node_ids]\n\n # create new geometry\n geom = _UnstructuredGeometry()\n geom._set_nodes(\n node_coords,\n codes=codes,\n node_ids=node_ids,\n projection_string=self.projection_string,\n )\n geom._set_elements(elem_tbl, self.element_ids[elem_ids])\n\n geom._type = UnstructuredType.Mesh\n\n geom._reindex()\n\n return geom", "def test_export_stl(self):\n\n test_shape = ExtrudeMixedShape(\n points=[\n (10, 20, \"straight\"),\n (10, 10, \"straight\"),\n (20, 10, \"circle\"),\n (22, 15, \"circle\"),\n (20, 20, \"straight\"),\n ],\n distance=10,\n )\n os.system(\"rm tests/test.stl\")\n test_shape.export_stl(\"tests/test.stl\")\n assert Path(\"tests/test.stl\").exists() is True\n os.system(\"rm tests/test.stl\")\n test_shape.export_stl(\"tests/test\")\n assert Path(\"tests/test.stl\").exists() is True\n os.system(\"rm tests/test.stl\")", "def _exportNode(self):\n output = self._doc.createElement(\"object\")\n for nodename in (\"order\", \"hidden\"):\n skins = getattr(self.context, \"_\" + nodename)\n for skin in sorted(skins):\n for name in sorted(skins[skin]):\n node = self._doc.createElement(nodename)\n node.setAttribute(\"skinname\", skin)\n node.setAttribute(\"manager\", name)\n for viewlet in skins[skin][name]:\n child = self._doc.createElement(\"viewlet\")\n child.setAttribute(\"name\", viewlet)\n node.appendChild(child)\n output.appendChild(node)\n return output", "def writeGeom(self, outputFileName, suffix=\"\"):\n if suffix:\n self._getModifiedFileName(outputFileName, suffix)\n outputFileName = self.modifiedFileName\n\n runLog.important(\"Writing reactor geometry file as {}\".format(outputFileName))\n root = ET.Element(\n INP_SYSTEMS,\n attrib={\n INP_GEOM: str(self.geomType),\n INP_SYMMETRY: str(self.symmetry),\n },\n )\n tree = ET.ElementTree(root)\n # start at ring 1 pos 1 and go out\n for targetIndices in sorted(list(self.assemTypeByIndices)):\n ring, pos = targetIndices\n assembly = ET.SubElement(root, \"assembly\")\n assembly.set(\"ring\", str(ring))\n assembly.set(\"pos\", str(pos))\n fuelPath, fuelCycle = self.eqPathInput.get((ring, pos), (None, None))\n if fuelPath is not None:\n # set the equilibrium shuffling info if it exists\n assembly.set(INP_FUEL_PATH, str(fuelPath))\n assembly.set(INP_FUEL_CYCLE, str(fuelCycle))\n\n aType = self.assemTypeByIndices[targetIndices]\n assembly.set(\"name\", aType)\n # note: This is ugly and one-line, but that's ok\n # since we're transitioning.\n tree.write(outputFileName)", "def create_geometry_element_content(self, polygon_element_layout):\n arcpy_item = LayoutItem.get_arcpy_layout_element(self, self.polygon_object)\n PolygonElement.set_size_and_position(self, polygon_element_layout, arcpy_item)\n\n polygon_element_layout.setAttribute('type', '65644')\n polygon_element_layout.setAttribute(\"frame\", \"false\")\n polygon_element_layout.setAttribute(\"background\", \"false\")\n\n PolygonElement.set_uuid_attributes(arcpy_item.name, polygon_element_layout)\n\n symbol = change_interface(self.polygon_object, ArcGisModules.module_carto.IFillShapeElement).Symbol\n symbol_properties = {}\n\n 
SymbolPropertiesProvider.get_polygon_properties(symbol_properties, symbol)\n\n SimpleSymbol.create_simple_symbol(self.dom, polygon_element_layout, symbol_properties, 1, '1')\n\n element_geometry = change_interface(self.polygon_object, ArcGisModules.module_carto.IElement).Geometry\n polygon_symbol = change_interface(element_geometry, ArcGisModules.module_geometry.IPolygon5)\n point_collection = change_interface(polygon_symbol, ArcGisModules.module_geometry.IPointCollection)\n\n PolygonElement.create_nodes(self, polygon_element_layout, point_collection, arcpy_item)", "def to_xml_element(self):\n\n element = super().to_xml_element()\n element.set(\"type\", self._type)\n\n if self._type == 'slice':\n element.set(\"basis\", self._basis)\n\n subelement = ET.SubElement(element, \"origin\")\n subelement.text = ' '.join(map(str, self._origin))\n\n subelement = ET.SubElement(element, \"width\")\n subelement.text = ' '.join(map(str, self._width))\n\n if self._colors:\n for domain, color in sorted(self._colors.items(),\n key=lambda x: PlotBase._get_id(x[0])):\n subelement = ET.SubElement(element, \"color\")\n subelement.set(\"id\", str(PlotBase._get_id(domain)))\n if isinstance(color, str):\n color = _SVG_COLORS[color.lower()]\n subelement.set(\"rgb\", ' '.join(str(x) for x in color))\n\n if self._show_overlaps:\n subelement = ET.SubElement(element, \"show_overlaps\")\n subelement.text = \"true\"\n\n if self._overlap_color is not None:\n color = self._overlap_color\n if isinstance(color, str):\n color = _SVG_COLORS[color.lower()]\n subelement = ET.SubElement(element, \"overlap_color\")\n subelement.text = ' '.join(str(x) for x in color)\n\n if self._meshlines is not None:\n subelement = ET.SubElement(element, \"meshlines\")\n subelement.set(\"meshtype\", self._meshlines['type'])\n if 'id' in self._meshlines:\n subelement.set(\"id\", str(self._meshlines['id']))\n if 'linewidth' in self._meshlines:\n subelement.set(\"linewidth\", str(self._meshlines['linewidth']))\n if 'color' in self._meshlines:\n subelement.set(\"color\", ' '.join(map(\n str, self._meshlines['color'])))\n\n return element", "def test_case_2(self):\n graphic = Graphic(etree.parse(\"arrow.svg\").getroot())\n f = open(\"out/arrow_%s.svg\"%sys._getframe().f_code.co_name,\"w\")\n f.write(graphic.get_xml())\n f.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get list of top element ids based on element coordinates
def _get_top_elements_from_coordinates(self, ec=None): if ec is None: ec = self.element_coordinates d_eps = 1e-4 top_elems = [] x_old = ec[0, 0] y_old = ec[0, 1] for j in range(1, len(ec)): d2 = (ec[j, 0] - x_old) ** 2 + (ec[j, 1] - y_old) ** 2 # print(d2) if d2 > d_eps: # this is a new x,y point # then the previous element must be a top element top_elems.append(j - 1) x_old = ec[j, 0] y_old = ec[j, 1] return np.array(top_elems)
[ "def get_ids():", "def top_nodes(self):\n voffs = self.offset.take(self.bt_masks[1])*(self._mesh.layers-2)\n return np.unique(self.cell_node_list[:, self.bt_masks[1]] + voffs)", "def getNodeXY(id):\n for n in nodes:\n if n[0] == id:\n return (n[2], n[3])", "def _get_front_idxs_from_id(fronts, id):\n if id == -1:\n # This is the only special case.\n # -1 is the index of the catch-all final column offset front.\n freq_idxs = np.arange(fronts.shape[0], dtype=np.int64)\n sample_idxs = np.ones(len(freq_idxs), dtype=np.int64) * (fronts.shape[1] - 1)\n else:\n freq_idxs, sample_idxs = np.where(fronts == id)\n return [(f, i) for f, i in zip(freq_idxs, sample_idxs)]", "def retrieveCellIds(cls, listOfPoints):", "def getPositionsList(self):\n return [element.getPosition() for element in self.elements.values()]", "def getFacesTop(self):\n\tnx=self.nx\n\tstart = len(self.interiorFaces) + nx\n\treturn self.faces[start:start + nx]", "def find_positions(element):\n lst = {float(element.attrib[\"lat\"]), float(element.attrib[\"lon\"])}\n pos = list(lst)\n return pos", "def get_ids(self):\n if self.root == 'root':\n return []\n ids = []\n curr_level = [self.root]\n while curr_level:\n next_level = []\n for node in curr_level:\n if isinstance(node, _SummaryTreeNode):\n if node.children:\n next_level.extend(node.children)\n else:\n ids.append(node)\n curr_level = next_level\n return ids", "def getParetoFront(coords):\n paretoFront = [coords[0]] \n for pair in coords[1:]:\n if pair[1] <= paretoFront[-1][1]:\n paretoFront.append(pair)\n\n return paretoFront", "def ordered_panel_coordinates(self):\n pc = self.panel_corners()\n pc = sorted(pc, key=lambda x: x[0])\n\n # get the coordinates on the \"left\" and \"right\" side of the bounding box\n left_coords = pc[:2]\n right_coords = pc[2:]\n\n # sort y values ascending for correct order\n left_coords = sorted(left_coords, key=lambda y: y[0])\n right_coords = sorted(right_coords, key=lambda y: y[0])\n\n return [tuple(right_coords[1]), tuple(left_coords[1]), tuple(left_coords[0]), tuple(right_coords[0])]", "def get_sorted_ind(x):\n\td = dist.pdist(x)\n\tD = dist.squareform(d)\n\tY = sch.linkage(D, method='average', metric='cosine') \n\tZ = sch.dendrogram(Y)\n\tidx = Z['leaves'] \n\treturn idx", "def get_element_list(self, pos2):\n \n element_hash = pos2.x + (pos2.y * self.size.x)\n return self.element_hash.get(element_hash)", "def within_bbox(self, bbox):\n assert self.dimension() == 2, \"Non-2D embeddings are not supported by within_bbox()\"\n positions = self.field(Field.POSITION)\n return [id_val for id_val, pos in zip(self.ids, positions)\n if (pos[0] >= bbox[0] and pos[0] <= bbox[1] and\n pos[1] >= bbox[2] and pos[1] <= bbox[3])]", "def retrieveCellIds(self, listOfPoints, containedOnly=False):\n cellIds = []\n for cntb, bound in enumerate(listOfPoints):\n cellIds.append([])\n for point in bound:\n cellIds[cntb].extend(self.gridContainer['vertexToCellIds'][tuple(point)])\n if cntb == 0:\n previousSet = set(cellIds[cntb])\n if containedOnly:\n previousSet = set(previousSet).intersection(cellIds[cntb])\n else:\n previousSet.update(cellIds[cntb])\n\n return list(set(previousSet))", "def _get_position_session_id(self, ipython, position):\n maxs = []\n for x in range(self.MAX_ELEMENTS): maxs.append(0)\n #max1, max2 = 0, 0\n for x in ipython.history_manager.get_tail(n=100):\n for i in range(self.MAX_ELEMENTS):\n if x[0] > maxs[i]:\n maxs.insert(i, x[0])\n maxs = maxs[:10]\n return maxs[position]", "def get_markers(adata, n):\n df = 
pd.DataFrame(adata.uns['rank_genes_groups']['names']).head(n) \n df_genes = list()\n for i in range(0, len(df.columns)):\n col=str(i)\n df_genes.append(df[col])\n top_n = list()\n for i in range(0,len(df_genes)):\n for j in range(0, n):\n top_n.append(df_genes[i][j])\n return top_n", "def LoadIDs(element):\n ids = []\n for levelIDElement in element.findall('level'):\n ids.append(int(levelIDElement.text))\n return ids", "def elem2d_ids(self):\n if self._n_layers is None:\n raise InvalidGeometry(\"Object has no layers: cannot return elem2d_ids\")\n # or return self._2d_ids ??\n\n if self._2d_ids is None:\n res = self._get_2d_to_3d_association()\n self._e2_e3_table = res[0]\n self._2d_ids = res[1]\n self._layer_ids = res[2]\n return self._2d_ids", "def get_working_ids(self):\n if len(self.good_points) == 0:\n return set(self.data.index)\n else:\n return set(self.data.index) - set(chain.from_iterable([p.index for p in self.good_points]))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
extract 2d geometry from 3d geometry Returns UnstructuredGeometry 2d geometry (bottom nodes)
def to_2d_geometry(self): if self._n_layers is None: return self # extract information for selected elements elem_ids = self.bottom_elements node_ids, elem_tbl = self._get_nodes_and_table_for_elements( elem_ids, node_layers="bottom" ) node_coords = self.node_coordinates[node_ids] codes = self.codes[node_ids] # create new geometry geom = _UnstructuredGeometry() geom._set_nodes( node_coords, codes=codes, node_ids=node_ids, projection_string=self.projection_string, ) geom._set_elements(elem_tbl, self.element_ids[elem_ids]) geom._type = UnstructuredType.Mesh geom._reindex() return geom
[ "def get_3d(self) -> \"ProjectionGeometry\":\n if self.ndim == 2:\n if self.det_shape_vu is not None:\n new_det_shape_vu = np.ones(2, dtype=int)\n new_det_shape_vu[-len(self.det_shape_vu) :] = self.det_shape_vu\n else:\n new_det_shape_vu = None\n return dc_replace(self, geom_type=self.geom_type.replace(\"2d\", \"3d\"), det_shape_vu=new_det_shape_vu)\n else:\n return dc_replace(self)", "def get_3d(self) -> \"VolumeGeometry\":\n if len(self._vol_shape_xyz) == 2:\n return dc_replace(self, _vol_shape_xyz=np.concatenate((self._vol_shape_xyz, [1])))\n else:\n return dc_replace(self)", "def elements_to_geometry(self, elements, node_layers=\"all\"):\n elements = np.sort(elements) # make sure elements are sorted!\n\n # extract information for selected elements\n node_ids, elem_tbl = self._get_nodes_and_table_for_elements(\n elements, node_layers=node_layers\n )\n node_coords = self.node_coordinates[node_ids]\n codes = self.codes[node_ids]\n\n # create new geometry\n geom = _UnstructuredGeometry()\n geom._set_nodes(\n node_coords,\n codes=codes,\n node_ids=node_ids,\n projection_string=self.projection_string,\n )\n geom._set_elements(elem_tbl, self.element_ids[elements])\n geom._reindex()\n\n geom._type = self._type #\n if not self.is_2d:\n # original file was 3d\n\n layers_used = self.layer_ids[elements]\n unique_layer_ids = np.unique(layers_used)\n n_layers = len(unique_layer_ids)\n\n if (\n self._type == UnstructuredType.Dfsu3DSigma\n or self._type == UnstructuredType.Dfsu3DSigmaZ\n ) and n_layers == 1:\n # If source is 3d, but output only has 1 layer\n # then change type to 2d\n geom._type = UnstructuredType.Dfsu2D\n geom._n_layers = None\n if node_layers == \"all\":\n print(\n \"Warning: Only 1 layer in new geometry (hence 2d), but you have kept both top and bottom nodes! Hint: use node_layers='top' or 'bottom'\"\n )\n else:\n geom._type = self._type\n geom._n_layers = n_layers\n lowest_sigma = self.n_layers - self.n_sigma_layers + 1\n geom._n_sigma = sum(unique_layer_ids >= lowest_sigma)\n\n # If source is sigma-z but output only has sigma layers\n # then change type accordingly\n if (\n self._type == UnstructuredType.DfsuVerticalProfileSigmaZ\n or self._type == UnstructuredType.Dfsu3DSigmaZ\n ) and n_layers == geom._n_sigma:\n geom._type = UnstructuredType(self._type.value - 1)\n\n geom._top_elems = geom._get_top_elements_from_coordinates()\n\n return geom", "def ExtractVtuGeometry(inputVtu):\n \n filter = vtk.vtkGeometryFilter()\n filter.SetInput(inputVtu.ugrid)\n filter.Update()\n surfacePoly = filter.GetOutput()\n \n # Construct output\n result = vtu()\n result.ugrid = PolyDataToUnstructuredGrid(surfacePoly)\n \n return result", "def getGeometry(self, robot : RobotModel, qfinger=None,type='Group') -> Geometry3D:\n if qfinger is not None:\n q0 = robot.getConfig()\n robot.setConfig(self.setFingerConfig(q0,qfinger))\n res = Geometry3D()\n baseLink = robot.link(self.baseLink).index\n gripperLinks = self.gripperLinks if self.gripperLinks is not None else [baseLink] + self.descendantLinks(robot)\n if type == 'Group':\n res.setGroup()\n Tbase = robot.link(self.baseLink).getTransform()\n for i,link in enumerate(gripperLinks):\n Trel = se3.mul(se3.inv(Tbase),robot.link(link).getTransform())\n g = robot.link(link).geometry().copy()\n if not g.empty():\n g.setCurrentTransform(*se3.identity())\n g.transform(*Trel)\n else:\n print(\"Uh... link\",robot.link(link).getName(),\"has empty geometry?\")\n res.setElement(i,g)\n if qfinger is not None:\n robot.setConfig(q0)\n return res\n else:\n from . 
import geometry\n res = geometry.merge(*[robot.link(link) for link in gripperLinks])\n if qfinger is not None:\n robot.setConfig(q0)\n return res", "def convert_to_2d(geom):\n from django.contrib.gis.geos import WKBWriter, WKBReader\n wkb_r = WKBReader()\n wkb_w = WKBWriter()\n wkb_w.outdim = 2\n return wkb_r.read(wkb_w.write(geom))", "def extractVerticies(geom):\n isMulti = \"MULTI\" in geom.GetGeometryName()\n # Check geometry type\n if \"LINE\" in geom.GetGeometryName():\n if isMulti:\n pts = []\n for gi in range(geom.GetGeometryCount()):\n pts.append(geom.GetGeometryRef(gi).GetPoints())\n else:\n pts = geom.GetPoints()\n elif \"POLYGON\" in geom.GetGeometryName():\n if isMulti:\n pts = []\n for gi in range(geom.GetGeometryCount()):\n newGeom = geom.GetGeometryRef(gi).GetBoundary()\n pts.append(extractVerticies(newGeom))\n else:\n newGeom = geom.GetBoundary()\n pts = extractVerticies(newGeom)\n\n elif \"POINT\" in geom.GetGeometryName():\n if isMulti:\n pts = []\n for gi in range(geom.GetGeometryCount()):\n pts.append(geom.GetGeometryRef(gi).GetPoints())\n else:\n pts = geom.GetPoints()\n\n else:\n raise GeoKitGeomError(\"Cannot extract points from geometry \")\n\n if isMulti:\n out = np.concatenate(pts)\n else:\n out = np.array(pts)\n\n if out.shape[1] == 3: # This can happen when POINTs are extracted\n out = out[:, :2]\n return out", "def reflection_geometry(\n self,\n ) -> Optional[\n Literal[\n \"di:8\",\n \"de:8\",\n \"8:di\",\n \"8:de\",\n \"d:d\",\n \"d:0\",\n \"45a:0\",\n \"45c:0\",\n \"0:45a\",\n \"45x:0\",\n \"0:45x\",\n \"other\",\n ]\n ]:\n\n return self._reflection_geometry", "def cell_to_morph3d(cell):\n g = nx.DiGraph()\n stack = [cell.soma]\n while len(stack) > 0:\n sec = stack.pop()\n # This is roundabout way is required because nrn7.4 does not\n # provide explicit equivalent of `access {section}`. 
In nrn7.5\n # the 3d functions are available as Section methods.\n h('access {}'.format(sec.name()))\n stype = nu.sectype(sec.name())\n pt3d = int(h.n3d())\n # pt3d = int(sec.n3d()): # only nrn >= 7.5\n for ii in range(pt3d): \n name = '{}_{}'.format(sec.name(), ii)\n x = h.x3d(ii)\n y = h.y3d(ii)\n z = h.z3d(ii)\n d = h.diam3d(ii)\n g.add_node(name, x=x, y=y, z=z, r=d/2.0, s=stype, orig=sec)\n for ii in range(1, pt3d):\n n1 = '{}_{}'.format(sec.name(), ii-1)\n n2 = '{}_{}'.format(sec.name(), ii)\n length = ng.eucd(g, n1, n2)\n g.add_edge(n1, n2, length=length)\n current = h.SectionRef(sec=sec)\n if current.has_parent():\n h('access {}'.format(current.parent.name())) \n n1 = '{}_{}'.format(current.parent.name(),\n int(h.n3d()-1))\n g.add_edge(n1, '{}_0'.format(sec.name()),\n length=0)\n for child in current.child:\n # print('Adding', child.name())\n stack.append(child)\n return g, '{}_0'.format(cell.soma.name())", "def __get_geometry_components(self):\n mselmembers = OpenMaya.MSelectionList()\n fnset = OpenMaya.MFnSet(self.fnSkinCluster.deformerSet())\n components = OpenMaya.MObject()\n fnset.getMembers(mselmembers, False)\n dagpath = OpenMaya.MDagPath()\n mselmembers.getDagPath(0, dagpath, components)\n return dagpath, components", "def _makeGeometry(geometry, nesting, opt_coordinates=()):\n if nesting < 2 or nesting > 4:\n raise ee_exception.EEException('Unexpected nesting level.')\n\n # Handle a list of points.\n if isinstance(geometry, numbers.Number) and opt_coordinates:\n coordinates = [geometry]\n coordinates.extend(opt_coordinates)\n geometry = Feature.coordinatesToLine(coordinates)\n\n # Make sure the number of nesting levels is correct.\n item = geometry\n count = 0\n while isinstance(item, list) or isinstance(item, tuple):\n item = item[0]\n count += 1\n\n while count < nesting:\n geometry = [geometry]\n count += 1\n\n return geometry", "def extent_3d(self):\n\n minxy, maxxy = self.extent_2d()\n cs = self.cs\n xyz0 = cs.xyz_from_oriented((minxy[0], minxy[1], 0.0))\n xyz1 = cs.xyz_from_oriented((maxxy[0], minxy[1], 0.0))\n xyz2 = cs.xyz_from_oriented((maxxy[0], maxxy[1], 0.0))\n xyz3 = cs.xyz_from_oriented((minxy[0], maxxy[1], 0.0))\n\n \"\"\"\n xyz0 = cs.xyz_from_oriented((self.x0, self.y0, 0.0))\n xyz1 = cs.xyz_from_oriented((self.x0 + (self.nx - 1) * self.dx,\n self.y0,\n 0.0))\n xyz2 = cs.xyz_from_oriented((self.x0 + (self.nx - 1) * self.dx,\n self.y0 + (self.ny - 1) * self.dy, 0.0))\n xyz3 = cs.xyz_from_oriented((self.x0,\n self.y0 + (self.ny - 1) * self.dy,\n 0.0))\n \"\"\"\n\n minxyz = (min(xyz0[0], xyz1[0], xyz2[0], xyz3[0]),\n min(xyz0[1], xyz1[1], xyz2[1], xyz3[1]),\n min(xyz0[2], xyz1[2], xyz2[2], xyz3[2]))\n maxxyz = (max(xyz0[0], xyz1[0], xyz2[0], xyz3[0]),\n max(xyz0[1], xyz1[1], xyz2[1], xyz3[1]),\n max(xyz0[2], xyz1[2], xyz2[2], xyz3[2]))\n\n return minxyz, maxxyz", "def mol3d(self):\n if self._mol3d is None:\n apiurl = 'http://www.chemspider.com/MassSpecAPI.asmx/GetRecordMol?csid=%s&calc3d=true&token=%s' % (self.csid,TOKEN)\n response = urllib2.urlopen(apiurl)\n tree = ET.parse(response)\n self._mol3d = tree.getroot().text\n return self._mol3d", "def _parse_geometry(child, name, color, mesh_path, package_dir):\n geometry = child.find(\"geometry\")\n if geometry is None:\n raise UrdfException(\"Missing geometry tag in link '%s'\" % name)\n result = []\n for shape_type in [\"box\", \"cylinder\", \"sphere\", \"mesh\"]:\n shapes = geometry.findall(shape_type)\n Cls = shape_classes[shape_type]\n for shape in shapes:\n shape_object = Cls(\n name, mesh_path=mesh_path, 
package_dir=package_dir,\n color=color)\n shape_object.parse(shape)\n result.append(shape_object)\n return result", "def parse_geometry(self, world: bool = True, verbose: bool = False):\n if verbose:\n print(f\"{self.name}: Parsing chunk\")\n v = self.parse_vertices(world=world, verbose=verbose)\n t = self.parse_triangles(verbose=verbose)\n return v, t", "def get_geometry(self):\n dQdpx = np.zeros(3)\n dQdpy = np.zeros(3)\n dQdth = np.zeros(3)\n Astar = np.zeros(3)\n Bstar = np.zeros(3)\n Cstar = np.zeros(3)\n\n dQdpx[0] = -m.cos(self.delta) * m.cos(self.gamma)\n dQdpx[1] = 0.0\n dQdpx[2] = +m.sin(self.delta) * m.cos(self.gamma)\n\n dQdpy[0] = m.sin(self.delta) * m.sin(self.gamma)\n dQdpy[1] = -m.cos(self.gamma)\n dQdpy[2] = m.cos(self.delta) * m.sin(self.gamma)\n\n dQdth[0] = -m.cos(self.delta) * m.cos(self.gamma) + 1.0\n dQdth[1] = 0.0\n dQdth[2] = m.sin(self.delta) * m.cos(self.gamma)\n\n Astar[0] = 2 * m.pi / self.lamda * self.dpx * dQdpx[0]\n Astar[1] = 2 * m.pi / self.lamda * self.dpx * dQdpx[1]\n Astar[2] = 2 * m.pi / self.lamda * self.dpx * dQdpx[2]\n\n Bstar[0] = (2 * m.pi / self.lamda) * self.dpy * dQdpy[0]\n Bstar[1] = (2 * m.pi / self.lamda) * self.dpy * dQdpy[1]\n Bstar[2] = (2 * m.pi / self.lamda) * self.dpy * dQdpy[2]\n\n Cstar[0] = (2 * m.pi / self.lamda) * self.dth * dQdth[0]\n Cstar[1] = (2 * m.pi / self.lamda) * self.dth * dQdth[1]\n Cstar[2] = (2 * m.pi / self.lamda) * self.dth * dQdth[2]\n\n denom = np.dot(Astar, np.cross(Bstar, Cstar))\n A = 2 * m.pi * np.cross(Bstar, Cstar) / denom\n B = 2 * m.pi * np.cross(Cstar, Astar) / denom\n C = 2 * m.pi * np.cross(Astar, Bstar) / denom\n\n return np.array((A, B, C))", "def rhombicuboctahedron():\n return nx.read_gml(abs_path('gml/rhombicuboctahedron.gml'))", "def extract_triangle_mesh(self):\n tsdf_vol = self._tsdf_vol.cpu().numpy()\n color_vol = self._color_vol.cpu().numpy()\n vol_origin = self._vol_origin.cpu().numpy()\n\n # Marching cubes\n verts, faces, norms, vals = measure.marching_cubes_lewiner(tsdf_vol, level=0)\n verts_ind = np.round(verts).astype(int)\n verts = verts*self._voxel_size + vol_origin\n\n # Get vertex colors\n rgb_vals = color_vol[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]]\n colors_b = np.floor(rgb_vals / self._const)\n colors_g = np.floor((rgb_vals - colors_b*self._const) / 256)\n colors_r = rgb_vals - colors_b*self._const - colors_g*256\n colors = np.floor(np.asarray([colors_r, colors_g, colors_b])).T\n colors = colors.astype(np.uint8)\n\n return verts, faces, norms, colors", "def _get_geometry_complex(self, id: int):\n coords_pocket, coords_ligand = self._get_coord(id)\n list_geom_tensors = []\n all_atoms_coords = np.concatenate((coords_pocket, coords_ligand))\n tensor_all_atoms_coords = (\n torch.from_numpy(all_atoms_coords).squeeze().unsqueeze(0)\n )\n length_padding = LEN_PADDING - tensor_all_atoms_coords.shape[1]\n result = F.pad(\n input=tensor_all_atoms_coords,\n pad=(0, 0, 0, length_padding),\n mode=\"constant\",\n value=0,\n )\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
list of nodes and element table for a list of elements
def _get_nodes_and_table_for_elements(self, elements, node_layers="all"): nodes = [] elem_tbl = [] if (node_layers is None) or (node_layers == "all") or self.is_2d: for j in elements: elem_nodes = self.element_table[j] elem_tbl.append(elem_nodes) for node in elem_nodes: nodes.append(node) else: # 3D file if (node_layers != "bottom") and (node_layers != "top"): raise Exception("node_layers must be either all, bottom or top") for j in elements: elem_nodes = self.element_table[j] nn = len(elem_nodes) halfn = int(nn / 2) if node_layers == "bottom": elem_nodes = elem_nodes[:halfn] if node_layers == "top": elem_nodes = elem_nodes[halfn:] elem_tbl.append(elem_nodes) for node in elem_nodes: nodes.append(node) return np.unique(nodes), elem_tbl
[ "def parse_table(element: WebElement) -> List[List[str]]:\n\n table_data = []\n\n # parse header columns\n header = []\n header_columns = element.find_elements_by_css_selector(\"thead tr th\")\n for column in header_columns:\n header.append(column.text)\n\n table_data.append(header)\n\n # parse data\n data_rows_elems = element.find_elements_by_css_selector(\"tbody tr\")\n for data_row_elem in data_rows_elems:\n data_row = []\n\n children_elems = data_row_elem.find_elements_by_css_selector(\"*\")\n\n for child_elem in children_elems:\n data_row.append(child_elem.text)\n\n table_data.append(data_row)\n\n return table_data", "def parseElements(elementsList, start):\n if start == -1:\n arvore = ElementRoot()\n newElement, end = parseElements(elementsList, 0)\n arvore.addChild(newElement)\n return arvore, len(elementsList)\n \n arvore = Element(elementsList[start][0])\n \n i = start+1\n while i < len(elementsList):\n if elementsList[i][3] == 0:\n newElement, i = parseElements(elementsList, i)\n arvore.addChild(newElement)\n else:\n arvore.addTextIndexes(elementsList[start][2], elementsList[i][1])\n return arvore, i\n i += 1\n\n return arvore, i", "def nodeToElement(node,result):\n\tname=node.getId()\n\ttype=node.getType()\n\tposition=node.getPosition()\n\tlength=node.getLength()\n\t#print \"==n2E==\",name,type,position,length\n\n\t#thick elements\n\tif node.isKindOf(\"dh\"):\n\t\tresult.add(Dipole(position,length,name))\n\telif node.isKindOf(\"qh\"):\n\t\tresult.add(Quadrupole(position,length,name))\n\telif node.isKindOf(\"qv\"):\n\t\tresult.add(Quadrupole(position,length,name))\n\telif node.isKindOf(\"pq\"):\n\t\tresult.add(Quadrupole(position,length,name))\n\n\t#thin elements within nonzero drift space (quasi thick elements)\n\telif node.isKindOf(\"rfgap\"):\n\t\tfor i in RFGap(position,length,name).asTuple(): result.add(i)\n\telif node.isKindOf(\"bcm\"):\n\t\tfor i in BCMonitor(position,length,name).asTuple(): result.add(i)\n\n\t#thin elements\n\telif node.isKindOf(\"dch\"):\n\t\tresult.add(HSteerer(position,length,name))\n\telif node.isKindOf(\"dcv\"):\n\t\tresult.add(VSteerer(position,length,name))\n\telif node.isKindOf(\"bpm\"):\n\t\tresult.add(BPMonitor(position,length,name))\n\telif node.isKindOf(\"ws\"):\n\t\tresult.add(WScanner(position,length,name))\n\telse:\n\t\tprint node.getId(),\"is unknown node type.\"\n\t\tsys.exit(-1)", "def getElementEdgeNodes(elementType, tag=-1, primary=False, task=0, numTasks=1):", "def getElementFaceNodes(elementType, faceType, tag=-1, primary=False, task=0, numTasks=1):", "def _get_elements(self, source):\n return list(chain(*[self.tree.xpath(xpath) for xpath in source]))", "def get_elements_dict(self, element_tree, elements):\n\t\telements[element_tree[\"name\"]] = element_tree \n\n\t\tif \"children\" in element_tree and len(element_tree[\"children\"]): \n\t\t\tfor child in element_tree[\"children\"]: \n\t\t\t\tself.get_elements_dict(child, elements)", "def create_table_element(self):\n element = etree.Element('table')\n element.text = '\\n'\n element.tail = '\\n'\n return element", "def create_single_line_nodelist(model,coord_start_seq,coord_end_seq,N,\n nodesetname=None,\n elemsetname=None):\n # get the current node and element seq\n nn = model.nodelist.get_seqmax()\n ne = model.connlist.get_seqmax()\n N = int(N)\n # get the start and end nodes\n coord_start = model.nodelist.itemlib[coord_start_seq]\n coord_end = model.nodelist.itemlib[coord_end_seq]\n \n ## specify the input parameters\n [X_start,Y_start,Z_start] = coord_start.xyz\n 
[X_end,Y_end,Z_end] = coord_end.xyz\n \n if nodesetname != None:\n model.add_to_set(nodesetname,[coord_start_seq,coord_end_seq],settype='node')\n \n if N == 1:\n ## case of single middle node\n mid_x = (X_start + X_end)/2\n mid_y = (Y_start - Y_end)/2\n mid_z = (Z_start - Z_end)/2\n \n # create new grid instance\n midnode = model.add_node([mid_x,mid_y,mid_z],setname=nodesetname)\n \n # create new element instance\n model.add_element([coord_start.seq,midnode.seq],setname=elemsetname)\n model.add_element([midnode.seq,coord_end.seq],setname=elemsetname)\n\n \n elif N==0:\n # create elements between the input two nodes\n model.add_element([coord_start.seq,coord_end.seq])\n \n else:\n step_x=((-X_start+X_end))/float(N)\n step_y=((-Y_start+Y_end))/float(N)\n step_z=((-Z_start+Z_end))/float(N)\n \n tempnodelist = []\n tempelementlist = []\n for i in range(1,N):\n tempnodelist.append([i*step_x+X_start,i*step_y+Y_start,i*step_z+Z_start])\n #tempnode = model.add_node(,setname=nodesetname)\n \n id_i = model.node(tempnodelist,setname=nodesetname)\n \n for i in range(1,N):\n if i == 1:\n tempelementlist.append([coord_start.seq,id_i +i])\n else:\n tempelementlist.append([id_i+ i-1,id_i+i])\n \n tempelementlist.append([id_i+i,coord_end.seq])\n \n \n \n model.element(tempelementlist,setname=elemsetname)\n return model", "def node_table(nodes, field_names=()):\n\n fields = OrderedDict([\n ('HOSTNAME', lambda s: s.get('host', s.get('hostname'))),\n ('IP', lambda s: s.get('ip') or mesos.parse_pid(s['pid'])[1]),\n ('ID', lambda s: s['id']),\n ('TYPE', lambda s: s['type']),\n ('REGION', lambda s: s['region']),\n ('ZONE', lambda s: s['zone']),\n ])\n\n for field_name in field_names:\n if field_name.upper() in fields:\n continue\n if ':' in field_name:\n heading, field_name = field_name.split(':', 1)\n else:\n heading = field_name\n fields[heading.upper()] = _dotted_itemgetter(field_name)\n\n sortby = list(fields.keys())[0]\n tb = table(fields, nodes, sortby=sortby)\n tb.align['TYPE'] = 'l'\n return tb", "def to_elements(element_references, doc=revit.doc):\r\n element_references = to_iterable(element_references)\r\n return [to_element(e_ref) for e_ref in element_references]", "def get_ast_elem_list(self):\n return self._ast_elem_list", "def produce_rows_lst():\n\n soup = open_custom_html('')\n rows = soup.findChildren(\"tr\")[1:]\n return rows", "def get_elements(self, regexp: str = '') -> List[Element]:\n elements = []\n for board in self.get_boards():\n elements.extend(board.get_elements(regexp))\n # removing template elements\n # TODO: Refactor and stop using redis in favour of firestore\n agent_name = self.get_redis_name().replace('_agent', '')\n elements = [element for element in elements\n if element.get_name() != f'@{agent_name}: place company name(s) here']\n\n # getting elements from Redis\n # the set in Redis is called something like 'news_agent:elements:{element_id}'\n # if there is no such element, or it is empty, it returns just an empty set, not None\n connection = get_redis_connection()\n redis_elements = self.__get_cached_elements(elements, connection)\n\n # new and updated elements\n existing_and_cached_elements = []\n for element in elements:\n if element.get_id() in redis_elements.keys():\n redis_elem_name = redis_elements[element.get_id()].get_name()\n if element.get_agent_task_id() \\\n or element.get_name() == redis_elem_name:\n last_proc_time = redis_elements[element.get_id()].get_last_processing_time()\n element.set_last_processing_time(last_proc_time)\n 
existing_and_cached_elements.append(element)\n\n return existing_and_cached_elements", "def relabel_multiple_element_list(*args):\r\n \r\n header_labels = []\r\n element_labels = []\r\n \r\n for i in range(len(args[0])):\r\n label = args[0][i]\r\n if len(args) > 1:\r\n for j in range(1,len(args)):\r\n label = '{0}_{1}'.format(label,args[j][i])\r\n element_labels.append(label)\r\n if label not in header_labels: header_labels.append(label)\r\n \r\n header_labels.sort()\r\n \r\n return header_labels, element_labels", "def __getXML_list(self,inlist, objname=None):\n h = \"\"\n for i in inlist:\n h += self.__getXML(i, objname)\n return h", "def _canonical_elements(self,elements):\n #elf = frozenset\n #elf = tuple # AC\n cmpf = lambda x,y: cmp(x[0].id,y[0].id)\n elf = lambda x: tuple(sorted(x,cmp=cmpf))\n elements = elf( element for element in pairs(elements) )\n return elements", "def test_ElementSelector(self):\n df = self.df\n tmr = ElementSelector()\n for input in [df]:\n with self.subTest(input=input):\n out = tmr.transform(input)", "def get_ordered_element_list(self):\n elements = []\n for eo_index in self.element_order_index:\n elements.append(self.grading_elements[eo_index])\n return (elements)", "def _extract_table(xml):\n two_keys = set()\n table = []\n root = ElementTree.fromstring(xml)\n for i, row in enumerate(root.iter('Table')):\n children = tuple(try_parse(c.text) for c in row)\n key = (children[0], children[1])\n if key not in two_keys:\n two_keys.add(key)\n table.append(children)\n\n return table" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find 3d elements of profile nearest to (x,y) coordinates
def find_nearest_profile_elements(self, x, y): if self.is_2d: raise InvalidGeometry("Object is 2d. Cannot get_nearest_profile") else: elem2d, _ = self._find_n_nearest_2d_elements(x, y) elem3d = self.e2_e3_table[elem2d] return elem3d
[ "def getElementByCoordinates(x, y, z, dim=-1, strict=False):", "def test_nearest(self):\n plugin = SpotExtraction(neighbour_selection_method='nearest')\n expected = self.neighbours[:, 0, 0:2].astype(int)\n result = plugin.extract_coordinates(self.neighbour_cube)\n self.assertArrayEqual(result.data, expected)", "def find_mrt(x, y):\n wd = (x - 1.375845) ** 2 + (y - 103.776289) ** 2\n gl = (x - 1.314834) ** 2 + (y - 103.891121) ** 2\n bv = (x - 1.304584) ** 2 + (y - 103.796945) ** 2\n db = (x - 1.307006) ** 2 + (y - 103.843568) ** 2\n cg = (x - 1.339967) ** 2 + (y - 103.731715) ** 2\n\n l = (wd, gl, bv, db, cg)\n X = l.index(min(l))\n place = \"nil\"\n mini = 99999999\n print(X)\n nearest_centroid = centroids_list[X]\n\n for k, val in nearest_centroid.items():\n dist = (x - float(val[0])) ** 2 + (y - float(val[1])) ** 2\n if (dist < mini):\n place = k\n mini = dist\n\n return place", "def search(Y, x, u, top_n):\n coefs=np.array([x.dot(vector) for vector in u.T])\n top_imgs=[]\n for img in Y:\n coefs_img=np.array([img.dot(vector) for vector in u.T])\n dist=np.linalg.norm(coefs-coefs_img)\n top_imgs.append((dist,img))\n if len(top_imgs)<=top_n:\n continue\n sorted(top_imgs,key=lambda x:x[0]) #sort according to the distance ascendingly\n del top_imgs[-1] #delete the last one (the furthest one)\n top_imgs=np.array([img[1] for img in top_imgs])\n return top_imgs #np.random.random((top_n, 256))", "def nearest(lon_pt, lat_pt, lon2d, lat2d, theshape):\n #print \"nearest point for \", lon_pt , \" \", lat_pt\n\n lon_pt += -lon2d\n lat_pt += -lat2d\n d = np.sqrt(lon_pt**2 + lat_pt**2)\n #print \"argmin=\", d.argmin()\n\n j, i = np.unravel_index(d.argmin(), theshape)\n #i = d.argmin(axis=0).min()\n #j = d.argmin(axis=1).min()\n #print i,j\n\n #from matplotlib import pyplot\n #pyplot.imshow(d)\n #pyplot.show()\n #raw_input()\n\n return i, j", "def find_winner(self):\n distance_matrix = np.array([node.distance for node in self.__get_map_element(\n self.model.map,\n self.model.dimensions\n )])\n distance_matrix = np.reshape(distance_matrix, [dim for dim in self.model.dimensions])\n winner_coords = np.where(distance_matrix == distance_matrix.min())\n winner_coords = [winner_coords[x][0] for x in range(len(self.model.dimensions))]\n try:\n return tuple([i.item() for i in winner_coords])\n except:\n print('cos sie zepsulo, nie powinno tutaj trafic xD')", "def test_nearest_land(self):\n plugin = SpotExtraction(neighbour_selection_method='nearest_land')\n expected = self.neighbours[:, 1, 0:2].astype(int)\n result = plugin.extract_coordinates(self.neighbour_cube)\n self.assertArrayEqual(result.data, expected)", "def test_returned_cube_nearest(self):\n plugin = SpotExtraction()\n expected = [0, 0, 12, 12]\n result = plugin.process(self.neighbour_cube, self.diagnostic_cube_xy)\n self.assertArrayEqual(result.data, expected)\n self.assertEqual(result.name(), self.diagnostic_cube_xy.name())\n self.assertEqual(result.units, self.diagnostic_cube_xy.units)\n self.assertArrayEqual(result.coord('latitude').points, self.latitudes)\n self.assertArrayEqual(result.coord('longitude').points,\n self.longitudes)\n result.attributes.pop('model_grid_hash')\n self.assertDictEqual(result.attributes,\n self.diagnostic_cube_xy.attributes)", "def getLocalCoordinatesInElement(elementTag, x, y, z):", "def match_det2cube_miripsf(alpha_resol, beta_resol, wave_resol,\n naxis1, naxis2, naxis3,\n xcenters, ycenters, zcoord,\n spaxel_flux,\n spaxel_weight,\n spaxel_iflux,\n spaxel_alpha, spaxel_beta, spaxel_wave,\n flux,\n coord1, coord2, 
wave, alpha_det, beta_det,\n rois_pixel, roiw_pixel, weight_pixel, softrad_pixel):\n\n nplane = naxis1 * naxis2\n# now loop over the pixel values for this region and find the spaxels that fall\n# withing the region of interest.\n nn = coord1.size\n# print('looping over n points mapping to cloud',nn)\n#________________________________________________________________________________\n for ipt in range(0, nn - 1):\n lower_limit = softrad_pixel[ipt]\n#________________________________________________________________________________\n # xcenters, ycenters is a flattened 1-D array of the 2 X 2 xy plane\n # cube coordinates.\n # find the spaxels that fall withing ROI of point cloud defined by\n # coord1,coord2,wave\n\n xdistance = (xcenters - coord1[ipt])\n ydistance = (ycenters - coord2[ipt])\n radius = np.sqrt(xdistance * xdistance + ydistance * ydistance)\n\n indexr = np.where(radius <= rois_pixel[ipt])\n indexz = np.where(abs(zcoord - wave[ipt]) <= roiw_pixel[ipt])\n\n#________________________________________________________________________________\n# loop over the points in the ROI\n for iz, zz in enumerate(indexz[0]):\n istart = zz * nplane\n for ir, rr in enumerate(indexr[0]):\n#________________________________________________________________________________\n#________________________________________________________________________________\n# if weight is miripsf -distances determined in alpha-beta coordinate system\n\n weights = FindNormalizationWeights(wave[ipt],\n wave_resol,\n alpha_resol,\n beta_resol)\n\n\n cube_index = istart + rr\n\n alpha_distance = alpha_det[ipt] - spaxel_alpha[cube_index]\n beta_distance = beta_det[ipt] - spaxel_beta[cube_index]\n wave_distance = abs(wave[ipt] - spaxel_wave[cube_index])\n\n xn = alpha_distance / weights[0]\n yn = beta_distance / weights[1]\n wn = wave_distance / weights[2]\n\n # only included the spatial dimensions\n wdistance = (xn * xn + yn * yn + wn * wn)\n weight_distance = np.power(np.sqrt(wdistance), weight_pixel[ipt])\n#________________________________________________________________________________\n# We have found the weight_distance based on instrument type\n\n if weight_distance < lower_limit: weight_distance = lower_limit\n weight_distance = 1.0 / weight_distance\n\n\n spaxel_flux[cube_index] = spaxel_flux[cube_index] + weight_distance * flux[ipt]\n spaxel_weight[cube_index] = spaxel_weight[cube_index] + weight_distance\n spaxel_iflux[cube_index] = spaxel_iflux[cube_index] + 1", "def findnearest(particle, particle_array): \r\n\tdist_array = np.sum((particle - particle_array)**2, axis=1)\r\n\treturn np.nanargmin(dist_array)", "def getClosestUVs(*args, **kwargs):\n \n pass", "def get_nearest_water(cube, tree, xi, yi, k=10, max_dist=0.04, min_var=0.01):\n distances, indices = tree.query(np.array([xi, yi]).T, k=k)\n if indices.size == 0:\n raise ValueError(\"No data found.\")\n # Get data up to specified distance.\n mask = distances <= max_dist\n distances, indices = distances[mask], indices[mask]\n if distances.size == 0:\n msg = \"No data near ({}, {}) max_dist={}.\".format\n raise ValueError(msg(xi, yi, max_dist))\n # Unstructured model.\n if (cube.coord(axis='X').ndim == 1) and (cube.ndim == 2):\n i = j = indices\n unstructured = True\n # Structured model.\n else:\n unstructured = False\n if cube.coord(axis='X').ndim == 2: # CoordinateMultiDim\n i, j = np.unravel_index(indices, cube.coord(axis='X').shape)\n else:\n shape = (cube.coord(axis='Y').shape[0],\n cube.coord(axis='X').shape[0])\n i, j = np.unravel_index(indices, shape)\n IJs 
= list(zip(i, j))\n for dist, idx in zip(distances, IJs):\n idx = tuple([int(kk) for kk in idx])\n if unstructured: # NOTE: This would be so elegant in py3k!\n idx = (idx[0],)\n # This weird syntax allow for idx to be len 1 or 2.\n series = cube[(slice(None),)+idx]\n if is_water(series, min_var=0.01):\n break\n else:\n series = None\n continue\n return series, dist, idx", "def nearest_neighbours(x,y,d=1):\n hcol = int((x / 1.5) + 0.5)\n adjust = Sin60 * (hcol % 2)\n hrow = int(((y - adjust) / R3) + 0.5)\n here = Vec(x,y)\n centres = sorted(((h_centre(hcol+i,hrow+j)-here).length(),hcol+i,hrow+j)\n for i in range(-d,d+1)\n for j in range(-d,d+1))\n return ((hc,hr) for (dd,hc,hr) in centres)", "def get_near(self, point, radius):\n return [x.data for x in self.entities.search_nn_dist(point, radius)]", "def efficient_closest_pair(points):\n\n points = sort_points_by_X(points)\n return efficient_closest_pair_routine(points)", "def GetNearestNeighbourArrays(sampling_points,theta_samples,phi_samples,roundup=False):\n theta_min = 0.0\n theta_max = np.pi\n phi_min = -np.pi\n phi_max = np.pi\n \n if roundup:\n division = int(ceil(sqrt(sampling_points/20)))\n else:\n division = int(round(sqrt(sampling_points/20)))\n \n neighbour_samples = 20*division**2\n \n theta_edges = np.linspace(theta_min,theta_max,num=2*theta_samples+1)[::2]\n phi_edges = np.linspace(phi_min,phi_max,num=2*phi_samples+1)[::2]\n \n Theta_edges, Phi_edges = np.meshgrid(theta_edges,phi_edges,indexing=\"ij\")\n \n geodesicSamplesCoordsThetaPhi, geodesicSamplesAreas = GetGeodesicSamplePoints(neighbour_samples)\n geodesicSamplesVec = ThetaPhisToUnitVec(geodesicSamplesCoordsThetaPhi)\n\n ThetaPhis = np.transpose(np.array([Theta_edges.flatten(),Phi_edges.flatten()]),(1,0))\n \n SampleVecs = ThetaPhisToUnitVec(ThetaPhis)\n NearestNeighbours = np.zeros(len(SampleVecs),dtype=int)\n \n for i in range(len(SampleVecs)):\n NearestNeighbours[i] = FindNearestNeighbour(SampleVecs[i],geodesicSamplesVec)\n \n NearestNeighbours = np.reshape(NearestNeighbours,(theta_samples+1,phi_samples+1))\n \n lat_edges = ThetaToLat(theta_edges)\n lon_edges = PhiToLong(phi_edges)\n Lon_edges, Lat_edges = np.meshgrid(lon_edges,lat_edges)\n \n return geodesicSamplesCoordsThetaPhi, NearestNeighbours, Lon_edges, Lat_edges", "def _get_closest_point_in_point_cloud(self, pixel):\n # Select only points that are in front.\n fwd_points = self.points[np.where(self.points[:, 2] > 0.0)]\n # Select x and y.\n pc_xy = fwd_points[:, 0:2]\n # Select z\n pc_z = fwd_points[:, 2]\n # Divize x, y by z\n normalized_pc = pc_xy / pc_z[:, None]\n xy = np.array([pixel.x, pixel.y]).transpose()\n # Compute distance\n dist = np.sum((normalized_pc - xy)**2, axis=1)\n # Select index of the closest point.\n closest_index = np.argmin(dist)\n # Return the closest point.\n return Location(fwd_points[closest_index][0],\n fwd_points[closest_index][1],\n fwd_points[closest_index][2])", "def _nearest(self, i):\n\n # Need the second nearest neighbor of i since the nearest neighbor\n # will be itself. Using argpartition, the k-th nearest neighbor is\n # placed at index k.\n idx = list(self.mesh[self.cell_index(self.spheres[i])])\n dists = cdist([self.spheres[i]], self.spheres[idx])[0]\n if dists.size > 1:\n j = dists.argpartition(1)[1]\n return idx[j], dists[j]\n else:\n return None, None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The 2d-to-3d element connectivity table for a 3d object
def e2_e3_table(self): if self._n_layers is None: print("Object has no layers: cannot return e2_e3_table") return None if self._e2_e3_table is None: res = self._get_2d_to_3d_association() self._e2_e3_table = res[0] self._2d_ids = res[1] self._layer_ids = res[2] return self._e2_e3_table
[ "def testStructuringElement3D(self):\n testse = structuringElement3D([0,4,5,6,9,12] , FACE_CENTER_CUBIC)\n testse2 = structuringElement3D([4,5,6,9,12] , CENTER_CUBIC)\n testse3 = structuringElement3D([0,4,5,6,9,12] , FACE_CENTER_CUBIC)\n self.assertTrue(testse.getDirections()==[0,4,5,6,9,12])\n self.assertTrue(testse.getGrid()==FACE_CENTER_CUBIC)\n self.assertTrue(testse.hasZero())\n self.assertTrue(testse2.getDirections()==[4,5,6,9,12])\n self.assertTrue(testse2.getGrid()==CENTER_CUBIC)\n self.assertTrue(not testse2.hasZero())\n transse=testse.transpose()\n self.assertTrue(transse.getDirections()==[0,1,2,3,9,12])\n self.assertTrue(transse.getGrid()==FACE_CENTER_CUBIC)\n self.assertTrue(transse.hasZero())\n self.assertTrue(testse3==testse)\n self.assertTrue(testse3!=testse2)\n s = repr(testse)\n self.assertTrue(s==\"structuringElement3D([0, 4, 5, 6, 9, 12], mamba3D.FACE_CENTER_CUBIC)\",s)", "def cell_to_morph3d(cell):\n g = nx.DiGraph()\n stack = [cell.soma]\n while len(stack) > 0:\n sec = stack.pop()\n # This is roundabout way is required because nrn7.4 does not\n # provide explicit equivalent of `access {section}`. In nrn7.5\n # the 3d functions are available as Section methods.\n h('access {}'.format(sec.name()))\n stype = nu.sectype(sec.name())\n pt3d = int(h.n3d())\n # pt3d = int(sec.n3d()): # only nrn >= 7.5\n for ii in range(pt3d): \n name = '{}_{}'.format(sec.name(), ii)\n x = h.x3d(ii)\n y = h.y3d(ii)\n z = h.z3d(ii)\n d = h.diam3d(ii)\n g.add_node(name, x=x, y=y, z=z, r=d/2.0, s=stype, orig=sec)\n for ii in range(1, pt3d):\n n1 = '{}_{}'.format(sec.name(), ii-1)\n n2 = '{}_{}'.format(sec.name(), ii)\n length = ng.eucd(g, n1, n2)\n g.add_edge(n1, n2, length=length)\n current = h.SectionRef(sec=sec)\n if current.has_parent():\n h('access {}'.format(current.parent.name())) \n n1 = '{}_{}'.format(current.parent.name(),\n int(h.n3d()-1))\n g.add_edge(n1, '{}_0'.format(sec.name()),\n length=0)\n for child in current.child:\n # print('Adding', child.name())\n stack.append(child)\n return g, '{}_0'.format(cell.soma.name())", "def test_layer3_edges(bf: Session, sot: SoT) -> None:\n # We check for L3 edges at the node-pair level, not interface level.\n # This suffices since we have only one edge between node pairs.\n layer3_edges = bf.q.layer3Edges(nodes=SNAPSHOT_NODES_SPEC,\n remoteNodes=SNAPSHOT_NODES_SPEC).answer().frame()\n layer3_node_pairs = {NodePair(node1=row[\"Interface\"].hostname, node2=row[\"Remote_Interface\"].hostname)\n for _, row in layer3_edges.iterrows()}\n assert layer3_node_pairs == sot.get_connected_node_pairs()", "def is3D(self) -> \"SbBool\":\n return _coin.SoCoordinateElement_is3D(self)", "def get_vertex_zlayers(net, glayout, net_prefix, redo_layout):\n\n def find_vlayers(row, vtype='Source', bar=None):\n if bar:\n bar.next()\n node = glayout.loc[row[vtype]]\n ebin = row['Edge_Bin']\n pbin = row['Pval_Bin']\n hpbin = np.nan\n if ('HPval_Bin' in row.index):\n hpbin = row['HPval_Bin']\n rbin = row['Rsqr_Bin']\n rel = row['Relationship']\n test = row['Test_Name']\n return(row[vtype], node['X'], node['Y'], ebin, pbin, hpbin, rbin, rel, test, node['Degree'], node['CC'])\n\n\n if (redo_layout | (not os.path.exists(net_prefix + '.3Dvlayers.txt'))):\n print(\"Calculating 3D vertex layout.\")\n bar = IncrementalBar('', max=net.shape[0]*2, suffix='%(percent)d%%')\n lsource = net.apply(find_vlayers, vtype='Source', bar=bar, axis=1)\n ltarget = net.apply(find_vlayers, vtype='Target', bar=bar, axis=1)\n print(\"\")\n\n columns = ['Vertex', 'X', 'Y', 'EBin', 'PBin', 'HPBin', 
'RBin', 'Rel', 'Test_Name', 'Degree', 'CC']\n vlayers = pd.DataFrame.from_records(lsource.append(ltarget).values, columns=columns)\n vlayers = vlayers[vlayers.duplicated() == False]\n # We want to place the node in the layer where it first appears.\n vlayers = vlayers.groupby(by=['Vertex']).apply(lambda g: g[g['EBin'] == g['EBin'].max()])\n vlayers.reset_index(inplace=True, drop=True)\n vlayers.to_csv(net_prefix + '.3Dvlayers.txt')\n\n else:\n vlayers = pd.read_csv(net_prefix + '.3Dvlayers.txt', index_col=0)\n\n return vlayers", "def connectivity(self):\n if len(self._connectivity) ==0:\n self._connectivity = [[self.Nodes.index(n) for n in e.nodes] for e in self.Elements]\n return self._connectivity", "def get_elem_connectivity(self, object_id):\n (elem_block_connectivity, num_elem_this_blk,\n num_nodes_per_elem) = self.__ex_get_elem_conn(object_id)\n if self.use_numpy:\n elem_block_connectivity = ctype_to_numpy(\n self, elem_block_connectivity)\n return elem_block_connectivity, num_elem_this_blk.value, num_nodes_per_elem.value", "def Nodes3D(N):\n\n alpopt = np.array([0, 0, 0, 0.1002, 1.1332, 1.5608, 1.3413, 1.2577, 1.1603,\\\n 1.10153, 0.6080, 0.4523, 0.8856, 0.8717, 0.9655])\n\n if(N<=15):\n alpha = alpopt[N-1]\n else:\n alpha = 1.\n\n # total number of nodes and tolerance\n Np = (N+1)*(N+2)*(N+3)//6\n tol = 1e-8\n\n r,s,t = EquiNodes3D(N)\n\n L1 = (1.+t)/2\n L2 = (1.+s)/2\n L3 = -(1.+r+s+t)/2\n L4 = (1+r)/2\n\n # set vertices of tetrahedron\n v1 = np.array([-1., -1./sqrt(3.), -1./sqrt(6.)]) # row array\n v2 = np.array([ 1., -1./sqrt(3.), -1./sqrt(6.)])\n v3 = np.array([ 0, 2./sqrt(3.), -1./sqrt(6.)])\n v4 = np.array([ 0, 0, 3./sqrt(6.)])\n\n # orthogonal axis tangents on faces 1-4\n t1 = np.zeros((4,3))\n t1[0,:] = v2-v1\n t1[1,:] = v2-v1\n t1[2,:] = v3-v2\n t1[3,:] = v3-v1\n\n t2 = np.zeros((4,3))\n t2[0,:] = v3-0.5*(v1+v2)\n t2[1,:] = v4-0.5*(v1+v2)\n t2[2,:] = v4-0.5*(v2+v3)\n t2[3,:] = v4-0.5*(v1+v3)\n\n for n in range(4):\n # normalize tangents\n norm_t1 = la.norm(t1[n,:])\n norm_t2 = la.norm(t2[n,:])\n t1[n,:] = t1[n,:]/norm_t1 # 2-norm np.array ?\n t2[n,:] = t2[n,:]/norm_t2\n\n # Warp and blend for each face (accumulated in shiftXYZ)\n XYZ = L3*v1+L4*v2+L2*v3+L1*v4 # form undeformed coordinates\n shift = np.zeros((Np,3))\n for face in range(4):\n if(face==0):\n La = L1; Lb = L2; Lc = L3; Ld = L4; # check syntax\n\n if(face==1):\n La = L2; Lb = L1; Lc = L3; Ld = L4;\n\n if(face==2):\n La = L3; Lb = L1; Lc = L4; Ld = L2;\n\n if(face==3):\n La = L4; Lb = L1; Lc = L3; Ld = L2;\n\n # compute warp tangential to face\n warp1, warp2 = WarpShiftFace3D(N, alpha, alpha, La, Lb, Lc, Ld)\n\n # compute volume blending\n blend = Lb*Lc*Ld\n\n # modify linear blend\n denom = (Lb+0.5*La)*(Lc+0.5*La)*(Ld+0.5*La)\n ids = np.argwhere(denom>tol) # syntax\n ids = ids[:,0]\n\n blend[ids] = (1+(alpha*La[ids])**2)*blend[ids]/denom[ids]\n\n # compute warp & blend\n shift = shift + (blend*warp1)*t1[face,:]\n shift = shift + (blend*warp2)*t2[face,:]\n\n # fix face warp\n ids = np.argwhere((La<tol) *( (Lb>tol) + (Lc>tol) + (Ld>tol) < 3)) # syntax ??\n ids = ids[:,0]\n\n shift[ids,:] = warp1[ids]*t1[face,:] + warp2[ids]*t2[face,:]\n\n\n\n # shift nodes and extract individual coordinates\n XYZ = XYZ + shift\n x = XYZ[:,0]\n y = XYZ[:,1]\n z = XYZ[:,2]\n\n return x, y, z", "def readTableD3(self, data, n):\r\n #print \"reading TABLED3\"\r\n func = TABLED3\r\n self.readTable3(func, data)", "def GetConnectivityforMol(mol):\r\n result = {}\r\n for DesLabel in _connectivity.keys():\r\n result[DesLabel] = 
round(_connectivity[DesLabel](mol), 3)\r\n return result", "def itkImageD3_cast(obj: 'itkLightObject') -> \"itkImageD3 *\":\n return _itkImagePython.itkImageD3_cast(obj)", "def __find_tool_relation_to_3d_deadzone(self):\n l1, r1 = self.__project_from_3d_to_pixel(self.__psm1_last_pos__)\n l2, r2 = self.__project_from_3d_to_pixel(self.__psm2_last_pos__)\n# l1 = self.__world_to_pixel(self.__psm1_last_pos__, self.__cam_info__['left'])\n# r1 = self.__world_to_pixel(self.__psm1_last_pos__, self.__cam_info__['right'])\n# l2 = self.__world_to_pixel(self.__psm2_last_pos__, self.__cam_info__['left'])\n# r2 = self.__world_to_pixel(self.__psm2_last_pos__, self.__cam_info__['right'])\n \n def contact_edges(p):\n \"\"\"!\n Find which edges are in contact with the tool\n \"\"\"\n edges = {}\n if p[0] > self.__cam_width__ * (1-self.__deadzone_margin__):\n edges['right'] = p[0] - self.__cam_width__ * (1-self.__deadzone_margin__)\n if p[1] > self.__cam_height__ * (1-self.__deadzone_margin__):\n edges['bottom'] = p[1] - self.__cam_height__ * (1-self.__deadzone_margin__)\n if p[0] < self.__cam_width__ * self.__deadzone_margin__ :\n edges['left'] = p[0] - self.__cam_width__ * self.__deadzone_margin__ \n if p[1] < self.__cam_height__ * self.__deadzone_margin__ :\n edges['top'] = p[1] - self.__cam_height__ * self.__deadzone_margin__ \n return edges\n \n return contact_edges(l1), contact_edges(l2)", "def elem2d_ids(self):\n if self._n_layers is None:\n raise InvalidGeometry(\"Object has no layers: cannot return elem2d_ids\")\n # or return self._2d_ids ??\n\n if self._2d_ids is None:\n res = self._get_2d_to_3d_association()\n self._e2_e3_table = res[0]\n self._2d_ids = res[1]\n self._layer_ids = res[2]\n return self._2d_ids", "def topology_connectivity(self):\n # loop through cells.\n for cell in self.cell:\n faces = cell.set_face_vertices()\n # loop through points of faces.\n # note that 'faces' is not assigned to the cell yet.\n for face in faces:\n if face[0].parent_bface: # needed to use continue statement.\n # loop though bfaces that the first face vertex belongs to.\n for parent_bface in face[0].parent_bface: # face[0] is the first face vertex.\n # count number of parent bfaces other points of the face belongs to.\n sig = 0\n for point in face[1:]: # do not consider the first face point.\n if parent_bface in point.parent_bface:\n sig += 1\n if sig == len(face) - 1: # '-1' because we start from the second vertex to count matches.\n cell.bface.append(parent_bface)\n parent_bface.parent_cell.append(cell)\n break # looping though bfaces of the first face vertex.\n continue # with the next face as we are done with the current one.\n\n # loop through parent cells of the face vertex.\n for parent_cell in face[0].parent_cell:\n # make sure that parent cell is not the current cell.\n if parent_cell != cell:\n # check if this parent cell is already processed.\n if parent_cell not in cell.nei:\n sig = 0\n for point in face[1:]: # do not consider the first face point.\n sig += point.parent_cell.count(parent_cell)\n if sig == len(face) - 1: # '-1' because we start from the second vertex to count matches.\n cell.nei.append(parent_cell)\n parent_cell.nei.append(cell)\n if len(cell.shape.args) == 4: # check if the shape is quad.\n shape = symgeo.Line(face[0].shape, face[1].shape)\n self.iface.append(InteriorFace(shape, face, self))\n cell.iface.append(self.iface[-1])\n parent_cell.iface.append(self.iface[-1])\n break # looping though parent cells of the first face vertex.", "def draw_object_3d(self, type, mode, objects, 
default_count, vert_v_vx, vert_v_vy, vert_v_vz, norm_v_vx, norm_v_vy, norm_v_vz, color_vv, index_vv, count_vv):\n self._draw_object_3d(type, mode, objects, default_count, vert_v_vx, vert_v_vy, vert_v_vz, norm_v_vx, norm_v_vy, norm_v_vz, color_vv, index_vv, count_vv)", "def build_l3_connectivity(anm):\n #TODO: use this as base for ospf, ebgp, ip, etc rather than exploding in each\n g_in = anm['input']\n g_l3conn = anm.add_overlay(\"l3_conn\")\n g_l3conn.add_nodes_from(g_in, retain=['label', 'update', 'device_type', 'asn',\n 'specified_int_names',\n 'device_subtype', 'platform', 'host', 'syntax'])\n g_l3conn.add_nodes_from(g_in.nodes(\"is_switch\"), retain=['asn'])\n#TODO: check if edge_id needs to be still retained\n g_l3conn.add_edges_from(g_in.edges(), retain=['edge_id'])\n\n ank_utils.aggregate_nodes(g_l3conn, g_l3conn.nodes(\"is_switch\"),\n retain=\"edge_id\")\n exploded_edges = ank_utils.explode_nodes(g_l3conn, g_l3conn.nodes(\"is_switch\"),\n retain=\"edge_id\")\n for edge in exploded_edges:\n edge.multipoint = True\n edge.src_int.multipoint = True\n edge.dst_int.multipoint = True", "def getPtr3(self) -> \"SbVec3f const *\":\n return _coin.SoGLCoordinateElement_getPtr3(self)", "def render_map_3d(self):\n for y in range(0, self.dimensions[0], self.granularity):\n for z in range(0, self.dimensions[1], self.granularity):\n for x in range(0, self.dimensions[2], self.granularity):\n if self.map[y, z, x] == 1:\n bpy.ops.mesh.primitive_cube_add(location=(x-(self.dimensions[2]/2),\n z-(self.dimensions[1]/2),\n y+1))", "def itkCompositeTransformD3_cast(obj: 'itkLightObject') -> \"itkCompositeTransformD3 *\":\n return _itkCompositeTransformPython.itkCompositeTransformD3_cast(obj)", "def render3D(self):\n mesh = trimesh.Trimesh(vertices=self.verts, faces=self.faces)\n mesh.show(resolution=(512, 512))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The associated 2d element id for each 3d element
def elem2d_ids(self): if self._n_layers is None: raise InvalidGeometry("Object has no layers: cannot return elem2d_ids") # or return self._2d_ids ?? if self._2d_ids is None: res = self._get_2d_to_3d_association() self._e2_e3_table = res[0] self._2d_ids = res[1] self._layer_ids = res[2] return self._2d_ids
[ "def get_element2dof_id_map(self):\n assert self.model_type == 'frame', 'this function assumes 6 dof each node for now!'\n return {int(e_id) : {0 : id_map[0:6], 1 : id_map[6:12]} \n for e_id, id_map in enumerate(self._sc_ins.get_element2dof_id_map())}", "def update_ids(self):\n paths=self.get_surf_paths()\n for path in paths:\n S=self.get_surface(path)\n S.id=path", "def get_elem_id_map(self):\n return self.__ex_get_id_map('EX_ELEM_MAP')", "def GetId(self, *args):\n return _Graphic3d.Graphic3d_TextureRoot_GetId(self, *args)", "def __hash__(self):\n # we assume the largest number of faces is 4, face id is 1-4, need 0-3\n hval = self.element.id*4+(self.id-1)\n return hval", "def uniqueId( self ):\r\n\t\treturn mxs.blurUtil.uniqueId( self._nativePointer.layerAsRefTarg )", "def material_ids(self):\n return [self.Mid1(), self.Mid2(), self.Mid3()]", "def id(self):\n if not self._id:\n self._id = self._layer.GetLayerId()\n return self._id", "def EffectiveBlockLayerId(self) -> _n_0_t_0:", "def PlotLayoutId(self) -> _n_0_t_0:", "def get_IDWT(self):\n if len(self.DWT) == 0:\n self.get_DWT()\n for i in self.H:\n self.IDWT[i] = np.linalg.multi_dot([self.H[i].T, self.DWT[i], self.H[i]])\n return", "def elements22Dindexes(self, items_idx):\n M_elements = list(it.product(*[items_idx, items_idx]))\n i = [item[0] for item in M_elements]\n j = [item[1] for item in M_elements]\n Ndim = len(set(i))\n return (i,j, Ndim)", "def nodeIDs2ElementIDs(self):\n if len(self._nodeIDs2ElementIDs) == 0:\n # Compute list of connected elements for each node\n self._nodeIDs2ElementIDs=dict()\n for i,n in enumerate(self.Nodes):\n self._nodeIDs2ElementIDs[n.ID] = [e.ID for e in self.Elements if n.ID in e.nodeIDs]\n return self._nodeIDs2ElementIDs", "def image_id_at(self, i):\n return i", "def surface_inner_object(self):\n label='AdjacentSpaceId'\n AdjacentSpaceId_nodes=self.child_nodes(label=label)\n if len(AdjacentSpaceId_nodes)==0:\n return None\n else:\n spaceIdRef=AdjacentSpaceId_nodes[0].attributes['spaceIdRef']\n for n in self._graph.nodes:\n if n.attributes.get('id')==spaceIdRef:\n return n", "def get_ids():", "def get_dom_element_id(self):\n return '%s%s' % (self.entry_type_id, self.entry_id)", "def id_list(self):\n return numpy.array(self.spiketrains.keys(), int)", "def surface_outer_object(self):\n label='AdjacentSpaceId'\n AdjacentSpaceId_nodes=self.child_nodes(label=label)\n if len(AdjacentSpaceId_nodes)<2:\n return None\n else:\n spaceIdRef=AdjacentSpaceId_nodes[1].attributes['spaceIdRef']\n for n in self._graph.nodes:\n if n.attributes.get('id')==spaceIdRef:\n return n" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Maximum number of layers
def n_layers(self): return self._n_layers
[ "def get_number_of_layers(self) -> int:\n pass", "def num_hidden_layers(self):\n return len(self.weight_matrices) - 1", "def num_perpception_layer_points(layer):\n return (layer + 1) * 4", "def max_num_boxes_batch(self):\n shape_list = list()\n for batch in self.batch_boxes.take(-1):\n as_array = np.array(list(dict.values(batch)))\n shape_list.append(as_array.shape)\n return np.max(np.array(shape_list), axis=0)", "def max_ripples():\r\n return 8", "def getMaxPool() -> uint256:\n return self.maxPool", "def max_size(self):\n return self.info_sliced.largest_intermediate", "def get_channel_max():", "def max_tile_shape(self):\n return self.__max_tile_shape", "def MaxPoolN(x,n):\n ret_val = tf.nn.max_pool(x,ksize = [1,n,n,1],\n strides = [1,n,n,1])\n return ret_val", "def num_layers(self):\r\n return len(self.hid_acts)", "def max_iterations(self) -> int:\n return self._max_epochs", "def define_first_layer_dim(x):\n # TODO: go to higher dimensions\n max_dim = 4\n\n if len(x[0]) <= max_dim:\n return len(x[0])\n else:\n return max_dim", "def num_layers(self) -> int:\n if hasattr(self._contextualizer, \"num_layers\"):\n return self._contextualizer.num_layers + 1\n else:\n raise NotImplementedError(\n f\"Contextualizer of type {type(self._contextualizer)} \"\n + \"does not report how many layers it has.\"\n )", "def resize(self, layers):", "def max_component_size(self) -> int:\n nodes_len = len(self.nodes)\n stack: List[int] = []\n visited = [False]*nodes_len\n max_size = 0\n\n for u in range(nodes_len):\n if visited[u]:\n continue\n\n size = 1\n visited[u] = True\n stack.append(u)\n while stack:\n v = stack.pop()\n v_edges = self.edges[v]\n\n for k in range(nodes_len):\n if not v_edges[k]: # Not a neighbour\n continue\n if visited[k]:\n continue\n\n size += 1\n visited[k] = True\n stack.append(k)\n\n if size > max_size:\n max_size = size\n\n return max_size", "def max_noutput_items(self):\n return _frame_detection_swig.deinterleaver_bb_sptr_max_noutput_items(self)", "def max_noutput_items(self):\n return _wavelet_swig.squash_ff_sptr_max_noutput_items(self)", "def DEFAULT_MAX_DEPTH(self, *args, **kwargs):\n ...", "def max_noutput_items(self):\n return _wavelet_swig.wavelet_ff_sptr_max_noutput_items(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Number of sigma layers
def n_sigma_layers(self): return self._n_sigma
[ "def get_number_of_layers(self) -> int:\n pass", "def sigmaToSize(sigma):\n return 2*int(kernelSize*sigma) + 1", "def num_hidden_layers(self):\n return len(self.weight_matrices) - 1", "def num_perpception_layer_points(layer):\n return (layer + 1) * 4", "def compute_kernel_size(sigma_val: float) -> int:\n return 2 * int(4.0 * sigma_val + 0.5) + 1", "def dim_node_features(self) -> int:\n return 5", "def n_neuron(self):\n pass", "def calc_bayes_model_dim(self):\n\t\treturn 2. * (np.sum(self.posterior_weights * (self.LLs ** 2.)) - np.sum((self.posterior_weights * self.LLs) ** 2.))", "def state_dim(self) -> int:\n return self.backbone.state_dim", "def get_nmodels(self):\n return self.cov.shape[0]", "def num_layers(self):\r\n return len(self.hid_acts)", "def ndim_meas(self):\n return 1", "def sigma(self):\r\n return self._sigma", "def ndim(self):\n return len(self._bins)", "def compute_sigma_weights(self, dim: int) -> Tuple[torch.Tensor, torch.Tensor]:", "def get_num_3dcnn_layers(sequence_size, row_count, col_count,\n num_neurons=1000, num_filters=16):\n total_expo = np.log2(sequence_size * row_count * col_count * num_filters\n / num_neurons)\n if total_expo < 0:\n return 0\n s_expo = int(np.log2(sequence_size))\n r_expo = int(np.log2(row_count))\n c_expo = int(np.log2(col_count))\n expos = sorted([s_expo, r_expo, c_expo])\n num_3dcnn_layers = 0\n for i in range(len(expos)):\n expo = expos[i]\n if i == 0:\n prev = 0\n else:\n prev = expos[i - 1]\n if total_expo <= (expo - prev) * (len(expos) - i):\n num_3dcnn_layers += int(total_expo // (len(expos) - i)) + 1\n break\n else:\n num_3dcnn_layers += expo - prev\n total_expo -= (expo - prev) * (len(expos) - i)\n return num_3dcnn_layers", "def get_neurons_count(self):\n\n return self.activations.m", "def get_num_of_features(self) -> int:\n return self.__neuron_each_layer[0]", "def n_independent_parameters(self):\n return", "def num_layers(self) -> int:\n if hasattr(self._contextualizer, \"num_layers\"):\n return self._contextualizer.num_layers + 1\n else:\n raise NotImplementedError(\n f\"Contextualizer of type {type(self._contextualizer)} \"\n + \"does not report how many layers it has.\"\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Maximum number of zlayers
def n_z_layers(self): if self._n_layers is None: return None return self._n_layers - self._n_sigma
[ "def get_number_of_layers(self) -> int:\n pass", "def max_z(self):\n return self.origin[2] + self.size[2]", "def n_zlabels(self):\n return self._n_zlabels", "def max_ripples():\r\n return 8", "def num_perpception_layer_points(layer):\n return (layer + 1) * 4", "def z_step_size(self):\n return (self.z_upper - self.z_lower) / self.nz", "def max_depth():\n return ctoast.timing_manager_max_depth()", "def get_channel_max():", "def maximum_z_projection(input, output):\n\n\n parameters = {\n \"dst_max\":output,\n \"src\":input,\n };\n\n execute(__file__, 'maximum_z_projection_x.cl', 'maximum_z_projection', output.shape, parameters);", "def zfit(self):\n self.zmax = self.zi.max()\n self.zmin = self.zi.min()", "def n_faces_z(self):\n if self.dim < 3:\n return None\n return int(np.prod(x + y for x, y in zip(self._n, (0, 0, 1))))", "def collatz_len(n):\n if n == 1:\n return 1\n else:\n return 1 + collatz_len(collatz_step(n))", "def n_edges_z(self):\n if self.dim < 3:\n return None\n return int(np.prod(x + y for x, y in zip(self._n, (1, 1, 0))))", "def getMaxPool() -> uint256:\n return self.maxPool", "def get_nzones(zones):\n nzones = np.unique(zones).shape[0]\n if 0 in np.unique(zones):\n nzones -=1\n return nzones", "def num_hidden_layers(self):\n return len(self.weight_matrices) - 1", "def max_raindrops():\r\n return 100", "def max_size(self):\n return self.info_sliced.largest_intermediate", "def num_layers(self) -> int:\n if hasattr(self._contextualizer, \"num_layers\"):\n return self._contextualizer.num_layers + 1\n else:\n raise NotImplementedError(\n f\"Contextualizer of type {type(self._contextualizer)} \"\n + \"does not report how many layers it has.\"\n )", "def DEFAULT_MAX_DEPTH(self, *args, **kwargs):\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
generate matplotlib polygons from element table for plotting Returns list(matplotlib.patches.Polygon) list of polygons for plotting
def _to_polygons(self, geometry=None): if geometry is None: geometry = self from matplotlib.patches import Polygon polygons = [] for j in range(geometry.n_elements): nodes = geometry.element_table[j] pcoords = np.empty([len(nodes), 2]) for i in range(len(nodes)): nidx = nodes[i] pcoords[i, :] = geometry.node_coordinates[nidx, 0:2] polygon = Polygon(pcoords, True) polygons.append(polygon) return polygons
[ "def extract_polygons(data):\n polygons = []\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n \n north_coord_min = north - d_north\n north_coord_max = north + d_north\n east_coord_min = east - d_east\n east_coord_max = east + d_east\n \n \n corners = [(np.int(north_coord_min), np.int(east_coord_min)),\n (np.int(north_coord_max), np.int(east_coord_min)),\n (np.int(north_coord_max), np.int(east_coord_max)),\n (np.int(north_coord_min), np.int(east_coord_max))]\n \n height = alt+d_alt\n\n p = Polygon(corners)\n \n polygons.append([p, height])\n \n return polygons", "def generate_polygon():\n with open('sweden.json') as f:\n data = json.load(f)\n\n arr = data['geometry']['coordinates']\n dt = []\n res = []\n for x in arr:\n for poly in x:\n for p in poly:\n dt.append(p)\n print(getPixel(p[0], p[1], 512))\n res.append(dt)\n dt = []\n \n\n\n for i,p in enumerate(res):\n res[i] = np.array([[ xtile(x), ytile(y)] for x,y in p])\n\n return res", "def parts(self):\n\n return sum(map(list, self.polygons), [])", "def points_to_polygon(l):\n polygon = Polygon()\n for i in l:\n polygon.add_vertex(Vertex(i[X], i[Y]))\n return polygon", "def paint_poly_all(self, obj, tooldia=None, order=None, method=None, outname=None, tools_storage=None, plot=True,\n run_threaded=True):\n\n # This is a recursive generator of individual Polygons.\n # Note: Double check correct implementation. Might exit\n # early if it finds something that is not a Polygon?\n # def recurse(geo):\n # try:\n # for subg in geo:\n # for subsubg in recurse(subg):\n # yield subsubg\n # except TypeError:\n # if isinstance(geo, Polygon):\n # yield geo\n #\n # raise StopIteration\n\n def recurse(geometry, reset=True):\n \"\"\"\n Creates a list of non-iterable linear geometry objects.\n Results are placed in self.flat_geometry\n\n :param geometry: Shapely type or list or list of list of such.\n :param reset: Clears the contents of self.flat_geometry.\n \"\"\"\n if self.app.abort_flag:\n # graceful abort requested by the user\n raise grace\n\n if geometry is None:\n return\n\n if reset:\n self.flat_geometry = []\n\n # ## If iterable, expand recursively.\n try:\n for geo in geometry:\n if geo and not geo.is_empty and geo.is_valid:\n recurse(geometry=geo, reset=False)\n\n # ## Not iterable, do the actual indexing and add.\n except TypeError:\n if isinstance(geometry, LinearRing):\n g = Polygon(geometry)\n self.flat_geometry.append(g)\n else:\n self.flat_geometry.append(geometry)\n\n return self.flat_geometry\n\n if obj.kind == 'gerber':\n # I don't do anything here, like buffering when the Gerber is loaded without buffering????!!!!\n if self.app.defaults[\"gerber_buffering\"] == 'no':\n msg = '%s %s %s' % (_(\"Paint Tool.\"), _(\"Paint all polygons task started.\"), _(\"Buffering geometry...\"))\n self.app.inform.emit(msg)\n else:\n self.app.inform.emit('%s %s' % (_(\"Paint Tool.\"), _(\"Paint all polygons task started.\")))\n\n if self.app.defaults[\"tools_paint_plotting\"] == 'progressive':\n if isinstance(obj.solid_geometry, list):\n obj.solid_geometry = MultiPolygon(obj.solid_geometry).buffer(0)\n else:\n obj.solid_geometry = obj.solid_geometry.buffer(0)\n else:\n self.app.inform.emit('%s %s' % (_(\"Paint Tool.\"), _(\"Paint all polygons task started.\")))\n\n painted_area = recurse(obj.solid_geometry)\n\n # No polygon?\n if not painted_area:\n self.app.log.warning('No polygon found.')\n self.app.inform.emit('[WARNING] %s' % _('No polygon found.'))\n return\n\n self.paint_geo(obj, painted_area, 
tooldia=tooldia, order=order, method=method, outname=outname,\n tools_storage=tools_storage, plot=plot, run_threaded=run_threaded)", "def add_polygon(self):\n for i, r in enumerate(self.rows):\n p = Polygon(self.start_pos, r, self.points)\n self.polygons[i].insert(0, p)\n self.space.add(p.body, p.poly)", "def polygons(self):\n for feature in self.reader:\n geometry = feature['geometry']\n if geometry['type'] == 'Polygon':\n coords = geometry['coordinates']\n yield annotations.Polygon(\n coords[0], coords[1:],\n layer=self.layer, **feature['properties']\n )\n elif geometry['type'] == 'MultiPolygon':\n for coords in geometry['coordinates']:\n yield annotations.Polygon(\n coords[0], coords[1:],\n layer=self.layer, **feature['properties']\n )", "def polylines(self):\n\n\t\treturn [[self.vertex_coordinates(vkey) for vkey in polyedge] for polyedge in self.polyedges()]", "def make_polygons(geojs):\n polygons = {}\n for block in range(len(geojs[\"features\"])):\n geoid = geojs[\"features\"][block][\"properties\"][\"geoid\"]\n polygon = shape(geojs[\"features\"][block][\"geometry\"])\n polygons[geoid] = polygon\n return polygons", "def edges(poly):\n cpoly = closePoly(poly)\n return zip(cpoly[:-1],cpoly[1:])", "def getPoly(pt, w, h):\n x, y = pt\n ll = (x - (w * 0.5), y - (h * 0.5))\n ul = (x - (w * 0.5), y + (h * 0.5))\n ur = (x + (w * 0.5), y + (h * 0.5))\n lr = (x + (w * 0.5), y - (h * 0.5))\n return arcpy.Polygon(arcpy.Array([arcpy.Point(*coords) for coords in [ll,ul,ur,lr,ll]]))", "def boxesToPolygons(self):\n self.polygons = []\n for b in self.boxes:\n self.polygons.append([Polygonize.boxToRing(b)])\n # for poly in self.polygons:\n # QSWATUtils.loginfo('Polygon has ring {0!s}'.format(poly[0].perimeter))\n self.boxes = None", "def makeDirectedPolygon(self, poly):\n\n last_posn = poly[0]\n result = [last_posn]\n\n for posn in poly[1:]:\n result.extend(self.makeArrowhead(last_posn, posn))\n last_posn = posn\n\n return result", "def create_label_map_from_polygons(building_list, label_map):\n for building in building_list:\n polygon = building['poly']\n ring = polygon.GetGeometryRef(0)\n xx, yy = [], []\n for i in range(0, ring.GetPointCount()):\n y, x, z = ring.GetPoint(i)\n xx.append(x)\n yy.append(y)\n xx = np.array(xx)\n yy = np.array(yy)\n rr, cc = sk_draw.polygon(xx, yy)\n #print('{}, {}'.format(rr, cc))\n label_map[rr, cc] = building['BuildingId']\n return label_map", "def solid_polygon(folder_name, poly_coords, attributes, name_col_name=None,\n altitude_mode=\"ctg\", style_to_use=None, visibility=1):\n\n outer_poly_coords = poly_coords\n\n folder = ET.Element(\"Folder\")\n ET.SubElement(folder, \"name\").text = str(folder_name)\n ET.SubElement(folder, \"visibility\").text = str(visibility)\n\n headers = [header for header in attributes[0]]\n name_col_index = headers.index(name_col_name)\n cdata_list = []\n extended_data_list = []\n\n for row in attributes[1:]:\n row_attributes = [cell for cell in row]\n # print(attributes)\n attribute_str = \"<![CDATA[\"\n\n for cell in range(len(headers)):\n attribute_str += \"<b>\" + str(headers[cell]) + \"</b>: \" + str(row_attributes[cell]) + \"<br>\"\n attribute_str += \"]]>\"\n\n cdata_list.append(attribute_str)\n\n extended_data = ET.Element(\"ExtendedData\")\n for cell in range(len(headers)):\n data = ET.SubElement(extended_data, \"Data\", name=str(headers[cell]))\n ET.SubElement(data, \"displayName\").text = str(headers[cell])\n ET.SubElement(data, \"value\").text = str(row[cell])\n extended_data_list.append(extended_data)\n\n count = 0\n for 
outer_poly in outer_poly_coords:\n outer_boundary_coord_str = \"\"\n for coord_set in outer_poly:\n try:\n x = float(coord_set[0])\n y = float(coord_set[1])\n z = int(float(coord_set[2]))\n\n outer_boundary_coord_str += str(x) + \",\" + str(y) + \",\" + str(z) + \" \"\n except ValueError:\n pass\n first_coord = outer_boundary_coord_str[:outer_boundary_coord_str.index(\" \")]\n outer_boundary_coord_str += first_coord\n\n placemark = ET.SubElement(folder, \"Placemark\")\n ET.SubElement(placemark, \"name\").text = str(attributes[count + 1][name_col_index])\n ET.SubElement(placemark, \"visibility\").text = str(visibility)\n ET.SubElement(placemark, \"description\").text = cdata_list[count]\n if style_to_use is not None:\n ET.SubElement(placemark, \"styleUrl\").text = style_to_use\n polygon = ET.SubElement(placemark, \"Polygon\")\n\n outer_boundary = ET.SubElement(polygon, \"outerBoundaryIs\")\n outer_linear_ring = ET.SubElement(outer_boundary, \"LinearRing\")\n ET.SubElement(outer_linear_ring, \"coordinates\").text = outer_boundary_coord_str\n\n ET.SubElement(polygon, \"altitudeMode\").text = altitude_modes(altitude_mode)\n placemark.append(extended_data_list[count])\n\n count += 1\n\n return folder", "def plot_polytope(poly):\n from sympy.plotting.plot import Plot, List2DSeries\n\n xl = [vertex.x for vertex in poly.vertices]\n yl = [vertex.y for vertex in poly.vertices]\n\n xl.append(poly.vertices[0].x) # Closing the polygon\n yl.append(poly.vertices[0].y)\n\n l2ds = List2DSeries(xl, yl)\n p = Plot(l2ds, axes='label_axes=True')\n p.show()", "def get_polygons(geom_object_list):\n\n def _process_multipolygon(multipolygon_obj, results, ignore_invalid):\n \"\"\"\n Process multipolygon object\n \"\"\"\n if settings.TASKING_SHAPEFILE_ALLOW_NESTED_MULTIPOLYGONS:\n nested_items = get_polygons((x for x in multipolygon_obj))\n results = results + nested_items\n else:\n if not ignore_invalid:\n results.append(multipolygon_obj)\n\n return results\n\n result = []\n for item in geom_object_list:\n if settings.TASKING_SHAPEFILE_IGNORE_INVALID_TYPES:\n if isinstance(item, geometries.Polygon):\n result.append(item.geos)\n elif isinstance(item, geometries.MultiPolygon):\n result = _process_multipolygon(\n multipolygon_obj=item,\n results=result,\n ignore_invalid=settings.TASKING_SHAPEFILE_IGNORE_INVALID_TYPES,\n )\n else:\n continue\n else:\n if isinstance(item, geometries.MultiPolygon):\n result = _process_multipolygon(\n multipolygon_obj=item,\n results=result,\n ignore_invalid=settings.TASKING_SHAPEFILE_IGNORE_INVALID_TYPES,\n )\n else:\n result.append(item.geos)\n\n return result", "def plot_polygon(poly, symbol='w', **kwargs):\n for i in range(poly.GetGeometryCount()):\n x, y = zip(*poly.GetGeometryRef(i).GetPoints())\n plt.fill(x, y, symbol, **kwargs)", "def polyedges(self):\n\n\t\tpolyedges = []\n\n\t\tedges = list(self.edges())\n\n\t\twhile len(edges) > 0:\n\n\t\t\t# collect new polyedge\n\t\t\tu0, v0 = edges.pop()\n\t\t\tpolyedges.append(self.polyedge(u0, v0))\n\n\t\t\t# remove collected edges\n\t\t\tfor u, v in pairwise(polyedges[-1]):\n\t\t\t\tif (u, v) in edges:\n\t\t\t\t\tedges.remove((u, v))\n\t\t\t\telif (v, u) in edges:\n\t\t\t\t\tedges.remove((v, u))\n\n\t\treturn polyedges", "def __dump_polygon(obj, fmt):\n coords = obj['coordinates']\n poly = 'POLYGON (%s)'\n rings = (', '.join(' '.join(fmt % c for c in pt) for pt in ring)\n for ring in coords)\n rings = ('(%s)' % r for r in rings)\n poly %= ', '.join(rings)\n return poly" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Export mesh as shapely MultiPolygon. Returns shapely.geometry.MultiPolygon polygons with mesh elements
def to_shapely(self): from shapely.geometry import Polygon, MultiPolygon polygons = [] for j in range(self.n_elements): nodes = self.element_table[j] pcoords = np.empty([len(nodes), 2]) for i in range(len(nodes)): nidx = nodes[i] pcoords[i, :] = self.node_coordinates[nidx, 0:2] polygon = Polygon(pcoords) polygons.append(polygon) mp = MultiPolygon(polygons) return mp
[ "def asMultiPolygon(context): # -> MultiPolygonAdapter:\n ...", "def export_mesh(vertices, triangles, filename, mesh_name=\"mcubes_mesh\"):\n \n import collada\n \n mesh = collada.Collada()\n \n vert_src = collada.source.FloatSource(\"verts-array\", vertices, ('X','Y','Z'))\n geom = collada.geometry.Geometry(mesh, \"geometry0\", mesh_name, [vert_src])\n \n input_list = collada.source.InputList()\n input_list.addInput(0, 'VERTEX', \"#verts-array\")\n \n triset = geom.createTriangleSet(np.copy(triangles), input_list, \"\")\n geom.primitives.append(triset)\n mesh.geometries.append(geom)\n \n geomnode = collada.scene.GeometryNode(geom, [])\n node = collada.scene.Node(mesh_name, children=[geomnode])\n \n myscene = collada.scene.Scene(\"mcubes_scene\", [node])\n mesh.scenes.append(myscene)\n mesh.scene = myscene\n \n mesh.write(filename)", "def MultiPolygon(coordinates, *rest):\n return {\n 'type': 'MultiPolygon',\n 'coordinates': Feature._makeGeometry(coordinates, 4, rest)\n }", "def to_multipolygon(p: Union[Polygon, MultiPolygon]) -> MultiPolygon:\n\n if isinstance(p, Polygon):\n res = MultiPolygon([p])\n elif isinstance(p, MultiPolygon):\n res = p\n else:\n raise ValueError(\"Input must be shapely Polygon or MultiPolygon\")\n\n return res", "def build_mesh(directory, mesh_divisions):\n\n # Get bounds of mesh.\n maxy, maxx, miny, minx = get_bounds(directory)\n\n # X and Y divisions counts.\n nx = mesh_divisions\n ny = mesh_divisions\n\n # X and Y divisions size.\n dx = abs(maxx - minx) / nx\n dy = abs(maxy - miny) / ny\n\n # Init mesh list and id counter.\n crs = {'init': 'epsg:4326'}\n mesh = gpd.GeoDataFrame(crs=crs)\n r_id = 0\n\n # For every \"row\" (lattitude) division:\n for i in range(ny):\n\n # For every \"column\" (longitude) division:\n for j in range(nx):\n\n # Init poly coors.\n vertices = []\n\n # Southwest corner coordinate:\n vertices.append([min(minx+dx*j,maxx),max(maxy-dy*i,miny)])\n\n # Southeast corner coordinate:\n vertices.append([min(minx+dx*(j+1),maxx),max(maxy-dy*i,miny)])\n\n # Northeast corner coordinate:\n vertices.append([min(minx+dx*(j+1),maxx),max(maxy-dy*(i+1),miny)])\n\n # Northwest corner coordinate:\n vertices.append([min(minx+dx*j,maxx),max(maxy-dy*(i+1),miny)])\n\n # Close loop, Southwest corner coordinate:\n vertices.append([min(minx+dx*j,maxx),max(maxy-dy*i,miny)])\n\n # Turn into a shapely Polygon\n r_poly = Polygon(vertices)\n\n # Init GeoSeries with Polygon\n r_series = gpd.GeoSeries(r_poly)\n r_series.name = r_id\n\n # Append Series to Mesh GeoDataFrame\n mesh = mesh.append(r_series)\n\n # Increase id.\n r_id += 1\n\n # Set gemotry.\n mesh = mesh.rename(columns={0: 'geometry'}).set_geometry('geometry')\n\n # Rotate the mesh.\n pass\n\n # Return the GeoDataFrame\n return mesh", "def _assemble_multipolygon_component_polygons(element, geometries):\n outer_polygons = []\n inner_polygons = []\n outer_linestrings = []\n inner_linestrings = []\n\n # get the linestrings and polygons that make up the multipolygon\n for member in element[\"members\"]:\n if member.get(\"type\") == \"way\":\n # get the member's geometry from linestrings_and_polygons\n linestring_or_polygon = geometries.get(f\"way/{member['ref']}\")\n # sort it into one of the lists according to its role and geometry\n if (member.get(\"role\") == \"outer\") and (\n linestring_or_polygon[\"geometry\"].geom_type == \"Polygon\"\n ):\n outer_polygons.append(linestring_or_polygon[\"geometry\"])\n elif (member.get(\"role\") == \"inner\") and (\n linestring_or_polygon[\"geometry\"].geom_type == 
\"Polygon\"\n ):\n inner_polygons.append(linestring_or_polygon[\"geometry\"])\n elif (member.get(\"role\") == \"outer\") and (\n linestring_or_polygon[\"geometry\"].geom_type == \"LineString\"\n ):\n outer_linestrings.append(linestring_or_polygon[\"geometry\"])\n elif (member.get(\"role\") == \"inner\") and (\n linestring_or_polygon[\"geometry\"].geom_type == \"LineString\"\n ):\n inner_linestrings.append(linestring_or_polygon[\"geometry\"])\n\n # Merge outer linestring fragments.\n # Returns a single LineString or MultiLineString collection\n merged_outer_linestrings = linemerge(outer_linestrings)\n\n # polygonize each linestring separately and append to list of outer polygons\n if merged_outer_linestrings.geom_type == \"LineString\":\n outer_polygons += polygonize(merged_outer_linestrings)\n elif merged_outer_linestrings.geom_type == \"MultiLineString\":\n for merged_outer_linestring in list(merged_outer_linestrings.geoms):\n outer_polygons += polygonize(merged_outer_linestring)\n\n # Merge inner linestring fragments.\n # Returns a single LineString or MultiLineString collection\n merged_inner_linestrings = linemerge(inner_linestrings)\n\n # polygonize each linestring separately and append to list of inner polygons\n if merged_inner_linestrings.geom_type == \"LineString\":\n inner_polygons += polygonize(merged_inner_linestrings)\n elif merged_inner_linestrings.geom_type == \"MultiLineString\":\n for merged_inner_linestring in merged_inner_linestrings.geoms:\n inner_polygons += polygonize(merged_inner_linestring)\n\n if not outer_polygons:\n utils.log(\n \"No outer polygons were created for\"\n f\" https://www.openstreetmap.org/{element['type']}/{element['id']}\"\n )\n\n return outer_polygons, inner_polygons", "def multipoly(self):\n multipoly = None\n # exclude zipcodes for which there was nothing in the shp file\n zipcode_qs = self.zipcode_set.exclude(zcta__isnull=True)\n if zipcode_qs.count():\n zipcodes = zipcode_qs.all()\n multipoly = zipcodes[0].zcta.geom\n for zipcode in zipcodes[1:]:\n multipoly = multipoly.union(zipcode.zcta.geom)\n # the multipolygon.union() can return a straight polygon\n if not isinstance(multipoly, MultiPolygon):\n multipoly = MultiPolygon(multipoly)\n return multipoly", "def writeMesh(mesh, name):\n print(\"Writing\", mesh.GetNumberOfPolys(), \"polygons to\", name)\n if name.endswith(\".vtk\"):\n writeVTKMesh(mesh, name)\n return\n if name.endswith(\".ply\"):\n writePLY(mesh, name)\n return\n if name.endswith(\".stl\"):\n writeSTL(mesh, name)\n return\n print(\"Unknown file type: \", name)", "def to_shapely_polygon(self):\n # load shapely lazily, which makes the dependency more optional\n import shapely.geometry\n\n return shapely.geometry.Polygon([(point[0], point[1]) for point in self.exterior])", "def contourf_to_multipolygeojson(contourf,\n unit='', fill_opacity=.9, strdump=False,\n geojson_filepath=None,\n ndigits=3, min_angle_deg=5, stroke_width=1):\n contour_levels = contourf.levels\n polygon_features = []\n mps = []\n contourf_idx = 0\n for coll in contourf.collections:\n color = coll.get_facecolor()\n for path in coll.get_paths():\n for coord in path.to_polygons():\n if min_angle_deg:\n coord = keep_high_angle(coord, min_angle_deg)\n coord = np.around(coord, ndigits) if ndigits else coord\n op = MP(contour_levels[contourf_idx], rgb2hex(color[0]))\n if op in mps:\n for i, k in enumerate(mps):\n if k == op:\n mps[i].add_coords(coord.tolist())\n else:\n op.add_coords(coord.tolist())\n mps.append(op)\n contourf_idx += 1\n # starting here the multipolys 
will be extracted\n for muli in mps:\n polygon = muli.mpoly()\n fcolor = muli.color\n properties = set_properties(stroke_width, fcolor, fill_opacity,\n contour_levels, contourf_idx,\n unit)\n feature = Feature(geometry=polygon, properties=properties)\n polygon_features.append(feature)\n collection = FeatureCollection(polygon_features)\n if strdump or not geojson_filepath:\n return geojson.dumps(collection, sort_keys=True, separators=(',', ':'))\n with open(geojson_filepath, 'w') as fileout:\n geojson.dump(collection, fileout,\n sort_keys=True, separators=(',', ':'))", "def __dump_multipolygon(obj, fmt):\n coords = obj['coordinates']\n mp = 'MULTIPOLYGON (%s)'\n\n polys = (\n # join the polygons in the multipolygon\n ', '.join(\n # join the rings in a polygon,\n # and wrap in parens\n '(%s)' % ', '.join(\n # join the points in a ring,\n # and wrap in parens\n '(%s)' % ', '.join(\n # join coordinate values of a vertex\n ' '.join(fmt % c for c in pt)\n for pt in ring)\n for ring in poly)\n for poly in coords)\n )\n mp %= polys\n return mp", "def to_multi(self):\n if self.name.startswith(('Point', 'LineString', 'Polygon')):\n self.num += 3", "def unstructured_mesh(fname, sizing, convert):\n geo_tools.prep_mesh_config(\n fname + \"Morphology.geo\", fname + \"UMesh.geo\", sizing)\n mesh_domain(fname + \"UMesh.geo\")\n if convert:\n convert_mesh(fname + \"UMesh.msh\", fname + \"UMesh.xml\")", "def export_polygon( polygon, output_filename ): \n if os.path.exists( output_filename ):\n warnings.warn('Existing output file will be overwritten!')\n f = open( output_filename, 'w' )\n if isinstance(polygon,Polygon):\n for vertex in polygon.vertices:\n f.write('%d\\t%.20e\\t%.20e\\n' % (0,vertex.x,vertex.y))\n elif isinstance(polygon,MultiPolygon):\n for ix,polygon in enumerate(polygon.polygons):\n for vertex in polygon.vertices:\n f.write('%d\\t%.20e\\t%.20e\\n' % (ix,vertex.x,vertex.y))\n f.close()", "def h3_set_to_multi_polygon(hexes, geo_json=False):\n # todo: this function output does not match with `polyfill`.\n # This function returns a list of polygons, while `polyfill` returns\n # a GeoJSON-like dictionary object.\n hexes = _in_collection(hexes)\n return _cy.h3_set_to_multi_polygon(hexes, geo_json=geo_json)", "def parts(self):\n\n return sum(map(list, self.polygons), [])", "def geos_multipolygon_from_polygons(\n arg,\n): # -> tuple[Any | Unknown, Unknown | Literal[2, 3]] | tuple[Any, Literal[3]] | tuple[Any, Any | Literal[2, 3]]:\n ...", "def from_polygons(cls, iterable_of_polygons):\n return MultiPolygon([polygon.to_dict()['coordinates'] for polygon in iterable_of_polygons])", "def create_mesh_data(self):\n\n # if len(self.physical_surfaces) > 1:\n # self.geom.boolean_union(self.physical_surfaces)\n\n self.__physical_surfaces__()\n\n directory = os.getcwd() + '/debug/gmsh/'\n\n mesh_file = '{}{}.msh'.format(directory, self.filename)\n geo_file = '{}{}.geo'.format(directory, self.filename)\n vtk_file = '{}{}.vtu'.format(directory, self.filename)\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n mesh_data = pygmsh.generate_mesh(\n self.geom, verbose=False, dim=2,\n prune_vertices=False,\n remove_faces=False,\n geo_filename=geo_file\n )\n\n # meshio.write(mesh_file, mesh_data)\n # meshio.write(vtk_file, mesh_data)\n\n return mesh_data", "def convert_to_shapely_polygons(polygons, points, return_first=False, sort=False, mp=False):\n if sort:\n polygons.sort(key=lambda poly: len(poly.shell), reverse=True)\n\n shapely_polygons = []\n for poly in polygons:\n shell_coords = 
get_poly_coords(poly.shell, points)\n hole_coords = [get_poly_coords(hole, points) for hole in poly.holes]\n poly_shape = Polygon(shell=shell_coords, holes=hole_coords)\n \n shapely_polygons.append(poly_shape)\n\n # Return only the largest by number of vertices\n if shapely_polygons and return_first:\n return shapely_polygons[0]\n\n # Check if a multipolygon\n if len(shapely_polygons) > 1 and mp:\n return MultiPolygon(shapely_polygons)\n else:\n return shapely_polygons" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert cell-centered data to node-centered by the pseudo-Laplacian method
def get_node_centered_data(self, data, extrapolate=True): nc = self.node_coordinates elem_table, ec, data = self._create_tri_only_element_table(data) node_cellID = [ list(np.argwhere(elem_table == i)[:, 0]) for i in np.unique(elem_table.reshape(-1,)) ] node_centered_data = np.zeros(shape=nc.shape[0]) for n, item in enumerate(node_cellID): I = ec[item][:, :2] - nc[n][:2] I2 = (I ** 2).sum(axis=0) Ixy = (I[:, 0] * I[:, 1]).sum(axis=0) lamb = I2[0] * I2[1] - Ixy ** 2 omega = np.zeros(1) if lamb > 1e-10 * (I2[0] * I2[1]): # Standard case - Pseudo lambda_x = (Ixy * I[:, 1] - I2[1] * I[:, 0]) / lamb lambda_y = (Ixy * I[:, 0] - I2[0] * I[:, 1]) / lamb omega = 1.0 + lambda_x * I[:, 0] + lambda_y * I[:, 1] if not extrapolate: omega[np.where(omega > 2)] = 2 omega[np.where(omega < 0)] = 0 if omega.sum() > 0: node_centered_data[n] = np.sum(omega * data[item]) / np.sum(omega) else: # We did not succeed using pseudo laplace procedure, use inverse distance instead InvDis = [ 1 / np.hypot(case[0], case[1]) for case in ec[item][:, :2] - nc[n][:2] ] node_centered_data[n] = np.sum(InvDis * data[item]) / np.sum(InvDis) return node_centered_data
[ "def recenter(cluster):\n tot = cluster.shape[0]\n return np.sum(cluster, axis=0) / tot", "def __update_centers(self):\n \n centers = [[] for i in range(len(self.__clusters))];\n \n for index in range(len(self.__clusters)):\n point_sum = [0] * len(self.__pointer_data[0]);\n \n for index_point in self.__clusters[index]:\n point_sum = list_math_addition(point_sum, self.__pointer_data[index_point]);\n \n centers[index] = list_math_division_number(point_sum, len(self.__clusters[index]));\n \n return centers;", "def GetNodeCenterImage(self, node_id, antialias = False):\n\n\t\tif self.data_loaded:\n\t\t\t# if self.WordleImages:\n\t\t\tif self.WordleImages:\n\t\t\t\n\t\t\t\tself.WordleView.SetRandomSeed(0);\n\n\t\t\t\t# Need to create separate images (Z) for each column of matrix result\n\t\t\t\t# Bases is D x N matrix\n\t\t\t\timage_cols = self.Centers[node_id]*self.V.T + self.cm\n\t\t\t\t\n\t\t\t\tself.WordleView.SetColorByArray(False)\n\t\t\t\tself.WordleView.Update()\n\t\t\t\t\n\t\t\t\tcoeffs = VN.numpy_to_vtk(image_cols.T*100, deep=True)\n\t\t\t\tcoeffs.SetName('coefficient')\n\t\t\t\tc_sign = VN.numpy_to_vtk(N.sign(image_cols.T), deep=True)\n\t\t\t\tc_sign.SetName('sign')\n\t\t\t\t\n\t\t\t\tself.WordleTable.RemoveColumn(2)\n\t\t\t\tself.WordleTable.RemoveColumn(1)\n\t\t\t\tself.WordleTable.AddColumn(coeffs)\n\t\t\t\tself.WordleTable.AddColumn(c_sign)\n\t\t\t\tself.WordleView.RemoveAllRepresentations()\n\t\t\t\tself.WordleView.AddRepresentationFromInput(self.WordleTable)\n\t\t\t\t\n\t\t\t\tself.WordleTable.Modified()\n\t\t\t\t\n\t\t\t\timg = vtk.vtkImageData()\n\t\t\t\timg.DeepCopy(self.WordleView.GetImageData(antialias))\n\t\t\t\timg.GetPointData().GetScalars().SetName('Intensity')\n\t\t\t\t\n\t\t\t\treturn img\n\t\t\t\t\n\t\t\telse:\n\t\t\t\t\n\t\t\t\t# imagesc(reshape(gW.Centers{1}*V(:,1:D)'+cm,28,[]))\n\t\n\t\t\t\tif self.downsampled:\n\t\t\t\t\timage_col = self.Centers_down[node_id]\n\t\t\t\t\timR = self.imR_down\n\t\t\t\t\timC = self.imC_down\n\t\t\t\telse:\n\t\t\t\t\t# V now already chopped to AmbientDimension\n\t\t\t\t\t# Compute all detail images for that dimension\n\t\t\t\t\t# print \"DS Calculating center image\"\n\t\t\t\t\t# print node_id, self.Centers[node_id].shape, self.V.T.shape, self.cm.shape\n\t\t\t\t\timage_col = self.Centers[node_id]*self.V.T + self.cm\n\t\t\t\t\timR = self.imR\n\t\t\t\t\timC = self.imC\n\t\n\t\t\t\t# print \"DS done calculating center image\"\n\t\t\t\t# To make it linear, it is the correct order (one image after another) to .ravel()\n\t\t\t\timage_linear = N.asarray(image_col.T).ravel()\n\t\n\t\t\t\tintensity = VN.numpy_to_vtk(image_linear, deep=True)\n\t\t\t\tintensity.SetName('Intensity')\n\t\n\t\t\t\timageData = vtk.vtkImageData()\n\t\t\t\timageData.SetOrigin(0,0,0)\n\t\t\t\timageData.SetSpacing(1,1,1)\n\t\t\t\timageData.SetDimensions(imR, imC, 1)\n\t\t\t\timageData.GetPointData().AddArray(intensity)\n\t\t\t\timageData.GetPointData().SetActiveScalars('Intensity')\n\t\n\t\t\t\treturn imageData\n\t\n\t\telse:\n\t\t\traise IOError, \"Can't get image until data is loaded successfully\"", "def perform_centering(self):\r\n centered_data = self.data - np.repeat(self.mean_data[:, np.newaxis], self.data.shape[1], axis=1) + self.weight\r\n return centered_data", "def preevolve(self):\n\n # we just initialized cell-centers, but we need to store averages\n for var in self.cc_data.names:\n self.cc_data.from_centers(var)", "def init_cluster_hex(img,bands,rows,columns,ki):\n\n N = rows * columns\n \n #Setting up SNITC\n S = (rows*columns / (ki * 
(3**0.5)/2))**0.5\n\n #Get nodes per row allowing a half column margin at one end that alternates\n nodeColumns = round(columns/S - 0.5);\n #Given an integer number of nodes per row recompute S\n S = columns/(nodeColumns + 0.5); \n\n # Get number of rows of nodes allowing 0.5 row margin top and bottom\n nodeRows = round(rows/((3)**0.5/2*S));\n vSpacing = rows/nodeRows;\n\n # Recompute k\n k = nodeRows * nodeColumns;\n\n # Allocate memory and initialise clusters, labels and distances.\n C = numpy.zeros([k,bands+3]) # Cluster centre data 1:times is mean on each band of series\n # times+1 and times+2 is row, col of centre, times+3 is No of pixels\n l = -numpy.ones([rows,columns]) # Matrix labels.\n d = numpy.full([rows,columns], numpy.inf) # Pixel distance matrix from cluster centres.\n\n # Initialise grid\n kk = 0;\n r = vSpacing/2;\n for ri in range(nodeRows):\n x = ri\n if x % 2:\n c = S/2\n else:\n c = S\n\n for ci in range(nodeColumns):\n cc = int(numpy.floor(c)); rr = int(numpy.floor(r))\n ts = img[:,rr,cc]\n st = numpy.append(ts,[rr,cc,0])\n C[kk, :] = st\n c = c+S\n kk = kk+1\n\n r = r+vSpacing\n \n #Cast S\n S = round(S)\n \n return C,S,l,d,k", "def transform_cell(cell):\n cell = np.array(cell)\n transform, upper_tri = np.linalg.qr(cell.T, mode=\"complete\")\n new_cell = np.transpose(upper_tri)\n\n # LAMMPS also requires positive values on the diagonal of the,\n # so invert cell if necessary\n inversion = np.eye(3)\n for i in range(3):\n if new_cell[i][i] < 0.0:\n inversion[i][i] = -1.0\n new_cell = np.dot(inversion, new_cell.T).T\n transform = np.dot(transform, inversion.T).T\n\n return new_cell, transform", "def _celestial(self):\n cos = np.cos(self.lat)\n sin = np.sin(self.lat)\n transfo = np.matrix([ \n [0, -sin, cos],\n [1, 0, 0],\n [0, cos, sin]\n ])\n return transfo", "def initial_superpixel_cluster(self, superpixel_center, superpixel_seed_index, pixels, space_map, norm_map):\n # Reshape depth from (3,3,1) into (3,3)\n shape = self.depth.shape[0:2]\n depth = self.depth.reshape(shape)\n mask1 = pixels != superpixel_seed_index\n mask2 = depth <= 0.05\n mask = mask1 | mask2\n\n [col, row] = np.meshgrid(\n np.arange(self.im_width), np.arange(self.im_height))\n col = np.ma.array(col, mask=mask) - superpixel_center[0]\n row = np.ma.array(row, mask=mask) - superpixel_center[1]\n\n diff = np.ma.multiply(col, col) + np.ma.multiply(row, row)\n max_dist = np.max(diff)\n\n # Reshape depth from mxn into (m-1)x(n-1)\n pixel_depths = depth[:-1, :-1][~mask[:-1, :-1]].reshape(-1, 1)\n valid_depth_num = pixel_depths.shape[0]\n pixel_positions = space_map[:-1, :-1][~mask[:-1, :-1]].reshape(-1, 3)\n pixel_norms = norm_map[~mask[:-1, :-1]].reshape(-1, 3)\n return pixel_depths, pixel_norms, pixel_positions, max_dist, valid_depth_num", "def centroid(micro):\r\n return micro['ls']/micro['n']", "def __compute_cluster_centers(self):\n center = dict()\n for index,class_key in enumerate(self.classes):\n membership_list = np.array([mb[index] for mb in self.df.membership])\n membership_list = membership_list**self.m\n num = np.dot(membership_list, self.X)\n den = np.sum(membership_list)\n center[class_key] = num/den\n return center", "def translate_to_cell_center(self):\n if self.cell is None:\n raise NameError(\"cell not defined\")\n else:\n self.translate_to_zero()\n cell_center = (self.cell[0] + self.cell[1] + self.cell[2]) / 2\n self.translate(cell_center)", "def ens_CM1_C2A(ens, var = 'ALL'):\n \n# Copy data from cell centered surrogate, then average the staggered fields to the centers\n \n t0 = 
timer()\n \n nx = ens.nx\n ny = ens.ny\n nz = ens.nz\n \n if var.upper() == \"U\" or var.upper() == \"ALL\":\n\n fstate.xyz3d[ens.u_ptr,:,:,:,0] = 0.5*(fstate.u[:,:,:,0] + fstate.u[:,:,:,1])\n fstate.xyz3d[ens.u_ptr,:,:,:,nx-1] = 0.5*(fstate.u[:,:,:,nx-1] + fstate.u[:,:,:,nx])\n fstate.xyz3d[ens.u_ptr,:,:,:,1:nx-1] = (-fstate.u[:,:,:,0:nx-2] + 13.0*fstate.u[:,:,:,1:nx-1] \\\n -fstate.u[:,:,:,3:nx+1] + 13.0*fstate.u[:,:,:,2:nx] ) / 24.0\n \n if var.upper() == \"V\" or var.upper() == \"ALL\":\n\n fstate.xyz3d[ens.v_ptr,:,:,0,:] = 0.5*(fstate.v[:,:,0,:] + fstate.v[:,:,1,:])\n fstate.xyz3d[ens.v_ptr,:,:,ny-1,:] = 0.5*(fstate.v[:,:,ny-1,:] + fstate.v[:,:,ny,:])\n fstate.xyz3d[ens.v_ptr,:,:,1:ny-1,:] = (-fstate.v[:,:,0:ny-2,:] + 13.0*fstate.v[:,:,1:ny-1,:] \\\n -fstate.v[:,:,3:ny+1,:] + 13.0*fstate.v[:,:,2:ny,:] ) / 24.0\n \n if var.upper() == \"W\" or var.upper() == \"ALL\":\n\n fstate.xyz3d[ens.w_ptr,:,0,:,:] = 0.5*(fstate.w[:,0,:,:] + fstate.w[:,1,:,:])\n fstate.xyz3d[ens.w_ptr,:,nz-1,:,:] = 0.5*(fstate.w[:,nz-1,:,:] + fstate.w[:,nz,:,:])\n fstate.xyz3d[ens.w_ptr,:,1:nz-1,:,:] = (-fstate.w[:,0:nz-2,:,:] + 13.0*fstate.w[:,1:nz-1,:,:] \\\n -fstate.w[:,3:nz+1,:,:] + 13.0*fstate.w[:,2:nz,:,:] ) / 24.0\n \n# Create ens variables to point at A-grid velocities\n\n ens.addvariable(\"UA\", data=fstate.xyz3d[ens.u_ptr,:,:,:,:], coords = ('MEMBER,NZ,NY,NX')) \n ens.addvariable(\"VA\", data=fstate.xyz3d[ens.v_ptr,:,:,:,:], coords = ('MEMBER,NZ,NY,NX')) \n ens.addvariable(\"WA\", data=fstate.xyz3d[ens.w_ptr,:,:,:,:], coords = ('MEMBER,NZ,NY,NX')) \n \n if time_all: print(\"\\n Wallclock time to convert from C to A grid:\", round(timer() - t0, 3), \" sec\")\n\n return", "def centre(self, cluster):\r\n size = len(cluster) * 1.0\r\n cen = np.zeros_like(self.data[0])\r\n for item in cluster:\r\n cen = cen + self.data[item]\r\n return cen / size", "def density_cluster(Data,iradius, Clusters): #This function classifies data points into clusters and noise points", "def computeCentroids(self, docs, assignment):\n\n res = scipy.sparse.csr_matrix(docs * assignment.transpose())\n return self.l2normalizeCols(res)", "def init_center_c(self, data,train_model:Model, eps=0.1):\n output = train_model.predict(data)\n c = np.zeros(output.shape[0])\n c = np.sum(output, axis=0)\n c /= output.shape[0]\n # If c_i is too close to 0, set to +-eps. Reason: a zero unit can be trivially matched with zero weights.\n c[(abs(c) < eps) & (c < 0)] = -eps\n c[(abs(c) < eps) & (c > 0)] = eps\n return c", "def transform(self):\n return self.cellx, 0.0, self.left, 0.0, -self.celly, self.top", "def get_tile_centroid(tile_id):\n \n tile_whole_str = tile_id[0:4] #first four hex digits represent the 1x1 degree tile\n tile_frac_str = tile_id[4:] #rest correspond to the fractional portion of a coordinate\n level = len(tile_frac_str) #level is the number of hex digits that represent the fractional portion of a tile\n \n lat_frac = 0\n lon_frac = 0\n \n ##\n #hex string to int\n whole = int(tile_whole_str, 16)\n frac = int(tile_frac_str, 16) \n \n ##\n #The world is divided laterally along the x axis into 360 sections. To know the latitude, divide by 360, which gives the \n #index of the vertical partition. This is a number from 0 to 180. Negating by 90 gives the correct latitude.\n #\n #Taking the modulo by 360 gives the index of the lateral partition. Negating by 180 gives the longitude value. 
\n whole_lat = int(whole / 360) - 90;\n whole_lon = int(whole % 360) - 180;\n \n \n ##\n #Determining the fractions\n \n #Each hex digit in the fractional part represents the index of the corresponding 4x4 grid for each level, starting with\n #the highest level by the least significant hex digit \n \n tude_shift = 0\n tile_shift = 0\n \n for i in range(level):\n current_frac = ((frac >> tile_shift) & 15) # & with 15, i.e., 1111 to get the last four digits in the fractional part, \n # each time shift by 4 digits to go to the next hex digit starting with the right most one \n \n # the first and last two digits in each four digit block correspond to the latitude portion and the longitude portion \n lat_frac = lat_frac + (((current_frac & 12)>>2) << tude_shift) # & by 12, 1100 to get the latitude portion, shift two digits to remove the trailing \n # 0 bits and shift the entire thing by \n lon_frac = lon_frac + ((current_frac & 3) << tude_shift) # similarly & by 3, 0011 to get the last two digits with correspond to the longitude fraction\n \n # each two digits derived are shifted appropriately, by tude_shift, to insert at the correct position in the \n # binary string corresponding to either latitude or longitude \n \n tile_shift += 4 #each hex digit is extracted by shifting the binary representation of a tile by 4 digits\n tude_shift += 2 #two binary digits are inserted from right most and two digit shifts each time the loop runs \n \n \n div_factor = 4**level\n \n cent_lat_frac = (lat_frac + 0.5)/(div_factor)\n cent_lon_frac = (lon_frac + 0.5)/(div_factor)\n \n cent_lat = whole_lat + cent_lat_frac\n cent_lon = whole_lon + cent_lon_frac\n \n return (cent_lat, cent_lon)", "def get_latent_representation(self) -> np.ndarray:\n return self.module.cell_embedding.cpu().numpy().T" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read header of mesh file and set object properties
def _read_mesh_header(self, filename): msh = MeshFile.ReadMesh(filename) self._source = msh self._projstr = msh.ProjectionString self._type = UnstructuredType.Mesh # geometry self._set_nodes_from_source(msh) self._set_elements_from_source(msh)
[ "def _read_primary_header(self):\n\n self._check_magic_number()\n\n self.f.seek(0)\n self.time, = np.fromfile(self.f, dtype='<f8', count=1)\n self._nbodies_tot, self._ncomp = np.fromfile(self.f, dtype=np.uint32,count=2)\n\n data_start = 16 # guaranteed first component location...\n\n # now read the component headers\n for comp in range(0,self._ncomp):\n self.f.seek(data_start)\n next_comp = self._read_spl_component_header()\n data_start = next_comp", "def reload(self):\n with open(self.filename,'r') as meshfile:\n # scan file until we reach a mesh format declarator\n if not scan_for_keyword(meshfile, \"$meshformat\"):\n return False\n # read mesh format information\n self.meshformat = meshfile.readline()\n #check for end of mesh formatting block\n if meshfile.readline().lower().strip() != \"$endmeshformat\":\n print(\"Can only read ASCII meshes.\")\n return False\n\n if not scan_for_keyword(meshfile, \"$nodes\"):\n return False\n\n self.num_nodes = int(meshfile.readline())\n self.node_positions = np.zeros((self.num_nodes, 3))\n nodeids = [0]*self.num_nodes\n for i in range(self.num_nodes):\n nodeinf = meshfile.readline().split()\n # shift to zero-indexing from gmsh/matlab 1-indexing\n nodeids[i] = int(nodeinf[0]) - 1\n nodex = np.array([float(k) for k in nodeinf[1:]])\n #set axis-aligned bounding box for the mesh\n if (i == 0):\n self.bounding_box[0] = nodex\n self.bounding_box[1] = nodex\n else:\n self.bounding_box[0] = [min(self.bounding_box[0][k],nodex[k]) for k in range(3)]\n self.bounding_box[1] = [max(self.bounding_box[1][k],nodex[k]) for k in range(3)]\n self.node_positions[i] = nodex\n if not scan_for_keyword(meshfile, \"$endnodes\"):\n return False\n if not scan_for_keyword(meshfile, \"$elements\"):\n return False\n\n self.num_elements = int(meshfile.readline())\n #constants given by the file format\n num_infos = 4\n tagidx = 3\n self.element_infos = [[0]*num_infos]*self.num_elements\n self.element_tags = [0]*self.num_elements\n self.num_points = 0\n self.num_lines = 0\n self.num_tris = 0\n self.num_quads = 0\n # self.num_tets = 0\n # self.num_hexas = 0\n # self.num_prisms = 0\n # self.num_pyramids = 0\n self.num_lines3 = 0\n self.num_tris6 = 0\n\n self.points = np.zeros((self.num_elements,2), np.int32)\n self.lines = np.zeros((self.num_elements,3), np.int32)\n self.tris = np.zeros((self.num_elements,4), np.int32)\n self.quads = np.zeros((self.num_elements,5), np.int32)\n # self.tets = np.zeros((self.num_elements,5), np.int32)\n # self.hexas = np.zeros((self.num_elements,9), np.int32)\n # self.prisms = np.zeros((self.num_elements,7), np.int32)\n # self.pyramids = np.zeros((self.num_elements,6), np.int32)\n self.lines3 = np.zeros((self.num_elements,4), np.int32)\n self.tris6 = np.zeros((self.num_elements,7), np.int32)\n\n tokens = []\n tline = meshfile.readline().lower().strip()\n while tline != \"$endelements\":\n if not tline:\n return False\n tokens = tokens + [int(k) for k in tline.split()]\n tline = meshfile.readline().lower().strip()\n for i in range(self.num_elements):\n self.element_infos[i] = [tokens.pop(0) for k in range(num_infos)]\n # I have honestly no clue what this means, but it consumes tokens\n # so it's staying in the code\n self.element_tags[i] = [tokens.pop(0) for k in range(self.element_infos[i][2]-1)]\n # minus 1s to shift from one-indexing to zero-indexing\n element_nodes = [tokens.pop(0)-1 for k in range(NODES_PER_ELEMENT_TYPE[self.element_infos[i][1]-1])]\n\n if self.element_infos[i][1] == 15:\n self.points[self.num_points][0] = 
nodeids[element_nodes[0]]\n self.points[self.num_points][1] = self.element_infos[i][tagidx]\n self.num_points = self.num_points + 1\n elif self.element_infos[i][1] == 1:\n self.add_line(i, nodeids, element_nodes, 1)\n elif self.element_infos[i][1] == 8:\n self.add_line(i, nodeids, element_nodes, 2)\n elif self.element_infos[i][1] == 2:\n self.add_triangle(i, nodeids, element_nodes, 1)\n elif self.element_infos[i][1] == 9:\n self.add_triangle(i, nodeids, element_nodes, 2)\n elif self.element_infos[i][1] == 3:\n for j in range(4):\n self.quads[self.num_quads][j] = nodeids[element_nodes[j]]\n self.quads[self.num_quads][4] = self.element_infos[i][tagidx]\n self.num_quads = self.num_quads + 1\n\n #TODO tetras/hexes/prisms/pyramids\n \n\n return True", "def _read_spl_component_header(self):\n\n data_start = self.f.tell()\n # manually do headers\n _1,_2,self.nprocs, nbodies, nint_attr, nfloat_attr, infostringlen = np.fromfile(self.f, dtype=np.uint32, count=7)\n\n # need to figure out what to do with nprocs...it has to always be the same, right?\n head = np.fromfile(self.f, dtype=np.dtype((np.bytes_, infostringlen)),count=1)\n head_normal = head[0].decode()\n head_dict = yaml.safe_load(head_normal)\n\n\n head_dict['nint_attr'] = nint_attr\n head_dict['nfloat_attr'] = nfloat_attr\n # data starts at ...\n next_comp = 4*7 + infostringlen + data_start\n\n self.component_map[head_dict['name']] = next_comp\n self.header[head_dict['name']] = head_dict\n\n next_comp += self.nprocs*1024\n\n # specifically look for indexing\n try:\n self.indexing = head_dict['parameters']['indexing']\n except:\n self.indexing = head_dict['indexing']=='true'\n\n return next_comp", "def obj_loader(file_name,normalize=False):\n vertices = []\n faces = []\n vnormals = []\n\n with open(file_name,'r') as fin:\n for line in fin:\n if line.startswith('#'):\n continue\n values = line.split()\n if len(values) < 1:\n continue\n if values[0] == 'v':\n v = list(map(float,values[1:4]))\n vertices.append(v)\n elif values[0] == 'vn':\n vn = list(map(float,values[1:4]))\n vnormals.append(vn)\n elif values[0] == 'f':\n face = []\n for v in values[1:]:\n w = v.split('/')\n face.append(int(w[0])) \n faces.append(face) \n \n vertices = np.array(vertices)\n faces = np.array(faces)\n if len(vnormals):\n vnormals = np.array(vnormals)\n faces = faces-1\n if normalize:\n bbox_max = np.max(vertices,axis=0)\n bbox_min = np.min(vertices,axis=0)\n bbox_center = 0.5 * (bbox_max + bbox_min)\n bbox_rad = np.linalg.norm(bbox_max - bbox_center)\n vertices -= bbox_center\n vertices /= (bbox_rad*2.0)\n if np.any(faces < 0): \n print('Negative face indexing in obj file')\n \n return vertices, faces, vnormals", "def load_trimesh_from_file(self):\n self.mesh = trimesh.load(self.mesh_path,process=False)", "def read_header(self, file_handle):\n header = {\"FCS format\": file_handle.read(6)}\n\n file_handle.read(4) # 4 space characters after the FCS format\n\n for field in [\n \"text start\",\n \"text end\",\n \"data start\",\n \"data end\",\n \"analysis start\",\n \"analysis end\",\n ]:\n s = file_handle.read(8)\n try:\n field_value = int(s)\n except ValueError:\n field_value = 0\n header[field] = field_value\n\n # Checking that the location of the TEXT segment is specified\n for k in [\"text start\", \"text end\"]:\n if header[k] == 0:\n raise ValueError(\n \"The FCS file '{}' seems corrupted. 
(Parser cannot locate information \"\n \"about the '{}' segment.)\".format(self.path, k)\n )\n elif header[k] > self._file_size:\n raise ValueError(\n \"The FCS file '{}' is corrupted. '{}' segment \"\n \"is larger than file size\".format(self.path, k)\n )\n\n self._data_start = header[\"data start\"]\n self._data_end = header[\"data start\"]\n\n if header[\"analysis start\"] != 0:\n warnings.warn(\n \"There appears to be some information in the ANALYSIS segment of file {0}. \"\n \"However, it might not be read correctly.\".format(self.path)\n )\n\n self.annotation[\"__header__\"] = header", "def __get_header(self):\n # try:\n self.header = self.hdulist[0].header\n # except:\n # self.hdulist = astropy.io.fits.open(self.map_name)\n # self.header = self.hdulist[0].header", "def readGeometry(self):\r\n \r\n lineNum = 0\r\n with open(self.fileName, 'r') as fIn:\r\n # read the geometry file\r\n try:\r\n for line in fIn:\r\n # loop through each line in the file\r\n \r\n # inc the line number\r\n lineNum = lineNum + 1\r\n # parse the line in geometry file, adding info to geometryInfo\r\n self._parseHocGeometryLine(line)\r\n \r\n except IOError as err:\r\n sys.tracebacklimit = 0\r\n raise IOError('Error reading %s line %d: %s' % \\\r\n (self.fileName, lineNum, err.message))\r\n \r\n if self._openFilament:\r\n raise IOError('Error reading %s, filament %s open at end of file' %\r\n (self.fileName, self._openFilament))\r\n \r\n # connect filaments and remove filaments and _connections, leaving segments\r\n # and nodes\r\n self._connectFilaments()\r\n \r\n \r\n # make compartments from hemispheres remaining at the end of unconnected\r\n # segments\r\n #self._addOneNodeCompartments()\r", "def _parse_header(self):\n header = int_from_lbytes(self._reader.read(4))\n if header != self._HEADER:\n raise StashFileParseError(f'Invalid header id: 0x{header:08X}')\n self.version = int_from_lbytes(self._reader.read(2))", "def parse_header(self):\n\n chunk_id, chunk_len = self.next_chunk()\n instream = self.instream\n\n # check if it is a proper midi file\n if chunk_id != b\"MThd\":\n raise ParseError(\"Invalid MIDI file header. Chunk identifier must be 'MThd'.\")\n\n # Header values are at fixed locations, so no reason to be clever\n self.format = read_bew(instream.read(2))\n self.num_tracks = read_bew(instream.read(2))\n\n if self.format == 0 and self.num_tracks > 1:\n msg = (\n \"Invalid number of tracks (%i). Type 0 midi files may only \"\n \"contain a single track.\" % self.num_tracks\n )\n\n if self.strict:\n raise ParseError(msg)\n else:\n log.warning(msg)\n\n tick_div = instream.read(2)\n fps, resolution = tointseq(tick_div)\n\n if fps & 0x80:\n metrical = False\n else:\n metrical = True\n division = read_bew(tick_div)\n\n # Theoretically a header larger than 6 bytes can exist\n # but no one has seen one in the wild.\n # We will correctly ignore unknown data if present, though.\n if chunk_len > 6:\n log.warning(\"Invalid header size (%i). 
Skipping trailing header \" \"bytes\", chunk_len)\n instream.seek(chunk_len - 6, 1)\n\n # call the header event handler on the stream\n if metrical:\n self.dispatch(\n \"header\", self.format, self.num_tracks, metrical=True, tick_division=division\n )\n else:\n self.dispatch(\n \"header\",\n self.format,\n self.num_tracks,\n metrical=False,\n fps=fps,\n frame_resolution=resolution,\n )", "def _parse_header(self):\n log.debug('---In dcd.py, parse_header()')\n #process the first header block\n\n header1 = self._fo.read(92)\n header1_format=\\\n \"i---cccci---i---i---i---xxxxxxxxxxxxxxxxxxxxf---i---i---xxxxxxxxxxxxxxxxxxxxxxxxxxxxi---i---\"\n # |1 |5 |10 |15 |20 |25 |30 |35 |40 |45 |50 |55 |60 |65 |70 |75 |80 |85 |90\n #|header size=84 |nframes*tstep |tstep_size |charm_ver\n # |CORD=has coordinates |block_a |header_size=84\n # |nframes |block_b\n # |starting timestep\n # |timestep between coord sets \n header1_format = string.replace(header1_format, \"-\", \"\")\n header1 = struct.unpack(header1_format, header1)\n header1_size1, c1, c2, c3, c4, self._nframes, self._firsttstep, self._dcdfreq, self._ntsteps, self._tstep_size, self._block_a, self._block_b, self._charm_v, header1_size2 = header1 #unpack the tuple header1\n \n \n self._dcdtype = \"\".join((c1,c2,c3,c4)) #get the data-type field. I it should always be cord...\n if header1_size1 != 84 or header1_size2 !=84:\n log.error(\"error-- header size fields not correct (should be 84)\\n\")\n if self._block_a != 0 or self._block_b != 0:\n log.info(\"I've found a signal possibly indicating an extra record block\")\n log.info(\" I'll try to parse it, but it might fail. Also, I won't use\")\n log.info(\" any data from them.\")", "def _parse_header(self):\n\n if self.ei_magic != '\\x7fELF':\n return\n\n self.seek(16,0)\n reading = {'h': self.le_half, 'w': self.le_word,'a': self.le_addr,\n 'o': self.le_offset, 'x': self.le_xword}\n labels = ('type', 'machine', 'version', 'entry', 'phoff', \\\n 'shoff', 'flags', 'ehsize', 'phentsize', 'phnum',\\\n 'shentsize','shnum','shstrndx')\n htypes = ('h','h','w','a','o','o','w','h','h','h','h','h','h')\n\n # Retrieve ELF header\n self.elfhead = dict(zip(labels,[reading[t]() for t in htypes]))\n\n # Retrieve section header string table.\n # sh: name, type, flags, addr, offset, size, link, info, addralign, entsize\n self.seek((self.elfhead['shentsize'] * self.elfhead['shstrndx'])\\\n + self.elfhead['shoff'], 0)\n\n labels = ('name', 'type', 'flags', 'addr', 'offset', \\\n 'size', 'link', 'info', 'addralign', 'entsize')\n\n shtypes = ('w','w','x','a','o','x','w','w','x','x')\n\n sh_strtableh = dict(zip(labels,[reading[t]() for t in shtypes]))\n self.seek(sh_strtableh['offset'],0)\n self.sh_strtableh = sh_strtableh\n\n # Now the section header is known, can retrieve dynamic string table\n self.dynstrh = self._find_section('.dynstr')", "def __init__(self, name, header):\n\n self.header = header.copy()\n#\n# Check if the file already exists. If it does not, check to see\n# if we were provided with a Primary Header. 
If not we will need\n# to prepend a default PrimaryHDU to the file before writing the\n# given header.\n#\n if not os.path.exists(name):\n if not self.header.has_key('SIMPLE'):\n hdulist = HDUList([PrimaryHDU()])\n hdulist.writeto(name, 'exception')\n else:\n if self.header.has_key('SIMPLE') and os.path.getsize(name) > 0:\n#\n# This will not be the first extension in the file so we\n# must change the Primary header provided into an image\n# extension header.\n#\n self.header.update('XTENSION','IMAGE','Image extension',\n after='SIMPLE')\n del self.header['SIMPLE']\n\n if not self.header.has_key('PCOUNT'):\n dim = self.header['NAXIS']\n \n if dim == 0:\n dim = ''\n else:\n dim = str(dim)\n\n self.header.update('PCOUNT', 0, 'number of parameters',\n after='NAXIS'+dim)\n\n if not self.header.has_key('GCOUNT'):\n self.header.update('GCOUNT', 1, 'number of groups',\n after='PCOUNT')\n\n self._ffo = _File(name, 'append')\n self._ffo.getfile().seek(0,2)\n\n self._hdrLoc = self._ffo.writeHDUheader(self)\n self._datLoc = self._ffo.getfile().tell()\n self._size = self.size()\n\n if self._size != 0:\n self.writeComplete = 0\n else:\n self.writeComplete = 1", "def __shapefileHeader(self, fileObj, headerType='shp'):\r\n f = self.__getFileObj(fileObj)\r\n f.seek(0)\r\n # File code, Unused bytes\r\n f.write(pack(\">6i\", 9994,0,0,0,0,0))\r\n # File length (Bytes / 2 = 16-bit words)\r\n if headerType == 'shp':\r\n f.write(pack(\">i\", self.__shpFileLength()))\r\n elif headerType == 'shx':\r\n f.write(pack('>i', ((100 + (len(self._shapes) * 8)) / 2)))\r\n # Version, Shape type\r\n f.write(pack(\"<2i\", 1000, self.shapeType))\r\n # The shapefile's bounding box (lower left, upper right)\r\n if self.shapeType != 0:\r\n try:\r\n f.write(pack(\"<4d\", *self.bbox()))\r\n except error:\r\n raise ShapefileException(\"Failed to write shapefile bounding box. Floats required.\")\r\n else:\r\n f.write(pack(\"<4d\", 0,0,0,0))\r\n # Elevation\r\n z = self.zbox()\r\n # Measure\r\n m = self.mbox()\r\n try:\r\n f.write(pack(\"<4d\", z[0], z[1], m[0], m[1]))\r\n except error:\r\n raise ShapefileException(\"Failed to write shapefile elevation and measure values. Floats required.\")", "def __init__(self, fileobj=None):\n\n if fileobj is None:\n # for testing\n self._flags = 0\n return\n\n fn = getattr(fileobj, \"name\", \"<unknown>\")\n data = fileobj.read(10)\n if len(data) != 10:\n raise ID3NoHeaderError(\"%s: too small\" % fn)\n\n id3, vmaj, vrev, flags, size = struct.unpack('>3sBBB4s', data)\n self._flags = flags\n self.size = BitPaddedInt(size) + 10\n self.version = (2, vmaj, vrev)\n\n if id3 != b'ID3':\n raise ID3NoHeaderError(\"%r doesn't start with an ID3 tag\" % fn)\n\n if vmaj not in [2, 3, 4]:\n raise ID3UnsupportedVersionError(\"%r ID3v2.%d not supported\"\n % (fn, vmaj))\n\n if not BitPaddedInt.has_valid_padding(size):\n raise error(\"Header size not synchsafe\")\n\n if (self.version >= self._V24) and (flags & 0x0f):\n raise error(\n \"%r has invalid flags %#02x\" % (fn, flags))\n elif (self._V23 <= self.version < self._V24) and (flags & 0x1f):\n raise error(\n \"%r has invalid flags %#02x\" % (fn, flags))\n\n if self.f_extended:\n extsize_data = read_full(fileobj, 4)\n\n if PY3:\n frame_id = extsize_data.decode(\"ascii\", \"replace\")\n else:\n frame_id = extsize_data\n\n if frame_id in Frames:\n # Some tagger sets the extended header flag but\n # doesn't write an extended header; in this case, the\n # ID3 data follows immediately. 
Since no extended\n # header is going to be long enough to actually match\n # a frame, and if it's *not* a frame we're going to be\n # completely lost anyway, this seems to be the most\n # correct check.\n # https://github.com/quodlibet/quodlibet/issues/126\n self._flags ^= 0x40\n extsize = 0\n fileobj.seek(-4, 1)\n elif self.version >= self._V24:\n # \"Where the 'Extended header size' is the size of the whole\n # extended header, stored as a 32 bit synchsafe integer.\"\n extsize = BitPaddedInt(extsize_data) - 4\n if not BitPaddedInt.has_valid_padding(extsize_data):\n raise error(\n \"Extended header size not synchsafe\")\n else:\n # \"Where the 'Extended header size', currently 6 or 10 bytes,\n # excludes itself.\"\n extsize = struct.unpack('>L', extsize_data)[0]\n\n self._extdata = read_full(fileobj, extsize)", "def _read_halo_data(self, halo_id, offset):\n with FortranFile(self._fname) as fpu:\n fpu.seek(offset)\n if self._longint:\n npart = fpu.read_int64()\n else:\n npart = fpu.read_int()\n iord_array = self._read_member_helper(fpu, npart)\n halo_id_read = fpu.read_int()\n assert halo_id == halo_id_read\n if self._read_contamination:\n attrs = self._halo_attributes + self._halo_attributes_contam\n else:\n attrs = self._halo_attributes\n props = fpu.read_attrs(attrs)\n\n # Convert positions between [-Lbox/2, Lbox/2] to [0, Lbox].\n # /!\\: AdaptaHOP assumes that 1Mpc == 3.08e24 (exactly)\n boxsize = self.base.properties[\"boxsize\"]\n Mpc2boxsize = boxsize.in_units(\"cm\") / 3.08e24 # Hard-coded in AdaptaHOP...\n for k in \"xyz\":\n props[k] = boxsize.in_units(\"Mpc\") * (props[k] / Mpc2boxsize + 0.5)\n\n # Add units for known fields\n for k, v in list(props.items()):\n if k in UNITS:\n props[k] = v * UNITS[k]\n\n props[\"file_offset\"] = offset\n props[\"npart\"] = npart\n props[\"members\"] = iord_array\n\n # Create halo object and fill properties\n if hasattr(self, \"_group_to_indices\"):\n index_array = self._group_to_indices[halo_id]\n iord_array = None\n else:\n index_array = None\n iord_array = iord_array\n halo = Halo(\n halo_id, self, self._base_dm, index_array=index_array, iord_array=iord_array\n )\n halo.properties.update(props)\n\n return halo", "def parse_headerfile(self):\r\n # Read and store the data contained in the input File\r\n myEDFFile = open(self.InputFile,'rb')\r\n EDFValues = myEDFFile.read()\r\n myEDFFile.close()\r\n\r\n # Extract the Header File that contains info about the record\r\n self.HeaderSize = int(EDFValues[184:192])\r\n self.HeaderFile = EDFValues[:self.HeaderSize]\r\n # Extract the actual data in the input file\r\n self.rawDataRecord = EDFValues[self.HeaderSize:]\r\n self.tempDataRecord = list(self.rawDataRecord)\r\n\r\n # Extract information from the EDF Header File\r\n self.total_duration = int(self.HeaderFile[236:244]) # total duration of the signal (in seconds)\r\n if self.duration == None:\r\n self.duration = self.total_duration\r\n self.signalnum = int(self.HeaderFile[252:256]) # number of signals in edf file\r", "def parse_geometry_header(self) -> dict:\n offset = get_raw_address(self.geometry_header, self.section)\n self.xbe.seek(offset)\n\n vertex_list_offset = unpack(\"i\", self.xbe.read(4))[0]\n if vertex_list_offset == 0:\n vertex_list_offset = None\n\n triangle_list_offset = unpack(\"i\", self.xbe.read(4))[0]\n if triangle_list_offset == 0:\n triangle_list_offset = None\n\n float_array = []\n for _ in range(6):\n float_array.append(unpack(\"f\", self.xbe.read(4))[0])\n\n return {\n \"vertex_list_offset\": vertex_list_offset,\n 
\"triangle_list_offset\": triangle_list_offset,\n \"farray\": float_array, # unknown use\n }", "def load_header(self, filename=None):\n # (Re-)initialize header\n self.header = HeaderInfo()\n\n # the width of each sample is always 2 bytes\n self.header.sample_width = 2\n\n # If filename specified, use it, else use previously specified\n if filename is not None: self.filename = filename\n self.header.filename = self.filename\n\n # first load the binary in directly\n self.file_handle = open(self.filename, 'rb') # buffering=?\n\n # Read File_Type_ID and check compatibility\n # If v2.2 is used, this value will be 'NEURALCD', which uses a slightly\n # more complex header. Currently unsupported.\n self.header.File_Type_ID = [chr(ord(c)) \\\n for c in self.file_handle.read(8)]\n if \"\".join(self.header.File_Type_ID) != 'NEURALSG':\n logging.info( \"Incompatible ns5 file format. Only v2.1 is supported.\\nThis will probably not work.\")\n\n\n # Read File_Spec and check compatibility.\n self.header.File_Spec = [chr(ord(c)) \\\n for c in self.file_handle.read(16)]\n if \"\".join(self.header.File_Spec[:8]) != '30 kS/s\\0':\n logging.info( \"File_Spec seems to indicate you did not sample at 30KHz.\")\n\n\n #R ead Period and verify that 30KHz was used. If not, the code will\n # still run but it's unlikely the data will be useful.\n self.header.period, = struct.unpack('<I', self.file_handle.read(4))\n if self.header.period != 1:\n logging.info( \"Period seems to indicate you did not sample at 30KHz.\")\n self.header.f_samp = self.header.period * 30000.0\n\n\n # Read Channel_Count and Channel_ID\n self.header.Channel_Count, = struct.unpack('<I',\n self.file_handle.read(4))\n self.header.Channel_ID = [struct.unpack('<I',\n self.file_handle.read(4))[0]\n for _ in xrange(self.header.Channel_Count)]\n\n # Compute total header length\n self.header.Header = 8 + 16 + 4 + 4 + \\\n 4*self.header.Channel_Count # in bytes\n\n # determine length of file\n self.file_handle.seek(0, 2) # last byte\n self.header.file_total_size = self.file_handle.tell()\n self.header.n_samples = \\\n (self.header.file_total_size - self.header.Header) / \\\n self.header.Channel_Count / self.header.sample_width\n self.header.Length = np.float64(self.header.n_samples) / \\\n self.header.Channel_Count\n if self.header.sample_width * self.header.Channel_Count * \\\n self.header.n_samples + \\\n self.header.Header != self.header.file_total_size:\n logging.info( \"I got header of %dB, %d channels, %d samples, \\\n but total file size of %dB\" % (self.header.Header,\n self.header.Channel_Count, self.header.n_samples,\n self.header.file_total_size))\n\n # close file\n self.file_handle.close()", "def load_off(filename, size):\n\n # create 3D array (cube with edge = size)\n obj = np.zeros([size, size, size])\n\n # open filename.off\n with open(filename) as f:\n\n # read first line\n header = f.readline() # returns a string\n # set properties\n properties = f.readline().split(\" \") # returns a list of chars\n num_vertices = int(properties[0])\n num_faces = int(properties[1])\n num_edges = int(properties[2])\n print(\"Properties:\",\n \"\\nNumber of vertices:\", num_vertices,\n \"\\nNUmber of faces: \", num_faces,\n \"\\nNumber of edges: \", num_edges)\n\n # read everything else\n body = f.readlines() # returns a list of strings\n if num_vertices != 0:\n vertices = body[0:num_vertices]\n else:\n raise ValueError(\"No vertex found.\")\n if num_faces != 0:\n faces = body[num_vertices:num_vertices+num_faces]\n else:\n raise ValueError(\"No face 
found.\")\n if num_edges != 0:\n edges = body[num_faces:num_faces+num_edges]\n \n # set vertices\n for i in range(num_vertices):\n coords = vertices[i].split(\" \")\n if (int(float(coords[0])) < size) and (int(float(coords[1])) < size) and (int(float(coords[2])) < size):\n obj[int(float(coords[0])), int(float(coords[1])), int(float(coords[2]))] = 1\n else:\n print(\"Error at vertex\", i)\n\n return obj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read header of dfsu file and set object properties
def _read_dfsu_header(self, filename): dfs = DfsuFile.Open(filename) self._source = dfs self._projstr = dfs.Projection.WKTString self._type = UnstructuredType(dfs.DfsuFileType) self._deletevalue = dfs.DeleteValueFloat # geometry self._set_nodes_from_source(dfs) self._set_elements_from_source(dfs) if not self.is_2d: self._n_layers = dfs.NumberOfLayers self._n_sigma = dfs.NumberOfSigmaLayers # items self._n_items = safe_length(dfs.ItemInfo) self._items = get_item_info(dfs, list(range(self._n_items))) # time self._start_time = from_dotnet_datetime(dfs.StartDateTime) self._n_timesteps = dfs.NumberOfTimeSteps self._timestep_in_seconds = dfs.TimeStepInSeconds dfs.Close()
[ "def __get_header(self):\n # try:\n self.header = self.hdulist[0].header\n # except:\n # self.hdulist = astropy.io.fits.open(self.map_name)\n # self.header = self.hdulist[0].header", "def _read_primary_header(self):\n\n self._check_magic_number()\n\n self.f.seek(0)\n self.time, = np.fromfile(self.f, dtype='<f8', count=1)\n self._nbodies_tot, self._ncomp = np.fromfile(self.f, dtype=np.uint32,count=2)\n\n data_start = 16 # guaranteed first component location...\n\n # now read the component headers\n for comp in range(0,self._ncomp):\n self.f.seek(data_start)\n next_comp = self._read_spl_component_header()\n data_start = next_comp", "def read_header(self, file_handle):\n header = {\"FCS format\": file_handle.read(6)}\n\n file_handle.read(4) # 4 space characters after the FCS format\n\n for field in [\n \"text start\",\n \"text end\",\n \"data start\",\n \"data end\",\n \"analysis start\",\n \"analysis end\",\n ]:\n s = file_handle.read(8)\n try:\n field_value = int(s)\n except ValueError:\n field_value = 0\n header[field] = field_value\n\n # Checking that the location of the TEXT segment is specified\n for k in [\"text start\", \"text end\"]:\n if header[k] == 0:\n raise ValueError(\n \"The FCS file '{}' seems corrupted. (Parser cannot locate information \"\n \"about the '{}' segment.)\".format(self.path, k)\n )\n elif header[k] > self._file_size:\n raise ValueError(\n \"The FCS file '{}' is corrupted. '{}' segment \"\n \"is larger than file size\".format(self.path, k)\n )\n\n self._data_start = header[\"data start\"]\n self._data_end = header[\"data start\"]\n\n if header[\"analysis start\"] != 0:\n warnings.warn(\n \"There appears to be some information in the ANALYSIS segment of file {0}. \"\n \"However, it might not be read correctly.\".format(self.path)\n )\n\n self.annotation[\"__header__\"] = header", "def _parse_header(self):\n log.debug('---In dcd.py, parse_header()')\n #process the first header block\n\n header1 = self._fo.read(92)\n header1_format=\\\n \"i---cccci---i---i---i---xxxxxxxxxxxxxxxxxxxxf---i---i---xxxxxxxxxxxxxxxxxxxxxxxxxxxxi---i---\"\n # |1 |5 |10 |15 |20 |25 |30 |35 |40 |45 |50 |55 |60 |65 |70 |75 |80 |85 |90\n #|header size=84 |nframes*tstep |tstep_size |charm_ver\n # |CORD=has coordinates |block_a |header_size=84\n # |nframes |block_b\n # |starting timestep\n # |timestep between coord sets \n header1_format = string.replace(header1_format, \"-\", \"\")\n header1 = struct.unpack(header1_format, header1)\n header1_size1, c1, c2, c3, c4, self._nframes, self._firsttstep, self._dcdfreq, self._ntsteps, self._tstep_size, self._block_a, self._block_b, self._charm_v, header1_size2 = header1 #unpack the tuple header1\n \n \n self._dcdtype = \"\".join((c1,c2,c3,c4)) #get the data-type field. I it should always be cord...\n if header1_size1 != 84 or header1_size2 !=84:\n log.error(\"error-- header size fields not correct (should be 84)\\n\")\n if self._block_a != 0 or self._block_b != 0:\n log.info(\"I've found a signal possibly indicating an extra record block\")\n log.info(\" I'll try to parse it, but it might fail. Also, I won't use\")\n log.info(\" any data from them.\")", "def _parse_header(self):\n header = int_from_lbytes(self._reader.read(4))\n if header != self._HEADER:\n raise StashFileParseError(f'Invalid header id: 0x{header:08X}')\n self.version = int_from_lbytes(self._reader.read(2))", "def __init__(self, name, header):\n\n self.header = header.copy()\n#\n# Check if the file already exists. 
If it does not, check to see\n# if we were provided with a Primary Header. If not we will need\n# to prepend a default PrimaryHDU to the file before writing the\n# given header.\n#\n if not os.path.exists(name):\n if not self.header.has_key('SIMPLE'):\n hdulist = HDUList([PrimaryHDU()])\n hdulist.writeto(name, 'exception')\n else:\n if self.header.has_key('SIMPLE') and os.path.getsize(name) > 0:\n#\n# This will not be the first extension in the file so we\n# must change the Primary header provided into an image\n# extension header.\n#\n self.header.update('XTENSION','IMAGE','Image extension',\n after='SIMPLE')\n del self.header['SIMPLE']\n\n if not self.header.has_key('PCOUNT'):\n dim = self.header['NAXIS']\n \n if dim == 0:\n dim = ''\n else:\n dim = str(dim)\n\n self.header.update('PCOUNT', 0, 'number of parameters',\n after='NAXIS'+dim)\n\n if not self.header.has_key('GCOUNT'):\n self.header.update('GCOUNT', 1, 'number of groups',\n after='PCOUNT')\n\n self._ffo = _File(name, 'append')\n self._ffo.getfile().seek(0,2)\n\n self._hdrLoc = self._ffo.writeHDUheader(self)\n self._datLoc = self._ffo.getfile().tell()\n self._size = self.size()\n\n if self._size != 0:\n self.writeComplete = 0\n else:\n self.writeComplete = 1", "def parse_headerfile(self):\r\n # Read and store the data contained in the input File\r\n myEDFFile = open(self.InputFile,'rb')\r\n EDFValues = myEDFFile.read()\r\n myEDFFile.close()\r\n\r\n # Extract the Header File that contains info about the record\r\n self.HeaderSize = int(EDFValues[184:192])\r\n self.HeaderFile = EDFValues[:self.HeaderSize]\r\n # Extract the actual data in the input file\r\n self.rawDataRecord = EDFValues[self.HeaderSize:]\r\n self.tempDataRecord = list(self.rawDataRecord)\r\n\r\n # Extract information from the EDF Header File\r\n self.total_duration = int(self.HeaderFile[236:244]) # total duration of the signal (in seconds)\r\n if self.duration == None:\r\n self.duration = self.total_duration\r\n self.signalnum = int(self.HeaderFile[252:256]) # number of signals in edf file\r", "def test_read_header():\n header = get_header(AIA_193_JP2)[0]\n assert isinstance(header, FileHeader)", "def load_header(self, filename=None):\n # (Re-)initialize header\n self.header = HeaderInfo()\n\n # the width of each sample is always 2 bytes\n self.header.sample_width = 2\n\n # If filename specified, use it, else use previously specified\n if filename is not None: self.filename = filename\n self.header.filename = self.filename\n\n # first load the binary in directly\n self.file_handle = open(self.filename, 'rb') # buffering=?\n\n # Read File_Type_ID and check compatibility\n # If v2.2 is used, this value will be 'NEURALCD', which uses a slightly\n # more complex header. Currently unsupported.\n self.header.File_Type_ID = [chr(ord(c)) \\\n for c in self.file_handle.read(8)]\n if \"\".join(self.header.File_Type_ID) != 'NEURALSG':\n logging.info( \"Incompatible ns5 file format. Only v2.1 is supported.\\nThis will probably not work.\")\n\n\n # Read File_Spec and check compatibility.\n self.header.File_Spec = [chr(ord(c)) \\\n for c in self.file_handle.read(16)]\n if \"\".join(self.header.File_Spec[:8]) != '30 kS/s\\0':\n logging.info( \"File_Spec seems to indicate you did not sample at 30KHz.\")\n\n\n #R ead Period and verify that 30KHz was used. 
If not, the code will\n # still run but it's unlikely the data will be useful.\n self.header.period, = struct.unpack('<I', self.file_handle.read(4))\n if self.header.period != 1:\n logging.info( \"Period seems to indicate you did not sample at 30KHz.\")\n self.header.f_samp = self.header.period * 30000.0\n\n\n # Read Channel_Count and Channel_ID\n self.header.Channel_Count, = struct.unpack('<I',\n self.file_handle.read(4))\n self.header.Channel_ID = [struct.unpack('<I',\n self.file_handle.read(4))[0]\n for _ in xrange(self.header.Channel_Count)]\n\n # Compute total header length\n self.header.Header = 8 + 16 + 4 + 4 + \\\n 4*self.header.Channel_Count # in bytes\n\n # determine length of file\n self.file_handle.seek(0, 2) # last byte\n self.header.file_total_size = self.file_handle.tell()\n self.header.n_samples = \\\n (self.header.file_total_size - self.header.Header) / \\\n self.header.Channel_Count / self.header.sample_width\n self.header.Length = np.float64(self.header.n_samples) / \\\n self.header.Channel_Count\n if self.header.sample_width * self.header.Channel_Count * \\\n self.header.n_samples + \\\n self.header.Header != self.header.file_total_size:\n logging.info( \"I got header of %dB, %d channels, %d samples, \\\n but total file size of %dB\" % (self.header.Header,\n self.header.Channel_Count, self.header.n_samples,\n self.header.file_total_size))\n\n # close file\n self.file_handle.close()", "def testReadFileHeader(self):\n output_writer = test_lib.TestOutputWriter()\n test_file = unified_logging.DSCFile(output_writer=output_writer)\n\n test_file_path = self._GetTestFilePath([\n 'uuidtext', 'dsc', '8E21CAB1DCF936B49F85CF860E6F34EC'])\n self._SkipIfPathNotExists(test_file_path)\n\n with open(test_file_path, 'rb') as file_object:\n test_file._ReadFileHeader(file_object)", "def read_header(fits_file):\n\n head = {}\n F = pf.open(fits_file)\n H = F[0].header\n head['Ntot'] = H['N_TOT']\n head['Nmu'] = H['N_MU']\n head['Nsig'] = H['N_SIGMA']\n head['Nv'] = H['N_VOIGT']\n head['Ncoef'] = H['N_COEF']\n head['Nspa'] = H['N_SPARSE']\n head['mu'] = [H['MU1'], H['MU2']]\n head['sig'] = [H['SIGMA1'], H['SIGMA2']]\n head['z'] = F[1].data.field('redshift')\n F.close()\n return head", "def _read_spl_component_header(self):\n\n data_start = self.f.tell()\n # manually do headers\n _1,_2,self.nprocs, nbodies, nint_attr, nfloat_attr, infostringlen = np.fromfile(self.f, dtype=np.uint32, count=7)\n\n # need to figure out what to do with nprocs...it has to always be the same, right?\n head = np.fromfile(self.f, dtype=np.dtype((np.bytes_, infostringlen)),count=1)\n head_normal = head[0].decode()\n head_dict = yaml.safe_load(head_normal)\n\n\n head_dict['nint_attr'] = nint_attr\n head_dict['nfloat_attr'] = nfloat_attr\n # data starts at ...\n next_comp = 4*7 + infostringlen + data_start\n\n self.component_map[head_dict['name']] = next_comp\n self.header[head_dict['name']] = head_dict\n\n next_comp += self.nprocs*1024\n\n # specifically look for indexing\n try:\n self.indexing = head_dict['parameters']['indexing']\n except:\n self.indexing = head_dict['indexing']=='true'\n\n return next_comp", "def parse_header(self, line):\n bml.logger.debug(\"BssFile.parse_header(line=%s)\" % (line))\n # GJP 2021-04-16 Allow empty system names\n m = re.match(r\"(?P<file_type>.)00\\{(?P<system_name>[^\\}]*)\\}=NYYYYYY(?P<summary>.*$)\", line)\n assert m, \"line (%s) does not match header record\" % (line)\n self.file_type = m.group('file_type')\n self.system_name = m.group('system_name')\n self.summary = 
m.group('summary').rstrip()\n bml.logger.debug(\"file_type: %s; system_name: %s; summary: %s\" % (self.file_type, self.system_name, self.summary))\n self.state_nr = self.state_nr + 1 # only one header\n return True", "def __readHeaders(self, fh):\n fh.readline()\n fh.readline()\n \n headersStr = fh.readline()\n headers = [ s.strip() for s in headersStr[1:].split() ]\n unitsStr = fh.readline()\n units = [ s.strip() for s in unitsStr[1:].split() ]\n \n fh.readline()\n \n headers.pop(1)\n units[0] = 'mjd'\n units[1] = 'seconds'\n\n self.startDate = self.__getStartDate(fh)\n\n # Get a mapping of header names to column index\n headerDict = dict(list(zip(headers,list(range(len(headers))))))\n return (headerDict, units)", "def _parse_header(self):\n\n if self.ei_magic != '\\x7fELF':\n return\n\n self.seek(16,0)\n reading = {'h': self.le_half, 'w': self.le_word,'a': self.le_addr,\n 'o': self.le_offset, 'x': self.le_xword}\n labels = ('type', 'machine', 'version', 'entry', 'phoff', \\\n 'shoff', 'flags', 'ehsize', 'phentsize', 'phnum',\\\n 'shentsize','shnum','shstrndx')\n htypes = ('h','h','w','a','o','o','w','h','h','h','h','h','h')\n\n # Retrieve ELF header\n self.elfhead = dict(zip(labels,[reading[t]() for t in htypes]))\n\n # Retrieve section header string table.\n # sh: name, type, flags, addr, offset, size, link, info, addralign, entsize\n self.seek((self.elfhead['shentsize'] * self.elfhead['shstrndx'])\\\n + self.elfhead['shoff'], 0)\n\n labels = ('name', 'type', 'flags', 'addr', 'offset', \\\n 'size', 'link', 'info', 'addralign', 'entsize')\n\n shtypes = ('w','w','x','a','o','x','w','w','x','x')\n\n sh_strtableh = dict(zip(labels,[reading[t]() for t in shtypes]))\n self.seek(sh_strtableh['offset'],0)\n self.sh_strtableh = sh_strtableh\n\n # Now the section header is known, can retrieve dynamic string table\n self.dynstrh = self._find_section('.dynstr')", "def __dbfHeader(self):\r\n if not self.dbf:\r\n raise ShapefileException(\"Shapefile Reader requires a shapefile or file-like object. 
(no dbf file found)\")\r\n dbf = self.dbf\r\n headerLength = self.__dbfHeaderLength()\r\n numFields = (headerLength - 33) // 32\r\n for field in range(numFields):\r\n fieldDesc = list(unpack(\"<11sc4xBB14x\", dbf.read(32)))\r\n name = 0\r\n idx = 0\r\n if \"\\x00\" in fieldDesc[name]:\r\n idx = fieldDesc[name].index(\"\\x00\")\r\n else:\r\n idx = len(fieldDesc[name]) - 1\r\n fieldDesc[name] = fieldDesc[name][:idx]\r\n fieldDesc[name] = fieldDesc[name].lstrip()\r\n self.fields.append(fieldDesc)\r\n terminator = dbf.read(1)\r\n assert terminator == \"\\r\"\r\n self.fields.insert(0, ('DeletionFlag', 'C', 1, 0))", "def __init__(self, fileobj=None):\n\n if fileobj is None:\n # for testing\n self._flags = 0\n return\n\n fn = getattr(fileobj, \"name\", \"<unknown>\")\n data = fileobj.read(10)\n if len(data) != 10:\n raise ID3NoHeaderError(\"%s: too small\" % fn)\n\n id3, vmaj, vrev, flags, size = struct.unpack('>3sBBB4s', data)\n self._flags = flags\n self.size = BitPaddedInt(size) + 10\n self.version = (2, vmaj, vrev)\n\n if id3 != b'ID3':\n raise ID3NoHeaderError(\"%r doesn't start with an ID3 tag\" % fn)\n\n if vmaj not in [2, 3, 4]:\n raise ID3UnsupportedVersionError(\"%r ID3v2.%d not supported\"\n % (fn, vmaj))\n\n if not BitPaddedInt.has_valid_padding(size):\n raise error(\"Header size not synchsafe\")\n\n if (self.version >= self._V24) and (flags & 0x0f):\n raise error(\n \"%r has invalid flags %#02x\" % (fn, flags))\n elif (self._V23 <= self.version < self._V24) and (flags & 0x1f):\n raise error(\n \"%r has invalid flags %#02x\" % (fn, flags))\n\n if self.f_extended:\n extsize_data = read_full(fileobj, 4)\n\n if PY3:\n frame_id = extsize_data.decode(\"ascii\", \"replace\")\n else:\n frame_id = extsize_data\n\n if frame_id in Frames:\n # Some tagger sets the extended header flag but\n # doesn't write an extended header; in this case, the\n # ID3 data follows immediately. 
Since no extended\n # header is going to be long enough to actually match\n # a frame, and if it's *not* a frame we're going to be\n # completely lost anyway, this seems to be the most\n # correct check.\n # https://github.com/quodlibet/quodlibet/issues/126\n self._flags ^= 0x40\n extsize = 0\n fileobj.seek(-4, 1)\n elif self.version >= self._V24:\n # \"Where the 'Extended header size' is the size of the whole\n # extended header, stored as a 32 bit synchsafe integer.\"\n extsize = BitPaddedInt(extsize_data) - 4\n if not BitPaddedInt.has_valid_padding(extsize_data):\n raise error(\n \"Extended header size not synchsafe\")\n else:\n # \"Where the 'Extended header size', currently 6 or 10 bytes,\n # excludes itself.\"\n extsize = struct.unpack('>L', extsize_data)[0]\n\n self._extdata = read_full(fileobj, extsize)", "def __dbfHeader(self):\r\n f = self.__getFileObj(self.dbf)\r\n f.seek(0)\r\n version = 3\r\n year, month, day = time.localtime()[:3]\r\n year -= 1900\r\n # Remove deletion flag placeholder from fields\r\n for field in self.fields:\r\n if field[0].startswith(\"Deletion\"):\r\n self.fields.remove(field)\r\n numRecs = len(self.records)\r\n numFields = len(self.fields)\r\n headerLength = numFields * 32 + 33\r\n recordLength = sum([int(field[2]) for field in self.fields]) + 1\r\n header = pack('<BBBBLHH20x', version, year, month, day, numRecs,\r\n headerLength, recordLength)\r\n f.write(header)\r\n # Field descriptors\r\n for field in self.fields:\r\n name, fieldType, size, decimal = field\r\n name = name.replace(' ', '_')\r\n name = name.ljust(11).replace(' ', '\\x00')\r\n size = int(size)\r\n fld = pack('<11sc4xBB14x', name, fieldType, size, decimal)\r\n f.write(fld)\r\n # Terminator\r\n f.write('\\r')", "def readSVCheader(filename):\n\n def splitListInHalf(thelist):\n \"\"\" This function is used to split a list half\n\n Keyword arguments:\n thelist -- A list to split in half\n\n Returns:\n tuple of two lists\n \"\"\"\n halfPoint = len(thelist) / 2\n return (thelist[:halfPoint], thelist[halfPoint:])\n\n try:\n fid = open(filename, 'rU')\n\n # make sure we are svc sig file\n line = fid.readline()\n if line[:-1] != '/*** Spectra Vista SIG Data ***/':\n # use line[:-1] to ignore the newline\n raise RuntimeError(\"'\" + filename + \"' is not valid SVC sig file.\")\n\n linect = 1\n # prealocate some headers\n commonHeader = {}\n referenceHeader = {}\n targetHeader = {}\n\n commaregex = re.compile(r'\\s*,\\s*')\n\n run = True\n while run:\n line = fid.readline()\n linect += 1\n splitLine = line.split('=')\n key = splitLine[0].strip()\n\n # parse the keys\n if key == 'data':\n run = False\n else:\n value = splitLine[1].strip()\n # should add error checking on this operation\n if key == 'name':\n commonHeader[key] = value\n elif key == 'instrument':\n commonHeader[key] = value\n elif key == 'integration':\n tmp = commaregex.split(value)\n tmp = map(float, tmp)\n (referenceHeader[key], targetHeader[key]) = \\\n splitListInHalf(tmp)\n elif key == 'scan method':\n (referenceHeader[key], targetHeader[key]) = \\\n splitListInHalf(value)\n elif key == 'scan coadds':\n tmp = commaregex.split(value)\n tmp = map(float, tmp)\n (referenceHeader[key], targetHeader[key]) = \\\n splitListInHalf(tmp)\n elif key == 'scan time':\n tmp = commaregex.split(value)\n # can this be an int?\n tmp = map(float, tmp)\n (referenceHeader[key], targetHeader[key]) = \\\n splitListInHalf(tmp)\n elif key == 'scan settings':\n tmp = commaregex.split(value)\n (referenceHeader[key], targetHeader[key]) = \\\n 
splitListInHalf(tmp)\n elif key == 'external data set1':\n # these seem to not have a space after the comma....\n # I may want to switch to regualar expressions for this!\n tmp = commaregex.split(value)\n # i need to check that this is an int\n tmp = map(float, tmp)\n (referenceHeader[key], targetHeader[key]) = \\\n splitListInHalf(tmp)\n elif key == 'external data set2':\n # these seem to not have a space after the comma....\n # I may want to switch to regualar expressions for this!\n tmp = commaregex.split(value)\n # i need to check that this is an int\n tmp = map(float, tmp)\n (referenceHeader[key], targetHeader[key]) = \\\n splitListInHalf(tmp)\n elif key == 'external data dark':\n # these seem to not have a space after the comma....\n # I may want to switch to regualar expressions for this!\n tmp = commaregex.split(value)\n # i need to check that this is an int\n tmp = map(float, tmp)\n (referenceHeader[key], targetHeader[key]) = \\\n splitListInHalf(tmp)\n elif key == 'external data mask':\n commonHeader[key] = float(value)\n elif key == 'optic':\n (referenceHeader[key], targetHeader[key]) = \\\n splitListInHalf(value)\n elif key == 'temp':\n tmp = commaregex.split(value)\n # i need to check that this is an int\n tmp = map(float, tmp)\n (referenceHeader[key], targetHeader[key]) = \\\n splitListInHalf(tmp)\n elif key == 'battery':\n tmp = commaregex.split(value)\n tmp = map(float, tmp)\n (referenceHeader[key], targetHeader[key]) = \\\n splitListInHalf(tmp)\n elif key == 'error':\n tmp = commaregex.split(value)\n tmp = map(int, tmp)\n (referenceHeader[key], targetHeader[key]) = \\\n splitListInHalf(tmp)\n elif key == 'units':\n (referenceHeader[key], targetHeader[key]) = \\\n splitListInHalf(value)\n elif key == 'time':\n (referenceHeader[key], targetHeader[key]) = \\\n splitListInHalf(value)\n elif key == 'latitude':\n (referenceHeader[key], targetHeader[key]) = \\\n splitListInHalf(value)\n elif key == 'longitude':\n (referenceHeader[key], targetHeader[key]) = \\\n splitListInHalf(value)\n elif key == 'gpstime':\n tmp = commaregex.split(value)\n # check to see if the value was set.\n if not tmp:\n referenceHeader[key] = None\n targetHeader[key] = None\n elif tmp[0] and tmp[1]:\n tmp = map(float, tmp)\n (referenceHeader[key], targetHeader[key]) = \\\n splitListInHalf(tmp)\n else:\n referenceHeader[key] = None\n targetHeader[key] = None\n elif key == 'comm':\n commonHeader[key] = value\n elif key == 'memory slot':\n tmp = commaregex.split(value)\n tmp = map(int, tmp)\n (referenceHeader[key], targetHeader[key]) = \\\n splitListInHalf(tmp)\n elif key == 'factors':\n idx = value.find('[')\n if idx > 0:\n tmp = value[:idx]\n tmp = tmp.strip()\n tmp = commaregex.split(tmp)\n commonHeader[key] = map(float, tmp)\n tmp = value[idx+1:]\n idx = tmp.find(']')\n if idx > 0:\n tmp = tmp[:idx]\n commonHeader['factors comment'] = tmp\n else:\n # no comments\n tmp = commaregex.split(value)\n commonHeader[key] = map(float, tmp)\n else:\n # we are an unknown key, but we can add it since I can index\n # dictionaries by strings\n # add __unknown_ in front to help with parsing in writing.\n commonHeader['__unknown_' + key] = value\n\n\n return commonHeader, referenceHeader, targetHeader, linect\n except IOError:\n pass\n except RuntimeError:\n pass\n finally:\n fid.close()", "def testReadFileHeader(self):\n output_writer = test_lib.TestOutputWriter()\n test_file = unified_logging.UUIDTextFile(output_writer=output_writer)\n\n test_file_path = self._GetTestFilePath([\n 'uuidtext', '22', 
'0D3C2953A33917B333DD8366AC25F2'])\n self._SkipIfPathNotExists(test_file_path)\n\n with open(test_file_path, 'rb') as file_object:\n test_file._ReadFileHeader(file_object)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Time step size in seconds
def timestep(self): return self._timestep_in_seconds
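A minimal usage sketch for the accessor above, written as a property purely for illustration (the dataset entry shows only the method); the SimulationResult name and the 3600-second value are invented:

class SimulationResult:
    """Hypothetical container exposing the time step accessor shown above."""

    def __init__(self, timestep_in_seconds):
        self._timestep_in_seconds = timestep_in_seconds

    @property
    def timestep(self):
        # time step size in seconds
        return self._timestep_in_seconds


res = SimulationResult(timestep_in_seconds=3600.0)
print(res.timestep)  # -> 3600.0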
[ "def time_step(self):\n ts = float(rospy.get_param('/time_step_size', None))\n\n if ts is None:\n raise RuntimeError(\"No Time step has been set by the driving node..\")\n else:\n return ts", "def n_timesteps(self) -> int:\n if self.total_time < self.timestep:\n warnings.warn(\n f\"No simulation possible: you asked for {self.total_time} \"\n f\"simulation time but the timestep is {self.timestep}\"\n )\n return floor(self.total_time.total_seconds() / self.timestep.total_seconds())", "def step_size(self) -> Timedelta:\n assert self._step_size is not None, \"No step size provided\"\n return self._step_size", "def getNrTimesteps():\n\n timesteps = 25\n return timesteps", "def test_TIME_smaller_timestep(self):\n with mn.model(timestep=0.5) as m:\n Time = mn.variable('Time', lambda md: md.TIME, '__model__')\n Step = mn.variable('Step', lambda md: md.STEP, '__model__')\n self.assertEqual(Time[''], 0)\n self.assertEqual(Step[''], 0)\n m.step()\n self.assertEqual(Time[''], 0.5)\n self.assertEqual(Step[''], 1)\n m.step()\n self.assertEqual(Time[''], 1)\n self.assertEqual(Step[''], 2)\n m.reset()\n self.assertEqual(Time[''], 0)\n self.assertEqual(Step[''], 0)", "def duration(self):\n return self.no_timesteps * self.dt", "def timestep(self):\n return self.global_timestep", "def _calculate_runtime(self):\n\n _time = 0\n for _, _passes, _captures in self._batches:\n for cap in _captures:\n _time_temp = ((cap.duration * _passes))\n\n if cap.focused:\n _nmacs = len(cap.macs)\n _deg, _dur = cap.focused\n _time_fine = (_deg * _dur) / 360\n _time_fine *= _nmacs\n _time_temp += _time_fine\n\n _time += _time_temp\n\n return datetime.timedelta(seconds=_time)", "def StepsPerInch(self) -> float:", "def ms(self, t):\n return t // 1000000", "def simulation_time_steps(self):\n\n return (self.__simulation_end_time_step - self.__simulation_start_time_step) + 1", "def _flow_time_step(self, dt: float, **kwargs):\n ...", "def get_stepsize(self) -> float:\n return self.m_stepsize", "def duration(self):\n\t\treturn int(self._duration/self.tick_period) * self.tick_period", "def TimeLengthMS(runData):\n firsttime = runData['magnetometer'][0][0]\n lasttime = runData['magnetometer'][-1][0]\n return (lasttime - firsttime) / 1e6 # convert to ms from ns", "def total_time(self):\n t = timedelta()\n for step in self.steps:\n if ('time' in step):\n t += self.parsetime(step['time'])\n return(t)", "def step_interval_millis(self):\n return self.__stepIntervalMillis", "def timescale(self):\n raise NotImplementedError", "def tstep_t(self, step):\n it = int(float(step))\n self.set_tstep(it)\n self.update(it, isframe=True)", "def _minTimeStep(self) -> float:\r\n start = perf_counter()\r\n\r\n # CFL & Force coeffs\r\n gamma_c = 0.25; gamma_f = 0.25\r\n\r\n m, c, f = TimeStep().compute(self.fluid_count, self.particleArray[self.f_indexes], gamma_c, gamma_f)\r\n\r\n # Check the time-step.\r\n if (m < 1e-6):\r\n # Only show warning is non sequential.\r\n # if (len(self.dt_a) != self.ts_error + 1):\r\n # println(f'{Fore.YELLOW}WARNING! {Style.RESET_ALL}Time-step has become very small; might indicate an error.')\r\n self.ts_error = len(self.dt_a)\r\n\r\n # Store for later retrieval, making pretty plots or whatevs.\r\n self.dt_c.append(c); self.dt_f.append(f); self.dt_a.append(m)\r\n\r\n self.timing_data['time_step'] += perf_counter() - start\r\n return m" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change the code values of the nodes
def set_codes(self, codes): if len(codes) != self.n_nodes: raise Exception(f"codes must have length of nodes ({self.n_nodes})") self._codes = codes self._valid_codes = None
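A minimal sketch of the setter in use, assuming a mesh-like owner that carries n_nodes, _codes and a cached _valid_codes; the _MeshStub class and the concrete code values exist only for this illustration:

class _MeshStub:
    """Hypothetical stand-in with the attributes the setter above relies on."""

    def __init__(self, n_nodes):
        self.n_nodes = n_nodes
        self._codes = None
        self._valid_codes = None

    # same logic as the positive example above
    def set_codes(self, codes):
        if len(codes) != self.n_nodes:
            raise Exception(f"codes must have length of nodes ({self.n_nodes})")
        self._codes = codes
        self._valid_codes = None  # drop any cached set of valid codes


mesh = _MeshStub(n_nodes=4)
mesh.set_codes([0, 1, 1, 2])   # one boundary code per node
# mesh.set_codes([0, 1])       # would raise: length 2 != 4 nodes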
[ "def update_code(self, new_code):\n self.code = new_code # code from __inti ___\n\n # Fill in the rest", "def update_code(self, new_code):\n\n # Fill in the rest\n self.code = new_code\n # print(self.code) #for checking\n return self.code", "def handle_nodes(nodes):\n\t# Assumptions: the node() line is all one one line\n\n\tsplit_nodes = []\n\tcurnode = -1\n\tfor m in nodes:\n\t\tsplit_nodes.append({})\n\t\tcurnode += 1\n\n\t\t# TODO: make this a function call or something so i can change the node language more easily\n\t\t# no need to error check this since we already did in process_node\n\t\tma = re.match(g.MAP_RE, m)\n\n\t\tsplit_nodes[curnode][\"label\"] = ma.group(1)\n\t\tsplit_nodes[curnode][\"rank\"] = ma.group(2)\n\t\tsplit_nodes[curnode][\"index\"] = ma.group(3)\n\t\tsplit_nodes[curnode][\"lower\"] = ma.group(4)\n\t\tsplit_nodes[curnode][\"step\"] = ma.group(5)\n\t\tsplit_nodes[curnode][\"upper\"] = ma.group(6)\n\t\tsplit_nodes[curnode][\"cond\"] = ma.group(7)\n\t\tsplit_nodes[curnode][\"pred\"] = ma.group(8)\n\t\tsplit_nodes[curnode][\"targets\"] = ma.group(9)\n\t\tsplit_nodes[curnode][\"func\"] = ma.group(10)\n\t\tsplit_nodes[curnode][\"func_name\"] = ma.group(11)\n\t\tsplit_nodes[curnode][\"in_args\"] = ma.group(12)\n\n\n\t# go through the nodes and one at a time output the code. The multiple\n\t# loops are necessary so that the code is output together for each\n\t# function.\n\t# TODO: some of these loops could be combined together for performance\n\tfor m in split_nodes:\n\t\tg.nodes[m['label']] = {'label': m['label'], 'func_name': m['func_name'], 'index': m['index'], 'rank': m['rank']}\n\t\tg.functions[m['label']] = m['func_name']\n\t\tg.intervals[m['label']] = {'lower': m['lower'], 'step': m['step'], 'upper': m['upper']}\n\t\tg.real_preds[m['label']] = make_targets(m['pred'])\n\t\tg.preds[m['label']] = flatten(g.real_preds[m['label']])\n\t\tg.real_targets[m['label']] = make_targets(m['targets'])\n\t\tg.targets[m['label']] = flatten(g.real_targets[m['label']])\n\t\t#g.targets[m['label']] = flatten(make_targets(m['targets']))\n\t\tg.target_variables[m['label']] = m['cond']\n\tfor n in g.nw_calls:\n\t\tg.real_preds[n['label']] = make_targets(n['preds'])\n\t\tg.real_targets[n['label']] = make_targets(n['succ'])\n\n\t# create the task graph\n\tg.graph[\"0\"] = graph.GraphNode(\"0\", [], [])\n\tfor n in split_nodes:\n\t\tlabel = n['label']\n\t\tnode = graph.GraphNode(label, g.real_preds[label], g.real_targets[label])\n\t\tif \"0\" in node.get_pred():\n\t\t\tg.graph[\"0\"].add_succ(label)\n\t\tg.graph[label] = node\n\tfor n in g.nw_calls:\n\t\tlabel = n['label']\n\t\tnode = graph.GraphNode(label, g.real_preds[label], g.real_targets[label])\n\t\tif \"0\" in node.get_pred():\n\t\t\tnode.output()\n\t\t\terror(\"Cannot start a graph with a network call\")\n\t\tg.graph[label] = node\n\tgraph.compute_dominance(g.graph)\n\tgraph.match_forks_to_joins(g.graph)\n\n#\tfor l in g.graph:\n#\t\tg.graph[l].output()\n\n\tfor m in split_nodes:\n\t\t# store the input args so we can refer to their type later\n\t\tprocess_func_args(m['func_name'], m['in_args'])\n\t\tprocess_input_arguments(m['func_name'], m['in_args'])\n\t\tif m['index'] == 'NULL':\n\t\t\t#warning(\"Caught a NULL loop index variable that will be replaced with '\" + g.INDEX + \"'\")\n\t\t\tm['index'] = g.INDEX\n\t\tg.indices[m['label']] = m['index']\n\n\tfor m in split_nodes:\n\t\thandle_main_node(m['label'], m['lower'], m['step'], m['upper'], m['func_name'])\n\n\tif g.OUTPUT == \"C\" or g.OUTPUT == 
\"omp\":\n\t\tpil2c.print_main_func()\n\t\tpil2c.print_funcs()\n\t\tpil2c.handle_nodes(split_nodes)\n\telif g.OUTPUT == \"swarm\":\n\t\tpil2swarm.print_main_func()\n\t\tpil2swarm.print_funcs()\n\t\tpil2swarm.handle_nodes(split_nodes)\n\telif g.OUTPUT == \"afl\":\n\t\tpil2afl.print_main_func()\n\t\tpil2afl.print_funcs()\n\t\tpil2afl.handle_nodes(split_nodes)\n\telif g.OUTPUT == \"ocr\":\n\t\tpil2ocr.print_main_func()\n\t\tpil2ocr.print_funcs()\n\t\tpil2ocr.handle_nodes(split_nodes)\n\telse:\n\t\terror(\"Unknown OUTPUT backend: \" + g.OUTPUT)\n\n\treturn split_nodes", "def setCode(self, c):\n\t\t\n\t\tself.code = c", "def __setNodes(self, value):\n\n self.__nodes = value", "def update_value(self, value):\n self.node_value = value", "def set_value(self,value):\n self.node.set(value)", "def updateNodes(nodes):\r\n global node_names\r\n node_names.extend(n.classname for n in nodes)", "def update(self, data, node):\n node.data = data", "def set_code(self, key, value):\n self._code[key] = value", "def reformat_code(self, event=None):\n code = self.get_text()\n self._parent_notebook.ide.fpl_interpreter.syntax_transform(self.title, code)\n self.set_text(self._parent_notebook.ide.fpl_interpreter.prettyfied(), init=False)\n self.parse_interpret_highlight()", "def replaceNode(self, *args) -> \"void\":\n return _coin.SoMFNode_replaceNode(self, *args)", "def ModifyNode(self, attribute_list, predicates): \n nodes_modified = attribute_list[0] \n attrs_changed = attribute_list[1] \n modify_boolean = attribute_list[2] \n self.query_evaluator.modify_node(nodes_modified[2], \n attrs_changed[2], int ((modify_boolean[2])['val']))", "def set_code(self, code):\n self._code = code", "def set_nodes(self, nodes):\n self.nodes = nodes\n self.update_size()", "def setNode(self, x, y, val):\n self.nodes[y][x] = val", "def update (self, nodes=[], edges=[]):\n for n in nodes:\n props = \",\".join([\n f\"\"\" s.{k} = \"{v}\" \"\"\"\n for k, v in n.items()\n if not k == \"id\"\n ])\n statement = f\"\"\"MATCH (s {{ id : \"{n['id']}\" }}) SET {props} \"\"\"\n self.exec (statement)\n\n # TODO: determine best way to represent hierarchical node properties.\n # TODO: analogous logic for updating edges.", "def test_code(self):\r\n code = Element('code')\r\n text = Text()\r\n text.data = u\"print this: twisted.lore.latex\"\r\n code.appendChild(text)\r\n\r\n self.spitter.visitNode(code)\r\n self.assertEqual(\r\n ''.join(self.output),\r\n \"\\\\texttt{print this: twisted.\\\\linebreak[1]lore.\\\\\"\r\n \"linebreak[1]latex}\")", "def _map_code(self, code):\r\n\r\n mapping = {'C': 'V', 'E': 'E', 'F': 'E', 'I': 'V', 'R': 'W', 'W': 'W'}\r\n return (mapping[code[0]], code[1:])", "def annotate_extrinsic_node_data(self, data):\n for key, values in data[self.mirna_name].items():\n for n, v in zip(sorted(self.graph.nodes), values):\n if isinstance(v, int) or isinstance(v, float):\n v = np.array([v])\n self.graph.node[n][key] = v" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot mesh boundary nodes and their codes
def plot_boundary_nodes(self, boundary_names=None): import matplotlib.pyplot as plt nc = self.node_coordinates c = self.codes if boundary_names is not None: if len(self.boundary_codes) != len(boundary_names): raise Exception( f"Number of boundary names ({len(boundary_names)}) inconsistent with number of boundaries ({len(self.boundary_codes)})" ) user_defined_labels = dict(zip(self.boundary_codes, boundary_names)) fig, ax = plt.subplots() for code in self.boundary_codes: xn = nc[c == code, 0] yn = nc[c == code, 1] if boundary_names is None: label = f"Code {code}" else: label = user_defined_labels[code] plt.plot(xn, yn, ".", label=label) plt.legend() plt.title("Boundary nodes") ax.set_xlim(nc[:, 0].min(), nc[:, 0].max()) ax.set_ylim(nc[:, 1].min(), nc[:, 1].max())
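A sketch of how the plotting method might be driven, assuming it has been lifted out as a module-level function plot_boundary_nodes(self, boundary_names=None) identical to the code above; the coordinates, code values and boundary names are invented, with code 0 treated as an internal node:

import types

import numpy as np

# Hypothetical mesh-like object carrying only what the method reads:
# node coordinates, per-node codes and the list of boundary codes.
mesh = types.SimpleNamespace(
    node_coordinates=np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0],
                               [0.0, 1.0], [0.5, 0.5]]),
    codes=np.array([1, 1, 2, 2, 0]),
    boundary_codes=[1, 2],
)

# Two boundary codes are present, so exactly two names must be supplied.
plot_boundary_nodes(mesh, boundary_names=["south", "north"])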
[ "def plot_fe_mesh(nodes, elements, element_marker=[0, 0.1]):\n plt.hold('on')\n all_x_L = [nodes[elements[e][0]] for e in range(len(elements))]\n element_boundaries = all_x_L + [nodes[-1]]\n for x in element_boundaries:\n plt.plot([x, x], element_marker, 'm--') # m gives dotted lines\n plt.plot(nodes, [0]*len(nodes), 'ro')", "def draw_mesh(mesh_obj, electrode_num, electrode_centers, electrode_radius):\n\n plt.rcParams['font.family'] = 'Times New Roman'\n # plt.rc('text', usetex=True)\n plt.rc('xtick', labelsize=12)\n plt.rc('ytick', labelsize=12)\n plt.rc('axes', labelsize=12)\n points = mesh_obj['node']\n tri = mesh_obj['element']\n perm = mesh_obj['perm']\n x, y = points[:, 0] * 0.7, points[:, 1] * 0.7\n fig, ax = plt.subplots(figsize=(4.25, 4.25))\n im = ax.tripcolor(x, y, tri, np.abs(perm), shading='flat', edgecolors='k', vmax=2, vmin=0)\n # fig.colorbar(im)\n for i, electrode_center in enumerate(electrode_centers):\n x = electrode_center[0] - electrode_radius\n y = electrode_center[1] - electrode_radius\n width = 2 * electrode_radius * 0.7\n ax.add_patch(\n patches.Rectangle(\n (x * 0.7, y * 0.7), # (x,y)\n width, # width\n width, # height\n color='y'\n )\n )\n ax.annotate(str(i), (x * 0.7, y * 0.7))\n ax.set_aspect('equal')\n\n _, ax = plt.subplots(figsize=(20, 20))\n ax.plot(points[:, 0], points[:, 1], 'ro', markersize=5)\n for i in range(points.shape[0]):\n ax.text(points[i, 0], points[i, 1], str(i), fontsize=8)\n ax.grid('on')\n ax.set_aspect('equal')\n plt.show()", "def plot_marked_region(self,boundary_parts, bulk_parts,plot_index_1=1,view_elev=0, view_azim=0):\n ax_1= self.axes[str(plot_index_1+1)]\n ax_1.view_init(view_elev , view_azim)\n ax_1.set_title('Mesh bulk', fontsize=20)\n plot(bulk_parts)\n plt.show()\n plt.savefig('/test_'+str(0)+'.png', dpi=100)", "def plot_mesh(mesh):\n\tfig = plt.figure(figsize=(16,9))\n\tax = fig.add_subplot(111)\n\tx_mesh = [coord[0] for coord in mesh]\n\ty_mesh = [coord[1] for coord in mesh]\n\tax.scatter(x_mesh,y_mesh)\n\tplt.show()", "def MeshPlot(self):\n actors = []\n if self.dim == 2:\n actors.append(self.Domain(0, opacity=0.8))\n actors.append(self.MeshFunction(self.datadim))\n else:\n actors.append(self.MeshFunction(self.datadim, opacity=1, lut=self.blue3D))\n actors.append(self.Domain(1))\n actors.append(self.Edges())\n self.Render(actors)\n self.last = \"self.MeshPlot()\"", "def plot(self):\n\t\txspace = np.linspace(-self.radius, self.radius, 1000)\n\t\tyspace = np.linspace(-self.radius, self.radius, 1000)\n\t\tX,Y = np.meshgrid(xspace,yspace)\n\t\tF = self.equation(X,Y)\n\t\tfig, ax = plt.subplots()\n\t\tax.contour(X,Y,F,[0])\n\t\tax.set_aspect(1)\n\t\tplt.title(\"Boundary\", fontsize=8)\n\t\tplt.xlim(-self.radius - .25, self.radius + .25)\n\t\tplt.ylim(-self.radius - .25, self.radius + .25)\n\t\tplt.grid(linestyle='--')", "def plot_all_nodes(self):\n x_arr = np.zeros(len(self.closed_list) + len(self.open_list) - 1)\n y_arr = np.zeros(len(self.closed_list) + len(self.open_list) - 1)\n index = 0\n\n # Assemble arrays of x and y coords\n while index < len(self.closed_list) + len(self.open_list) - 1:\n for cl_i, node in enumerate(self.closed_list):\n x_arr[index] = node.x_pos_w\n y_arr[index] = node.y_pos_w\n index = cl_i\n\n for ol_i, node in enumerate(self.open_list):\n x_arr[index] = node.x_pos_w\n y_arr[index] = node.y_pos_w\n index = ol_i + len(self.closed_list)\n\n # Plot and update the figure\n plt.plot(x_arr, y_arr, 'ko', markersize=1)\n self.field.fig.canvas.draw()", "def drawDecisionBoundary(self):\r\n x, y = 
np.linspace(MIN_PLOT_VALUE, MAX_PLOT_VALUE), np.linspace(MIN_PLOT_VALUE, MAX_PLOT_VALUE)\r\n xx, yy = np.meshgrid(x, y)\r\n \r\n samples = np.empty(shape=(0, 2))\r\n for i in range(len(xx)):\r\n for j in range(len(yy)):\r\n samples = np.vstack((samples, np.array([[xx[i, j], yy[i, j]]])))\r\n z = self.neuralNetwork.forward(samples, self.activationFunction)\r\n z = np.transpose(z)\r\n classOneZ = z[0]\r\n classTwoZ = z[1]\r\n classOneZ = np.reshape(classOneZ, xx.shape)\r\n classTwoZ = np.reshape(classTwoZ, xx.shape)\r\n \r\n self.axis.contourf(xx, yy, classOneZ, cmap=mpl.cm.Greens, alpha=0.5)\r\n self.axis.contourf(xx, yy, classTwoZ, cmap=mpl.cm.Reds, alpha=0.5)", "def show_mesh(self):\n self.create_graph()\n self.assign_load_case()\n # self.assign_wind_loads()\n self.apply_stresses()\n self.create_slf_file()\n self.test_slf_file()\n self.parse_results()\n self.show_analysis()", "def plot_quadmesh(self, ax, name=None, show=True, set_axis=True, \n vertex_numbers=False, edge_numbers=False,\n cell_numbers=False):\n node = self.root_node()\n if set_axis:\n x0, x1, y0, y1 = node.quadcell().box() \n hx = x1 - x0\n hy = y1 - y0\n ax.set_xlim(x0-0.1*hx, x1+0.1*hx)\n ax.set_ylim(y0-0.1*hy, y1+0.1*hy)\n rect = plt.Polygon([[x0,y0],[x1,y0],[x1,y1],[x0,y1]],fc='b',alpha=0.5)\n ax.add_patch(rect)\n #\n # Plot QuadCells\n # \n for cell in self.iter_quadcells():\n \n x0, y0 = cell.vertices['SW'].coordinate()\n x1, y1 = cell.vertices['NE'].coordinate() \n\n # Plot current cell\n # plt.plot([x0, x0, x1, x1],[y0, y1, y0, y1],'r.')\n points = [[x0, y0], [x1, y0], [x1, y1], [x0, y1]]\n if cell.is_marked():\n rect = plt.Polygon(points, fc='r', edgecolor='k')\n else:\n rect = plt.Polygon(points, fc='w', edgecolor='k')\n ax.add_patch(rect)\n \n #\n # Plot Vertex Numbers\n # \n if vertex_numbers:\n vertices = self.quadvertices()\n v_count = 0\n for v in vertices:\n x,y = v.coordinate()\n x += 0.01\n y += 0.01\n ax.text(x,y,str(v_count),size='smaller')\n v_count += 1\n \n #\n # Plot Edge Numbers\n #\n if edge_numbers:\n edges = self.iter_quadedges()\n e_count = 0\n for e in edges:\n if not(e.is_marked()):\n v1, v2 = e.vertices()\n x0,y0 = v1.coordinate()\n x1,y1 = v2.coordinate()\n x_pos, y_pos = 0.5*(x0+x1),0.5*(y0+y1)\n if x0 == x1:\n # vertical\n ax.text(x_pos,y_pos,str(e_count),rotation=-90,\n size='smaller',verticalalignment='center')\n else:\n # horizontal\n y_offset = 0.05*np.abs((x1-x0))\n ax.text(x_pos,y_pos+y_offset,str(e_count),size='smaller',\n horizontalalignment='center') \n e_count += 1\n e.mark()\n \n #\n # Plot Cell Numbers\n #\n if cell_numbers:\n cells = self.iter_quadcells()\n c_count = 0\n for c in cells:\n x0,x1,y0,y1 = c.box()\n x_pos, y_pos = 0.5*(x0+x1), 0.5*(y0+y1)\n ax.text(x_pos,y_pos,str(c_count),horizontalalignment='center',\n verticalalignment='center',size='smaller')\n c_count += 1\n return ax", "def plot_structure(self):\n pass", "def vismesh(xmesh, cmap='viridis', show=False, SphCoord=True, \n config_quiver=(2, 4, 'k', 1), n_vrange=None, s_vrange=None,\n lonshift=0, figsize=(10, 5)):\n nlat, nlon, nd = xmesh.shape\n lmax_plot = nlat - 1\n if SphCoord:\n fig = [None for _ in range(2)]\n ax = [None for _ in range(2)]\n xshear= _np.linalg.norm(xmesh[...,1:], axis=-1)\n \n fig[0], ax[0] = plotfv(xmesh[...,0], show=show, cmap=cmap,vrange=n_vrange,\n lonshift=lonshift, figsize=figsize)\n ax[0].set_title('norm')\n \n fig[1], ax[1] = plotfv(xshear, show=show, cmap='Reds', lonshift=lonshift, figsize=figsize, vrange=s_vrange)\n latsdeg, lonsdeg = _psh.expand.GLQGridCoord(lmax_plot, 
extend=True)\n lons, lats = _np.meshgrid(lonsdeg, latsdeg)\n xshift = _np.roll(xmesh, _np.round(lons.shape[1]*lonshift/360).astype(_np.int), axis=1)\n st, dq, color, scale = config_quiver\n ax[1].quiver(lons[::dq,st::dq], lats[::dq,st::dq], \n xshift[::dq,st::dq,2], -xshift[::dq,st::dq,1], \n color=color, scale=scale)\n ax[1].set_title('shear')\n else:\n fig = [None for _ in range(3)]\n ax = [None for _ in range(3)]\n titlestr = ('x', 'y', 'z')\n for k in range(3):\n fig[k], ax[k] = plotfv(xmesh[...,k], show=show, cmap=cmap, lonshift=lonshift, figsize=figsize)\n ax[k].set_title('$'+titlestr[k]+'$')\n return fig, ax", "def plotNodes(self):\n for type in self.nodeWithTypes:\n for n in self.nodeWithTypes[type]:\n x_coords,y_coords = n\n self.MplWidget.canvas.axes.scatter(x_coords, y_coords, 20, self.nodeColor[type], zorder=3)\n self.updateCounterDisplay()", "def calculate_change_mesh(self):", "def beam_gate_boundary_plots(boundaries, clusters, clust_idf, glim, blim, title, fname, gflg_type=-1):\n GS_CASES = [\"Sudden [2004]\", \"Blanchard [2006]\", \"Blanchard [2009]\"]\n if gflg_type>=0: case = GS_CASES[gflg_type]\n fig, ax = plt.subplots(figsize=(6,4), nrows=1, ncols=1, dpi=180)\n ax.set_ylabel(\"Gates\", fontdict=font)\n ax.set_xlabel(\"Beams\", fontdict=font)\n ax.set_xlim(blim[0]-1, blim[1] + 2)\n ax.set_ylim(glim[0], glim[1])\n for b in range(blim[0], blim[1] + 1):\n ax.axvline(b, lw=0.3, color=\"gray\", ls=\"--\")\n boundary = boundaries[b]\n for bnd in boundary:\n ax.plot([b, b+1], [bnd[\"lb\"], bnd[\"lb\"]], ls=\"--\", color=\"b\", lw=0.5)\n ax.plot([b, b+1], [bnd[\"ub\"], bnd[\"ub\"]], ls=\"--\", color=\"g\", lw=0.5)\n #ax.scatter([b+0.5], [bnd[\"peak\"]], marker=\"*\", color=\"k\", s=3)\n fonttext[\"size\"] = 6\n for x in clusters.keys():\n C = clusters[x]\n for _c in C: \n if clust_idf is None: ax.text(_c[\"bmnum\"]+(1./3.), (_c[\"ub\"]+_c[\"lb\"])/2, \"%02d\"%int(x),\n horizontalalignment=\"center\", verticalalignment=\"center\",fontdict=fonttext)\n else: ax.text(_c[\"bmnum\"]+(1./3.), (_c[\"ub\"]+_c[\"lb\"])/2, clust_idf[x],\n horizontalalignment=\"center\", verticalalignment=\"center\",fontdict=fonttext)\n ax.axvline(b+1, lw=0.3, color=\"gray\", ls=\"--\")\n ax.set_title(title)\n fonttext[\"size\"] = 10\n if gflg_type>=0: ax.text(1.05, 0.5, case, ha=\"center\", va=\"center\", fontdict=fonttext, transform=ax.transAxes, rotation=90)\n ax.set_xticks(np.arange(blim[0], blim[1] + 1) + 0.5)\n ax.set_xticklabels(np.arange(blim[0], blim[1] + 1))\n fig.savefig(fname, bbox_inches=\"tight\")\n return", "def visualize_on_network(network, node_values, coords_path,\n titles, cmap='YlOrRd',\n node_size=50, font_size=8, scale=500):\n assert node_values[0].size, \"there should be multiple values per node\"\n\n # This is the grid for 5 pictures\n gs = gridspec.GridSpec(3, 4, width_ratios=(20, 1, 20, 1))\n network_gs_indices = [(0, 0), (0, 2), (1, 0), (1, 2), (2,0)]\n cbar_gs_indices = [(0, 1), (0, 3), (1, 1), (1, 3), (2, 1)]\n\n # Loading coordinates from the file\n with open(coords_path, 'rb') as f:\n #coords = pickle.load(f, encoding='latin1')\n coords = pickle.load(f, encoding='latin1')\n\n # Loop over different value sets\n for node_val, title, network_gs_index, cb_gs_index in zip(node_values,\n titles,\n network_gs_indices,\n cbar_gs_indices):\n # Draw the network figure\n ax = plt.subplot(gs[network_gs_index[0], network_gs_index[1]])\n nx.draw(network, pos=coords, node_color=node_val, cmap=cmap,\n node_size=node_size, font_size=font_size, edgecolors='black')\n \n \n # Draw the colorbar 
(cb)\n cmap = plt.get_cmap('OrRd')\n norm = mpl.colors.Normalize(vmin=np.min(node_val), vmax=np.max(node_val))\n scm = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)\n plt.colorbar(scm, ax=ax)\n\n ax.set_title(title)\n\n plt.tight_layout()\n #retun fig THis is a error before providing by the teacher\n return plt", "def display_mesh_info(self):\n\n print(\"Mesh Statistics:\")\n print(\"--{0} nodes\".format(self.num_nodes))\n print(\"--{0} elements\".format(len(self.elements)))\n\n regions = max(self.mesh_attributes) + 1\n text = \"--{0} region\".format(regions)\n\n if regions == 1:\n text += \"\\n\"\n else:\n text += \"s\\n\"\n\n print(text)", "def exo1():\n nblist = round(linspace(10, nb, 4))\n for i in 1: length(nblist):\n V = U(: , 1: nblist(i))\n subplot(2, 2, i)\n plot_mesh((vertex*V)*V', faces)", "def plot_nodes(ax, nodes):\n if nodes is not None and len(nodes) > 0:\n for node in nodes:\n ax.plot(node.position.item(1), node.position.item(0), \"yo\", markersize=NODESIZE)", "def plot_2D_boundary(plot_range,points,decisionfcn,labels,values=[0]):\n\n clist = ['b', 'r', 'g', 'k', 'm', 'y'] # colors for the classes\n\n # evaluate on a grid and plot contour of decision function\n x = np.arange(plot_range[0], plot_range[1], .1)\n y = np.arange(plot_range[2], plot_range[3], .1)\n xx, yy = np.meshgrid(x, y)\n xxx, yyy = xx.flatten(), yy.flatten() # lists of x,y in grid\n zz = np.array(decisionfcn(xxx, yyy))\n zz = zz.reshape(xx.shape)\n # plot contour(s) at values\n plt.contour(xx, yy, zz, values)\n\n # for each class, plot the points with '*' for correct, 'o' for incorrect\n # points is a list which contains two numpy array\n for i in range(len(points)):\n d = decisionfcn(points[i][:, 0], points[i][:, 1])\n correct_ndx = labels[i] == d\n incorrect_ndx = labels[i] != d\n plt.plot(points[i][correct_ndx, 0], points[i][correct_ndx, 1], '*', color=clist[i])\n plt.plot(points[i][incorrect_ndx, 0], points[i][incorrect_ndx, 1], 'o', color=clist[i])\n\n plt.axis('equal')\n plt.show()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Inorder traversal of graph blocks. Yields block and its depth.
def iterblocks(self): def traverse(b, depth): if type(b) is not tuple: return t, v = b v_type, v_start = t if v_type == 'block': yield v, depth else: for block in v: for y in traverse(block, depth+1): yield y for v in self.sortedvertices(): for y in traverse(v, 1): yield y
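A sketch of the nested vertex structure this generator expects, assuming it is available as a module-level function iterblocks(self); the ((kind, start), payload) tuple layout is inferred from the unpacking in the code, and the concrete values are invented:

import types

# A 'block' vertex is yielded with its depth; any other kind is treated as a
# container whose payload is a list of nested vertices.
leaf_a = (("block", 0), "block-A")
leaf_b = (("block", 8), "block-B")
group = (("group", 0), [leaf_a, leaf_b])

graph = types.SimpleNamespace(sortedvertices=lambda: [group])

for block, depth in iterblocks(graph):
    print(depth, block)
# prints: 2 block-A, then 2 block-B (children of a top-level group sit at depth 2)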
[ "def iterateDepthFirst(self):\r\n gen = self.__iterateDepthFirst(self.root)\r\n for n in gen:\r\n yield n", "def _process_graph(graph, visited, inferred_types, backend):\n for i in range(graph.exit_index()):\n block = graph.block(i)\n if block not in visited:\n visited.add(block)\n _process_block(block, visited, inferred_types, backend)", "def traverse_depthwise(self, flag=None):\n queue = deque([self]) \n while len(queue) != 0:\n node = queue.popleft()\n if node.has_children():\n for child in node.get_children():\n if child is not None:\n queue.append(child)\n if flag is not None:\n if node.is_marked(flag):\n yield node\n else:\n yield node", "def walk(self) -> Iterator[GraphNodeT]:\n\n visited: Set[str] = set()\n visitable: List[str] = list()\n not_visited = {\n key: copy.deepcopy(value) for key, value in self._dependencies.items()\n }\n\n # Mark all nodes without dependencies as immediately-visitable.\n for name, dependencies in not_visited.items():\n if not dependencies:\n visitable.append(name)\n\n # Remove all nodes that are now visitable from not_visited.\n for name in visitable:\n not_visited.pop(name, None)\n\n # Iteratively yield the next visitable GraphNode, moving elements from\n # not_visited to visitable as their dependencies are visited.\n while visitable:\n visited_name = visitable.pop(0)\n yield self._nodes[visited_name]\n\n visited.add(visited_name)\n not_visited.pop(visited_name, None)\n for not_visited_name, dependencies in not_visited.items():\n # Mark this not_visited element as visitable if we have just\n # visited its last outstanding dependency.\n if dependencies:\n dependencies.discard(visited_name)\n if not dependencies:\n visitable.append(not_visited_name)\n\n # Indicate that walking completed unsuccessfully, reporting the\n # GraphNodes that were not visited.\n if not_visited:\n raise GraphWalkError(dict(not_visited))", "def tree(self, block_type = None, follow_all = True):\n # yield self\n if not block_type:\n yield self\n elif isinstance(self, block_type):\n yield self\n elif not follow_all:\n return # don't recurse further\n\n # yield tree attached to each child\n for child in self.getRefs():\n for block in child.tree(block_type = block_type, follow_all = follow_all):\n yield block", "def __iter__(self):\n\n v = self.root\n if v is None:\n return\n while True:\n while v.left is not None:\n v = v.left\n k = self.splay(v)\n if k.right is not None:\n v = k.right\n yield k\n else:\n yield k\n break", "def breadth_first_traversal(self):\n from que_ import QueueStructure\n tree_queue = QueueStructure()\n if self._root:\n current_node = self._root\n tree_queue.enqueue(current_node)\n while len(tree_queue):\n current_node = tree_queue.dequeue()\n yield current_node._data\n if current_node._lchild:\n tree_queue.enqueue(current_node._lchild)\n if current_node._rchild:\n tree_queue.enqueue(current_node._rchild)", "def get_nodes_po(self):\r\n\r\n\t\tnode_stack = [(self.root, 0)]\r\n\r\n\t\twhile len(node_stack) > 0:\r\n\t\t\tyield node_stack[-1]\r\n\t\t\tnode, indent = node_stack.pop()\r\n\r\n\t\t\tfor child in node.children[::-1]:\r\n\t\t\t\tnode_stack.append((child,indent + 1))", "def preOrder(G,initial_vertex = whole_graph):\n for v,l,t in search(G,initial_vertex):\n #print('ingoing: '+str(v)+' outgoing: '+str(w)+' leader: '+str(l))\n yield v,l,t", "def depth_first(node):\n# --------------------------------------------------------------------\n yield node\n for n in node.children:\n for m in depth_first(n):\n yield m\n return", "def inorderIterative(self):\n stack 
= [] # Wir benutzen den Python eigenen Stack: eine Liste.\n\n n = self._head._right\n\n while (n is not self._sentinal):\n stack.append(n)\n n = n._left\n\n while stack:\n n = stack.pop()\n self.printnode(n)\n n = n._right\n while (n is not self._sentinal):\n stack.append(n)\n n = n._left", "def tree_item_iterator(items):\r\n structure = {}\r\n first_level = False\r\n for previous, current, next in previous_current_next(items):\r\n \r\n current_level = getattr(current, 'depth')\r\n\r\n if previous:\r\n structure['new_level'] = (getattr(previous,\r\n 'depth') < current_level)\r\n else:\r\n first_level = current_level\r\n structure['new_level'] = True\r\n\r\n if next:\r\n structure['closed_levels'] = range(current_level,\r\n getattr(next,\r\n 'depth'), -1)\r\n else:\r\n # All remaining levels need to be closed\r\n structure['closed_levels'] = range(current_level - first_level, -1, -1)\r\n\r\n # Return a deep copy of the structure dict so this function can\r\n # be used in situations where the iterator is consumed\r\n # immediately.\r\n yield current, copy.deepcopy(structure)", "def test_in_order_traversal_iterate(full_bst):\n bf = full_bst._in_order_iterate()\n assert next(bf) == 1\n assert next(bf) == 3\n assert next(bf) == 4\n assert next(bf) == 6\n assert next(bf) == 7\n assert next(bf) == 8\n assert next(bf) == 10\n assert next(bf) == 13\n assert next(bf) == 14", "def _pre_order(self, current_node):\n yield current_node._data\n if current_node._lchild:\n for node_data in self._pre_order(current_node._lchild):\n yield node_data\n if current_node._rchild:\n for node_data in self._pre_order(current_node._rchild):\n yield node_data", "def traverse(self, traversal_order):\n node_list = []\n if traversal_order == TreeTraversalOrder.PRE_ORDER:\n self._preorder_traversal(node_list, self.root)\n elif traversal_order == TreeTraversalOrder.IN_ORDER:\n self._inorder_traversal(node_list, self.root)\n elif traversal_order == TreeTraversalOrder.POST_ORDER:\n self._postorder_traversal(node_list, self.root)\n elif traversal_order == TreeTraversalOrder.LEVEL_ORDER:\n self._level_order_traveral(node_list, self.root)\n for node in node_list:\n yield node", "def walk(tree, depth=0):\n tree = tree or self.tree\n for idx,node in enumerate(tree):\n if isinstance(node, list):\n for idx, subnode, nextdepth in walk(node, depth+1):\n yield idx, subnode, nextdepth\n else:\n yield idx, node, depth", "def preorder(self):\n if not self.is_empty():\n for p in self._preorder(self.root()):\n yield p", "def _recursive_traversal_bfs(node, linked_by=None,\n max_depth=float('inf'),\n with_links=True, in_search=False, predicate=None):\n\n if predicate is None:\n predicate = lambda x: True\n\n iterator_queue = IteratorChain([(0, node.v_name, node)])\n #iterator_queue = iter([(0, node.v_name, node)])\n start = True\n visited_linked_nodes = set([])\n\n while True:\n try:\n depth, name, item = next(iterator_queue)\n full_name = item._full_name\n if start or predicate(item):\n if full_name in visited_linked_nodes:\n if in_search:\n # We need to return the node again to check if a link to the node\n # has to be found\n yield depth, name, item\n elif depth <= max_depth:\n if start:\n start = False\n else:\n if in_search:\n yield depth, name, item\n else:\n yield item\n\n if full_name in linked_by:\n visited_linked_nodes.add(full_name)\n\n if not item._is_leaf and depth < max_depth:\n child_iterator = NaturalNamingInterface._make_child_iterator(item,\n with_links,\n current_depth=depth)\n iterator_queue.add(child_iterator)\n 
#iterator_queue = itools.chain(iterator_queue, child_iterator)\n except StopIteration:\n break", "def levelorderIterative(self):\n from collections import deque\n queue = deque([]) # Wir benutzen die Python eigene Queue, eine deque.\n\n queue.append(self._head._right)\n\n while queue:\n n = queue.popleft()\n self.printnode(n)\n if n._left is not self._sentinal:\n queue.append(n._left)\n if n._right is not self._sentinal:\n queue.append(n._right)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return list of files matching pattern in base folder.
def find_files(base, pattern): return [n for n in fnmatch.filter(os.listdir(base), pattern) if os.path.isfile(os.path.join(base, n))]
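A usage sketch, assuming the definition above is in scope; /tmp and the *.log pattern are placeholders:

logs = find_files("/tmp", "*.log")
print(logs)   # bare file names such as ['a.log'], not joined paths

Unlike the recursive os.walk- and glob-based variants among the negatives below, this helper only inspects the top level of base and filters out sub-directories.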
[ "def find_files(base, pattern):\n return [n for n in fnmatch.filter(os.listdir(base), pattern) if os.path.isfile(os.path.join(base, n))]", "def find_files(pattern, base='.'):\n regex = re.compile(pattern) # 为了效率而编译了它\n matches = list()\n for root, dirs, files in os.walk(base):\n for f in files:\n if regex.match(f):\n matches.append(path.join(root, f))\n return matches", "def _list_files(base_path: str, extension: str):\n if base_path.endswith(os.sep):\n base_path = base_path[:1]\n\n search_path = os.path.join(base_path, \"**\", f\"*.{extension}\")\n return glob.glob(search_path, recursive=True)", "def all_files(pattern, search_path, pathsep=os.pathsep):\r\n for path in search_path.split(pathsep):\r\n for match in glob.glob(os.path.join(path, pattern)):\r\n yield match", "def recfind(sdir: str, pattern: str) -> List[str]:\n file_paths = []\n\n for root, dir_names, file_names in os.walk(sdir):\n for file_name in file_names:\n if re.match(pattern, file_name):\n file_path = os.path.join(root, file_name)\n file_paths.append(file_path)\n else:\n continue\n\n return file_paths", "def recursiveglob(root,pattern):\n matches = []\n for root, dirnames, filenames in os.walk(root):\n for filename in fnmatch.filter(filenames, pattern):\n matches.append(os.path.join(root, filename))\n return matches", "def searchFiles(self):\n list_of_files = []\n for dirname, dirnames, filenames in os.walk(self.path):\n for filename in filenames:\n if filename.startswith(self.prefix) and filename.endswith(self.suffix):\n list_of_files.append(os.path.join(self.path, filename))\n return list_of_files", "def find_files(folder, to_match):\n assert (to_match != None), 'utils.find_files got invalid argument'\n list_of_files = []\n for fn in os.listdir(folder):\n m = re.match(to_match, fn)\n if m:\n path = folder + fn\n list_of_files.append(path)\n return list_of_files", "def find_files_like(datapath, pattern):\n # No need to import these at module level\n from os import listdir\n import re\n\n # Traverse file list and look for `pattern`\n filenames = []\n pattern = re.compile(pattern)\n for file in listdir(datapath):\n if pattern.search(file):\n filenames.append(file)\n\n return filenames", "def get_files(dir_path, pattern=\"\"):\n if os.path.isdir(dir_path):\n archives = []\n for dirpath, dirnames, filenames in os.walk(dir_path):\n for filename in filenames:\n if re.search(pattern, filename):\n archives.append(os.path.join(dirpath, filename))\n return archives\n else:\n raise FileUtilsError(dirErrorMsg + dir_path)", "def files(folderpath, pattern=\"*\"):\n return [f for f in folderpath.glob(pattern) if f.is_file()]", "def find(sdir: str, pattern: str) -> List[str]:\n file_paths = []\n\n for item in os.listdir(sdir):\n path = os.path.join(sdir, item)\n if os.path.isfile(path) and re.match(pattern, item) is not None:\n file_paths.append(path)\n else:\n continue\n\n return file_paths", "def _find_paths(dir_path, file_pattern):\n pattern = os.path.join(dir_path, \"**\", file_pattern)\n return glob.glob(pattern, recursive=True)", "def _get_files(self):\n\n glob_path = os.path.join(self.path, self.mask)\n return glob.glob(glob_path)", "def find_files(self, file_extension):\n return glob(f\"{self.input_dir}/*.{file_extension}\")", "def _find_files(root_dir, search):\n matches = []\n for root, _, filenames in os.walk(os.path.normpath(root_dir)):\n for filename in fnmatch.filter(filenames, search):\n matches.append(os.path.join(root, filename))\n return matches", "def FindMatchingFiles(pattern):\n path, _ = os.path.split(pattern)\n 
if path == \"\":\n path = \".\" # os.listdir fails with empty path\n def match(s): return s.startswith(pattern) and s.endswith(\".h5\")\n return list(filter(match, os.listdir(path)))", "def scan_paths(root_dir, pattern):\n\n root_dir = os.path.abspath(root_dir)\n\n pattern = re.compile(pattern)\n\n for root, dirs, files in scandir.walk(root_dir, followlinks=True):\n for name in files:\n\n # Match the extension.\n if pattern.search(name):\n yield os.path.join(root, name)", "def get_files_with_patterns(directory: str, match_patterns: List[str]) -> List[str]:\n index_files = []\n for pattern in match_patterns:\n index_files += list(glob.glob(os.path.join(directory, '**', pattern), recursive=True))\n return list(set(index_files))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates an instance of the NavBar class and associates it with this class.
def create_nav_bar(self): self.nav_bar = NavBar(self)
[ "def __init__(self):\n super(_MenuBar, self).__init__()\n\n # Create the sliding menu button\n self._menu_button = QPushButton(\"Show/hide menu\")\n self._menu_button.clicked.connect(lambda _: get_manager().menu.toggle())\n\n # Create the exit button\n self._exit_button = QPushButton(\"Exit application\")\n self._exit_button.clicked.connect(lambda _: QApplication.closeAllWindows())\n\n # Add all widgets and set the layout\n self._layout = QHBoxLayout()\n self._layout.addWidget(self._menu_button)\n self._layout.addWidget(self._exit_button)\n self.setFixedHeight(MENU_BAR_HEIGHT)\n self.setContentsMargins(0, 0, 0, 0)\n self.setLayout(self._layout)", "def navbar() -> html.Div:\r\n navbar = html.Div([\r\n html.Nav([\r\n html.Ul([\r\n # dcc.Link([\r\n html.A([\r\n html.Span([\"Crypto Trading Dashboard\"], className='link-text')\r\n ], className='nav-link')\r\n # ],href=\"/\", className='nav-item')\r\n ], className='navbar-nav')\r\n\r\n ], className='navbar')\r\n ])\r\n\r\n return navbar", "def _add_navbar(self):\n return dbc.NavbarSimple(\n id=\"navbarsimple-main\",\n children=[\n dbc.NavItem(dbc.NavLink(\n C.REPORT_TITLE,\n external_link=True,\n href=\"/report\"\n )),\n dbc.NavItem(dbc.NavLink(\n \"Comparisons\",\n active=True,\n external_link=True,\n href=\"/comparisons\"\n )),\n dbc.NavItem(dbc.NavLink(\n \"Coverage Data\",\n external_link=True,\n href=\"/coverage\"\n )),\n dbc.NavItem(dbc.NavLink(\n \"Documentation\",\n id=\"btn-documentation\",\n ))\n ],\n brand=C.BRAND,\n brand_href=\"/\",\n brand_external_link=True,\n class_name=\"p-2\",\n fluid=True\n )", "def _createMenuBar(self):\n menuBar = self.menuBar()\n\n fileMenu = QMenu(\"&File\", self)\n menuBar.addMenu(fileMenu)", "def bar(self) -> _MenuBar:\n return self._menu_bar", "def __init__( # pylint: disable=too-many-arguments\r\n self,\r\n pages: List[Union[pn.layout.Panel, pn.pane.Pane,]],\r\n page_outlet: pn.layout.ListPanel,\r\n *args,\r\n css_classes: Optional[List[Optional[List[str]]]] = None,\r\n title: str = \"Navigation\",\r\n sizing_mode: str = \"stretch_width\",\r\n **kwargs,\r\n ):\r\n if css_classes:\r\n pnx.fontawesome.extend()\r\n menuitems = [\r\n NavigationButton(page, page_outlet=page_outlet, css_classes=css,)\r\n for page, css in zip(pages, css_classes,)\r\n ]\r\n else:\r\n menuitems = [NavigationButton(page=page, page_outlet=page_outlet,) for page in pages]\r\n\r\n # title = pnx.SubHeader(title, text_align=text_align)\r\n title = pn.layout.HSpacer(height=20)\r\n super().__init__(\r\n title, *menuitems, sizing_mode=sizing_mode, *args, **kwargs,\r\n )\r\n\r\n page_outlet.clear()\r\n page_outlet.append(pages[0])", "def createMenus(self):\n\t\tself.fileMenu = self.menuBar().addMenu(\"&File\")\n\t\tself.editMenu = self.menuBar().addMenu(\"&Edit\")\n\t\tself.helpMenu = self.menuBar().addMenu(\"&Help\")", "def create_widget(self):\n # Qt behaves better when creating the menu bar without a parent.\n self.widget = QMenuBar()", "def initMenuBar(self):\n\n menuBar = self.menuBar()\n\n self.fileMenu(menuBar)\n self.editionMenu(menuBar)\n self.scenarioMenu(menuBar)", "def createMenu(self):\r\n self.menuFile = self.menuBar().addMenu(\"&File\")\r\n self.menuFile.addAction(self.actionQuit)\r\n self.menuFile.addAction(self.actionImportFile)\r\n self.menuFile.addAction(self.actionExportFile)\r\n\r\n self.menuContacts = self.menuBar().addMenu(\"&Contact\")\r\n self.menuContacts.addAction(self.actionNewContact)\r\n self.menuContacts.addAction(self.actionModContact)\r\n self.menuContacts.addAction(self.actionDelContact)\r\n 
self.menuContacts.addAction(self.actionDisplay)\r\n\r\n self.menuHelp = self.menuBar().addMenu(\"&?\")\r\n self.menuHelp.addAction(self.actionAbout)", "def as_html(self):\n tag = 'nav'\n classes = 'navbar'\n if self.style_inverse:\n classes = add_css_class(classes, 'navbar-inverse')\n if self.style_context:\n classes = add_css_class(classes, 'context-navbar')\n if self.style_static:\n classes = add_css_class(classes, 'navbar-static')\n else:\n classes = add_css_class(classes, 'navbar-top')\n classes = add_css_class(classes, 'pmd-navbar')\n classes = add_css_class(classes, 'pmd-z-depth')\n attrs = {'class': classes}\n content = self.render_content()\n content = text_concat(content, '<div class=\"pmd-sidebar-overlay\"></div>')\n return render_tag(tag, attrs=attrs, content=mark_safe(content), )", "def __init__(self):\n this = _coin.new_SoVRMLNavigationInfo()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def custom_navbar_url(request):\n\n from django.conf import settings\n\n context = {}\n try:\n context['upload_url'] = settings.NAVBAR_UPLOAD_URL\n except AttributeError:\n context['upload_url'] = None\n\n try:\n context['ext_resource_url'] = settings.NAVBAR_EXTERNAL_RESOURCES_URL\n except AttributeError:\n context['ext_resource_url'] = None\n\n try:\n context['doc_report_url'] = settings.NAVBAR_DOCUMENTS_REPORT_URL\n except AttributeError:\n context['doc_report_url'] = None\n\n try:\n context['contact_url'] = settings.NAVBAR_CONTACT_URL\n except AttributeError:\n context['contact_url'] = None\n\n try:\n context['biblio_url'] = settings.NAVBAR_BIBLIOGRAPHY_URL\n except AttributeError:\n context['biblio_url'] = None\n\n try:\n context['profile_url'] = settings.NAVBAR_PROFILE_URL\n except AttributeError:\n context['profile_url'] = None\n\n try:\n context['contributions_url'] = settings.NAVBAR_CONTRIBUTIONS_URL\n except AttributeError:\n context['contributions_url'] = None\n\n try:\n context['title_bims_abbr'] = settings.TITLE_BIMS_ABBREVIATION\n except AttributeError:\n context['title_bims_abbr'] = None\n\n try:\n context['title_bims_long'] = settings.TITLE_BIMS_LONG\n except AttributeError:\n context['title_bims_long'] = None\n\n return context", "def __init__(self):\n this = _coin.new_SoScXMLNavigation()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n _aui.AuiToolBar_swiginit(self,_aui.new_AuiToolBar(*args))", "def register(self, nav):\n try:\n nav = nav()\n except TypeError:\n pass\n\n if not isinstance(nav, NavType):\n raise TypeError(\"You can only register a Nav not a %r\" % nav)\n\n if nav.nav_group not in self._groups:\n self._groups[nav.nav_group] = []\n\n if nav not in self._groups[nav.nav_group]:\n self._groups[nav.nav_group].append(nav)", "def __init__(self, *args):\n _aui.AuiToolBarItem_swiginit(self,_aui.new_AuiToolBarItem(*args))", "def navbar(context, for_page=None):\n page = context.get('page', None)\n\n if for_page and not for_page.is_leaf_node():\n navbar = Page.objects.get_navbar(for_page=for_page)\n else:\n # Return root level pages\n navbar = Page.objects.get_navbar(for_page=None)\n\n return {\n 'page': page,\n 'navbar': navbar,\n }", "def render_base_layout(self):\n navbar = dbc.NavbarSimple(\n brand=self.title,\n dark=True,\n color=\"primary\",\n sticky=\"top\",\n id=self.navbar_id,\n style={\"marginBottom\": \"1em\"},\n )\n\n body = dbc.Container([], id=self.body_id)\n\n if self.credits:\n credit = dbc.NavLink(\n \"created with dasher\",\n className=\"small\",\n 
href=\"https://github.com/mfaafm/dasher\",\n external_link=True,\n )\n navbar.children = [credit]\n return navbar, body", "def createOldStyleMenuBar(self):\n mbar = MenuBar()\n nMenus = getMenuCount()\n i = 0\n while i < nMenus:\n mbar.add(createOldStyleMenu(getMenu(i)))\n i += 1\n return mbar" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates an instance of the TextSection class and associates it with this class.
def create_text_section(self):
    y_scrollbar = ScrollBarComponent(self.root)
    y_scrollbar.set_options(RIGHT, Y)
    self.text_section = TextSection(self, yscrollcommand=y_scrollbar)
[ "def add_text(self, text, *args, **kwargs):\n # Pull down some kwargs.\n section_name = kwargs.pop('section', None)\n\n # Actually do the formatting.\n para, sp = self._preformat_text(text, *args, **kwargs)\n\n # Select the appropriate list to update\n if section_name is None:\n relevant_list = self.story\n else:\n relevant_list = self.sections[section_name]\n\n # Add the new content to list.\n relevant_list.append(para)\n relevant_list.append(sp)\n return", "def __init__(self, experiment):\r\n self.experiment = experiment\r\n self.text = TextHeader(experiment=experiment)", "def createTextNode(text):\n return Text(text)", "def __init__(self, title, content, numbering=True):\n super(Section, self).__init__(content, title)\n self.numbering = numbering", "def __init__(self, text):\n super(LabelTextMime, self).__init__()\n\n self.text = text", "def addText(self, text):\n self.text = text", "def text(self, x, y, s, fontdict=None,\n withdash=False, **kwargs):\n default = {\n 'verticalalignment' : 'bottom',\n 'horizontalalignment' : 'left',\n #'verticalalignment' : 'top',\n 'transform' : self.transData,\n }\n\n # At some point if we feel confident that TextWithDash\n # is robust as a drop-in replacement for Text and that\n # the performance impact of the heavier-weight class\n # isn't too significant, it may make sense to eliminate\n # the withdash kwarg and simply delegate whether there's\n # a dash to TextWithDash and dashlength.\n if withdash:\n t = mtext.TextWithDash(\n x=x, y=y, text=s,\n )\n else:\n t = mtext.Text(\n x=x, y=y, text=s,\n )\n self._set_artist_props(t)\n\n t.update(default)\n if fontdict is not None: t.update(fontdict)\n t.update(kwargs)\n self.texts.append(t)\n t._remove_method = lambda h: self.texts.remove(h)\n\n\n #if t.get_clip_on(): t.set_clip_box(self.bbox)\n if kwargs.has_key('clip_on'): t.set_clip_box(self.bbox)\n return t", "def addTextIndexes(self, textStart, textEnd):\n self.textStart = textStart\n self.textEnd = textEnd", "def add_text(self, text: str) -> HTMLNode:\n text_node = HTMLText(text)\n self.append(text_node)\n\n return self # for chaining", "def __init__(self, bs, w=None, **kwargs):\n assert w is not None\n Text.__init__(self, bs, w=w, **kwargs)", "def new_section(self, doc, *args, **kwargs):\n\n section = Section(doc, *args, **kwargs)\n if section.identifier:\n if section.identifier in self.sections:\n print(f'section identifier {section.identifier!r} already used')\n else:\n self.sections[section.identifier] = section\n doc.sections.append(section)\n return section", "def create_text_control(text, font='Times New Roman'):\n\n raise NotImplementedError('Function create_text_control not implemented for current DCC!')", "def add_text_in_doc(self, text):\n self.text = text\n self.title_doc()", "def __init__(self, path=\"\", text=\"\"):\n\n if path is not \"\" and text is not \"\":\n print('Error! 
Only one between file path and text must be provided')\n exit(1)\n\n eng = spacy.load('en', disable=['ner'])\n tokenizer = English().Defaults.create_tokenizer(eng)\n\n if path is not \"\":\n try:\n with open(path, 'r') as f:\n self._text_read = f.read()\n self._text = eng(self._text_read)\n\n except FileNotFoundError:\n print('Can\\'t access the file specified by', path,\n ', please provide a valide path')\n\n else:\n self._text_read = text\n self._text = eng(text)\n\n self._doc = tokenizer(self._text_read) # spacy-doc object\n # with preprocessed file", "def __init__(self, text):\n\n self.text = text\n\n self.tokenize()", "def make_text(text: str) -> SubAnnotation:\n return SubAnnotation(\"text\", text)", "def add_item(self, text=None, style=None):\n item = self._parent.add_paragraph(text, style=style)\n item.level = self.level\n item.numId = self.numId\n return item", "def create_section(self, level: int, section: str) -> None:\n self.add_output(section)\n self.add_output(self.sections[level] * len(section.rstrip()), line_breaks=2)", "def add_text_entry(self, text, color, flashing, flash_mask, transition, transition_out, priority, key):\n # remove old text in case it has the same key\n self._text_stack[key] = TextStackEntry(\n text, color, flashing, flash_mask, transition, transition_out, priority, key)\n self._update_stack()", "def __init__(self):\n this = _coin.new_SoTextDetail()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the `NSImageView`_ that this object wraps.
def getNSImageView(self):
    return self._nsObject
[ "def native(self) -> Any:\n return self._widget._mgui_get_native_widget()", "def view(self, make_const=False):\n if make_const:\n return Image(image=_galsim.ConstImageView[self.dtype](self.image.view()),\n wcs=self.wcs)\n else:\n return Image(image=self.image.view(), wcs=self.wcs)", "def root_native_widget(self) -> Any:\n return self._widget._mgui_get_root_native_widget()", "def custom_view(self) -> discord.ui.View | None:\n return self._custom_view", "def current_diagram_view(session):\n component_registry = session.get_service(\"component_registry\")\n view = component_registry.get(UIComponent, \"diagrams\").get_current_view()\n\n # realize view, forces bounding box recalculation\n while Gtk.events_pending():\n Gtk.main_iteration()\n\n return view", "def get_widget(self):\n return self.displayWidget", "def null(cls):\n return GXMVIEW()", "def canvas_widget(self):\n return self.__canvas_widget", "def get_tk_image(self):\n tki = ImageTk.PhotoImage(self.img)\n # we need to keep a reference, otherwise gc would kick in by mistake\n self.__work_around = tki\n return tki", "def right_camera_view(self):\n if not hasattr(self, '__right_camera_view'):\n self.__right_camera_view = self.__load_image(self.right_camera_view_path)\n return self.__right_camera_view", "def widget(self):\n return self.overlay.widget", "def wximg(self):\n if self._wx is None:\n img = self.copy(channels=3, depth=8)\n self._wx = wx.ImageFromBuffer(img.width, img.height, img.array)\n return self._wx", "def currentView( self ):\n panel = self.currentPanel()\n if ( panel ):\n return panel.currentWidget()\n return None", "def imshow(self, X, **kwargs):\n if self.image is not None:\n self.image.remove()\n self.image = super().imshow(X, zorder=2, **kwargs)\n self.ax.figure.canvas.draw()\n return self.image", "def instance_view(self) -> Optional['outputs.VirtualMachineExtensionInstanceViewResponse']:\n return pulumi.get(self, \"instance_view\")", "def widget(self):\r\n\r\n return self.__widget", "def Clone(self) -> \"itkImageVD23_Pointer\":\n return _itkImagePython.itkImageVD23_Clone(self)", "def _get_viewer(self):\n if self._viewer is None:\n self._viewer = mujoco_py.MjViewer(self.sim)\n self._viewer.cam.fixedcamid = self._camera_ids[0]\n self._viewer.cam.type = mujoco_py.generated.const.CAMERA_FIXED\n self._viewer_reset()\n return self._viewer", "def Clone(self) -> \"itkImageVD24_Pointer\":\n return _itkImagePython.itkImageVD24_Clone(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the image in the view.
imagePath A file path to an image.
imageNamed The name of an image already loaded as a `NSImage`_ by the application.
imageObject A `NSImage`_ object.
def setImage(self, imagePath=None, imageNamed=None, imageObject=None):
    if imagePath is not None:
        image = NSImage.alloc().initWithContentsOfFile_(imagePath)
    elif imageNamed is not None:
        image = NSImage.imageNamed_(imageNamed)
    elif imageObject is not None:
        image = imageObject
    else:
        raise ValueError("no image source defined")
    self._nsObject.setImage_(image)
[ "def setImage(self, path):\n\t\tpass", "def setImage(self, image, normalize = None):\n \n self.viewer.setImage(image, normalize)\n self.updateCaption()", "def setImage(self, img, regions, sizes, image_id=...) -> None:\n ...", "def setImage(self, image):\n if type(image) is np.array:\n image = array2qimage(image)\n\n if type(image) is QPixmap:\n pixmap = image\n elif type(image) is QImage:\n pixmap = QPixmap.fromImage(image)\n else:\n raise RuntimeError(\"ImageViewer.setImage: Argument must be a QImage or QPixmap.\")\n if self.hasImage():\n self._pixmapHandle.setPixmap(pixmap)\n else:\n self._pixmapHandle = self.scene.addPixmap(pixmap)\n\n self.setSceneRect(QRectF(pixmap.rect())) # Set scene size to image size.\n\n # Add the mask layer\n self.mask_pixmap = QPixmap(pixmap.rect().width(), pixmap.rect().height())\n self.mask_pixmap.fill(QColor(0,0,0,0))\n self._overlayHandle = self.scene.addPixmap(self.mask_pixmap)\n\n # Add brush cursor to top layer\n self._cursorHandle = self.scene.addEllipse(0,0,self.brush_diameter,self.brush_diameter)\n\n # Add also X to the cursor for \"delete\" operation, and hide it by default only showing it when the\n # either the global drawing mode is set to ERASE or when CTRL is held while drawing\n self._deleteCrossHandles = (self.scene.addLine(0, 0, self.brush_diameter, self.brush_diameter),\n self.scene.addLine(0, self.brush_diameter, self.brush_diameter, 0))\n\n if self.current_painting_mode is not self.MODE_ERASE:\n self._deleteCrossHandles[0].hide()\n self._deleteCrossHandles[1].hide()\n\n self.updateViewer()", "def set_image_src(self, image_src):\n # load the image\n self.image_src = image_src\n self.image = simplegui.load_image(self.image_src)", "def setImage(self, image: 'SbImage') -> \"void\":\n return _coin.SoVRMLImageTexture_setImage(self, image)", "def set_image(self, image, scale = None):\n self._image = image\n if scale is not None:\n self.set_scale(scale)\n else:\n self.do_scale()\n self.draw()", "def set_image(self):\r\n self.sc.set_image()", "def SetAsOpenedImage(self, path):\n try:\n self._img = Image.open(path)\n except FileNotFoundError:\n if self._packedData != None:\n self._img = self._packedData\n else:\n print(\"WARNING: COULD NOT GET PACKED IMAGE DATA!\")", "def set_icon(self, image_name):\n self._icon = self._image[image_name]\n pygame.display.set_icon(self.get_icon())", "def set_img(arr):\n global tkImg, canvasImg, canvas\n tkImg = tk_img(arr)\n canvasImg = tk_imshow(canvas, tkImg)", "def updateImage(self):\n self.image = self.getImage(self.location, self.name, self.imageType)", "def logo_image(self, logo_image):\n self._logo_image = logo_image", "def update_image(window: tk.Tk, img: Image):\r\n\r\n window.display_image(img)", "def set_img_data(self, img_data):\n\n self._data = img_data\n self.update_window()\n self.update_icon()", "def setImageToCopy(self, imagePath):\n self.imageToCopy = imagePath", "def _new_image(self, msg):\n filepath = msg.data\n self.set_image(filepath)\n self.set_state(ImageViewer.STATE_IDLE)\n self.Refresh()", "def addImage(self, id, image):\n\t\tself.widgets.append(JPLImage(self.centralWidget, id, image))\n\t\t# Images don't get labels\n\t\tself.labels.append(None)", "def _setImagePath( self ):\n #cwd = os.getcwd()\n try:\n mypath = os.path.realpath( __file__ ) # endet mit /imagefactory.py\n # imagefactory.py entfernen:\n l = len( \"imagefactory.py\" )\n mypath = mypath[:-l]\n self._imagePath = mypath + \"images/\"\n\n # f = open( resourcepath )\n # #f = open( \"./resources.txt\", \"r\" )\n # lines = 
f.readlines()\n # for l in lines:\n # if l.startswith( \"imagepath\" ):\n # parts = l.split( \"=\" )\n # self._imagePath = parts[1][:-1] #truncate newline\n # f.close()\n # return\n except Exception as exc:\n print( \"ImageFactory._setImagePath(): failed open/read/close file ./resources.txt:\\n\\n\" + str(exc) )", "def select_path(self, path):\n try:\n newImg = PilImage.open(path).resize((300, 300))\n if platform == 'android':\n android_path = os.path.join(\n os.path.join(os.environ['ANDROID_PRIVATE'], 'app', 'images', 'kivy')\n )\n if not os.path.exists(os.path.join(android_path, 'default_identicon')):\n os.makedirs(os.path.join(android_path, 'default_identicon'))\n newImg.save(os.path.join(android_path, 'default_identicon', '{}.png'.format(\n self.kivy_state_obj.selected_address))\n )\n else:\n if not os.path.exists(os.path.join(self.image_dir, 'default_identicon')):\n os.makedirs(os.path.join(self.image_dir, 'default_identicon'))\n newImg.save(os.path.join(self.image_dir, 'default_identicon', '{0}.png'.format(\n self.kivy_state_obj.selected_address))\n )\n self.load_selected_Image(self.kivy_state_obj.selected_address)\n toast('Image changed')\n except Exception:\n toast('Exit')\n self.exit_manager()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The code below will need to be rewritten once the wordspider outputs CSV with examples and translations. At present it takes input from a file containing example sentences on new lines, splits each line into words, removes punctuation marks, and generates URLs based on the words.
def start_requests(self):
    with open('examples.csv', 'r') as file:
        fieldnames = []
        for i, l in enumerate(file):
            fieldnames.append(i)
    with open('examples.csv') as csv_file:
        reader = csv.DictReader(csv_file)
        urls = []
        baseurl = 'https://' + 'где-ударение.рф/в-слове-'
        for row in reader:
            sentence = row['example']
            sentence_list.append(sentence.lower())
            sentence = sentence.replace(',', '')
            sentence = sentence.replace('.', '')
            sentence = sentence.replace('!', '')
            sentence = sentence.replace('?', '')
            sentence = sentence.replace('—', '')
            sentence = sentence.replace('«', '')
            sentence = sentence.replace('»', '')
            sentence = sentence.replace(':', '')
            sentence = sentence.replace(';', '')
            sentence = sentence.replace('(', '')
            sentence = sentence.replace(')', '')
            sentence = sentence.replace('[', '')
            sentence = sentence.replace(']', '')
            sentence = sentence.replace('/', '')
            sentence = sentence.lower()
            words = sentence.split()
            # create list of only words that need stress
            targetwords = [word for word in words if needs_stress(word)]
            targetwords = set(targetwords)
            urls += [baseurl + word + '/' for word in targetwords]
    for url in urls:
        yield scrapy.Request(url=url, callback=self.parse)
[ "def parse_sentences():\n with open('questions.hyp', 'r') as rf, open('webpages.txt', 'w') as wf: # open input and output files\n for line in rf: # iterate over lines in input file (questions.hyp has the language model transcriptions)\n # get the relevant info from the line\n ls = line.split('(')\n question = ls[0][:-1]\n wavfile = ls[1].split(' ')[0]\n webpage, url = get_webpage(question) # get webpage and url\n wf.write(f'{wavfile}\\t{webpage}\\t{url}\\n') # print to file\n print('URLs written to webpages.txt')", "def target_urls_generator(input_csv):\n inputcsvWithoutExt = input_csv.replace(\".csv\", \"\")\n inputcsvWithoutExt = inputcsvWithoutExt.replace(\".CSV\", \"\")\n output_URLs_csv = \"%s_URLs.csv\" % inputcsvWithoutExt\n output_3cols_csv = \"%s_3cols.csv\" % inputcsvWithoutExt\n\n with open(output_URLs_csv, 'wb') as output_URLs_csvfile:\n output_URLs_csvwriter = csv.writer(output_URLs_csvfile, delimiter=',')\n with open(output_3cols_csv, 'wb') as output_3cols_csvfile:\n output_3cols_csvwriter = csv.writer(output_3cols_csvfile, delimiter=',')\n with open(input_csv, \"rb\") as input_csvfile:\n reader = csv.reader(input_csvfile, delimiter=\",\")\n for i, line in enumerate(reader):\n if DEBUG:\n print '{}: {}'.format(i, line)\n if len(line) > 3:\n if i > 0:\n DPCI = \"%s-%s-%s\" % (\n line[0].strip().zfill(3),\n line[1].strip().zfill(2),\n line[2].strip().zfill(4))\n else:\n continue\n else:\n DPCI = line[0].strip()\n\n # search_url = \"http://www.target.com/s?searchTerm=%s\" % DPCI\n s_json_url = \"http://tws.target.com/searchservice/item/\" \\\n \"search_results/v2/by_keyword?kwr=y&search_term=%s\" \\\n \"&alt=json&pageCount=24&response_group=Items&zone=mobile&offset=0\" % DPCI\n try:\n contents = requests.get(s_json_url).text\n res_json = json.loads(contents)\n URL = \"http://www.target.com/%s\" % \\\n res_json['searchResponse']['items']['Item'][0]['productDetailPageURL']\n reg_exp = re.findall(\"(.*)#\", URL)\n if len(reg_exp) > 0:\n URL = reg_exp[0]\n\n TCIN = \"\"\n reg_exp = re.findall(\"A-([0-9]+)\", URL)\n if len(reg_exp) > 0:\n TCIN = reg_exp[0]\n except:\n URL = \"\"\n TCIN = \"\"\n\n if len(URL) > 0:\n output_URLs_csvwriter.writerow([URL])\n output_3cols_csvwriter.writerow([DPCI, TCIN, URL])\n if DEBUG:\n print [DPCI, TCIN, URL]", "def preprocessing(company, lang):\n\n # get tweets\n tweets = np.array(execute(\"SELECT * FROM tweet WHERE searchterm = '@\" + company + \"'\"))\n tweets = tweets[:,2]\n\n # count retweets\n pattern = re.compile(\"^RT \")\n rt_tweets = [ tweet for tweet in tweets if pattern.match(tweet) ]\n\n # only lang tweets\n lang_tweets = []\n for tweet in rt_tweets:\n try:\n if detect(tweet) == lang:\n lang_tweets.append(tweet)\n except:\n continue\n\n # no urls\n url = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n no_url_tweets = [ re.sub(url, '', tweet) for tweet in lang_tweets ]\n\n # remove @ words\n no_arobas_tweets = [ re.sub(r\"([@?]\\w+)\\b\", '', text) for text in no_url_tweets ]\n\n # remove non-alphanumerical characters\n only_alphanum_tweets = [ re.sub(r'[^\\w]', ' ', text) for text in no_arobas_tweets ]\n\n # tokenizing\n tokenized_tweets = [ tweet.split(\" \") for tweet in only_alphanum_tweets ]\n\n # lower tweets and remove one char words\n lowered_tweets = [ [ word.lower() for word in text if len(word) > 1 ] for text in tokenized_tweets ]\n \n # remove stopwords\n stopwords = open(\"./stopwords\").read().split(\"\\n\")\n stopwords += [\"mon\", \"tue\", \"wed\", \"thu\", \"fri\", \"sat\", \"sun\", 
\n \"jan\", \"feb\", \"mar\", \"apr\", \"may\", \"jun\", \"jul\", \"aug\", \"sep\", \"oct\", \"nov\", \"dec\",\n \"amp\", \"rt\", \"https\"]\n filtered_tweets = [ [ word for word in text if word not in stopwords ] for text in lowered_tweets ]\n\n # isolate bigrams\n bigrams = mark_bigrams(filtered_tweets)\n\n # reduce to one list of words\n flat_text_bigrams = [ word for tweet in bigrams for word in tweet ]\n flat_text = [ word for tweet in filtered_tweets for word in tweet ]\n\n # get frequency dictionary\n frequ = collections.Counter(flat_text_bigrams).most_common()\n\n # return format\n # * name company\n # * number tweets\n # * nb retweet\n # * language chosen\n # * nb tweet in chosen language\n # * nb words\n # * nb unique words\n data = (company, len(tweets), len(rt_tweets), lang, len(lang_tweets), len(flat_text_bigrams), len(frequ), filtered_tweets)\n\n return data", "def get_sentence(filename):\n\n words = []\n with open(filename) as corp:\n for l in corp:\n l = l.translate(None, \"\\\"#$%&'()*+,-/:;<=>@[\\\\]^_`{|}~'\")\n words += l.strip().lower().split()\n words.reverse()\n buf = []\n while len(words):\n buf.append(words.pop())\n if buf[-1].endswith(\".\")\\\n or buf[-1].endswith(\"?\")\\\n or buf[-1].endswith(\"!\"):\n yield buf\n buf = []", "def fetch_words(url):\n with open(url) as story:\n story_words = []\n for line in story:\n line_words = line.split()\n for word in line_words:\n story_words.append(word)\n story.close()\n return story_words", "def main(url):\n words = fetch_words(url )\n print_items(words)", "def speech_urls(sub_pages_url):\n \n import urllib2,sys\n from bs4 import BeautifulSoup\n\n #Base Page\n soup = BeautifulSoup(urllib2.urlopen(sub_pages_url).read())\n\t\n #Speech URLs\n content = soup.find(\"div\", {\"class\":\"view-content\"})\n speeches = [\"\".join(x.findAll(\"a\")) for x in content.findAll(href=True)]\n \n base_url = \"http://www.whitehouse.gov\"\n\n try:\n f=open('speechurls.csv', 'a')\n for link in content.findAll('a', href=True):\n ext = link['href']\n speech_url = base_url+ext\n f.write(u'%s\\n' % (speech_url))\n finally:\n f.close()\n\n \"\"\"\n for link in content.findAll('a', href=True):\n ext = link['href']\n print base_url+ext\n \"\"\"\n\n #print speeches", "def extract_urls(text):\n for word in WORD_SPLIT_RE.split(text):\n if not ('.' 
in word or ':' in word):\n continue\n # Deal with punctuation.\n lead, middle, trail = '', word, ''\n for punctuation in TRAILING_PUNCTUATION:\n if middle.endswith(punctuation):\n middle = middle[:-len(punctuation)]\n trail = punctuation + trail\n for opening, closing in WRAPPING_PUNCTUATION:\n if middle.startswith(opening):\n middle = middle[len(opening):]\n lead = lead + opening\n # Keep parentheses at the end only if they're balanced.\n if (middle.endswith(closing) and\n middle.count(closing) == middle.count(opening) + 1):\n middle = middle[:-len(closing)]\n trail = closing + trail\n # Yield the resulting URL.\n if SIMPLE_URL_RE.match(middle):\n yield middle", "def extract_next_links(self, url_data):\r\n\r\n\r\n # Ban non-text/HTML type documents\r\n try:\r\n if not re.search(r\"text\", url_data[\"content_type\"]):\r\n return []\r\n except TypeError as e:\r\n return []\r\n\r\n # use relevant url depending on redirection\r\n url = \"\"\r\n if url_data[\"is_redirected\"]:\r\n url = url_data[\"final_url\"]\r\n else:\r\n url = url_data[\"url\"]\r\n\r\n # some final_urls are offsite\r\n if not \".ics.uci.edu\" in url:\r\n return []\r\n\r\n # Analytic #3a: list of downloaded URLs\r\n self.downloaded_urls.add(url)\r\n\r\n\r\n # Analytic #1: subdomains\r\n self.visited_subdomains[urlparse(url).netloc] += 1\r\n\r\n outputLinks = []\r\n\r\n # get document content\r\n try:\r\n doc = BeautifulSoup(url_data[\"content\"], features='lxml')\r\n except lxml.etree.ParserError as e:\r\n print(f\"{type(e)} ({url_data['url']}):\\n{e}\", file=self.log_file)\r\n return outputLinks\r\n except ValueError as e:\r\n print(f\"{type(e)} ({url_data['url']}):\\n{e}\", file=self.log_file)\r\n return outputLinks\r\n\r\n a_tags = doc.find_all('a', href=True)\r\n for a_tag in a_tags:\r\n href = a_tag[\"href\"]\r\n if href == '' or href[0] != '#':\r\n absolute = urljoin(url, href)\r\n outputLinks.append(absolute)\r\n\r\n # get document text\r\n doc_text = doc.get_text()\r\n # tokenization\r\n doc_words = self.tokenize(doc_text)\r\n\r\n # Analytic #4: Longest page in terms of words\r\n len_doc_words = len(doc_words)\r\n if self.max_words < len_doc_words:\r\n self.max_words = len_doc_words\r\n self.url_of_max_words = url\r\n\r\n\r\n # Analytic #5: 50 most common words\r\n for word in self.tokenize(doc_text):\r\n if self.is_not_stop_word(word):\r\n self.words[word] += 1\r\n\r\n return outputLinks", "def Extraction (self):\n with open(self.corpus, 'r') as f:\n line = True\n while line:\n line = f.readline()\n if TAG_START_PAGE in line:\n line = f.readline()\n if ':' not in line:\n #valid page\n word = line[line.index(TAG_START_TITLE) + len(TAG_START_TITLE):line.index(TAG_END_TITLE)] \n #loop until found start tag\n while TAG_START_TRAD not in line and TAG_END_PAGE not in line:\n line = f.readline ()\n# print (line)\n if TAG_END_PAGE in line:\n continue\n #Now start extracting traductions\n while line.strip() != '':\n if line.startswith(TAG_START_LANG) and TAG_END_LANG in line:\n lang = line[len(TAG_START_LANG):line.index(TAG_END_LANG)]\n if '|' in lang:\n lang = lang[:lang.index('|')]\n #first hyper filter\n line = re.sub(HYPER_FILTER,']]',line)\n #traductions extraction\n trad = [t[2:-2] for l in line.split(',') for t in re.findall(PATTERN_TRAD, l) if len(t.split()) > 0]\n #fine filter\n traductions = []\n for t in trad: \n if t.startswith('[['):\n t = t[2:]\n if ']]' in t:\n while ']]' in t and '[[' in t:\n traductions.append(t[:t.index(']]')])\n t = t[t.index('[[')+2:]\n if ']]' in t:\n 
traductions.append(t[:t.index(']]')])\n elif '[[' in t:\n traductions.append(t[t.index('[[')+2:])\n else:\n traductions.append(t)\n else:\n traductions.append(t) \n #clear non-traductions\n for t in traductions:\n for exclude in self.exclude_Tags :\n if exclude in t:\n traductions.remove(t)\n break\n print (word, self.lang, lang, traductions)\n with open(self.csv, 'a') as csv:\n for t in traductions:\n if len(t.strip()) > 0:\n line = ''.join([self.lang, SEP_CSV, word, SEP_CSV, lang, SEP_CSV, t]) + '\\n'\n csv.write (line)\n line = f.readline ()\n continue", "def fetch_words(url):\n # This is function docstring which is documentation for function, modules and scripts\n story= urlopen(url)\n story_words= []\n\n for line in story:\n line_words = line.decode('utf8').split()\n for word in line_words:\n story_words.append(word)\n\n story.close()\n return story_words", "def parse_all(driver):\n url_lst = input(\"Enter name of file with .txt extension with list of urls: \")\n data = pd.read_csv(url_lst,header=None,names=['url']) #text file containing a list of the scraped urls (should be in same directory)\n file_name = input(\"Input the file name with .txt extension you wish to store abstracts in: \")\n file = open(file_name,'w')\n\n max_iters = len(data) #total number of scraped urls to be parsed\n print(\"The parser will parse: \" + str(max_iters) + \" urls.\")\n\n for i in range(0,max_iters):\n print('On url ',i)\n driver.refresh()\n time.sleep(2)\n urli = str(extractor(data.iloc[i,0],3))\n file.write(urli)\n file.write('\\n')\n driver.quit()\n\n return file_name", "def speech_urls(sub_pages_url):\n \n import urllib2,sys\n from bs4 import BeautifulSoup\n\n #Base Page\n soup = BeautifulSoup(urllib2.urlopen(sub_pages_url).read())\n\t\n #Speech URLs\n content = soup.find(\"div\", {\"class\":\"view-content\"})\n speeches = [\"\".join(x.findAll(\"a\")) for x in content.findAll(href=True)]\n \n base_url = \"http://www.whitehouse.gov\"\n\n try:\n f=open('speechurls.csv', 'a')\n for link in content.findAll('a', href=True):\n ext = link['href']\n speech_url = base_url+ext\n f.write(u'%s\\n' % (speech_url))\n finally:\n f.close()", "def formatWordUrl(inputWord):\n url = 'https://www.thesaurus.com/browse/'\n url = url + inputWord.strip().lower().replace(' ', '%20')\n return url", "def learn_domain_sentiment_words(articles, _filename):\n # Invers freq = log(Anzahl docs / Anzahl docs mit Term)\n sentiment_words = pd.read_csv(articleAnalysis.lex_folder + 'sentiment_lexicon.csv', skiprows=[1], header=0)\n pos_sentiment = [w for w in sentiment_words['Positive sentiment'].fillna('').tolist() if w != '']\n neg_sentiment = [w for w in sentiment_words['Negative sentiment'].fillna('').tolist() if w != '']\n doc_freq_pos = dict.fromkeys(pos_sentiment, 1)\n doc_freq_neg = dict.fromkeys(neg_sentiment, 1)\n for a in articles:\n new_pos, new_neg = find_conjunctions(a, pos_sentiment)\n new_neg_2, new_pos_2 = find_conjunctions(a, neg_sentiment)\n new_pos.update(new_pos_2)\n new_neg.update(new_neg_2)\n for w in new_pos:\n if w in doc_freq_pos:\n doc_freq_pos[w] += 1\n else:\n doc_freq_pos[w] = 1\n for w in new_neg:\n if w in doc_freq_neg:\n doc_freq_neg[w] += 1\n else:\n doc_freq_pos[w] = 1\n no_art = len(articles)\n print(\"Save dictionary at \", articleAnalysis.lex_folder)\n # write newly learned lexicon to file\n if not (\".csv\") in _filename:\n _filename = _filename + \".csv\"\n f = open(articleAnalysis.lex_folder + \"sentiment_lexicon_\" + _filename, \"w+\", newline='', encoding=\"UTF-8\"\n )\n file_writer = 
csv.writer(f, delimiter=',', quoting=csv.QUOTE_NONNUMERIC)\n file_writer.writerow([\"positive\", \"pos_iwf\", \"negative\", \"neg_iwf\"])\n for i in range(max(len(doc_freq_neg), len(doc_freq_pos))):\n if i < min(len(doc_freq_neg), len(doc_freq_pos)):\n file_writer.writerow(\n [list(doc_freq_pos)[i],\n np.log10(no_art / list(doc_freq_pos.values())[i]),\n list(doc_freq_neg)[i],\n np.log10(no_art / list(doc_freq_neg.values())[i])])\n else:\n if len(doc_freq_neg) == max(len(doc_freq_neg), len(doc_freq_pos)):\n file_writer.writerow(\n [\"\",\n \"\",\n list(doc_freq_neg)[i],\n np.log10(no_art / list(doc_freq_neg.values())[i])])\n else:\n file_writer.writerow(\n [list(doc_freq_pos)[i],\n np.log10(no_art / list(doc_freq_pos.values())[i]),\n \"\",\n \"\"])\n f.close()", "def generate_sentence_retrieval_training_set(path_to_infile, outfile, path_wiki_titles):\n\toutfile = open(outfile, \"w\")\n\tdocs = load_wiki_docs(path_to_infile, path_wiki_titles)\n\n\t\n\twith open(path_to_infile) as infile:\n\t\tfor line in infile:\n\t\t\tdata = json.loads(line)\n\t\t\tclaim = data[\"claim\"]\n\t\t\tlabel = data[\"label\"]\n\t\t\tevidence = data[\"evidence\"]\n\t\t\tpred_pages = data[\"predicted_pages\"]\n\n\t\t\t# if not verifiable, we don't have evidence and just continue\n\t\t\tif data[\"verifiable\"] == \"NOT VERIFIABLE\":\n\t\t\t\tcontinue\n\n\t\t\tpositive_examples = set()\n\t\t\tnegative_examples = set()\n\t\t\tgood_docs = set()\n\n\t\t\tfor evid in evidence:\n\t\t\t\tfor i,item in enumerate(evid):\n\t\t\t\t\tAnnotation_ID, Evidence_ID, Wikipedia_URL, sentence_ID = item\n\t\t\t\t\tWikipedia_URL = unicodedata.normalize(\"NFC\", Wikipedia_URL)\n\n\t\t\t\t\t# add positive example (only the first evidence)\n\n\t\t\t\t\tif i == 0:\n\t\t\t\t\t\tpositive_examples.add((claim, Wikipedia_URL, sentence_ID, 0))\n\n\t\t\t\t\t\t# sample negative evidence:\n\t\t\t\t\t\tneg = sample_negative_example(Wikipedia_URL, docs)\n\t\t\t\t\t\tif neg != -1:\n\t\t\t\t\t\t\t#negative_examples.add((claim, neg[0], neg[1], 2))\n\t\t\t\t\t\t\tfor n in neg:\n\t\t\t\t\t\t\t\tnegative_examples.add((claim, n[0], n[1], 2))\n\t\t\t\t\t\tgood_docs.add(Wikipedia_URL)\n\t\t\t\t\t\n\t\t\t\t\t# otherwise we just want to add the document so that we don't sample negative examples from a \"good\" document\n\t\t\t\t\telse:\n\t\t\t\t\t\tgood_docs.add(Wikipedia_URL)\n\n\n\n\t\t\t# sample negative examples from other predicted pages which are not in good evidence\n\t\t\tfor page in pred_pages:\n\t\t\t\tif page in docs:\n\t\t\t\t\tif page not in good_docs:\n\t\t\t\t\t\tneg = sample_negative_example(page, docs)\n\n\t\t\t\t\t\tif neg != -1:\n\t\t\t\t\t\t\t#negative_examples.add((claim, neg[0], neg[1], 2))\n\t\t\t\t\t\t\t# only add first three sentences (first few are most indicative given false positive wiki docs, especially the first sentence)\n\t\t\t\t\t\t\tfor n in neg[:3]:\n\t\t\t\t\t\t\t\tnegative_examples.add((claim, n[0], n[1], 2))\n\t\t\t# write positive and negative evidence to file\n\t\t\tfor ex in positive_examples:\n\t\t\t\tsent = docs[ex[1]][ex[2]].split(\"\\t\")[1]\n\t\t\t\toutfile.write(ex[0] + \"\\t\" + ex[1] + \"\\t\" + sent + \"\\t\" + str(ex[3]) + \"\\t\" + str(ex[2]) + \"\\t\" + label + \"\\n\")\n\t\t\tfor ex in negative_examples:\n\t\t\t\ttry:\n\t\t\t\t\tsent = docs[ex[1]][ex[2]].split(\"\\t\")[1]\n\t\t\t\t#\tprint (ex[1], ex[2], \"------\",ex[0], \"-------\", sent, \"------\", ex[3])\n\t\t\t\t\toutfile.write(ex[0] + \"\\t\" + ex[1] + \"\\t\" + sent + \"\\t\" + \"2\" + \"\\t\" + str(ex[2]) + \"\\t\" + label + 
\"\\n\")\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\toutfile.close()", "def tokenize(text):\n # regex for URLs to be replaced with a placeholder\n url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n detected_urls = re.findall(url_regex,text)\n for url in detected_urls:\n text = text.replace(url,\"urlplaceholder\")\n # the words in the text input to then be split, tokenised and lemmatized, removing stop words. \n words = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n tokens = [lemmatizer.lemmatize(word) for word in words if word not in stopwords.words(\"english\")]\n return tokens", "def __init__(self,domain_name,filename):\n \n self.domain_name = domain_name\n \n # either load search distribution from file or generated from search terms\n if filename[-3:] in [\"csv\",\"tsv\"]:\n fh = open(filename).readlines()\n domain_dist = {l[0]:(l[1],l[2]) for line in fh for l in [line.strip().split('\\t')]}\n \n else: # create from search terms\n \n # open file and convert input data to a list of InputMarker objects\n fh = open(filename).readlines()\n categories = fh[1].strip().split('\\t')\n input_markers = list()\n for line in fh[1:]:\n parameters = line.strip().split('\\t')\n polarity = 0\n if len(parameters) == 4: polarity = parameters[3]\n marker = InputMarker(domain_name,parameters[0],parameters[0][:3],parameters[1],\n parameters[2],int(polarity))\n input_markers.append(marker)\n \n # sort markers to make sure that negative markers are listed at the end to delete all\n # verses where they occur\n input_markers.sort(key=lambda x: (x.text,x.polarity))\n self.input_markers = input_markers\n \n # make dictionary with text as key and markers as values\n text_by_markers = collections.defaultdict(list)\n for input_marker in input_markers:\n text_by_markers[input_marker.text].append(input_marker)\n self.text_by_markers = text_by_markers\n \n # convenience functions to add or remove a given list of verses\n def addtodict(list_of_verses,form):\n for verse in list_of_verses:\n domain_verses[verse].append(form)\n def remfromdict(list_of_verses):\n for verse in list_of_verses:\n domain_verses[verse].clear()\n \n domain_verses_texts = dict()\n all_relevant_verses = set()\n text_verses = dict()\n \n # go through each text and extract the distribution\n for text in text_by_markers:\n t = reader.ParText(text,portions=range(40,67)) # only NT for the time being\n text_verses[text] = t.get_verseids()\n wordforms_by_verses = t.wordforms_verses()\n substrings_by_wordforms = t.substrings_wordforms()\n \n domain_verses = collections.defaultdict(list)\n markers = list()\n \n # go through each marker in the respective text\n for marker in text_by_markers[text]:\n #print(marker)\n string = marker.form\n category = marker.type\n polarity = marker.polarity\n if polarity == 0: markers.append(string) \n rel_verses = list()\n if category.lower() == \"w\":\n rel_verses = wordforms_by_verses[string]\n elif category.lower() == \"m\":\n string_wordforms = substrings_by_wordforms[string]\n for string_wordform in string_wordforms:\n rel_verses.extend(wordforms_by_verses[string_wordform])\n elif category.lower() == \"r\":\n verse_tuples = t.get_verses_strings()\n regex = re.compile(string)\n rel_verses = [v[0] for v in verse_tuples for m in [regex.search(v[1])] if m]\n else:\n print(\"Error: This marker category does not exist!\")\n \n if polarity == 1:\n remfromdict(rel_verses)\n else:\n addtodict(rel_verses,string)\n \n # normalize all extracted verses by the number of markers that are 
present in the \n # verse in comparison to the overall number of markers for the respective text.\n domain_verses_normalized = dict()\n for verse in domain_verses:\n if domain_verses[verse]:\n markers_in_verse = len(set(domain_verses[verse]))\n weight = markers_in_verse/len(markers)\n occurrences = len(domain_verses[verse])\n domain_verses_normalized[verse] = (occurrences,weight)\n \n domain_verses_texts[text] = domain_verses_normalized\n all_relevant_verses.update(domain_verses.keys())\n \n # normalize the individual values over all texts\n # the normalization is in terms of the number of overall number of texts that have\n # the resp. verse (marked in textcount)\n domain_dist = dict()\n for verse in all_relevant_verses:\n textcount = 0 \n weightvalue = 0\n markercount = 0\n for t in domain_verses_texts:\n if verse in text_verses[t]: # ignore texts that don't have this verse\n textcount += 1\n if verse in domain_verses_texts[t]:\n weightvalue += domain_verses_texts[t][verse][1]\n markercount += domain_verses_texts[t][verse][0]\n weight = weightvalue/textcount\n markercount = math.ceil(markercount/textcount)\n if textcount > 0: domain_dist[verse] = (markercount,weight)\n \n self.domain_dist = domain_dist", "def translate_data(file):\n\n #load the csv file\n df = pd.read_csv(file, index_col=None, header=0)\n for lang in LANGUAGES_TO_TRANSLATE:\n translator = Translator()\n translations = []\n #iterate on all the words to translate them\n for index, row in df.iterrows():\n word = row[0]\n translation = translator.translate(word, src=\"en\", dest=lang).text\n translations.append(translation)\n\n df[lang] = translations\n\n df.to_csv(file, index=None, header=0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the shared storage from the redis server for the service.
def get_shared_storage(self):
    shared_storage = self.redis_client.get(self.service_type)
    shared_storage = json.loads(shared_storage)
    validate_json(shared_storage, self.schema)
    return shared_storage
[ "def star_storage_service(self) -> StorageService:\n return self.storage_services[self.config.storage.star]", "def set_shared_storage(self, shared_storage):\n validate_json(shared_storage, self.schema)\n shared_storage = json.dumps(shared_storage)\n self.redis_client.set(self.service_type, shared_storage)\n return True", "def get_shared_json_data(self, shared_id):\n try:\n response = self.client.get_object(Bucket=iris_settings.IRIS_SESSION_BUCKET, Key=\"shared/\" + shared_id)\n return json.loads(response['Body'].read())\n except ClientError:\n raise IrisStorageError(\"Error getting data\")", "def _get_storage(self, for_write=False):", "def vault_storage_service(self) -> StorageService:\n return self.storage_services[self.config.storage.vault]", "def get_shared_item(self, shared_url):\n ret = None\n \n if self.id_share_pattern.match(shared_url):\n id_search = self.id_share_pattern.search(shared_url)\n if id_search:\n file_id = id_search.group(5)\n #print('Match:', shared_url, ', fileid:', file_id)\n ret = self.get_shared_item_by_id(file_id)\n else:\n ret = self.client.get_shared_item(shared_url).__dict__\n \n if self.DEBUG:\n print(ret)\n #print('ret:', ret)\n return ret", "def get_redis(self, key):\n return r.hget(self.rhash, key)", "def get_storage_master(self):\n return self.storage_master", "def getSharedData(self):\n return {}", "def _get_storage(agent: AbstractAgent) -> Optional[Storage]:\n if agent.storage_uri:\n # threaded has to be always True, cause synchronous operations are supported\n return Storage(agent.storage_uri, threaded=True)\n return None # pragma: nocover", "def get_redis():\n if 'redis' not in g:\n # connect to redis\n raddr = app.config['REDIS_HOST']\n rhost = raddr.split(':')[0]\n rport = int(raddr.split(':')[-1])\n try:\n g.redis = Redis(host=rhost, port=rport)\n except ConnectionError as e:\n err = f\"Could not connect to Redis: {e}\"\n logger.error(err)\n abort(503, err)\n return g.redis", "def db(cls):\r\n return redisco.get_client()", "def get_redis_cache(redis_conn, redis_key):\n cached_data = redis_conn.get(redis_key)\n if cached_data:\n data = json.loads(cached_data)\n else:\n data = False\n\n return data", "def get_storage(self, script_hash, key, **kwargs):\n hexkey = binascii.hexlify(key.encode('utf-8')).decode('utf-8')\n hexresult = self._call(\n JSONRPCMethods.GET_STORAGE.value, params=[script_hash, hexkey, ], **kwargs)\n try:\n assert hexresult\n result = bytearray(binascii.unhexlify(hexresult.encode('utf-8')))\n except AssertionError:\n result = hexresult\n return result", "def getStorage( self, parameterDict ):\n # The storage name must be supplied.\n if parameterDict.has_key( 'StorageName' ):\n storageName = parameterDict['StorageName']\n else:\n errStr = \"StorageFactory.getStorage: StorageName must be supplied\"\n gLogger.error( errStr )\n return S_ERROR( errStr )\n\n # ProtocolName must be supplied otherwise nothing with work.\n if parameterDict.has_key( 'ProtocolName' ):\n protocolName = parameterDict['ProtocolName']\n else:\n errStr = \"StorageFactory.getStorage: ProtocolName must be supplied\"\n gLogger.error( errStr )\n return S_ERROR( errStr )\n\n # The other options need not always be specified\n if parameterDict.has_key( 'Protocol' ):\n protocol = parameterDict['Protocol']\n else:\n protocol = ''\n\n if parameterDict.has_key( 'Port' ):\n port = parameterDict['Port']\n else:\n port = ''\n\n if parameterDict.has_key( 'Host' ):\n host = parameterDict['Host']\n else:\n host = ''\n\n if parameterDict.has_key( 'Path' ):\n path = 
parameterDict['Path']\n else:\n path = ''\n\n if parameterDict.has_key( 'SpaceToken' ):\n spaceToken = parameterDict['SpaceToken']\n else:\n spaceToken = ''\n\n if parameterDict.has_key( 'WSUrl' ):\n wsPath = parameterDict['WSUrl']\n else:\n wsPath = ''\n\n return self.__generateStorageObject( storageName, protocolName, protocol, path, host, port, spaceToken, wsPath, parameterDict )", "def get_redis_connection():\n return redis.StrictRedis(host=C.redis_host, port=C.redis_port, db=C.redis_task_db)", "async def get_redis(self) -> Redis:\n async with self._create_pool_lock:\n if self.redis is None:\n self.redis = await self.create_redis_pool()\n return self.redis", "def _get_redis_connection():\r\n url = current_app.config.get('REDIS_URL', 'redis://localhost:6379')\r\n return redis.from_url(url)", "def cache_volume_data(self):\n return self._service.cache_volume_data()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the shared storage in the redis server for the service.
def set_shared_storage(self, shared_storage):
    validate_json(shared_storage, self.schema)
    shared_storage = json.dumps(shared_storage)
    self.redis_client.set(self.service_type, shared_storage)
    return True
[ "def get_shared_storage(self):\n shared_storage = self.redis_client.get(self.service_type)\n shared_storage = json.loads(shared_storage)\n validate_json(shared_storage, self.schema)\n return shared_storage", "def store_shared_json_data(self, shared_id, json_data):\n try:\n self.client.put_object(Bucket=iris_settings.IRIS_SESSION_BUCKET, Key=\"shared/\" + shared_id,\n Body=json.dumps(json_data))\n except ClientError:\n raise IrisStorageError(\"Error storing data\")", "def set_redis(self, key, value):\n r.hset(self.rhash, key, value)", "def enable_storage(self):\n self.storage_enabled = True", "def shared_key(self, shared_key: ConfigNodePropertyString):\n\n self._shared_key = shared_key", "def star_storage_service(self) -> StorageService:\n return self.storage_services[self.config.storage.star]", "def set_service(self, service):\n SHIM_LOGGER.info('Starting service')\n self.request_queue = service.request_queue\n\n self.service = service\n self._request('init')", "def set_storage_path(self, file_path: str):\n self.storage_path = file_path", "def set_shared_objects(self, shared_objects: Any = None) -> None:\n self.shared_objects = shared_objects", "def set(self, key, value):\n try:\n dkey = digest(key)\n except Exception as err:\n _log.error(\"Failed to calculate digest of key={}, err={}\".format(key, err))\n raise\n# _log.debug(\"AppendServer::set:\"\n# \"\\n\\tkey={}\"\n# \"\\n\\tdkey={}\"\n# \"\\n\\tvalue={}\".format(key, dkey.encode('hex'), value))\n node = Node(dkey)\n\n def store(nodes):\n _log.debug(\"AppendServer::set Setting '%s' on %s\" % (key, [x.id.encode('hex') for x in nodes]))\n# _log.debug(\"AppendServer::set Setting '%s' on %s\" % (key, map(str, nodes)))\n # if this node is close too, then store here as well\n if not nodes or self.node.distanceTo(node) < max([n.distanceTo(node) for n in nodes]):\n self.storage[dkey] = value\n ds = [self.protocol.callStore(n, dkey, value) for n in nodes]\n return defer.DeferredList(ds).addCallback(self._anyRespondSuccess)\n\n nearest = self.protocol.router.findNeighbors(node)\n if len(nearest) == 0:\n _log.warning(\"There are no known neighbors to set key %s\" % key)\n return defer.succeed(False)\n spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)\n return spider.find().addCallback(store)", "def set_shared_instance(cls, db):\n cls._instance = db", "def shared(self, shared):\n if shared is None:\n raise ValueError(\"Invalid value for `shared`, must not be `None`\") # noqa: E501\n\n self._shared = shared", "def setfshare(self, protocol, vfs, sharename,\n fpg=None, fstore=None, comment=None,\n abe=None, allowip=None,\n denyip=None, allowperm=None, denyperm=None, cache=None,\n ca=None,\n options=None, clientip=None,\n ssl=None):", "def setup_redis(name: str, host: str, port: int, **kw) -> None:\n redis_client = kw.pop(\"redis_client\", redis.StrictRedis)\n SYSTEMS[name] = redis_client(host=host, port=port, **kw)", "def set(self, chore):\n\n # Just set using the node and dumped data\n\n self.redis.set(f\"/chore/{chore['id']}\", json.dumps(chore))", "def hemlock_server_store(self, args, var_d):\n arg_d = [\n '--credential_file'\n ]\n return self.check_args(args, arg_d, var_d)", "def create_share(self, share, share_server):", "def set_shared_muse_instance(muse_instance):\n global _shared_muse_instance\n _shared_muse_instance = muse_instance", "def set_shared_with_me(self, shared_with_me):\n\n\t\tif shared_with_me is not None and not isinstance(shared_with_me, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 
'KEY: shared_with_me EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__shared_with_me = shared_with_me\n\t\tself.__key_modified['shared_with_me'] = 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
function to authenticate user activation from the email
def activate(request, uidb64, token):
    try:
        uid = force_text(urlsafe_base64_decode(uidb64))
        user = User.objects.get(pk=uid)
    except (TypeError, ValueError, OverflowError, User.DoesNotExist):
        user = None
    if user is not None and account_activation_token.check_token(user, token):
        user.is_active = True
        user.profile.email_confirmed = True
        user.save()
        login(request, user)
        return redirect('home')
    else:
        return render(request, 'registration/account_activation_invalid.html')
[ "def activate(request, uidb64, token):\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except(TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n if user is not None and account_activation_token.check_token(user, token):\n UserContact.objects.filter(user_id=user.id).update(trackers_activated=1)\n\n messages.info(request, _(\"Votre email est confirmé, merci\"))\n return redirect(\"homepage\")\n else:\n messages.info(request, _(\"Lien d'activation invalide\"))\n return redirect(\"homepage\")", "def activate_email(request):\n from tendenci.apps.registration.models import RegistrationProfile\n from tendenci.apps.accounts.utils import send_registration_activation_email\n form = ActivateForm(request.GET)\n\n if form.is_valid():\n email = form.cleaned_data['email']\n username = form.cleaned_data['username']\n u = None\n if email and username:\n [u] = User.objects.filter(is_active=False, email=email, username=username)[:1] or [None]\n\n if email and not u:\n [u] = User.objects.filter(is_active=False, email=email).order_by('-is_active')[:1] or [None]\n\n if u:\n [rprofile] = RegistrationProfile.objects.filter(user=u)[:1] or [None]\n if rprofile and rprofile.activation_key_expired():\n rprofile.delete()\n rprofile = None\n if not rprofile:\n rprofile = RegistrationProfile.objects.create_profile(u)\n # send email\n send_registration_activation_email(u, rprofile, next=request.GET.get('next', ''))\n context = RequestContext(request)\n template_name = \"profiles/activate_email.html\"\n return render_to_response(template_name,\n { 'email': email},\n context_instance=context)\n\n raise Http404", "def activate_new_email(request, uidb64, token):\n user = UserCreationForm().get_user(uidb64)\n if user is not None and email_activation_token.check_token(user, token):\n user.former_email = user.email\n user.email = user.change_email\n user.change_email = None\n user.is_change_allowed = True\n user.change_email_tracker = None\n user.save()\n return redirect('/dashboard')\n return render(request, 'registration/signup_activation_invalid.html')", "def activate_account(token):\n try:\n email = util.ts.loads(token, salt=\"activation-key\", max_age=86400)\n except:\n return abort(404)\n\n user = User.get(email=email)\n\n if user is None:\n return abort(404)\n\n user.email_confirmed = True\n\n db.session.add(user)\n db.session.commit()\n\n login_user(user)\n\n flash(\"Your account has been activated. 
Welcome to Bookends!\")\n\n return redirect(url_for('index'))", "def activation_email(request, user):\n link = request.route_url(\n 'register_activate',\n code='-'.join(\n [text_type(user.pid),\n user.activation.code]))\n # link = '-'.join(['register.activate', text_type(user.pid), user.activation.code])\n emailtext = _(\"Please validate your email and activate your account by visiting: {link}\")\n body = emailtext.format(link=link)\n return {\n \"request\": request,\n \"subject\": _(\"Please activate your account\"),\n \"recipients\": [user.email],\n \"body\": body\n }", "def activate():\n if not session.get(\"user_id\"):\n return render_template(\"auth/activate.html\")\n\n g.user.account_status = True\n DB.session.commit()\n return render_template(\n \"auth/activate.html\",\n message=\"Successfully activated your account.\"\n )", "def dispatch_activation_email(self, user, request):\n\n # create the activation record\n activation = UserActivation.objects.create(user_id=user.id)\n\n # create the link\n current_site = get_current_site(request)\n protocol = 'https' if request.is_secure() else 'http' \n link = '{}://{}/activate/{}'.format(protocol, current_site.domain, activation.key)\n\n try:\n # dispatch the email\n send_mail(\n 'Activate your account at {}'.format(current_site.name),\n 'Please activate your account by following this link \\n\\n{}\\n\\n-'.format(link),\n 'test@example.com',\n [user.email],\n fail_silently=False\n )\n except error:\n print( error )", "def activate_user(self, activation_key):\n \n if SHA1_RE.search(activation_key):\n try:\n registration = self.get(activation_key=activation_key)\n except self.model.DoesNotExist:\n return False \n user = registration.user\n user.is_active = True\n user.save()\n \n # Set activation key of the registration model as a default string to prevent double activation\n registration.activation_key = self.model.ACTIVATED\n registration.save()\n return user\n return False", "def test_activate_user(self, *_):\n mock_db.DB = {\"email@test.com\": {'confirmed': False}}\n token = jwt.encode(\n {'user': \"email@test.com\"},\n SUPER_SECRET_CONFIRMATION_KEY,\n algorithm='HS256')\n\n response = self.client.open(\n '/api/v1/user/confirm',\n method='POST',\n content_type='application/json',\n query_string=[('token', token)])\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def activation_sent(request):\n messages.success(request, 'Vérifiez vos mails pour activer votre compte')\n return redirect('website:products')\n # return render(request, 'auth/activation_request.html') # This will activate user’s account", "def activate(self, suppliedkey, now = datetime.datetime.now()):\n\t\t# variable to store success of the activation attempt\n\t\tisokay = False\n\t\tif inTime(now, self.key_expiration):\n\t\t\tif suppliedkey == self.registration_key:\n\t\t\t\tself.activated = True\n\t\t\t\t# success\n\t\t\t\tisokay = not isokay\n\t\telse:\n\t\t\tself.expired = True\n\t\t# update user object\n\t\tself.update()\n\t\treturn isokay", "def activate(request, activation_key, template_name='registration/activate.html'):\n\tactivation_key = activation_key.lower() # Normalize before trying anything with it.\n\taccount = RegistrationProfile.objects.activate_user(activation_key)\n\n\tif (account):\n\t\tlogger.info(\"%s - account-activate: user %s\" %(request.META.get('REMOTE_ADDR'), account))\n\telse:\n\t\tlogger.error(\"%s - account-activate: activation_key %s\" %(request.META.get('REMOTE_ADDR'), activation_key))\n\treturn 
render_to_response(template_name,\n\t\t\t\t\t\t\t { 'account': account,\n\t\t\t\t\t\t\t\t'expiration_days': getattr(settings, 'ACCOUNT_ACTIVATION_DAYS', 5) },\n\t\t\t\t\t\t\t context_instance=RequestContext(request))", "def activate(self, id):\n log.debug('Activation request with key = %s' % id)\n\n if id is None:\n log.error('Key is None; redirecting to main page')\n h.rails.redirect_to(h.url_for(action=None))\n\n user = meta.session.query(User).filter_by(verification=id).first()\n\n if user is not None:\n log.debug('Activating user \"%s\"' % user.name)\n user.verification = None\n meta.session.commit()\n else:\n log.error('Could not find user; redirecting to main page')\n h.rails.redirect_to(h.url_for(action=None, id=None))\n\n c.user = user\n return render('/register/activated.mako')", "def test_users_activation_email_send(self):\n pass", "def send_activation(request):\n form = ResendActivationForm(request.data)\n if form.is_valid():\n requesting_user = form.user_cache\n\n mail_subject = _(\"Activate %(user)s account on %(forum_name)s forums\") % {\n 'user': requesting_user.username,\n 'forum_name': settings.forum_name,\n }\n\n mail_user(\n request,\n requesting_user,\n mail_subject,\n 'misago/emails/activation/by_user',\n {\n 'activation_token': make_activation_token(requesting_user),\n },\n )\n\n return Response({\n 'username': form.user_cache.username,\n 'email': form.user_cache.email,\n })\n else:\n return Response(\n form.get_errors_dict(),\n status=status.HTTP_400_BAD_REQUEST,\n )", "def activate(request):\n if request.method == 'GET':\n\n try:\n data = request.GET.copy()\n\n if data:\n # Getting user_activation object where activation key\n activate = UserActivation.objects.get(\n activation_key=data['id'])\n data1 = {'is_active': True}\n\n # Update auth_user table is_active status to True.\n User.objects.filter(pk=activate.user_id).update(**data1)\n ctx = {'title': 'Activate page'}\n return render_to_response(\n 'home/activate.html', ctx,\n context_instance=RequestContext(request))\n\n else:\n feedback = \"Page Not Found\"\n\n # Context to send in html.\n ctx = {'title': 'Activate page', 'feedback': feedback}\n return render_to_response(\n 'home/activate.html', ctx,\n context_instance=RequestContext(request))\n\n except Exception as e:\n logger.exception(\"EXCEPTION :\" + str(e))\n\n # Store exception in feedback.\n feedback = str(e)\n\n # Context to send in html.\n ctx = {'title': 'Activate page', 'feedback': feedback}\n return render_to_response('home/activate.html', ctx,\n context_instance=RequestContext(request))", "def test_activation_valid(self):\n user = SignupManager.create_user(**self.user_info)\n active_user = SignupManager.activate_user(user.signup.activation_key)\n\n # The returned user should be the same as the one just created.\n self.failUnlessEqual(user, active_user)\n\n # The user should now be active.\n self.failUnless(active_user.is_active)\n\n # The user should have permission to view and change its profile\n #self.failUnless('view_profile' in get_perms(active_user, active_user.get_profile()))\n #self.failUnless('change_profile' in get_perms(active_user, active_user.get_profile()))\n\n # The activation key should be the same as in the settings\n self.assertEqual(active_user.signup.activation_key,\n auth_settings.BAPH_ACTIVATED)", "def http_verify (ctx, uid, token) :\n\n username, success_url = yield db.handle_verify(user_id, token)\n \n success_url = utils.build_url(success_url,\n uid = user_id,\n username = username,\n )\n\n ctx.redirectTo(success_url)\n\n 
returnValue( (\"Account '%s' successfully activated\" % username) )", "def test_users_activation_activate(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
rendering images belonging to the profile owner
def profile(request):
    current_user = request.user
    prof_details = User_prof.objects.filter(username = current_user.id).all()
    user_images = Image.objects.filter(username = current_user).all()
    return render(request, 'all_templates/profile.html', {'prof_data': prof_details,'user_images':user_images})
[ "def render_profile_pic(self, data: Dict[str, Any] = {}):\n profile_pic = data.get('profile', \"\")\n\n # with st.beta_container():\n # _, mid_column, _ = st.beta_columns(3)\n\n # with mid_column: \n # render_svg(\"./assets/images/Synergos-Logo.svg\")\n render_svg(\"./assets/images/Synergos-Logo.svg\")", "def avatar(value, arg):\n user_id = int(arg)\n authors = Author.objects.filter(user__id=user_id)\n if authors is None:\n return static('avatar-stock.svg')\n\n author = authors.first()\n profile_picture_path = author.cover_image_url()\n return static(profile_picture_path)", "def social_image(request):\n return render(request, \"core/office/social_image.html\", {})", "def memberphotos(request, username, template_name = \"photos/memberphotos.html\", group_slug = None, bridge = None):\n\n if bridge:\n try:\n group = bridge.get_group(group_slug)\n except ObjectDoesNotExist:\n raise Http404\n else:\n group = None\n\n user = get_object_or_404(User, username = username)\n\n photos = Image.objects.filter(\n member__username = username,\n is_public = True,\n )\n\n if group:\n photos = group.content_objects(photos, join = \"pool\")\n else:\n photos = photos.filter(pool__object_id = None)\n\n photos = photos.order_by(\"-date_added\")\n\n return render_to_response(template_name, {\n \"group\": group,\n \"photos\": photos,\n }, context_instance = RequestContext(request))", "def profilePageView(request):\n user = request.user\n if user.is_anonymous():\n return HttpResponseRedirect('../homepage')\n profile = Profile.objects.get(user=user, username=user.username)\n albumsprofile = ProfileAlbum.objects.all().filter(profile_id=profile)\n albums = []\n for item in albumsprofile:\n albums.append(item.album_id)\n photos = Image.objects.all().filter(autor=profile)\n return render(request, 'profilepage.html', {'albums':albums, 'photos':photos})", "def getProfileImage(self, name):\n for p in self.persons:\n if \"pseudo\" in self.persons[p]:\n if name == self.persons[p][\"pseudo\"]:\n image_id = self.persons[p][\"face\"][\"id\"]\n key = self.persons[p][\"face\"][\"key\"]\n return self.getCameraPicture(image_id, key)\n return None, None", "def my_photoroll(request):\n user = request.user\n\n # Catch if Profile is not set up and prompt user to enter details\n try:\n profile = request.user.profile\n except ObjectDoesNotExist:\n return redirect('dam:my_user_profile')\n\n photos = Photo.objects.filter(owner=request.user)\\\n .order_by('created_datetime')[:100] # TODO order by Exif created tag\n # TODO get the full list and use JS to limit # displayed\n\n return render(request, 'dam/user_photoroll.html', {\n 'title': SITE_TITLE,\n 'user': user,\n 'profile': profile,\n 'photos': photos\n })", "def user_photo(user):\n\n if user.photo:\n return URL('default', 'download', args=user.photo)\n elif user.sex:\n return URL('static', f'img/avatar_{user.sex.lower()}_1.png')\n else:\n return URL('static', 'img/boxed_bg.png')", "def sperk_profile(request, user, logo, template='sperk/sperk-profile.html', page_template='includes/skigit_list.html'):\n\n if User.objects.filter(id=request.user.id).exists():\n ski_share_list = []\n busniess_logo = []\n friend_list = []\n like_dict = []\n id = user\n logoid = logo\n profile_list = Profile.objects.filter(user__id=user)\n try:\n request_user = User.objects.get(pk=profile_list[0].user.id, is_active=True)\n except ObjectDoesNotExist:\n messages.error(request, 'Sorry, Your Request User Not Found.')\n return HttpResponseRedirect('/') # HttpResponseRedirect\n\n if not 
request_user.profile.is_completed['status']:\n messages.error(request, 'Sorry, Your request user profile is not active.')\n return HttpResponseRedirect('/')\n\n busniesslogo = BusinessLogo.objects.get(id=logo, is_deleted=False)\n\n for b_logo in profile_list:\n for bb_logo in b_logo.logo_img.filter(is_deleted=False).all():\n bb_logo.img_id = bb_logo.id\n bb_logo.l_img = get_thumbnail(bb_logo.logo, '130x130', crop='center', quality=100, format='PNG').url\n busniess_logo.append(bb_logo)\n\n for user_list in profile_list:\n company_url = ProfileUrl.objects.filter(user=user_list.user)\n\n if Embed.objects.filter(to_user=request_user, is_embed=True).exists():\n embed_skigit_list = Embed.objects.filter(to_user=request_user, is_embed=True).values_list('skigit_id',\n flat=True)\n if request.user.is_authenticated():\n if Friend.objects.filter(Q(to_user=request.user.id) | Q(from_user=request.user.id), status=1).exists():\n f_list = Friend.objects.filter(Q(to_user=request.user.id) | Q(from_user=request.user.id), status=1)\n from_user_list = f_list.exclude(from_user=request.user.id).values_list('from_user',\n flat=True).distinct()\n to_user_list = f_list.exclude(to_user=request.user.id).values_list('to_user', flat=True).distinct()\n fr_list = list(merge(from_user_list, to_user_list))\n friends_detail = Profile.objects.filter(user__id__in=fr_list).order_by('user__username')\n for friends in friends_detail:\n if friends.profile_img:\n l_img = get_thumbnail(friends.profile_img, '35x35', crop='center', quality=100, format='PNG').url\n else:\n l_img = '/static/skigit/detube/images/noimage_user.jpg'\n friend_list.append({'uid': friends.user.id, 'username': friends.user.username,\n 'name': friends.user.get_full_name(), 'image': l_img})\n video_likes = Like.objects.filter(user_id=request.user.id, status=True)\n for likes in video_likes:\n like_dict.append(likes.skigit_id)\n vid = VideoDetail.objects.select_related('skigit_id').filter(skigit_id__id__in=embed_skigit_list)\n serializer = VideoDetailSerializer(vid, many=True)\n for vid_data in vid:\n sharObj = Share.objects.filter(skigit_id=vid_data, is_active=True, user=user).order_by(\n 'to_user', '-pk').distinct('to_user')\n for sh in sharObj:\n ski_share_list.append(\n {'share_date': sh.created_date, 'username': sh.to_user.username, 'vid': sh.skigit_id_id})\n request_user = request_user\n #video_detail = serializer.data\n video_detail = vid\n video_likes = like_dict\n friend_list = friend_list\n order_value = '1'\n togal_val = '1'\n skigit_list = ski_share_list\n users = get_all_logged_in_users()\n unembed = False\n if request_user == request.user:\n unembed = True\n if request.is_ajax():\n template = page_template\n return render(request, template, locals())", "def profile_pic(user_id):\n fpath = os.path.join(current_app.config['UPLOAD_DIR'], str(user_id) + '.png')\n if os.path.isfile(fpath):\n return send_file(fpath)\n\n else:\n return current_app.send_static_file('img/default_profile.png')", "def generate_images(self):\n\n speaker = self.submission.speakers.all()[0]\n\n if speaker.avatar:\n avatar = get_thumbnail(speaker.avatar, '160x160', crop='center', quality=80)\n avatar = Image.open(avatar.storage.path(avatar.name))\n elif speaker.get_gravatar:\n r = requests.get(\n \"https://www.gravatar.com/avatar/\" + speaker.gravatar_parameter,\n allow_redirects=True\n )\n if r.status_code == 200:\n avatar = Image.open(BytesIO(r.content))\n avatar = avatar.resize((160, 160), Image.ANTIALIAS)\n else:\n avatar = Image.new('RGBA', (160, 160), 0)\n else:\n avatar = 
Image.new('RGBA', (160, 160), 0)\n\n # Now turn the avatar circular\n\n bigsize = (avatar.size[0] * 3, avatar.size[1] * 3)\n mask = Image.new('L', bigsize, 0)\n draw = ImageDraw.Draw(mask)\n draw.ellipse((0, 0) + bigsize, fill=255)\n mask = mask.resize(avatar.size, Image.ANTIALIAS)\n avatar.putalpha(mask)\n\n data_dir = os.path.join(os.path.dirname(__file__), \"some_banners\")\n\n background = Image.open(\n os.path.join(data_dir, \"some_twitter_card.png\")\n )\n\n new_card = Image.new('RGBA', background.size, (0, 0, 0, 0))\n\n # Add the background\n new_card.paste(background, (0, 0))\n\n # Add the avatar\n new_card.paste(avatar, (58, 77), mask)\n\n # Write the speaker names\n draw = ImageDraw.Draw(new_card)\n font = ImageFont.truetype(os.path.join(data_dir, \"fonts\", \"Poppins-SemiBold.ttf\"), 56)\n\n offset = 60\n\n speaker_lines = wrap(self.speakers, 30).split(\"\\n\")\n for line in speaker_lines:\n draw.text((280, offset), line, (230, 28, 93), font=font)\n offset += 65\n\n font = ImageFont.truetype(os.path.join(data_dir, \"fonts\", \"Poppins-SemiBold.ttf\"), 56)\n\n title = self.submission.title\n if self.keynote:\n title = \"Keynote: \" + title\n\n title_lines = wrap(title, 30).split(\"\\n\")\n\n lines_available = 5 - len(speaker_lines)\n if len(title_lines) > lines_available:\n title_lines[lines_available - 1] += \"...\"\n\n if lines_available < 0:\n lines_available = 0\n\n for line in title_lines[:lines_available]:\n draw.text((280, offset), line, (255, 255, 255), font=font)\n offset += 65\n\n # Render it to screen\n # new_card.show()\n\n image_path = twitter_card_path(self, \"blahblah.png\")\n full_path = os.path.join(settings.MEDIA_ROOT, image_path)\n\n new_card.save(full_path, format='png')\n\n self.twitter_card_image = image_path\n self.save()", "def test_list_image_param_owner(self):\n image_id = self.created_images[0]\n # Get image metadata\n image = self.client.show_image(image_id)\n\n params = {\"owner\": image['owner']}\n self._list_by_param_value_and_assert(params)", "def my_galleries(request):\n user = request.user\n profile = request.user.profile\n galleries = Gallery.objects.filter(owner=request.user)\\\n .order_by('created_datetime')\n return render(request, 'dam/user_galleries.html', {\n 'title': SITE_TITLE,\n 'user': user,\n 'profile': profile,\n 'galleries': galleries\n })", "def get_image(self):", "def test_core_get_gallery_images_scope_owner_v1(self):\n pass", "def profile_img_json(request):\n user = request.user\n\n _json = {'available': prev_profile_imgs(user)}\n\n try:\n _current = user.profile_img.path\n except ValueError:\n _current = ''\n _json['current'] = os.path.basename(_current)\n return JsonResponse(_json)", "def profile_image_url(self):\n fb_uid = SocialAccount.objects.filter(user_id=self.user.id, provider='facebook')\n tw_uid = SocialAccount.objects.filter(user_id=self.user.id, provider='twitter')\n gg_uid = SocialAccount.objects.filter(user_id=self.user.id, provider='google')\n\n if len(fb_uid):\n return \"http://graph.facebook.com/{}/picture?width=40&height=40\".format(fb_uid[0].uid)\n elif len(tw_uid):\n extra_data = tw_uid[0].extra_data\n pic = extra_data['profile_image_url_https']\n return pic\n elif len(gg_uid):\n extra_data = gg_uid[0].extra_data\n pic = extra_data['picture']\n return pic\n return \"http://www.gravatar.com/avatar/{}?s=40\".format(\n hashlib.md5(self.user.email).hexdigest())", "def get_avatar(request, backend, strategy, details, response,\n user=None, *args, **kwargs):\n url = None\n if backend.name == 'google-oauth2':\n url = 
response['image'].get('url')\n ext = url.split('.')[-1]\n domain = response['domain']\n user_id = response['id']\n if url:\n request.session['avatar_url'] = url.split('?')[0]+'?sz=200'\n\n request.session['gender'] = response.get('gender', 'male')\n\n if domain and user_id:\n #check if user is admin \n response = requests.get(\n ' https://www.googleapis.com/admin/directory/v1/users/{}'.format(user_id),\n params={'access_token': response['access_token']}\n )\n request.session['isAdmin'] = response.json().get('isAdmin')\n request.session['domain'] = domain", "async def pic(self, ctx, args=None, language=\"en\"):\n quora_username = await self.get_username(ctx, args)\n await self._generate_view(ctx, quora_username, \"pic\", language)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Copy file at original_path to file at destination_path
def copy_file(original_path, destination_path): shutil.copyfile(original_path, destination_path)
[ "def copy_file(self, origin_path: str, dest_path: str):\n shutil.copy2(origin_path, dest_path)", "def copy_file(cls, path, source_dir, destination_dir):\n if not (source_dir / path).exists():\n return\n shutil.copyfile(str(source_dir / path), str(destination_dir / path))", "def copy(source, destination):\n source = os.path.abspath(source)\n destination = os.path.abspath(destination)\n if source != destination:\n shutil.copyfile(source, destination)", "def copyfile(self, src, dst):\n self.logger.debug('Copying file %s to %s.', src, dst)\n shutil.copy2(src, dst)", "def copy_file(self, path: str, filename: str, new_path: str, new_filename: str = None):\n new_filename = new_filename or filename\n with TemporaryFile() as file:\n self.read_file(path, filename, file)\n file.seek(0)\n self.write_file(new_path, new_filename, file)", "def _copy(self, src, dest):\n shutil.copyfile(src, dest)\n try:\n shutil.copystat(src, dest)\n except OSError:\n self.log.debug(\"copystat on %s failed\", dest, exc_info=True)", "def copy_file(self, source_path, remote_path):\n sftp = self.get_sftp()\n sftp.put(source_path, remote_path)", "def cp(src_filename, dst_filename):\n src_is_remote = is_remote_path(src_filename)\n dst_is_remote = is_remote_path(dst_filename)\n if src_is_remote == dst_is_remote:\n return auto(copy_file, src_filename, dst_filename)\n filesize = auto(get_filesize, src_filename)\n if src_is_remote:\n with open(dst_filename, 'wb') as dst_file:\n return remote(send_file_to_host, src_filename, dst_file, filesize,\n xfer_func=recv_file_from_remote)\n with open(src_filename, 'rb') as src_file:\n return remote(recv_file_from_host, src_file, dst_filename, filesize,\n xfer_func=send_file_to_remote)", "def copy(from_file_path, to_file_path):\n dirname = os.path.dirname(to_file_path)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n shutil.copy2(from_file_path, to_file_path)", "def copy_file(self, source, destination, cmd=True):\n self._check_path(source)\n self._check_path(destination)\n\n if cmd:\n self.run_shell_cmd(command=[\"copy\", source, destination], cmd=True)\n else:\n self.run_shell_cmd(command=\"copy \"+source+\" \"+destination,\n cmd=False)", "def copy(source, destination):\n\ttry:\n\t\tshutil.copyfile(translatePath(source), translatePath(destination))\n\t\treturn True\n\texcept:\n\t\treturn False", "def copy_file(server, source, target):\n with setup_server_connection(server) as connection:\n Transfer(connection).put(local=source, remote=target)", "def copy_file (\n source_path,\n target_path,\n allow_undo=True,\n no_confirm=False,\n rename_on_collision=True,\n silent=False,\n hWnd=None\n):\n return _file_operation (\n shellcon.FO_COPY,\n source_path,\n target_path,\n allow_undo,\n no_confirm,\n rename_on_collision,\n silent,\n hWnd\n )", "def copy(input_path, output_path):\n _check_output_path(output_path)\n _makedirs(output_path)\n try:\n shutil.copy2(input_path, output_path)\n except FileNotFoundError:\n raise DoxhooksFileSystemError(\"Cannot find file:\", input_path)\n except OSError as error:\n raise DoxhooksFileSystemError(\n \"Cannot copy file from {!r} to {!r}.\"\n .format(input_path, output_path)) from error", "def copy_file(self, src, dst):\n dst_existed = False\n pre_hash = None\n if not self.changed:\n if os.path.isfile(dst):\n dst_existed = True\n pre_hash = self.get_hash(dst)\n copyfile(src, dst)\n if not self.changed:\n if dst_existed:\n post_hash = self.get_hash(dst)\n self.changed = pre_hash == post_hash\n else:\n if os.path.isfile(dst):\n self.changed = True", "def 
CopyFileToDir(original_file, source_dir, dest_dir, preserve_dirs=False):\n if not original_file.startswith(source_dir):\n print \"%s is not in %s!\" % (original_file, source_dir)\n return\n relative_path = os.path.basename(original_file)\n if preserve_dirs:\n # Add any dirs below source_dir to the final destination\n filePath = original_file.replace(source_dir, \"\").lstrip(\"/\")\n filePath = os.path.dirname(filePath)\n dest_dir = os.path.join(dest_dir, filePath)\n new_file = os.path.join(dest_dir, relative_path)\n full_dest_dir = os.path.dirname(new_file)\n if not os.path.isdir(full_dest_dir):\n try:\n os.makedirs(full_dest_dir, 0755)\n except OSError, e:\n if e.errno == EEXIST:\n print \"%s already exists, continuing anyways\" % full_dest_dir\n else:\n raise\n if os.path.exists(new_file):\n try:\n os.unlink(new_file)\n except OSError, e:\n # If the file gets deleted by another instance of post_upload\n # because there was a name collision this improves the situation\n # as to not abort the process but continue with the next file\n print \"Warning: The file %s has already been unlinked by \" + \\\n \"another instance of post_upload.py\" % new_file\n return\n\n # Try hard linking the file\n if original_file in _linkCache:\n for src in _linkCache[original_file]:\n try:\n os.link(src, new_file)\n os.chmod(new_file, 0644)\n return\n except OSError:\n pass\n\n tmp_fd, tmp_path = tempfile.mkstemp(dir=dest_dir)\n tmp_fp = os.fdopen(tmp_fd, 'wb')\n shutil.copyfileobj(open(original_file, 'rb'), tmp_fp)\n tmp_fp.close()\n os.chmod(tmp_path, 0644)\n os.rename(tmp_path, new_file)\n _linkCache.setdefault(original_file, []).append(new_file)", "def copy_remote(src_path, dst_path):\n assert ':' not in src_path, src_path\n idx = dst_path.find(':')\n dst = dst_path[:idx]\n file_path = dst_path[idx+1:]\n assert ':' not in file_path, dst_path\n if os.path.isfile(src_path):\n cmd = 'scp %s %s' % (src_path, dst_path)\n else:\n cmd = 'scp -r %s %s' % (src_path, dst_path)\n res = run(cmd, shell=True, stdout=PIPE).stdout.decode('utf-8')\n return res", "def safe_copyfile(src, dest):\n if os.path.isdir(dest):\n dest = os.path.join(dest, os.path.basename(src))\n if os.path.lexists(dest):\n if not global_options['overwrite']:\n raise ValueError(\"was asked to copy %s but destination already exists: %s\"\n % (src, dest))\n else:\n # to make sure we can write there ... still fail if it is entire directory ;)\n os.unlink(dest)\n shutil.copyfile(src, dest)", "def _move_path_to_path_or_stream(src, dst):\n if is_writable_file_like(dst):\n fh = (io.open(src, 'r', encoding='latin-1')\n if file_requires_unicode(dst)\n else io.open(src, 'rb'))\n with fh:\n shutil.copyfileobj(fh, dst)\n else:\n # Py3: shutil.move(src, dst, copy_function=shutil.copyfile)\n open(dst, 'w').close()\n mode = os.stat(dst).st_mode\n shutil.move(src, dst)\n os.chmod(dst, mode)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Rename the file at path to new_file_name, optionally prepending a prefix, appending a suffix, and changing the extension.
def rename(filepath, new_file_name=None, prefix=None, suffix=None, new_extension=None): old_file_path, old_extension = os.path.splitext(filepath) old_file_name = os.path.basename(old_file_path) file_dir = os.path.dirname(old_file_path) if not new_file_name: new_file_name = old_file_name if not new_extension: new_extension = old_extension if prefix: new_file_name = prefix + new_file_name if suffix: new_file_name += suffix new_file_path = os.path.join(file_dir, new_file_name + new_extension) os.rename(filepath, new_file_path) return new_file_path
[ "def modify_filename_in_path(file_path, new_name=None, added=None,\n prefix=False):\n # Normalize input to Path object and build new file name.\n file_path = Path(file_path)\n if new_name is None:\n new_name = file_path.stem\n if added is not None:\n if prefix:\n new_name = added + new_name\n else:\n new_name = new_name + added\n output = Path(file_path.parent, new_name).with_suffix(file_path.suffix)\n return output", "def _rename_filename(self, filename):\n directory = os.path.dirname(self._filepath) # keep the same path\n extension = os.path.splitext(self._filepath)[1] # keep the extension\n\n # Concatenate the new path for the file, rename the file and update the\n # _filepath variable.\n new_path = os.path.join(directory, filename + extension)\n os.rename(self._filepath, new_path)\n self._filepath = new_path", "def change_ext(path, new_ext=\"txt\"):\n name, ext = os.path.splitext(path)\n if not new_ext.startswith(\".\"):\n new_ext = \".\" + new_ext\n return name + new_ext", "def change_extension(path, new_extension):\n \n path = stringify(path)\n new_extension = stringify(new_extension)\n if new_extension is None:\n return path\n dot = path.rfind('.')\n if dot != -1:\n path = path[:dot]\n return '{}.{}'.format(path, new_extension)", "def rename(oldPath, newPath, **kwargs):\n import os\n return os.rename(oldPath, newPath, **kwargs)", "def change_ext(filename, new_ext):\n return re.sub(r\"\\.\\w+$\", new_ext, filename)", "def rename_file(self, old, new):\n del self.file_dict[os.path.basename(old)]\n self.file_dict[os.path.basename(new)] = new\n # reconstruct to include new file\n self.mp3_basenames = tuple(sorted(self.file_dict.keys()))\n\n del self.meta_cache[os.path.basename(old)]\n self.parse_info_for_status(os.path.basename(new)) # replace in meta_cache", "def _change_ext(self, filename):\n name = utils.get_name(self.name)\n ext = utils.get_ext(filename)\n self.name = name + ext", "def add_suffix_in_filename(path_file, suffix):\n name, extension = os.path.splitext(path_file)\n return name + suffix + extension", "def UpdateFile(oldFilePath, newFilePath):\n perm = os.stat(oldFilePath).st_mode\n os.rename(newFilePath, oldFilePath)\n os.chmod(oldFilePath, perm)", "def RenameFile(options, filepath):\n\n # split the filepath and the filename apart, this ensures that we dont\n # rename the directory in addtion to the filename\n pathname = os.path.dirname(filepath)\n filename = os.path.basename(filepath)\n\n if options.superverbose:\n print '==========================================='\n print filename\n\n # trim the beginning of filename by supplied number\n if options.trimfront:\n filename = filename[options.trimfront:]\n if options.superverbose:\n print '...front trimmed %s' % filename\n\n # trim ending of filename by supplied number\n if options.trimback:\n filename = filename[:len(filename) - options.trimback]\n if options.superverbose:\n print '...back trimmed %s' % filename\n\n # replace matches in filename with supplied value\n if options.replace:\n for vals in options.replace:\n filename = filename.replace(vals[0], vals[1])\n if options.superverbose:\n print '...replaced vals %s' % filename\n\n # convert filename to all lowercase letters\n if options.lowercase:\n filename = filename.lower()\n if options.superverbose:\n print '...lowercased %s' % filename\n\n # convert filename to all uppercase letters\n if options.uppercase:\n filename = filename.upper()\n if options.superverbose:\n print '...uppercased %s' % filename\n\n # rejoin the filename and filepath\n new_filepath = 
os.path.join(pathname, filename)\n\n # finally, we actually rename the file on the filesystem\n try:\n\n print \"%s -> %s\" % (filepath, new_filepath)\n\n # if this is not a dry-run, then rename the files on disk\n if not options.dryrun:\n os.rename(filepath, new_filepath)\n\n except OSError, ex:\n print >>sys.stderr, \"Error renaming '%s': %s\" % (filepath, ex.strerror)", "def rename_and_overwrite_file(source_filename, destination_filename):\n os.replace(source_filename, destination_filename)", "def rename_file(self, file_id, name):\n pass", "def replace_file_extension(file_name, extension):\n prefix, _, _ = file_name.rpartition('.')\n return prefix + '.' + extension", "def copy_rename(old_path, new_path = \".\"):\n new_name = old_path.split(\"/\")[-1]\n if new_name[-4:] != \".ggb\":\n raise ValueError(\"the given path does not lead to a .ggb file\")\n # rename to .zip\n new_name = new_name[:-3] + \"zip\"\n new_path = os.path.join(new_path, new_name)\n # copy to new_path\n shutil.copy(old_path, new_path)\n return new_path", "def replace_prefix(filepath, oldprefix, newprefix):\n path, filename = os.path.split(filepath)\n if not filename.startswith(oldprefix):\n raise ValueError(f'{filename} does not start with {oldprefix}')\n\n filename = filename.replace(oldprefix, newprefix, 1)\n return os.path.join(path, filename)", "def rename(path, new_path):\n return exec_fn(lambda: os_rename(path, new_path))", "def _RenameFile(filename, rename_template, directory_path):\n if rename_template is None:\n return filename\n source_base, extension = os.path.splitext(filename)\n if extension:\n extension = extension[1:]\n source_dir = os.path.basename(os.path.abspath(directory_path))\n now_time = datetime.datetime.utcnow()\n replace_map = {\n 'source_dir': source_dir,\n 'source_base': source_base,\n 'source_ext': extension,\n 'upload_time': unicode(now_time).replace(' ', '_'),\n }\n output_file = string.Template(rename_template).substitute(replace_map)\n return output_file", "def rename( self, source : str, target : str, *, ext : str = None ):\n src_full = self.fullKeyName( source, ext=ext )\n tar_full = self.fullKeyName( target, ext=ext )\n os.rename(src_full, tar_full)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find a hobart-compatible database in the given directory, if one exists.
def find_hobart_database(directory): if not path.is_directory(directory): raise exceptions.InvalidDirectoryError(directory) hobart_database_name = 'dbft?.db' hobart_glob = path.make_path(directory, hobart_database_name) results = glob.glob(hobart_glob) if not results or not len(results): raise exceptions.NoDatabaseError(directory) return results[-1]
[ "def __discover_database_path__(self):\n for path in MIXXX_DATABASE_PATHS:\n if os.path.isfile(path):\n return path\n raise MixxxDatabaseError('Mixxx database not found')", "def find_config_db():\n search_path = [os.path.dirname(os.path.realpath(sys.argv[0])),\n '/usr/share/timesync',\n '/usr/local/share/timesync']\n for root in search_path:\n config_db = os.path.join(root, 'config')\n if os.path.isdir(config_db):\n return config_db", "def find_agenda_db(log):\n\n home = os.path.expanduser(\"~\")\n db_file = \"{0}{1}\".format(home, DB_LOCATION)\n if not os.path.isfile(db_file):\n log.debug(\n \"Agenda db not found at {0}\".format(db_file))\n\n log.debug(db_file)\n return db_file", "def find_db_path():\n db_filename = 'observer_encrypted.db' if use_encrypted_database else 'observer.db'\n # logger = logging.getLogger('__main__')\n if os.path.exists(os.path.join(os.getcwd(), '../data/' + db_filename)):\n path = '../data'\n elif os.path.exists(os.path.join(os.getcwd(), 'data/' + db_filename)):\n path = 'data'\n elif os.path.exists(os.path.join(os.getcwd(), '../../data/' + db_filename)):\n path = '../../data' # Unit tests\n else:\n errmsg = 'Error locating database ' + db_filename\n logging.error(errmsg)\n raise FileNotFoundError(errmsg)\n\n return os.path.join(path, db_filename)", "def database_exists():\n # Check that the database file exists. #\n expected_db_name = 'pokemon.db'\n expected_db_abspath = os.path.abspath(os.path.join(\n os.path.dirname(__file__),\n expected_db_name\n ))\n\n return os.path.exists(expected_db_abspath)", "def check_db(self):\n if not os.path.exists(self.db_base_path):\n raise DatabaseDoesNotExist", "def guess_database(args):\n return _guess_database_file(args.gtf, args.database)", "def test_create_db_file_if_not_exist(self):\n databasemanager.DatabaseManager(driftwood())", "def test_create_db_dir_if_not_exist(self):\n databasemanager.DatabaseManager(driftwood())", "def get_database():\r\n dbpath = \"/\".join(__file__.split('/')[:-1] + ['samples.db'])\r\n return shelve.open(dbpath,protocol=2,writeback=True)", "def library_db(library_root):\n return os.path.join(library_root, \"libraries.db\")", "def find_db(self):\n cookies_path = _os.path.expanduser(self._db_paths[_get_platform()])\n\n cookies_path = self._find_db_extra(cookies_path)\n\n if not _os.path.exists(cookies_path):\n raise MissingCookiesDB(\"Cookie does not exist at \"\n \"{0}\".format(cookies_path))\n return cookies_path", "def db_exists(self):\r\n return self.func.file_exists(self.conf[\"path_to_database\"])", "def test_database(self):\n tester = os.path.exists(\"lingualizer_alchemy.db\")\n self.assertEqual(tester, True)", "def _get_database_directory():\n return get_database_directory()", "def find_db_file(dbfile, localize=None, loclist=None):\n ret = []\n loclist = mk_loclist(localize, loclist)\n dirs = get_db_dirs(None)\n for dir in dirs:\n for file in os.listdir(dir):\n # try for localized versions in order\n for loc in loclist:\n tfile = loc + file\n #print 'tfile>%s<' % tfile\n modname, modext = os.path.splitext(tfile)\n if modext != '.py':\n continue\n\n if fnmatch.fnmatch(modname, dbfile):\n ret.append(os.path.normpath(dir+'/'+modname+'.py'))\n else:\n continue\n #print 'returning>%s<' % string.join(ret, ', ')\n return ret", "def db_path(path):\r\n return os.path.join(database_dir, *path.split('/'))", "def model_db(model_root):\n return os.path.join(model_root, \"model.db\")", "def build_db_path(directory):\n\n return directory / 'test.sqlite'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a MongoDB collection (all elements must have the same attributes) to a shapefile
def mongodb2shape(mongodb_server, mongodb_port, mongodb_db, mongodb_collection, output_shape): print ' Converting a mongodb collection to a shapefile ' connection = Connection(mongodb_server, mongodb_port) print 'Getting database MongoDB %s...' % mongodb_db db = connection[mongodb_db] print 'Getting the collection %s...' % mongodb_collection collection = db[mongodb_collection] print 'Exporting %s elements in collection to shapefile...' % collection.count() drv = ogr.GetDriverByName("ESRI Shapefile") ds = drv.CreateDataSource(output_shape) lyr = ds.CreateLayer('test', None, ogr.wkbUnknown) print 'Shapefile %s created...' % ds.name cursor = collection.find() # define the progressbar pbar = ProgressBar(collection.count()).start() k=0 # iterate the features in the collection and copy them to the shapefile # for simplicity we export only the geometry to the shapefile # if we would like to store also the other fields we should have created a metadata element with fields datatype info for element in cursor: element_geom = element['geom'] feat = ogr.Feature(lyr.GetLayerDefn()) feat.SetGeometry(ogr.CreateGeometryFromWkt(element_geom)) lyr.CreateFeature(feat) feat.Destroy() k = k + 1 pbar.update(k) pbar.finish() print '%s features loaded in shapefile from MongoDb.' % lyr.GetFeatureCount()
[ "def shape2mongodb(shape_path, mongodb_server, mongodb_port, mongodb_db, mongodb_collection, append, query_filter):\n print ' Converting a shapefile to a mongodb collection '\n driver = ogr.GetDriverByName('ESRI Shapefile')\n print 'Opening the shapefile %s...' % shape_path\n ds = driver.Open(shape_path, 0)\n if ds is None:\n print 'Can not open', ds\n sys.exit(1)\n lyr = ds.GetLayer()\n totfeats = lyr.GetFeatureCount()\n lyr.SetAttributeFilter(query_filter)\n print 'Starting to load %s of %s features in shapefile %s to MongoDB...' % (lyr.GetFeatureCount(), totfeats, lyr.GetName())\n print 'Opening MongoDB connection to server %s:%i...' % (mongodb_server, mongodb_port)\n connection = Connection(mongodb_server, mongodb_port)\n print 'Getting database %s' % mongodb_db\n db = connection[mongodb_db]\n print 'Getting the collection %s' % mongodb_collection\n collection = db[mongodb_collection]\n if append == False:\n print 'Removing features from the collection...'\n collection.remove({})\n print 'Starting loading features...'\n # define the progressbar\n pbar = ProgressBar(maxval=lyr.GetFeatureCount()).start()\n k=0\n # iterate the features and access its attributes (including geometry) to store them in MongoDb\n feat = lyr.GetNextFeature()\n while feat:\n mongofeat = {}\n geom = feat.GetGeometryRef()\n mongogeom = geom.ExportToWkt()\n mongogeom = geom.ExportToJson()\n #print geom.ExportToJson()\n mongofeat['geom'] = mongogeom\n # iterate the feature's fields to get its values and store them in MongoDb\n feat_defn = lyr.GetLayerDefn()\n for i in range(feat_defn.GetFieldCount()):\n value = feat.GetField(i)\n if isinstance(value, str):\n value = unicode(value, 'latin-1')\n field = feat.GetFieldDefnRef(i)\n fieldname = field.GetName()\n mongofeat[fieldname] = value\n # insert the feature in the collection\n collection.insert(mongofeat)\n feat.Destroy()\n feat = lyr.GetNextFeature()\n k = k + 1\n pbar.update(k)\n pbar.finish()\n print '%s features loaded in MongoDb from shapefile.' % lyr.GetFeatureCount()", "def extractshapes(shape_path, wkt_or_json=\"JSON\"):\n l=[]\n driver = ogr.GetDriverByName('ESRI Shapefile')\n ds = driver.Open(shape_path, 0)\n if ds is None:\n print 'Can not open', ds\n sys.exit(1)\n lyr = ds.GetLayer()\n totfeats = lyr.GetFeatureCount()\n lyr.SetAttributeFilter('')\n print 'Starting to load %s of %s features in shapefile %s...' 
% (lyr.GetFeatureCount(), totfeats, lyr.GetName())\n pbar = ProgressBar(maxval=lyr.GetFeatureCount()).start()\n k=0\n # iterate the features and access its attributes (including geometry) to store them in MongoDb\n feat = lyr.GetNextFeature()\n while feat:\n geom = feat.GetGeometryRef()\n #mongogeom = geom.ExportToWkt()\n #mongogeom = geom.ExportToJson()\n #print geom.ExportToJson()\n if wkt_or_json.upper()==\"JSON\":\n g = geom.ExportToJson()\n elif wkt_or_json.upper()==\"WKT\":\n g = geom.ExportToWkt()\n \n # iterate the feature's fields to get its values and store them in MongoDb\n feat_defn = lyr.GetLayerDefn()\n for i in range(feat_defn.GetFieldCount()):\n value = feat.GetField(i)\n if isinstance(value, str):\n value = unicode(value, 'latin-1')\n field = feat.GetFieldDefnRef(i)\n fieldname = field.GetName()\n d[fieldname] = value\n l.append(g) \n feat.Destroy()\n feat = lyr.GetNextFeature()\n k = k + 1\n pbar.update(k)\n pbar.finish()\n return l", "def write_to_mongo(self, db):\n col_name = 'lat%d' % self.set_n\n mongo_col = getattr(db, col_name)\n mongo_col.drop()\n for k, v in self._lattice.iteritems():\n doc = {'set': str(sorted(k)), 'value': str(v)}\n mongo_col.insert(doc)", "def project_shapefile(self, shapefile):", "def Shapefile(**keywords):\n keywords['type'] = 'shape'\n return CreateDatasource(keywords)", "def TweetsToShapefile(infile, outfile, CRS):\n \n ### Read in tweets \n Tweets = ReadTweetJSON(infile)\n \n ### Create dataframe containing text, coords/geo, label. Add datetime later if needed\n text = extract_text_from_tweetlist(Tweets)\n coords = extract_geo_from_tweetlist(Tweets)\n place = extract_place_from_tweetlist(Tweets)\n label = []\n \n ### Make it a DF\n Thankful = pd.DataFrame(\n {'text':text,\n 'label':None,\n 'coords':coords,\n 'place':place\n })\n \n ### Keep only georeferenced tweets\n GeoOnly = Thankful.dropna(subset=['coords'])\n \n ### For some reason the long/lat are reversed in the tweet metadata. 
this is dumb.\n for i in GeoOnly['coords']:\n i.reverse()\n \n ### Apply topology to data\n GeoOnly['coords'] = GeoOnly['coords'].apply(Point)\n \n ### Create geodataframe\n GeoOnlyDF = geopandas.GeoDataFrame(GeoOnly, geometry='coords')\n \n GeoOnlyDF.crs = CRS\n \n ### Write to file\n GeoOnlyDF.to_file(outfile, driver='ESRI Shapefile', encoding = \"utf-8\")", "def jsonldify_collection(cls, collection: dict, locale_: str) -> dict:\n temporal_extent = collection.get('extent', {}).get('temporal', {})\n interval = temporal_extent.get('interval')\n if interval is not None:\n interval = f'{interval[0][0]}/{interval[0][1]}'\n\n spatial_extent = collection.get('extent', {}).get('spatial', {})\n bbox = spatial_extent.get('bbox')\n crs = spatial_extent.get('crs')\n hascrs84 = crs.endswith('CRS84')\n\n dataset = {\n \"@type\": \"Dataset\",\n \"@id\": f\"{cls.base_url}/collections/{collection['id']}\",\n \"name\": l10n.translate(collection['title'], locale_),\n \"description\": l10n.translate(collection['description'], locale_),\n \"license\": cls.fcmld['license'],\n \"keywords\": l10n.translate(collection.get('keywords'), locale_),\n \"spatial\": None if (not hascrs84 or not bbox) else [{\n \"@type\": \"Place\",\n \"geo\": {\n \"@type\": \"GeoShape\",\n \"box\": f'{_bbox[0]},{_bbox[1]} {_bbox[2]},{_bbox[3]}'\n }\n } for _bbox in bbox],\n \"temporalCoverage\": interval\n }\n dataset['url'] = dataset['@id']\n\n links = collection.get('links', [])\n if links:\n dataset['distribution'] = list(map(lambda link: {k: v for k, v in {\n \"@type\": \"DataDownload\",\n \"contentURL\": link['href'],\n \"encodingFormat\": link['type'],\n \"description\": l10n.translate(link['title'], locale_),\n \"inLanguage\": link.get(\n 'hreflang', l10n.locale2str(cls.default_locale)\n ),\n \"author\": link['rel'] if link.get(\n 'rel', None\n ) == 'author' else None\n }.items() if v is not None}, links))\n\n return dataset", "def mongodb_import(collection_name:str):\n import pymongo\n from pymongo import MongoClient\n import pandas as pd\n \n auth = \"______________\"\n db_name = 'COVID19-DB'\n \n client = pymongo.MongoClient(auth) # defaults to port 27017\n db = client[db_name]\n cdc_ts = pd.DataFrame(list(db[collection_name].find({})))\n return cdc_ts", "def run(input_shapefile: \"Input Shapefile\" =\"counties/ctygeom.shp\"):\n # Ceate outline geojson structure\n geojson = {\"type\": \"FeatureCollection\", \"features\": [], \"crs\": {\"type\": \"EPSG\", \"properties\": {\"code\": None}}, \"bbox\": []}\n\n num_ticks = 60\n # input_shapefile = input(\"Enter the path (if necessary) and name fo the input shapefile: \")\n\n # print(\"{}\".format(\"=\" * num_ticks))\n # print(\"Getting information for '{}'\".format(input_shapefile))\n # print(\"{}\\n\".format(\"-\" * num_ticks))\n logging.info(\"Getting information for '{}'\".format(input_shapefile))\n\n try:\n with fiona.open(input_shapefile, \"r\") as fh:\n logging.info(\"Driver: \\t{}\".format(fh.driver))\n logging.info(\"Encoding:\\t{}\".format(fh.encoding))\n logging.info(\"Geometry:\\t{}\".format(fh.schema[\"geometry\"]))\n logging.info(\"CRS: \\t{}\".format(fh.crs[\"init\"].upper()))\n logging.info(\"Bounds: \\t{}\".format(fh.bounds))\n logging.info(\"Features \\t{}\".format(len(fh)))\n\n print(\"Attribute Types\")\n\n # Add crs and bbox properties to the geojson structure\n geojson[\"crs\"][\"properties\"][\"code\"] = int(fh.crs[\"init\"].split(\":\")[1])\n geojson[\"bbox\"] = fh.bounds\n\n header_string = \"\"\n csv_header = \"\"\n for k, v in 
fh.schema[\"properties\"].items():\n print(\"\\t{:10}\\t{}\".format(k, v))\n header_string += \"\\t{:>30}\".format(k)\n csv_header += \"{}\\t\".format(k)\n print(\"\\n\"+header_string)\n\n with open(input_shapefile.split(\".\")[0]+\".csv\", \"w\") as fh_csv:\n fh_csv.write(\"{}\\n\".format(csv_header[:-1]))\n for feature in fh:\n # add each feature to geojson structure, Fiona gives it to us in a suitable format so no further processing\n # required\n geojson[\"features\"].append(feature)\n\n data_string = \"\"\n csv_data = \"\"\n for k,v in feature[\"properties\"].items():\n data_string+= \"\\t{:>30}\".format(v)\n csv_data += \"{}\\t\".format(v)\n print(data_string)\n fh_csv.write(\"{}\\n\".format(csv_data[:-1]))\n\n # Create output geojson file and convert geojson python stucture to json\n with open(input_shapefile.split(\".\")[0]+\".json\", \"w\") as fh:\n fh.write(json.dumps(geojson))\n\n except Exception as e:\n print(e)\n quit()\n finally:\n print(\"{}\".format(\"=\" * num_ticks))", "def test_geo_json_geometry_collection_serialization(self):\n\n # Construct dict forms of any model objects needed in order to build this model.\n\n geo_json_geometry_model = {} # GeoJsonGeometry\n geo_json_geometry_model['type'] = 'Point'\n geo_json_geometry_model['coordinates'] = [{ 'foo': 'bar' }]\n\n # Construct a json representation of a GeoJsonGeometryCollection model\n geo_json_geometry_collection_model_json = {}\n geo_json_geometry_collection_model_json['type'] = 'Point'\n geo_json_geometry_collection_model_json['geometries'] = [geo_json_geometry_model]\n\n # Construct a model instance of GeoJsonGeometryCollection by calling from_dict on the json representation\n geo_json_geometry_collection_model = GeoJsonGeometryCollection.from_dict(geo_json_geometry_collection_model_json)\n assert geo_json_geometry_collection_model != False\n\n # Construct a model instance of GeoJsonGeometryCollection by calling from_dict on the json representation\n geo_json_geometry_collection_model_dict = GeoJsonGeometryCollection.from_dict(geo_json_geometry_collection_model_json).__dict__\n geo_json_geometry_collection_model2 = GeoJsonGeometryCollection(**geo_json_geometry_collection_model_dict)\n\n # Verify the model instances are equivalent\n assert geo_json_geometry_collection_model == geo_json_geometry_collection_model2\n\n # Convert model instance back to dict and verify no loss of data\n geo_json_geometry_collection_model_json2 = geo_json_geometry_collection_model.to_dict()\n assert geo_json_geometry_collection_model_json2 == geo_json_geometry_collection_model_json", "def geojson_to_shp(infile,outfile):\n cmd = \"ogr2ogr\"\n driver = \"ESRI Shapefile\"\n\n st,r = sp.getstatusoutput(cmd + \" --version\")\n\n if st == 0:\n process = sp.Popen([cmd, \"-f\", driver, outfile, infile])\n else:\n print(\"Couldn't find {}, please install GDAL\".format(cmd))", "def getShapes(fileRef,shapeCol='null'):\n fileRef = getWebShape(fileRef)\n geoDF = gp.GeoDataFrame.from_file(fileRef).to_crs(epsg=crsESPG)\n shapeCol = getShapeCol(geoDF,shapeCol)\n geoSeries = geoDF[shapeCol]\n geoDF[shapeCol] = geoSeries\n return geoDF", "def export_collection(collection, output_fpath, fields=None, query=None,\n ftype=None, escape_dollar=None, verbose=None, auto=None):\n if ftype is None:\n ftype = 'csv'\n if escape_dollar is None:\n escape_dollar = True\n if '~' in output_fpath:\n output_fpath = os.path.expanduser(output_fpath)\n cmd = 'mongoexport --collection=\"{}\" --out=\"{}\"'.format(\n collection.name, output_fpath)\n msg = \"Exporting collection 
{} to {}\".format(collection.name, output_fpath)\n if fields:\n cmd += ' --fields=\"{}\"'.format(','.join(fields))\n msg += \", limiting to fields {}\".format(fields)\n if query:\n msg += \", with query {},\".format(query)\n query = strictify_query(query)\n query = \"{}\".format(query)\n query = query.replace(\" \", \"\")\n if escape_dollar:\n query = query.replace(\"$\", \"\\$\")\n assert isinstance(query, str)\n cmd += ' --query=\"{}\"'.format(query)\n if ftype:\n cmd += ' --type=\"{}\"'.format(ftype)\n msg += \" with {} file type,\".format(ftype)\n _mongo_cmd(cmd=cmd, msg=msg, db_obj=collection.database, mode='reading',\n verbose=verbose, auto=auto)", "def mongo_export(campaign_id, db=ZECLI_DB, collection=None,\n query='', file_type='csv'):\n\n uri = f'mongodb+srv://{USER_MONGODB}:{PSWD_MONGODB}\\\n @{CLUSTER_MONGODB}/{ZECLI_DB}'\n\n # This is the first row in the csv file\n fields = 'from_,date_,Cc_,Subject_,content_'\n file_name = campaign_id + '.' + file_type\n path = 'api/v1/static/'\n\n command = f'mongoexport --uri={uri} --db={db} --collection={collection}\\\n --fields={fields} --query={query} --type={file_type}\\\n --out={path}{file_name}'\n\n system(command)", "def insertIntoMongoDF(database,collection,mongo_conn,df):\r\n db = mongo_conn.get_database(database)\r\n db.get_collection(collection).drop()\r\n db.create_collection(collection)\r\n db.get_collection(collection).insert_many(df.to_dict(orient='record'))", "def load(self):\n # Get each document and place in collections list\n loaded_colls = []\n for doc in self._dbcollection.find():\n\n # decode and deserialize data\n collection = jsonpickle.decode(doc['jp_collection'], keys=True)\n\n # Add database id to collection object\n collection.db_id = doc['_id']\n loaded_colls.append(collection)\n if len(loaded_colls) <= 0:\n # Return empty collection\n return [Collection(\"My Collection\")]\n return loaded_colls", "def shp_to_json(base_path, shp_path, name):\n print \" -- Projecting shapefile to WGS-84 and converting to JSON\"\n\n # define ogr drivers\n shp_driver = ogr.GetDriverByName('ESRI Shapefile')\n json_driver = ogr.GetDriverByName('GeoJSON')\n\n # define the input layer\n shp = shp_driver.Open(shp_path)\n shp_lyr = shp.GetLayer()\n\n # create the output layer\n json_path = os.path.join(base_path, name + \".geojson\")\n if os.path.exists(json_path):\n json_driver.DeleteDataSource(json_path)\n json = json_driver.CreateDataSource(json_path)\n json_lyr = json.CreateLayer(json_path, geom_type=ogr.wkbMultiPolygon)\n json_lyr_defn = json_lyr.GetLayerDefn()\n\n # create the CoordinateTransformation\n json_ref = osr.SpatialReference()\n json_ref.ImportFromEPSG(4326)\n coord_trans = osr.CoordinateTransformation(\n shp_lyr.GetSpatialRef(), json_ref)\n\n # add fields to output layer\n shp_lyr_defn = shp_lyr.GetLayerDefn()\n for i in range(0, shp_lyr_defn.GetFieldCount()):\n field_defn = shp_lyr_defn.GetFieldDefn(i)\n json_lyr.CreateField(field_defn)\n\n # loop through the input features\n shp_feat = shp_lyr.GetNextFeature()\n while shp_feat:\n # reproject the input geometry\n geom = shp_feat.GetGeometryRef()\n geom.Transform(coord_trans)\n # create a new feature\n json_feat = ogr.Feature(json_lyr_defn)\n # set the feature's geometry and attributes\n json_feat.SetGeometry(geom)\n for i in range(0, json_lyr_defn.GetFieldCount()):\n json_feat.SetField(\n json_lyr_defn.GetFieldDefn(i).GetNameRef(),\n shp_feat.GetField(i))\n # add new feature to output Layer\n json_lyr.CreateFeature(json_feat)\n # destroy the features and get the next 
input feature\n json_feat.Destroy()\n shp_feat.Destroy()\n shp_feat = shp_lyr.GetNextFeature()\n\n # close the datasets\n shp.Destroy()\n json.Destroy()\n\n return json_path", "def dataframe_to_mongo(df, db_name, collection, host='localhost', port=27017, username=None, password=None):\n \n db = get_mongo_database(db_name, host, port, username, password)\n \n records = df.to_dict('records')\n db[collection].insert(records)", "def ShapefileToMemory(shapefileFolder,inFileName,outFileName):\n # open the inShapefile as the driver type\n inDriver = ogr.GetDriverByName('ESRI Shapefile')\n inDataSource = inDriver.Open(shapefileFolder + '\\\\' + inFileName, 0)\n inLayer = inDataSource.GetLayer()\n \n # create the output driver\n outDriver = ogr.GetDriverByName('MEMORY')\n \n print('Out driver set as ' + format(outDriver.GetName()))\n \n # create output shape file\n outDataSource = outDriver.CreateDataSource('memData_' + format(outFileName))\n outFile = outDataSource.CreateLayer(outFileName, inLayer.GetSpatialRef(), inLayer.GetGeomType())\n \n # Add input Layer Fields to the output Layer\n outFile.CreateFields(inLayer.schema)\n \n # Get the output Layer's Feature Definition\n outLayerDefn = outFile.GetLayerDefn()\n \n inLayer.ResetReading()\n \n # Add features to the output Layer\n for input_feat in inLayer:\n \n # Create output Feature\n outFeature = ogr.Feature(outLayerDefn)\n\n # Set geometry as centroid\n geom = input_feat.GetGeometryRef()\n outFeature.SetGeometry(geom)\n \n # Add field values from input Layer\n for i in range(0, outLayerDefn.GetFieldCount()):\n field_value = input_feat.GetField(i)\n outFeature.SetField(i, field_value)\n \n # Add new feature to output Layer\n outFile.CreateFeature(outFeature)\n \n # Save and close DataSources\n del input_feat\n del inLayer\n del inDataSource\n del inDriver\n \n return outDataSource,outFile" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Visits the current node, printing the value
def visit(self): print self.val
[ "def _print_value(node):\n if is_red(node):\n return \"{red_color}{value}{reset_color}\".format(\n red_color=TERM_RED_COLOR,\n value=node.value,\n reset_color=TERM_RESET_COLOR\n )\n else:\n return str(node.value)", "def print_node_data(self):\n current = self.head\n while current is not None:\n print current.data\n current = current.next", "def display(self):\n print(self.nodes)", "def __str__(self):\n return (f' The value of the node is {self.val}')", "def print_var(self):\n def helper(node):\n if self.is_leaf(node):\n return\n else:\n print(node.var)\n helper(node.get_left)\n helper(node.get_right)\n helper(self.root)\n return", "def pprint(self):\n def pprintStr(node):\n s = \"(\" + str(node.value) \n for action in node.children:\n s = s + \", \" + pprintStr(node.children[action])\n s = s + \")\"\n return s\n\n print pprintStr(self)", "def draw(self):\n print self.treeString()", "def print(self):\n print('(', end='')\n self.printBST()\n print(')', end=' ')", "def emit_node(node):\n yield str(node)", "def printTree(self):\n pass", "def print_disp(self):\n print \" Node Displacement\"\n for (i, u) in enumerate(self.disp):\n print \"{0:=5d} {1: .6f}\".format(i+1, u)", "def bft_print(self, starting_node):\n qq = Queue()\n qq.enqueue(starting_node)\n\n while qq.len() > 0:\n current = qq.dequeue()\n print(current.value)\n if current.left:\n qq.enqueue(current.left)\n if current.right:\n qq.enqueue(current.right)", "def get_value(self):\n return self.node.value()", "def retrieve_value(node):\n\n return node.value", "def output_graph(self):\n for n in self._nodes.values():\n print(str(n.get_name()) + \": \" + n.get_type())\n print(n.get_prior())\n print(n.get_neighbors())", "def update_value(self, value):\n self.node_value = value", "def print_root_node(self):\n print(self.root_node.name)", "def _display(self, pos, value):\n x, y = pos\n # Double x position because displayed maze is double-wide.\n console.set_display(y * 2 + 1, x * 4 + 2, value)", "def Print(self, indentionLevel, visited): \n\t\ts=''\n\t\tfor i in range(indentionLevel):\n\t\t\ts+=\"-\"\n#\t\tif self.id in visited:\n#\t\t\tprint \"^-%s(*%s*, %s, %s [%s])\" %(s, self.id, self.name, self.desc, self.type)\n#\t\t\treturn\n\t\tvisited.add(self.id)\n\t\tprint \"%s(*%s*, %s, %s [%s])\" %(s, self.id, self.name, self.desc, self.type)\n\t\tprint \"Genes: \", self.genes\n\t\t\n\t\tfor child in self.children:\n\t\t\tchild.Print(indentionLevel+1, visited)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build a string representation of a decision tree.
def build_decision_tree(t, v, d=''): n_nodes = t.tree_.node_count children_left = t.tree_.children_left children_right = t.tree_.children_right feature = t.tree_.feature threshold = t.tree_.threshold node_depth = np.zeros(shape=n_nodes) is_leaves = np.zeros(shape=n_nodes, dtype=bool) stack = [(0, -1)] while len(stack) > 0: node_id, parent_depth = stack.pop() node_depth[node_id] = parent_depth + 1 if (children_left[node_id] != children_right[node_id]): stack.append((children_left[node_id], parent_depth + 1)) stack.append((children_right[node_id], parent_depth + 1)) else: is_leaves[node_id] = True tree_output = '' tree_output += ( "The binary tree structure has %s nodes and has " "the following tree structure:\n" % n_nodes ) for i in range(n_nodes): if is_leaves[i]: tree_output += ( "%snode=%s leaf node (%s).\n" % (int(node_depth[i]) * "\t", i, v[i]) ) else: if d != '': cat_dict = d[int(feature[i]) + 1] category = cat_dict['header'] if d[int(feature[i]) + 1]['type'] == 'c': threshold_value = d[int(feature[i]) + 1]['values'][0] comp_operator = "==" else: threshold_value = threshold[i] comp_operator = "<=" else: category = feature[i] threshold_value = threshold[i] comp_operator = "<=" tree_output += ( "%snode=%s test node (%s): go to node %s if %s " "%s %s else to " "node %s.\n" % (int(node_depth[i]) * "\t", i, v[i], children_left[i], category, comp_operator, threshold_value, children_right[i] ) ) return tree_output
[ "def build_tree(data):\n attributes = list(data.columns.values)\n target = attributes[-1]\n return create_decision_tree(data,attributes,target,IG)", "def build_tree(data):\n #print(\"Creating node from data...\")\n #pp.pprint(data)\n node = Node()\n\n # Check to see if all the labels are the same, if so we are creating a RESULT\n # node\n result = majority_class(data)\n node.majority = result['majority']\n if result['unanimous']:\n #print(f\"RESULT: {result['majority']}\")\n node.type = 'RESULT'\n return node\n\n # If not we are creating a DECISION node\n node.type = 'DECISION'\n index = select_attribute(data)\n node.index = index\n node.branches = {}\n #print(f\"DECISION: Splitting on index {index}...\")\n groups = split_on_attribute(data, index)\n for attribute_value, group_data in groups.items():\n #print(f\"Creating {attribute_value} node\")\n node.branches[attribute_value] = build_tree(group_data)\n return node", "def construct(self, string):\n string.strip()\n\n for match in re.finditer(r\" OR \", string):\n pos = match.start()\n if string[:pos] and string[pos+4:] and not self.isWithinParantheses(string, pos):\n logging.debug(\"parsing OR\")\n self.operator = Operation.OR\n self.left = Tree(string[:pos])\n self.right = Tree(string[pos+4:])\n return False\n\n for match in re.finditer(r\" AND \", string):\n pos = match.start()\n if string[:pos] and string[pos+5:] and not self.isWithinParantheses(string, pos):\n logging.debug(\"parsing AND\")\n self.operator = Operation.AND\n self.left = Tree(string[:pos])\n self.right = Tree(string[pos+5:])\n return False\n\n for match in re.finditer(r\"NOT \", string):\n pos = match.start()\n if string[pos+4:] and not self.isWithinParantheses(string, pos):\n logging.debug(\"parsing NOT\")\n self.operator = Operation.NOT\n self.right = Tree(string[pos+4:])\n return False\n\n # By the time we get to handling the (), the string should\n # have been reduced to the from \"(....)\"\n p = p_scan.match(string)\n\n if p != None and p.groups()[1]:\n g = p.groups()\n logging.debug(\"INITIAL GROUPS ARE: \"+str(g))\n self.construct(g[1])\n return False\n \n\n\n self.string = string.strip()\n return False", "def build_tree(data, impurity, p_val=1):\r\n \r\n \r\n ###########################################################################\r\n # TODO: Implement the function. 
#\r\n ###########################################################################\r\n #\r\n root_of_tree = DecisionNode(None, None, data, p_val)\r\n #build_tree_helper will build and return the root of the tree recursively\r\n root = build_tree_helper(root_of_tree, impurity, p_val)\r\n ###########################################################################\r\n # END OF YOUR CODE #\r\n ###########################################################################\r\n return root", "def build_astng_tree(self):\n from logilab.astng.builder import ASTNGBuilder\n tree = ASTNGBuilder().string_build(self.sourcecode)\n return tree", "def input_tree(self):\n\n if self.starttreename:\n if self.starttreename[-3:] == 'xml':\n self.starttree = Phylo.read(self.starttreename, \"phyloxml\")\n elif self.starttreename[-6:] == 'newick':\n self.starttree = Phylo.read(self.starttreename, \"newick\")\n\n print \"Generating phylogenetic tree...\"\n\n if self.treetype[-3:] == 'xml':\n self.tree = Phylo.read(self.treetype, \"phyloxml\")\n elif self.treetype[-3:] == 'nwk':\n self.tree = Phylo.read(self.treetype, \"newick\")\n elif self.treetype == 'pars':\n self.parsimony_tree()\n elif self.treetype == 'PhyML':\n self.phyml_tree()\n else:\n self.raxml_tree()\n\n self.tree.collapse_all(lambda c: c.branch_length <= 0.0)\n self.treeparents = self.all_parents(self.tree)\n for btree in self.btrees:\n btree.collapse_all(lambda c: c.branch_length <= 0.0)\n self.btreeparents.append(self.all_parents(btree))", "def str2tree(s, binarize=False):\n if not s.startswith('('):\n s = \"( {} )\".format(s)\n if binarize:\n s = s.replace(\"(\", \"(X\")\n return Tree.fromstring(s)", "def test_build_paragraph_tree(self):\n text = \"This (a) is a good (1) test (2) of (3) some (b) body.\"\n self.assertEqual(\n self.regParser.build_tree(text),\n Node(\"This \", children=[\n Node(\"(a) is a good \", label=['a'], children=[\n Node(\"(1) test \", label=['a', '1']),\n Node(\"(2) of \", label=['a', '2']),\n Node(\"(3) some \", label=['a', '3'])\n ]),\n Node(\"(b) body.\", label=['b'])\n ])\n )", "def create_from_string(self, string, node):\n while len(string) > 0:\n if string[0] == ')':\n string.pop(0)\n return node\n\n newnode, has_children = self.behaviors.get_node_from_string(string[0], self.state_machine)\n string.pop(0)\n if has_children:\n #Node is a control node or decorator with children - add subtree via string and then add to parent\n newnode = self.create_from_string(string, newnode)\n node.add_child(newnode)\n else:\n #Node is a leaf/action node - add to parent, then keep looking for siblings\n node.add_child(newnode)\n\n #This return is only reached if there are too few up nodes\n return node", "def test_str(self):\n good = \"\"\"2\\t|\\t1\\t|\\tsuperkingdom\\t|\\t\\t|\\t0\\t|\\t0\\t|\\t11\\t|\\t0\\t|\\t0\\t|\\t0\\t|\\t0\\t|\\t0\\t|\\t\\t|\\n\"\"\"\n node = NcbiTaxon(good)\n self.assertEqual(str(node), good)\n root = \"\"\"1\\t|\\t1\\t|\\tno rank\\t|\\t\\t|\\t8\\t|\\t0\\t|\\t1\\t|\\t0\\t|\\t0\\t|\\t0\\t|\\t0\\t|\\t0\\t|\\t\\t|\"\"\"\n NcbiTaxon(root)\n self.assertEqual(str(root), root)", "def setUp(self):\n self.Empty = TreeNode()\n self.Single = TreeNode(Name='a')\n self.Child = TreeNode(Name='b')\n self.OneChild = TreeNode(Name='a', Children=[self.Child])\n self.Multi = TreeNode(Name = 'a', Children='bcd')\n self.Repeated = TreeNode(Name='x', Children='aaa')\n self.BigName = map(TreeNode, '0123456789')\n self.BigParent = TreeNode(Name = 'x', Children = self.BigName)\n self.Comparisons = map(TreeNode, 'aab')\n \n nodes = 
dict([(x, TreeNode(x)) for x in 'abcdefgh'])\n nodes['a'].append(nodes['b'])\n nodes['b'].append(nodes['c'])\n nodes['c'].append(nodes['d'])\n nodes['c'].append(nodes['e'])\n nodes['c'].append(nodes['f'])\n nodes['f'].append(nodes['g'])\n nodes['a'].append(nodes['h'])\n self.TreeNode = nodes\n self.TreeRoot = nodes['a']\n\n self.s = '((H,G),(R,M));'\n self.t = DndParser(self.s, TreeNode)\n self.s2 = '(((H,G),R),M);'\n self.t2 = DndParser(self.s2, TreeNode)\n self.s4 = '(((H,G),(O,R)),X);'\n self.t4 = DndParser(self.s4, TreeNode)", "def construct_decision_tree(self, data, outputs):\n self.features = data.shape[1]\n self.classes = max(outputs)\n if self.decisionTree is None:\n self.decisionTree = DecisionTreeClassifier()\n self.decisionTree.fit(data, outputs)", "def build_tree(self):\r\n self.root = best_split(self.train_data)\r\n split_branch(self.root, 1,self.settings)\r\n return self.root", "def create_simple_tree(x, y):\n dec_tree = tree.DecisionTreeClassifier()\n return dec_tree.fit(x, y)", "def build_tree(math_exp_string):\n if not validate_math_exp(math_exp_string):\n raise InvalidInput('Validation Error, one or more parenthesis are not closed properly')\n \n exp_list = filter_exp_list(math_exp_string)\n stack = Stack()\n current_node = Tree()\n\n for token in exp_list:\n\n if token == '(':\n current_node.add_child()\n stack.push(current_node)\n current_node = current_node.get_newborn_child()\n\n elif token == ')':\n if stack.size():\n current_node = stack.pop()\n\n elif token in operator_map.keys():\n if current_node.get_val():\n if current_node.get_val() == token:\n current_node.add_child()\n stack.push(current_node)\n current_node = current_node.get_newborn_child()\n else:\n parent = Tree(token)\n parent.update_child(current_node)\n parent.add_child()\n stack.push(parent)\n current_node = parent.get_newborn_child()\n else:\n current_node.set_val(token)\n current_node.add_child()\n stack.push(current_node)\n current_node = current_node.get_newborn_child()\n\n else:\n try:\n current_node.set_val(float(token))\n except ValueError, e:\n logging.info(e.message)\n current_node.set_val(token)\n current_node = stack.pop()\n\n return current_node", "def makeCode(root,string,dic = {}):\r\n #Base case\r\n # If the left and the right of the root are none\r\n # Then it is a leaf node so we just print its value\r\n if root.left == None and root.right == None:\r\n # Make the string its Huffman Code for future use\r\n dic[root.data] = string\r\n return dic\r\n\r\n # if we go to left then add \"0\" to the code.\r\n # if we go to the right add \"1\" to the code.\r\n \r\n makeCode(root.left, string+\"0\",dic)\r\n makeCode(root.right, string+\"1\",dic)", "def build_tree(t):\n root = ParseTree(None)\n\n if isinstance(t, str):\n root = ParseTree(t)\n\n elif t is not None:\n root = ParseTree(None)\n for c in t:\n if c is '[':\n node = build_tree(range_to_id(t))\n root.children.append(node)\n break\n else:\n node = build_tree(c)\n root.children.append(node)\n\n return root", "def create_tree(data_):\n if isinstance(data_, dict):\n return OpTreeNode(data_[LABEL_OP],\n create_tree(data_[LABEL_LHS]),\n create_tree(data_[LABEL_RHS]))\n return OpTreeNode(data_)", "def tree_or_string(s):\n if s.startswith(\"(\"):\n return Tree.fromstring(s)\n return s" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a simple decision tree.
def create_simple_tree(x, y):
    dec_tree = tree.DecisionTreeClassifier()
    return dec_tree.fit(x, y)
[ "def build_tree(data):\n attributes = list(data.columns.values)\n target = attributes[-1]\n return create_decision_tree(data,attributes,target,IG)", "def build_tree(data, impurity, p_val=1):\r\n \r\n \r\n ###########################################################################\r\n # TODO: Implement the function. #\r\n ###########################################################################\r\n #\r\n root_of_tree = DecisionNode(None, None, data, p_val)\r\n #build_tree_helper will build and return the root of the tree recursively\r\n root = build_tree_helper(root_of_tree, impurity, p_val)\r\n ###########################################################################\r\n # END OF YOUR CODE #\r\n ###########################################################################\r\n return root", "def build_decision_tree(t, v, d=''):\n n_nodes = t.tree_.node_count\n children_left = t.tree_.children_left\n children_right = t.tree_.children_right\n feature = t.tree_.feature\n threshold = t.tree_.threshold\n\n node_depth = np.zeros(shape=n_nodes)\n\n is_leaves = np.zeros(shape=n_nodes, dtype=bool)\n stack = [(0, -1)]\n\n while len(stack) > 0:\n node_id, parent_depth = stack.pop()\n node_depth[node_id] = parent_depth + 1\n\n if (children_left[node_id] != children_right[node_id]):\n stack.append((children_left[node_id], parent_depth + 1))\n stack.append((children_right[node_id], parent_depth + 1))\n else:\n is_leaves[node_id] = True\n tree_output = ''\n tree_output += (\n \"The binary tree structure has %s nodes and has \"\n \"the following tree structure:\\n\" % n_nodes\n )\n\n for i in range(n_nodes):\n if is_leaves[i]:\n tree_output += (\n \"%snode=%s leaf node (%s).\\n\"\n % (int(node_depth[i]) * \"\\t\",\n i,\n v[i])\n )\n else:\n if d != '':\n cat_dict = d[int(feature[i]) + 1]\n category = cat_dict['header']\n if d[int(feature[i]) + 1]['type'] == 'c':\n threshold_value = d[int(feature[i]) + 1]['values'][0]\n comp_operator = \"==\"\n else:\n threshold_value = threshold[i]\n comp_operator = \"<=\"\n else:\n category = feature[i]\n threshold_value = threshold[i]\n comp_operator = \"<=\"\n tree_output += (\n \"%snode=%s test node (%s): go to node %s if %s \"\n \"%s %s else to \"\n \"node %s.\\n\"\n % (int(node_depth[i]) * \"\\t\",\n i,\n v[i],\n children_left[i],\n category,\n comp_operator,\n threshold_value,\n children_right[i]\n )\n )\n\n return tree_output", "def construct_decision_tree(self, data, outputs):\n self.features = data.shape[1]\n self.classes = max(outputs)\n if self.decisionTree is None:\n self.decisionTree = DecisionTreeClassifier()\n self.decisionTree.fit(data, outputs)", "def build_tree(data):\n #print(\"Creating node from data...\")\n #pp.pprint(data)\n node = Node()\n\n # Check to see if all the labels are the same, if so we are creating a RESULT\n # node\n result = majority_class(data)\n node.majority = result['majority']\n if result['unanimous']:\n #print(f\"RESULT: {result['majority']}\")\n node.type = 'RESULT'\n return node\n\n # If not we are creating a DECISION node\n node.type = 'DECISION'\n index = select_attribute(data)\n node.index = index\n node.branches = {}\n #print(f\"DECISION: Splitting on index {index}...\")\n groups = split_on_attribute(data, index)\n for attribute_value, group_data in groups.items():\n #print(f\"Creating {attribute_value} node\")\n node.branches[attribute_value] = build_tree(group_data)\n return node", "def decisionTreeClassifier(self):\n name = \"DT\"\n model = tree.DecisionTreeClassifier()\n\n model.fit(self.X_train, self.y_train)\n 
y_pred = model.predict(self.X_test)\n\n print(\"***** Decision Tree Classifier ******* \\n\")\n\n # Display results\n cv_scores = self._repr(model, y_pred)\n\n # Append value to the results\n self.results.append((name, cv_scores.mean()))\n\n # Save the model\n self.saveModel(model, name)\n return", "def decision_tree(data):\n\tfeat_names = [\n\t\t\t\t\"age\",\n\t\t\t\t\"bp\",\n\t\t\t\t\"sg\",\n\t\t\t\t\"al\",\n\t\t\t\t\"su\", # normal->1, abnormal->0\n\t\t\t\t\"rbc\", # normal->1, abnormal->0\n\t\t\t\t\"pc\", # present->1, notpresent->0\n\t\t\t\t\"pcc\", # present->1, notpresent->0\n\t\t\t\t\"ba\",\n\t\t\t\t\"bgr\",\n\t\t\t\t\"bu\",\n\t\t\t\t\"sc\",\n\t\t\t\t\"sod\",\n\t\t\t\t\"pot\",\n\t\t\t\t\"hemo\",\n\t\t\t\t\"pcv\",\n\t\t\t\t\"wbcc\",\n\t\t\t\t\"rbcc\",\n\t\t\t\t\"htn\", #yes->1, no->0\n\t\t\t\t\"dm\", #yes->, no->0\n\t\t\t\t\"cad\", # yes->1, no->0\n\t\t\t\t\"appet\", # good->1, poor->0\n\t\t\t\t\"pe\", # yes->1, no->0\n\t\t\t\t\"ane\", # yes->1, no->0\n\t]\n\n\t# extract the first 24 column (cut out the class one)\n\tdata_set = np.delete(data, 24, 1)\n\ttarget = data[:, 24]\n\t# training phase\n\tclf = tree.DecisionTreeClassifier(\"entropy\")\n\tclf = clf.fit(data_set, target)\n\t\n\tprint clf.feature_importances_\n\t\n\t#view decision tree\n\tdot_data = tree.export_graphviz(clf, \n\t\t\t\t\t\tout_file = \"Tree.dot\", \n\t\t\t\t\t\tfeature_names = feat_names,\n\t\t\t\t\t\tclass_names = [\"notcdk\", \"cdk\"],\n\t\t\t\t\t\tfilled = True,\n\t\t\t\t\t\trounded = True,\n\t\t\t\t\t\tspecial_characters = True,\n\t)", "def create_default_tree() -> SkillDecisionTree:\n a = SkillDecisionTree(RogueAttack(), f6, 6)\n b = SkillDecisionTree(RogueAttack(), f6, 8)\n c = SkillDecisionTree(RogueSpecial(), f6, 7)\n d = SkillDecisionTree(RogueSpecial(), f5, 4, [a])\n e = SkillDecisionTree(MageSpecial(), f3, 2, [b])\n f = SkillDecisionTree(RogueAttack(), f4, 1, [c])\n g = SkillDecisionTree(MageAttack(), f2, 3, [d])\n return SkillDecisionTree(MageAttack(), f1, 5, [g, e, f])", "def decision_tree(df):\n X_train, X_test, y_train, y_test, X, y = split(df)\n clf = DecisionTreeClassifier()\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n print(\"Decision Tree Accuracy:\", metrics.accuracy_score(y_test, y_pred))\n print(\"Confusion Matrix:\", confusion_matrix(y_test, y_pred))\n print(\"Precision:\", precision_score(y_test, y_pred))\n print(\"Recall:\", recall_score(y_test, y_pred))\n\n dot_data = StringIO()\n export_graphviz(clf, out_file=dot_data,\n filled=True, rounded=True, feature_names=features, class_names=['Yes', 'No'])\n graph = pydotplus.graph_from_dot_data(dot_data.getvalue())\n graph.write_png('tree_viz.png')\n Image(graph.create_png())", "def test_create_decision_tree_using_post(self):\n pass", "def decision_tree_classifier(**kwargs):\n return base_models.DecisionTreeClassifier(**kwargs)", "def train_decision_tree():\n return train_decision_tree_service()", "def rand_decision_tree(data, max_levels):\n if max_levels <= 0: # the maximum level depth is reached\n return make_leaf(data)\n\n if threshold is None: # there is no split that gains information\n return make_leaf(data)\n tree = Tree()\n tree.leaf = False\n tree.feature, tree.threshold = find_rand_split(data)\n data_left, data_right = split_data(data, tree.feature, tree.threshold)\n tree.left = rand_decision_tree(data_left, max_levels - 1)\n tree.right = rand_decision_tree(data_right, max_levels - 1)\n return tree", "def test_create_decision_tree_result_using_post(self):\n pass", "def make_tree(dataset):\n\treturn 
make_tree_helper(dataset)", "def build_tree_helper(root_of_tree, impurity, p_val = 1):\r\n \r\n #check purity of current DicisionNode\r\n #TODO make condition for the case of pruning (where p_val != 1) as this means not all leaves have impurity zero.\r\n if (impurity(root_of_tree.data) == 0):\r\n root_of_tree.isLeaf = True\r\n return root_of_tree\r\n \r\n #get best best_feature and best_threshold for given data\r\n best_feature, best_threshold = get_best_feature(root_of_tree.data, impurity)\r\n #set feature and values fields of this decisionnNode object.\r\n root_of_tree.feature = best_feature\r\n root_of_tree.value = best_threshold\r\n #get children after split \r\n child1, child2 = get_best_features_children(root_of_tree.data, best_feature, best_threshold)\r\n #get best values for children in order to make them correct desicionNodes\r\n child1_best_feature, child1_best_threshold = get_best_feature(child1, impurity)\r\n child2_best_feature, child2_best_threshold = get_best_feature(child2, impurity)\r\n #create decisonNodes for child1,child2\r\n child1_dec_node = DecisionNode(child1_best_feature, child1_best_threshold, child1, p_val)\r\n child2_dec_node = DecisionNode(child2_best_feature, child2_best_threshold, child2, p_val)\r\n #assign them their parents\r\n child1_dec_node.set_parent(root_of_tree)\r\n child2_dec_node.set_parent(root_of_tree)\r\n child1_dec_node.root = False\r\n child2_dec_node.root = False\r\n \r\n #recursively add children to the roots values.\r\n #if chi_square == 1 then continue building as normal\r\n if(p_val == 1):\r\n root_of_tree.add_child(build_tree_helper(child1_dec_node, impurity, p_val))\r\n root_of_tree.add_child(build_tree_helper(child2_dec_node, impurity, p_val))\r\n else:\r\n if(pre_prune(root_of_tree) >= chi_table[p_val]):\r\n root_of_tree.isLeaf = True\r\n root_of_tree.add_child(build_tree_helper(child1_dec_node, impurity, p_val))\r\n root_of_tree.add_child(build_tree_helper(child2_dec_node, impurity, p_val))\r\n \r\n return root_of_tree", "def decisionBest(self):\n print(\"\\n\"+\"DECISION TREE\"+\"\\n\")\n self.model = tree.DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=46,\n max_features='auto', max_leaf_nodes=None,\n min_impurity_decrease=0.0, min_impurity_split=None,\n min_samples_leaf=1, min_samples_split=2,\n min_weight_fraction_leaf=0.0, presort=False,\n random_state=42, splitter='best')\n self.evaluate()", "def setup_classification_tree(\n min_examples_at_split=30, min_examples_at_leaf=30):\n\n return DecisionTreeClassifier(\n criterion='entropy', min_samples_split=min_examples_at_split,\n min_samples_leaf=min_examples_at_leaf, random_state=RANDOM_SEED\n )", "def train(self, retrain=False):\n if self.root and not retrain:\n raise Exception(\"DECISION TREE::ERROR::ALREADY TRAINED CLASSIFIER AND RETRAIN PARAMETER IS FALSE\")\n print \"Generating Decision Tree...\"\n groups, index, value = self.best_split(self.points)\n self.root = TreeNode(groups, index, value)\n self.nodes.append(self.root)\n print \"...\"\n tree = self.make_tree(self.root, 1, self.max_depth, self.min_group_size)\n print \"Done\"\n return tree" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a DataFrame with fake "new experimental data" for testing purposes. Does this by instantiating a temporary CFM model and using the future_data it generates as a template. Then the template is filled with fake data.
def create_fake_CFM_exp_data(cfm_target_col='BL1-A', cfm_experimental_condition_cols=None):
    cfm_data = load_CFM_demo_data()
    if cfm_experimental_condition_cols is None:
        cfm_experimental_condition_cols = ['strain_name', 'inducer_concentration_mM']

    temp_cfm = CircuitFluorescenceModel(initial_data=cfm_data, exp_condition_cols=cfm_experimental_condition_cols,
                                        target_col=cfm_target_col)
    new_experiment_data_fake = temp_cfm.future_data.copy()
    # dropping dist_position since future data won't have it. Instead the CFM evaluate method will generate dist_position
    new_experiment_data_fake.drop(columns=["dist_position"], inplace=True)
    cfm_target_min = cfm_data[cfm_target_col].min()
    cfm_target_max = cfm_data[cfm_target_col].max()
    new_experiment_data_fake[cfm_target_col] = np.random.randint(cfm_target_min, cfm_target_max, len(new_experiment_data_fake))
    # create fake replicates
    new_experiment_data_fake["replicate"] = np.random.randint(1, 4, len(new_experiment_data_fake))
    return new_experiment_data_fake
[ "def raw_data(self) -> pd.DataFrame:\n\n min_date = \"2016-01-01\"\n max_date = \"2019-12-13\"\n raw_data = [\n self.generate_data_for_one_customer(i, min_date, max_date)\n for i in range(100)\n ]\n raw_data = pd.concat(raw_data, axis=0)\n for i in range(10):\n raw_data[f\"feat_{i}\"] = np.random.randn(raw_data.shape[0])\n return raw_data", "def create_dummy_df(samples: int = 10) -> pd.DataFrame:\n random_number_gen = np.random.RandomState(12)\n \n feature_1 = 10000 * random_number_gen.binomial(n = 5, p = 0.3, size = samples)\n feature_2 = 750 * random_number_gen.power(3, size = samples)\n feature_3 = 55 * random_number_gen.randn(samples)\n feature_4 = random_number_gen.beta(1.5, 4.6, size = samples)\n\n features = {\n 'binomial': feature_1,\n 'power': feature_2,\n 'randn': feature_3,\n 'beta': feature_4\n }\n\n df = pd.DataFrame.from_dict(features)\n \n return df", "def create_pyspark_dataframe_with_mocked_personal_data() -> PySparkDataFrame:\n spark = SparkSession.builder.appName(\"snowflake.com\").getOrCreate()\n schema = StructType(\n [\n StructField(\"firstname\", StringType(), True),\n StructField(\"middlename\", StringType(), True),\n StructField(\"lastname\", StringType(), True),\n StructField(\"id\", StringType(), True),\n StructField(\"gender\", StringType(), True),\n StructField(\"salary\", IntegerType(), True),\n ]\n )\n return spark.createDataFrame(data=PERSONAL_DATA, schema=schema)", "def _create_fcst_df(target_date, origin_df, fill=None):\n oi = origin_df.index\n if not isinstance(oi, pd.MultiIndex):\n if isinstance(origin_df, pd.Series):\n if fill is None:\n template = pd.Series(np.zeros(len(target_date)), index=target_date)\n else:\n template = pd.Series(fill, index=target_date)\n template.name = origin_df.name\n return template\n else:\n if fill is None:\n template = pd.DataFrame(\n np.zeros((len(target_date), len(origin_df.columns))),\n index=target_date,\n columns=origin_df.columns.to_list(),\n )\n else:\n template = pd.DataFrame(\n fill, index=target_date, columns=origin_df.columns.to_list()\n )\n return template\n else:\n idx = origin_df.index.to_frame(index=False)\n instance_names = idx.columns[0:-1].to_list()\n time_names = idx.columns[-1]\n idx = idx[instance_names].drop_duplicates()\n\n timeframe = pd.DataFrame(target_date, columns=[time_names])\n\n target_frame = idx.merge(timeframe, how=\"cross\")\n freq_inferred = target_date[0].freq\n mi = (\n target_frame.groupby(instance_names, as_index=True)\n .apply(\n lambda df: df.drop(instance_names, axis=1)\n .set_index(time_names)\n .asfreq(freq_inferred)\n )\n .index\n )\n\n if fill is None:\n template = pd.DataFrame(\n np.zeros((len(target_date) * idx.shape[0], len(origin_df.columns))),\n index=mi,\n columns=origin_df.columns.to_list(),\n )\n else:\n template = pd.DataFrame(\n fill,\n index=mi,\n columns=origin_df.columns.to_list(),\n )\n\n template = template.astype(origin_df.dtypes.to_dict())\n return template", "def create_fake_HRM_exp_data(hrm_target_col=\"logFC_wt\", hrm_experimental_condition_cols=None):\n hrm_data = load_HRM_demo_data()\n if hrm_experimental_condition_cols is None:\n hrm_experimental_condition_cols = [\"ca_concentration\", \"iptg_concentration\", \"va_concentration\",\n \"xylose_concentration\", \"timepoint_5.0\", \"timepoint_18.0\"]\n\n temp_hrm = HostResponseModel(initial_data=hrm_data, exp_condition_cols=hrm_experimental_condition_cols,\n target_col=hrm_target_col)\n new_experiment_data_fake = temp_hrm.future_data.copy()\n hrm_target_min = hrm_data[hrm_target_col].min()\n hrm_target_max 
= hrm_data[hrm_target_col].max()\n new_experiment_data_fake[hrm_target_col] = np.random.randint(hrm_target_min, hrm_target_max, len(new_experiment_data_fake))\n return new_experiment_data_fake", "def createDataFrame():\n\n df = pd.DataFrame(data = {\"Calories\": None, \"Water / g\":None, \"Fat / g\": None, \"Protein / g\": None, \"Cholesterol / mg\":None}, index = DFmanager.getTimeIndex(), dtype = \"float64\")\n df.dropna(inplace = True)\n return df", "def create_test_data():\n # create example data from scratch\n local_records = [\n Row(id=1, first_name='Dan', second_name='Germain', floor=1),\n Row(id=2, first_name='Dan', second_name='Sommerville', floor=1),\n Row(id=3, first_name='Alex', second_name='Ioannides', floor=2),\n Row(id=4, first_name='Ken', second_name='Lai', floor=2),\n Row(id=5, first_name='Stu', second_name='White', floor=3),\n Row(id=6, first_name='Mark', second_name='Sweeting', floor=3),\n Row(id=7, first_name='Phil', second_name='Bird', floor=4),\n Row(id=8, first_name='Kim', second_name='Suter', floor=4)\n ]\n\n df = spark.createDataFrame(local_records)\n\n # write to Parquet file format\n (df\n .coalesce(1)\n .write\n .parquet('tests/test_data/employees', mode='overwrite'))\n\n # create transformed version of data\n # df_tf = transform_data(df)\n\n # write transformed version of data to Parquet\n (df\n .coalesce(1)\n .write\n .parquet('tests/test_data/employees_report', mode='overwrite'))\n\n spark.stop()\n return None", "def create_empty_df():\n return pd.DataFrame()", "def create_dataframe():\n # Import Libraries\n import pandas as pd\n # Function\n df_cols = [\n 'sequence', # STR\n 'on_site_score' # FLOAT\n ]\n df = pd.DataFrame(columns=df_cols)\n \"\"\"\n implement memory optimization by assigning appropriate dtype\n \"\"\"\n return df", "def mock_preprocessed_content():\n\n return pd.DataFrame(data={\n 'id': ['article_1', 'article_2'],\n 'processed_content': ['some words', 'some more words']\n })", "def fake_data():\n return MetaData(id=FAKE_DATA_ID, path=\"file:///path/to/fake.bam\", sample=\"fake\")", "def edata_fixture():\n edata_pre = amici.ExpData(2, 0, 0,\n np.array([0., 0.1, 0.2, 0.5, 1., 2., 5., 10.]))\n edata_pre.setObservedData([1.5] * 16)\n edata_pre.fixedParameters = np.array([5., 20.])\n edata_pre.fixedParametersPreequilibration = np.array([0., 10.])\n edata_pre.reinitializeFixedParameterInitialStates = True\n\n # edata for postequilibration\n edata_post = amici.ExpData(2, 0, 0,\n np.array([float('inf')] * 3))\n edata_post.setObservedData([0.75] * 6)\n edata_post.fixedParameters = np.array([7.5, 30.])\n\n # edata with both equilibrations\n edata_full = amici.ExpData(2, 0, 0,\n np.array([0., 0., 0., 1., 2., 2., 4., float('inf'), float('inf')]))\n edata_full.setObservedData([3.14] * 18)\n edata_full.fixedParameters = np.array([1., 2.])\n edata_full.fixedParametersPreequilibration = np.array([3., 4.])\n edata_full.reinitializeFixedParameterInitialStates = True\n\n return edata_pre, edata_post, edata_full", "def test_price_features_should_work_for_future_month():\n sales_df = dummy_data()\n X_df = sales_df.copy()\n new_dbn = X_df.date_block_num.max() + 1\n idx1 = X_df.index.max() + 1\n X_df.loc[idx1, :] = [new_dbn, 1, 0, 180, 5]\n\n idx2 = X_df.index.max() + 1\n X_df.loc[idx2, :] = [new_dbn, 2, 0, 180, 5]\n\n new_X_df = get_price_features(sales_df, X_df)\n assert new_X_df.loc[idx1, 'avg_item_price'] == 130\n assert new_X_df.loc[idx1, 'std_item_price'] == np.float32(np.std([100, 160], ddof=1))\n assert new_X_df.loc[idx1, 'last_item_price'] == 130\n\n 
assert new_X_df.loc[idx2, 'avg_item_price'] == 130\n assert new_X_df.loc[idx2, 'std_item_price'] == np.float32(np.std([100, 160], ddof=1))\n assert new_X_df.loc[idx2, 'last_item_price'] == 100", "def _create_initial_frame_dataset(self):\n dataset = self._create_dataset(\n shuffle_files=self._simulation_random_starts\n )\n if self._simulation_random_starts:\n dataset = dataset.shuffle(buffer_size=1000)\n return dataset.repeat().batch(self._batch_size)", "def create_rand_int_df():\n try:\n df_rand = pd.DataFrame(\n np.random.randint(0, 999, size=(100000, 10)), columns=list(\"ABCDEFGHIJ\")\n )\n return df_rand\n except Exception as error:\n logger.exception(f\"Unable to create random integer DataFrame: {error}\")\n raise", "def generate_flight_data_fixture(self, data):\n return copy(data)", "def recreate_data_used_for_training(model_uid, model_features):\n path = os.path.join(model_uid, 'data')\n aws.download_directory_from_s3('churn-model-data-science-modeling', path)\n x_train = joblib.load(os.path.join(path, 'x_train.pkl'))\n x_train.reset_index(inplace=True, drop=True)\n x_test = joblib.load(os.path.join(path, 'x_test.pkl'))\n x_test.reset_index(inplace=True, drop=True)\n x_df = pd.concat([x_train, x_test], axis=0)\n x_df = x_df[model_features]\n return x_df", "def generate_fake_data(n_utterances=3, n_frames_utterance=10, n_features=10, \n n_utt_other_class=1):\n n_rows = n_utterances * n_frames_utterance\n fake_features = np.random.randn(n_rows, n_features)\n fake_targets = np.zeros(n_rows)\n fake_targets[-n_utt_other_class*n_frames_utterance:] = 1\n return fake_features, fake_targets", "def test_summary_to_dataframe(setup, mock_generate_reports):\n mock_generate_reports(SUMMARY_REPORT)\n\n rf = Reportforce(\"fake@username.com\", \"1234\", \"token\")\n summary_df = rf.get_report(\"ID\", id_column=\"label1\")\n\n summary_df.to_pickle(\"tests/data/summary_df.pickle\")\n\n pd.testing.assert_frame_equal(expected_summary_df, summary_df)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a DataFrame with fake "new experimental data" for testing purposes. Does this by instantiating a temporary HRM model and using the future_data it generates as a template. Then the template is filled with fake data.
def create_fake_HRM_exp_data(hrm_target_col="logFC_wt", hrm_experimental_condition_cols=None):
    hrm_data = load_HRM_demo_data()
    if hrm_experimental_condition_cols is None:
        hrm_experimental_condition_cols = ["ca_concentration", "iptg_concentration", "va_concentration",
                                           "xylose_concentration", "timepoint_5.0", "timepoint_18.0"]

    temp_hrm = HostResponseModel(initial_data=hrm_data, exp_condition_cols=hrm_experimental_condition_cols,
                                 target_col=hrm_target_col)
    new_experiment_data_fake = temp_hrm.future_data.copy()
    hrm_target_min = hrm_data[hrm_target_col].min()
    hrm_target_max = hrm_data[hrm_target_col].max()
    new_experiment_data_fake[hrm_target_col] = np.random.randint(hrm_target_min, hrm_target_max, len(new_experiment_data_fake))
    return new_experiment_data_fake
[ "def create_fake_CFM_exp_data(cfm_target_col='BL1-A', cfm_experimental_condition_cols=None):\n cfm_data = load_CFM_demo_data()\n if cfm_experimental_condition_cols is None:\n cfm_experimental_condition_cols = ['strain_name', 'inducer_concentration_mM']\n\n temp_cfm = CircuitFluorescenceModel(initial_data=cfm_data, exp_condition_cols=cfm_experimental_condition_cols,\n target_col=cfm_target_col)\n new_experiment_data_fake = temp_cfm.future_data.copy()\n # dropping dist_position since future data won't have it. Instead the CFM evaluate method will generate dist_position\n new_experiment_data_fake.drop(columns=[\"dist_position\"], inplace=True)\n cfm_target_min = cfm_data[cfm_target_col].min()\n cfm_target_max = cfm_data[cfm_target_col].max()\n new_experiment_data_fake[cfm_target_col] = np.random.randint(cfm_target_min, cfm_target_max, len(new_experiment_data_fake))\n # create fake replicates\n new_experiment_data_fake[\"replicate\"] = np.random.randint(1, 4, len(new_experiment_data_fake))\n return new_experiment_data_fake", "def raw_data(self) -> pd.DataFrame:\n\n min_date = \"2016-01-01\"\n max_date = \"2019-12-13\"\n raw_data = [\n self.generate_data_for_one_customer(i, min_date, max_date)\n for i in range(100)\n ]\n raw_data = pd.concat(raw_data, axis=0)\n for i in range(10):\n raw_data[f\"feat_{i}\"] = np.random.randn(raw_data.shape[0])\n return raw_data", "def create_dummy_df(samples: int = 10) -> pd.DataFrame:\n random_number_gen = np.random.RandomState(12)\n \n feature_1 = 10000 * random_number_gen.binomial(n = 5, p = 0.3, size = samples)\n feature_2 = 750 * random_number_gen.power(3, size = samples)\n feature_3 = 55 * random_number_gen.randn(samples)\n feature_4 = random_number_gen.beta(1.5, 4.6, size = samples)\n\n features = {\n 'binomial': feature_1,\n 'power': feature_2,\n 'randn': feature_3,\n 'beta': feature_4\n }\n\n df = pd.DataFrame.from_dict(features)\n \n return df", "def hourly_data_reg():\n return generate_df_with_reg_for_tests(\n freq=\"H\",\n periods=24*500,\n train_start_date=datetime.datetime(2018, 1, 1),\n conti_year_origin=2018)", "def createDataFrame():\n\n df = pd.DataFrame(data = {\"Calories\": None, \"Water / g\":None, \"Fat / g\": None, \"Protein / g\": None, \"Cholesterol / mg\":None}, index = DFmanager.getTimeIndex(), dtype = \"float64\")\n df.dropna(inplace = True)\n return df", "def create_test_data():\n # create example data from scratch\n local_records = [\n Row(id=1, first_name='Dan', second_name='Germain', floor=1),\n Row(id=2, first_name='Dan', second_name='Sommerville', floor=1),\n Row(id=3, first_name='Alex', second_name='Ioannides', floor=2),\n Row(id=4, first_name='Ken', second_name='Lai', floor=2),\n Row(id=5, first_name='Stu', second_name='White', floor=3),\n Row(id=6, first_name='Mark', second_name='Sweeting', floor=3),\n Row(id=7, first_name='Phil', second_name='Bird', floor=4),\n Row(id=8, first_name='Kim', second_name='Suter', floor=4)\n ]\n\n df = spark.createDataFrame(local_records)\n\n # write to Parquet file format\n (df\n .coalesce(1)\n .write\n .parquet('tests/test_data/employees', mode='overwrite'))\n\n # create transformed version of data\n # df_tf = transform_data(df)\n\n # write transformed version of data to Parquet\n (df\n .coalesce(1)\n .write\n .parquet('tests/test_data/employees_report', mode='overwrite'))\n\n spark.stop()\n return None", "def create_empty_df():\n return pd.DataFrame()", "def create_pyspark_dataframe_with_mocked_personal_data() -> PySparkDataFrame:\n spark = 
SparkSession.builder.appName(\"snowflake.com\").getOrCreate()\n schema = StructType(\n [\n StructField(\"firstname\", StringType(), True),\n StructField(\"middlename\", StringType(), True),\n StructField(\"lastname\", StringType(), True),\n StructField(\"id\", StringType(), True),\n StructField(\"gender\", StringType(), True),\n StructField(\"salary\", IntegerType(), True),\n ]\n )\n return spark.createDataFrame(data=PERSONAL_DATA, schema=schema)", "def generate_mock_data():\n current_app.config['USE_MOCK_DATA'] = False\n\n def _format_path(fn: str) -> str:\n return os.path.join(current_app.config['DATA_STORE'], fn)\n\n from .data.usgs import get_live_usgs_data\n from .data.usgs import USGS_STATIC_FILE_NAME\n from .data.hobolink import get_live_hobolink_data\n from .data.hobolink import HOBOLINK_STATIC_FILE_NAME\n\n df_hobolink = get_live_hobolink_data()\n df_usgs = get_live_usgs_data()\n\n fname_hobolink = _format_path(HOBOLINK_STATIC_FILE_NAME)\n df_hobolink.to_pickle(fname_hobolink)\n click.echo(f'Wrote HOBOlink data to {fname_hobolink!r}')\n\n fname_usgs = _format_path(USGS_STATIC_FILE_NAME)\n df_usgs.to_pickle(fname_usgs)\n click.echo(f'Wrote USGS data to {fname_hobolink!r}')", "def create_dataframe():\n # Import Libraries\n import pandas as pd\n # Function\n df_cols = [\n 'sequence', # STR\n 'on_site_score' # FLOAT\n ]\n df = pd.DataFrame(columns=df_cols)\n \"\"\"\n implement memory optimization by assigning appropriate dtype\n \"\"\"\n return df", "def create_rand_int_df():\n try:\n df_rand = pd.DataFrame(\n np.random.randint(0, 999, size=(100000, 10)), columns=list(\"ABCDEFGHIJ\")\n )\n return df_rand\n except Exception as error:\n logger.exception(f\"Unable to create random integer DataFrame: {error}\")\n raise", "def edata_fixture():\n edata_pre = amici.ExpData(2, 0, 0,\n np.array([0., 0.1, 0.2, 0.5, 1., 2., 5., 10.]))\n edata_pre.setObservedData([1.5] * 16)\n edata_pre.fixedParameters = np.array([5., 20.])\n edata_pre.fixedParametersPreequilibration = np.array([0., 10.])\n edata_pre.reinitializeFixedParameterInitialStates = True\n\n # edata for postequilibration\n edata_post = amici.ExpData(2, 0, 0,\n np.array([float('inf')] * 3))\n edata_post.setObservedData([0.75] * 6)\n edata_post.fixedParameters = np.array([7.5, 30.])\n\n # edata with both equilibrations\n edata_full = amici.ExpData(2, 0, 0,\n np.array([0., 0., 0., 1., 2., 2., 4., float('inf'), float('inf')]))\n edata_full.setObservedData([3.14] * 18)\n edata_full.fixedParameters = np.array([1., 2.])\n edata_full.fixedParametersPreequilibration = np.array([3., 4.])\n edata_full.reinitializeFixedParameterInitialStates = True\n\n return edata_pre, edata_post, edata_full", "def test_create_from_dataframe_run(self, runs_gridsearch: RunList):\n\n df = runs_gridsearch.to_dataframe(include_config=False)\n ex = V(Experiment.from_dataframe(df))\n # Each run becomes an individual hypothesis.\n assert len(ex.hypotheses) == 12", "def test_summary_to_dataframe(setup, mock_generate_reports):\n mock_generate_reports(SUMMARY_REPORT)\n\n rf = Reportforce(\"fake@username.com\", \"1234\", \"token\")\n summary_df = rf.get_report(\"ID\", id_column=\"label1\")\n\n summary_df.to_pickle(\"tests/data/summary_df.pickle\")\n\n pd.testing.assert_frame_equal(expected_summary_df, summary_df)", "def generate_fake_data(n_utterances=3, n_frames_utterance=10, n_features=10, \n n_utt_other_class=1):\n n_rows = n_utterances * n_frames_utterance\n fake_features = np.random.randn(n_rows, n_features)\n fake_targets = np.zeros(n_rows)\n 
fake_targets[-n_utt_other_class*n_frames_utterance:] = 1\n return fake_features, fake_targets", "def _create_initial_frame_dataset(self):\n dataset = self._create_dataset(\n shuffle_files=self._simulation_random_starts\n )\n if self._simulation_random_starts:\n dataset = dataset.shuffle(buffer_size=1000)\n return dataset.repeat().batch(self._batch_size)", "def get_data_df(self):\n if self.plot_type_data == ptc.REPLICATE:\n y = self.get_replicate_y_data()\n sd = self.get_replicate_sd()\n else:\n y = [self.y_data]\n sd = self.sd\n simulation_condition_id = \\\n self.line_data[ptc.SIMULATION_CONDITION_ID].iloc[0]\n observable_id = self.line_data[ptc.OBSERVABLE_ID].iloc[0]\n df = pd.DataFrame(\n {C.Y: y, C.NAME: self.legend_name,\n C.IS_SIMULATION: self.is_simulation,\n C.DATASET_ID: self.dataset_id,\n C.SD: sd, C.SEM: self.sem,\n C.SIMULATION_CONDITION_ID: simulation_condition_id,\n C.OBSERVABLE_ID: observable_id})\n return df", "def _create_fcst_df(target_date, origin_df, fill=None):\n oi = origin_df.index\n if not isinstance(oi, pd.MultiIndex):\n if isinstance(origin_df, pd.Series):\n if fill is None:\n template = pd.Series(np.zeros(len(target_date)), index=target_date)\n else:\n template = pd.Series(fill, index=target_date)\n template.name = origin_df.name\n return template\n else:\n if fill is None:\n template = pd.DataFrame(\n np.zeros((len(target_date), len(origin_df.columns))),\n index=target_date,\n columns=origin_df.columns.to_list(),\n )\n else:\n template = pd.DataFrame(\n fill, index=target_date, columns=origin_df.columns.to_list()\n )\n return template\n else:\n idx = origin_df.index.to_frame(index=False)\n instance_names = idx.columns[0:-1].to_list()\n time_names = idx.columns[-1]\n idx = idx[instance_names].drop_duplicates()\n\n timeframe = pd.DataFrame(target_date, columns=[time_names])\n\n target_frame = idx.merge(timeframe, how=\"cross\")\n freq_inferred = target_date[0].freq\n mi = (\n target_frame.groupby(instance_names, as_index=True)\n .apply(\n lambda df: df.drop(instance_names, axis=1)\n .set_index(time_names)\n .asfreq(freq_inferred)\n )\n .index\n )\n\n if fill is None:\n template = pd.DataFrame(\n np.zeros((len(target_date) * idx.shape[0], len(origin_df.columns))),\n index=mi,\n columns=origin_df.columns.to_list(),\n )\n else:\n template = pd.DataFrame(\n fill,\n index=mi,\n columns=origin_df.columns.to_list(),\n )\n\n template = template.astype(origin_df.dtypes.to_dict())\n return template", "def test_data():\n\t\n\t# Create DataFrame.\n\tdf = data.manipulate()\n\n\t# Check type of output.\n\tassert isinstance(df, pd.DataFrame)\n\n\t# Check column count.\n\tassert len(df.columns) == 44" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Scrapes imdb top 250 page to find rank, title, year, rating, no of users rated, url for user reviews
def scrape_imdb_top250():
    # IMDB top 250 Movies
    source = requests.get('https://www.imdb.com/chart/top/?ref_=nv_mv_250').text
    soup = BeautifulSoup(source, 'lxml')
    table = soup.find('tbody', class_='lister-list')
    rank = 1
    movies = []
    for rowRaw in table.find_all('tr'):
        row = rowRaw.find('td', class_='titleColumn')
        title = row.a.text
        year = row.span.text.strip("()")
        ratingRaw = rowRaw.find('td', class_='ratingColumn imdbRating')
        rating = float(ratingRaw.text)
        no_of_users = ratingRaw.strong['title'].split(' ')[3]
        no_of_users = int(no_of_users.replace(',', ''))
        review_url = row.a['href']
        movie = (
            rank,
            title,
            year,
            rating,
            no_of_users,
            review_url
        )
        rank += 1
        movies.append(movie)
    return movies
[ "def scrape_user_reviews(movies):\n user_reviews = []\n for movie in movies:\n review_count = 0\n review_movie_rank = movie[1]\n review_movie = movie[2]\n review_url = movie[6]\n # form the proper url\n review_url = f\"https://www.imdb.com/{review_url}reviews?sort=reviewVolume&dir=desc&ratingFilter=0\"\n # sleep for random time to avoid IP Block\n # sleep(randint(1, 5))\n response = requests.get(review_url).text\n soup = BeautifulSoup(response, 'lxml')\n\n for review_container in soup.find_all('div', class_='imdb-user-review'):\n review_meta = review_container.find('div', class_='display-name-date')\n review_title = review_container.a.text.strip('\\n')\n review_date = review_container.find('span', class_='review-date').text\n reviewer_rating = review_container.find('div', class_='ipl-ratings-bar')\n if reviewer_rating == None:\n reviewer_rating = ''\n else:\n reviewer_rating = reviewer_rating.text.strip('\\n')\n reviewer = review_meta.a.text\n review_content = review_container.find('div', class_='content').div.text\n review = (\n review_count,\n review_movie,\n review_movie_rank,\n review_title,\n reviewer_rating,\n reviewer,\n review_date,\n review_content\n )\n review_count += 1\n print(review_movie, review_count)\n user_reviews.append(review)\n return user_reviews", "def scrap_by_users(user_url):\n user_id = user_url.split('?')[-1].split('=')[-1]\n add_start = 'https://www.yelp.com/user_details_reviews_self?rec_pagestart='\n response = requests.get(user_url)\n if response.status_code == 200:\n soup = BeautifulSoup(response.content, 'html.parser')\n count_rev = int(soup.select_one('li.review-count').select_one('strong').text)\n revs = []\n time.sleep(1)\n if count_rev > 0:\n\n\n raw_reviews = soup.select('div.review')\n ### check that reviews > 0\n for row in raw_reviews:\n rev = parse_review(row)\n rev['user_id'] = user_id\n revs.append(rev)\n\n for page in range(10, min(count_rev,50), 10):\n url_add = add_start+str(page)+'&userid='+user_id\n response = requests.get(url_add)\n if response.status_code == 200:\n soup = BeautifulSoup(response.content, 'html.parser')\n\n raw_reviews = soup.select('div.review')\n if raw_reviews is None:\n break\n for row in raw_reviews:\n rev = parse_review(row)\n rev['user_id'] = user_id\n revs.append(rev)\n time.sleep(1)\n return(revs)\n\n else:\n return None", "def get_top10_films_by_genre_name(current_user, genre_name):\r\n\r\n url = \"https://unogsng.p.rapidapi.com/search\"\r\n\r\n genre_id = str(get_genre_id_by_name(genre_name))\r\n genre_id = genre_id.replace('{','')\r\n genre_id = genre_id.replace('}','')\r\n\r\n parameter_list = {\"genrelist\": f\"{genre_id}\",\"orderby\":\"rating\",\r\n \"limit\":\"10\"} \r\n\r\n querystring = {}\r\n\r\n # Fill in the entries one by one if they have values\r\n for key in parameter_list:\r\n if parameter_list[key]:\r\n if parameter_list[key] != \"\":\r\n querystring[key] = parameter_list[key]\r\n\r\n headers = {\r\n 'x-rapidapi-key': \"\",\r\n 'x-rapidapi-host': \"unogsng.p.rapidapi.com\"\r\n }\r\n\r\n headers['x-rapidapi-key'] = os.environ.get('API_TOKEN_1') \r\n\r\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\r\n\r\n #take the response and unpack it into a workable format\r\n search_results = json.loads(response.text)\r\n search_results_values = search_results.values()\r\n\r\n #extract the embedded dictionary from 2 levels down in results\r\n try:\r\n listify_results = list(search_results_values)\r\n result_list = listify_results[2] \r\n\r\n except IndexError:\r\n return 
{\"error\": \"your search was too specific and returned no results. please try again.\"}\r\n \r\n\r\n #then wrap it back into a dictionary using index/result number as key\r\n recommendations = dict()\r\n\r\n for index, movie in enumerate(result_list):\r\n recommendations[index + 1] = movie\r\n\r\n # store results, qstr, and login_user in the query_history table\r\n add_query_to_query_history(current_user, str(querystring), \r\n str(recommendations), str(genre_id), None, None, \r\n None, None, None, None, None, None)\r\n\r\n return recommendations", "def parse_professor_page(url):\n r = requests.get(url)\n soup = BeautifulSoup(r.content, 'html.parser')\n reviews_heading = soup.find('div', attrs={'data-table':'rating-filter'})\n if reviews_heading is None:\n return 0, []\n num_reviews = int(reviews_heading.text.split()[0])\n reviews_table = soup.find('table', attrs={'class':'tftable'})\n reviews = []\n for row in reviews_table.find_all('tr')[1:]:\n if row.get('id'):\n reviews.append(_parse_reviews_row(row))\n return num_reviews, reviews", "def extract_page_reviews(\n page_source_soup : BeautifulSoup,\n verbose : int = 0):\n reviews = page_source_soup.find_all(name=\"div\", attrs={\"data-hook\":\"review\"})\n contents = []\n for i, review in enumerate(reviews):\n try:\n content = {}\n profile = review.find(name=\"a\", attrs={\"class\":\"a-profile\"})[\"href\"]\n name = review.find(name=\"span\", attrs={\"class\":\"a-profile-name\"}).text\n rating = review.find(name=\"a\", attrs={\"class\":\"a-link-normal\"})[\"title\"]\n title = review.find(name=\"a\", attrs={\"data-hook\":\"review-title\"}).text\n date = review.find(name=\"span\", attrs={\"data-hook\":\"review-date\"}).text\n body = review.find(name=\"span\", attrs={\"data-hook\":\"review-body\"})\n helpful_count = review.find(name=\"span\", attrs={\"data-hook\":\"helpful-vote-statement\"})\n images = review.find(name=\"div\", attrs={\"class\":\"review-image-tile-section\"})\n content[\"reviewer_id\"] = extract_profile_id(profile) \n content[\"name\"] = name\n content[\"rating\"] = rating\n content[\"title\"] = title\n content[\"date\"] = date\n content[\"helpful_count\"] = helpful_count\n content[\"body\"] = body\n content[\"images\"] = images\n contents.append(content)\n except Exception as e:\n print(f\"Failed review extraction from page source, exception : {e}\")\n return contents", "def data_from_reviews(base_url):\n\n # COMPLETE 1 OF 2 FILL-INS IN THE WORKING URL\n for die in range(1, 7):\n \n # COMPLETE 2 0F 2 FILL-INS IN THE WORKING URL\n for page in inftyrage():\n url = base_url.format(die, page)\n \n soup = BeautifulSoup(get(url).text, 'lxml')\n \n # CHECK IF WE HAVE MOVED PAST THE FINAL PAGE, BY GETTING ERROR404 \n status = soup.find('body', attrs = {'class':'error404'})\n if status is not None:\n break\n \n # GET ALL MEDIA (MOVIES/SERIES/GAMES) ON PAGE\n media = soup.find_all('article')\n\n for article in media:\n \n # GET ARTICLE URL FOR RETRIEVING FACTS\n url = article.find('h2').a['href']\n\n # GET FACTS\n data = get_facts(url)\n data['terningkast'] = die\n yield data", "def scrape_top_250(soup):\n movie_urls = set()\n # beginning of the movie link\n link = \"https://www.imdb.com\"\n\n # searches for all movielinks one the page\n for title in soup.find_all('a'):\n url = title.get('href')\n\n # check if an url was extracted\n if url == None:\n pass\n # check if the url refers to moviepage\n elif \"chttp_tt\" in url:\n # completes the url to the moviepage\n complete_url = link + url\n movie_urls.add(complete_url)\n\n return 
movie_urls", "def retrieve_reviews_ratings(soup, idx):\n # Set container holding review details\n container = soup.findAll('div', class_=\"_2wrUUKlw _3hFEdNs8\")\n\n page_reviews = []\n page_ratings = []\n page_titles = []\n\n # Find all levels of rating\n rating_re = compile(\"ui_bubble_rating (.*)\")\n\n for item in container:\n \n rating_raw = item.find('span', class_=rating_re)\n rating_int = int(rating_raw.attrs['class'][1].split(\"_\")[1][-2])\n page_ratings.append(rating_int)\n\n review = item.find('q', class_=\"IRsGHoPm\").text\n \n # Check for more text after \"Read More\" activated, complete review text\n expanded = item.find('span', class_=\"_1M-1YYJt\")\n if expanded:\n review += expanded.text\n page_reviews.append(review)\n\n # Save review title\n title = item.find('a', class_='ocfR3SKN').text\n page_titles.append(title)\n\n # For monitoring during runtime\n print('page', idx + 1)\n \n return page_reviews, page_ratings, page_titles", "def fetch_data(movies):\n reviews = list()\n for key, val in movies.items():\n\n # sending request to access the particular url\n movie_url = val[1]\n print(\"Getting Data of Movie : {}\".format(key))\n response = requests.get(movie_url)\n soup = BeautifulSoup(response.content, 'lxml')\n content = soup.find_all('section', class_ = \"ipc-page-section ipc-page-section--base\")\n \n review_url = soup.find_all('a', class_ = \"ipc-title ipc-title--section-title ipc-title--base ipc-title--on-textPrimary ipc-title-link-wrapper\")\n review_url = \"https://www.imdb.com\" + review_url[2]['href']\n \n review_url_response = requests.get(review_url)\n review_url_soup = BeautifulSoup(review_url_response.content, 'lxml')\n \n # here we have got several reviews from a single movie.\n total_reviews = review_url_soup.find_all('div', class_ = \"review-container\")\n # here, it made us necessary to iterate a loop, because it contains several reviews, and every review is important to us.\n for review in total_reviews:\n # using exception handling in case, if there is no title or review or rating is not present.\n try:\n rating = review.find(\"div\", class_ = \"ipl-ratings-bar\")\n rating = rating.find('span').text.strip().split(\"/\")[0]\n except:\n rating = \" \"\n try:\n title = review.find('a', class_ = \"title\").text.strip()\n except: \n title = \"NaN\"\n try:\n review_content = review.find('div', class_ = \"text show-more__control\").text.strip()\n except:\n review_content = None\n \n\n # Appending data to the list\n reviews.append((rating, title, review_content))\n \n print(\"Total Reviews Fetch from the data are : {}\".format(len(reviews)))\n \n return reviews # return type: list of tuples", "def userReviews():\n usersList = files.readUsers()\n beersList = files.readBeers()\n breweryList = files.readBreweries()\n breweryToBeers = files.readBreweryToBeers()\n\n total = 0\n totalUsersComplete = 0\n for userHash, user in usersList.iteritems():\n totalUsersComplete += 1\n # if the data has been normalized, old data will not\n # have usernames. Ignore older users which may have\n # already gotten reviews\n if user.username:\n userId = user.uid\n username = user.username\n user.username = None\n userReviewCount = 0\n offsetTotal = 0\n ratings = {}\n\n print 'Processing ' + str(userId) + ': ' + username\n # each response returns at most 25 reviews. 
To get more user\n # reviews, call again with an offset get at most 50 reviews\n # from the same user\n while (userReviewCount < 2):\n print username + ': ' + str(userReviewCount + 1)\n data = untappd.getUserReviewData(username, offsetTotal)\n offset = data['response']['beers']['count']\n offsetTotal += offset\n reviews = data['response']['beers']['items']\n for review in reviews:\n userRating = review['rating_score']\n if userRating > 0:\n beerInfo = review['beer']\n breweryInfo = review['brewery']\n # fill in beer information\n if hash(str(beerInfo['bid'])) not in beersList:\n stylesList = []\n style = unicode(beerInfo['beer_style']).encode(\"utf-8\")\n styles = style.lower().title().split('/')\n for style in styles:\n style = style.strip()\n stylesList.append(style)\n beerAttribs = {\n 'bid': str(beerInfo['bid']),\n 'name': unicode(beerInfo['beer_name']).encode(\"utf-8\"),\n 'label': beerInfo['beer_label'],\n 'abv': beerInfo['beer_abv'],\n 'ibu': beerInfo['beer_ibu'],\n 'style': stylesList,\n 'description': unicode(beerInfo['beer_description']).encode(\"utf-8\"),\n 'rating': beerInfo['rating_score'],\n 'numRatings': 1,\n 'brewery': str(breweryInfo['brewery_id'])\n }\n beer = UT.UntappdBeer(beerAttribs)\n beersList[hash(beer.bid)] = beer\n else:\n beersList[hash(str(beerInfo['bid']))].numRatings += 1\n # fill in brewery information\n if hash(str(breweryInfo['brewery_id'])) not in breweryList:\n breweryAttribs = {\n 'breweryId': str(breweryInfo['brewery_id']),\n 'name': unicode(breweryInfo['brewery_name']).encode(\"utf-8\"),\n 'label': breweryInfo['brewery_label'],\n 'country': unicode(breweryInfo['country_name']).encode(\"utf-8\"),\n 'location': unicode(breweryInfo['location']).encode(\"utf-8\")\n }\n brewery = UT.UntappdBrewery(breweryAttribs)\n breweryList[hash(brewery.breweryId)] = brewery\n\n # map breweery_id to a list of beers produced there\n if hash(str(breweryInfo['brewery_id'])) not in breweryToBeers:\n # store the current beer in a list of beers of\n # the brewery\n breweryToBeers[hash(str(breweryInfo['brewery_id']))] = {str(breweryInfo['brewery_id']): [str(beerInfo['bid'])]}\n else:\n # add current beer to brewery's list of beers\n breweryToBeers[hash(str(breweryInfo['brewery_id']))][str(breweryInfo['brewery_id'])].append(str(beerInfo['bid']))\n\n # add list of beer ratings to user\n ratings[str(beerInfo['bid'])] = userRating\n userReviewCount += 1\n user.ratings = ratings\n\n # store the dictionaries after new data so user doesn't kill process before writing\n # with open('../data/users.json', 'wb') as usersFile:\n # json = jpickle.encode(usersList)\n # usersFile.write(json)\n # with open('../data/beers.json', 'wb') as beersFile:\n # json = jpickle.encode(beersList)\n # beersFile.write(json)\n # with open('../data/breweries.json', 'wb') as breweriesFile:\n # json = jpickle.encode(breweryList)\n # breweriesFile.write(json)\n # with open('../data/breweryToBeers.json', 'wb') as breweryToBeersFile:\n # json = jpickle.encode(breweryToBeers)\n # breweryToBeersFile.write(json)\n\n # if the offset is less than 25, then there are no more reviews to retrieve\n if offset < 25:\n break\n writeJSONFile('../data/users.json', usersList)\n writeJSONFile('../data/beers.json', beersList)\n writeJSONFile('../data/breweries.json', breweryList)\n writeJSONFile('../data/breweryToBeers.json', breweryToBeers)\n\n total += len(ratings)\n print str(userId) + ': ' + username + ', Processed: ' + str(len(ratings)) + ' reviews'\n print 'Total Reviews: ' + str(total)\n print 'Total Users Completed: ' + 
str(totalUsersComplete)\n sleep(37 * (userReviewCount))\n else:\n total += len(user.ratings)", "def scrape_reviews_helper(isbn, page):\n book_page_url = f\"https://www.goodreads.com/api/reviews_widget_iframe?did=0&format=html&\" \\\n f\"hide_last_page=true&isbn={isbn}&links=660&min_rating=&page={page}&review_back=fff&stars=000&text=000\"\n print(book_page_url)\n start_review_page_scrape = time()\n webpage = requests_session.get(book_page_url)\n if webpage.status_code == 404:\n return\n soup = BeautifulSoup(webpage.content, \"html.parser\")\n names_raw = soup.find_all('a', itemprop=\"discussionUrl\") # find names of the review authors\n names = [name.text for name in names_raw]\n\n ratings_raw = soup.find_all('span', class_=\"gr_rating\") # find ratings of the review\n ratings = [rating.text.count(\"★\") for rating in ratings_raw] # convert starred rating into integer value\n\n full_review_texts = []\n full_review_links = soup.find_all('link',itemprop=\"url\") # find links to the full reviews\n\n iteration = 0\n for full_review_link in full_review_links:\n full_review_texts.append(find_full_review_text(full_review_link,iteration/10 + page))\n iteration +=1\n print(f\"-Finished page({page}) surface scraping in {time() - start_review_page_scrape:.2f}\")\n\n start_computing=time()\n computed_reviews = zip(names, ratings, dask.compute(*full_review_texts))\n print(f\"--Finished {page} full text computing in {time() - start_computing:.2f}\")\n\n # start_adding_time = time()\n for review_tuple in computed_reviews:\n reviews.add_review(Review(review_tuple))\n # print(f\"Added reviews(page {page}) to the ReviewList in {time() - start_adding_time:.2f}\")", "def get_games(game_dictionary, url, page_no, more):\n # Need this to trick Metacritic into not realizing its a bot script\n request = urllib2.Request(url, headers={ 'User-Agent': 'Mozilla/5.0' })\n\n try:\n page = urllib2.urlopen(request)\n except urllib2.URLError, e:\n if hasattr(e, 'reason'):\n print 'Failed to reach url'\n print 'Reason: ', e.reason\n sys.exit()\n elif hasattr(e, 'code'):\n if e.code == 404:\n print 'Error: ', e.code\n sys.exit()\n\n\n content = page.read()\n soup = BeautifulSoup(content, \"html5lib\")\n\n try:\n if soup.find_all(\"p\", class_=\"no_data\")[0].text == 'No Results Found':\n more = False\n\n except:\n # Pulling the titles, with exception in order to filter out other titles that aren't part of table\n # i.e. 
ads for new releases\n raw_title = soup.find_all(\"div\", class_=\"basic_stat product_title\")\n titles = []\n for i in raw_title:\n items = i.text.split('\\n')\n try:\n text = items[2].strip(\" \")\n except:\n continue\n titles.append(text)\n\n # Extract the average Metascore\n raw_metascore = soup.find_all(\"div\", class_=re.compile(\"^metascore_w\"))\n metascores=[]\n for i in raw_metascore:\n metascores.append(i.text)\n\n # Average user score and release dates stored in the same item for extraction\n raw_user_date = soup.find_all(\"div\", class_=\"more_stats condensed_stats\")\n scores = []\n dates = []\n for i in raw_user_date:\n items = i.text.split('\\n')\n user_score = items[4].strip(\" \") # 4th item of splitted string contains scores\n scores.append(user_score)\n release = items[9].strip(\" \") # 9th item of splitted string contains release date\n dates.append(release)\n\n\n for x in range(len(titles)):\n game_dictionary[titles[x]] = {\"Metascore\": metascores[x], \"UserScore\": scores[x], \"Release\": dates[x]}\n\n wait_time = round(max(0, 1 + random.gauss(0,0.5)), 2)\n time.sleep(wait_time)\n\n return game_dictionary, page_no, more", "def get_recent_reviews(n=None):\n\n #start = time.clock()\n\n base_url = 'https://www.residentadvisor.net/'\n reviews = []\n\n review_sources = ['album','single','recommend']\n for review_source in review_sources:\n review_url = urljoin(base_url,'reviews.aspx?format={0}'.format(review_source))\n\n # fetch the review page\n request = Request(url=review_url,\n data=None,\n headers={'User-Agent': 'the-pulse/reviews-v0.1'})\n soup = BeautifulSoup(urlopen(request).read(), \"lxml\")\n\n urls = [x.a['href'] for x in soup.findAll('article')]\n \n today = datetime.today().date()\n yesterday = (datetime.today() - timedelta(1)).date()\n \n keep_going = True \n i = 0\n imax = 5\n # loop through reviews, newest first, keeping all the ones published yesterday\n while keep_going or i >= imax:\n review = get_review(urljoin(base_url,urls[i]))\n i += 1\n\t #print(i)\n if i >= imax:\n keep_going = False\n if review.date_reviewed() == yesterday: \n # the first review was published yesterday, so check for more\n reviews.append(review) \n elif review.date_reviewed() == today:\n # skip over the reviews today, not ideal but allows us to be certain that \n # no reviews are missed since ra releases reviews intermittently throughout the day\n pass\n else:\n # the current review is old, jump out\n keep_going = False\n\n #print(reviews)\n #print('Got {} RA reviews in {:.0} seconds'.format(len(reviews), time.clock() - start))\n return remove_dupes(reviews)", "def get_info_about_imdb_video(video_url):\n webpage = requests.get(video_url)\n soup = BS(webpage.content, 'html.parser')\n\n # extract title and year\n h1_title = soup.find('div', class_='title_wrapper').h1\n title = h1_title.contents[0].strip()\n year = h1_title.span.a.get_text()\n\n # extract genres\n genres = list()\n div_storyline = soup.find('div', id='titleStoryLine')\n for div in (div_storyline.find_all('div', class_='see-more inline canwrap')):\n if 'Genre' in div.h4.string:\n for a_genre in div.find_all('a'):\n genres.append(a_genre.get_text().strip())\n break\n genres = ';'.join(genres)\n\n # extract languagem runtime, budget and revenue\n language, runtime, budget, revenue = '', '', '', ''\n div_details = soup.find('div', id='titleDetails')\n for div_txt_block in div_details.find_all('div', class_='txt-block'):\n try:\n attr_type = div_txt_block.h4.get_text()[:-1]\n except Exception:\n continue\n if attr_type 
not in ['Language', 'Budget', 'Runtime', 'Cumulative Worldwide Gross']:\n continue\n if attr_type == 'Language':\n language = div_txt_block.a.get_text()\n if attr_type == 'Budget':\n budget_str = div_txt_block.contents[2]\n budget = budget_str[budget_str.find('$')+1:].strip().replace(',', '')\n elif attr_type == 'Runtime':\n runtime = div_txt_block.time.string.split()[0]\n else:\n revenue_str = div_txt_block.contents[2]\n revenue = revenue_str[revenue_str.find('$')+1:].strip().replace(',', '')\n\n # extract directors, writers and actors\n directors, writers, actors = get_persons_related_to_imdb_video(video_url)\n\n return title, year, genres, language, runtime, budget, revenue, directors, writers, actors", "def requestReviewInfoUnOfficial(businessID):\n reviewList = []\n #7 variables recorded from each review\n #reviewList.append([\"name\",\"location\",\"friend-count\",\"review-count\",\"photo-count\", \"elite-year\", \"rating\",\"date\",\"comment\"])\n #url for first page\n url = \"https://www.yelp.com/biz/{0}?sort_by=date_desc&start=0\".format(businessID)\n page = requests.get(url)\n #Uses beautifulsoup library to retrieve page and parsers html tree\n soup = BeautifulSoup(page.content, 'html.parser')\n #finds number of review pages to iterate through for the individual restaurant \n pageNum = getPageNumber(soup)\n print(\"{0} Number of pages: {1}\".format(businessID,pageNum))\n #increments of 20, each review page for a restaurant contains 20 reviews per a page\n for i in range(0,40,20): #currently only looking at first 2 pages since database already exists and program now justs updates. \n print(i)\n if i != 0: #for all pages that follow, must update soup\n url = \"https://www.yelp.com/biz/{0}?sort_by=date_desc&start={1}\".format(businessID,i)\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n\n #finds div with list of reviews, list is further organized as an array of divs \n reviewers = soup.find_all('div', class_= \"review review--with-sidebar\")\n numReviews = len(reviewers)\n \n for i in range(numReviews):#iterates through list of reviews organized by divs\n review = getSingleReview(reviewers[i])\n reviewList.append(review)\n \n return reviewList", "def collectAllReviews(url, appName, savePath):\n groupName = removeSpecialChars(appName.lower())\n groupName = groupName.replace(' ', '-')\n\n pageNumber = 1\n reviewsUrl = url + '/group/' + groupName + '?reviews=1&page='\n\n while scrapeReviewsOnPage(reviewsUrl, pageNumber, savePath):\n pageNumber += 1", "def top_movies(self):\n top_movies = {}\n data = requests.get(self.url.format('Top250Movies',self.api_key)).json()\n # Loops through the dictionary\n for item in data['items']:\n top_movies.setdefault(item['rank'], item['title'])\n\n return top_movies", "def movie_info(movie_url):\r\n\t\treq = Request(movie_url, headers={'User-Agent': 'Mozilla/5.0'})\r\n\t\twebpage = urlopen(req).read()\r\n\t\tparse = SoupStrainer(class_=['product_page_title oswald', 'score fl', 'runtime', 'ranking_title'])\r\n\t\tprofile = BeautifulSoup(webpage, 'lxml', parse_only=parse)\r\n\r\n\t\ttitle_info = profile.find('div', class_='product_page_title oswald').h1.text\r\n\t\truntime = profile.find('div', class_='runtime').text.split(\":\")[1].strip()\r\n\t\tscores = profile.find_all('div', class_='score fl')\r\n\t\tmetascore = scores[0].div.text\r\n\t\ttry:\r\n\t\t\tuserscore = scores[1].div.text\r\n\t\texcept:\r\n\t\t\tuserscore = \"tbd\"\r\n\r\n\t\tmovie_dict = {\"title\": title_info, \"metascore\": metascore, \"userscore\": 
userscore, \r\n\t\t\"runtime\": runtime}\r\n\t\tfor x in movie_dict:\r\n\t\t\tif movie_dict[x] is None or movie_dict[x] == \"tbd\":\r\n\t\t\t\tmovie_dict[x] = \"N/A\"\r\n\t\tprint(movie_dict[\"title\"] + \"\\n\" + \"Metascore: \" + movie_dict[\"metascore\"]\r\n\t\t\t+ \"\\n\" + \"Userscore: \" + movie_dict[\"userscore\"] + \"\\n\" + \"Runtime: \" + movie_dict[\"runtime\"] + \"\\n\" + \"_________________\")\r\n\t\tsys.stdout.flush()", "def getReviewer(gScholarURL,reviewerEmail,reviewerDir,reviewerFiles,reviewerTmpDir='/tmp/gscholar_dl/',numPapersToRetrieve=1000):\n\n #reviewerTmpDir=tmpDir+reviewerEmail+'/'\n\n createDir(reviewerDir)\n createDir(reviewerTmpDir)\n\n if len(gScholarURL) >0: \n # Save info to a reviewer file \n st = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d-%H%M%S')\n f = open('%sgscholar_url_%s.csv' % (reviewerTmpDir,st),'w')\n f.write('%s,%s\\n' % (reviewerEmail,gScholarURL))\n f.close()\n\n reviewerFileLocLink=reviewerTmpDir+reviewerEmail+'.html'\n reviewerFileLoc=reviewerTmpDir+reviewerEmail+'_'+st+'.html'\n if not getPage(gScholarURL, reviewerFileLoc, link_name=reviewerFileLocLink):\n print 'problem retrieving link'\n return \n else: \n print \"Got empty reviewer scholar URL, using most recent one\"\n reviewerFileLoc = os.path.realpath(reviewerTmpDir+reviewerEmail+'.html')\n if not os.path.exists(reviewerFileLoc):\n print \"Could not find reviewers' profile\", reviewerFileLoc\n\n # get most recent profile file\n #try: \n # reviewerFileLoc = max(glob.glob('%sgscholar_url*.csv' % reviewerTmpDir))\n #except ValueError:\n # print \"Could not find reviewers' profile\", reviewerTmpDir\n # return \n print reviewerFileLoc\n\n f = open(reviewerFileLoc, 'r') \n bs = BeautifulSoup.BeautifulSoup(''.join(f.read()))\n f.close()\n\n #papers = bs.findAll(attrs={\"class\": \"cit-table item\"})\n papers = bs.findAll(attrs={\"class\": \"gsc_a_tr\"})\n print 'found', len(papers), 'papers from', reviewerEmail\n if numPapersToRetrieve < len(papers): \n papers = papers[:numPapersToRetrieve]\n print '\\tLimiting retrieval to %d papers' % len(papers)\n for j,paper in enumerate(papers):\n print 'PAPER:', j\n for i,td in enumerate(paper.findAll('td')):\n if i==0: # paper title, link, author names. 
\n paper_page_url = 'http://scholar.google.com'+td.a.get('href')\n paper_title = td.a.string\n print '\\tlink', paper_page_url\n print '\\tauthors', papers[0].td.span.string\n print '\\ttitle:', paper_title\n\n #filename_title = sanitize(re.sub(\"\"\"[\\W |\\/&#'\"():;]\"\"\", '_', paper_title),expungeNonAscii=True,inputIsUTF8=False)+'.pdf'\n filename_title = sanitize(re.sub('[\\W]', '_', paper_title),expungeNonAscii=True,inputIsUTF8=False)+'.pdf'\n if len(filename_title+'.html') > 255: # ext4 limits the length of filenames\n filename_title = filename_title[:240] + '%06d' % random.randint(100000) + '.pdf'\n paper_loc = reviewerTmpDir+filename_title+'.html'\n getPage(paper_page_url, paper_loc)\n f = open(paper_loc, 'r') \n doc = f.read()\n f.close()\n bs_paper = BeautifulSoup.BeautifulSoup(''.join(doc))\n if bs_paper.findAll(text=\"[PDF]\"): \n paper_pdf_url = bs_paper.findAll(text=\"[PDF]\")[0].findPrevious('a')['href']\n filename_tmp_loc = reviewerTmpDir+filename_title\n filename_loc = reviewerDir+filename_title\n if not os.path.exists(filename_loc) or os.path.getsize(filename_loc) == 0:\n if getPage(paper_pdf_url, filename_tmp_loc):\n if not alreadyInReviewerProfile(filename_tmp_loc, reviewerFiles):\n print '\\tAdding file to reviewer profile'\n os.rename(filename_tmp_loc, filename_loc)\n else:\n print '\\tfile with identical hash already exists'\n else:\n print '\\tunable to download paper'\n else:\n print '\\tpaper previously downloaded'\n else:\n print '\\tno PDF link'\n elif i==1: # citations\n if td.a is not None: \n num_citations = td.a.string\n else:\n num_citations = 0 \n print '\\tnum. citations', num_citations \n elif i==2: # empty\n pass\n elif i==3: # year\n if td.string is not None: \n year = td.string\n else:\n year = 'na'\n print '\\tyear', year" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Scrapes the user review page for all movies and collects reviews from the top reviewers. The data from this can be further used for sentiment analysis and other data analysis.
def scrape_user_reviews(movies):
    user_reviews = []
    for movie in movies:
        review_count = 0
        review_movie_rank = movie[1]
        review_movie = movie[2]
        review_url = movie[6]
        # form the proper url
        review_url = f"https://www.imdb.com/{review_url}reviews?sort=reviewVolume&dir=desc&ratingFilter=0"
        # sleep for random time to avoid IP Block
        # sleep(randint(1, 5))
        response = requests.get(review_url).text
        soup = BeautifulSoup(response, 'lxml')
        for review_container in soup.find_all('div', class_='imdb-user-review'):
            review_meta = review_container.find('div', class_='display-name-date')
            review_title = review_container.a.text.strip('\n')
            review_date = review_container.find('span', class_='review-date').text
            reviewer_rating = review_container.find('div', class_='ipl-ratings-bar')
            if reviewer_rating == None:
                reviewer_rating = ''
            else:
                reviewer_rating = reviewer_rating.text.strip('\n')
            reviewer = review_meta.a.text
            review_content = review_container.find('div', class_='content').div.text
            review = (
                review_count,
                review_movie,
                review_movie_rank,
                review_title,
                reviewer_rating,
                reviewer,
                review_date,
                review_content
            )
            review_count += 1
            print(review_movie, review_count)
            user_reviews.append(review)
    return user_reviews
[ "def fetch_data(movies):\n reviews = list()\n for key, val in movies.items():\n\n # sending request to access the particular url\n movie_url = val[1]\n print(\"Getting Data of Movie : {}\".format(key))\n response = requests.get(movie_url)\n soup = BeautifulSoup(response.content, 'lxml')\n content = soup.find_all('section', class_ = \"ipc-page-section ipc-page-section--base\")\n \n review_url = soup.find_all('a', class_ = \"ipc-title ipc-title--section-title ipc-title--base ipc-title--on-textPrimary ipc-title-link-wrapper\")\n review_url = \"https://www.imdb.com\" + review_url[2]['href']\n \n review_url_response = requests.get(review_url)\n review_url_soup = BeautifulSoup(review_url_response.content, 'lxml')\n \n # here we have got several reviews from a single movie.\n total_reviews = review_url_soup.find_all('div', class_ = \"review-container\")\n # here, it made us necessary to iterate a loop, because it contains several reviews, and every review is important to us.\n for review in total_reviews:\n # using exception handling in case, if there is no title or review or rating is not present.\n try:\n rating = review.find(\"div\", class_ = \"ipl-ratings-bar\")\n rating = rating.find('span').text.strip().split(\"/\")[0]\n except:\n rating = \" \"\n try:\n title = review.find('a', class_ = \"title\").text.strip()\n except: \n title = \"NaN\"\n try:\n review_content = review.find('div', class_ = \"text show-more__control\").text.strip()\n except:\n review_content = None\n \n\n # Appending data to the list\n reviews.append((rating, title, review_content))\n \n print(\"Total Reviews Fetch from the data are : {}\".format(len(reviews)))\n \n return reviews # return type: list of tuples", "def movie_reviews(self, movie_id):\n self.endpoint = 'movie_reviews.json'\n self.payload = {'movie_id': movie_id}\n return self.__make_request()", "def extract_page_reviews(\n page_source_soup : BeautifulSoup,\n verbose : int = 0):\n reviews = page_source_soup.find_all(name=\"div\", attrs={\"data-hook\":\"review\"})\n contents = []\n for i, review in enumerate(reviews):\n try:\n content = {}\n profile = review.find(name=\"a\", attrs={\"class\":\"a-profile\"})[\"href\"]\n name = review.find(name=\"span\", attrs={\"class\":\"a-profile-name\"}).text\n rating = review.find(name=\"a\", attrs={\"class\":\"a-link-normal\"})[\"title\"]\n title = review.find(name=\"a\", attrs={\"data-hook\":\"review-title\"}).text\n date = review.find(name=\"span\", attrs={\"data-hook\":\"review-date\"}).text\n body = review.find(name=\"span\", attrs={\"data-hook\":\"review-body\"})\n helpful_count = review.find(name=\"span\", attrs={\"data-hook\":\"helpful-vote-statement\"})\n images = review.find(name=\"div\", attrs={\"class\":\"review-image-tile-section\"})\n content[\"reviewer_id\"] = extract_profile_id(profile) \n content[\"name\"] = name\n content[\"rating\"] = rating\n content[\"title\"] = title\n content[\"date\"] = date\n content[\"helpful_count\"] = helpful_count\n content[\"body\"] = body\n content[\"images\"] = images\n contents.append(content)\n except Exception as e:\n print(f\"Failed review extraction from page source, exception : {e}\")\n return contents", "def userReviews():\n usersList = files.readUsers()\n beersList = files.readBeers()\n breweryList = files.readBreweries()\n breweryToBeers = files.readBreweryToBeers()\n\n total = 0\n totalUsersComplete = 0\n for userHash, user in usersList.iteritems():\n totalUsersComplete += 1\n # if the data has been normalized, old data will not\n # have usernames. 
Ignore older users which may have\n # already gotten reviews\n if user.username:\n userId = user.uid\n username = user.username\n user.username = None\n userReviewCount = 0\n offsetTotal = 0\n ratings = {}\n\n print 'Processing ' + str(userId) + ': ' + username\n # each response returns at most 25 reviews. To get more user\n # reviews, call again with an offset get at most 50 reviews\n # from the same user\n while (userReviewCount < 2):\n print username + ': ' + str(userReviewCount + 1)\n data = untappd.getUserReviewData(username, offsetTotal)\n offset = data['response']['beers']['count']\n offsetTotal += offset\n reviews = data['response']['beers']['items']\n for review in reviews:\n userRating = review['rating_score']\n if userRating > 0:\n beerInfo = review['beer']\n breweryInfo = review['brewery']\n # fill in beer information\n if hash(str(beerInfo['bid'])) not in beersList:\n stylesList = []\n style = unicode(beerInfo['beer_style']).encode(\"utf-8\")\n styles = style.lower().title().split('/')\n for style in styles:\n style = style.strip()\n stylesList.append(style)\n beerAttribs = {\n 'bid': str(beerInfo['bid']),\n 'name': unicode(beerInfo['beer_name']).encode(\"utf-8\"),\n 'label': beerInfo['beer_label'],\n 'abv': beerInfo['beer_abv'],\n 'ibu': beerInfo['beer_ibu'],\n 'style': stylesList,\n 'description': unicode(beerInfo['beer_description']).encode(\"utf-8\"),\n 'rating': beerInfo['rating_score'],\n 'numRatings': 1,\n 'brewery': str(breweryInfo['brewery_id'])\n }\n beer = UT.UntappdBeer(beerAttribs)\n beersList[hash(beer.bid)] = beer\n else:\n beersList[hash(str(beerInfo['bid']))].numRatings += 1\n # fill in brewery information\n if hash(str(breweryInfo['brewery_id'])) not in breweryList:\n breweryAttribs = {\n 'breweryId': str(breweryInfo['brewery_id']),\n 'name': unicode(breweryInfo['brewery_name']).encode(\"utf-8\"),\n 'label': breweryInfo['brewery_label'],\n 'country': unicode(breweryInfo['country_name']).encode(\"utf-8\"),\n 'location': unicode(breweryInfo['location']).encode(\"utf-8\")\n }\n brewery = UT.UntappdBrewery(breweryAttribs)\n breweryList[hash(brewery.breweryId)] = brewery\n\n # map breweery_id to a list of beers produced there\n if hash(str(breweryInfo['brewery_id'])) not in breweryToBeers:\n # store the current beer in a list of beers of\n # the brewery\n breweryToBeers[hash(str(breweryInfo['brewery_id']))] = {str(breweryInfo['brewery_id']): [str(beerInfo['bid'])]}\n else:\n # add current beer to brewery's list of beers\n breweryToBeers[hash(str(breweryInfo['brewery_id']))][str(breweryInfo['brewery_id'])].append(str(beerInfo['bid']))\n\n # add list of beer ratings to user\n ratings[str(beerInfo['bid'])] = userRating\n userReviewCount += 1\n user.ratings = ratings\n\n # store the dictionaries after new data so user doesn't kill process before writing\n # with open('../data/users.json', 'wb') as usersFile:\n # json = jpickle.encode(usersList)\n # usersFile.write(json)\n # with open('../data/beers.json', 'wb') as beersFile:\n # json = jpickle.encode(beersList)\n # beersFile.write(json)\n # with open('../data/breweries.json', 'wb') as breweriesFile:\n # json = jpickle.encode(breweryList)\n # breweriesFile.write(json)\n # with open('../data/breweryToBeers.json', 'wb') as breweryToBeersFile:\n # json = jpickle.encode(breweryToBeers)\n # breweryToBeersFile.write(json)\n\n # if the offset is less than 25, then there are no more reviews to retrieve\n if offset < 25:\n break\n writeJSONFile('../data/users.json', usersList)\n writeJSONFile('../data/beers.json', beersList)\n 
writeJSONFile('../data/breweries.json', breweryList)\n writeJSONFile('../data/breweryToBeers.json', breweryToBeers)\n\n total += len(ratings)\n print str(userId) + ': ' + username + ', Processed: ' + str(len(ratings)) + ' reviews'\n print 'Total Reviews: ' + str(total)\n print 'Total Users Completed: ' + str(totalUsersComplete)\n sleep(37 * (userReviewCount))\n else:\n total += len(user.ratings)", "def collectAllReviews(url, appName, savePath):\n groupName = removeSpecialChars(appName.lower())\n groupName = groupName.replace(' ', '-')\n\n pageNumber = 1\n reviewsUrl = url + '/group/' + groupName + '?reviews=1&page='\n\n while scrapeReviewsOnPage(reviewsUrl, pageNumber, savePath):\n pageNumber += 1", "def recommend_movies(target_rating: Rating,\n movies: MovieDict, \n user_ratings: UserRatingDict,\n movie_users: MovieUserDict,\n num_movies: int) -> List[int]:\n\n # Your code here\n \n movie_score = {}\n \n ## First step = 'we will need to find users similar'\n similar_user = get_similar_users(target_rating, user_ratings, movie_users) \n \n ## Second step = 'This will be our list of candidate movies'\n ## get_candidate_mov created\n candidate_mov = get_candidate_mov(similar_user, user_ratings, target_rating)\n \n ## Third step = 'track a \"score\" for each movie'\n ## get_mov_score created\n for mov in candidate_mov:\n movie_score[mov] = get_mov_score(mov, \n user_ratings, \n similar_user, \n candidate_mov) \n \n ## Forth step = 'The return list should contain movie ids with the highest scores'\n ## sort_score_list created\n sorted_list = sort_score_list(movie_score)\n \n ## Last step = ' list should be no longer than the value of this parameter'\n final_list = sorted_list[:num_movies]\n \n return final_list", "def scrape_imdb_top250():\n # IMDB top 250 Movies\n source = requests.get('https://www.imdb.com/chart/top/?ref_=nv_mv_250').text\n\n soup = BeautifulSoup(source, 'lxml')\n\n table = soup.find('tbody', class_='lister-list')\n\n rank = 1\n movies = []\n for rowRaw in table.find_all('tr'):\n row = rowRaw.find('td', class_='titleColumn')\n title = row.a.text\n year = row.span.text.strip(\"()\")\n ratingRaw = rowRaw.find('td', class_='ratingColumn imdbRating')\n rating = float(ratingRaw.text)\n no_of_users = ratingRaw.strong['title'].split(' ')[3]\n no_of_users = int(no_of_users.replace(',', ''))\n review_url = row.a['href']\n movie = (\n rank,\n title,\n year,\n rating,\n no_of_users,\n review_url\n )\n rank += 1\n movies.append(movie)\n\n return movies", "def default_movies_for_user(userscore, services, num_movies, watched_movies):\n \n movies = []\n alreadyseen = []\n total = 0\n genrescore = userscore.copy()\n for genre in genrescore:\n total += genrescore[genre]\n\n for genre in genrescore:\n genrescore[genre] = genrescore[genre] / total\n\n for genre in genrescore:\n genrescore[genre] = math.ceil(genrescore[genre] * num_movies)\n\n moviessofar = 0\n services_string = '|'.join(services)\n watchprovidersstring = \"&with_watch_providers=\" + services_string + \"&watch_region=US\"\n if services == []:\n watchprovidersstring = ''\n page = 1\n response = requests.get(\"https://api.themoviedb.org/3/discover/movie?api_key=\" + tmdb_api_key +\n \"&language=en-US&region=US&sort_by=popularity.desc&include_adult=false&include_video=false&page=\" + str(page) +\n watchprovidersstring + \"&with_watch_monetization_types=flatrate\")\n data = response.json()['results']\n\n for genre in genrescore:\n while moviessofar < genrescore[genre]:\n for result in data:\n if result['title'] not in alreadyseen 
and (str(result['id']) not in watched_movies) and moviessofar < genrescore[genre] and str(genre) in str(result['genre_ids']):\n movie = {}\n movie['id'] = result['id']\n movie['title'] = result['title']\n movie['genre_ids'] = result['genre_ids']\n movie['image'] = 'https://image.tmdb.org/t/p/w500' + result['poster_path']\n sources = sources_from_tmdbID(movie['id'])\n if sources != 'None':\n sources_with_service = [sources[x] for x in sources if str(sources[x]) in services] \n movie['source'] = sources_with_service\n movies.append(movie)\n alreadyseen.append(result['title'])\n moviessofar += 1\n page += 1\n if moviessofar < genrescore[genre]:\n response = requests.get(\"https://api.themoviedb.org/3/discover/movie?api_key=\" + tmdb_api_key +\n \"&language=en-US&region=US&sort_by=popularity.desc&include_adult=false&include_video=false&page=\" + str(page) +\n watchprovidersstring + \"&with_watch_monetization_types=flatrate\")\n data = response.json()['results']\n moviessofar = 0\n\n random.shuffle(movies)\n if len(movies) - num_movies > 0:\n return movies[:-(len(movies) - num_movies)]\n return movies", "def scrap_by_users(user_url):\n user_id = user_url.split('?')[-1].split('=')[-1]\n add_start = 'https://www.yelp.com/user_details_reviews_self?rec_pagestart='\n response = requests.get(user_url)\n if response.status_code == 200:\n soup = BeautifulSoup(response.content, 'html.parser')\n count_rev = int(soup.select_one('li.review-count').select_one('strong').text)\n revs = []\n time.sleep(1)\n if count_rev > 0:\n\n\n raw_reviews = soup.select('div.review')\n ### check that reviews > 0\n for row in raw_reviews:\n rev = parse_review(row)\n rev['user_id'] = user_id\n revs.append(rev)\n\n for page in range(10, min(count_rev,50), 10):\n url_add = add_start+str(page)+'&userid='+user_id\n response = requests.get(url_add)\n if response.status_code == 200:\n soup = BeautifulSoup(response.content, 'html.parser')\n\n raw_reviews = soup.select('div.review')\n if raw_reviews is None:\n break\n for row in raw_reviews:\n rev = parse_review(row)\n rev['user_id'] = user_id\n revs.append(rev)\n time.sleep(1)\n return(revs)\n\n else:\n return None", "def get_recent_reviews(n=None):\n\n #start = time.clock()\n\n base_url = 'https://www.residentadvisor.net/'\n reviews = []\n\n review_sources = ['album','single','recommend']\n for review_source in review_sources:\n review_url = urljoin(base_url,'reviews.aspx?format={0}'.format(review_source))\n\n # fetch the review page\n request = Request(url=review_url,\n data=None,\n headers={'User-Agent': 'the-pulse/reviews-v0.1'})\n soup = BeautifulSoup(urlopen(request).read(), \"lxml\")\n\n urls = [x.a['href'] for x in soup.findAll('article')]\n \n today = datetime.today().date()\n yesterday = (datetime.today() - timedelta(1)).date()\n \n keep_going = True \n i = 0\n imax = 5\n # loop through reviews, newest first, keeping all the ones published yesterday\n while keep_going or i >= imax:\n review = get_review(urljoin(base_url,urls[i]))\n i += 1\n\t #print(i)\n if i >= imax:\n keep_going = False\n if review.date_reviewed() == yesterday: \n # the first review was published yesterday, so check for more\n reviews.append(review) \n elif review.date_reviewed() == today:\n # skip over the reviews today, not ideal but allows us to be certain that \n # no reviews are missed since ra releases reviews intermittently throughout the day\n pass\n else:\n # the current review is old, jump out\n keep_going = False\n\n #print(reviews)\n #print('Got {} RA reviews in {:.0} 
seconds'.format(len(reviews), time.clock() - start))\n return remove_dupes(reviews)", "def process_reviews(self, reviews):\n self._create_reviews_df(reviews)\n self._calculate_timepoint_statistics()\n self._create_timepoint_dataframe()", "def extracts_reviews(self) -> None:\n review_parts = self.data.count(review_part_start) # count review tokens\n if review_parts > 0:\n start_idx = self.data.find(review_part_start) # starting point\n end_idx = self.data.find(review_part_end, start_idx) # starting end point\n while start_idx != -1: # As long as there are still reviews\n # extract the header an find the body\n header = (\n remove_html_code(\n self.data[start_idx + len(review_part_start) : end_idx]\n )\n + \". \"\n )\n start_idx = self.data.find(review_part_start, end_idx)\n end_idx = self.data.find(review_part_end, start_idx)\n # extract the body\n content = remove_html_code(\n self.data[start_idx + len(review_part_start) : end_idx]\n )\n start_idx = self.data.find(review_part_start, end_idx)\n end_idx = self.data.find(review_part_end, start_idx)\n # concat the header and the body, store into the review array\n self.reviews.append(header + content)", "def reviews(self):\n qs = self._reviews_all.filter(reply_to=None)\n # Force the query to occur immediately. Several\n # reviews-related tests hang if this isn't done.\n return qs", "def scrapeMovies():\n\n moviesData = []\n\n moviesHTML = 'https://www.imdb.com/list/ls500759439/'\n response = requests.get(moviesHTML)\n html = response.content\n pageHTML = BeautifulSoup(html, 'html.parser')\n \n movies = pageHTML.find_all('div', {'class':'lister-item mode-detail'})\n\n for movie in movies:\n\n title = movie.find('h3', {'class': 'lister-item-header'})\n title = title.text.split(\"\\n\")\n \n runtime = movie.find('span', {'class': 'runtime'})\n runtime = runtime.text.split(\" min\")\n\n genre = movie.find('span', {'class':'genre'})\n genre = genre.text.split(\",\")\n genre = genre[0].split(\"\\n\")\n\n data = [str(title[2]), int(runtime[0]), str(genre[1]), 'Movie']\n\n moviesData.append(data)\n\n dataFrame = pd.DataFrame(moviesData, columns=[\n 'scraped title', 'runtime', 'genre', 'S/M'])\n dataFrame = dataFrame.fillna(0)\n\n return dataFrame", "def reviews(self, tv_id, page=1):\n return self._request_obj(\n self._urls[\"reviews\"] % tv_id,\n params=\"page=%s\" % page,\n key=\"results\"\n )", "def get_review_ids(self):\n review_page_step = 10\n download_url = \"%s/%s_Review-%s-%s-Reviews\" % (self.__entity_type,\n self.__base_url,\n self.__entity_location,\n self.__entity_id)\n re_review_id_pattern = re.compile(r'/ShowUserReviews-g%s-d%s-r([0-9]+)-' % \n (self.__entity_location, self.__entity_id))\n \n \n \n n_reviews_downloaded = 0\n page_reviews_ids = 0\n no_more_review_ids = False\n while(n_reviews_downloaded < self.__max_num_reviews and not no_more_review_ids):\n download_url = \"%s-or%s\" % (download_url, page_reviews_ids * review_page_step)\n htmlwebpage = self.__get_webpage(download_url)\n reviews_ids = set()\n if not htmlwebpage:\n review_ids = None\n raise TripAdvisorReviewsIdsDownloadError(self.__entity_id)\n else:\n new_reviews_ids = re_review_id_pattern.findall(htmlwebpage.decode(\"utf-8\"))\n no_more_review_ids = self.__is_no_more_reviews(new_reviews_ids, reviews_ids)\n if not no_more_review_ids:\n review_ids.update(new_reviews_ids)\n if len(new_reviews_ids) + len(reviews_ids) > self.__max_num_reviews:\n reviews_ids = review_ids[:self.__max_num_reviews]\n page_reviews_ids +=1\n return reviews_ids", "def get_movies_list(url):\n\n # 
sending request to access the particular url\n response = requests.get(url)\n soup = BeautifulSoup(response.content, 'lxml')\n content = soup.find_all('tbody', class_ = \"lister-list\")\n \n # We have got our movie names using list comprehension\n movies_names = [content[0].find_all('tr')[i].find('td', class_ = \"titleColumn\").a.text for i in range(len(content[0].find_all('tr')))]\n \n # here we have not use list comprehension because there are some movies which don't have their ratings\n rating = []\n for i in range(len(content[0].find_all('tr'))):\n\n try:\n rating.append(content[0].find_all('tr')[i].find('td', class_ = \"ratingColumn imdbRating\").strong.text)\n except:\n # Here, we mark that rating will be empty if no rating is present, later while performing any task,\n # we will fill this value by proper techniques \n rating.append(\" \")\n\n # Links for each movie\n links = [content[0].find_all('tr')[i].find('td', class_ = \"titleColumn\").a['href'] for i in range(len(content[0].find_all('tr')))]\n\n # here we have created movies dictonary in which all the data of each movie is present.\n movies = {}\n for i in range(len(content[0].find_all('tr'))):\n if movies.get(movies_names[i]) is None:\n movies[movies_names[i]] = {}\n link = \"https://www.imdb.com\" + links[i]\n movies[movies_names[i]] = (rating[i], link)\n else:\n link = \"https://www.imdb.com\" + links[i]\n movies[movies_names[i]] = (rating[i], link)\n\n\n return movies # Return type: DICT", "def retrieve_reviews_ratings(soup, idx):\n # Set container holding review details\n container = soup.findAll('div', class_=\"_2wrUUKlw _3hFEdNs8\")\n\n page_reviews = []\n page_ratings = []\n page_titles = []\n\n # Find all levels of rating\n rating_re = compile(\"ui_bubble_rating (.*)\")\n\n for item in container:\n \n rating_raw = item.find('span', class_=rating_re)\n rating_int = int(rating_raw.attrs['class'][1].split(\"_\")[1][-2])\n page_ratings.append(rating_int)\n\n review = item.find('q', class_=\"IRsGHoPm\").text\n \n # Check for more text after \"Read More\" activated, complete review text\n expanded = item.find('span', class_=\"_1M-1YYJt\")\n if expanded:\n review += expanded.text\n page_reviews.append(review)\n\n # Save review title\n title = item.find('a', class_='ocfR3SKN').text\n page_titles.append(title)\n\n # For monitoring during runtime\n print('page', idx + 1)\n \n return page_reviews, page_ratings, page_titles", "def requestReviewInfoUnOfficial(businessID):\n reviewList = []\n #7 variables recorded from each review\n #reviewList.append([\"name\",\"location\",\"friend-count\",\"review-count\",\"photo-count\", \"elite-year\", \"rating\",\"date\",\"comment\"])\n #url for first page\n url = \"https://www.yelp.com/biz/{0}?sort_by=date_desc&start=0\".format(businessID)\n page = requests.get(url)\n #Uses beautifulsoup library to retrieve page and parsers html tree\n soup = BeautifulSoup(page.content, 'html.parser')\n #finds number of review pages to iterate through for the individual restaurant \n pageNum = getPageNumber(soup)\n print(\"{0} Number of pages: {1}\".format(businessID,pageNum))\n #increments of 20, each review page for a restaurant contains 20 reviews per a page\n for i in range(0,40,20): #currently only looking at first 2 pages since database already exists and program now justs updates. 
\n print(i)\n if i != 0: #for all pages that follow, must update soup\n url = \"https://www.yelp.com/biz/{0}?sort_by=date_desc&start={1}\".format(businessID,i)\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n\n #finds div with list of reviews, list is further organized as an array of divs \n reviewers = soup.find_all('div', class_= \"review review--with-sidebar\")\n numReviews = len(reviewers)\n \n for i in range(numReviews):#iterates through list of reviews organized by divs\n review = getSingleReview(reviewers[i])\n reviewList.append(review)\n \n return reviewList" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the socket_type_hash of this DestinyDefinitionsDestinyItemSocketEntryDefinition.
def socket_type_hash(self):
    return self._socket_type_hash
[ "def type(self) -> SocketType:\n return self._type", "def socket_id(self):\n return self._test_protocol.socket_id", "def getTypeId(self) -> \"SoType\":\n return _coin.SoKeyboardEvent_getTypeId(self)", "def getTypeId(self) -> \"SoType\":\n return _coin.ScXMLSendElt_getTypeId(self)", "def getTypeId(self) -> \"SoType\":\n return _coin.ScXMLEventElt_getTypeId(self)", "def getTypeId(self) -> \"SoType\":\n return _coin.SoLocation2Event_getTypeId(self)", "def getTypeId(self) -> \"SoType\":\n return _coin.SoEventCallback_getTypeId(self)", "def getTypeId(self) -> \"SoType\":\n return _coin.SoSFColor_getTypeId(self)", "def getTypeId(self) -> \"SoType\":\n return _coin.SoScXMLEvent_getTypeId(self)", "def _startd_collector_hash(self):\n try:\n # The Collector will fallback to Machine and SlotID concatenated\n # together but TJ said we could just get away with Name + MyAddress\n # as this simplifies the SQL column uniqueness constraints\n return hash(','.join([str(self['Name']),\n str(self['MyAddress'])]))\n except KeyError:\n # This could happen if the StartD ad is missing Name or MyAddress attrs,\n # which the Collector should not allow to happen\n return 1", "def getTypeId(self) -> \"SoType\":\n return _coin.SoListener_getTypeId(self)", "def getTypeId(self) -> \"SoType\":\n return _coin.SoPackedColor_getTypeId(self)", "def _node_hash(self, node, port):\n\n return hash(frozenset([node['mgmt_ip'], port]))", "def getTypeId(self) -> \"SoType\":\n return _coin.SoSFBool_getTypeId(self)", "def getTypeId(self) -> \"SoType\":\n return _coin.ScXMLOnEntryElt_getTypeId(self)", "def getTypeId(self) -> \"SoType\":\n return _coin.SoSFBox2d_getTypeId(self)", "def get_app_id_by_socket(self, sock_fd):\r\n with self.app_list_lock:\r\n for i in range(len(self.app_list)):\r\n if self.app_list[i].socket_fd == sock_fd:\r\n return self.app_list[i].app_id\r\n return None", "def getTypeId(self) -> \"SoType\":\n return _coin.ScXMLEventTarget_getTypeId(self)", "def getTypeId(self) -> \"SoType\":\n return _coin.ScXMLElseIfElt_getTypeId(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the socket_type_hash of this DestinyDefinitionsDestinyItemSocketEntryDefinition.
def socket_type_hash(self, socket_type_hash):
    self._socket_type_hash = socket_type_hash
[ "def socket_type_hash(self):\n return self._socket_type_hash", "def type(self) -> SocketType:\n return self._type", "def write_socket(self, socket_info: int):\n self.write_metadata_by_name(self.SOCKET_KEY, str(socket_info))", "def set_guest_socket(self, guest_socket):\n \n self.guest_socket = guest_socket", "def mock_socket(self, host, port, socket):\n self._mock_sockets[host + \":\" + str(port)] = socket", "def setSocketPosition(self):\n self.grSocket.setPos(*self.node.getSocketPosition(self.index, self.position, self.count_on_this_node_side))", "def set_type(self, type):\n return _raw_util.raw_message_sptr_set_type(self, type)", "def socket_set_var(self, var, value, socket_name):\n msg = \"socket_set_var(\\\"{}\\\",{},\\\"{}\\\")\".format(\n var,\n value,\n socket_name)\n self._add_line_to_program(msg)\n self.sync()", "def FLISetFrameType(self, handle, frame_type):\n frame_type = self._check_valid(frame_type, 'frame type')\n self._call_function('setting frame type', self._CDLL.FLISetFrameType,\n handle, ctypes.c_long(frame_type))", "def setHash(self, mcanHash):\n self.setByte('hashH', mcanHash >> 8)\n self.setByte('hashL', mcanHash & 0xff)", "def put(self, dtyp ):\n if isinstance(dtyp, dihtype):\n self.maxgid += 1\n self.dihtypes[self.maxgid] = copy.deepcopy(dtyp)\n else:\n print \"Attempting to add non-dihtype type to container\"\n raise TypeError", "def wrap_socket(\n self, socket: _pysocket.socket, server_hostname: Optional[str]\n ) -> TLSWrappedSocket:\n buffer = self.wrap_buffers(server_hostname)\n return TLSWrappedSocket(socket, buffer)", "def _set_network_type(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(\n v,\n base=RestrictedClassType(\n base_type=six.text_type,\n restriction_type=\"dict_key\",\n restriction_arg={\n \"POINT_TO_POINT_NETWORK\": {\n \"@module\": \"openconfig-ospf-types\",\n \"@namespace\": \"http://openconfig.net/yang/ospf-types\",\n },\n \"oc-ospf-types:POINT_TO_POINT_NETWORK\": {\n \"@module\": \"openconfig-ospf-types\",\n \"@namespace\": \"http://openconfig.net/yang/ospf-types\",\n },\n \"oc-ospft:POINT_TO_POINT_NETWORK\": {\n \"@module\": \"openconfig-ospf-types\",\n \"@namespace\": \"http://openconfig.net/yang/ospf-types\",\n },\n \"BROADCAST_NETWORK\": {\n \"@module\": \"openconfig-ospf-types\",\n \"@namespace\": \"http://openconfig.net/yang/ospf-types\",\n },\n \"oc-ospf-types:BROADCAST_NETWORK\": {\n \"@module\": \"openconfig-ospf-types\",\n \"@namespace\": \"http://openconfig.net/yang/ospf-types\",\n },\n \"oc-ospft:BROADCAST_NETWORK\": {\n \"@module\": \"openconfig-ospf-types\",\n \"@namespace\": \"http://openconfig.net/yang/ospf-types\",\n },\n \"NON_BROADCAST_NETWORK\": {\n \"@module\": \"openconfig-ospf-types\",\n \"@namespace\": \"http://openconfig.net/yang/ospf-types\",\n },\n \"oc-ospf-types:NON_BROADCAST_NETWORK\": {\n \"@module\": \"openconfig-ospf-types\",\n \"@namespace\": \"http://openconfig.net/yang/ospf-types\",\n },\n \"oc-ospft:NON_BROADCAST_NETWORK\": {\n \"@module\": \"openconfig-ospf-types\",\n \"@namespace\": \"http://openconfig.net/yang/ospf-types\",\n },\n },\n ),\n is_leaf=True,\n yang_name=\"network-type\",\n parent=self,\n path_helper=self._path_helper,\n extmethods=self._extmethods,\n register_paths=True,\n namespace=\"http://openconfig.net/yang/network-instance\",\n defining_module=\"openconfig-network-instance\",\n yang_type=\"identityref\",\n is_config=True,\n )\n except (TypeError, ValueError):\n raise ValueError(\n {\n \"error-string\": \"\"\"network_type must be of a type 
compatible with identityref\"\"\",\n \"defined-type\": \"openconfig-network-instance:identityref\",\n \"generated-type\": \"\"\"YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type=\"dict_key\", restriction_arg={'POINT_TO_POINT_NETWORK': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospf-types:POINT_TO_POINT_NETWORK': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospft:POINT_TO_POINT_NETWORK': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'BROADCAST_NETWORK': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospf-types:BROADCAST_NETWORK': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospft:BROADCAST_NETWORK': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'NON_BROADCAST_NETWORK': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospf-types:NON_BROADCAST_NETWORK': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}, 'oc-ospft:NON_BROADCAST_NETWORK': {'@module': 'openconfig-ospf-types', '@namespace': 'http://openconfig.net/yang/ospf-types'}},), is_leaf=True, yang_name=\"network-type\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='identityref', is_config=True)\"\"\",\n }\n )\n\n self.__network_type = t\n if hasattr(self, \"_set\"):\n self._set()", "def socket_id(self):\n return self._test_protocol.socket_id", "def settings_hash(self, settings_hash):\n\n self._settings_hash = settings_hash", "def _set_ss(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"ss\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"ss must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"ss\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__ss = t\n if hasattr(self, '_set'):\n self._set()", "def set_socket(sock, iface=None):\n global _default_sock # pylint: disable=invalid-name, global-statement\n global _fake_context # pylint: disable=invalid-name, global-statement\n _default_sock = sock\n if iface:\n _default_sock.set_interface(iface)\n _fake_context = _FakeSSLContext(iface)", "def set_socket(sock, iface=None):\r\n global _default_sock # pylint: disable=invalid-name, global-statement\r\n global _fake_context # pylint: disable=invalid-name, global-statement\r\n _default_sock = sock\r\n if iface:\r\n _default_sock.set_interface(iface)\r\n _fake_context = _FakeSSLContext(iface)", "def 
wrap_socket(self, socket: _pysocket.socket) -> TLSWrappedSocket:\n buffer = self.wrap_buffers()\n return TLSWrappedSocket(socket, buffer)", "def setTypeAtAddress(self,addr,length,typeValue):\n \"\"\"The type must be <b>TYPE_UNDEFINED</b>, <b>TYPE_INT8</b>, ...\"\"\"\n return HopperLowLevel.setTypeAtAddress(self.__internal_segment_addr__,addr,length,typeValue)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the single_initial_item_hash of this DestinyDefinitionsDestinyItemSocketEntryDefinition. If a valid hash, this is the hash identifier for the DestinyInventoryItemDefinition representing the Plug that will be initially inserted into the item on item creation. Otherwise, this Socket will either start without a plug inserted, or will have one randomly inserted.
def single_initial_item_hash(self):
    return self._single_initial_item_hash
[ "def _get_hash_partial(self):\n hash_value = 0\n \n # available\n hash_value ^= self.available\n \n # description\n description = self.description\n if (description is not None):\n hash_value ^= hash(description)\n \n # format\n hash_value ^= self.format.value << 1\n \n # name\n name = self.name\n if (description is None) or (description != name):\n hash_value ^= hash(name)\n \n # pack_id\n hash_value ^= self.pack_id\n \n # sort_value\n hash_value ^= self.sort_value << 5\n \n # tags\n tags = self.tags\n if (tags is not None):\n hash_value ^= len(tags) << 9\n \n for tag in tags:\n hash_value ^= hash(tag)\n \n # type\n hash_value ^= self.type.value << 13\n \n # user\n hash_value ^= hash(self.user)\n \n return hash_value", "def _startd_collector_hash(self):\n try:\n # The Collector will fallback to Machine and SlotID concatenated\n # together but TJ said we could just get away with Name + MyAddress\n # as this simplifies the SQL column uniqueness constraints\n return hash(','.join([str(self['Name']),\n str(self['MyAddress'])]))\n except KeyError:\n # This could happen if the StartD ad is missing Name or MyAddress attrs,\n # which the Collector should not allow to happen\n return 1", "def _hashable(item):\n try:\n hash(item)\n except TypeError:\n return util_hash.hash_data(item)\n else:\n return item", "def get_mc_shower_h_first_int(self):\n return self.lib.get_mc_shower_h_first_int()", "def contents_hash(self):\n # type: () -> int\n if self._hash is None:\n self._hash = hash(tuple(self.items()))\n return self._hash", "def hashid(self) :\n\t\ttry :\n\t\t\treturn self._hashid\n\t\texcept Exception as e:\n\t\t\traise e", "def initial_sku(self):\n return self._initial_sku", "def get_hash_sha1(self):\n\n if sha1 is not None:\n return sha1( self.get_data() ).hexdigest()", "def _next_free_slot(self, first_hash):\n curr_index = first_hash\n try_number = 0\n tried = []\n #print self._data\n while self._data[curr_index] is not None:\n tried.append(curr_index)\n if try_number + 1 >= self.n_slots // 2:\n #print self._data\n print('Size = ' + str(self.n_slots))\n print('Number of items = ' + str(self.n_items))\n print(\"Failed to find an empty slot...\")\n print('Try number = '+str(try_number))\n print('List of tried slots = '+str(tried))\n print('Current table = '+str(self._data))\n raise ValueError(\"Failed to find an empty slot!!!! 
\"+\n \"This can happen with quadratic probing \"+\n \"if the table is over half full\")\n else:\n try_number += 1\n curr_index = (first_hash + try_number**2) % self.n_slots\n return curr_index", "def getInitialAttribute(self) -> \"char const *\":\n return _coin.ScXMLStateElt_getInitialAttribute(self)", "def get_mc_shower_primary_id(self):\n return self.lib.get_mc_shower_primary_id()", "def item_init(event):\n\n item_init.log.info(\"Initializing Items\")\n for item_name in [i for i in items if get_metadata(i, \"init\")]:\n value = get_value(item_name, \"init\")\n\n # Always update if override is True\n if get_key_value(item_name, \"init\", \"override\") == \"True\":\n post_update_if_different(item_name, value)\n item_init.log.info(\"Overriding current value {} of {} to {}\"\n .format(items[item_name], item_name, value))\n\n # If not overridden, only update if the Item is currently NULL or UNDEF.\n elif isinstance(items[item_name], UnDefType):\n item_init.log.info(\"Initializing {} to {}\"\n .format(item_name, value))\n postUpdate(item_name, value)\n\n # Delete the metadata now that the Item is initialized.\n if get_key_value(item_name, \"init\", \"clear\") == \"true\":\n item_init.log.info(\"Removing init metadata from {}\"\n .format(item_name))\n remove_metadata(item_name, \"init\")", "def getInitial(self) -> \"ScXMLInitialElt *\":\n return _coin.ScXMLStateElt_getInitial(self)", "def put(self, item):\n hash_value = self.hash_function(item)\n slot_placed = -1\n if self.slots[hash_value] is None or self.slots[hash_value] == item: # empty slot or slot contains item already\n self.slots[hash_value] = item\n slot_placed = hash_value\n else:\n next_slot = self.rehash(hash_value)\n while self.slots[next_slot] is not None and self.slots[next_slot] != item:\n next_slot = self.rehash(next_slot)\n if next_slot == hash_value: # we have done a full circle through the hash table\n # no available slots\n return slot_placed\n\n self.slots[next_slot] = item\n slot_placed = next_slot\n return slot_placed", "def socket_type_hash(self):\n return self._socket_type_hash", "def getStartingAddress(self):\n return HopperLowLevel.getSectionStartingAddress(self.__internal_section_addr__)", "def get_hash_for_device(uuid: int, location: str) -> ElementModQ:\n return hash_elems(uuid, location)", "def hash(self):\n return self.wh", "def get(self, item):\n start_slot = self.hash_function(item)\n\n stop = False\n found = False\n position = start_slot\n while self.slots[position] is not None and not found and not stop:\n if self.slots[position] == item:\n found = True\n else:\n position = self.rehash(position)\n if position == start_slot:\n stop = True\n if found:\n return position\n return -1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the single_initial_item_hash of this DestinyDefinitionsDestinyItemSocketEntryDefinition. If a valid hash, this is the hash identifier for the DestinyInventoryItemDefinition representing the Plug that will be initially inserted into the item on item creation. Otherwise, this Socket will either start without a plug inserted, or will have one randomly inserted.
def single_initial_item_hash(self, single_initial_item_hash):
    self._single_initial_item_hash = single_initial_item_hash
[ "def single_initial_item_hash(self):\n return self._single_initial_item_hash", "def initialize_item_factory_state(self, state_key, initial_data):\n self._initialize_state(\n self._game_state['item_factories'], state_key, initial_data)", "def item_init(event):\n\n item_init.log.info(\"Initializing Items\")\n for item_name in [i for i in items if get_metadata(i, \"init\")]:\n value = get_value(item_name, \"init\")\n\n # Always update if override is True\n if get_key_value(item_name, \"init\", \"override\") == \"True\":\n post_update_if_different(item_name, value)\n item_init.log.info(\"Overriding current value {} of {} to {}\"\n .format(items[item_name], item_name, value))\n\n # If not overridden, only update if the Item is currently NULL or UNDEF.\n elif isinstance(items[item_name], UnDefType):\n item_init.log.info(\"Initializing {} to {}\"\n .format(item_name, value))\n postUpdate(item_name, value)\n\n # Delete the metadata now that the Item is initialized.\n if get_key_value(item_name, \"init\", \"clear\") == \"true\":\n item_init.log.info(\"Removing init metadata from {}\"\n .format(item_name))\n remove_metadata(item_name, \"init\")", "def setInitial(self, initial: 'ScXMLInitialElt') -> \"void\":\n return _coin.ScXMLStateElt_setInitial(self, initial)", "def set_initial_inventory(self, machine, items_inventory, item_costs):\n return machine._set_initial_inventory(items_inventory, item_costs)", "def setInitial(self, initial: 'ScXMLInitialElt') -> \"void\":\n return _coin.ScXMLScxmlElt_setInitial(self, initial)", "def setInitialAttribute(self, initial: 'char const *') -> \"void\":\n return _coin.ScXMLStateElt_setInitialAttribute(self, initial)", "def initial_sku(self, initial_sku):\n\n self._initial_sku = initial_sku", "def set_species_initial_value(self, species_initial_value):\n self.listOfSpecies['S'].initial_value = species_initial_value[0]\n self.listOfSpecies['I'].initial_value = species_initial_value[1]\n self.listOfSpecies['R'].initial_value = species_initial_value[2]", "def add_item(self, item):\n item.universe = self\n self.items[item.uuid] = item", "def slotStartupConfig(self):\n\n for item in self.__topology.selectedItems():\n if isinstance(item, IOSRouter):\n item.changeStartupConfig()", "def setInitialAttribute(self, initial: 'char const *') -> \"void\":\n return _coin.ScXMLScxmlElt_setInitialAttribute(self, initial)", "def setInitial(self, initial: 'ScXMLInitialElt') -> \"void\":\n return _coin.ScXMLParallelElt_setInitial(self, initial)", "def MAC_DEF(self, item=''):\n debug('GCSCommands.MAC_DEF(item=%r)', item)\n if item:\n checksize((1,), item)\n else:\n item = ''\n cmdstr = self.__getcmdstr('MAC DEF', item)\n self.__msgs.send(cmdstr)", "def setup_item(self, item: BaseBasketItem, request: HttpRequest) -> None:", "def setPriorBlockHash(self, priorHash):\n self.parentBlockHash = priorHash", "def initialize_first_player(self) -> None:\n self.players[\"0\"] = HumanPlayer(self.player_marks[\"0\"])\n self.players[\"0\"].set_player_info(\"First Player\")", "def step_hash(self, step_hash):\n\n self._step_hash = step_hash", "def put(self, item):\n hash_value = self.hash_function(item)\n slot_placed = -1\n if self.slots[hash_value] is None or self.slots[hash_value] == item: # empty slot or slot contains item already\n self.slots[hash_value] = item\n slot_placed = hash_value\n else:\n next_slot = self.rehash(hash_value)\n while self.slots[next_slot] is not None and self.slots[next_slot] != item:\n next_slot = self.rehash(next_slot)\n if next_slot == hash_value: # we have done a full 
circle through the hash table\n # no available slots\n return slot_placed\n\n self.slots[next_slot] = item\n slot_placed = next_slot\n return slot_placed", "def initialState(sid, data):\n #stprint(\"initialState: \" + str(data))\n uid = connections.get(sid, None)\n game = games.get(uid, None)\n\n if game is not None:\n game.initial_state = data\n #game.set_initial_state(uid, data)\n #game.event(uid, event_type='initial_state', event_data=data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the reusable_plug_items of this DestinyDefinitionsDestinyItemSocketEntryDefinition.
def reusable_plug_items(self):
    return self._reusable_plug_items
[ "def reusable_config_values(self) -> pulumi.Input['ReusableConfigValuesArgs']:\n return pulumi.get(self, \"reusable_config_values\")", "def reusable_config(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"reusable_config\")", "def hot_pluggable(self):\n ret = self._get_attr(\"hotPluggable\")\n return ret", "def get_items(self):\n return [i for i in self.game.items if i.room == self]", "def min_noutput_items(self):\n return _raw_util.raw_message_source_sptr_min_noutput_items(self)", "def min_noutput_items(self):\n return _mediatools_swig.mediatools_audiosource_s_sptr_min_noutput_items(self)", "def pc_noutput_items_var(self):\n return _mediatools_swig.mediatools_audiosource_s_sptr_pc_noutput_items_var(self)", "def net_receivables(self):\n return self._net_receivables", "def getEquipped(self) -> List[InventoryItem]:\r\n\t\treturn sorted((i for i in self.items.values() if i.is_equipped), key=lambda i: i.slot)", "def min_noutput_items(self):\n return _raw_util.raw_message_sink_sptr_min_noutput_items(self)", "def canvas_items(self):\n return copy.copy(self.__canvas_items)", "def getConfigItems(self, ses=None):\n if self._config.has_key(ses):\n return self._config[ses].values()\n\n return []", "def items(self) -> MiResponseResponseToDisplayUiTemplateItems:\n return self._items", "def message_subscribers(self, which_port):\n return _mediatools_swig.mediatools_audiosource_s_sptr_message_subscribers(self, which_port)", "def items(self):\n return self.item_set", "def poller_configurations(self):\n configs = []\n for cacheablesource in self.config.findall('cacheablesource'):\n for cachearea in self.config.findall('cachearea'):\n if 'sources' not in cachearea.attrib:\n configs.append({'cacheablesource': cacheablesource, \n 'cachearea': cachearea})\n elif cacheablesource.attrib['id'] in \\\n cachearea.attrib['sources'].split():\n configs.append({'cacheablesource': cacheablesource, \n 'cachearea': cachearea})\n return configs", "def outlets(self) -> Outlet:\n return self._outlets", "def plug_ins(self):\n ret = self._get_attr(\"plugIns\")\n return [IExtPackPlugIn(a) for a in ret]", "def min_noutput_items(self):\n return _raw_util.raw_pnc_frequency_modulator_fc_sptr_min_noutput_items(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the reusable_plug_items of this DestinyDefinitionsDestinyItemSocketEntryDefinition.
def reusable_plug_items(self, reusable_plug_items):
    self._reusable_plug_items = reusable_plug_items
[ "def reusable_plug_items(self):\n return self._reusable_plug_items", "def reusable_config_values(self) -> pulumi.Input['ReusableConfigValuesArgs']:\n return pulumi.get(self, \"reusable_config_values\")", "def reusable_config(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"reusable_config\")", "def set_hot_pluggable_for_device(self, name, controller_port, device, hot_pluggable):\n if not isinstance(name, basestring):\n raise TypeError(\"name can only be an instance of type basestring\")\n if not isinstance(controller_port, baseinteger):\n raise TypeError(\"controller_port can only be an instance of type baseinteger\")\n if not isinstance(device, baseinteger):\n raise TypeError(\"device can only be an instance of type baseinteger\")\n if not isinstance(hot_pluggable, bool):\n raise TypeError(\"hot_pluggable can only be an instance of type bool\")\n self._call(\"setHotPluggableForDevice\",\n in_p=[name, controller_port, device, hot_pluggable])", "def addModsToItems(self):\n self.setKv(\"mods\", self.mods)", "def suggestion_chips(self, chips:list):\r\n \r\n add_chip = []\r\n for chip in chips:\r\n add_chip.append({\r\n \"text\": chip,\r\n \"image\": {\r\n \"src\": {\r\n \"rawUrl\": \"\"\r\n }\r\n },\r\n \"link\": \"\"\r\n })\r\n\r\n return {\r\n \"payload\":\r\n {\r\n \"richContent\":\r\n [\r\n [\r\n {\r\n \"type\": \"chips\",\r\n \"options\": add_chip\r\n }\r\n ]\r\n ]\r\n\r\n }\r\n }", "def slotStartupConfig(self):\n\n for item in self.__topology.selectedItems():\n if isinstance(item, IOSRouter):\n item.changeStartupConfig()", "def hot_pluggable(self):\n ret = self._get_attr(\"hotPluggable\")\n return ret", "def set_min_noutput_items(self, m):\n return _mediatools_swig.mediatools_audiosource_s_sptr_set_min_noutput_items(self, m)", "def add_standard_hipersocket(self):\n\n # Adapter properties that will be auto-set:\n # - object-uri\n # - adapter-family\n # - network-port-uris (to empty array)\n faked_hs2 = self.faked_cpc.adapters.add({\n 'object-id': HS2_OID,\n 'parent': self.faked_cpc.uri,\n 'class': 'adapter',\n 'name': HS2_NAME,\n 'description': 'Hipersocket #2',\n 'status': 'active',\n 'type': 'hipersockets',\n 'adapter-id': '123',\n 'detected-card-type': 'hipersockets',\n 'port-count': 1,\n 'network-port-uris': [],\n 'state': 'online',\n 'configured-capacity': 32,\n 'used-capacity': 0,\n 'allowed-capacity': 32,\n 'maximum-total-capacity': 32,\n 'physical-channel-status': 'operating',\n 'maximum-transmission-unit-size': 56,\n })\n\n # Port properties that will be auto-set:\n # - element-uri\n # Properties in parent adapter that will be auto-set:\n # - network-port-uris\n faked_hs2.ports.add({\n 'element-id': 'fake-port21-oid',\n 'parent': faked_hs2.uri,\n 'class': 'network-port',\n 'index': 0,\n 'name': 'fake-port21-name',\n 'description': 'Hipersocket #2 Port #1',\n })\n return faked_hs2", "def AddMultiSlot(self, slot):\n if isinstance(slot, BLNlpClipsMultislotMap):\n self._multislots.append(slot)\n else:\n s = BLNlpClipsMultislotMap()\n if \"Name\" in slot:\n s.Name = slot[\"Name\"]\n if \"Type\" in slot:\n s.Type = slot[\"Type\"]\n if \"Default\" in slot:\n s.Default = slot[\"Default\"]\n else:\n s.Name = slot\n self._multislots.append(s)", "def ConfiguredAsgElectronRingerSelector( quality, menu, cutIDConfDict = None, **kwargs ):\n try:\n RingerSelectorConfigurable = ElectronRingerMap[(quality, menu)]\n\n if cutIDConfDict is None:\n cutIDConfDict = {}\n # Configure it\n ringerSelectorTool = RingerSelectorConfigurable(cutIDConfDict = cutIDConfDict,\n **kwargs)\n return 
ringerSelectorTool\n except KeyError:\n from AthenaCommon.Logging import logging\n import traceback\n mlog = logging.getLogger( 'ConfiguredAsgElectronRingerSelector.py' )\n mlog.error(\"There is no such configuration available:\\n %s\", traceback.format_exc())\n raise", "def set_max_noutput_items(self, m):\n return _mediatools_swig.mediatools_audiosource_s_sptr_set_max_noutput_items(self, m)", "def is_set_max_noutput_items(self):\n return _mediatools_swig.mediatools_audiosource_s_sptr_is_set_max_noutput_items(self)", "def update_confusables():\n LINE_RE = re.compile(\n r'^(?P<confusable_cp>[0-9A-F ]+) ;\\t'\n r'(?P<confused_with_cp>[0-9A-F ]+) ;.*',\n re.UNICODE)\n\n confusables = []\n\n for info in _load_data(CONFUSABLES_URL, LINE_RE):\n # Parse and check the character that another may be confused with.\n confused_with_cp_strs = info['confused_with_cp'].split(' ')\n\n if len(confused_with_cp_strs) != 1:\n # We don't care about any confusables with multi-codepoint\n # characters.\n continue\n\n confused_with_cp = int(confused_with_cp_strs[0], 16)\n\n # Skip anything that's not confused with a standard ASCII character.\n # We're only concerned with confusing characters that may be in\n # common identifiers.\n if confused_with_cp >= 128:\n continue\n\n confused_with_alias = get_alias(confused_with_cp)\n\n # Skip anything that's not confused with a common or latin character.\n if confused_with_alias not in ('COMMON', 'LATIN'):\n continue\n\n # Parse and check confusable characters.\n confusable_cp_strs = info['confusable_cp'].split(' ')\n assert len(confusable_cp_strs) == 1, confusable_cp_strs\n\n confusable_cp = int(confusable_cp_strs[0], 16)\n\n # There are some confusables, like \"1\" and \"l\", that will be in\n # this file. Ignore anything that's ASCII. We only want the more\n # exotic characters.\n if confusable_cp < 128:\n continue\n\n confusables.append((\n confusable_cp,\n chr(confusable_cp),\n chr(confused_with_cp),\n ))\n\n confusables.sort()\n\n return confusables", "def add(self, chips):\n self.chips += chips", "def give_pot(self, player):\n player.add_chips(self._chips)\n self.remove_chips(self._chips)", "def add_zabbix_dynamic_item(self):\n\n self.zagg_sender.add_zabbix_dynamic_item(self.args.discovery_key,\n self.args.macro_string,\n self.args.macro_names.split(','),\n )", "def min_noutput_items(self):\n return _mediatools_swig.mediatools_audiosource_s_sptr_min_noutput_items(self)", "def spawn_random(self, item_id, spawnable_ids):\n spawnable_dict = {}\n for item_id in spawnable_ids:\n spawnable_dict[item_id] = self.item_dict[item_id]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the neutron_utils.get_external_network_names to ensure the configured self.ext_net_name is contained within the returned list
def test_retrieve_ext_network_name(self):
    neutron = neutron_utils.neutron_client(self.os_creds, self.os_session)
    ext_networks = neutron_utils.get_external_networks(neutron)
    found = False
    for network in ext_networks:
        if network.name == self.ext_net_name:
            found = True
            break
    self.assertTrue(found)
[ "def get_external_lns_names(self) -> List[Info]:\n return self.get_lns_names(domain=self.wallet.get_addresses(), inv=True)", "def ext_network_ok(default_gateway, external_netmask, external_ip_range):\n ext_network = to_network(default_gateway, external_netmask)\n ext_ip_low, ext_ip_high = [ipaddress.IPv4Address(x) for x in external_ip_range]\n high_ip_ok = ext_ip_high in ext_network\n low_ip_ok = ext_ip_low in ext_network\n answer = high_ip_ok and low_ip_ok\n return answer", "def test_include_multiple_networks(self):\n networks = ['Disney Channel', 'HGTV', 'CBS']\n new_episodes = self.schedule.include_networks(networks)\n for episode in new_episodes:\n self.assertTrue(episode.show.network[\"name\"] in networks)", "def test_l3ext_name_for_invalidname_in_configpush(self):\n\n config_file = \"\"\"\n {\n \"clusters\": [\n {\n \"name\": \"Configpushtest*-(1)\",\n \"id\": \"56c55b8761707062b2d11b00\",\n \"descr\": \"sample description\",\n \"route_tag\": {\n \"subnet_mask\": \"173.38.111.0/24\",\n \"name\": \"rtp1-dcm01n-gp-db-dr2:iv2133\"\n },\n \"labels\": [\n\n ],\n \"nodes\": [\n ]\n },\n {\n \"name\": \"Configpushtest*-(2)\",\n \"id\": \"56c3d31561707035c0c12b00\",\n \"descr\": \"sample description\",\n \"approved\": true,\n \"route_tag\": {\n \"subnet_mask\": \"0.0.0.0/0\",\n \"name\": \"INTERNET-EXTNET\"\n },\n \"labels\": [\n\n ],\n \"nodes\": [\n ]\n }\n ],\n \"policies\": [\n {\n \"src\": \"56c55b8761707062b2d11b00\",\n \"dst\": \"56c3d31561707035c0c12b00\",\n \"src_name\": \"Configpushtest-policy*-(1)\",\n \"dst_name\": \"Configpushtest-policy*-(2)\",\n \"descr\": \"sample description\",\n \"whitelist\": [\n ]\n }\n ]\n}\n \"\"\"\n load_config = LoadConfig()\n tenant_name = 'configpush-test1'\n app_name = 'app-test'\n l3ext_name = 'l3external-test***#####:::{{{}}}}'\n load_config.load_configFile(\n config_file,\n is_file=False,\n tenant_name=tenant_name,\n app_name=app_name,\n l3ext_name=l3ext_name)\n time.sleep(5)\n\n tenants = Tenant.get(load_config.session)\n for tenant in tenants:\n if tenant.name == tenant_name:\n tenant.mark_as_deleted()\n resp = tenant.push_to_apic(load_config.session)\n if not resp.ok:\n print(\"tenant deletion failed\")\n\n time.sleep(5)\n load_config = LoadConfig()\n load_config.load_configFile(config_file, is_file=False)\n time.sleep(5)\n tenants = Tenant.get_deep(load_config.session, names=[load_config.tool.tenant_name])\n for tenant in tenants:\n if tenant.name == tenant_name:\n self.assertTrue(True, \"tenant exists with name \" + tenant_name)\n app_profiles = tenant.get_children(AppProfile)\n app = app_profiles[0]\n self.assertEquals(app[0].name, app_name, \"application profile with given name doesnot exist\" + app_name)\n\n outsideL3s = tenant.get_children(OutsideL3)\n print(\"outsideL3s[0].name is \" + outsideL3s[0].name)\n self.assertEquals(\n outsideL3s[0].name,\n l3ext_name,\n \"External routed network with name doesnot exist\" +\n l3ext_name)", "def get_invalid_ext_net_ids(self, dst_info, ext_net_map):\n\n invalid_ext_nets = {'src_nets': [], 'dst_nets': []}\n\n src_ext_nets_ids = [net.id for net in self.by_id.values() if\n net.external]\n dst_ext_nets_ids = [net.id for net in dst_info.by_id.values() if\n net.external]\n\n for src_net_id, dst_net_id in ext_net_map.items():\n if src_net_id not in src_ext_nets_ids:\n invalid_ext_nets['src_nets'].append(src_net_id)\n if dst_net_id not in dst_ext_nets_ids:\n invalid_ext_nets['dst_nets'].append(dst_net_id)\n\n return invalid_ext_nets", "def 
test_list_name_not_in_external_choices_sheet_raises_error(self):\n md = \"\"\"\n | survey | | | | |\n | | type | name | label | choice_filter |\n | | select_one state | state | State | |\n | | select_one_external city | city | City | state=${state} |\n | | select_one_external suburby | suburb | Suburb | state=${state} and city=${city} |\n \"\"\"\n self.assertPyxformXform(\n md=md + self.all_choices,\n errored=True,\n error__contains=[\"List name not in external choices sheet: suburby\"],\n )", "def is_valid_net_addr(network: str):\n return add_wildcard_ip(network)", "def checkForNewNetworks(self):\r\n pass", "def test003_check_connectivity_through_external_network(self):\n self.lg('%s STARTED' % self._testID)\n\n self.lg(\"Create VM1,should succeed.\")\n vm1_id = self.cloudapi_create_machine(cloudspace_id=self.cloudspace_id)\n\n self.lg(\"Attach VM1 to an external network, should succeed\")\n reponse = self.api.cloudbroker.machine.attachExternalNetwork(machineId=vm1_id)\n self.assertTrue(reponse)\n\n self.lg(\"Assign IP to VM1's external netowrk interface, should succeed.\")\n vm1_nics = self.api.cloudapi.machines.get(machineId=vm1_id)[\"interfaces\"]\n vm1_nic = [x for x in vm1_nics if \"externalnetworkId\" in x[\"params\"]][0]\n self.assertTrue(vm1_nic)\n vm1_ext_ip = vm1_nic[\"ipAddress\"]\n vm1_client = VMClient(vm1_id)\n vm1_client.execute(\"ip a a %s dev eth1\"%vm1_ext_ip, sudo=True)\n vm1_client.execute(\"nohup bash -c 'ip l s dev eth1 up </dev/null >/dev/null 2>&1 & '\", sudo=True)\n\n self.lg(\"Check if you can ping VM1 from outside, should succeed\")\n vm1_ext_ip = vm1_ext_ip[:vm1_ext_ip.find('/')]\n response = os.system(\"ping -c 1 %s\"%vm1_ext_ip)\n self.assertFalse(response)\n\n self.lg(\"Check that you can connect to vm with new ip ,should succeed\")\n vm1_client = VMClient(vm1_id, external_network=True)\n stdin, stdout, stderr = vm1_client.execute(\"ls /\")\n self.assertIn('bin', stdout.read())", "def internal_networks(self):\n ret = self._get_attr(\"internalNetworks\")\n return ret", "def netns_list(self):\n return self._netns_list", "def _dev_not_in_used_interfaces(self, dev):\n used_interfaces_names = []\n for iface in self.used_interfaces:\n used_interfaces_names.append(iface.name)\n if dev.name in used_interfaces_names:\n return False\n else:\n return True", "def extract_live_network_2g_externals(self):\n pass", "def test_include_networks(self):\n new_episodes = self.schedule.include_networks(['Disney Channel'])\n for episode in new_episodes:\n self.assertTrue(episode.show.network[\"name\"] in ['Disney Channel'])", "def test_names(self):\n for prefix, entry in self.registry.items():\n with self.subTest(prefix=prefix):\n self.assertFalse(\n entry.name is None\n and \"name\" not in get_external(prefix, \"miriam\")\n and \"name\" not in get_external(prefix, \"ols\")\n and \"name\" not in get_external(prefix, \"obofoundry\"),\n msg=f\"{prefix} is missing a name\",\n )", "def check_networks_configuration(\n vm_name, check_nic=False, dns_search=None, dns_servers=None\n):\n status = True\n if check_nic:\n logger.info(\"Check the NIC file name exists\")\n cmd = config_virt.CHECK_NIC_EXIST\n if check_data_on_vm(vm_name, cmd, config_virt.CLOUD_INIT_NIC_NAME):\n logger.info(\"NIC file name exist\")\n else:\n logger.error(\"NIC file name doesn't exist\")\n status = False\n if dns_search:\n logger.info(\"Check DNS search, expected: %s\", dns_search)\n cmd = config_virt.CHECK_DNS_IN_GUEST % dns_search\n if check_data_on_vm(vm_name, cmd, dns_search):\n logger.info(\"DNS search check 
pass\")\n else:\n logger.error(\"DNS search check failed\")\n status = False\n if dns_servers:\n logger.info(\"Check DNS servers, expected: %s\", dns_servers)\n cmd = config_virt.CHECK_DNS_IN_GUEST % dns_servers\n if check_data_on_vm(vm_name, cmd, dns_servers):\n logger.info(\"DNS servers check pass\")\n else:\n logger.error(\"DNS servers check failed\")\n status = False\n return status", "def _get_all_interfaces_names(self): \n \n listOfInterfaceName = []\n \n cmd = 'ip link show | awk \\'/eth[0-9]/ {print $0}\\''\n stdout, stderr, rc = _exec_command(cmd)\n \n if rc !=0 or stderr !='':\n raise AssertionError('*ERROR* cmd=%s, rc=%s, %s %s' %(cmd,rc,stdout,stderr)) \n \n listOfContent = stdout.split('\\n')\n for content in listOfContent:\n subcontent = content.split(':')\n if (len(subcontent) > 2):\n listOfInterfaceName.append(subcontent[1].lstrip())\n \n return listOfInterfaceName", "def _validate_networks(self, attrs):\n service_settings = attrs['service_settings']\n project = attrs['project']\n\n if is_basic_mode():\n customer = project.customer\n try:\n network = models.Network.objects.filter(\n settings=service_settings, customernetwork__customer=customer\n ).get()\n except ObjectDoesNotExist:\n raise serializers.ValidationError(\n 'There is no network assigned to the current customer.'\n )\n except MultipleObjectsReturned:\n raise serializers.ValidationError(\n 'There are multiple networks assigned to the current customer.'\n )\n else:\n attrs['networks'] = [network]\n return attrs\n\n networks = attrs.get('networks', [])\n\n for network in networks:\n if network.settings != service_settings:\n raise serializers.ValidationError(\n 'This network is not available for this service.'\n )\n\n if not network.customernetwork_set.filter(\n customer=project.customer\n ).exists():\n raise serializers.ValidationError(\n 'This network is not available for this customer.'\n )\n\n return attrs", "def netlist_comp_check(skidl_netlist, pyspice_netlist):\n #only care about the final netlist string\n skidl_netlist=skidl_netlist.str()\n pyspice_netlist=pyspice_netlist.str()\n \n #check the lengths\n if len(skidl_netlist)>len(pyspice_netlist):\n return('skidl_netlist is longer then pyspice_netlist')\n elif len(skidl_netlist)<len(pyspice_netlist):\n return('skidl_netlist is shorter then pyspice_netlist') \n \n #compare strings char by char\n else:\n string_check=[i for i in range(len(skidl_netlist)) if skidl_netlist[i] != pyspice_netlist[i]]\n if string_check==[]:\n return 'Match'\n else:\n print('Match failed skidl_netlist:')\n print(f'{[i|1 for i in string_check]}')\n return string_check" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the neutron_utils.create_network() function
def test_create_network(self):
    self.network = neutron_utils.create_network(
        self.neutron, self.os_creds, self.net_config.network_settings)
    self.assertEqual(self.net_config.network_settings.name,
                     self.network.name)
    self.assertTrue(validate_network(
        self.neutron, self.keystone,
        self.net_config.network_settings.name, True,
        self.os_creds.project_name))
    self.assertEqual(len(self.net_config.network_settings.subnet_settings),
                     len(self.network.subnets))
[ "def create_network():\n with settings(warn_only=True):\n run(f'docker network create {network_name}')", "def test_create_network_null_name(self):\n with self.assertRaises(Exception):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds,\n network_settings=NetworkConfig())", "def test_create_network_empty_name(self):\n with self.assertRaises(Exception):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds,\n network_settings=NetworkConfig(name=''))", "def create_network_action(self, netsim, number, prefix):\n self.log.info('Creating new netsim network')\n response = None\n while True:\n # Create the network\n create_response = netsim.create_network(number, prefix)\n response = create_response\n if create_response.error:\n break\n # Init netsim device configuration\n init_response = netsim.init_config('')\n if init_response.error:\n response = init_response\n break\n # Load init configuration to cdb\n load_response = netsim.load_config()\n if load_response.error:\n response = load_response\n break\n # all operations finished\n break\n\n return response", "def test_create_network_stateful(self):\n sub_setting = SubnetConfig(\n name=self.guid + '-subnet', cidr='1:1:0:0:0:0:0:0/64',\n ip_version=6, dns_nameservers=['2620:0:ccc:0:0:0:0:2'],\n gateway_ip='1:1:0:0:0:0:0:1', start='1:1::ff', end='1:1::ffff',\n enable_dhcp=True, ipv6_ra_mode='dhcpv6-stateful',\n ipv6_address_mode='dhcpv6-stateful')\n self.network_settings = NetworkConfig(\n name=self.guid + '-net', subnet_settings=[sub_setting])\n\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.network_settings)\n\n self.assertEqual(self.network_settings.name, self.network.name)\n\n subnet_settings = self.network_settings.subnet_settings[0]\n self.assertEqual(1, len(self.network.subnets))\n subnet = self.network.subnets[0]\n\n self.assertEqual(self.network.id, subnet.network_id)\n self.assertEqual(subnet_settings.name, subnet.name)\n self.assertEqual(subnet_settings.start, subnet.start)\n self.assertEqual(subnet_settings.end, subnet.end)\n self.assertEqual('1:1::/64', subnet.cidr)\n self.assertEqual(6, subnet.ip_version)\n self.assertEqual(1, len(subnet.dns_nameservers))\n self.assertEqual(\n sub_setting.dns_nameservers[0], subnet.dns_nameservers[0])\n self.assertTrue(subnet.enable_dhcp)\n self.assertEqual(\n subnet_settings.ipv6_ra_mode.value, subnet.ipv6_ra_mode)\n self.assertEqual(\n subnet_settings.ipv6_address_mode.value, subnet.ipv6_address_mode)", "def test_organization_networks_create(self):\n self.assertEqual(\n \"https://dashboard.meraki.com/api/v0/organizations/\"\n + ORGANIZATION_ID\n + \"/networks\"\n , MerakiAPI(KEY)\n .organizations(ORGANIZATION_ID)\n .networks()\n .lazy()\n .create({})\n .cached\n .url\n )", "def test_create_network_fail_not_superuser(client: TestClient, db: Session) -> None:\n user = create_random_user(db)\n user_token_headers = authentication_token_from_email(\n client=client, email=user.email, db=db\n )\n data = {\"node_type\": \"network\", \"name\": random_lower_string(), \"is_active\": True}\n response = client.post(\n f\"{settings.API_V1_STR}/nodes/\",\n headers=user_token_headers,\n json=data,\n )\n assert response.status_code == 403\n content = response.json()\n assert content[\"detail\"] == \"Only superusers can create new networks.\"", "def __create_network__(self,**kwargs):\n\t\tself.validate_args(**kwargs)\n\t\t#first create the network\n\t\texisting_networks = self.neutronClient.get_networks()\n\t\tnew_network = 
kwargs[\"network\"]\n\t\tnew_subnet_cidr = kwargs[\"cidr\"]\n\t\tsubnet_name = kwargs[\"subnet_name\"]\n enable_dhcp = kwargs.get(\"enable_dhcp\", True)\n\n\t\tnetVal = {}\n\t\tsubnetVal = {}\n\t\tnet_id = None\n\t\t#check if the network with the same name exists\n\t\tif not any(network.get('name',None) == new_network for network in existing_networks['networks']) :\n\t\t\t#did not find the network. go ahead and create the network and subnet\n\t\t\tnetVal = self.neutronClient.create_network(new_network)\n\t\t\tsubnetVal = self.neutronClient.create_subnet(netVal['network']['id'],new_subnet_cidr,subnet_name,enable_dhcp)\n netVal = netVal['network']\n subnetVal = subnetVal['subnet']\n\t\t\t#return the dict with the network and subnet details\n\t\telse :\n\t\t\t#network name exists. get network id\n\t\t\tfor network in existing_networks['networks']:\n if new_network == network['name']:\n\t\t\t\t\tnet_id = network['id']\n\t\t\t\t\tnetVal = network\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\t#check if the required subnet also exists\n\t\t\texisting_subnet = self.neutronClient.get_subnets()\n\t\t\tif not any(subnet.get('cidr',None) == new_subnet_cidr for subnet in existing_subnet['subnets']):\n\t\t\t\t#subnet needs to be created under this network\n\t\t\t\tsubnetVal = self.neutronClient.create_subnet(net_id,new_subnet_cidr,subnet_name, enable_dhcp)\n subnetVal = subnetVal['subnet']\n\t\t\telse :\n\t\t\t\tfor subnet in existing_subnet['subnets']:\n #TOCHK: Dont use in for string comparisons\n \t#if new_subnet_cidr in subnet['cidr'] :\n if new_subnet_cidr == subnet['cidr']:\n \tsubnetVal = subnet\n\t\t\t\t\t\tbreak\n\t\tnetVal['subnets'] = subnetVal\n\t\treturn netVal", "def create_network(request):\n cloud_id = request.matchdict['cloud']\n\n params = params_from_request(request)\n network_params = params.get('network')\n subnet_params = params.get('subnet')\n\n auth_context = auth_context_from_request(request)\n\n if not network_params:\n raise RequiredParameterMissingError('network')\n\n # TODO\n if not auth_context.is_owner():\n raise PolicyUnauthorizedError()\n\n try:\n cloud = Cloud.objects.get(owner=auth_context.owner, id=cloud_id)\n except me.DoesNotExist:\n raise CloudNotFoundError\n\n network = methods.create_network(auth_context.owner, cloud, network_params)\n network_dict = network.as_dict()\n\n # Bundling Subnet creation in this call because it is required\n # for backwards compatibility with the current UI\n if subnet_params:\n try:\n subnet = create_subnet(auth_context.owner, cloud,\n network, subnet_params)\n except Exception as exc:\n # Cleaning up the network object in case subnet creation\n # fails for any reason\n network.ctl.delete()\n raise exc\n network_dict['subnet'] = subnet.as_dict()\n\n return network.as_dict()", "def create_network(self, name, neutron_net_id):\n # find a v4 and/or v6 subnet of the network\n shared = \\\n self.neutron_api.get_neutron_network(neutron_net_id)[\n 'shared']\n subnets = self.neutron_api.list_subnets(network_id=neutron_net_id)\n subnets = subnets.get('subnets', [])\n v4_subnet = self._get_subnet(subnets, ip_version=4)\n v6_subnet = self._get_subnet(subnets, ip_version=6)\n if not v4_subnet and not v6_subnet:\n raise exception.ZunException(_(\n \"The Neutron network %s has no subnet\") % neutron_net_id)\n\n # IPAM driver specific options\n ipam_options = {\n \"Driver\": CONF.network.driver_name,\n \"Options\": {\n 'neutron.net.shared': str(shared)\n },\n \"Config\": []\n }\n\n # Driver specific options\n options = {\n 'neutron.net.uuid': neutron_net_id,\n 
'neutron.net.shared': str(shared)\n }\n\n if v4_subnet:\n ipam_options[\"Options\"]['neutron.pool.uuid'] = \\\n self._get_subnetpool(v4_subnet)\n ipam_options['Options']['neutron.subnet.uuid'] = \\\n v4_subnet.get('id')\n ipam_options[\"Config\"].append({\n \"Subnet\": v4_subnet['cidr'],\n \"Gateway\": v4_subnet['gateway_ip']\n })\n\n options['neutron.pool.uuid'] = v4_subnet.get('subnetpool_id')\n options['neutron.subnet.uuid'] = v4_subnet.get('id')\n if v6_subnet:\n ipam_options[\"Options\"]['neutron.pool.v6.uuid'] = \\\n self._get_subnetpool(v6_subnet)\n ipam_options['Options']['neutron.subnet.v6.uuid'] = \\\n v6_subnet.get('id')\n ipam_options[\"Config\"].append({\n \"Subnet\": v6_subnet['cidr'],\n \"Gateway\": v6_subnet['gateway_ip']\n })\n\n options['neutron.pool.v6.uuid'] = v6_subnet.get('subnetpool_id')\n options['neutron.subnet.v6.uuid'] = v6_subnet.get('id')\n\n LOG.debug(\"Calling docker.create_network to create network %s, \"\n \"ipam_options %s, options %s\", name, ipam_options, options)\n docker_network = self.docker.create_network(\n name=name,\n driver=CONF.network.driver_name,\n enable_ipv6=True if v6_subnet else False,\n options=options,\n ipam=ipam_options)\n\n return docker_network", "def test_create_subnet_null_name(self):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)\n self.assertEqual(self.net_config.network_settings.name,\n self.network.name)\n self.assertTrue(validate_network(\n self.neutron, self.keystone,\n self.net_config.network_settings.name, True,\n self.os_creds.project_name))\n\n with self.assertRaises(Exception):\n SubnetConfig(cidr=self.net_config.subnet_cidr)", "def create_network(req):\n db = YamlDB()\n err, msg = db.new_network_group(Const.KUBAM_CFG, req)\n if err == 1:\n return {\"error\": msg}, Const.HTTP_BAD_REQUEST\n return {\"status\": \"Network {0} created!\".format(req['name'])}, Const.HTTP_CREATED", "def createNetwork(context):\n if common.MY_DEBUG:\n print 'ENTER vpc.createNetwork'\n\n my_vpc_name = getNetworkName(context)\n\n ret = {\n 'name': my_vpc_name,\n 'type': 'compute.v1.network',\n 'properties': {\n 'routingConfig': {\n 'routingMode': 'REGIONAL'\n },\n 'autoCreateSubnetworks': False\n }\n }\n if common.MY_DEBUG:\n print 'EXIT vpc.createNetwork, ret: ' + str(ret)\n return ret", "def test_create_subnet_empty_cidr(self):\n self.net_config.network_settings.subnet_settings[0].cidr = ''\n with self.assertRaises(Exception):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)", "def test_create_port(self):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)\n self.assertEqual(self.net_config.network_settings.name,\n self.network.name)\n self.assertTrue(validate_network(\n self.neutron, self.keystone,\n self.net_config.network_settings.name, True,\n self.os_creds.project_name))\n\n subnet_setting = self.net_config.network_settings.subnet_settings[0]\n self.assertTrue(validate_subnet(\n self.neutron, self.network, subnet_setting.name,\n subnet_setting.cidr, True))\n\n self.port = neutron_utils.create_port(\n self.neutron, self.os_creds, PortConfig(\n name=self.port_name,\n ip_addrs=[{\n 'subnet_name': subnet_setting.name,\n 'ip': ip_1}],\n network_name=self.net_config.network_settings.name))\n validate_port(self.neutron, self.port, self.port_name)", "def create_networks():\n from neutronclient.v2_0 import client as NeutronClient\n auth_args = _auth_args()\n if 
not auth_args:\n return\n client = NeutronClient.Client(**auth_args)\n ensure_client_connectivity(client.list_networks)\n\n mgt_net = (\n config('management-network-cidr'), config('management-network-name'))\n ext_net = (\n config('external-network-cidr'), config('external-network-name'))\n\n networks = []\n subnets = []\n for net in [mgt_net, ext_net]:\n if net == ext_net:\n external = True\n else:\n external = False\n\n net_cidr, net_name = net\n network = get_or_create_network(client, net_name, external)\n networks.append(network)\n subnets.append(get_or_create_subnet(client, net_cidr, network['id']))\n\n # since this data is not available in any relation and to avoid a call\n # to neutron API for every config write out, save this data locally\n # for access from config context.\n net_data = {\n 'networks': networks,\n 'subnets': subnets,\n }\n with open(ASTARA_NETWORK_CACHE, 'w') as out:\n out.write(json.dumps(net_data))\n\n return net_data", "def networkCreateXML(self, xmlDesc):\n ret = libvirtmod.virNetworkCreateXML(self._o, xmlDesc)\n if ret is None:raise libvirtError('virNetworkCreateXML() failed', conn=self)\n __tmp = virNetwork(self, _obj=ret)\n return __tmp", "def test_create_network_with_bad_cidr(self):\n sub_setting = SubnetConfig(\n name=self.guid + '-subnet', cidr='1:1:1:/48', ip_version=6)\n self.network_settings = NetworkConfig(\n name=self.guid + '-net', subnet_settings=[sub_setting])\n\n with self.assertRaises(BadRequest):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.network_settings)", "def test_create_network_no_dhcp_slaac(self):\n sub_setting = SubnetConfig(\n name=self.guid + '-subnet', cidr='1:1:0:0:0:0:0:0/64',\n ip_version=6, dns_nameservers=['2620:0:ccc:0:0:0:0:2'],\n gateway_ip='1:1:0:0:0:0:0:1', start='1:1::ff', end='1:1::ffff',\n enable_dhcp=False, ipv6_ra_mode='slaac', ipv6_address_mode='slaac')\n self.network_settings = NetworkConfig(\n name=self.guid + '-net', subnet_settings=[sub_setting])\n\n with self.assertRaises(BadRequest):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.network_settings)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the neutron_utils.create_network() function with an empty network name
def test_create_network_empty_name(self):
    with self.assertRaises(Exception):
        self.network = neutron_utils.create_network(
            self.neutron, self.os_creds,
            network_settings=NetworkConfig(name=''))
[ "def test_create_network_null_name(self):\n with self.assertRaises(Exception):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds,\n network_settings=NetworkConfig())", "def create_network():\n with settings(warn_only=True):\n run(f'docker network create {network_name}')", "def test_create_network(self):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)\n self.assertEqual(self.net_config.network_settings.name,\n self.network.name)\n self.assertTrue(validate_network(\n self.neutron, self.keystone,\n self.net_config.network_settings.name, True,\n self.os_creds.project_name))\n self.assertEqual(len(self.net_config.network_settings.subnet_settings),\n len(self.network.subnets))", "def test_create_subnet_null_name(self):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)\n self.assertEqual(self.net_config.network_settings.name,\n self.network.name)\n self.assertTrue(validate_network(\n self.neutron, self.keystone,\n self.net_config.network_settings.name, True,\n self.os_creds.project_name))\n\n with self.assertRaises(Exception):\n SubnetConfig(cidr=self.net_config.subnet_cidr)", "def test_create_subnet_empty_cidr(self):\n self.net_config.network_settings.subnet_settings[0].cidr = ''\n with self.assertRaises(Exception):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)", "def test_create_subnet_null_cidr(self):\n self.net_config.network_settings.subnet_settings[0].cidr = None\n with self.assertRaises(Exception):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)", "def test_create_network_fail_not_superuser(client: TestClient, db: Session) -> None:\n user = create_random_user(db)\n user_token_headers = authentication_token_from_email(\n client=client, email=user.email, db=db\n )\n data = {\"node_type\": \"network\", \"name\": random_lower_string(), \"is_active\": True}\n response = client.post(\n f\"{settings.API_V1_STR}/nodes/\",\n headers=user_token_headers,\n json=data,\n )\n assert response.status_code == 403\n content = response.json()\n assert content[\"detail\"] == \"Only superusers can create new networks.\"", "def make_network(name: str = None) -> Network:\n return Network(\n name=(str(name) if name is not None else n()),\n version=n(),\n )", "def create_network_action(self, netsim, number, prefix):\n self.log.info('Creating new netsim network')\n response = None\n while True:\n # Create the network\n create_response = netsim.create_network(number, prefix)\n response = create_response\n if create_response.error:\n break\n # Init netsim device configuration\n init_response = netsim.init_config('')\n if init_response.error:\n response = init_response\n break\n # Load init configuration to cdb\n load_response = netsim.load_config()\n if load_response.error:\n response = load_response\n break\n # all operations finished\n break\n\n return response", "def docker_create_network(name):\n process = subprocess.Popen(\n [\n \"docker\",\n \"network\",\n \"create\",\n \"--opt\",\n \"encrypted\",\n \"--driver\",\n \"overlay\",\n name,\n ],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n process.wait()\n logging.info(f\"created the {name} network\")", "def test_create_port_null_name(self):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)\n 
self.assertEqual(self.net_config.network_settings.name,\n self.network.name)\n self.assertTrue(validate_network(\n self.neutron, self.keystone,\n self.net_config.network_settings.name, True,\n self.os_creds.project_name))\n\n subnet_setting = self.net_config.network_settings.subnet_settings[0]\n self.assertTrue(validate_subnet(\n self.neutron, self.network, subnet_setting.name,\n subnet_setting.cidr, True))\n\n self.port = neutron_utils.create_port(\n self.neutron, self.os_creds,\n PortConfig(\n network_name=self.net_config.network_settings.name,\n ip_addrs=[{\n 'subnet_name': subnet_setting.name,\n 'ip': ip_1}]))\n\n port = neutron_utils.get_port_by_id(self.neutron, self.port.id)\n self.assertEqual(self.port, port)", "def create_network(request):\n cloud_id = request.matchdict['cloud']\n\n params = params_from_request(request)\n network_params = params.get('network')\n subnet_params = params.get('subnet')\n\n auth_context = auth_context_from_request(request)\n\n if not network_params:\n raise RequiredParameterMissingError('network')\n\n # TODO\n if not auth_context.is_owner():\n raise PolicyUnauthorizedError()\n\n try:\n cloud = Cloud.objects.get(owner=auth_context.owner, id=cloud_id)\n except me.DoesNotExist:\n raise CloudNotFoundError\n\n network = methods.create_network(auth_context.owner, cloud, network_params)\n network_dict = network.as_dict()\n\n # Bundling Subnet creation in this call because it is required\n # for backwards compatibility with the current UI\n if subnet_params:\n try:\n subnet = create_subnet(auth_context.owner, cloud,\n network, subnet_params)\n except Exception as exc:\n # Cleaning up the network object in case subnet creation\n # fails for any reason\n network.ctl.delete()\n raise exc\n network_dict['subnet'] = subnet.as_dict()\n\n return network.as_dict()", "def create_network(req):\n db = YamlDB()\n err, msg = db.new_network_group(Const.KUBAM_CFG, req)\n if err == 1:\n return {\"error\": msg}, Const.HTTP_BAD_REQUEST\n return {\"status\": \"Network {0} created!\".format(req['name'])}, Const.HTTP_CREATED", "def __create_network__(self,**kwargs):\n\t\tself.validate_args(**kwargs)\n\t\t#first create the network\n\t\texisting_networks = self.neutronClient.get_networks()\n\t\tnew_network = kwargs[\"network\"]\n\t\tnew_subnet_cidr = kwargs[\"cidr\"]\n\t\tsubnet_name = kwargs[\"subnet_name\"]\n enable_dhcp = kwargs.get(\"enable_dhcp\", True)\n\n\t\tnetVal = {}\n\t\tsubnetVal = {}\n\t\tnet_id = None\n\t\t#check if the network with the same name exists\n\t\tif not any(network.get('name',None) == new_network for network in existing_networks['networks']) :\n\t\t\t#did not find the network. go ahead and create the network and subnet\n\t\t\tnetVal = self.neutronClient.create_network(new_network)\n\t\t\tsubnetVal = self.neutronClient.create_subnet(netVal['network']['id'],new_subnet_cidr,subnet_name,enable_dhcp)\n netVal = netVal['network']\n subnetVal = subnetVal['subnet']\n\t\t\t#return the dict with the network and subnet details\n\t\telse :\n\t\t\t#network name exists. 
get network id\n\t\t\tfor network in existing_networks['networks']:\n if new_network == network['name']:\n\t\t\t\t\tnet_id = network['id']\n\t\t\t\t\tnetVal = network\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\t#check if the required subnet also exists\n\t\t\texisting_subnet = self.neutronClient.get_subnets()\n\t\t\tif not any(subnet.get('cidr',None) == new_subnet_cidr for subnet in existing_subnet['subnets']):\n\t\t\t\t#subnet needs to be created under this network\n\t\t\t\tsubnetVal = self.neutronClient.create_subnet(net_id,new_subnet_cidr,subnet_name, enable_dhcp)\n subnetVal = subnetVal['subnet']\n\t\t\telse :\n\t\t\t\tfor subnet in existing_subnet['subnets']:\n #TOCHK: Dont use in for string comparisons\n \t#if new_subnet_cidr in subnet['cidr'] :\n if new_subnet_cidr == subnet['cidr']:\n \tsubnetVal = subnet\n\t\t\t\t\t\tbreak\n\t\tnetVal['subnets'] = subnetVal\n\t\treturn netVal", "def create_network(self, name, neutron_net_id):\n # find a v4 and/or v6 subnet of the network\n shared = \\\n self.neutron_api.get_neutron_network(neutron_net_id)[\n 'shared']\n subnets = self.neutron_api.list_subnets(network_id=neutron_net_id)\n subnets = subnets.get('subnets', [])\n v4_subnet = self._get_subnet(subnets, ip_version=4)\n v6_subnet = self._get_subnet(subnets, ip_version=6)\n if not v4_subnet and not v6_subnet:\n raise exception.ZunException(_(\n \"The Neutron network %s has no subnet\") % neutron_net_id)\n\n # IPAM driver specific options\n ipam_options = {\n \"Driver\": CONF.network.driver_name,\n \"Options\": {\n 'neutron.net.shared': str(shared)\n },\n \"Config\": []\n }\n\n # Driver specific options\n options = {\n 'neutron.net.uuid': neutron_net_id,\n 'neutron.net.shared': str(shared)\n }\n\n if v4_subnet:\n ipam_options[\"Options\"]['neutron.pool.uuid'] = \\\n self._get_subnetpool(v4_subnet)\n ipam_options['Options']['neutron.subnet.uuid'] = \\\n v4_subnet.get('id')\n ipam_options[\"Config\"].append({\n \"Subnet\": v4_subnet['cidr'],\n \"Gateway\": v4_subnet['gateway_ip']\n })\n\n options['neutron.pool.uuid'] = v4_subnet.get('subnetpool_id')\n options['neutron.subnet.uuid'] = v4_subnet.get('id')\n if v6_subnet:\n ipam_options[\"Options\"]['neutron.pool.v6.uuid'] = \\\n self._get_subnetpool(v6_subnet)\n ipam_options['Options']['neutron.subnet.v6.uuid'] = \\\n v6_subnet.get('id')\n ipam_options[\"Config\"].append({\n \"Subnet\": v6_subnet['cidr'],\n \"Gateway\": v6_subnet['gateway_ip']\n })\n\n options['neutron.pool.v6.uuid'] = v6_subnet.get('subnetpool_id')\n options['neutron.subnet.v6.uuid'] = v6_subnet.get('id')\n\n LOG.debug(\"Calling docker.create_network to create network %s, \"\n \"ipam_options %s, options %s\", name, ipam_options, options)\n docker_network = self.docker.create_network(\n name=name,\n driver=CONF.network.driver_name,\n enable_ipv6=True if v6_subnet else False,\n options=options,\n ipam=ipam_options)\n\n return docker_network", "def createNetwork(context):\n if common.MY_DEBUG:\n print 'ENTER vpc.createNetwork'\n\n my_vpc_name = getNetworkName(context)\n\n ret = {\n 'name': my_vpc_name,\n 'type': 'compute.v1.network',\n 'properties': {\n 'routingConfig': {\n 'routingMode': 'REGIONAL'\n },\n 'autoCreateSubnetworks': False\n }\n }\n if common.MY_DEBUG:\n print 'EXIT vpc.createNetwork, ret: ' + str(ret)\n return ret", "def test_create_network_no_dhcp_slaac(self):\n sub_setting = SubnetConfig(\n name=self.guid + '-subnet', cidr='1:1:0:0:0:0:0:0/64',\n ip_version=6, dns_nameservers=['2620:0:ccc:0:0:0:0:2'],\n gateway_ip='1:1:0:0:0:0:0:1', start='1:1::ff', end='1:1::ffff',\n 
enable_dhcp=False, ipv6_ra_mode='slaac', ipv6_address_mode='slaac')\n self.network_settings = NetworkConfig(\n name=self.guid + '-net', subnet_settings=[sub_setting])\n\n with self.assertRaises(BadRequest):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.network_settings)", "def test_organization_networks_create(self):\n self.assertEqual(\n \"https://dashboard.meraki.com/api/v0/organizations/\"\n + ORGANIZATION_ID\n + \"/networks\"\n , MerakiAPI(KEY)\n .organizations(ORGANIZATION_ID)\n .networks()\n .lazy()\n .create({})\n .cached\n .url\n )", "def test_create_port_null_network_object(self):\n with self.assertRaises(Exception):\n self.port = neutron_utils.create_port(\n self.neutron, self.os_creds,\n PortConfig(\n name=self.port_name,\n network_name=self.net_config.network_settings.name,\n ip_addrs=[{\n 'subnet_name':\n self.net_config.network_settings.subnet_settings[\n 0].name,\n 'ip': ip_1}]))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the neutron_utils.create_network() function when the network name is None
def test_create_network_null_name(self):
    with self.assertRaises(Exception):
        self.network = neutron_utils.create_network(
            self.neutron, self.os_creds,
            network_settings=NetworkConfig())
[ "def test_create_network_empty_name(self):\n with self.assertRaises(Exception):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds,\n network_settings=NetworkConfig(name=''))", "def create_network():\n with settings(warn_only=True):\n run(f'docker network create {network_name}')", "def test_create_subnet_null_name(self):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)\n self.assertEqual(self.net_config.network_settings.name,\n self.network.name)\n self.assertTrue(validate_network(\n self.neutron, self.keystone,\n self.net_config.network_settings.name, True,\n self.os_creds.project_name))\n\n with self.assertRaises(Exception):\n SubnetConfig(cidr=self.net_config.subnet_cidr)", "def test_create_network(self):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)\n self.assertEqual(self.net_config.network_settings.name,\n self.network.name)\n self.assertTrue(validate_network(\n self.neutron, self.keystone,\n self.net_config.network_settings.name, True,\n self.os_creds.project_name))\n self.assertEqual(len(self.net_config.network_settings.subnet_settings),\n len(self.network.subnets))", "def test_create_port_null_name(self):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)\n self.assertEqual(self.net_config.network_settings.name,\n self.network.name)\n self.assertTrue(validate_network(\n self.neutron, self.keystone,\n self.net_config.network_settings.name, True,\n self.os_creds.project_name))\n\n subnet_setting = self.net_config.network_settings.subnet_settings[0]\n self.assertTrue(validate_subnet(\n self.neutron, self.network, subnet_setting.name,\n subnet_setting.cidr, True))\n\n self.port = neutron_utils.create_port(\n self.neutron, self.os_creds,\n PortConfig(\n network_name=self.net_config.network_settings.name,\n ip_addrs=[{\n 'subnet_name': subnet_setting.name,\n 'ip': ip_1}]))\n\n port = neutron_utils.get_port_by_id(self.neutron, self.port.id)\n self.assertEqual(self.port, port)", "def make_network(name: str = None) -> Network:\n return Network(\n name=(str(name) if name is not None else n()),\n version=n(),\n )", "def docker_create_network(name):\n process = subprocess.Popen(\n [\n \"docker\",\n \"network\",\n \"create\",\n \"--opt\",\n \"encrypted\",\n \"--driver\",\n \"overlay\",\n name,\n ],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n process.wait()\n logging.info(f\"created the {name} network\")", "def test_create_port_null_network_object(self):\n with self.assertRaises(Exception):\n self.port = neutron_utils.create_port(\n self.neutron, self.os_creds,\n PortConfig(\n name=self.port_name,\n network_name=self.net_config.network_settings.name,\n ip_addrs=[{\n 'subnet_name':\n self.net_config.network_settings.subnet_settings[\n 0].name,\n 'ip': ip_1}]))", "def __create_network__(self,**kwargs):\n\t\tself.validate_args(**kwargs)\n\t\t#first create the network\n\t\texisting_networks = self.neutronClient.get_networks()\n\t\tnew_network = kwargs[\"network\"]\n\t\tnew_subnet_cidr = kwargs[\"cidr\"]\n\t\tsubnet_name = kwargs[\"subnet_name\"]\n enable_dhcp = kwargs.get(\"enable_dhcp\", True)\n\n\t\tnetVal = {}\n\t\tsubnetVal = {}\n\t\tnet_id = None\n\t\t#check if the network with the same name exists\n\t\tif not any(network.get('name',None) == new_network for network in existing_networks['networks']) :\n\t\t\t#did not find the network. 
go ahead and create the network and subnet\n\t\t\tnetVal = self.neutronClient.create_network(new_network)\n\t\t\tsubnetVal = self.neutronClient.create_subnet(netVal['network']['id'],new_subnet_cidr,subnet_name,enable_dhcp)\n netVal = netVal['network']\n subnetVal = subnetVal['subnet']\n\t\t\t#return the dict with the network and subnet details\n\t\telse :\n\t\t\t#network name exists. get network id\n\t\t\tfor network in existing_networks['networks']:\n if new_network == network['name']:\n\t\t\t\t\tnet_id = network['id']\n\t\t\t\t\tnetVal = network\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\t#check if the required subnet also exists\n\t\t\texisting_subnet = self.neutronClient.get_subnets()\n\t\t\tif not any(subnet.get('cidr',None) == new_subnet_cidr for subnet in existing_subnet['subnets']):\n\t\t\t\t#subnet needs to be created under this network\n\t\t\t\tsubnetVal = self.neutronClient.create_subnet(net_id,new_subnet_cidr,subnet_name, enable_dhcp)\n subnetVal = subnetVal['subnet']\n\t\t\telse :\n\t\t\t\tfor subnet in existing_subnet['subnets']:\n #TOCHK: Dont use in for string comparisons\n \t#if new_subnet_cidr in subnet['cidr'] :\n if new_subnet_cidr == subnet['cidr']:\n \tsubnetVal = subnet\n\t\t\t\t\t\tbreak\n\t\tnetVal['subnets'] = subnetVal\n\t\treturn netVal", "def test_create_subnet_null_cidr(self):\n self.net_config.network_settings.subnet_settings[0].cidr = None\n with self.assertRaises(Exception):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)", "def create_network_action(self, netsim, number, prefix):\n self.log.info('Creating new netsim network')\n response = None\n while True:\n # Create the network\n create_response = netsim.create_network(number, prefix)\n response = create_response\n if create_response.error:\n break\n # Init netsim device configuration\n init_response = netsim.init_config('')\n if init_response.error:\n response = init_response\n break\n # Load init configuration to cdb\n load_response = netsim.load_config()\n if load_response.error:\n response = load_response\n break\n # all operations finished\n break\n\n return response", "def createNetwork(context):\n if common.MY_DEBUG:\n print 'ENTER vpc.createNetwork'\n\n my_vpc_name = getNetworkName(context)\n\n ret = {\n 'name': my_vpc_name,\n 'type': 'compute.v1.network',\n 'properties': {\n 'routingConfig': {\n 'routingMode': 'REGIONAL'\n },\n 'autoCreateSubnetworks': False\n }\n }\n if common.MY_DEBUG:\n print 'EXIT vpc.createNetwork, ret: ' + str(ret)\n return ret", "def create_network(self, name, neutron_net_id):\n # find a v4 and/or v6 subnet of the network\n shared = \\\n self.neutron_api.get_neutron_network(neutron_net_id)[\n 'shared']\n subnets = self.neutron_api.list_subnets(network_id=neutron_net_id)\n subnets = subnets.get('subnets', [])\n v4_subnet = self._get_subnet(subnets, ip_version=4)\n v6_subnet = self._get_subnet(subnets, ip_version=6)\n if not v4_subnet and not v6_subnet:\n raise exception.ZunException(_(\n \"The Neutron network %s has no subnet\") % neutron_net_id)\n\n # IPAM driver specific options\n ipam_options = {\n \"Driver\": CONF.network.driver_name,\n \"Options\": {\n 'neutron.net.shared': str(shared)\n },\n \"Config\": []\n }\n\n # Driver specific options\n options = {\n 'neutron.net.uuid': neutron_net_id,\n 'neutron.net.shared': str(shared)\n }\n\n if v4_subnet:\n ipam_options[\"Options\"]['neutron.pool.uuid'] = \\\n self._get_subnetpool(v4_subnet)\n ipam_options['Options']['neutron.subnet.uuid'] = \\\n v4_subnet.get('id')\n 
ipam_options[\"Config\"].append({\n \"Subnet\": v4_subnet['cidr'],\n \"Gateway\": v4_subnet['gateway_ip']\n })\n\n options['neutron.pool.uuid'] = v4_subnet.get('subnetpool_id')\n options['neutron.subnet.uuid'] = v4_subnet.get('id')\n if v6_subnet:\n ipam_options[\"Options\"]['neutron.pool.v6.uuid'] = \\\n self._get_subnetpool(v6_subnet)\n ipam_options['Options']['neutron.subnet.v6.uuid'] = \\\n v6_subnet.get('id')\n ipam_options[\"Config\"].append({\n \"Subnet\": v6_subnet['cidr'],\n \"Gateway\": v6_subnet['gateway_ip']\n })\n\n options['neutron.pool.v6.uuid'] = v6_subnet.get('subnetpool_id')\n options['neutron.subnet.v6.uuid'] = v6_subnet.get('id')\n\n LOG.debug(\"Calling docker.create_network to create network %s, \"\n \"ipam_options %s, options %s\", name, ipam_options, options)\n docker_network = self.docker.create_network(\n name=name,\n driver=CONF.network.driver_name,\n enable_ipv6=True if v6_subnet else False,\n options=options,\n ipam=ipam_options)\n\n return docker_network", "def create_network(req):\n db = YamlDB()\n err, msg = db.new_network_group(Const.KUBAM_CFG, req)\n if err == 1:\n return {\"error\": msg}, Const.HTTP_BAD_REQUEST\n return {\"status\": \"Network {0} created!\".format(req['name'])}, Const.HTTP_CREATED", "def create_network(request):\n cloud_id = request.matchdict['cloud']\n\n params = params_from_request(request)\n network_params = params.get('network')\n subnet_params = params.get('subnet')\n\n auth_context = auth_context_from_request(request)\n\n if not network_params:\n raise RequiredParameterMissingError('network')\n\n # TODO\n if not auth_context.is_owner():\n raise PolicyUnauthorizedError()\n\n try:\n cloud = Cloud.objects.get(owner=auth_context.owner, id=cloud_id)\n except me.DoesNotExist:\n raise CloudNotFoundError\n\n network = methods.create_network(auth_context.owner, cloud, network_params)\n network_dict = network.as_dict()\n\n # Bundling Subnet creation in this call because it is required\n # for backwards compatibility with the current UI\n if subnet_params:\n try:\n subnet = create_subnet(auth_context.owner, cloud,\n network, subnet_params)\n except Exception as exc:\n # Cleaning up the network object in case subnet creation\n # fails for any reason\n network.ctl.delete()\n raise exc\n network_dict['subnet'] = subnet.as_dict()\n\n return network.as_dict()", "def _create(self, driver: str, driver_options: Mapping[str, str]={}) -> None:\n assert(self.ip_network)\n dc = self._host.docker_client\n\n # Check if bridge already exists\n if self._docker_id is not None:\n try:\n dc.networks.get(self._docker_id)\n return\n except docker.errors.NotFound:\n self._docker_id = None\n log.warning(\"Docker network %s [%s] (%s) not found.\", self.name, self._host.name, self._docker_id)\n\n # Create bridge\n ipam_pool = docker.types.IPAMPool(\n subnet=str(self.ip_network)\n )\n ipam_config = docker.types.IPAMConfig(\n pool_configs=[ipam_pool]\n )\n\n additional_args = {}\n if driver == 'overlay':\n # Allows standalone containers to connect to overlay networks.\n additional_args['attachable'] = True\n\n try:\n network = dc.networks.create(self.name, driver=driver, ipam=ipam_config,\n options=driver_options,\n enable_ipv6=(self.ip_network.version == 6),\n **additional_args)\n\n self._docker_id = network.id\n log.info(\"Created Docker %s network %s [%s] (%s) for '%s'.\",\n driver, self.name, self._host.name, self._docker_id, self.ip_network)\n\n except docker.errors.APIError as e:\n log.error(\"Error creating network %s: %s\", self.name, str(e))\n raise", "def 
get_or_create_network(self, name, driver='overlay'):\n try:\n return self._docker.networks.create(\n name=name,\n driver=driver,\n scope='swarm' if driver == 'overlay' else 'local',\n attachable=True)\n except docker.errors.APIError as exc:\n if 'already exists' in exc.explanation:\n return self._docker.networks.get(name)\n else:\n raise\n # self._dispatch(['network', 'create', name, '--attachable', '--scope', 'swarm'])\n # return self.get_network(name)", "def test_create_subnet_empty_cidr(self):\n self.net_config.network_settings.subnet_settings[0].cidr = ''\n with self.assertRaises(Exception):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)", "def _create_virtual_network(self) -> dict:\n name = ''\n while not name:\n name = input(self.format('Virtual network name: '))\n name = name.strip()\n\n print('Creating virtual network...')\n\n try:\n virtual_network = self._run_az([\n 'network', 'vnet', 'create',\n '--name', name,\n '--location', self._selected_resource_group['location'],\n '--resource-group', self._selected_resource_group['name']\n ])\n self._az_virtual_networks.append(virtual_network)\n\n except APIError as e:\n print(self.format_error(str(e)))\n return self._create_virtual_network()\n\n return virtual_network" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the neutron_utils.create_neutron_subnet() function for an Exception when the subnet name is None
def test_create_subnet_null_name(self):
    self.network = neutron_utils.create_network(
        self.neutron, self.os_creds, self.net_config.network_settings)
    self.assertEqual(self.net_config.network_settings.name,
                     self.network.name)
    self.assertTrue(validate_network(
        self.neutron, self.keystone,
        self.net_config.network_settings.name, True,
        self.os_creds.project_name))

    with self.assertRaises(Exception):
        SubnetConfig(cidr=self.net_config.subnet_cidr)
[ "def test_create_subnet_null_cidr(self):\n self.net_config.network_settings.subnet_settings[0].cidr = None\n with self.assertRaises(Exception):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)", "def test_create_subnet_empty_cidr(self):\n self.net_config.network_settings.subnet_settings[0].cidr = ''\n with self.assertRaises(Exception):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)", "def test_create_subnet_no_cidr_and_default_subnetpool(self):\n with self.network() as network:\n tenant_id = network['network']['tenant_id']\n subnetpool_prefix = '10.0.0.0/8'\n with self.subnetpool(prefixes=[subnetpool_prefix],\n admin=True,\n name=\"My subnet pool\",\n tenant_id=tenant_id,\n min_prefixlen='25',\n is_default=True):\n data = {'subnet': {'network_id': network['network']['id'],\n 'ip_version': constants.IP_VERSION_4,\n 'tenant_id': tenant_id}}\n subnet_req = self.new_create_request('subnets', data)\n res = subnet_req.get_response(self.api)\n self.assertEqual(\n webob.exc.HTTPClientError.code, res.status_int)", "def test_create_network_null_name(self):\n with self.assertRaises(Exception):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds,\n network_settings=NetworkConfig())", "def test_add_interface_router_null_subnet(self):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)\n self.assertEqual(self.net_config.network_settings.name,\n self.network.name)\n self.assertTrue(validate_network(\n self.neutron, self.keystone,\n self.net_config.network_settings.name, True,\n self.os_creds.project_name))\n\n self.router = neutron_utils.create_router(\n self.neutron, self.os_creds, self.net_config.router_settings)\n validate_router(\n self.neutron, self.keystone, self.net_config.router_settings.name,\n self.os_creds.project_name, True)\n\n with self.assertRaises(NeutronException):\n self.interface_router = neutron_utils.add_interface_router(\n self.neutron, self.router, None)", "def test_create_network_empty_name(self):\n with self.assertRaises(Exception):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds,\n network_settings=NetworkConfig(name=''))", "def test_create_network_with_bad_cidr(self):\n sub_setting = SubnetConfig(\n name=self.guid + '-subnet', cidr='1:1:1:/48', ip_version=6)\n self.network_settings = NetworkConfig(\n name=self.guid + '-net', subnet_settings=[sub_setting])\n\n with self.assertRaises(BadRequest):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.network_settings)", "def test_create_subnet_with_cidr_and_default_subnetpool(self):\n with self.network() as network:\n tenant_id = network['network']['tenant_id']\n subnetpool_prefix = '10.0.0.0/8'\n with self.subnetpool(prefixes=[subnetpool_prefix],\n admin=True,\n name=\"My subnet pool\",\n tenant_id=tenant_id,\n min_prefixlen='25',\n is_default=True):\n data = {'subnet': {'network_id': network['network']['id'],\n 'cidr': '10.0.0.0/24',\n 'ip_version': constants.IP_VERSION_4,\n 'tenant_id': tenant_id}}\n subnet_req = self.new_create_request('subnets', data)\n res = subnet_req.get_response(self.api)\n subnet = self.deserialize(self.fmt, res)['subnet']\n self.assertIsNone(subnet['subnetpool_id'])", "def test_create_network_invalid_start_ip(self):\n sub_setting = SubnetConfig(\n name=self.guid + '-subnet', cidr='1:1::/48', ip_version=6,\n start='foo')\n 
self.network_settings = NetworkConfig(\n name=self.guid + '-net', subnet_settings=[sub_setting])\n\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.network_settings)\n\n self.assertEqual('1:1::2', self.network.subnets[0].start)\n self.assertEqual(\n '1:1:0:ffff:ffff:ffff:ffff:ffff', self.network.subnets[0].end)", "def test_add_interface_router_missing_subnet(self):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)\n self.assertEqual(self.net_config.network_settings.name,\n self.network.name)\n self.assertTrue(validate_network(\n self.neutron, self.keystone,\n self.net_config.network_settings.name, True,\n self.os_creds.project_name))\n\n self.router = neutron_utils.create_router(\n self.neutron, self.os_creds, self.net_config.router_settings)\n validate_router(\n self.neutron, self.keystone, self.net_config.router_settings.name,\n self.os_creds.project_name, True)\n\n for subnet in self.network.subnets:\n neutron_utils.delete_subnet(self.neutron, subnet)\n\n with self.assertRaises(NotFound):\n self.interface_router = neutron_utils.add_interface_router(\n self.neutron, self.router, self.network.subnets[0])", "def create_subnet(body=None):\n return IMPL.create_subnet(body)", "def check_negative_create_extra_subnet(self, network):\n exception_message = \"Quota exceeded for resources\"\n assert_that(\n calling(self.create).with_args(\n subnet_name=next(utils.generate_ids()),\n network=network,\n cidr=config.LOCAL_CIDR,\n check=False),\n raises(exceptions.OverQuotaClient, exception_message),\n \"Subnet for network with ID {!r} has been created though it \"\n \"exceeds the quota or OverQuotaClient exception with expected \"\n \"error message has not been appeared\".format(network['id']))", "def subnet_create_api():\r\n try:\r\n req = models.Subnet(request.json)\r\n req.validate()\r\n except Exception as e:\r\n return err_return('Parameter Invalid', 'ParameterInvalid',\r\n '', HTTP_BAD_REQUEST)\r\n try:\r\n if not req.network_id:\r\n return err_return('networkid is required', 'ParameterInvalid',\r\n '', HTTP_BAD_REQUEST)\r\n if not req.subnet_id:\r\n req_id = str(uuid.uuid4())\r\n else:\r\n req_id = req.subnet_id\r\n sb_name = subnet_db_get_one('name', id=req_id)\r\n if sb_name:\r\n return err_return('id(%s) in use by %s' % (req_id, sb_name),\r\n 'ParameterInvalid', '', HTTP_BAD_REQUEST)\r\n if req.subnet_name:\r\n if len(req.subnet_name) > NAME_MAX_LEN:\r\n return err_return('Length of name must be less than 255',\r\n 'ParameterInvalid', '', HTTP_BAD_REQUEST)\r\n else:\r\n req.subnet_name = ''\r\n\r\n external = network_db_get_one('external', id=req.network_id)\r\n if external is None:\r\n return err_return(\"networkid does not exist\",\r\n \"ParameterInvalid\", \"\", HTTP_BAD_REQUEST)\r\n if not req.dns_nameservers:\r\n req.dns_nameservers = []\r\n if not req.allocation_pools:\r\n req.allocation_pools = []\r\n allocation_pools = []\r\n for all_pool in req.allocation_pools:\r\n allocation_pools.append(all_pool.to_primitive())\r\n req.allocation_pools = allocation_pools\r\n for pool in req.allocation_pools:\r\n if ip_to_bin(pool['start']) > ip_to_bin(pool['end']):\r\n return err_return(\"end_ip must be more than start_ip\",\r\n \"IPRangeError\", \"\", HTTP_BAD_REQUEST)\r\n\r\n if external == 0:\r\n if not req.cidr:\r\n return err_return('cidr is required', 'ParameterInvalid',\r\n '', HTTP_BAD_REQUEST)\r\n if not validate_cidr(req.cidr):\r\n return err_return('cidr invalid', 'ParameterInvalid',\r\n '', 
HTTP_BAD_REQUEST)\r\n if not req.gateway_ip:\r\n return err_return('gateway ip is required', 'ParameterInvalid',\r\n '', HTTP_BAD_REQUEST)\r\n vl2lcid = yynetworkid_to_lcvl2id(req.network_id)\r\n log.debug('vl2lcid=%s' % vl2lcid)\r\n nets = [{\"prefix\": VFW_TOR_LINK_NET_PRE,\r\n \"netmask\": VFW_TOR_LINK_NET_MASK}]\r\n cidr = str(req.cidr).split('/')\r\n new_prf = cidr[0]\r\n new_mask = int(cidr[1])\r\n subnets = get_subnets_by_network(req.network_id)\r\n for subnet in subnets:\r\n cidr = subnet['cidr'].split('/')\r\n old_prf = cidr[0]\r\n old_mask = int(cidr[1])\r\n if subnet_equ(new_prf, old_prf, new_mask, old_mask):\r\n log.error('cidr is the same')\r\n return err_return('subnet already exist',\r\n 'ParameterInvalid', '', HTTP_BAD_REQUEST)\r\n nets.append({\"prefix\": old_prf, \"netmask\": old_mask})\r\n nets.append({\"prefix\": new_prf, \"netmask\": new_mask})\r\n log.debug('nets=%s' % nets)\r\n nw_name = network_db_get_one('name', id=req.network_id)\r\n payload = json.dumps({\"name\": nw_name, \"nets\": nets})\r\n r = lcapi.patch(conf.livecloud_url + '/v1/vl2s/%s' % vl2lcid,\r\n data=payload)\r\n if r.status_code != HTTP_OK:\r\n return Response(json.dumps(NEUTRON_400)), HTTP_NOT_FOUND\r\n nets = r.json()['DATA']['NETS']\r\n for net in nets:\r\n if subnet_equ(net['PREFIX'], new_prf,\r\n net['NETMASK'], new_mask):\r\n sb_lcuuid = net['LCUUID']\r\n sb_idx = net['NET_INDEX']\r\n break\r\n else:\r\n log.error('sb_lcuuid no found')\r\n sb_lcuuid = 'sb_lcuuid no found'\r\n sb_idx = -1\r\n else:\r\n subnetid = subnet_db_get_one('id', network_id=req.network_id)\r\n if subnetid:\r\n return err_return('subnet(%s) already exists' % subnetid,\r\n 'Fail', '', HTTP_BAD_REQUEST)\r\n # ISP\r\n if not req.allocation_pools:\r\n return err_return('allocation_pools can not be empty',\r\n 'ParameterInvalid', '', HTTP_BAD_REQUEST)\r\n id = subnet_db_get_one('id', network_id=req.network_id)\r\n if id:\r\n return subnet_get(subnetid=id)\r\n lcuuid = network_db_get_one('lcuuid', id=req.network_id)\r\n isp = lc_vl2_db_get_one('isp', lcuuid=lcuuid)\r\n items = lc_ip_res_db_get_all(req='ip, netmask, gateway, userid',\r\n isp=isp)\r\n if not items:\r\n return err_return(\"No ISP IP found\", \"BadRequest\",\r\n \"Please add ISP IP to system first\",\r\n HTTP_BAD_REQUEST)\r\n req.gateway_ip = items[0]['gateway']\r\n req.cidr = ip_mask_to_cidr(items[0]['ip'], items[0]['netmask'])\r\n isp_all_ips = []\r\n ip_to_userid = {}\r\n for it in items:\r\n isp_all_ips.append(it['ip'])\r\n ip_to_userid[it['ip']] = it['userid']\r\n req_ips = alloc_pools_to_ip_list(req.allocation_pools)\r\n for req_ip in req_ips:\r\n if req_ip not in isp_all_ips:\r\n return err_return(\"%s does not exist\" % req_ip,\r\n \"IPInvalid\", \"\", HTTP_BAD_REQUEST)\r\n if ip_to_userid[req_ip] != 0:\r\n return err_return(\"%s in use\" % req_ip,\r\n \"IPInUse\", \"\", HTTP_BAD_REQUEST)\r\n sb_lcuuid = str(uuid.uuid4())\r\n sb_idx = -1\r\n\r\n sql = (\"INSERT INTO neutron_subnets \"\r\n \"VALUES('%s','%s','%s','%s','%s','%s','%s','%s',%d)\" %\r\n (req_id, req.subnet_name, req.network_id,\r\n req.cidr, json.dumps(req.allocation_pools),\r\n req.gateway_ip, json.dumps(req.dns_nameservers),\r\n sb_lcuuid, sb_idx))\r\n log.debug('add subnet sql=%s' % sql)\r\n with MySQLdb.connect(**DB_INFO) as cursor:\r\n cursor.execute(sql)\r\n if external:\r\n sql = \"UPDATE ip_resource_v2_2 SET userid=%s WHERE ip in ('-1',\"\r\n for req_ip in req_ips:\r\n sql += \"'%s',\" % req_ip\r\n sql = sql[:-1]\r\n sql += \")\"\r\n log.debug('sql=%s' % sql)\r\n with 
MySQLdb.connect(**LCDB_INFO) as cursor:\r\n cursor.execute(sql, conf.livecloud_userid)\r\n\r\n resp, code = subnet_get(subnetid=req_id)\r\n return resp, HTTP_CREATED\r\n\r\n except Exception as e:\r\n log.error(e)\r\n return Response(json.dumps(NEUTRON_500)), HTTP_INTERNAL_SERVER_ERROR", "def __create_network__(self,**kwargs):\n\t\tself.validate_args(**kwargs)\n\t\t#first create the network\n\t\texisting_networks = self.neutronClient.get_networks()\n\t\tnew_network = kwargs[\"network\"]\n\t\tnew_subnet_cidr = kwargs[\"cidr\"]\n\t\tsubnet_name = kwargs[\"subnet_name\"]\n enable_dhcp = kwargs.get(\"enable_dhcp\", True)\n\n\t\tnetVal = {}\n\t\tsubnetVal = {}\n\t\tnet_id = None\n\t\t#check if the network with the same name exists\n\t\tif not any(network.get('name',None) == new_network for network in existing_networks['networks']) :\n\t\t\t#did not find the network. go ahead and create the network and subnet\n\t\t\tnetVal = self.neutronClient.create_network(new_network)\n\t\t\tsubnetVal = self.neutronClient.create_subnet(netVal['network']['id'],new_subnet_cidr,subnet_name,enable_dhcp)\n netVal = netVal['network']\n subnetVal = subnetVal['subnet']\n\t\t\t#return the dict with the network and subnet details\n\t\telse :\n\t\t\t#network name exists. get network id\n\t\t\tfor network in existing_networks['networks']:\n if new_network == network['name']:\n\t\t\t\t\tnet_id = network['id']\n\t\t\t\t\tnetVal = network\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\t#check if the required subnet also exists\n\t\t\texisting_subnet = self.neutronClient.get_subnets()\n\t\t\tif not any(subnet.get('cidr',None) == new_subnet_cidr for subnet in existing_subnet['subnets']):\n\t\t\t\t#subnet needs to be created under this network\n\t\t\t\tsubnetVal = self.neutronClient.create_subnet(net_id,new_subnet_cidr,subnet_name, enable_dhcp)\n subnetVal = subnetVal['subnet']\n\t\t\telse :\n\t\t\t\tfor subnet in existing_subnet['subnets']:\n #TOCHK: Dont use in for string comparisons\n \t#if new_subnet_cidr in subnet['cidr'] :\n if new_subnet_cidr == subnet['cidr']:\n \tsubnetVal = subnet\n\t\t\t\t\t\tbreak\n\t\tnetVal['subnets'] = subnetVal\n\t\treturn netVal", "def ex_create_network_interface(self, subnet, name=None,\r\n description=None,\r\n private_ip_address=None):\r\n raise NotImplementedError(self._not_implemented_msg)", "def create_subnet(DryRun=None, VpcId=None, CidrBlock=None, Ipv6CidrBlock=None, AvailabilityZone=None):\n pass", "def test_create_port_null_ip(self):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)\n self.assertEqual(self.net_config.network_settings.name,\n self.network.name)\n self.assertTrue(validate_network(\n self.neutron, self.keystone,\n self.net_config.network_settings.name, True,\n self.os_creds.project_name))\n\n subnet_setting = self.net_config.network_settings.subnet_settings[0]\n self.assertTrue(validate_subnet(\n self.neutron, self.network, subnet_setting.name,\n subnet_setting.cidr, True))\n\n with self.assertRaises(Exception):\n self.port = neutron_utils.create_port(\n self.neutron, self.os_creds,\n PortConfig(\n name=self.port_name,\n network_name=self.net_config.network_settings.name,\n ip_addrs=[{\n 'subnet_name': subnet_setting.name,\n 'ip': None}]))", "def test_create_network_no_dhcp_slaac(self):\n sub_setting = SubnetConfig(\n name=self.guid + '-subnet', cidr='1:1:0:0:0:0:0:0/64',\n ip_version=6, dns_nameservers=['2620:0:ccc:0:0:0:0:2'],\n gateway_ip='1:1:0:0:0:0:0:1', start='1:1::ff', end='1:1::ffff',\n enable_dhcp=False, 
ipv6_ra_mode='slaac', ipv6_address_mode='slaac')\n self.network_settings = NetworkConfig(\n name=self.guid + '-net', subnet_settings=[sub_setting])\n\n with self.assertRaises(BadRequest):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.network_settings)", "def test_create_network_invalid_gateway_ip(self):\n sub_setting = SubnetConfig(\n name=self.guid + '-subnet', cidr='1:1::/48', ip_version=6,\n gateway_ip='192.168.0.1')\n self.network_settings = NetworkConfig(\n name=self.guid + '-net', subnet_settings=[sub_setting])\n\n with self.assertRaises(BadRequest):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.network_settings)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the neutron_utils.create_neutron_subnet() function for an Exception when the subnet CIDR value is None
def test_create_subnet_null_cidr(self):
    self.net_config.network_settings.subnet_settings[0].cidr = None
    with self.assertRaises(Exception):
        self.network = neutron_utils.create_network(
            self.neutron, self.os_creds, self.net_config.network_settings)
[ "def test_create_subnet_empty_cidr(self):\n self.net_config.network_settings.subnet_settings[0].cidr = ''\n with self.assertRaises(Exception):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)", "def test_create_subnet_null_name(self):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)\n self.assertEqual(self.net_config.network_settings.name,\n self.network.name)\n self.assertTrue(validate_network(\n self.neutron, self.keystone,\n self.net_config.network_settings.name, True,\n self.os_creds.project_name))\n\n with self.assertRaises(Exception):\n SubnetConfig(cidr=self.net_config.subnet_cidr)", "def test_create_network_with_bad_cidr(self):\n sub_setting = SubnetConfig(\n name=self.guid + '-subnet', cidr='1:1:1:/48', ip_version=6)\n self.network_settings = NetworkConfig(\n name=self.guid + '-net', subnet_settings=[sub_setting])\n\n with self.assertRaises(BadRequest):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.network_settings)", "def test_create_subnet_no_cidr_and_default_subnetpool(self):\n with self.network() as network:\n tenant_id = network['network']['tenant_id']\n subnetpool_prefix = '10.0.0.0/8'\n with self.subnetpool(prefixes=[subnetpool_prefix],\n admin=True,\n name=\"My subnet pool\",\n tenant_id=tenant_id,\n min_prefixlen='25',\n is_default=True):\n data = {'subnet': {'network_id': network['network']['id'],\n 'ip_version': constants.IP_VERSION_4,\n 'tenant_id': tenant_id}}\n subnet_req = self.new_create_request('subnets', data)\n res = subnet_req.get_response(self.api)\n self.assertEqual(\n webob.exc.HTTPClientError.code, res.status_int)", "def test_create_subnet_with_cidr_and_default_subnetpool(self):\n with self.network() as network:\n tenant_id = network['network']['tenant_id']\n subnetpool_prefix = '10.0.0.0/8'\n with self.subnetpool(prefixes=[subnetpool_prefix],\n admin=True,\n name=\"My subnet pool\",\n tenant_id=tenant_id,\n min_prefixlen='25',\n is_default=True):\n data = {'subnet': {'network_id': network['network']['id'],\n 'cidr': '10.0.0.0/24',\n 'ip_version': constants.IP_VERSION_4,\n 'tenant_id': tenant_id}}\n subnet_req = self.new_create_request('subnets', data)\n res = subnet_req.get_response(self.api)\n subnet = self.deserialize(self.fmt, res)['subnet']\n self.assertIsNone(subnet['subnetpool_id'])", "def test_add_interface_router_null_subnet(self):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)\n self.assertEqual(self.net_config.network_settings.name,\n self.network.name)\n self.assertTrue(validate_network(\n self.neutron, self.keystone,\n self.net_config.network_settings.name, True,\n self.os_creds.project_name))\n\n self.router = neutron_utils.create_router(\n self.neutron, self.os_creds, self.net_config.router_settings)\n validate_router(\n self.neutron, self.keystone, self.net_config.router_settings.name,\n self.os_creds.project_name, True)\n\n with self.assertRaises(NeutronException):\n self.interface_router = neutron_utils.add_interface_router(\n self.neutron, self.router, None)", "def test_create_network_invalid_start_ip(self):\n sub_setting = SubnetConfig(\n name=self.guid + '-subnet', cidr='1:1::/48', ip_version=6,\n start='foo')\n self.network_settings = NetworkConfig(\n name=self.guid + '-net', subnet_settings=[sub_setting])\n\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, 
self.network_settings)\n\n self.assertEqual('1:1::2', self.network.subnets[0].start)\n self.assertEqual(\n '1:1:0:ffff:ffff:ffff:ffff:ffff', self.network.subnets[0].end)", "def test_create_network_invalid_end_ip(self):\n sub_setting = SubnetConfig(\n name=self.guid + '-subnet', cidr='1:1::/48', ip_version=6,\n end='bar')\n self.network_settings = NetworkConfig(\n name=self.guid + '-net', subnet_settings=[sub_setting])\n\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.network_settings)\n\n self.assertEqual('1:1::2', self.network.subnets[0].start)\n self.assertEqual(\n '1:1:0:ffff:ffff:ffff:ffff:ffff', self.network.subnets[0].end)", "def test_create_port_null_ip(self):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)\n self.assertEqual(self.net_config.network_settings.name,\n self.network.name)\n self.assertTrue(validate_network(\n self.neutron, self.keystone,\n self.net_config.network_settings.name, True,\n self.os_creds.project_name))\n\n subnet_setting = self.net_config.network_settings.subnet_settings[0]\n self.assertTrue(validate_subnet(\n self.neutron, self.network, subnet_setting.name,\n subnet_setting.cidr, True))\n\n with self.assertRaises(Exception):\n self.port = neutron_utils.create_port(\n self.neutron, self.os_creds,\n PortConfig(\n name=self.port_name,\n network_name=self.net_config.network_settings.name,\n ip_addrs=[{\n 'subnet_name': subnet_setting.name,\n 'ip': None}]))", "def subnet_create_api():\r\n try:\r\n req = models.Subnet(request.json)\r\n req.validate()\r\n except Exception as e:\r\n return err_return('Parameter Invalid', 'ParameterInvalid',\r\n '', HTTP_BAD_REQUEST)\r\n try:\r\n if not req.network_id:\r\n return err_return('networkid is required', 'ParameterInvalid',\r\n '', HTTP_BAD_REQUEST)\r\n if not req.subnet_id:\r\n req_id = str(uuid.uuid4())\r\n else:\r\n req_id = req.subnet_id\r\n sb_name = subnet_db_get_one('name', id=req_id)\r\n if sb_name:\r\n return err_return('id(%s) in use by %s' % (req_id, sb_name),\r\n 'ParameterInvalid', '', HTTP_BAD_REQUEST)\r\n if req.subnet_name:\r\n if len(req.subnet_name) > NAME_MAX_LEN:\r\n return err_return('Length of name must be less than 255',\r\n 'ParameterInvalid', '', HTTP_BAD_REQUEST)\r\n else:\r\n req.subnet_name = ''\r\n\r\n external = network_db_get_one('external', id=req.network_id)\r\n if external is None:\r\n return err_return(\"networkid does not exist\",\r\n \"ParameterInvalid\", \"\", HTTP_BAD_REQUEST)\r\n if not req.dns_nameservers:\r\n req.dns_nameservers = []\r\n if not req.allocation_pools:\r\n req.allocation_pools = []\r\n allocation_pools = []\r\n for all_pool in req.allocation_pools:\r\n allocation_pools.append(all_pool.to_primitive())\r\n req.allocation_pools = allocation_pools\r\n for pool in req.allocation_pools:\r\n if ip_to_bin(pool['start']) > ip_to_bin(pool['end']):\r\n return err_return(\"end_ip must be more than start_ip\",\r\n \"IPRangeError\", \"\", HTTP_BAD_REQUEST)\r\n\r\n if external == 0:\r\n if not req.cidr:\r\n return err_return('cidr is required', 'ParameterInvalid',\r\n '', HTTP_BAD_REQUEST)\r\n if not validate_cidr(req.cidr):\r\n return err_return('cidr invalid', 'ParameterInvalid',\r\n '', HTTP_BAD_REQUEST)\r\n if not req.gateway_ip:\r\n return err_return('gateway ip is required', 'ParameterInvalid',\r\n '', HTTP_BAD_REQUEST)\r\n vl2lcid = yynetworkid_to_lcvl2id(req.network_id)\r\n log.debug('vl2lcid=%s' % vl2lcid)\r\n nets = [{\"prefix\": VFW_TOR_LINK_NET_PRE,\r\n \"netmask\": 
VFW_TOR_LINK_NET_MASK}]\r\n cidr = str(req.cidr).split('/')\r\n new_prf = cidr[0]\r\n new_mask = int(cidr[1])\r\n subnets = get_subnets_by_network(req.network_id)\r\n for subnet in subnets:\r\n cidr = subnet['cidr'].split('/')\r\n old_prf = cidr[0]\r\n old_mask = int(cidr[1])\r\n if subnet_equ(new_prf, old_prf, new_mask, old_mask):\r\n log.error('cidr is the same')\r\n return err_return('subnet already exist',\r\n 'ParameterInvalid', '', HTTP_BAD_REQUEST)\r\n nets.append({\"prefix\": old_prf, \"netmask\": old_mask})\r\n nets.append({\"prefix\": new_prf, \"netmask\": new_mask})\r\n log.debug('nets=%s' % nets)\r\n nw_name = network_db_get_one('name', id=req.network_id)\r\n payload = json.dumps({\"name\": nw_name, \"nets\": nets})\r\n r = lcapi.patch(conf.livecloud_url + '/v1/vl2s/%s' % vl2lcid,\r\n data=payload)\r\n if r.status_code != HTTP_OK:\r\n return Response(json.dumps(NEUTRON_400)), HTTP_NOT_FOUND\r\n nets = r.json()['DATA']['NETS']\r\n for net in nets:\r\n if subnet_equ(net['PREFIX'], new_prf,\r\n net['NETMASK'], new_mask):\r\n sb_lcuuid = net['LCUUID']\r\n sb_idx = net['NET_INDEX']\r\n break\r\n else:\r\n log.error('sb_lcuuid no found')\r\n sb_lcuuid = 'sb_lcuuid no found'\r\n sb_idx = -1\r\n else:\r\n subnetid = subnet_db_get_one('id', network_id=req.network_id)\r\n if subnetid:\r\n return err_return('subnet(%s) already exists' % subnetid,\r\n 'Fail', '', HTTP_BAD_REQUEST)\r\n # ISP\r\n if not req.allocation_pools:\r\n return err_return('allocation_pools can not be empty',\r\n 'ParameterInvalid', '', HTTP_BAD_REQUEST)\r\n id = subnet_db_get_one('id', network_id=req.network_id)\r\n if id:\r\n return subnet_get(subnetid=id)\r\n lcuuid = network_db_get_one('lcuuid', id=req.network_id)\r\n isp = lc_vl2_db_get_one('isp', lcuuid=lcuuid)\r\n items = lc_ip_res_db_get_all(req='ip, netmask, gateway, userid',\r\n isp=isp)\r\n if not items:\r\n return err_return(\"No ISP IP found\", \"BadRequest\",\r\n \"Please add ISP IP to system first\",\r\n HTTP_BAD_REQUEST)\r\n req.gateway_ip = items[0]['gateway']\r\n req.cidr = ip_mask_to_cidr(items[0]['ip'], items[0]['netmask'])\r\n isp_all_ips = []\r\n ip_to_userid = {}\r\n for it in items:\r\n isp_all_ips.append(it['ip'])\r\n ip_to_userid[it['ip']] = it['userid']\r\n req_ips = alloc_pools_to_ip_list(req.allocation_pools)\r\n for req_ip in req_ips:\r\n if req_ip not in isp_all_ips:\r\n return err_return(\"%s does not exist\" % req_ip,\r\n \"IPInvalid\", \"\", HTTP_BAD_REQUEST)\r\n if ip_to_userid[req_ip] != 0:\r\n return err_return(\"%s in use\" % req_ip,\r\n \"IPInUse\", \"\", HTTP_BAD_REQUEST)\r\n sb_lcuuid = str(uuid.uuid4())\r\n sb_idx = -1\r\n\r\n sql = (\"INSERT INTO neutron_subnets \"\r\n \"VALUES('%s','%s','%s','%s','%s','%s','%s','%s',%d)\" %\r\n (req_id, req.subnet_name, req.network_id,\r\n req.cidr, json.dumps(req.allocation_pools),\r\n req.gateway_ip, json.dumps(req.dns_nameservers),\r\n sb_lcuuid, sb_idx))\r\n log.debug('add subnet sql=%s' % sql)\r\n with MySQLdb.connect(**DB_INFO) as cursor:\r\n cursor.execute(sql)\r\n if external:\r\n sql = \"UPDATE ip_resource_v2_2 SET userid=%s WHERE ip in ('-1',\"\r\n for req_ip in req_ips:\r\n sql += \"'%s',\" % req_ip\r\n sql = sql[:-1]\r\n sql += \")\"\r\n log.debug('sql=%s' % sql)\r\n with MySQLdb.connect(**LCDB_INFO) as cursor:\r\n cursor.execute(sql, conf.livecloud_userid)\r\n\r\n resp, code = subnet_get(subnetid=req_id)\r\n return resp, HTTP_CREATED\r\n\r\n except Exception as e:\r\n log.error(e)\r\n return Response(json.dumps(NEUTRON_500)), HTTP_INTERNAL_SERVER_ERROR", "def 
test_add_interface_router_missing_subnet(self):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.net_config.network_settings)\n self.assertEqual(self.net_config.network_settings.name,\n self.network.name)\n self.assertTrue(validate_network(\n self.neutron, self.keystone,\n self.net_config.network_settings.name, True,\n self.os_creds.project_name))\n\n self.router = neutron_utils.create_router(\n self.neutron, self.os_creds, self.net_config.router_settings)\n validate_router(\n self.neutron, self.keystone, self.net_config.router_settings.name,\n self.os_creds.project_name, True)\n\n for subnet in self.network.subnets:\n neutron_utils.delete_subnet(self.neutron, subnet)\n\n with self.assertRaises(NotFound):\n self.interface_router = neutron_utils.add_interface_router(\n self.neutron, self.router, self.network.subnets[0])", "def test_create_network_invalid_gateway_ip(self):\n sub_setting = SubnetConfig(\n name=self.guid + '-subnet', cidr='1:1::/48', ip_version=6,\n gateway_ip='192.168.0.1')\n self.network_settings = NetworkConfig(\n name=self.guid + '-net', subnet_settings=[sub_setting])\n\n with self.assertRaises(BadRequest):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.network_settings)", "def test_create_network_no_dhcp_slaac(self):\n sub_setting = SubnetConfig(\n name=self.guid + '-subnet', cidr='1:1:0:0:0:0:0:0/64',\n ip_version=6, dns_nameservers=['2620:0:ccc:0:0:0:0:2'],\n gateway_ip='1:1:0:0:0:0:0:1', start='1:1::ff', end='1:1::ffff',\n enable_dhcp=False, ipv6_ra_mode='slaac', ipv6_address_mode='slaac')\n self.network_settings = NetworkConfig(\n name=self.guid + '-net', subnet_settings=[sub_setting])\n\n with self.assertRaises(BadRequest):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds, self.network_settings)", "def create_subnet(DryRun=None, VpcId=None, CidrBlock=None, Ipv6CidrBlock=None, AvailabilityZone=None):\n pass", "def __create_network__(self,**kwargs):\n\t\tself.validate_args(**kwargs)\n\t\t#first create the network\n\t\texisting_networks = self.neutronClient.get_networks()\n\t\tnew_network = kwargs[\"network\"]\n\t\tnew_subnet_cidr = kwargs[\"cidr\"]\n\t\tsubnet_name = kwargs[\"subnet_name\"]\n enable_dhcp = kwargs.get(\"enable_dhcp\", True)\n\n\t\tnetVal = {}\n\t\tsubnetVal = {}\n\t\tnet_id = None\n\t\t#check if the network with the same name exists\n\t\tif not any(network.get('name',None) == new_network for network in existing_networks['networks']) :\n\t\t\t#did not find the network. go ahead and create the network and subnet\n\t\t\tnetVal = self.neutronClient.create_network(new_network)\n\t\t\tsubnetVal = self.neutronClient.create_subnet(netVal['network']['id'],new_subnet_cidr,subnet_name,enable_dhcp)\n netVal = netVal['network']\n subnetVal = subnetVal['subnet']\n\t\t\t#return the dict with the network and subnet details\n\t\telse :\n\t\t\t#network name exists. 
get network id\n\t\t\tfor network in existing_networks['networks']:\n if new_network == network['name']:\n\t\t\t\t\tnet_id = network['id']\n\t\t\t\t\tnetVal = network\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\t#check if the required subnet also exists\n\t\t\texisting_subnet = self.neutronClient.get_subnets()\n\t\t\tif not any(subnet.get('cidr',None) == new_subnet_cidr for subnet in existing_subnet['subnets']):\n\t\t\t\t#subnet needs to be created under this network\n\t\t\t\tsubnetVal = self.neutronClient.create_subnet(net_id,new_subnet_cidr,subnet_name, enable_dhcp)\n subnetVal = subnetVal['subnet']\n\t\t\telse :\n\t\t\t\tfor subnet in existing_subnet['subnets']:\n #TOCHK: Dont use in for string comparisons\n \t#if new_subnet_cidr in subnet['cidr'] :\n if new_subnet_cidr == subnet['cidr']:\n \tsubnetVal = subnet\n\t\t\t\t\t\tbreak\n\t\tnetVal['subnets'] = subnetVal\n\t\treturn netVal", "def check_negative_create_extra_subnet(self, network):\n exception_message = \"Quota exceeded for resources\"\n assert_that(\n calling(self.create).with_args(\n subnet_name=next(utils.generate_ids()),\n network=network,\n cidr=config.LOCAL_CIDR,\n check=False),\n raises(exceptions.OverQuotaClient, exception_message),\n \"Subnet for network with ID {!r} has been created though it \"\n \"exceeds the quota or OverQuotaClient exception with expected \"\n \"error message has not been appeared\".format(network['id']))", "def create_subnet(body=None):\n return IMPL.create_subnet(body)", "def test_create_network_null_name(self):\n with self.assertRaises(Exception):\n self.network = neutron_utils.create_network(\n self.neutron, self.os_creds,\n network_settings=NetworkConfig())", "def _create_subnet(self, network):\n cfg = self.config.network\n tenant_cidr = netaddr.IPNetwork(cfg.tenant_network_cidr)\n result = None\n # Repeatedly attempt subnet creation with sequential cidr\n # blocks until an unallocated block is found.\n for subnet_cidr in tenant_cidr.subnet(cfg.tenant_network_mask_bits):\n body = dict(\n subnet=dict(\n ip_version=4,\n network_id=network.id,\n tenant_id=network.tenant_id,\n cidr=str(subnet_cidr),\n ),\n )\n try:\n result = self.network_client.create_subnet(body=body)\n break\n except exc.QuantumClientException as e:\n is_overlapping_cidr = 'overlaps with another subnet' in str(e)\n if not is_overlapping_cidr:\n raise\n self.assertIsNotNone(result, 'Unable to allocate tenant network')\n subnet = DeletableSubnet(client=self.network_client,\n **result['subnet'])\n self.assertEqual(subnet.cidr, str(subnet_cidr))\n self.set_resource(rand_name('subnet-smoke-'), subnet)\n return subnet" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }