query       stringlengths   9 to 9.05k
document    stringlengths   10 to 222k
negatives   listlengths     19 to 20
metadata    dict
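Each row below pairs a natural-language query (a docstring) with a positive code document and a list of hard-negative snippets, plus a metadata dict naming the training objective. As a minimal sketch of consuming rows in this shape, assuming the dump is stored as JSON Lines with the field names above (the file name and storage format are assumptions, not stated by the dump):

import json

# Hypothetical file name; the actual storage of this dump is not specified.
with open("code_search_triplets.jsonl", "r", encoding="utf-8") as handle:
    for line in handle:
        row = json.loads(line)
        query = row["query"]          # docstring-style description (string)
        document = row["document"]    # the matching code snippet (string)
        negatives = row["negatives"]  # 19 to 20 non-matching snippets (list of strings)
        objective = row["metadata"]["objective"]  # e.g. {"triplet": [["query", "document", "negatives"]]}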
L{client.URI.originForm} produces an absolute I{URI} path including the I{URI} path.
def test_originForm(self):
    uri = client.URI.fromBytes(self.makeURIString(b"http://HOST/foo"))
    self.assertEqual(b"/foo", uri.originForm)
[ "def test_originFormNoPath(self):\n uri = client.URI.fromBytes(self.makeURIString(b\"http://HOST\"))\n self.assertEqual(b\"/\", uri.originForm)", "def test_originFormEmptyPath(self):\n uri = client.URI.fromBytes(self.makeURIString(b\"http://HOST/\"))\n self.assertEqual(b\"/\", uri.originForm)", "def build_uri(self, request):\n return request.build_absolute_uri(self.path)", "def absolute_url_path(self):\n spp = self.getPhysicalPath()\n try:\n toUrl = aq_acquire(self, 'REQUEST').physicalPathToURL\n except AttributeError:\n return path2url(spp) or '/'\n return toUrl(spp, relative=1) or '/'", "def cdn_frontdoor_origin_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cdn_frontdoor_origin_path\")", "def url_to_origin(url: str) -> httpcore.URL:\n u = httpx.URL(url)\n return httpcore.URL(scheme=u.raw_scheme, host=u.raw_host, port=u.port, target=\"/\")", "def get_uri(self):\n return self.host + '/' + self.get_path().lstrip('/')", "def cdn_frontdoor_origin_path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"cdn_frontdoor_origin_path\")", "def absolute_target_url(self):\n url = self.url()\n\n if self._url_uses_scheme(NON_RESOLVABLE_URL_SCHEMES):\n # For non http/https url schemes, there is no path to resolve.\n return url\n\n if url.startswith(\".\"):\n # we just need to adapt ../relative/links, /absolute/ones work\n # anyway -> this requires relative links to start with ./ or\n # ../\n context_state = self.context.restrictedTraverse(\"@@plone_context_state\")\n url = \"/\".join([context_state.canonical_object_url(), url])\n else:\n if \"resolveuid\" in url:\n uid = url.split(\"/\")[-1]\n obj = uuidToObject(uid)\n if obj:\n url = \"/\".join(obj.getPhysicalPath()[2:])\n if not url.startswith(\"/\"):\n url = \"/\" + url\n if not url.startswith((\"http://\", \"https://\")):\n url = self.request[\"SERVER_URL\"] + url\n\n return url", "def generate_origin(uri):\n try:\n parsed = urlparse(uri)\n return \"{}://{}\".format(parsed.scheme, parsed.netloc)\n except ValueError:\n return None", "def abs_slash(self):\n p = os.path.abspath(self)\n if not p.endswith('/'):\n return p + '/'\n return p", "def _make_absolute_path(self, file_or_url, basedir=''):\n url_parts = urlparse(file_or_url)\n if url_parts.scheme == '' and not os.path.isabs(url_parts.path):\n file_or_url = os.path.join(basedir, file_or_url)\n url_parts = urlparse(file_or_url)\n return file_or_url, url_parts", "def abs_path(self, remote_path):\n if os.path.isabs(remote_path):\n logger.warning('use absolute path as dfs remote path is not recommended, '\n 'which may lead to privilege bugs:', remote_path)\n return os.path.join(self.root, remote_path)", "def get_relative_url(self):\n\n newpath=\"\"\n if self.dirpath:\n newpath = \"\".join((\"/\", \"\".join([ x+'/' for x in self.dirpath])))\n\n if self.filelike:\n newpath = \"\".join((newpath, self.URLSEP, self.filename))\n \n return self.make_valid_url(newpath)", "def get_abs_local_path(self, arg):\n if os.path.isabs(arg):\n return arg\n else:\n return os.path.normpath(os.path.join(self.localdir, arg))", "def arn_url_path(self):\r\n return '/' + urlparse.quote(self.arn(), '')", "def abspath(origin, *relpath):\n if os.path.isfile(origin):\n origin = os.path.dirname(origin)\n return os.path.join(origin, *relpath)", "def absolute_url(self, relative=0):\n if relative:\n return self.virtual_url_path()\n\n spp = self.getPhysicalPath()\n\n try:\n toUrl = aq_acquire(self, 'REQUEST').physicalPathToURL\n except AttributeError:\n return path2url(spp[1:])\n return 
toUrl(spp)", "def absolute_url(self, url: str) -> str:\n if not url.startswith(\"http\"):\n return self.file_share_base_url.rstrip(\"/\") + \"/\" + url.lstrip(\"/\")\n return url" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
L{client.URI.originForm} produces a path of C{b'/'} when the I{URI} specifies no path.
def test_originFormNoPath(self):
    uri = client.URI.fromBytes(self.makeURIString(b"http://HOST"))
    self.assertEqual(b"/", uri.originForm)
[ "def test_originFormEmptyPath(self):\n uri = client.URI.fromBytes(self.makeURIString(b\"http://HOST/\"))\n self.assertEqual(b\"/\", uri.originForm)", "def test_originForm(self):\n uri = client.URI.fromBytes(self.makeURIString(b\"http://HOST/foo\"))\n self.assertEqual(b\"/foo\", uri.originForm)", "def cdn_frontdoor_origin_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cdn_frontdoor_origin_path\")", "def generate_origin(uri):\n try:\n parsed = urlparse(uri)\n return \"{}://{}\".format(parsed.scheme, parsed.netloc)\n except ValueError:\n return None", "def cdn_frontdoor_origin_path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"cdn_frontdoor_origin_path\")", "def test_emptyPath(self):\n uri = self.makeURIString(b\"http://HOST/\")\n self.assertURIEquals(\n client.URI.fromBytes(uri),\n scheme=b\"http\",\n netloc=self.uriHost,\n host=self.host,\n port=80,\n path=b\"/\",\n )", "def build_uri(self, request):\n return request.build_absolute_uri(self.path)", "def url_to_origin(url: str) -> httpcore.URL:\n u = httpx.URL(url)\n return httpcore.URL(scheme=u.raw_scheme, host=u.raw_host, port=u.port, target=\"/\")", "def abs_noslash(self):\n p = os.path.abspath(self)\n if p.endswith('/') and p not in ('/', '~/'):\n return p[:-1]\n return p", "def _path_from_uri(uri):\n uri = re.sub('^(\\.\\./)*', '', uri)\n return uri", "def abs_slash(self):\n p = os.path.abspath(self)\n if not p.endswith('/'):\n return p + '/'\n return p", "def get_uri(self):\n return self.host + '/' + self.get_path().lstrip('/')", "def absolute_url_path(self):\n spp = self.getPhysicalPath()\n try:\n toUrl = aq_acquire(self, 'REQUEST').physicalPathToURL\n except AttributeError:\n return path2url(spp) or '/'\n return toUrl(spp, relative=1) or '/'", "def build_url(self):\n url = super().build_url()\n if '/None/' in url:\n return url.replace('/None/', '/')\n else:\n return url", "def resolved_path(path_or_uri_like):\n if \"://\" not in path_or_uri_like:\n return path_or_uri_like\n elif path_or_uri_like.startswith(\"file://\"):\n return path_or_uri_like[len(\"file://\"):]\n else:\n return UNRESOLVED_URI", "def build_url(self):\n url = super().build_url()\n if '/None/' in url:\n return url.replace('/None/', '')\n else:\n return url", "def abbr_noslash(self):\n p = self.abs_slash.replace(os.path.expanduser('~/'), '~/')\n if p.endswith('/') and p not in ('/', '~/'):\n return p[:-1]\n return p", "def norm_path(remote_path):\n remote_path = remote_path[1:] if remote_path.startswith('/') else remote_path\n if remote_path.endswith('/'):\n remote_path = remote_path[:-2]\n if remote_path.startswith('..') or os.path.isabs(remote_path):\n return None\n return remote_path", "def has_origin(self, origin):\n\n if not isinstance(origin, RepositoryURI):\n origin = misc.url_affix_trailing_slash(origin)\n return origin in self.origins" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
L{client.URI.originForm} produces a path of C{b'/'} when the I{URI} specifies an empty path.
def test_originFormEmptyPath(self):
    uri = client.URI.fromBytes(self.makeURIString(b"http://HOST/"))
    self.assertEqual(b"/", uri.originForm)
[ "def test_originFormNoPath(self):\n uri = client.URI.fromBytes(self.makeURIString(b\"http://HOST\"))\n self.assertEqual(b\"/\", uri.originForm)", "def test_originForm(self):\n uri = client.URI.fromBytes(self.makeURIString(b\"http://HOST/foo\"))\n self.assertEqual(b\"/foo\", uri.originForm)", "def test_emptyPath(self):\n uri = self.makeURIString(b\"http://HOST/\")\n self.assertURIEquals(\n client.URI.fromBytes(uri),\n scheme=b\"http\",\n netloc=self.uriHost,\n host=self.host,\n port=80,\n path=b\"/\",\n )", "def cdn_frontdoor_origin_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cdn_frontdoor_origin_path\")", "def abs_noslash(self):\n p = os.path.abspath(self)\n if p.endswith('/') and p not in ('/', '~/'):\n return p[:-1]\n return p", "def generate_origin(uri):\n try:\n parsed = urlparse(uri)\n return \"{}://{}\".format(parsed.scheme, parsed.netloc)\n except ValueError:\n return None", "def cdn_frontdoor_origin_path(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"cdn_frontdoor_origin_path\")", "def build_url(self):\n url = super().build_url()\n if '/None/' in url:\n return url.replace('/None/', '/')\n else:\n return url", "def build_url(self):\n url = super().build_url()\n if '/None/' in url:\n return url.replace('/None/', '')\n else:\n return url", "def abs_slash(self):\n p = os.path.abspath(self)\n if not p.endswith('/'):\n return p + '/'\n return p", "def build_uri(self, request):\n return request.build_absolute_uri(self.path)", "def url_to_origin(url: str) -> httpcore.URL:\n u = httpx.URL(url)\n return httpcore.URL(scheme=u.raw_scheme, host=u.raw_host, port=u.port, target=\"/\")", "def norm_path(remote_path):\n remote_path = remote_path[1:] if remote_path.startswith('/') else remote_path\n if remote_path.endswith('/'):\n remote_path = remote_path[:-2]\n if remote_path.startswith('..') or os.path.isabs(remote_path):\n return None\n return remote_path", "def abbr_noslash(self):\n p = self.abs_slash.replace(os.path.expanduser('~/'), '~/')\n if p.endswith('/') and p not in ('/', '~/'):\n return p[:-1]\n return p", "def _validate_authority_uri_abs_path(host, path):\n if len(host) > 0 and len(path) > 0 and not path.startswith(\"/\"):\n raise ValueError(\n \"Path in a URL with authority should start with a slash ('/') if set\"\n )", "def absolute_url_path(self):\n spp = self.getPhysicalPath()\n try:\n toUrl = aq_acquire(self, 'REQUEST').physicalPathToURL\n except AttributeError:\n return path2url(spp) or '/'\n return toUrl(spp, relative=1) or '/'", "def _path_from_uri(uri):\n uri = re.sub('^(\\.\\./)*', '', uri)\n return uri", "def get_uri(self):\n return self.host + '/' + self.get_path().lstrip('/')", "def _stripLeadingSlash(path: str) -> str:\n if len(path) >= 1 and path[0] == \"/\":\n return path[1:]\n else:\n return path" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
L{client.URI.fromBytes} parses the scheme, host, and path elements into L{bytes}, even when passed an URL which has previously been passed to L{urlparse} as a L{unicode} string.
def test_externalUnicodeInterference(self):
    goodInput = self.makeURIString(b"http://HOST/path")
    badInput = goodInput.decode("ascii")
    urlparse(badInput)
    uri = client.URI.fromBytes(goodInput)
    self.assertIsInstance(uri.scheme, bytes)
    self.assertIsInstance(uri.host, bytes)
    self.assertIsInstance(uri.path, bytes)
[ "def urlunparse(parts):\n scheme, netloc, path, params, query, fragment = parts\n\n # Avoid encoding the windows drive letter colon\n if RE_DRIVE_LETTER_PATH.match(path):\n quoted_path = path[:3] + parse.quote(path[3:])\n else:\n quoted_path = parse.quote(path)\n\n return parse.urlunparse((\n parse.quote(scheme),\n parse.quote(netloc),\n quoted_path,\n parse.quote(params),\n parse.quote(query),\n parse.quote(fragment)\n ))", "def test_hostBracketIPv6AddressLiteral(self):\n uri = client.URI.fromBytes(b\"http://[::1]:80/index.html\")\n\n self.assertEqual(uri.host, b\"::1\")\n self.assertEqual(uri.netloc, b\"[::1]:80\")\n self.assertEqual(uri.toBytes(), b\"http://[::1]:80/index.html\")", "def test_emptyPath(self):\n uri = self.makeURIString(b\"http://HOST/\")\n self.assertURIEquals(\n client.URI.fromBytes(uri),\n scheme=b\"http\",\n netloc=self.uriHost,\n host=self.host,\n port=80,\n path=b\"/\",\n )", "def test_asURI(self):\n unicodey = ('http://\\N{LATIN SMALL LETTER E WITH ACUTE}.com/'\n '\\N{LATIN SMALL LETTER E}\\N{COMBINING ACUTE ACCENT}'\n '?\\N{LATIN SMALL LETTER A}\\N{COMBINING ACUTE ACCENT}='\n '\\N{LATIN SMALL LETTER I}\\N{COMBINING ACUTE ACCENT}'\n '#\\N{LATIN SMALL LETTER U}\\N{COMBINING ACUTE ACCENT}')\n iri = URL.fromText(unicodey)\n uri = iri.asURI()\n self.assertEqual(iri.host, '\\N{LATIN SMALL LETTER E WITH ACUTE}.com')\n self.assertEqual(iri.path[0],\n '\\N{LATIN SMALL LETTER E}\\N{COMBINING ACUTE ACCENT}')\n self.assertEqual(iri.asText(), unicodey)\n expectedURI = 'http://xn--9ca.com/%C3%A9?%C3%A1=%C3%AD#%C3%BA'\n actualURI = uri.asText()\n self.assertEqual(actualURI, expectedURI,\n '%r != %r' % (actualURI, expectedURI))", "def test_fromText(self):\n urlpath = URL.fromText(theurl)\n self.assertEqual(theurl, urlpath.asText())", "def test_host_from_uri(self):\n self.assertEqual(host_from_uri(u'http://a.b-c.com:8080'), (u'a.b-c.com', u'8080'))\n self.assertEqual(host_from_uri(u'https://a.b.com:8080'), (u'a.b.com', u'8080'))\n self.assertEqual(host_from_uri(u'http://www.example.com'), (u'www.example.com', u'80'))\n self.assertEqual(host_from_uri(u'https://www.example.com'), (u'www.example.com', u'443'))", "def parse_address(addr, strict=False):\n if not isinstance(addr, six.string_types):\n raise TypeError(\"expected str, got %r\" % addr.__class__.__name__)\n scheme, sep, loc = addr.rpartition(\"://\")\n if strict and not sep:\n msg = (\n \"Invalid url scheme. \"\n \"Must include protocol like tcp://localhost:8000. 
\"\n \"Got %s\" % addr\n )\n raise ValueError(msg)\n if not sep:\n scheme = DEFAULT_SCHEME\n return scheme, loc", "def urlunparse((scheme, netloc, url, params, query, fragment)):\r\n if params:\r\n url = \"%s;%s\" % (url, params)\r\n return urlunsplit((scheme, netloc, url, query, fragment))", "def _path_decode_py3(s):\n if isinstance(s, bytes):\n s = s.decode('utf-8')\n return urlparse.unquote(s)", "def test_originFormNoPath(self):\n uri = client.URI.fromBytes(self.makeURIString(b\"http://HOST\"))\n self.assertEqual(b\"/\", uri.originForm)", "def urljoin_bytes(*atoms):\n url = b'/'.join([x for x in atoms if x])\n while b'//' in url:\n url = url.replace(b'//', b'/')\n # Special-case the final url of \"\", and return \"/\" instead.\n return url or b'/'", "def uriDecode(text):\n i = 0\n output = ''\n while i < len(text) - 2:\n if text[i] != '%':\n output += text[i]\n i += 1\n continue\n\n # Each character is represented by a 2-digit hex-value\n sixteens = '0123456789ABCDEF'.find(text[i+1].upper())\n ones = '0123456789ABCDEF'.find(text[i+2].upper())\n if sixteens == -1 or ones == -1: # Not a valid hex-value\n raise SyntaxError()\n\n value = 16 * sixteens + ones\n output += chr(value)\n i += 3\n output += text[-2:]\n return output", "def test_originForm(self):\n uri = client.URI.fromBytes(self.makeURIString(b\"http://HOST/foo\"))\n self.assertEqual(b\"/foo\", uri.originForm)", "def MakeUrllibSafe(uriRef):\n # IDN support requires decoding any percent-encoded octets in the\n # host part (if it's a reg-name) of the authority component, and when\n # doing DNS lookups, applying IDNA encoding to that string first.\n # As of Python 2.3, there is an IDNA codec, and the socket and httplib\n # modules accept Unicode strings and apply IDNA encoding automatically\n # where necessary. However, urllib.urlopen() has not yet been updated\n # to do the same; it raises an exception if you give it a Unicode\n # string, and does no conversion on non-Unicode strings, meaning you\n # have to give it an IDNA string yourself. We will only support it on\n # Python 2.3 and up.\n #\n # see if host is a reg-name, as opposed to IPv4 or IPv6 addr.\n if isinstance(uriRef, unicode):\n try:\n uriRef = uriRef.encode('us-ascii') # parts of urllib are not unicode safe\n except UnicodeError:\n raise ValueError(\"uri %r must consist of ASCII characters.\" % uriRef)\n (scheme, auth, path, query, frag) = urlparse.urlsplit(uriRef)\n if auth and auth.find('@') > -1:\n userinfo, hostport = auth.split('@')\n else:\n userinfo = None\n hostport = auth\n if hostport and hostport.find(':') > -1:\n host, port = hostport.split(':')\n else:\n host = hostport\n port = None\n if host and REG_NAME_HOST_PATTERN.match(host):\n # percent-encoded hostnames will always fail DNS lookups\n host = urllib.unquote(host) #PercentDecode(host)\n # IDNA-encode if possible.\n # We shouldn't do this for schemes that don't need DNS lookup,\n # but are there any (that you'd be calling urlopen for)?\n if sys.version_info[0:2] >= (2, 3):\n if isinstance(host, str):\n host = host.decode('utf-8')\n host = host.encode('idna')\n # reassemble the authority with the new hostname\n # (percent-decoded, and possibly IDNA-encoded)\n auth = ''\n if userinfo:\n auth += userinfo + '@'\n auth += host\n if port:\n auth += ':' + port\n\n # On Windows, ensure that '|', not ':', is used in a drivespec.\n if os.name == 'nt' and scheme == 'file':\n path = path.replace(':', '|', 1)\n\n # Note that we drop fragment, if any. See RFC 3986 sec. 
3.5.\n uri = urlparse.urlunsplit((scheme, auth, path, query, None))\n\n return uri", "def MakeUrllibSafe(uriRef):\r\n # IDN support requires decoding any percent-encoded octets in the\r\n # host part (if it's a reg-name) of the authority component, and when\r\n # doing DNS lookups, applying IDNA encoding to that string first.\r\n # As of Python 2.3, there is an IDNA codec, and the socket and httplib\r\n # modules accept Unicode strings and apply IDNA encoding automatically\r\n # where necessary. However, urllib.urlopen() has not yet been updated\r\n # to do the same; it raises an exception if you give it a Unicode\r\n # string, and does no conversion on non-Unicode strings, meaning you\r\n # have to give it an IDNA string yourself. We will only support it on\r\n # Python 2.3 and up.\r\n #\r\n # see if host is a reg-name, as opposed to IPv4 or IPv6 addr.\r\n if isinstance(uriRef, unicode):\r\n try:\r\n uriRef = uriRef.encode('us-ascii') # parts of urllib are not unicode safe\r\n except UnicodeError:\r\n raise ValueError(\"uri %r must consist of ASCII characters.\" % uriRef)\r\n (scheme, auth, path, query, frag) = urlparse.urlsplit(uriRef)\r\n if auth and auth.find('@') > -1:\r\n userinfo, hostport = auth.split('@')\r\n else:\r\n userinfo = None\r\n hostport = auth\r\n if hostport and hostport.find(':') > -1:\r\n host, port = hostport.split(':')\r\n else:\r\n host = hostport\r\n port = None\r\n if host and REG_NAME_HOST_PATTERN.match(host):\r\n # percent-encoded hostnames will always fail DNS lookups\r\n host = urllib.unquote(host) #PercentDecode(host)\r\n # IDNA-encode if possible.\r\n # We shouldn't do this for schemes that don't need DNS lookup,\r\n # but are there any (that you'd be calling urlopen for)?\r\n if sys.version_info[0:2] >= (2, 3):\r\n if isinstance(host, str):\r\n host = host.decode('utf-8')\r\n host = host.encode('idna')\r\n # reassemble the authority with the new hostname\r\n # (percent-decoded, and possibly IDNA-encoded)\r\n auth = ''\r\n if userinfo:\r\n auth += userinfo + '@'\r\n auth += host\r\n if port:\r\n auth += ':' + port\r\n\r\n # On Windows, ensure that '|', not ':', is used in a drivespec.\r\n if os.name == 'nt' and scheme == 'file':\r\n path = path.replace(':', '|', 1)\r\n\r\n # Note that we drop fragment, if any. See RFC 3986 sec. 3.5.\r\n uri = urlparse.urlunsplit((scheme, auth, path, query, None))\r\n\r\n return uri", "def test_originFormEmptyPath(self):\n uri = client.URI.fromBytes(self.makeURIString(b\"http://HOST/\"))\n self.assertEqual(b\"/\", uri.originForm)", "def url_parser(url):\r\n if url.startswith(URL_SCHEMES):\r\n return url\r\n else:\r\n return 'https://' + url", "def with_host(self, host):\n # N.B. doesn't cleanup query/fragment\n if not isinstance(host, str):\n raise TypeError(\"Invalid host type\")\n if not self.is_absolute():\n raise ValueError(\"host replacement is not allowed for relative URLs\")\n if not host:\n raise ValueError(\"host removing is not allowed\")\n val = self._val\n return URL(\n self._val._replace(\n netloc=self._make_netloc(val.username, val.password, host, val.port)\n ),\n encoded=True,\n )", "def urldecode(value):\n return urllib.unquote(urllib.unquote(value)).decode('utf8')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Brackets around IPv6 addresses are stripped in the host field. The host field is then exported with brackets in the output of L{client.URI.toBytes}.
def test_hostBracketIPv6AddressLiteral(self):
    uri = client.URI.fromBytes(b"http://[::1]:80/index.html")

    self.assertEqual(uri.host, b"::1")
    self.assertEqual(uri.netloc, b"[::1]:80")
    self.assertEqual(uri.toBytes(), b"http://[::1]:80/index.html")
[ "def ipv6_str_with_prefix(self):\n return f\"{self.ipv6}/{self.ipv6_network.prefixlen}\"", "def reverse_ipv6(ipv6):\n reverse_chars = ipaddress_local.ip_address(ipv6).exploded[::-1].replace(':', '')\n return '.'.join(reverse_chars) + '.ip6.arpa'", "def test_ip_address_ipv6_ipv4_mapped(self):\n data = r'::FFFF:192.168.1.35'\n expected = json.loads(r'''{\"version\":6,\"max_prefix_length\":128,\"ip\":\"::ffff:c0a8:123\",\"ip_compressed\":\"::ffff:c0a8:123\",\"ip_exploded\":\"0000:0000:0000:0000:0000:ffff:c0a8:0123\",\"ip_split\":[\"0000\",\"0000\",\"0000\",\"0000\",\"0000\",\"ffff\",\"c0a8\",\"0123\"],\"scope_id\":null,\"ipv4_mapped\":\"192.168.1.35\",\"six_to_four\":null,\"teredo_client\":null,\"teredo_server\":null,\"dns_ptr\":\"3.2.1.0.8.a.0.c.f.f.f.f.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa\",\"network\":\"::ffff:c0a8:123\",\"broadcast\":\"::ffff:c0a8:123\",\"hostmask\":\"::\",\"netmask\":\"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff\",\"cidr_netmask\":128,\"hosts\":1,\"first_host\":\"::ffff:c0a8:123\",\"last_host\":\"::ffff:c0a8:123\",\"is_multicast\":false,\"is_private\":true,\"is_global\":false,\"is_link_local\":false,\"is_loopback\":false,\"is_reserved\":true,\"is_unspecified\":false,\"int\":{\"ip\":281473913979171,\"network\":281473913979171,\"broadcast\":281473913979171,\"first_host\":281473913979171,\"last_host\":281473913979171},\"hex\":{\"ip\":\"00:00:00:00:00:00:00:00:00:00:ff:ff:c0:a8:01:23\",\"network\":\"00:00:00:00:00:00:00:00:00:00:ff:ff:c0:a8:01:23\",\"broadcast\":\"00:00:00:00:00:00:00:00:00:00:ff:ff:c0:a8:01:23\",\"hostmask\":\"00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00\",\"netmask\":\"ff:ff:ff:ff:ff:ff:ff:ff:ff:ff:ff:ff:ff:ff:ff:ff\",\"first_host\":\"00:00:00:00:00:00:00:00:00:00:ff:ff:c0:a8:01:23\",\"last_host\":\"00:00:00:00:00:00:00:00:00:00:ff:ff:c0:a8:01:23\"},\"bin\":{\"ip\":\"00000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111000000101010000000000100100011\",\"network\":\"00000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111000000101010000000000100100011\",\"broadcast\":\"00000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111000000101010000000000100100011\",\"hostmask\":\"00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",\"netmask\":\"11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111\",\"first_host\":\"00000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111000000101010000000000100100011\",\"last_host\":\"00000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111000000101010000000000100100011\"}}''')\n self.assertEqual(jc.parsers.ip_address.parse(data, quiet=True), expected)", "def ipv6_tuple():\n return (\n \"::1\",\n \"2001:4860:4860::8844\",\n \"2001:4860:4860::8888\",\n \"fe80::6238:e0ff:fe06:3f6b%enp2s0\",\n )", "def test_ipv6address_format(self):\n n = 10**5\n data = ['s', 'b', 'x', 'n', '#b', '_b', '#_x']\n a1 = '1:2:3::6'\n addr = ip.IPv6Address(a1)\n eaddr = eip.IPv6Address(a1)\n fns = addr.__format__, eaddr.__format__\n for args in data:\n generic_test(self.report_6a, fn_name(), n, fns, args)", "def test_ip_address_ipv6_cidr(self):\n data = r'127:0:de::1/96'\n expected = 
json.loads(r'''{\"version\":6,\"max_prefix_length\":128,\"ip\":\"127:0:de::1\",\"ip_compressed\":\"127:0:de::1\",\"ip_exploded\":\"0127:0000:00de:0000:0000:0000:0000:0001\",\"ip_split\":[\"0127\",\"0000\",\"00de\",\"0000\",\"0000\",\"0000\",\"0000\",\"0001\"],\"scope_id\":null,\"ipv4_mapped\":null,\"six_to_four\":null,\"teredo_client\":null,\"teredo_server\":null,\"dns_ptr\":\"1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.e.d.0.0.0.0.0.0.7.2.1.0.ip6.arpa\",\"network\":\"127:0:de::\",\"broadcast\":\"127:0:de::ffff:ffff\",\"hostmask\":\"::ffff:ffff\",\"netmask\":\"ffff:ffff:ffff:ffff:ffff:ffff::\",\"cidr_netmask\":96,\"hosts\":4294967294,\"first_host\":\"127:0:de::1\",\"last_host\":\"127:0:de::ffff:fffe\",\"is_multicast\":false,\"is_private\":false,\"is_global\":true,\"is_link_local\":false,\"is_loopback\":false,\"is_reserved\":true,\"is_unspecified\":false,\"int\":{\"ip\":1531727573536155682370944093904699393,\"network\":1531727573536155682370944093904699392,\"broadcast\":1531727573536155682370944098199666687,\"first_host\":1531727573536155682370944093904699393,\"last_host\":1531727573536155682370944098199666686},\"hex\":{\"ip\":\"01:27:00:00:00:de:00:00:00:00:00:00:00:00:00:01\",\"network\":\"01:27:00:00:00:de:00:00:00:00:00:00:00:00:00:00\",\"broadcast\":\"01:27:00:00:00:de:00:00:00:00:00:00:ff:ff:ff:ff\",\"hostmask\":\"00:00:00:00:00:00:00:00:00:00:00:00:ff:ff:ff:ff\",\"netmask\":\"ff:ff:ff:ff:ff:ff:ff:ff:ff:ff:ff:ff:00:00:00:00\",\"first_host\":\"01:27:00:00:00:de:00:00:00:00:00:00:00:00:00:01\",\"last_host\":\"01:27:00:00:00:de:00:00:00:00:00:00:ff:ff:ff:fe\"},\"bin\":{\"ip\":\"00000001001001110000000000000000000000001101111000000000000000000000000000000000000000000000000000000000000000000000000000000001\",\"network\":\"00000001001001110000000000000000000000001101111000000000000000000000000000000000000000000000000000000000000000000000000000000000\",\"broadcast\":\"00000001001001110000000000000000000000001101111000000000000000000000000000000000000000000000000011111111111111111111111111111111\",\"hostmask\":\"00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111\",\"netmask\":\"11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100000000000000000000000000000000\",\"first_host\":\"00000001001001110000000000000000000000001101111000000000000000000000000000000000000000000000000000000000000000000000000000000001\",\"last_host\":\"00000001001001110000000000000000000000001101111000000000000000000000000000000000000000000000000011111111111111111111111111111110\"}}''')\n self.assertEqual(jc.parsers.ip_address.parse(data, quiet=True), expected)", "def encode_address(address: str) -> bytes:\n is_ipv6 = ':' in address\n address_family = socket.AF_INET6 if is_ipv6 else socket.AF_INET\n return socket.inet_pton(address_family, address)", "def test_ip_address_ipv6_cidr_scope(self):\n data = r'127:0:de::1%128aBc123/96'\n expected = 
json.loads(r'''{\"version\":6,\"max_prefix_length\":128,\"ip\":\"127:0:de::1\",\"ip_compressed\":\"127:0:de::1\",\"ip_exploded\":\"0127:0000:00de:0000:0000:0000:0000:0001\",\"ip_split\":[\"0127\",\"0000\",\"00de\",\"0000\",\"0000\",\"0000\",\"0000\",\"0001\"],\"scope_id\":\"128aBc123\",\"ipv4_mapped\":null,\"six_to_four\":null,\"teredo_client\":null,\"teredo_server\":null,\"dns_ptr\":\"1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.e.d.0.0.0.0.0.0.7.2.1.0.ip6.arpa\",\"network\":\"127:0:de::\",\"broadcast\":\"127:0:de::ffff:ffff\",\"hostmask\":\"::ffff:ffff\",\"netmask\":\"ffff:ffff:ffff:ffff:ffff:ffff::\",\"cidr_netmask\":96,\"hosts\":4294967294,\"first_host\":\"127:0:de::1\",\"last_host\":\"127:0:de::ffff:fffe\",\"is_multicast\":false,\"is_private\":false,\"is_global\":true,\"is_link_local\":false,\"is_loopback\":false,\"is_reserved\":true,\"is_unspecified\":false,\"int\":{\"ip\":1531727573536155682370944093904699393,\"network\":1531727573536155682370944093904699392,\"broadcast\":1531727573536155682370944098199666687,\"first_host\":1531727573536155682370944093904699393,\"last_host\":1531727573536155682370944098199666686},\"hex\":{\"ip\":\"01:27:00:00:00:de:00:00:00:00:00:00:00:00:00:01\",\"network\":\"01:27:00:00:00:de:00:00:00:00:00:00:00:00:00:00\",\"broadcast\":\"01:27:00:00:00:de:00:00:00:00:00:00:ff:ff:ff:ff\",\"hostmask\":\"00:00:00:00:00:00:00:00:00:00:00:00:ff:ff:ff:ff\",\"netmask\":\"ff:ff:ff:ff:ff:ff:ff:ff:ff:ff:ff:ff:00:00:00:00\",\"first_host\":\"01:27:00:00:00:de:00:00:00:00:00:00:00:00:00:01\",\"last_host\":\"01:27:00:00:00:de:00:00:00:00:00:00:ff:ff:ff:fe\"},\"bin\":{\"ip\":\"00000001001001110000000000000000000000001101111000000000000000000000000000000000000000000000000000000000000000000000000000000001\",\"network\":\"00000001001001110000000000000000000000001101111000000000000000000000000000000000000000000000000000000000000000000000000000000000\",\"broadcast\":\"00000001001001110000000000000000000000001101111000000000000000000000000000000000000000000000000011111111111111111111111111111111\",\"hostmask\":\"00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111\",\"netmask\":\"11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100000000000000000000000000000000\",\"first_host\":\"00000001001001110000000000000000000000001101111000000000000000000000000000000000000000000000000000000000000000000000000000000001\",\"last_host\":\"00000001001001110000000000000000000000001101111000000000000000000000000000000000000000000000000011111111111111111111111111111110\"}}''')\n self.assertEqual(jc.parsers.ip_address.parse(data, quiet=True), expected)", "def hexip(self, irc, msg, args, ip):\n quads = ip.split('.')\n ret = \"\"\n for quad in quads:\n i = int(quad)\n ret += '%02x' % i\n irc.reply(ret.upper())", "def _num_to_ipv6(num: int, shorten: bool, remove_zeroes: bool) -> str:\n\n segments = []\n for _ in range(IPV6_MAX_SEGMENT_COUNT):\n num, segment = divmod(num, IPV6_MAX_SEGMENT_VALUE+1)\n segments.append(hex(segment).split('x')[1].upper())\n\n if remove_zeroes and '0' in segments:\n\n # Goes over the segments to find the\n # longest strip with nothing but zeroes\n # and replaces it with an empty string.\n # The final str.join will turn to '::'.\n\n longest = 0\n longest_idx = 0\n current = 0\n current_idx = 0\n \n for idx, seg in enumerate(segments):\n\n if seg == '0':\n\n if not current:\n current_idx = idx\n current += 1\n\n else:\n current = 0\n\n if current > longest:\n 
longest = current\n longest_idx = current_idx\n\n segments = (\n (segments[:longest_idx] if 0 < longest_idx < IPV6_MAX_SEGMENT_COUNT-1 else [''])\n + ['']\n + (segments[longest_idx+longest:] if 0 < longest_idx+longest < IPV6_MAX_SEGMENT_COUNT else [''])\n )\n\n if not shorten:\n\n # Fills up any segments to full length by\n # adding missing zeroes to the front, if any.\n\n segments = [seg.zfill(4) if seg else '' for seg in segments]\n\n return ':'.join(segments[::-1])", "def ipv6(self):\n ret = self._get_attr(\"ipv6\")\n return ret", "def parse_ipv6(\n self,\n packed_data, # type: struct\n pointer, # type: int\n field_size # type: int\n ):\n payload = self.socket.inet_ntop(self.socket.AF_INET6,packed_data[pointer:pointer+field_size])\n return payload", "def get_addr(host, port):\n if \":\" in host: # IPv6\n return \"[%s]:%s\" % (host, port)\n else: # IPv4\n return \"%s:%s\" % (host, port)", "def getHostFrom(fromHost):", "def ip6_address(self, node_id):\n if not self.ip6:\n raise ValueError(\"ip6 prefixes have not been set\")\n return str(self.ip6.addr(node_id))", "def _GetHostsFromIp6Neigh(self, hosts):\n for (mac, ip6, iface, active) in self._ParseIp6Neighbors():\n ip6 = tr.helpers.NormalizeIPAddr(ip6)\n mac = mac.lower()\n host = hosts.get(mac, dict())\n self._AddLayer1Interface(host, iface)\n host['PhysAddress'] = mac\n if active:\n # Only store if known active. We don't want to override\n # Active=True from some other source.\n host['Active'] = active\n self._AddIpToHostDict(entry=host, ip=ip6)\n hosts[mac] = host", "def ip_packed_to_str(b):\n # IPv4\n if b.startswith(IPV4_MAPPED_IPV6_PREFIX):\n return inet_ntoa(b[-4:])\n\n # IPv6\n return inet_ntop(AF_INET6, b)", "def FormatPackedIP(packed_ip):\n packed_ip = ipaddr.Bytes(str(packed_ip))\n try:\n ip_address = ipaddr.IPv4Address(packed_ip)\n return str(ip_address)\n except ipaddr.AddressValueError as e:\n pass\n try:\n ip_address = ipaddr.IPv6Address(packed_ip)\n return str(ip_address)\n except ipaddr.AddressValueError as e:\n raise bigquery_client.BigqueryInvalidQueryError(e, None, None, None)", "def test_ipv6address_exploded(self):\n n = 10**4\n addrs = ['1:2:3:4:5:6::', '::', '::5:6:7:8', '1:2:3:4::', '1:2::7:8']\n for a in addrs:\n addr = ip.IPv6Address(a)\n time1, result1 = timefn(n, lambda: addr.exploded)\n eaddr = eip.IPv6Address(a)\n time2, result2 = timefn(n, lambda: eaddr.exploded)\n results = (time1, result1), (time2, result2)\n self.report_6a.report(fn_name(), n, results, addr)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Error metrics for an entire fold as defined in the preprocessing parameters.
fold        Fold.fold_1, Fold.fold_2, Fold.fold_3, Fold.fold_4, Fold.fold_benchmark
mode        'train', 'valid' or 'test' to address the correct config parameter
norm        flag indicating whether the spectrogram should be normed to 1
net         the network used for classification, e.g. 'ConvNet', 'ResNet_v1'
model_dir   e.g. "./model_ResNet_fold_4/". For a specific checkpoint, change the checkpoint number in the checkpoint file in the model folder.
save_dir    folder where to save note activations, e.g. "./note_activations/"
save_file   name of the save file, which ends with .txt
def compute_all_error_metrics(fold, mode, net, model_dir, save_dir, save_file, norm=False, n_onset_plus=25, offset=0): config = ppp.get_preprocessing_parameters(fold.value) audio_config = config['audio_config'] # load fold filenames = open(config[mode + '_fold'], 'r').readlines() filenames = [f.strip() for f in filenames] # build predictor predictor, hparams = build_predictor(net, model_dir) # init madmom peak picker proc = madmom.features.notes.NotePeakPickingProcessor(threshold=0.1, fps=100) # init piano note processor for onset prediction rnn_processor = madmom.features.notes.RNNPianoNoteProcessor() # init lists frame_wise_metrics = [] frame_wise_metrics_with_onset_pred = [] frame_wise_metrics_with_onset_pred_heuristic = [] frame_wise_onset_metrics = [] frame_wise_onset_plus_metrics = [] for index in range(0, n_onset_plus): frame_wise_onset_plus_metrics.append([]) frame_wise_offset_metrics = [] note_wise_onset_metrics = [] note_wise_onset_offset_metrics = [] note_wise_onset_metrics_with_onset_pred = [] note_wise_onset_offset_metrics_with_onset_pred = [] note_wise_onset_metrics_with_onset_pred_heuristic = [] note_wise_onset_offset_metrics_with_onset_pred_heuristic = [] #filenames = filenames[0:3] num_pieces = len(filenames) file_num = 0 onset_duration_heuristic = 10 for file in filenames: # split file path string at "/" and take the last split, since it's the actual filename note_activation, \ gt_frame, gt_onset, \ gt_offset, \ onset_plus = get_note_activation(config['audio_path'], file, audio_config, norm, config['context_frames'], predictor, n_onset_plus, config['is_hpcp'], use_rnn=hparams['use_rnn'], offset=offset) frames = np.greater_equal(note_activation, 0.5) # return precision, recall, f-score, accuracy (without TN) frame_wise_metrics.append(util.eval_frame_wise(note_activation, gt_frame)) # multiply note activation with ground truth in order to blend out the rest of the activation fn frame_wise_onset_metrics.append(util.eval_frame_wise(np.multiply(note_activation, gt_onset), gt_onset)) for index in range(0, n_onset_plus): frame_wise_onset_plus_metrics[index].append(util.eval_frame_wise(np.multiply(note_activation, onset_plus[index]), onset_plus[index])) frame_wise_offset_metrics.append(util.eval_frame_wise(np.multiply(note_activation, gt_offset), gt_offset)) rnn_act_fn = rnn_processor(os.path.join(config['audio_path'], file + '.wav')) onset_predictions_timings = proc(rnn_act_fn) onset_predictions = util.piano_roll_rep(onset_frames=(onset_predictions_timings[:, 0] / (1. / audio_config['fps'])).astype(int), midi_pitches=onset_predictions_timings[:, 1].astype(int) - 21, piano_roll_shape=np.shape(frames)) onset_predictions_with_heuristic = util.piano_roll_rep(onset_frames=(onset_predictions_timings[:, 0] / (1. 
/ audio_config['fps'])).astype(int), midi_pitches=onset_predictions_timings[:, 1].astype(int) - 21, piano_roll_shape=np.shape(frames), onset_duration=onset_duration_heuristic) frames_with_onset_heuristic = np.logical_or(frames, onset_predictions_with_heuristic) frame_wise_metrics_with_onset_pred.append(util.eval_frame_wise(np.logical_or(frames, onset_predictions), gt_frame)) frame_wise_metrics_with_onset_pred_heuristic.append(util.eval_frame_wise(frames_with_onset_heuristic, gt_frame)) ref_intervals, ref_pitches = util.pianoroll_to_interval_sequence(gt_frame, frames_per_second=audio_config['fps'], min_midi_pitch=21, onset_predictions=gt_onset, offset_predictions=None) est_intervals, est_pitches = util.pianoroll_to_interval_sequence(frames, frames_per_second=audio_config['fps'], min_midi_pitch=21, onset_predictions=None, offset_predictions=None) est_intervals_onset_pred, est_pitches_onset_pred = util.pianoroll_to_interval_sequence(frames, frames_per_second= audio_config['fps'], min_midi_pitch=21, onset_predictions=onset_predictions, offset_predictions=None) est_intervals_onset_pred_heuristic, est_pitches_onset_pred_heuristic = util.pianoroll_to_interval_sequence(frames_with_onset_heuristic, frames_per_second= audio_config['fps'], min_midi_pitch=21, onset_predictions=onset_predictions, offset_predictions=None) # w/o onset predictions # return precision, recall, f-score, overlap_ratio note_wise_onset_metrics.append(mir_eval.transcription.precision_recall_f1_overlap(ref_intervals, util.midi_to_hz( ref_pitches), est_intervals, util.midi_to_hz( est_pitches), offset_ratio=None)) note_wise_onset_offset_metrics.append(mir_eval.transcription.precision_recall_f1_overlap(ref_intervals, util.midi_to_hz( ref_pitches), est_intervals, util.midi_to_hz( est_pitches))) # w/ onset predictions # return precision, recall, f-score, overlap_ratio note_wise_onset_metrics_with_onset_pred.append(mir_eval.transcription.precision_recall_f1_overlap(ref_intervals, util.midi_to_hz( ref_pitches), est_intervals_onset_pred, util.midi_to_hz( est_pitches_onset_pred), offset_ratio=None)) note_wise_onset_offset_metrics_with_onset_pred.append( mir_eval.transcription.precision_recall_f1_overlap(ref_intervals, util.midi_to_hz( ref_pitches), est_intervals_onset_pred, util.midi_to_hz( est_pitches_onset_pred))) # w/ onset predictions and heuristics # return precision, recall, f-score, overlap_ratio note_wise_onset_metrics_with_onset_pred_heuristic.append(mir_eval.transcription.precision_recall_f1_overlap(ref_intervals, util.midi_to_hz( ref_pitches), est_intervals_onset_pred_heuristic, util.midi_to_hz( est_pitches_onset_pred_heuristic), offset_ratio=None)) note_wise_onset_offset_metrics_with_onset_pred_heuristic.append( mir_eval.transcription.precision_recall_f1_overlap(ref_intervals, util.midi_to_hz( ref_pitches), est_intervals_onset_pred_heuristic, util.midi_to_hz( est_pitches_onset_pred_heuristic))) file_num += 1 print(file_num) # frame-wise metrics (precision/recall/f1-score mean_frame_wise = util.mean_eval_frame_wise(frame_wise_metrics, num_pieces) mean_frame_wise_onset = util.mean_eval_frame_wise(frame_wise_onset_metrics, num_pieces) mean_frame_wise_with_onset_pred = util.mean_eval_frame_wise(frame_wise_metrics_with_onset_pred, num_pieces) mean_frame_wise_with_onset_pred_heuristic = util.mean_eval_frame_wise(frame_wise_metrics_with_onset_pred_heuristic, num_pieces) mean_frame_wise_onset_plus = [] for index in range(0, n_onset_plus): mean_frame_wise_onset_plus.append(util.mean_eval_frame_wise(frame_wise_onset_plus_metrics[index], 
num_pieces)) mean_frame_wise_offset = util.mean_eval_frame_wise(frame_wise_offset_metrics, num_pieces) # note metrics w/o onset predictions (precision/recall/f1-score mean_note_wise_onset_metrics = util.mean_eval_frame_wise(note_wise_onset_metrics, num_pieces) mean_note_wise_onset_offset_metrics = util.mean_eval_frame_wise(note_wise_onset_offset_metrics, num_pieces) # note metrics w/ onset predictions (precision/recall/f1-score mean_note_wise_onset_metrics_with_onset_pred = util.mean_eval_frame_wise(note_wise_onset_metrics_with_onset_pred, num_pieces) mean_note_wise_onset_offset_metrics_with_onset_pred = util.mean_eval_frame_wise( note_wise_onset_offset_metrics_with_onset_pred, num_pieces) # note metrics w/ onset prediction heuristic (precision/recall/f1-score mean_note_wise_onset_metrics_with_onset_pred_heuristic = util.mean_eval_frame_wise(note_wise_onset_metrics_with_onset_pred_heuristic, num_pieces) mean_note_wise_onset_offset_metrics_with_onset_pred_heuristic = util.mean_eval_frame_wise( note_wise_onset_offset_metrics_with_onset_pred_heuristic, num_pieces) # write all metrics to file file = open(save_dir + save_file, "w") file.write("frame-wise metrics (precision/recall/f1-score) \n") file.write("mean: " + str(mean_frame_wise) + "\n") file.write("mean (onset prediction): " + str(mean_frame_wise_with_onset_pred) + "\n") file.write("mean (onset heuristic): " + str(mean_frame_wise_with_onset_pred_heuristic) + "\n") file.write("mean (onset only): " + str(mean_frame_wise_onset) + "\n") for index in range(0, n_onset_plus): file.write("mean (onset + " + str(index+1) + " only): " + str(mean_frame_wise_onset_plus[index]) + "\n") file.write("mean (offset only): " + str(mean_frame_wise_offset) + "\n") file.write("\n") file.write("----------------------------------------------------------------- \n") file.write("\n") file.write("note metrics w/o onset predictions (precision/recall/f1-score) \n") file.write("mean (w/o offset): " + str(mean_note_wise_onset_metrics) + "\n") file.write("mean (w/ offset): " + str(mean_note_wise_onset_offset_metrics) + "\n") file.write("\n") file.write("----------------------------------------------------------------- \n") file.write("\n") file.write("note metrics w/ onset predictions (precision/recall/f1-score) \n") file.write("mean (w/o offset): " + str(mean_note_wise_onset_metrics_with_onset_pred) + "\n") file.write("mean (w/ offset): " + str(mean_note_wise_onset_offset_metrics_with_onset_pred) + "\n") file.write("\n") file.write("----------------------------------------------------------------- \n") file.write("\n") file.write("note metrics w/ onset predictions and heuristic (" + str(onset_duration_heuristic) + " frames) (precision/recall/f1-score) \n") file.write("mean (w/o offset): " + str(mean_note_wise_onset_metrics_with_onset_pred_heuristic) + "\n") file.write("mean (w/ offset): " + str(mean_note_wise_onset_offset_metrics_with_onset_pred_heuristic) + "\n") file.close()
[ "def kfold(current_path,target_path,property_name,num_fold):\n \n with open(os.path.join(current_path,'SISSO.in'),'r') as f:\n input_file=f.read()\n task_number=int(re.findall(r'ntask\\s*=\\s*(\\d+)',input_file)[0])\n samples_number=re.findall(r'nsample\\s*=\\s*([\\d,]+)',input_file)[0]\n samples_number=re.split(r'[, ]+',samples_number)\n samples_number=list(map(int,samples_number))\n \n data_total=pd.read_csv(os.path.join(current_path,'train.dat'),sep=r'\\s+')\n \n i=1\n data_list=[]\n for sample_num in samples_number:\n data_list.append(list(range(i,i+sample_num)))\n i+=sample_num\n \n for task in range(0,task_number):\n random.shuffle(data_list[task])\n batch_size=list(map(lambda x: int(math.ceil(x/num_fold)), samples_number))\n \n try:\n if os.path.exists(os.path.join(target_path,'%s_cv'%property_name)):\n print('Directory already exists.\\nDo you want to remove the directory?')\n a=input('y|n\\n')\n if a=='y':\n shutil.rmtree(os.path.join(target_path,'%s_cv'%property_name))\n if a=='n':\n print('Please input a new target path!')\n return None\n except FileNotFoundError:\n if os.path.exists(target_path)==False:\n os.mkdir(target_path)\n finally:\n os.mkdir(os.path.join(target_path,'%s_cv'%property_name))\n target_path=os.path.join(target_path,'%s_cv'%property_name)\n data_total.to_csv(os.path.join(target_path,'train.dat'),index=False,sep=' ')\n with open(os.path.join(target_path,'cross_validation_info.dat'),'w') as f:\n json.dump({'cross_validation_type':'%d-fold'%num_fold,'shuffle_data_list':data_list},f)\n\n for i in range(0,num_fold):\n try:\n shutil.copytree(current_path,os.path.join(target_path,property_name+'_cv%d'%i))\n except FileExistsError:\n shutil.rmtree(os.path.join(target_path,property_name+'_cv%d'%i))\n shutil.copytree(current_path,os.path.join(target_path,property_name+'_cv%d'%i))\n \n \n val_list=[]\n train_list=[]\n for task in range(0,task_number):\n train_list_t=[]\n if batch_size[task]*i<samples_number[task]:\n for j in range(0,num_fold):\n if batch_size[task]*j<samples_number[task]:\n if i==j:\n val_list.append(data_list[task][batch_size[task]*j:min(batch_size[task]*(j+1),samples_number[task])])\n else:\n train_list_t.append(data_list[task][batch_size[task]*j:min(batch_size[task]*(j+1),samples_number[task])])\n else:\n break\n else:\n val_list.append([])\n train_list_t.append(data_list[task])\n train_list.append(np.hstack(train_list_t).tolist())\n \n train_len=list(map(len,train_list))\n val_len=list(map(len,val_list))\n \n with open(os.path.join(target_path,property_name+'_cv%d'%i,'shuffle.dat'),'w') as f:\n json.dump({'training_list':train_list,'training_samples_number':train_len,'validation_list':val_list,'validation_samples_number':val_len},f)\n \n data_train=data_total.iloc[np.hstack(train_list)-1]\n data_val=data_total.iloc[np.hstack(val_list)-1]\n data_train.to_csv(os.path.join(target_path,property_name+'_cv%d'%i,'train.dat'),index=False,sep=' ')\n data_val.to_csv(os.path.join(target_path,property_name+'_cv%d'%i,'validation.dat'),index=False,sep=' ')\n \n with open(os.path.join(target_path,property_name+'_cv%d'%i,'SISSO.in'),'r') as f:\n lines=f.readlines()\n for j in range(len(lines)):\n if lines[j].startswith('nsample'):\n lines[j]='nsample=%s'%(str(train_len).strip('[]'))+'\\t! 
number of samples for each task (seperate the numbers by comma for ntask >1)\\n'\n with open(os.path.join(target_path,property_name+'_cv%d'%i,'SISSO.in'),'w') as f:\n f.writelines(lines)", "def kfold(data, labels, n_folds, train_method, pred_method, classify_method, labels_formatting, metric, target_folds, verbose=True, **kwargs):\n try:\n assert n_folds > 1\n except AssertionError:\n print('Need more than one fold')\n\n try:\n assert len(data) == len(labels)\n except AssertionError:\n print('Error: Data and labels have different lengths') \n \n if verbose: print('Engaging n-fold cross validation with {0:d} folds on {1:d} items'.format(n_folds, len(data))) \n fold_size = int(len(data)/n_folds)\n # Random permuation of the data\n perm = np.random.permutation(len(data))\n data = data[perm]\n labels = labels[perm]\n\n res = []\n for fold in range(n_folds):\n if target_folds is not None and fold not in target_folds:\n res.append(np.nan)\n continue\n val_idx = range(fold*fold_size,(fold+1)*fold_size)\n val_data = np.array(data[val_idx])\n val_labels = np.array(labels[val_idx])\n\n train_data = np.array([element for i, element in enumerate(data) if i not in val_idx])\n train_labels = np.array([element for i, element in enumerate(labels) if i not in val_idx])\n\n train_method(train_data, train_labels, **kwargs)\n\n preds = pred_method(val_data, **kwargs)\n \n if metric.quantized:\n preds = classify_method(preds)\n res.append(metric.measure(np.ravel(preds), labels_formatting(val_labels)))\n if verbose: print('Fold {0:d}, {1:s}: {2:.2f}'.format(fold,metric.name,res[fold]))\n\n if verbose: print('Done! Average {0:s} is {1:.2f}'.format(metric.name,np.nanmean(res)))\n\n return np.nanmean(res)", "def train_k_fold(self):\n all_errors = []\n for current_k in range(self.k_folds):\n self.set_data(*self.evaluator.get_fold(current_k, self.fold_test_indices))\n self.hyperparameters['fold'] = current_k\n self.train_one_fold()\n all_errors.append(self.get_evaluation_report())\n return numpy.mean(all_errors, axis=0)", "def error(clf, X, y) :\n \n ### ========== TODO : START ========== ###\n # compute cross-validation error over ntrials\n # hint: use train_test_split (be careful of the parameters)\n skf = StratifiedKFold(y, n_folds=5)\n train_error = 0.0\n test_error = 0.0\n\n ntrials = 0\n for train_index, test_index in skf:\n #print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n ntrials += 1\n X_train, X_test = X[train_index], X[test_index]\n y_train, y_test = y[train_index], y[test_index]\n clf.fit(X_train, y_train)\n\n y_pred = clf.predict(X_train)\n train_error_single = 1 - metrics.accuracy_score(y_train, y_pred, normalize=True)\n train_error += train_error_single\n\n y_pred = clf.predict(X_test)\n test_error_single = 1 - metrics.accuracy_score(y_test,y_pred,normalize=True)\n test_error += test_error_single\n\n train_error = train_error / ntrials\n test_error = test_error / ntrials\n return train_error, test_error", "def save_fold_data(all_fold_result, model_name, base_folder):\n create_folder(base_folder)\n\n for task_num, fold_result in enumerate(all_fold_result):\n task_folder = base_folder + f\"/task_{task_num}/\"\n create_folder(task_folder)\n for i, (pred, miss_data, model, loss_detail) in enumerate(fold_result):\n curr_folder = task_folder + f\"fold_{i}/\"\n create_folder(curr_folder)\n\n pred.to_csv(curr_folder + \"pred.csv\")\n with open(curr_folder + \"miss_data.pkl\", \"wb\") as handle:\n pickle.dump(miss_data, handle, protocol=pickle.HIGHEST_PROTOCOL) \n \n others.dump_json(curr_folder + 
\"loss_detail.json\", loss_detail)\n \n model_save_folder = curr_folder + model_name\n create_folder(model_save_folder)\n model.save(model_save_folder)", "def run_multiple_folds(self, dataset: Dataset, num_folds: int) -> List[BayesOptModel]: # pragma: no cover\n # Get a list of arrays of indices for test examples for each cross-validation split\n test_chunks = dataset.split_folds(num_folds, seed=self.config.seed)\n\n # Create a model for each train/test split and train it\n model_class = HMCBayesOptModel if self.config.training.hmc else BayesOptModel\n models = [\n model_class.from_config(config=self.config, dataset=dataset, test_ids=test_chunks[fold], fold_id=fold + 1)\n for fold in range(num_folds)\n ]\n\n for model in models:\n model.run()\n\n # Create train/test figure for each model\n logging.info(\"- Creating plots for each fold\")\n # It seems to be a MyPy False Negative\n categories: Sequence[Optional[str]] = self.config.data.categorical_inputs or [None] # type: ignore\n for fold, model in enumerate(models, 1):\n for category in categories:\n fname = self.config.results_dir / self.TRAIN_TEST_FOLD_PNG_BASE.format(self.by_clause(category), fold)\n plotting.plot_train_test_predictions(model, category=category, output_path=fname)\n fig, ax = plotting.plot_calibration_curve(\n model.minus_predict,\n datasets=[model.train, model.test], # type: ignore # auto\n labels=[\"Train\", \"Test\"],\n )\n fig.savefig(\n self.config.results_dir / self.CALIBRATION_FOLD_PNG_BASE.format(self.by_clause(category), fold),\n bbox_inches=\"tight\",\n )\n plt.close(fig)\n\n # Create cross-validation figure\n logging.info(\"- Creating cross-validation plots\")\n r: Optional[float] = None # Pearson correlation coefficient\n # noinspection PyArgumentList\n for category in categories:\n fig = plt.figure(figsize=(5, 5))\n # noinspection PyUnresolvedReferences\n ax = plt.subplot()\n\n # Check if all models have their test data sets\n test_datasets: List[Dataset] = [model.test for model in models if model.test is not None]\n assert len(test_datasets) == len(models), \"Some models don't have test data sets.\"\n\n # The category does not affect the returned value of r.\n r = plotting.plot_predictions_against_observed(\n ax,\n models=models, # type: ignore # auto\n datasets=test_datasets, # type: ignore # auto\n category=category,\n title=\"Cross-validation\",\n )\n fig.savefig(\n self.config.results_dir / self.XVAL_TEST_PNG_BASE.format(self.by_clause(category)), bbox_inches=\"tight\"\n )\n plt.close(fig)\n\n # Summarise\n assert r is not None\n logging.info(f\"- Cross-validation r: {r:.3f}\")\n results = pd.DataFrame(\n {\n \"Index\": [f\"Fold {f + 1}\" for f in range(num_folds)] + [\"Xval\"],\n \"Train\": [m.r_train for m in models] + [None], # type: ignore # auto\n \"Test\": [m.r_test for m in models] + [r], # type: ignore # auto\n }\n )\n results.to_csv(self.config.results_dir / self.TRAINING_CSV, index=False)\n\n return models # type: ignore # auto", "def runParallelCrossValidation(fold, data_folds, cross_validation_path, iteration=0, parameters=None):\n \n t0=time()\n \n log_id = \"<<{:2d}-fold>> \".format(fold) \n\n logging.info(log_id+\"Train-Validation. Saving information to {}\".format(cross_validation_path) )\n\n # Merging Train folds intro one single object. 
\n train_folds = []\n for j in range(len(data_folds)):\n if not j==fold:\n train_folds.append(data_folds[j])\n\n #Test fold is the one excluded.\n test_samples = data_folds[fold][\"data\"]\n test_labels = data_folds[fold][\"labels\"]\n \n #Building Train data and labels arrays. \n train_data = []\n train_labels = []\n for i in range(len(train_folds)):\n train_data.extend(train_folds[i][\"data\"])\n train_labels.extend(train_folds[i][\"labels\"])\n logging.info(log_id+\"Train set {} => Train has {} Samples and {} Labels.\".format(i,len(train_folds[i][\"data\"]),len(train_folds[i][\"labels\"])) )\n \n logging.info(log_id+\"Full Train Dataset has {} Samples and {} Labels.\".format(len(train_data),len(train_labels)))\n logging.info(log_id+\"Test Dataset has {} Samples and {} Labels.\".format(len(test_samples),len(test_labels)) )\n\n \n #Running the Pipeline. \n score = runPipeline (train_data, train_labels, test_samples, test_labels, \n pipelineID = \"iter_\"+str(iteration)+\"_fold_\"+str(fold), \n log_id=log_id,\n path_results = cross_validation_path,\n parameters=parameters,\n summary_filename= \"crossvalidation_summary.csv\" )\n\n t1=time()\n \n logging.info(log_id+\"runParallelCorssValidation ran in {:4.2f} sec.\".format(t1-t0))\n logging.info(log_id+\"Scores:\"+ ''.join(''.join((k,\"->\", str(v),\";\")) for k,v in score.items())) \n \n return score", "def cross_validate(self, \r\n train_valid_folds = 5,\r\n eval_func_names = \"mse\", \r\n model_params = None,\r\n include_rf_oob = True):\r\n \r\n \r\n if isinstance(eval_func_names, str):\r\n eval_func_names = [eval_func_names]\r\n\r\n train_valid_folds = prepare_folds(train_valid_folds,\r\n self.X_train,\r\n self.random_seed)\r\n\r\n self._prepare_params(model_params)\r\n\r\n if self.model_type == \"elastic_net\":\r\n cv_scores = model_utils._cv_elastic_net(self.X_train, self.y_train, train_valid_folds, \r\n eval_func_names,\r\n model_params,\r\n self.log_target_reg)\r\n\r\n elif self.model_type == \"random_forest\":\r\n if include_rf_oob:\r\n assert hasattr(self, \"model\"), \"random_forest must be trained first to include oob error\"\r\n oob_pred = self.model.oob_prediction_\r\n else:\r\n oob_pred = None\r\n cv_scores = model_utils._cv_random_forest(self.X_train, self.y_train, train_valid_folds,\r\n self.obj_func_name, \r\n eval_func_names,\r\n model_params,\r\n self.sample_weight,\r\n self.log_target_reg,\r\n oob_pred)\r\n\r\n\r\n elif self.model_type == \"lightgbm\":\r\n cv_scores = model_utils._cv_lightgbm(self.X_train, self.y_train, train_valid_folds, \r\n self.obj_func_name,\r\n eval_func_names,\r\n model_params,\r\n self.sample_weight,\r\n self.log_target_reg)\r\n \r\n\r\n elif self.model_type == \"xgboost\":\r\n cv_scores = model_utils._cv_xgboost(self.X_train, self.y_train, train_valid_folds, \r\n self.obj_func_name,\r\n eval_func_names,\r\n model_params,\r\n self.sample_weight,\r\n self.log_target_reg)\r\n \r\n else:\r\n raise NotImplementedError(\"model type {} not supported\".format(self.model_type))\r\n\r\n return cv_scores", "def validate_learning(data_array, fold_size):\r\n\r\n # Validates the descision tree learning function given a data array and a fold size\r\n labels = np.unique(data_array[:, data_array.shape[1]-1])\r\n total_confusion_matrix = np.zeros((len(labels), len(labels)))\r\n total_depth = 0\r\n\r\n # separate the data into training and testing for each fold.\r\n for fold in range(fold_size):\r\n print(\".\", end=\"\", flush=True)\r\n training_data, test_data = divide_dataset(data_array, fold_size, fold)\r\n # 
Train the tree and get its confusion matrix\r\n tree, depth = dt.decision_tree_learning(training_data)\r\n predicted_values, true_values = dt.classify_array(test_data, tree)\r\n confusion_matrix = dt.get_confusion_matrix(predicted_values, true_values)\r\n # Append its confusion matrix to the total matrix and depth to the total depth\r\n total_confusion_matrix += confusion_matrix\r\n total_depth += depth\r\n # Compute average by dividing total by fold size\r\n average_confusion_matrix = total_confusion_matrix/fold_size\r\n average_depth = total_depth/fold_size\r\n\r\n return average_confusion_matrix, average_depth", "def train_and_evaluate_fold(self, x_train, y_train, x_test, y_test, classifier, index, data_balancer=None):\n if data_balancer is not None:\n x_train, y_train = data_balancer.fit_sample(x_train, y_train)\n\n # Training fold specific statistics\n verbose_print(\"\\n== Training Stats Fold {0} ==\".format(index + 1))\n verbose_print(\"Number of rows for training fold {0}: {1}\".format(index + 1, x_train.shape[0]))\n verbose_print(\"Number of defaulters for training fold {0}: {1}\".format(index + 1, y_train[y_train == 1].shape[0]))\n\n start_time = timer()\n classifier.fit(x_train, y_train)\n end_time = timer()\n fit_time = end_time - start_time\n\n # Testing fold specific statistics\n verbose_print(\"== Testing Stats Fold {0} ==\".format(index + 1))\n verbose_print(\"Number of rows for training fold {0}: {1}\".format(index + 1, len(y_test)))\n verbose_print(\"Number of defaulters for training fold {0}: {1}\".format(index + 1, np.count_nonzero(y_test == 1)))\n\n # Test accuracy\n test_classification = classifier.predict(x_test)\n test_classification = np.array(test_classification)\n test_classification = test_classification.flatten()\n\n try:\n test_probabilities = classifier.predict_proba(x_test)\n if len(test_probabilities[0]) < 2:\n raise RuntimeError(\"test probabilities is not correct length\")\n except Exception:\n test_probabilities = [[-1, -1]] * len(test_classification)\n\n outcome_decision_values = None\n try:\n predictions = classifier.predict_proba(x_test)\n outcome_decision_values = predictions[:, 1]\n except Exception as e:\n outcome_decision_values = None\n verbose_print(\"WARNING: unable to calculate classification accuracy - {0} - {1}\".format(classifier.__class__.__name__, e))\n\n fpr, tpr = None, None\n if outcome_decision_values is not None:\n try:\n fpr, tpr, _ = roc_curve(y_test, outcome_decision_values)\n fpr = fpr.tolist()\n tpr = tpr.tolist()\n except Exception as e:\n print(e)\n\n self.ml_stats.calculate_and_append_fold_accuracy(test_classification, y_test, tpr, fpr, fit_time, test_probabilities=test_probabilities)", "def load_fold_data(base_folder, model_name, model_class, save_path=\"save/\"):\n base_folder = save_path + base_folder\n\n task_list = []\n for task_folder in sorted(os.listdir(base_folder)):\n if \".json\" in task_folder:\n continue\n task_folder = base_folder + \"/\" + task_folder\n\n fold_result_list = []\n for fold_folder in sorted(os.listdir(task_folder)):\n curr_folder = task_folder + \"/\" + fold_folder + \"/\"\n pred = pd.read_csv(curr_folder + \"pred.csv\")\n with open(curr_folder + \"miss_data.pkl\", \"rb\") as handle:\n miss_data = pickle.load(handle)\n \n loss_detail = load_json(curr_folder + \"loss_detail.json\")\n model = model_class.load_from_path(\n curr_folder + model_name\n )\n result_fold = FoldWalkForewardResult(\n pred=pred, missing_data=miss_data, model=model, loss_detail=loss_detail\n )\n 
fold_result_list.append(result_fold)\n \n task_list.append(fold_result_list)\n \n return task_list", "def validate_training(image_dir,\n gt_dir,\n models_dir,\n validation_main_dir,\n segmentation_labels,\n evaluation_labels=None,\n step_eval=1,\n padding=None,\n cropping=None,\n target_res=1.,\n sigma_smoothing=0,\n keep_biggest_component=False,\n conv_size=3,\n n_levels=5,\n nb_conv_per_level=2,\n unet_feat_count=24,\n feat_multiplier=2,\n activation='elu',\n compute_distances=False,\n compute_score_whole_structure=True,\n recompute=True):\n\n # create result folder\n utils.mkdir(validation_main_dir)\n\n # loop over models\n list_models = utils.list_files(models_dir, expr=['dice', '.h5'], cond_type='and')[::step_eval]\n loop_info = utils.LoopInfo(len(list_models), 1, 'validating', True)\n for model_idx, path_model in enumerate(list_models):\n\n # build names and create folders\n model_val_dir = os.path.join(validation_main_dir, os.path.basename(path_model).replace('.h5', ''))\n dice_path = os.path.join(model_val_dir, 'dice.npy')\n utils.mkdir(model_val_dir)\n\n if (not os.path.isfile(dice_path)) | recompute:\n loop_info.update(model_idx)\n predict(path_images=image_dir,\n path_segmentations=model_val_dir,\n path_model=path_model,\n segmentation_labels=segmentation_labels,\n padding=padding,\n cropping=cropping,\n target_res=target_res,\n sigma_smoothing=sigma_smoothing,\n keep_biggest_component=keep_biggest_component,\n conv_size=conv_size,\n n_levels=n_levels,\n nb_conv_per_level=nb_conv_per_level,\n unet_feat_count=unet_feat_count,\n feat_multiplier=feat_multiplier,\n activation=activation,\n gt_folder=gt_dir,\n evaluation_labels=evaluation_labels,\n compute_distances=compute_distances,\n compute_score_whole_structure=compute_score_whole_structure,\n recompute=recompute,\n verbose=False)", "def evaluate(self, training: Tuple[NdArrayLike, NdArrayLike],\n testing: Tuple[NdArrayLike, NdArrayLike],\n label_map: Dict[int, str] = None, splits=5,\n fold_generator=None):\n\n self.test_predictions = []\n self.fold = 0\n\n print(\"----------------------------------------\")\n print(f\"evaluating {self.name} model\")\n print(\"----------------------------------------\")\n\n training_data, training_labels = training\n testing_data, testing_labels = testing\n\n if fold_generator is None:\n kf = KFold(n_splits=splits)\n fold_generator = kf.split(training_data)\n\n for train_index, val_index in fold_generator:\n self._init_logger()\n x_train = training_data[train_index]\n y_train = training_labels[train_index]\n\n x_val = training_data[val_index]\n y_val = training_labels[val_index]\n\n callbacks = [ModelCheckpoint(self.model_path, save_best_only=True, save_weights_only=True)]\n if self.use_logger:\n callbacks += [WandbCallback(save_model=False)]\n\n model = self.train((x_train, y_train), (x_val, y_val), callbacks)\n\n self.log_validation(model, x_val, y_val, label_map)\n test_pred = self.log_testing(model, testing_data, testing_labels, label_map)\n self.log_examples(model, testing_data, testing_labels, test_pred)\n self._end_fold(test_pred)\n\n if self.use_ensemble:\n print(\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\")\n self._init_logger(ensemble=True)\n ensemble_pred = self.predict_ensemble()\n self.log_ensemble(testing_labels, ensemble_pred, label_map)\n print(\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\")\n\n self._end_fold(ensemble_pred)", "def setup(dataset_dir, current_dir, model_dir, feature_experiment, data_mode,\n gender, current_fold, data_type='', path_to_logger_for_test=None):\n 
reproducibility(chosen_seed)\n checkpoint_run = None\n checkpoint = False\n next_fold = False\n next_exp = False\n if not os.path.exists(features_dir):\n print('There is no folder and therefore no database created. '\n 'Create the database first')\n sys.exit()\n if os.path.exists(current_dir) and os.path.exists(model_dir) and debug:\n shutil.rmtree(current_dir, ignore_errors=False, onerror=None)\n # THIS WILL DELETE EVERYTHING IN THE CURRENT WORKSPACE #\n if not os.path.exists(data_fold_dir):\n os.makedirs(data_fold_dir)\n os.makedirs(data_fold_dir_equal)\n dataset_processing.partition_dataset(workspace_main_dir,\n feature_experiment,\n features_dir,\n sub_dir,\n current_dir,\n data_mode,\n dataset_dir,\n total_folds,\n gender)\n\n if os.path.exists(current_dir) and os.path.exists(model_dir):\n temp_dirs = os.listdir(model_dir)\n temp_dirs = natsort.natsorted(temp_dirs, reverse=True)\n temp_dirs = [d for d in temp_dirs if '.pth' in d]\n if len(temp_dirs) == 0:\n pass\n else:\n if int(temp_dirs[0].split('_')[1]) == final_iteration and mode ==\\\n 'train':\n directory = model_dir.split('/')[-1]\n final_directory = model_dir.replace(directory, 'Fold_'+str(total_folds))\n if os.path.exists(final_directory):\n temp_dirs2 = os.listdir(final_directory)\n temp_dirs2 = natsort.natsorted(temp_dirs2, reverse=True)\n temp_dirs2 = [d for d in temp_dirs2 if '.pth' in d]\n if int(temp_dirs2[0].split('_')[1]) == final_iteration:\n if i == config.EXP_RUNTHROUGH-1:\n print(f\"A directory at this location exists: {current_dir}\")\n sys.exit()\n else:\n next_exp = True\n return None, None, None, None, next_fold, next_exp\n else:\n next_fold = True\n return None, None, None, None, next_fold, next_exp\n else:\n next_fold = True\n return None, None, None, None, next_fold, next_exp\n else:\n print(f\"Current directory exists but experiment not finished\")\n print(f\"Loading from checkpoint: {int(temp_dirs[0].split('_')[1])}\")\n checkpoint_run = os.path.join(model_dir, temp_dirs[0])\n checkpoint = True\n elif not os.path.exists(current_dir):\n os.mkdir(current_dir)\n util.create_directories(current_dir, config.EXP_FOLDERS)\n os.mkdir(model_dir)\n elif os.path.exists(current_dir) and not os.path.exists(model_dir):\n os.mkdir(model_dir)\n\n if mode == 'test' and path_to_logger_for_test is not None and data_type \\\n == 'test':\n if os.path.exists(path_to_logger_for_test):\n shutil.rmtree(path_to_logger_for_test, ignore_errors=False,\n onerror=None)\n os.mkdir(path_to_logger_for_test)\n main_logger = logging_info(path_to_logger_for_test, current_fold,\n data_type)\n else:\n main_logger = logging_info(current_dir, current_fold, data_type)\n\n model = create_model(main_logger)\n\n return main_logger, model, checkpoint_run, checkpoint, next_fold, next_exp", "def _run_validation_error_and_summaries(self, epoch, validation_set, validation_set_label):\n\n if self.verbose == 1:\n print('At step %d (%.2f seconds): ' % (epoch, self.train_time), end='')\n print('[Train Stat (average over past steps)] - ', end='')\n if self.triplet_strategy != 'none':\n print('Triplet: ', end='')\n print('Fraction=%.4f\\t' % np.mean(self.fraction_triplet_batch), end='')\n print('Number=%.2f\\t' % np.mean(self.num_triplet_batch), end='')\n print('Cost: ', end='')\n print('Overall=%.4f\\t' % (np.mean(self.train_cost_batch[0])), end='')\n if self.triplet_strategy != 'none':\n print('Autoencoder=%.4f\\t' % np.mean(self.train_cost_batch[1]), end='')\n print('Triplet=%.4f\\t' % np.mean(self.train_cost_batch[2]), end='')\n\n if validation_set is 
None:\n print()\n return\n\n if self.sparse_input:\n _temp = utils.get_sparse_ind_val_shape(validation_set)\n vl_feed = {self.input_data: _temp, self.input_data_corr: _temp, self.input_label: validation_set_label}\n else:\n vl_feed = {self.input_data: validation_set, self.input_data_corr: validation_set, self.input_label: validation_set_label}\n\n if self.triplet_strategy != 'none':\n result = self.tf_session.run([self.tf_merged_summaries, self.cost, self.autoencoder_loss, self.triplet_loss], feed_dict=vl_feed)\n else:\n result = self.tf_session.run([self.tf_merged_summaries, self.cost], feed_dict=vl_feed)\n\n summary_str = result[0]\n self.tf_validation_summary_writer.add_summary(summary_str, epoch)\n\n if self.verbose:\n print(\"[Validation Stat (at this step)] - Cost: \")\n print('Overall=%.4f' % (result[1]), end='')\n if self.triplet_strategy != 'none':\n print('Autoencoder=%.4f\\t' % (result[2]), end='')\n print('Triplet=%.4f\\t' % (result[3]), end='')\n print()", "def num_folds(self):\n return int(op.basename(self.eval_dir).partition('num_folds=')[2])", "def plot_keras_learning_kfold(hs, savefig=True, img_name='learn_kfold'):\n import matplotlib.pyplot as plt\n plt.rcParams['figure.figsize'] = [10, 8]\n legend_font_size = 10\n fontsize = 12\n markersize = 5\n\n epochs = np.asarray(hs[0].epoch) + 1\n k_folds = len(hs)\n\n # Extract names of all recorded metrics for training and val sets\n pr_metrics = get_keras_performance_metrics(hs[0])\n\n # Plot\n for m in pr_metrics:\n metric_name = m\n metric_name_val = 'val_' + m\n\n # Compute the average of a metric across folds\n metric_avg = np.asarray([hs[fold].history[metric_name] for fold in hs]).sum(axis=0, keepdims=True) / k_folds\n metric_avg_val = np.asarray([hs[fold].history[metric_name_val] for fold in hs]).sum(axis=0, keepdims=True) / k_folds\n\n # Plot a metric for each fold vs epochs\n marker = ['b.', 'r^', 'kx', 'mv', 'gp', 'bs', 'r8', 'kD']\n fig = plt.figure()\n for i, metric in enumerate([metric_name, metric_name_val]):\n ax = fig.add_subplot(3, 1, i + 1)\n for fold in range(k_folds):\n plt.plot(epochs, hs[fold].history[metric], alpha=0.5, markersize=markersize, label='fold{}'.format(fold + 1))\n plt.ylabel(metric, fontsize=fontsize)\n plt.grid(True)\n plt.xlim([0.5, len(epochs) + 0.5])\n plt.ylim([0, 1])\n ax.tick_params(axis='both', which='major', labelsize=fontsize)\n ax.tick_params(axis='both', which='minor', labelsize=fontsize)\n plt.legend(loc='best', prop={'size': legend_font_size})\n\n\n # Plot the average of a metric across folds vs epochs\n ax = fig.add_subplot(3, 1, 3)\n plt.plot(epochs, metric_avg.flatten(), 'bo', alpha=0.6, markersize=markersize, label=metric_name)\n plt.plot(epochs, metric_avg_val.flatten(), 'rs', alpha=0.6, markersize=markersize, label=metric_name_val)\n plt.ylabel(metric_name+' avg over folds', fontsize=fontsize)\n plt.xlabel('epochs', fontsize=fontsize)\n plt.grid(True)\n plt.xlim([0.5, len(epochs) + 0.5])\n plt.ylim([0, 1])\n ax.tick_params(axis='both', which='major', labelsize=fontsize)\n ax.tick_params(axis='both', which='minor', labelsize=fontsize)\n plt.legend(loc='best', prop={'size': legend_font_size})\n\n if savefig:\n plt.savefig(img_name + '_' + metric_name + '.png', bbox_inches='tight')", "def validate_pruning(data_array, fold_size):\r\n\r\n # This is a nested x-fold of the validate function.\r\n # Count the labels\r\n labels = np.unique(data_array[:, data_array.shape[1]-1])\r\n # Initialize variables\r\n total_confusion_matrix = np.zeros((len(labels), len(labels)))\r\n total_depth 
= 0\r\n total_pruned_matrix = np.zeros((len(labels), len(labels)))\r\n total_pruned_depth = 0\r\n print_counter = 1\r\n for fold in range(fold_size):\r\n # Prints for visualisation purposes\r\n print(f\"Validating training set {print_counter}/{fold_size} \", end=\"\", flush=True)\r\n # Divide data into test data and the rest\r\n training_visualisation_data, test_data = divide_dataset(data_array, fold_size, fold)\r\n # Initialize variables for current training set\r\n confusion_matrix = np.zeros((len(labels), len(labels)))\r\n training_depth = 0\r\n pruned_matrix = np.zeros((len(labels), len(labels)))\r\n pruned_depth = 0\r\n for validation_fold in range(fold_size-1):\r\n # Prints for visualisation purposes\r\n print(\".\", end=\"\", flush=True)\r\n # Further divide data into training and validation data\r\n training_data, validation_data = divide_dataset(training_visualisation_data, fold_size-1, validation_fold)\r\n # train and test tree\r\n tree, depth = dt.decision_tree_learning(training_data)\r\n predicted_values, true_values = dt.classify_array(test_data, tree)\r\n # prune and test tree\r\n tree = dt.prune(tree, validation_data)\r\n new_depth = dt.get_depth(tree)\r\n # Append confusion matrices and depth of both trees\r\n predicted_pruned_values, true_pruned_values = dt.classify_array(test_data, tree)\r\n confusion_matrix += dt.get_confusion_matrix(predicted_values, true_values)\r\n training_depth += depth\r\n pruned_matrix += dt.get_confusion_matrix(predicted_pruned_values, true_pruned_values)\r\n pruned_depth += new_depth\r\n # Append the averages of confusion matrices and depths of all validation folds\r\n total_confusion_matrix += confusion_matrix/(fold_size-1)\r\n total_depth += training_depth/(fold_size-1)\r\n total_pruned_matrix += pruned_matrix/(fold_size-1)\r\n total_pruned_depth += pruned_depth/(fold_size-1)\r\n print_counter+=1\r\n print(\"\")\r\n # Average matrices and depths of all training folds\r\n average_confusion_matrix = total_confusion_matrix/fold_size\r\n average_depth = total_depth/fold_size\r\n average_pruned_matrix = total_pruned_matrix/fold_size\r\n average_pruned_depth = total_pruned_depth/fold_size\r\n\r\n return average_confusion_matrix, average_depth, average_pruned_matrix, average_pruned_depth", "def train_approximate_network():\n \n model_dict = {} # all the different models\n model_dict['UNet'] = UNet\n model_dict['UNetLite'] = UNetLite\n model_dict['UNetWide40'] = UNetWide40\n model_dict['UNetWide48'] = UNetWide48\n model_dict['UNetDS64'] = UNetDS64\n model_dict['UNetWide64'] = UNetWide64\n model_dict['MultiResUNet1D'] = MultiResUNet1D\n model_dict['MultiResUNetDS'] = MultiResUNetDS\n\n\n mdlName1 = 'UNetDS64' # approximation network\n mdlName2 = 'MultiResUNet1D' # refinement network\n \n length = 1024 # length of the signal\n\n try: # create directory to save models\n os.makedirs('models')\n except:\n pass\n\n try: # create directory to save training history\n os.makedirs('History')\n except:\n pass\n\n # 10 fold cross validation\n for foldname in range(10):\n\n print('----------------')\n print('Training Fold {}'.format(foldname+1))\n print('----------------')\n # loading training data\n dt = pickle.load(open(os.path.join('data','train{}.p'.format(foldname)),'rb'))\n X_train = dt['X_train']\n Y_train = dt['Y_train']\n # loading validation data\n dt = pickle.load(open(os.path.join('data','val{}.p'.format(foldname)),'rb'))\n X_val = dt['X_val']\n Y_val = dt['Y_val']\n\n # loading metadata\n dt = 
pickle.load(open(os.path.join('data','meta{}.p'.format(foldname)),'rb'))\n max_ppg = dt['max_ppg']\n min_ppg = dt['min_ppg']\n max_abp = dt['max_abp']\n min_abp = dt['min_abp']\n\n\n Y_train = prepareLabel(Y_train) # prepare labels for training deep supervision\n \n Y_val = prepareLabel(Y_val) # prepare labels for training deep supervision\n \n\n \n mdl1 = model_dict[mdlName1](length) # create approximation network\n\n # loss = mae, with deep supervision weights\n mdl1.compile(loss='mean_absolute_error',optimizer='adam',metrics=['mean_squared_error'], loss_weights=[1., 0.9, 0.8, 0.7, 0.6]) \n\n\n checkpoint1_ = ModelCheckpoint(os.path.join('models','{}_model1_fold{}.h5'.format(mdlName1,foldname)), verbose=1, monitor='val_out_loss',save_best_only=True, mode='auto') \n # train approximation network for 100 epochs\n history1 = mdl1.fit(X_train,{'out': Y_train['out'], 'level1': Y_train['level1'], 'level2':Y_train['level2'], 'level3':Y_train['level3'] , 'level4':Y_train['level4']},epochs=100,batch_size=256,validation_data=(X_val,{'out': Y_val['out'], 'level1': Y_val['level1'], 'level2':Y_val['level2'], 'level3':Y_val['level3'] , 'level4':Y_val['level4']}),callbacks=[checkpoint1_],verbose=1)\n\n pickle.dump(history1, open('History/{}_model1_fold{}.p'.format(mdlName1,foldname),'wb')) # save training history\n\n\n mdl1 = None # garbage collection\n\n time.sleep(300) # pause execution for a while to free the gpu" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the minimum temperature - 4.5 means off.
def min_temp(self):
    return convert(4.5, TEMP_CELSIUS, self.unit_of_measurement)
[ "def minwatertemperature(self):\n return self._minwatertemperature", "def minairtemperature(self):\n return self._minairtemperature", "def lower_temperature(self) -> float:\n\n return self._get_temperature(_MCP9808_REG_LOWER_TEMP)", "def low_temperature(self):\r\n low_temperature_list = self.get_temperature(self._past_weather_list, \"low_temperature\")\r\n lowest_temperature = low_temperature_list[0]\r\n return lowest_temperature", "def target_temperature_low(self):\n return self.heater.eco_temperature", "def minimum_temperature(self, value: float) -> None:\n self._min_temp = value", "def low_temperature(self):\r\n low_temperature = self.get_average(self._past_weather_list, \"low_temperature\")\r\n\r\n if self._past_weather_list[-1].get_air_pressure() < self._air_pressure:\r\n low_temperature -= 2\r\n\r\n return low_temperature", "def _pseudo_min(self) -> float:\n x = self.min()\n if math.isinf(x) or math.isnan(x):\n x = self.mean() - 4 * self.std()\n return x", "def _default_low_value(self):\n return self.minimum", "def test_temperature_min(self):\n self.assertAlmostEqual(self.thermodata.Tmin.value_si, self.Tmin, 6)", "def _get_minimumValue(self) -> \"double\" :\n return _core.FloatSpinnerCommandInput__get_minimumValue(self)", "def min_vpu_low(self) -> float:\n return self._min_vpu_low", "def get_lowest_temperature(line):\n try:\n return int(line[MIN_TEMPERATURE_INDEX])\n except ValueError:\n return None", "def _get_minimumValue(self) -> \"double\" :\n return _core.AngleValueCommandInput__get_minimumValue(self)", "def target_temperature_high(self):\n return self.heater.comfort_temperature", "async def get_minimum_pressure(self):\n command = PMIN10 if self._headtype == AzuraPumpHeads.FLOWRATE_TEN_ML else PMIN50\n p_min = await self.create_and_send_command(command) * ureg.bar\n return str(p_min)", "def get_today_lowest_temp(self):\n t1 = self.data['temp1']\n return t1[t1.index('~') + 1:]", "def min_max_temp(temp: str, unit: str = 'C') -> str:\r\n if not temp or len(temp) < 7:\r\n return ''\r\n if temp[:2] == 'TX':\r\n temp_type = 'Maximum'\r\n elif temp[:2] == 'TN':\r\n temp_type = 'Minimum'\r\n else:\r\n return ''\r\n temp = temp[2:].replace('M', '-').replace('Z', '').split('/')\r\n if len(temp[1]) > 2:\r\n temp[1] = temp[1][:2] + '-' + temp[1][2:]\r\n return '{temp_type} temperature of {temp} at {time}:00Z'.format(\r\n temp_type=temp_type, temp=temperature(temp[0], unit), time=temp[1])", "def test_Tmin(self):\n self.assertAlmostEqual(self.stick.Tmin.value_si, self.Tmin, 6)", "def max_temp(self):\n temp = self.device.away_temperature.high\n if temp is None:\n return super().max_temp\n else:\n return temp" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the maximum temperature - 30.5 means on.
def max_temp(self):
    return convert(30.5, TEMP_CELSIUS, self.unit_of_measurement)
[ "def max_temp(self):\n temp = self.device.away_temperature.high\n if temp is None:\n return super().max_temp\n else:\n return temp", "def maxwatertemperature(self):\n return self._maxwatertemperature", "def max_temp(self):\n # pylint: disable=no-member\n if self._max_temp:\n return self._max_temp\n else:\n # Get default temp from super class\n return ClimateDevice.max_temp.fget(self)", "def test_temperature_max(self):\n self.assertAlmostEqual(self.thermodata.Tmax.value_si, self.Tmax, 6)", "def maxairtemperature(self):\n return self._maxairtemperature", "def target_temperature_max(self) -> Optional[float]:\n if self._state is None:\n return None\n limits = self._device_conf.get(\"max\", {})\n return limits.get(str(_operation_mode_to(self.operation_mode)), {}).get(\"max\", 31)", "def target_temperature_high(self):\n return self.heater.comfort_temperature", "def maximum_temperature(self, value: float) -> None:\n self._max_temp = value", "def most_hot(self):\n maxtemp = -270.0 # No one would survive that...\n hottest = None\n for weather in self.forecast.weathers:\n d = weather.temperature()\n if 'temp_max' in d and d['temp_max'] > maxtemp:\n maxtemp = d['temp_max']\n hottest = weather\n return hottest", "def high_temperature(self):\r\n high_temperature_list = self.get_temperature(self._past_weather_list, \"high_temperature\")\r\n highest_temperature = high_temperature_list[-1]\r\n return highest_temperature", "def get_highest_average_temperature(self) -> int:\n add = constant.COUNTER_STARTER\n count = constant.COUNTER_STARTER\n for val in range(len(self.weather_detail)):\n add = add + utils.string_to_integer(self.weather_detail[val].max_temperature)\n count = count + 1\n return int(add / count)", "def test_Tmax(self):\n self.assertAlmostEqual(self.stick.Tmax.value_si, self.Tmax, 6)", "def target_temperature_low(self):\n return self.heater.eco_temperature", "def maxwatertemperature(self, maxwatertemperature):\n\n self._maxwatertemperature = maxwatertemperature", "def target_temperature(self):\n return self._thermostat_temp", "def _get_maximumValue(self) -> \"double\" :\n return _core.AngleValueCommandInput__get_maximumValue(self)", "def get_new_temperature(self):\r\n\r\n temperature = self.current_temperature\r\n difference = random.randint(0,10)/100.0\r\n if random.randint(0,100) > 50:\r\n temperature += difference\r\n if (temperature > TEMPERATURE_MAX_LIMIT):\r\n temperature = TEMPERATURE_MAX_LIMIT\r\n else:\r\n temperature -= difference\r\n if (temperature < TEMPERATURE_MIN_LIMIT):\r\n temperature = TEMPERATURE_MIN_LIMIT\r\n return temperature", "def _get_maximumValue(self) -> \"double\" :\n return _core.FloatSpinnerCommandInput__get_maximumValue(self)", "def target_temperature(self):\n temps = [zone['setpointStatus']['targetHeatTemperature']\n for zone in self._status['zones']]\n\n avg_temp = round(sum(temps) / len(temps), 1) if temps else None\n return avg_temp", "def upper_temperature(self) -> float:\n\n return self._get_temperature(_MCP9808_REG_UPPER_TEMP)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if possible to use the HM Object as this HA type.
def _check_hm_to_ha_object(self):
    from pyhomematic.devicetypes.thermostats import HMThermostat\
        as pyHMThermostat

    # Check compatibility from HMDevice
    if not super()._check_hm_to_ha_object():
        return False

    # Check if the homematic device correct for this HA device
    if isinstance(self._hmdevice, pyHMThermostat):
        return True

    _LOGGER.critical("This %s can't be use as thermostat", self._name)
    return False
[ "def has_hm(self) -> bool:\n return self.halo_profile is not None", "def __is(self, object_instance: WashBase, rule_class) -> bool:\n return textx_isinstance(object_instance, self.__metamodel[rule_class])", "def check(cls, obj):\n # An object is 'missing' if it has an attribute 'moya_missing' set to True\n return getattr(obj, \"moya_missing\", False)", "def is_challenge_model(input_object):\n return bool(isinstance(input_object, ChallengeModel))", "def HasHIA(self):\n return self.__has('HIA')", "def _is_object_type(attribute_info):\n lsa_type, get_type, cmo_type = _get_attribute_types(attribute_info)\n return lsa_type == alias_constants.OBJECT or \\\n (_is_type_an_unknown_type(lsa_type) and\n (get_type == alias_constants.OBJECT or (cmo_type == alias_constants.OBJECT)))", "def is_specific(self):\n return True", "def __super_entity(trackable):\n\n if hasattr(trackable, \"fields\"):\n keys = trackable.fields\n else:\n keys = trackable\n\n return \"instance_type\" in keys", "def _can_be_object(self, synsets: Sequence[Synset]) -> bool:\n for synset in synsets:\n for lch in synset.lowest_common_hypernyms(self.wn_organism_synset):\n # logger.debug(f\"synset: {synset.name()}, lowest common hypernym: {lch.name()}\")\n if lch.name() != self.wn_organism_synset.name():\n return True\n return False", "def IsCOMObject(self) -> bool:", "def is_specific(self):\n return False", "def _check_host(self):\n if not self.available:\n _LOGGER.error(\"No HassOS availabe\")\n raise HassioNotSupportedError()", "def HasHIL(self):\n return self.__has('HIL')", "def IsFamilyOrAssembly(self) -> bool:", "def HasHIS(self):\n return self.__has('HIS')", "def HasHDT(self):\n return self.__has('HDT')", "def HasGOH(self):\n return self.__has('GOH')", "def is_supported_hist(checked_object) -> bool:\n is_th1d = False\n is_th1f = False\n if type(checked_object) == type(ROOT.TH1D()):\n is_th1d = True\n if type(checked_object) == type(ROOT.TH1F()):\n is_th1f = True\n if is_th1d or is_th1f:\n return True\n else:\n return False", "def is_hugging_face_model(x):\n return hasattr(x, 'embed')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of random, unique playing cards
def randomCardList(cardCount):
    return random.sample(cardSpace(), k=cardCount)
[ "def get_cards():\n return random.randint(1, 10)", "def total_cards_list(self):\n cartesian_product = product(self.suit, self.rank)\n list_of_cards = list(cartesian_product)\n return random.sample(list_of_cards, 36)", "def players_having_no_tie_card_sample():\n player1_cards = [Card('H', '2'), Card('H', '3'), Card('H', '4'), Card('H', '5'), Card('H', '6')]\n player2_cards = [Card('H', '9'), Card('H', '10'), Card('H', 'J'), Card('H', 'K'), Card('H', 'Q')]\n return [player1_cards, player2_cards]", "def players_having_tie_card_sample():\n player1_cards = [Card('S', '2'), Card('H', '3'), Card('H', '4'), Card('H', '5'), Card('H', '6'),\n Card('H', '7')]\n player2_cards = [Card('H', '2'), Card('H', '10'), Card('H', 'J'), Card('H', 'K'), Card('H', 'Q'),\n Card('H', 'A')]\n return [player1_cards, player2_cards]", "def random_deck():\n deck = list(deckset)\n shuffle(deck)\n return deck", "def _unique_cards(self, game_mode='ranked', game_threshold = 5, formatted = True):\n cards = self.generate_card_stats(game_mode, game_threshold).reset_index()\n cards = cards['card'].unique().tolist()\n return cards", "def create_deck():\n card_deck = []\n for x in range(6):\n for suit in ('H', 'S', 'C', 'D'):\n for rank in range(2, 11):\n card_deck.append((str(rank) + str(suit)))\n for face_cards in ('A', 'J', 'Q', 'K'):\n card_deck.append((str(face_cards) + str(suit)))\n\n random.shuffle(card_deck)\n return card_deck", "def get_random_card(self):\n return random.choice(self.cards)", "def get_random_cards(cards, amount=10):\n if len(cards) < amount:\n print(\"There are not enough cards to display {a} cards. Displaying {l} cards instead.\".format(a=amount, l=len(cards)))\n return cards\n else:\n print(\"Selecting {a} cards from card list of {l}.\".format(a=amount, l=len(cards)))\n return random.sample(cards, amount)", "def make_deck():\n deck = 4 * valid_ranks\n random.shuffle(deck)\n return deck", "def create_all_cards(self):\r\n all_cards = []\r\n\r\n for value in Card.values:\r\n for symbols in self.choose_symbols_color():\r\n all_cards.append(f'{value} {symbols}')\r\n return all_cards", "def _generate_cards_played(self):\n self.games['p_cards_played'] = self.games['card_history'].map(lambda x: self._get_card_list(x, player='me'))\n self.games['o_cards_played'] = self.games['card_history'].map(lambda x: self._get_card_list(x, player='opponent'))", "def deal_to_players_hand():\r\n cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\r\n card = random.choice(cards)\r\n return card", "def deal_cards(deck, number_of_cards = 1):\n cards = random.sample(deck, number_of_cards)\n for c in cards:\n deck.remove(c)\n return sorted(cards, reverse = True)", "def get_played_cards(cards, progress):\n return [card for card in cards if has_been_played(card, progress)]", "def _create_new_deck(self,player):\n\t\tdeck = [Card(character,number,player) for character in [\"A\",\"B\",\"C\",\"D\",\"E\"] for number in range(1,6)]\n\t\trandom.shuffle(deck)\n\t\treturn deck", "def players_cards_with_no_tie():\n player2_cards = [Card('H', '2'), Card('H', '3'), Card('H', '4'), Card('H', '5'), Card('H', '6'),\n Card('H', '7'), Card('H', '8'), Card('D', '2'), Card('D', '3'), Card('D', '4'),\n Card('D', '5'), Card('D', '6'), Card('D', '7'), Card('D', '8'), Card('S', '2'),\n Card('S', '3'), Card('S', '4'), Card('S', '5'), Card('S', '6'), Card('S', '7'),\n Card('C', '2'), Card('C', '3'), Card('C', '4'), Card('C', '5'), Card('C', '6'),\n Card('C', '7')]\n player1_cards = [Card('H', '9'), Card('H', '10'), Card('H', 'J'), Card('H', 'K'), 
Card('H', 'Q'),\n Card('H', 'A'), Card('D', '9'), Card('D', '10'), Card('D', 'J'), Card('D', 'K'),\n Card('D', 'Q'), Card('D', 'A'), Card('S', '8'), Card('S', '9'), Card('S', '10'),\n Card('S', 'J'), Card('S', 'K'), Card('S', 'Q'), Card('S', 'A'), Card('C', '8'),\n Card('C', '9'), Card('C', '10'), Card('C', 'J'), Card('C', 'K'), Card('C', 'Q'),\n Card('C', 'A')]\n players_cards = [player1_cards, player2_cards]\n return players_cards", "def pick_card(self):\n shuffle(Constants.RANKS)\n shuffle(Constants.SUITE)\n return Card(Constants.RANKS[0], Constants.SUITE[0])", "def get_cards():\n Card = namedtuple('Card', 'rank suit')\n ranks = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']\n suits = ['spades', 'hearts', 'diamonds', 'clubs']\n full_deck = [Card(suit, rank) for suit in suits for rank in ranks]\n return full_deck", "def deal(self, n: int):\n d = Deck()\n d.shuffle()\n deal = tuple([] for _ in range(n))\n for _ in range(3):\n for i in range(n):\n card = d.pop()\n deal[i].append(card)\n return deal" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Executes Proj1Test for a given guess.
def runGuess(answer, logger, display=True):
    timeStarted = time.time()
    output = subprocess.getoutput(["Proj1Test.exe"] + answer)
    try:
        timeDelta = round(time.time() - timeStarted, ACCURACY)
        guesses = round(float(re.findall(GUESS_REGEX, output)[0]), ACCURACY)
        quality = round(float(re.findall(QUALITY_REGEX, output)[0]), ACCURACY)
        data = (answer, guesses, quality, timeDelta)
        logger.logSuccess(*data)

        # Summary so far
        if display:
            print(logger.summarise(data))
    except IndexError:
        # The program did not execute successfully.
        if display:
            logger.logPrint(f">> ERROR OCCURRED WITH {answer}. Continuing with analysis\n")
        logger.logError(answer)
[ "def test_guess_learner():\n\n assert update_guess(1, 0.3, 0.1, 0.2) > \\\n update_guess(1, 0.3, 0.1, 0.8)\n assert update_guess(1, 0.01, 0.01, 0.01) > \\\n update_guess(1, 0.01, 0.01, 0.99)\n assert update_guess(1, 0.49, 0.49, 0.01) > \\\n update_guess(1, 0.49, 0.49, 0.99)", "def puzzle_1() -> None:\n\n print(_play_the_game(GOAL_NUMBER_1))", "def execute_one_game(self, game_state, experiment):\n pass", "def execute(self, grades, module_dict, solution_dict):\n module = module_dict[self.module_name]\n function = module.__dict__[self.function_name]\n passing_all = True\n for test, solution in self.test_cases.items():\n grades.add_message(\"Testing {}...\".format(repr(test)))\n\n result = function(test)\n\n if not isinstance(result, bool):\n grades.add_message('FAIL: {}'.format(self.path))\n grades.add_message('\\tReturn type of {} must be '\n 'bool, but it is {}'.format(\n self.function_name, type(result)))\n passing_all = False\n\n if result == solution:\n grades.add_message('PASS: {}'.format(self.path))\n grades.add_message('\\t{} properly classified'.format(\n repr(test)))\n\n else:\n grades.add_message('FAIL: {}'.format(self.path))\n grades.add_message('\\t{} improperly classified'.format(\n repr(test)))\n grades.add_message('\\tstudent result: {}'.format(repr(result)))\n grades.add_message('\\tcorrect result: {}'.format(\n repr(solution)))\n passing_all = False\n return passing_all", "def evaluate_individual(self, test_cases):\n\n # TMH CHANGE HERE\n\n self.errors = []\n\n # Self playing as Player 0\n for opponent in test_cases:\n player0 = GPPlayer(self.program)\n player1 = GPPlayer(opponent)\n verbose = False\n\n game = dominion.Dominion(player0, player1, verbose)\n winner = game.play()\n\n if winner == \"draw\":\n error = 0.5\n elif winner == 0:\n error = 0\n else:\n error = 1\n\n self.errors.append(error)\n\n # Self playing as Player 1\n for opponent in test_cases:\n player1 = GPPlayer(self.program)\n player0 = GPPlayer(opponent)\n verbose = False\n\n game = dominion.Dominion(player0, player1, verbose)\n winner = game.play()\n\n if winner == \"draw\":\n error = 0.5\n elif winner == 1:\n error = 0\n else:\n error = 1\n\n self.errors.append(error)\n\n print(\"ERRORS:\", self.errors)\n\n self.total_error = sum(self.errors)", "def test_guess_correct():\n\n assert update_guess(1, 0.3, 0.1, 0.7) >= 0.3\n assert update_guess(1, 0.1, 0.3, 0.7) >= 0.1\n assert update_guess(1, 0.01, 0.01, 0.01) >= 0.01\n assert update_guess(1, 0.49, 0.49, 0.99) >= 0.49", "def game(team1, team2):\n lineup1 = formBestLineup(team1)\n lineup2 = formBestLineup(team2)\n expRun1 = expectedRuns(lineup1)\n expRun2 = expectedRuns(lineup2)\n p = 0\n for i in range(1, 21):\n for j in range(0, i):\n p += expRun1[i] * expRun2[j]\n print('\\n\\nProbability that '+team1.name+' beats '+team2.name+' is about '+str(round(100*p, 2))+'%.\\n\\n')\n return round(100*p, 2)", "def test_approach(approach: str, sample_no: int, func, args: tuple) -> str:\n res = f\"{approach.capitalize()} Programming Approach for the Example #{sample_no}\\n\"\n start = time.time()+1\n soln = func(*args)\n time.sleep(1)\n res += '%d\\nTotal run time %.5f\\n' % (\n soln, time.time()-start)\n return res", "def runf1test(self):\n return self.runtest(self.f1) == \"pass\"", "def test2_happy_path_test_prep(self):\n \n test_config = TestPlanConfig(r'configs/test_plan/happy_path.xml', \n r'../src/bespoke/xsd/test_plan.xsd',\n self.builds,\n self.tools,\n self.resources)\n #Element 0\n test_prep_0 = test_config['Happy_Test_Case_1']._tests[0]\n 
self.assertEqual(test_prep_0.name, 'Test_System_1', 'Incorrect TestPrep name!')\n self.assertEqual(test_prep_0.sut.alias, 'Windows_VM', 'Incorrect SUT name!')\n self.assertEqual(test_prep_0._checkpoint, 'ReadyToAutoTest', 'Incorrect checkpoint!')\n self.assertEqual(test_prep_0._post_wait, 5, 'Incorrect postwait!')\n self.assertEqual(test_prep_0._timeout, 600, 'Incorrect timeout!')\n \n #Element 5\n test_prep_5 = test_config['Happy_Test_Case_1']._tests[5]\n self.assertEqual(test_prep_5.name, 'Test_System_2', 'Incorrect TestPrep name!')\n self.assertEqual(test_prep_5.sut.alias, 'CentOS_VM', 'Incorrect SUT name!')\n self.assertEqual(test_prep_5._checkpoint, 'StartTesting', 'Incorrect checkpoint!')\n self.assertEqual(test_prep_5._post_wait, 8, 'Incorrect postwait!')\n self.assertEqual(test_prep_5._timeout, 599, 'Incorrect timeout!')\n \n #Element 8\n test_prep_8 = test_config['Happy_Test_Case_1']._tests[8]\n self.assertEqual(test_prep_8.name, 'Test_System_3', 'Incorrect TestPrep name!')\n self.assertEqual(test_prep_8.sut.alias, 'Ubuntu_VM', 'Incorrect SUT name!')\n self.assertEqual(test_prep_8._checkpoint, 'TestNow', 'Incorrect checkpoint!')\n self.assertEqual(test_prep_8._post_wait, 123124, 'Incorrect postwait!')\n self.assertEqual(test_prep_8._timeout, 2, 'Incorrect timeout!')\n \n #Element 12\n test_prep_12 = test_config['Happy_Test_Case_1']._tests[12]\n self.assertEqual(test_prep_12.name, 'Test_System_1', 'Incorrect TestPrep name!')\n self.assertEqual(test_prep_12.sut.alias, 'Windows_VM', 'Incorrect SUT name!')\n self.assertEqual(test_prep_12._checkpoint, 'ReadyToAutoTest', 'Incorrect checkpoint!')\n self.assertEqual(test_prep_12._post_wait, 5, 'Incorrect postwait!')\n self.assertEqual(test_prep_12._timeout, 600, 'Incorrect timeout!')\n \n #Element 15\n test_prep_15 = test_config['Happy_Test_Case_1']._tests[5]\n self.assertEqual(test_prep_15.name, 'Test_System_2', 'Incorrect TestPrep name!')\n self.assertEqual(test_prep_15.sut.alias, 'CentOS_VM', 'Incorrect SUT name!')\n self.assertEqual(test_prep_15._checkpoint, 'StartTesting', 'Incorrect checkpoint!')\n self.assertEqual(test_prep_15._post_wait, 8, 'Incorrect postwait!')\n self.assertEqual(test_prep_15._timeout, 599, 'Incorrect timeout!')", "def execute(self):\n print 'PSP Exercise 9A'\n print 'This program performs the chi-squared test on data given.'\n print\n file_path = self.get_file_name()\n data = io.read_csv_file(file_path)\n test_data = self.get_test_column(data)\n q_val, p_val = chi_squared.ChiSquaredTest().execute(test_data)\n print 'Q: ', q_val\n print 'P: ', 1.0 - p_val", "def run_test_problem1b():\n print()\n print('--------------------------------------------------')\n print('Testing the problem1b function:')\n print(' See the graphics windows that pop up.')\n print('--------------------------------------------------')\n\n # TWO tests on ONE window.\n title = 'Test 1, followed by Test 2, of problem1b'\n window = rg.RoseWindow(300, 300, title)\n\n p = rg.Point(60, 50)\n problem1b(p, window, 100, 150, 'pink')\n window.continue_on_mouse_click()\n\n p = rg.Point(200, 100)\n problem1b(p, window, 200, 100, 'black')\n window.close_on_mouse_click()\n\n title = 'Test 3 of problem1b'\n window = rg.RoseWindow(300, 200, title)\n\n p = rg.Point(100, 50)\n problem1b(p, window, 150, 150, 'purple')\n window.close_on_mouse_click()", "def evaluate(self,correctResultMap):\n if self.fitness is None:\n self.toVerilog(self.verilogFilePath, self.moduleName)\n #change the arguments on the line below or it will not toVerilog\n testOutput = 
testScootBotOrganism(\n self.verilogFilePath,\n 'TestCode',\n self.numInputs,\n self.numOutputs,\n self.moduleName,\n writeSim=False,\n clearFiles=False)\n\n self.fitness = self.fitnessFunction(testOutput)\n \n #print self.fitness\n #print testOutput\n #print \"fitness: \", self.fitness\n #raw_input(\"press enter yo\")\n \n return self.fitness", "def __evaluateGame__(self):\n # Obtain Result of Round (dictated by __rules__)\n result = self.__rules__(player1_choice=self._humanPlayer._latestChoice,\n player2_choice=self._computerPlayer._latestChoice)\n\n # Interpret Outcome of Round in terms of computer and human\n if result == \"Player 1\":\n # Update Results for Computer and Human\n self._humanPlayer._results[0] += 1\n self._computerPlayer._results[1] += 1\n\n # Report Round Outcome to User\n print(\"Congratulations, you won this round!\")\n\n elif result == \"Player 2\":\n # Update Results for Computer and Human\n self._humanPlayer._results[1] += 1\n self._computerPlayer._results[0] += 1\n\n # Report Round Outcome to User\n print(\"Sorry, the Computer won this round. Try Again!\")\n\n else:\n # Update Results for Computer and Human\n self._humanPlayer._results[2] += 1\n self._computerPlayer._results[2] += 1\n\n # Report Round Outcome to User\n print(\"This round's a Tie!\")", "def _prove(self, goal=None, assumptions=None, verbose=False):\n if not assumptions:\n assumptions = []\n\n stdout, returncode = self._call_prover9(\n self.prover9_input(goal, assumptions), verbose=verbose\n )\n return (returncode == 0, stdout)", "def run_tests():\n guitar_1 = Guitar(\"Gibson L-5 CES\", 1922, 16035.40)\n guitar_2 = Guitar(\"Another Guitar\", 2013, 1512.9)\n\n print(\"{} get_age() - Expected {}. Got {}\".format(guitar_1.name, 98,\n guitar_1.get_age()))\n print(\"{} get_age() - Expected {}. Got {}\".format(guitar_2.name, 7,\n guitar_2.get_age()))\n print()\n print(\"{} is_vintage() - Expected {}. Got {}\".format(guitar_1.name,\n True,\n guitar_1.is_vintage()))\n print(\"{} is_vintage() - Expected {}. 
Got {}\".format(guitar_2.name,\n False,\n guitar_2.is_vintage()))", "def execute_one_game(self, pac_expr_tree):\n # Pick a new map and set up a new game state.\n game_map = self.experiment.pre_loaded_maps[random.randint(0, 99)]\n self.experiment.world_data = []\n game_state = GameState(game_map,\n self.experiment.pill_density,\n self.experiment.time_multiplier,\n self.experiment.fruit_spawning_probability,\n self.experiment.fruit_score,\n self.experiment.num_pacs,\n self.experiment.num_ghosts)\n game_state.write_world_config(self.experiment.world_data)\n game_state.write_world_time_score(self.experiment.world_data)\n\n # Create a new Pac controller\n self.pac_controllers[0] = PacController(0, pac_expr_tree)\n\n # While the game isn't over, play game turns.\n game_over = False\n while (not game_over):\n game_over = game_state.play_turn(self.experiment.world_data,\n self.pac_controllers,\n self.ghost_controllers)\n\n # Implement parsimony pressure\n fitness = 0\n if (self.parsimony_technique == 'size'):\n fitness = game_state.score - (self.pppc * pac_expr_tree.root.size)\n else:\n fitness = game_state.score - (self.pppc * pac_expr_tree.root.height)\n\n return fitness, game_state.score", "def do_stat_test(self, comparison=valid_comparisons[0]):\n if comparison not in self.valid_comparisons:\n raise ValueError(\"Invalid comparison: see self.valid_comparison\")\n\n phos_res = run_protein(self.phospho_norm, comparison[0], comparison[1])\n self.phospho_results[comparison] = phos_res\n\n return phos_res", "def _grade_test(self, test):\n utils.underline('Running tests for ' + test.name)\n print()\n if test['note']:\n print(test['note'])\n total_passed = grade(test, self.logger, self.args.interactive,\n self.args.verbose)\n\n total_cases = test.num_cases\n if total_cases > 0:\n print('== {} ({}%) cases passed for {} =='.format(total_passed,\n round(100 * total_passed / total_cases, 2),\n test.name))\n if test.num_locked > 0:\n print('-- There are still {} locked test cases.'.format(test.num_locked) + \\\n ' Use the -u flag to unlock them. --')\n print()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the sorting function works for empty list
def test_empty():
    empty_list = []
    assert bubble_sort(empty_list) == empty_list
[ "def test_default_sort(self):\n self.run_on_lists(default_sort)", "def test_unsortedlist(self) -> None:\n test_list = [9, 7, 5, 2, 4, 5, 3, 3, 2, 1, 10, 200]\n\n actual = self.algorithm(test_list)\n expected = sorted(test_list)\n\n assert actual == expected", "def test_quick_sort_on_empty_list():\n from quick import quick_sort\n assert quick_sort([]) == []", "def test_impossible_basic():\n assert sort_arr([3, 1, 2]) == ()", "def test_radix_sort_on_empty_list():\n from radix import radix_sort\n assert radix_sort([]) == []", "def test_unsortedlist(self) -> None:\n test_list = [9, 7, 5, 2, 4, 5, 3, 3, 2, 1, 10, 200]\n\n expected = sorted(test_list.copy())\n self.algorithm(test_list)\n actual = test_list.copy()\n\n assert actual == expected", "def test_impossible_trivial():\n assert sort_arr([3, 1, 2]) == ()", "def test_sortedlist(self) -> None:\n test_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n actual = self.algorithm(test_list)\n expected = sorted(test_list)\n\n assert actual == expected", "def test_sortedlist(self) -> None:\n test_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n expected = sorted(test_list.copy())\n self.algorithm(test_list)\n actual = test_list.copy()\n\n assert actual == expected", "def test_sort_list_should_not_change_input_list():\n simple = [\"bb\", \"cc\", \"dd\"]\n correct = simple.copy()\n assert filter.sort_list(correct) == correct", "def test_sorts_list_of_nums(self):\n result = quick_sort([5, 3, 8, 2, 9])\n self.assertEqual(result, [2, 3, 5, 8, 9])", "def test_advanced_trivial():\n assert sort_arr([43, 65, 1, 98, 99, 101]) == ()", "def test_merge_sort():\n assert myFunction.merge_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5], 'incorrect'\n assert myFunction.merge_sort([10, 91, 2, 13, 3, 1]) == [1, 2, 3, 10, 13, 91], 'incorrect'", "def test_list_default_ordering(self) -> None:\n raise NotImplementedError()", "def test_quick_sort_sorts_small_list():\n from quick import quick_sort\n assert quick_sort([4, 10, 7, 1, 9]) == [1, 4, 7, 9, 10]", "def test_quick_sort_on_one_item_list():\n from quick import quick_sort\n assert quick_sort([5]) == [5]", "def test_sort_a_given_array(self):\n # act\n res = heap.sort(self.arr[:])\n\n # assert\n self.assertTrue(helper.is_sorted(res))", "def test_sort_sorted():\n\n data = [1, 2, 3]\n sorted_data = bubble_sort(data)\n\n assert data == sorted_data", "def test_itersorted(self):\n sorted_from_structure = list(self.structure.itersorted())\n sorted_from_list = list(sorted(self.structure, key = lambda a: a.element))\n\n self.assertListEqual(sorted_from_structure, sorted_from_list)", "def test_original_unchanged():\n\n data = [3, 2, 1]\n bubble_sort(data)\n\n assert data == [3, 2, 1]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the sorting function works for a single-element list
def test_single():
    single_element_list = [2]
    assert bubble_sort(single_element_list) == single_element_list
[ "def test_single():\n\n single_element_list = [5]\n\n assert single_element_list == bubble_sort(single_element_list)", "def test_sortedlist(self) -> None:\n test_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n actual = self.algorithm(test_list)\n expected = sorted(test_list)\n\n assert actual == expected", "def test_sorts_list_of_nums(self):\n result = quick_sort([5, 3, 8, 2, 9])\n self.assertEqual(result, [2, 3, 5, 8, 9])", "def test_sortedlist(self) -> None:\n test_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n expected = sorted(test_list.copy())\n self.algorithm(test_list)\n actual = test_list.copy()\n\n assert actual == expected", "def test_quick_sort_on_one_item_list():\n from quick import quick_sort\n assert quick_sort([5]) == [5]", "def test_unsortedlist(self) -> None:\n test_list = [9, 7, 5, 2, 4, 5, 3, 3, 2, 1, 10, 200]\n\n actual = self.algorithm(test_list)\n expected = sorted(test_list)\n\n assert actual == expected", "def test_itersorted(self):\n sorted_from_structure = list(self.structure.itersorted())\n sorted_from_list = list(sorted(self.structure, key = lambda a: a.element))\n\n self.assertListEqual(sorted_from_structure, sorted_from_list)", "def test_quick_sort_sorts_small_list():\n from quick import quick_sort\n assert quick_sort([4, 10, 7, 1, 9]) == [1, 4, 7, 9, 10]", "def test_unsortedlist(self) -> None:\n test_list = [9, 7, 5, 2, 4, 5, 3, 3, 2, 1, 10, 200]\n\n expected = sorted(test_list.copy())\n self.algorithm(test_list)\n actual = test_list.copy()\n\n assert actual == expected", "def test_sort_list_should_not_change_input_list():\n simple = [\"bb\", \"cc\", \"dd\"]\n correct = simple.copy()\n assert filter.sort_list(correct) == correct", "def test_radix_sort_on_one_item_list():\n from radix import radix_sort\n assert radix_sort([5]) == [5]", "def test_default_sort(self):\n self.run_on_lists(default_sort)", "def test_sort_sorted():\n\n data = [1, 2, 3]\n sorted_data = bubble_sort(data)\n\n assert data == sorted_data", "def test_sort_sorted():\n data = [1, 2, 3, 4, 6, 9]\n sorted_data = bubble_sort(data)\n for small, large in zip(sorted_data[:-1], sorted_data[1:]):\n assert small < large", "def test_sort_a_given_array(self):\n # act\n res = heap.sort(self.arr[:])\n\n # assert\n self.assertTrue(helper.is_sorted(res))", "def test_radix_sort_sorts_small_list():\n from radix import radix_sort\n assert radix_sort([4, 10, 7, 1, 9]) == [1, 4, 7, 9, 10]", "def test_sort_list(self):\n top_stream_id, featured_li = twitch.views.get_featured_info()\n ret_feature_li = twitch.views.sort_list(featured_li, \"3\")\n base_streamer = ret_feature_li[0][3]\n for i in range(len(ret_feature_li)):\n bool_statement = base_streamer >= ret_feature_li[i][3]\n self.assertEqual(bool_statement, True)\n base_streamer = ret_feature_li[i][3]\n ret_feature_li = twitch.views.sort_list(featured_li, \"1\") \n #test id in order\n base_streamer_name = ret_feature_li[0][1]\n for i in range(len(ret_feature_li)):\n bool_statement = base_streamer_name.lower() <= \\\n ret_feature_li[i][1].lower()\n self.assertEqual(bool_statement, True)\n base_streamer_name = ret_feature_li[i][1]", "def test_merge_sort():\n assert myFunction.merge_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5], 'incorrect'\n assert myFunction.merge_sort([10, 91, 2, 13, 3, 1]) == [1, 2, 3, 10, 13, 91], 'incorrect'", "def test_quick_sort_with_medium_lists(unsorted_l, sorted_l):\n from quick import quick_sort\n assert quick_sort(unsorted_l) == sorted_l" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that sorting leaves the original data unchanged.
def test_original_unchanged():
    data = [3, 2, 1]
    sorted_data = bubble_sort(data)
    assert data == [3, 2, 1] and sorted_data != [3, 2, 1]
[ "def test_original_unchanged():\n\n data = [3, 2, 1]\n bubble_sort(data)\n\n assert data == [3, 2, 1]", "def test_sort_list_should_not_change_input_list():\n simple = [\"bb\", \"cc\", \"dd\"]\n correct = simple.copy()\n assert filter.sort_list(correct) == correct", "def test_sort_sorted():\n\n data = [1, 2, 3]\n sorted_data = bubble_sort(data)\n\n assert data == sorted_data", "def test_sort_all_equal():\n\n data = [2, 2, 2, 2]\n\n sorted_data = bubble_sort(data)\n\n assert sorted_data == [2, 2, 2, 2]", "def test(sort):\n print \"Testing sort functionality for {}...\".format(sort.__name__)\n\n # Shuffle the data, testing various ways to sort\n data = nprnd.randint(-1000, 1000, size=1000)\n assert sorted(data) == sort(data)\n\n shuffle(data)\n assert sorted(data, reverse=True) == sort(data, reverse=True)\n\n shuffle(data)\n assert lists_equal(sorted(data, key=abs), sort(data, key=abs), key=abs)\n\n print \"Test succeeded!\"", "def test_sort_all_equal():\n data = [9, 2, 3, 5, 8, 9]\n sorted_data = bubble_sort(data)\n assert sorted_data == [2, 3, 5, 8, 9, 9]", "def test(sort, sequence, count=100):\n builtin = sorted(sequence)\n copy = sequence[:]\n testrun = sort(copy)\n recieved = testrun if testrun else copy\n template = \"{} sort worked incorrectly. \\nRecieved: {} \\nExpected: {}\"\n error_message = template.format(sort.__name__, recieved, builtin)\n assert (testrun == builtin or copy == builtin), error_message\n time = timeit(lambda: sort(sequence[:]),number=count)/count\n print(\"{:>7.5f} : {}\".format(time, sort.__name__))", "def test_unsortedlist(self) -> None:\n test_list = [9, 7, 5, 2, 4, 5, 3, 3, 2, 1, 10, 200]\n\n expected = sorted(test_list.copy())\n self.algorithm(test_list)\n actual = test_list.copy()\n\n assert actual == expected", "def test_sortedlist(self) -> None:\n test_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n expected = sorted(test_list.copy())\n self.algorithm(test_list)\n actual = test_list.copy()\n\n assert actual == expected", "def test_unsortedlist(self) -> None:\n test_list = [9, 7, 5, 2, 4, 5, 3, 3, 2, 1, 10, 200]\n\n actual = self.algorithm(test_list)\n expected = sorted(test_list)\n\n assert actual == expected", "def test_swap_trivial():\n assert sort_arr([4, 2]) == (\"swap\", 1, 2)", "def test_sort_a_given_array(self):\n # act\n res = heap.sort(self.arr[:])\n\n # assert\n self.assertTrue(helper.is_sorted(res))", "def test_swap_basic2():\n assert sort_arr([1, 3, 2]) == (\"swap\", 2, 3)", "def test_sort_sorted():\n data = [1, 2, 3, 4, 6, 9]\n sorted_data = bubble_sort(data)\n for small, large in zip(sorted_data[:-1], sorted_data[1:]):\n assert small < large", "def test_impossible_basic():\n assert sort_arr([3, 1, 2]) == ()", "def test_impossible_trivial():\n assert sort_arr([3, 1, 2]) == ()", "def test_default_sort(self):\n self.run_on_lists(default_sort)", "def test_sort( self ) :\n\n t = [ self.v1, self.v10, self.v20, self.v11, self.v111,\n self.v12, self.vbig ]\n t.sort( key=components )\n for i in range( len( t ) - 2 ) :\n self.assertNotEqual( 1, verscmp( t[ i ], t[ i+1 ] ) )", "def test_small_merge_sort():\n assert mergesort([1, 2, 3]) == [1, 2, 3]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that sorting works on sorted data.
def test_sort_sorted():
    data = [1, 2, 3, 4, 6, 9]
    sorted_data = bubble_sort(data)
    for small, large in zip(sorted_data[:-1], sorted_data[1:]):
        assert small < large
[ "def test_sort_sorted():\n\n data = [1, 2, 3]\n sorted_data = bubble_sort(data)\n\n assert data == sorted_data", "def test(sort):\n print \"Testing sort functionality for {}...\".format(sort.__name__)\n\n # Shuffle the data, testing various ways to sort\n data = nprnd.randint(-1000, 1000, size=1000)\n assert sorted(data) == sort(data)\n\n shuffle(data)\n assert sorted(data, reverse=True) == sort(data, reverse=True)\n\n shuffle(data)\n assert lists_equal(sorted(data, key=abs), sort(data, key=abs), key=abs)\n\n print \"Test succeeded!\"", "def test_sort_a_given_array(self):\n # act\n res = heap.sort(self.arr[:])\n\n # assert\n self.assertTrue(helper.is_sorted(res))", "def test_sort_all_equal():\n\n data = [2, 2, 2, 2]\n\n sorted_data = bubble_sort(data)\n\n assert sorted_data == [2, 2, 2, 2]", "def test_sort_all_equal():\n data = [9, 2, 3, 5, 8, 9]\n sorted_data = bubble_sort(data)\n assert sorted_data == [2, 3, 5, 8, 9, 9]", "def test_sortedlist(self) -> None:\n test_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n actual = self.algorithm(test_list)\n expected = sorted(test_list)\n\n assert actual == expected", "def test(sort, sequence, count=100):\n builtin = sorted(sequence)\n copy = sequence[:]\n testrun = sort(copy)\n recieved = testrun if testrun else copy\n template = \"{} sort worked incorrectly. \\nRecieved: {} \\nExpected: {}\"\n error_message = template.format(sort.__name__, recieved, builtin)\n assert (testrun == builtin or copy == builtin), error_message\n time = timeit(lambda: sort(sequence[:]),number=count)/count\n print(\"{:>7.5f} : {}\".format(time, sort.__name__))", "def test_merge_sort():\n assert myFunction.merge_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5], 'incorrect'\n assert myFunction.merge_sort([10, 91, 2, 13, 3, 1]) == [1, 2, 3, 10, 13, 91], 'incorrect'", "def test_sortedlist(self) -> None:\n test_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n expected = sorted(test_list.copy())\n self.algorithm(test_list)\n actual = test_list.copy()\n\n assert actual == expected", "def test_sorts_list_of_nums(self):\n result = quick_sort([5, 3, 8, 2, 9])\n self.assertEqual(result, [2, 3, 5, 8, 9])", "def test_small_merge_sort():\n assert mergesort([1, 2, 3]) == [1, 2, 3]", "def test_sort_dataset(self):\n # Create a new dataset\n fh = self.filestore.upload_file(SORT_FILE)\n ds = self.api.load_dataset(\n datastore=self.datastore,\n filestore=self.filestore,\n file_id=fh.identifier\n ).dataset\n result = self.api.sort_dataset(ds.identifier, [1, 2, 0], [False, False, True], self.datastore)\n ds = self.datastore.get_dataset(result.dataset.identifier)\n rows = ds.fetch_rows()\n names = ['Alice', 'Bob', 'Dave', 'Gertrud', 'Frank']\n result = list()\n for row in rows:\n name = row.values[0]\n if name in names:\n result.append(name)\n for i in range(len(names)):\n self.assertEqual(names[i], result[i])\n result = self.api.sort_dataset(ds.identifier, [2, 1, 0], [True, False, True], self.datastore)\n ds = self.datastore.get_dataset(result.dataset.identifier)\n rows = ds.fetch_rows()\n names = ['Gertrud', 'Frank', 'Bob', 'Alice', 'Dave']\n result = list()\n for row in rows:\n name = row.values[0]\n if name in names:\n result.append(name)\n for i in range(len(names)):\n self.assertEqual(names[i], result[i])\n # Raises error for invalid column identifier\n with self.assertRaises(ValueError):\n self.api.sort_dataset(ds.identifier, [2, 10, 0], [True, False, True], self.datastore)", "def test_sort( self ) :\n\n t = [ self.v1, self.v10, self.v20, self.v11, self.v111,\n self.v12, self.vbig ]\n t.sort( 
key=components )\n for i in range( len( t ) - 2 ) :\n self.assertNotEqual( 1, verscmp( t[ i ], t[ i+1 ] ) )", "def test_original_unchanged():\n\n data = [3, 2, 1]\n bubble_sort(data)\n\n assert data == [3, 2, 1]", "def test_sort_reversed():\n data = [9, 6, 4, 3, 2, 1]\n sorted_data = bubble_sort(data)\n for small, large in zip(sorted_data[:-1], sorted_data[1:]):\n assert small < large", "def test_quick_sort_sorts_small_list():\n from quick import quick_sort\n assert quick_sort([4, 10, 7, 1, 9]) == [1, 4, 7, 9, 10]", "def test_advanced_trivial():\n assert sort_arr([43, 65, 1, 98, 99, 101]) == ()", "def test_default_sort(self):\n self.run_on_lists(default_sort)", "def test_impossible_basic():\n assert sort_arr([3, 1, 2]) == ()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that sorting works on reverse-sorted data.
def test_sort_reversed():
    data = [9, 6, 4, 3, 2, 1]
    sorted_data = bubble_sort(data)
    for small, large in zip(sorted_data[:-1], sorted_data[1:]):
        assert small < large
[ "def test_reverse_basic():\n assert sort_arr([1, 5, 4, 3, 2, 6]) == (\"reverse\", 2, 5)", "def test_sort_sorted():\n\n data = [1, 2, 3]\n sorted_data = bubble_sort(data)\n\n assert data == sorted_data", "def test_original_unchanged():\n\n data = [3, 2, 1]\n bubble_sort(data)\n\n assert data == [3, 2, 1]", "def test_reversedlist(self) -> None:\n test_list = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]\n\n actual = self.algorithm(test_list)\n expected = sorted(test_list)\n\n assert actual == expected", "def test(sort):\n print \"Testing sort functionality for {}...\".format(sort.__name__)\n\n # Shuffle the data, testing various ways to sort\n data = nprnd.randint(-1000, 1000, size=1000)\n assert sorted(data) == sort(data)\n\n shuffle(data)\n assert sorted(data, reverse=True) == sort(data, reverse=True)\n\n shuffle(data)\n assert lists_equal(sorted(data, key=abs), sort(data, key=abs), key=abs)\n\n print \"Test succeeded!\"", "def test_reversedlist(self) -> None:\n test_list = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]\n\n expected = sorted(test_list.copy())\n self.algorithm(test_list)\n actual = test_list.copy()\n\n assert actual == expected", "def test_original_unchanged():\n data = [3, 2, 1]\n sorted_data = bubble_sort(data)\n assert data == [3, 2, 1] and sorted_data != [3, 2, 1]", "def test_key_reverse(self) -> None:\n test_list = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]\n\n self.algorithm(test_list, (lambda x: -x))\n actual = test_list.copy()\n expected = sorted(test_list.copy(), key=(lambda x: -x))\n\n assert actual == expected", "def test_key_reverse(self) -> None:\n test_list = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]\n\n actual = self.algorithm(test_list, (lambda x: -x))\n expected = sorted(test_list, key=(lambda x: -x))\n\n assert actual == expected", "def test_sort_all_equal():\n data = [9, 2, 3, 5, 8, 9]\n sorted_data = bubble_sort(data)\n assert sorted_data == [2, 3, 5, 8, 9, 9]", "def test_sort_all_equal():\n\n data = [2, 2, 2, 2]\n\n sorted_data = bubble_sort(data)\n\n assert sorted_data == [2, 2, 2, 2]", "def test_descending_true(self):\n actual = self.view001(descending=True)['rows']\n expected = [{'key': 'julia{0:03d}'.format(x), \n 'id': 'julia{0:03d}'.format(x),\n 'value': 1} for x in range(100)]\n self.assertEqual(actual, list(reversed(expected)))", "def test_sort_sorted():\n data = [1, 2, 3, 4, 6, 9]\n sorted_data = bubble_sort(data)\n for small, large in zip(sorted_data[:-1], sorted_data[1:]):\n assert small < large", "def test(sort, sequence, count=100):\n builtin = sorted(sequence)\n copy = sequence[:]\n testrun = sort(copy)\n recieved = testrun if testrun else copy\n template = \"{} sort worked incorrectly. 
\\nRecieved: {} \\nExpected: {}\"\n error_message = template.format(sort.__name__, recieved, builtin)\n assert (testrun == builtin or copy == builtin), error_message\n time = timeit(lambda: sort(sequence[:]),number=count)/count\n print(\"{:>7.5f} : {}\".format(time, sort.__name__))", "def test_sort_a_given_array(self):\n # act\n res = heap.sort(self.arr[:])\n\n # assert\n self.assertTrue(helper.is_sorted(res))", "def sort_reverse(list_of_integers):", "def test_radix_sort_sorts_big_list():\n from radix import radix_sort\n from random import shuffle\n big_list = list(range(100))\n shuffle(big_list)\n assert radix_sort(big_list) == list(range(100))", "def test_swap_basic2():\n assert sort_arr([1, 3, 2]) == (\"swap\", 2, 3)", "def test_sort( self ) :\n\n t = [ self.v1, self.v10, self.v20, self.v11, self.v111,\n self.v12, self.vbig ]\n t.sort( key=components )\n for i in range( len( t ) - 2 ) :\n self.assertNotEqual( 1, verscmp( t[ i ], t[ i+1 ] ) )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that sorting handles data containing duplicate elements.
def test_sort_all_equal(): data = [9, 2, 3, 5, 8, 9] sorted_data = bubble_sort(data) assert sorted_data == [2, 3, 5, 8, 9, 9]
[ "def test_sort_all_equal():\n\n data = [2, 2, 2, 2]\n\n sorted_data = bubble_sort(data)\n\n assert sorted_data == [2, 2, 2, 2]", "def test_sort_sorted():\n\n data = [1, 2, 3]\n sorted_data = bubble_sort(data)\n\n assert data == sorted_data", "def test_original_unchanged():\n data = [3, 2, 1]\n sorted_data = bubble_sort(data)\n assert data == [3, 2, 1] and sorted_data != [3, 2, 1]", "def test_original_unchanged():\n\n data = [3, 2, 1]\n bubble_sort(data)\n\n assert data == [3, 2, 1]", "def test_sort_sorted():\n data = [1, 2, 3, 4, 6, 9]\n sorted_data = bubble_sort(data)\n for small, large in zip(sorted_data[:-1], sorted_data[1:]):\n assert small < large", "def test(sort):\n print \"Testing sort functionality for {}...\".format(sort.__name__)\n\n # Shuffle the data, testing various ways to sort\n data = nprnd.randint(-1000, 1000, size=1000)\n assert sorted(data) == sort(data)\n\n shuffle(data)\n assert sorted(data, reverse=True) == sort(data, reverse=True)\n\n shuffle(data)\n assert lists_equal(sorted(data, key=abs), sort(data, key=abs), key=abs)\n\n print \"Test succeeded!\"", "def test_sort( self ) :\n\n t = [ self.v1, self.v10, self.v20, self.v11, self.v111,\n self.v12, self.vbig ]\n t.sort( key=components )\n for i in range( len( t ) - 2 ) :\n self.assertNotEqual( 1, verscmp( t[ i ], t[ i+1 ] ) )", "def test_single():\n single_element_list = [2]\n assert bubble_sort(single_element_list) == single_element_list", "def test_duplicate_items(self):\n argument = is_sorted([2, 3, 3, 5])\n expected = True\n self.assertEqual(expected, argument, \"The list has duplicate values.\")", "def test(sort, sequence, count=100):\n builtin = sorted(sequence)\n copy = sequence[:]\n testrun = sort(copy)\n recieved = testrun if testrun else copy\n template = \"{} sort worked incorrectly. 
\\nRecieved: {} \\nExpected: {}\"\n error_message = template.format(sort.__name__, recieved, builtin)\n assert (testrun == builtin or copy == builtin), error_message\n time = timeit(lambda: sort(sequence[:]),number=count)/count\n print(\"{:>7.5f} : {}\".format(time, sort.__name__))", "def test_small_merge_sort():\n assert mergesort([1, 2, 3]) == [1, 2, 3]", "def test_merge_sort():\n assert myFunction.merge_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5], 'incorrect'\n assert myFunction.merge_sort([10, 91, 2, 13, 3, 1]) == [1, 2, 3, 10, 13, 91], 'incorrect'", "def test_sort_a_given_array(self):\n # act\n res = heap.sort(self.arr[:])\n\n # assert\n self.assertTrue(helper.is_sorted(res))", "def test_single():\n\n single_element_list = [5]\n\n assert single_element_list == bubble_sort(single_element_list)", "def test_sortedlist(self) -> None:\n test_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n expected = sorted(test_list.copy())\n self.algorithm(test_list)\n actual = test_list.copy()\n\n assert actual == expected", "def test_sort_dataset(self):\n # Create a new dataset\n fh = self.filestore.upload_file(SORT_FILE)\n ds = self.api.load_dataset(\n datastore=self.datastore,\n filestore=self.filestore,\n file_id=fh.identifier\n ).dataset\n result = self.api.sort_dataset(ds.identifier, [1, 2, 0], [False, False, True], self.datastore)\n ds = self.datastore.get_dataset(result.dataset.identifier)\n rows = ds.fetch_rows()\n names = ['Alice', 'Bob', 'Dave', 'Gertrud', 'Frank']\n result = list()\n for row in rows:\n name = row.values[0]\n if name in names:\n result.append(name)\n for i in range(len(names)):\n self.assertEqual(names[i], result[i])\n result = self.api.sort_dataset(ds.identifier, [2, 1, 0], [True, False, True], self.datastore)\n ds = self.datastore.get_dataset(result.dataset.identifier)\n rows = ds.fetch_rows()\n names = ['Gertrud', 'Frank', 'Bob', 'Alice', 'Dave']\n result = list()\n for row in rows:\n name = row.values[0]\n if name in names:\n result.append(name)\n for i in range(len(names)):\n self.assertEqual(names[i], result[i])\n # Raises error for invalid column identifier\n with self.assertRaises(ValueError):\n self.api.sort_dataset(ds.identifier, [2, 10, 0], [True, False, True], self.datastore)", "def test_itersorted(self):\n sorted_from_structure = list(self.structure.itersorted())\n sorted_from_list = list(sorted(self.structure, key = lambda a: a.element))\n\n self.assertListEqual(sorted_from_structure, sorted_from_list)", "def test_sort_list_should_not_change_input_list():\n simple = [\"bb\", \"cc\", \"dd\"]\n correct = simple.copy()\n assert filter.sort_list(correct) == correct", "def test_unsortedlist(self) -> None:\n test_list = [9, 7, 5, 2, 4, 5, 3, 3, 2, 1, 10, 200]\n\n expected = sorted(test_list.copy())\n self.algorithm(test_list)\n actual = test_list.copy()\n\n assert actual == expected" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the average success probability of running a specific configuration for the number of plays specified.
def discriminate_channel(configuration: ChannelConfiguration, plays: Optional[int] = 100) -> float: return OneShotCircuit().compute_average_success_probability(configuration, plays)
[ "def probability(self):\r\n batcoins = arrayofcoins(1000) #code can be messed with to make a custom value for x in arrayofcoins(x), but theoretical probability calculation must also be changed\r\n for m in range(0,self.trials):\r\n if batcoins.successnumber() == 1:\r\n self.successruns = self.successruns + 1\r\n return float(self.successruns) / self.trials", "def average_quality(N, trials):\n\n # initialising values\n testList = list(range(50))\n totalQuality = 0\n averageQuality = 0\n\n # for every trial, the program, checks the quality of each shuffled list \n # and adds it to a total quality variable\n for i in range(trials):\n totalQuality = totalQuality + quality(riffle(testList, N))\n\n # the average quality is calculated\n averageQuality = totalQuality / trials\n \n return averageQuality", "def test(numTrials):\n # Your Code Here\n n = 100\n yes = 0\n for i in range(numTrials):\n africa = 0\n europe = 0\n samerica = 0\n asia = 0\n for i in range(n):\n rand = random.random()\n if rand < 0.25:\n africa += 1\n if rand < 0.5 and rand > 0.25:\n europe += 1\n if rand < 0.75 and rand > 0.5:\n samerica += 1\n if rand > 0.75:\n asia += 1\n #print africa, samerica, asia, europe\n if asia >= 30 or africa >= 30 or europe >= 30 or samerica >= 30:\n yes += 1\n prob = float(yes)/float(numTrials)\n return prob", "def run_simulation(self):\n for run in range(self.num_runs):\n for play in range(self.num_plays):\n self.run_and_update(play)\n self.reset()\n average_rewards_per_play = self.rewards_per_play / self.num_runs\n optimal_action_percentage = self.optimal_action_played / self.num_runs\n return average_rewards_per_play, optimal_action_percentage", "def calculate_runs(self, batting_agression, player):\r\n current_runs = np.random.choice(\r\n self.outcomes, size=6, p=self.aggression_values.get(batting_agression))\r\n self.update_scoreboard(player, current_runs)\r\n return current_runs", "def average_num_attempts(data):\n total = 0\n for test_result in data:\n if INCLUDE_NO_SOLUTION_CARDS or test_result.solution != \"No solution\":\n attempts = test_result.attempts\n total += attempts\n return total / len(data)", "def avg_goals_for(self):\n if self.total_played() == 0:\n return 0.0\n\n return float(self.total_goals_for()) / float(self.total_played())", "def average_for_runs(dataset, num_runs, K):\n\tour_total_acc = 0\n\tscikit_total_acc = 0\n\n\t# here we perform random subsampling on the dataset. \n\t# for the user-specified number of runs (say 100), continue the division\n\t# of the dataset into a training set and testing set. 
collect the accuracy\n\t# rate and divide by 100 to compute an overall average \n\tfor i in range(num_runs):\n\t\ttraining_set = []\n\t\ttesting_set = []\n\t\t# randomly splits the dataset into a training and testing set \n\t\t(training_set, testing_set) = split_dataset(dataset, PROBABILITY_TRAINING_SET)\n\t\t# run our nearest-neighbor classifier \n\t\t(acc_ours, error_ours) = nearest_neighbors_implementation(training_set,testing_set,K)\n\t\t# run scikit learn nearest-neighbor classifier \n\t\t(acc_scikit, error_scikit) = nearest_neighbors_scikit(training_set,testing_set,K)\n\t\t# total the accuracy rates (note: the error rate is not used) \n\t\tour_total_acc += acc_ours\n\t\tscikit_total_acc += acc_scikit\n\t\n\t# finally divide by the number of runs to get the overall average \n\treturn(our_total_acc/float(num_runs), scikit_total_acc/float(num_runs))", "def run_avg_results():\n\n # List of logs to be measured (tested)\n items = [\"logs_2017-06-23_14-16-00\",\n \"logs_2017-06-23_14-16-59\",\n \"logs_2017-06-23_14-17-58\",\n \"logs_2017-06-23_14-18-48\",\n \"logs_2017-06-23_14-19-39\"]\n\n results = []\n game = \"2048\"\n evals = 1000\n for item in items:\n prefix = \"C:/Users/Jan/Documents/GitHub/general-ai/Experiments/best_models_repeats/2048/MLP+ES/\"\n postfix = \"/best/best_0.json\"\n file_name = prefix + item + postfix\n logdir = prefix + item\n\n # SELECT PROPER MODEL\n model = MLP.load_from_file(file_name, game)\n # model = EchoState.load_from_file(file_name, game)\n\n # RUN MODEL\n # 2048\n result = run_2048_extended(model, evals)\n\n # MARIO\n # result = eval_mario_winrate(model=model, evals=evals, level=\"spikes\", vis_on=False)\n\n # ALHAMBRA\n # First element is result of our model (rest are original models from previous work)\n # result = eval_alhambra_avg_score(model, evals)[0]\n\n # TORCS\n # For reinforcement learning, please run model separately (tensorflow needs to be restarted)\n results.append(result)\n\n results = np.array(results)\n file_name = \"{}_stats_{}.txt\".format(game, utils.miscellaneous.get_pretty_time())\n with open(file_name, \"w\") as f:\n f.write(\"--GAME {} STATISTICS-- {} trainings of the same model\".format(game.upper(), len(items)))\n f.write(os.linesep)\n f.write(\"Model: {}\".format(model.get_name()))\n f.write(os.linesep)\n f.write(\"Total games: {} (for each model)\".format(evals))\n f.write(os.linesep)\n f.write(\"MAX TEST: {}\".format(np.max(results)))\n f.write(os.linesep)\n f.write(\"AVG TEST: {}\".format(np.mean(results)))\n f.write(os.linesep)\n f.write(\"MIN TEST: {}\".format(np.min(results)))", "def calc_pop_avg(populations, n):\n # Find number of trials\n num_trials = len(populations)\n # Then the average is the sum of the number of bacteria at every trial i for the same single time step n\n # divided by the number of trials\n return sum([populations[i][n] for i in range(num_trials)])/num_trials", "def trial_results(num_trials, event_probs):\n results_dict = {0:0, 1:0, 2:0, 3:0, 4:0}\n for trial in range(num_trials):\n output = run_trial(event_probs)\n results_dict[output] = results_dict[output] + 1\n\n return results_dict", "def average_precision(predictions):\n precisions = []\n correct_predictions = 0\n for i in range(len(predictions)):\n if predictions[i]:\n correct_predictions += 1\n precisions.append(correct_predictions / (i + 1))\n if precisions:\n #return sum(precisions) / len(precisions)\n return mean(precisions)\n return 0", "def getAvgProbability(self, states, actions):\n r = 0\n\n for i in range(len(states)):\n r += 
self.getProbability(states[i], actions[i])\n\n return r / len(states)", "def AVGfitnesingSing(self, chromosome, gen, num):\n avgFitness = 0\n print(\"Chromosome: \")\n print(chromosome)\n for i in range(self.numRun):\n titleRun = \"Training: gen \"+ str(gen + 1)+ \" - #sample \"+ str(num + 1)+ \" - run \"+ str(i + 1)\n g = Genetic(self.r_p, self.gdSidePanel, chromosome, titleRun, True)\n start = time.time()\n score, _, _, _, _, _ = g.run()\n finish = time.time()\n tempo = round(finish - start)\n avgFitness += (score + tempo)\n print(' - Match ' + str(i + 1) + ' Score ' + str(score + tempo))\n return avgFitness / self.numRun", "def general_stats(n, config_path):\n\n max_gen = 1000 # max number of generation before stopping\n best_seed = (-1, max_gen)\n total_steps = []\n accuracies = []\n for seed in range(n):\n random.seed(seed)\n winner_, config_, stats_, acc = run(config_path, max_gen)\n accuracies.append(acc)\n steps = len(stats_.generation_statistics)\n if best_seed[1] > steps:\n best_seed = (seed, steps)\n total_steps.append(steps)\n\n print(\"\\n best seed = {} with {} steps\".format(best_seed[0], best_seed[1]))\n\n plt.hist(total_steps, density=True, bins=30)\n plt.xlabel('number of generations')\n plt.savefig(\"generation histogram.svg\")\n plt.show()\n plt.close()\n\n plt.hist(accuracies, density=True, bins=30)\n plt.xlabel('accuracy on test set')\n plt.savefig(\"accuracy repartition.svg\")\n plt.show()\n plt.close()\n\n print(np.mean(np.array(accuracies)))", "def probability(outcomes):\n chances = {1: 0.1, 2: 0.2, 3: 0.3, 4: 0.15, 5: 0.05, 6: 0.2}\n result = 1\n for outcome in outcomes:\n result *= chances[outcome]\n return result", "def average_search_efficiency(config):\n \n #get parameters of the distributions depending on the chosen model\n if config['model'] == 'powerlaw':\n parameters = [config['beta']]\n #get policy from benchmark model\n policy = get_policy_from_dist(n_max = config['time_ep'], \n func = pdf_powerlaw,\n beta = config['beta']\n )\n \n elif config['model'] == 'double_exp':\n parameters = [config['d_int'], config['d_ext'], config['p']]\n #get policy from benchmark model\n policy = get_policy_from_dist(n_max=config['time_ep'],\n func = pdf_multimode,\n lambdas = np.array(parameters[:2]),\n probs = np.array([parameters[2], 1-parameters[2]])\n )\n \n \n #run the walks in parallel\n efficiencies = walk_from_policy(policy=policy,\n time_ep=config['time_ep'],\n n=config['n'],\n L=config['L'],\n Nt=config['Nt'],\n r=config['r'],\n lc=config['lc'])\n \n #get the mean search efficiency over the walks\n mean_eff = np.mean(efficiencies) \n tune.report(mean_eff = mean_eff)\n \n #save results\n if config['results_path']:\n np.save(config['results_path']+'efficiencies_'+ str([np.round(p, 10) for p in parameters])+'.npy', efficiencies)", "def _get_avg_runtime(self):\n run_time_total = 0\n for run_time in self._run_times:\n run_time_total = run_time_total + run_time\n\n return int(run_time_total / len(self._run_times))", "def simulate():\n\tnp.random.seed(42)\n\tmask = np.asarray([1,2,3]*ceil(N/3))[:N]\n\twon, lost = 0, 0\n\tfor i in range(10**6):\n\t\tdeck = np.asarray(list(range(1,int(N/KINDS)+1))*4)\n\t\tnp.random.shuffle(deck)\n\t\tres = not any(deck == mask)\n\t\tif res:\twon += 1\n\t\telse:\tlost += 1\n\t\t\n\t\tif not i%10**4:\n\t\t\tp_eval = won/(won+lost)\n\t\t\tprint(f\">>> Simulated win probability with {i} games: {100*p_eval:.4f}%\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the reshaped final-state coordinates from a given amplitudes vector.
def _compute_finale_state_vector_coords_reshaped(self, amplitudes_vector: List[float], angles_phase: List[float], n_points_theta: int, n_points_phase: int) -> ResultStatesReshaped: final_state_vector_coords_x = [] final_state_vector_coords_y = [] final_state_vector_coords_z = [] for amplitude in amplitudes_vector: theta_i = np.arccos(amplitude) theta_bloch = 2 * theta_i final_state_vector_coords_z.append(np.cos(theta_bloch)) # Reshaping matrices X, Y and Z in right dimensions to be represented top_z = max(final_state_vector_coords_z) min_z = min(final_state_vector_coords_z) radius = (top_z - min_z) / 2 center_z = 1 - radius for idx, _ in enumerate(final_state_vector_coords_z): new_theta_i = np.arccos((final_state_vector_coords_z[idx] - center_z) / radius) new_phase_i = angles_phase[idx % n_points_phase] final_state_vector_coords_x.append(np.sqrt(radius) * np.sin(new_theta_i) * np.cos(new_phase_i)) final_state_vector_coords_y.append(np.sqrt(radius) * np.sin(new_theta_i) * np.sin(new_phase_i)) return ResultStatesReshaped(reshaped_coords_x=np.reshape(final_state_vector_coords_x, (n_points_theta, n_points_phase)), reshaped_coords_y=np.reshape(final_state_vector_coords_y, (n_points_theta, n_points_phase)), reshaped_coords_z=np.reshape(final_state_vector_coords_z, (n_points_theta, n_points_phase)), center=center_z)
[ "def raw_amp_to_amp(rawamp2d: np.ndarray, probes: np.ndarray):\n return -np.imag(rawamp2d)*np.pi*2*np.pi*probes/4/C.epsilon_0/C.c", "def actuator_coords(self):\n\n mask = np.ones((11, 11), np.bool)\n for i in range(0, 3):\n for j in range(3 - i):\n mask[i, j] = False\n mask = np.bitwise_and(mask, mask[::-1])\n mask = np.bitwise_and(mask, mask[:, ::-1])\n rs = np.stack(np.where(mask)).T - 5\n return rs", "def h(state, landmark, scanner_displacement):\r\n\t\tdx = landmark[0] - (state[0] + scanner_displacement * cos(state[2]))\r\n\t\tdy = landmark[1] - (state[1] + scanner_displacement * sin(state[2]))\r\n\t\tr = sqrt(dx * dx + dy * dy)\r\n\t\talpha = (atan2(dy, dx) - state[2] + pi) % (2*pi) - pi\r\n\t\treturn np.array([r, alpha])", "def observation_model(state):\n x, y, _, _ = state\n diff = MAP_L - np.array([x,y])\n z = [np.hypot(diff[0, 0], diff[0, 1]), np.arctan2(diff[0, 1], diff[0, 0])]\n t = (diff[0, 1]/diff[0, 0]) ** 2 + 1\n h_wrt_x = np.array([[diff[0, 0] / z[0], diff[0, 1] /z[0], 0, 0],\n [-diff[0, 1] / ((diff[0, 0] **2) * t), 1/(diff[0, 0] * t), 0, 0]])\n return [z, h_wrt_x]", "def smooth_interp(self):\n #### 1) Prepare for ROMS grid\n xroms=np.zeros_like(self.lon0) ## xroms: longitude, yroms: latitude\n yroms=np.zeros_like(self.lat0)\n (y,x)=self.lon0.shape\n for i in range(y):\n for j in range(x):\n (yroms[i][j],xroms[i][j])=utm.from_latlon(self.lat0[i][j],self.lon0[i][j])[0:2]\n \n xy_roms = np.vstack((xroms[self.maskss0==1],yroms[self.maskss0==1])).T\n Fuv = interpXYZ(xy_roms,xy_roms, method='kriging')\n \n uroms, vroms = self.combine()\n for tstep in range(self.time_ss.shape[0]):\n utem=np.zeros_like(xroms)\n utem[self.maskss0==1]=Fuv(uroms[self.ind0+tstep,:,:][self.maskss0==1])\n uroms[self.ind0+tstep,:,:]=utem\n \n vtem=np.zeros_like(xroms)\n vtem[self.maskss0==1]=Fuv(vroms[self.ind0+tstep,:,:][self.maskss0==1])\n vroms[self.ind0+tstep,:,:]=vtem\n \n basemap = Basemap(projection='merc',llcrnrlat=self.lat0.min(),urcrnrlat=self.lat0.max(), \\\n llcrnrlon=self.lon0.min(),urcrnrlon=self.lon0.max(),resolution='i')\n fig1 = plt.figure()\n ax = fig1.add_subplot(111)\n \n basemap.drawcoastlines()\n basemap.fillcontinents()\n basemap.drawcountries()\n basemap.drawstates()\n x_rho, y_rho = basemap(self.lon0, self.lat0)\n\n basemap.pcolormesh(x_rho, y_rho, uroms[-2,:,:], vmin=uroms.min(),vmax=uroms.max()) \n plt.show() \n \n #pdb.set_trace()", "def qdToState(qd):\n x = np.zeros(13) #initialize dimensions\n\n x[0:3] = qd.pos\n x[3:6] = qd.vel\n\n Rot = RPYtoRot_ZXY(qd.euler[0], qd.euler[1], qd.euler[2])\n quat = RotToQuat(Rot)\n\n x[6:10] = quat\n x[11:13] = qd.omega\n\n return x", "def cube2latlon_preprocess(x, y, xi, yi):", "def _rotary_encoder_positions_from_fronts(ta, pa, tb, pb):\n p = pb[np.searchsorted(tb, ta) - 1] * pa\n p = np.cumsum(p) / WHEEL_TICKS * np.pi * WHEEL_RADIUS_CM\n return ta, p", "def ComputeAmplitudeAndPhase(self):\n # Compute the residues\n B = np.zeros((self.N, self.M), dtype=self.lam.dtype)\n for i in range(self.N):\n for j in range(self.M):\n B[i,j] = np.abs(self.lam[j])**i*np.exp(1.0j*i*np.angle(self.lam[j]))\n\n r, _, _, _ = la.lstsq(B, self.X)\n\n # Extract amplitudes and phases from residues\n self.amps = np.abs(r)\n self.faze = np.angle(r)\n\n return", "def convHealCoord(nside):\n coord = hp.pix2ang(nside,np.arange(12*nside*nside))\n theta = 90-(rad2deg(coord[0])) # galactic longitude\n phi = rad2deg(coord[1]) # galactic latitude\n ra = np.ndarray(shape=(12*nside*nside),dtype=float)\n dec = np.ndarray(shape=(12*nside*nside),dtype=float)\n for ii in 
range(len(theta)):\n eq_coord = ga2equ([phi[ii],theta[ii]]) # converting to equatorial coordinates\n ra[ii] = deg2rad(eq_coord[0])\n dec[ii] = deg2rad(eq_coord[1])\n\n return ra,dec", "def xyz(pd, times, acc=1.e-12, nmax=10000, stoerm=True):\n\n # compute number of orbits and number of ephemeris terms\n norbit, nephem = norbeph(pd) \n\n mass0 = pd['mass0']\n if pd['integ']:\n # Newtonian\n lrvm = ptolrvm(pd)\n # integrate\n ttime,ntest,eratio,npoint,ierr,tnext,nstore,arr = \\\n integrate(lrvm, times-pd['tstart'], acc=acc, nmax=nmax, stoerm=stoerm)\n ret = [arr[:,1],arr[:,2],arr[:,3]]\n for nb in range(norbit):\n ind = 6*(nb+1)\n ret += [arr[:,ind+1],arr[:,ind+2],arr[:,ind+3]]\n else:\n # Keplerian\n orbs = ptolorb(pd)\n\n x0 = np.zeros_like(times)\n y0 = np.zeros_like(times)\n z0 = np.zeros_like(times)\n ret = []\n for orb in orbs:\n tanom = orb.true(times, acc)\n scale = orb.a*(1-orb.e**2)/(1.+orb.e*np.cos(tanom))\n cto = np.cos(tanom+orb.omega)\n sto = np.sin(tanom+orb.omega)\n x = -scale*(m.sin(orb.Omega)*cto+m.cos(orb.Omega)*m.cos(orb.iangle)*sto)\n y = scale*(m.cos(orb.Omega)*cto-m.sin(orb.Omega)*m.cos(orb.iangle)*sto)\n z = -scale*m.sin(orb.iangle)*sto\n ret += [x,y,z]\n x0 -= orb.k*x\n y0 -= orb.k*y\n z0 -= orb.k*z\n\n ret = [x0,y0,z0] + ret\n\n return ret", "def ens_CM1_C2A(ens, var = 'ALL'):\n \n# Copy data from cell centered surrogate, then average the staggered fields to the centers\n \n t0 = timer()\n \n nx = ens.nx\n ny = ens.ny\n nz = ens.nz\n \n if var.upper() == \"U\" or var.upper() == \"ALL\":\n\n fstate.xyz3d[ens.u_ptr,:,:,:,0] = 0.5*(fstate.u[:,:,:,0] + fstate.u[:,:,:,1])\n fstate.xyz3d[ens.u_ptr,:,:,:,nx-1] = 0.5*(fstate.u[:,:,:,nx-1] + fstate.u[:,:,:,nx])\n fstate.xyz3d[ens.u_ptr,:,:,:,1:nx-1] = (-fstate.u[:,:,:,0:nx-2] + 13.0*fstate.u[:,:,:,1:nx-1] \\\n -fstate.u[:,:,:,3:nx+1] + 13.0*fstate.u[:,:,:,2:nx] ) / 24.0\n \n if var.upper() == \"V\" or var.upper() == \"ALL\":\n\n fstate.xyz3d[ens.v_ptr,:,:,0,:] = 0.5*(fstate.v[:,:,0,:] + fstate.v[:,:,1,:])\n fstate.xyz3d[ens.v_ptr,:,:,ny-1,:] = 0.5*(fstate.v[:,:,ny-1,:] + fstate.v[:,:,ny,:])\n fstate.xyz3d[ens.v_ptr,:,:,1:ny-1,:] = (-fstate.v[:,:,0:ny-2,:] + 13.0*fstate.v[:,:,1:ny-1,:] \\\n -fstate.v[:,:,3:ny+1,:] + 13.0*fstate.v[:,:,2:ny,:] ) / 24.0\n \n if var.upper() == \"W\" or var.upper() == \"ALL\":\n\n fstate.xyz3d[ens.w_ptr,:,0,:,:] = 0.5*(fstate.w[:,0,:,:] + fstate.w[:,1,:,:])\n fstate.xyz3d[ens.w_ptr,:,nz-1,:,:] = 0.5*(fstate.w[:,nz-1,:,:] + fstate.w[:,nz,:,:])\n fstate.xyz3d[ens.w_ptr,:,1:nz-1,:,:] = (-fstate.w[:,0:nz-2,:,:] + 13.0*fstate.w[:,1:nz-1,:,:] \\\n -fstate.w[:,3:nz+1,:,:] + 13.0*fstate.w[:,2:nz,:,:] ) / 24.0\n \n# Create ens variables to point at A-grid velocities\n\n ens.addvariable(\"UA\", data=fstate.xyz3d[ens.u_ptr,:,:,:,:], coords = ('MEMBER,NZ,NY,NX')) \n ens.addvariable(\"VA\", data=fstate.xyz3d[ens.v_ptr,:,:,:,:], coords = ('MEMBER,NZ,NY,NX')) \n ens.addvariable(\"WA\", data=fstate.xyz3d[ens.w_ptr,:,:,:,:], coords = ('MEMBER,NZ,NY,NX')) \n \n if time_all: print(\"\\n Wallclock time to convert from C to A grid:\", round(timer() - t0, 3), \" sec\")\n\n return", "def lifehex(initial_state, nsteps):\n\n nrow = len(initial_state)\n \n for i in range(nrow):\n initial_state[i] = np.array(initial_state[i]) + 0\n \n if len(initial_state[0]) < len(initial_state[1]):\n slength = len(initial_state[0])\n else:\n slength = len(initial_state[1])\n \n edge_1= np.array([0] * (slength + 2))\n \n \n \n \n for n in range(nsteps):\n iterate =[0]* (nrow + 2)\n for i in range(nrow):\n initial_state[i] = 
np.insert(initial_state[i],0,0)\n initial_state[i] = np.append(initial_state[i],0)\n \n initial_state.insert(0,edge_1)\n initial_state.append(edge_1)\n \n for i in range(len(initial_state)):\n iterate[i] = initial_state[i].copy()\n\n\n for i in range(1,nrow+1):\n \n if len(initial_state[i]) == slength + 2:\n \n for j in range(1,len(initial_state[i])-1):\n \n env_live = initial_state[i][j-1] + initial_state[i][j+1] + initial_state[i-1][j] + initial_state[i-1][j+1] +initial_state[i+1][j] +initial_state[i+1][j+1]\n\n if initial_state[i][j] == 1:\n if env_live == 3 or env_live == 5:\n iterate[i][j] = 1\n else:\n iterate[i][j] = 0\n elif initial_state[i][j] == 0:\n if env_live == 2:\n iterate[i][j] = 1\n else:\n iterate[i][j] = 0\n\n elif len(initial_state[i]) == slength +3:\n\n for j in range(1,len(initial_state[i])-1):\n \n env_live = initial_state[i][j-1] + initial_state[i][j+1] + initial_state[i-1][j-1] + initial_state[i-1][j] +initial_state[i+1][j-1] +initial_state[i+1][j]\n\n if initial_state[i][j] == 1:\n if env_live == 3 or env_live == 5:\n iterate[i][j] = 1\n else:\n iterate[i][j] = 0\n elif initial_state[i][j] == 0:\n if env_live == 2:\n iterate[i][j] = 1\n else:\n iterate[i][j] = 0\n \n \n \n del iterate[0]\n del iterate[-1]\n del initial_state[0]\n del initial_state[-1]\n\n for i in range(nrow):\n iterate[i]= np.delete(iterate[i], [0,-1])\n initial_state[i]= np.delete(initial_state[i], [0,-1])\n\n\n \n \n for i in range(nrow):\n initial_state[i] = iterate[i].copy()\n \n\n\n \n \n final_state = [0] * nrow \n for i in range(len(initial_state)):\n final_state[i] = initial_state[i].copy() > 0 \n\n \n # write your code here to replace return statement\n return final_state", "def geodetic2ecef(lon, lat, alt=0):\n lat = np.radians(lat)\n lon = np.radians(lon)\n xi = np.sqrt(1 - ESQ * np.sin(lat))\n x = (A / xi + alt) * np.cos(lat) * np.cos(lon)\n y = (A / xi + alt) * np.cos(lat) * np.sin(lon)\n z = (A / xi * (1 - ESQ) + alt) * np.sin(lat)\n return x, y, z", "def get_phi(self, state):\n tile_coders = []\n feature_vec = numpy.array([]) # where we store our active features\n shift_factor = self.memory_size / (len(state) - 5) # the amount of memory we for each tilecoder\n for i in range(len(state) - 5): # for all the other perceptions\n # decay position other bias\n perception = np.concatenate((state[:5], [state[i+5]])) # add the extra obs to the position obs)\n tile_coders.append(perception)\n\n f = np.array(\n getTiles(\n numtilings=self.num_tilings,\n memctable=shift_factor, # the amount of memory for each tilecoder\n floats=perception)\n )\n f += (i * shift_factor) # shift the tiles by the amount we've added on\n f = sorted(f) # we sort to make our verification simpler\n try:\n if f[0] <= feature_vec[len(feature_vec)-1]:\n print(\"Tilings are clashing.\") # notify that our tilings are overlapping\n raise # fail\n except IndexError:\n pass # the first tiling will have an index error\n\n feature_vec = np.concatenate((f, feature_vec)) # add our tile-coder to the feature vector\n feature_vec = numpy.concatenate(([1],feature_vec))\n if feature_vec[len(feature_vec) - 1] > self.memory_size: # if we're using more memory than we have\n print(\"Exceeding maximum memory\") # notify\n\n return feature_vec, tile_coders", "def make_obs2sfs_folded(snp, ix_normal, anc_ref, anc_alt, max_states=None, states=None):\n\n \"\"\"1. 
create FLIPPED, which is true for SNP that need to be flipped\"\"\"\n FLIPPED = (snp[anc_ref] == 0) & (snp[anc_alt] > 0)\n FLIPPED.reset_index(drop=True, inplace=True)\n snp.reset_index(drop=True, inplace=True)\n\n \"\"\"2. make data1, data2 which are flipped/non-flipped data\"\"\"\n data1 = pd.DataFrame()\n data2 = pd.DataFrame()\n for s in states:\n data1[s] = snp.loc[~FLIPPED, f\"{s}_alt\"] / (\n snp.loc[~FLIPPED, f\"{s}_alt\"] + snp.loc[~FLIPPED, f\"{s}_ref\"]\n )\n data2[s] = snp.loc[FLIPPED, f\"{s}_ref\"] / (\n snp.loc[FLIPPED, f\"{s}_alt\"] + snp.loc[FLIPPED, f\"{s}_ref\"]\n )\n data1[s] = np.nan_to_num(data1[s])\n data2[s] = np.nan_to_num(data2[s])\n m = np.max((snp[f\"{s}_alt\"] + snp[f\"{s}_ref\"]))\n m = m if max_states is None else min((m, max_states))\n data1[s] = np.round(data1[s] * m).astype(np.uint8)\n data2[s] = np.round(data2[s] * m).astype(np.uint8)\n\n data1[f\"{s}_alt\"], data1[f\"{s}_ref\"] = data1[s], m - data1[s]\n data2[f\"{s}_alt\"], data2[f\"{s}_ref\"] = data2[s], m - data2[s]\n del data1[s], data2[s]\n\n data = pd.DataFrame(\n np.vstack((data1.to_numpy(), data2.to_numpy())), columns=data1.columns\n )\n data[~FLIPPED] = data1\n data[FLIPPED] = data2\n sfs = data.drop_duplicates().reset_index(drop=True)\n sfs_dict = dict((tuple(v.values()), k) for (k, v) in sfs.to_dict(\"index\").items())\n data = np.array(data)\n \"\"\"4. use dicts to create SNP2SFS\"\"\"\n SNP2SFS = np.array([sfs_dict[tuple(i)] for i in data], dtype=np.uint16)\n\n return sfs, SNP2SFS, FLIPPED", "def __generate_final_state(self, initial_state: List[int]) -> List[int]:\n final_state = initial_state\n for instruction_pointer in range(0, len(initial_state), 4):\n opcode = final_state[instruction_pointer]\n if opcode == 1 or opcode == 2:\n first_param = final_state[final_state[instruction_pointer + 1]]\n second_param = final_state[final_state[instruction_pointer + 2]]\n third_parameter = final_state[instruction_pointer + 3]\n if opcode == 1:\n final_state[third_parameter] = first_param + second_param\n else:\n final_state[third_parameter] = first_param * second_param\n elif opcode == 99:\n return final_state\n else:\n raise ValueError(f\"Unknown opcode '{opcode}'!!\")\n return final_state", "def _resample(lam_field, lam_x0, x0, step, size, center_lon, n, n_inv, F, rho0):\n # x-coordinate in lon-lat grid is constant over all grid lines\n geox = np.empty(size[1], dtype=np.float64)\n for i in range(size[1]):\n geox[i] = x0[0] + i*step\n \n # memory for coordinates in Lambert space\n mapx = np.empty(size[1], dtype=np.float64)\n mapy = np.empty(size[1], dtype=np.float64)\n \n # memory for the corresponding Lambert grid indices \n indx = np.empty(size[1], dtype=np.int32)\n indy = np.empty(size[1], dtype=np.int32)\n \n # memory for the resulting field in lonlat space\n res_field = np.empty(size, dtype=np.float32)\n \n # for each line in lonlat grid \n for j in range(size[0]):\n # compute corresponding locations in Lambert space\n lambert_conformal.to_map2(geox, j*step + x0[1], mapx, mapy, center_lon, n, n_inv, F, rho0)\n # compute corresponding Lambert grid indices\n mapx -= lam_x0[0]\n mapx /= step\n mapy -= lam_x0[1]\n mapy /= step\n # the corresponding 'i,j'-integer indices of the lower left grid point\n indx[:] = mapx.astype(np.int32)\n indy[:] = mapy.astype(np.int32)\n # and compute bilinear weights\n mapx -= indx # contains now the weights\n mapy -= indy # contains now the weights\n \n # compute bilinear interpolation of the 4 neighboring grid point values \n for i in range(size[1]):\n res_field[j,i] 
= (1.0-mapy[i])*(1.0-mapx[i])*lam_field[indy[i],indx[i]] + \\\n mapy[i]*(1.0-mapx[i])*lam_field[indy[i]+1,indx[i]] + \\\n mapy[i]*mapx[i]*lam_field[indy[i]+1,indx[i]+1] + \\\n (1.0-mapy[i])*mapx[i]*lam_field[indy[i],indx[i]+1]\n \n return res_field", "def pacmanSuccessorStateAxioms(x, y, t, walls_grid):\n # print(walls_grid)\n now_pos = logic.PropSymbolExpr(pacman_str, x, y, t)\n x_pos = [x-1, x+1]\n y_pos = [y-1, y+1]\n\n prev_poses = []\n for new_x in x_pos:\n if walls_grid[new_x][y] == False:\n prev_position = logic.PropSymbolExpr(pacman_str, new_x, y, t-1)\n prev_action = logic.PropSymbolExpr('East', t-1) if new_x == x-1 else logic.PropSymbolExpr('West', t-1)\n prev_poses.append(logic.conjoin(prev_position, prev_action))\n\n for new_y in y_pos:\n if walls_grid[x][new_y] == False:\n prev_position = logic.PropSymbolExpr(pacman_str, x, new_y, t-1)\n prev_action = logic.PropSymbolExpr('North', t-1) if new_y == y-1 else logic.PropSymbolExpr('South', t-1)\n prev_poses.append(logic.conjoin(prev_position, prev_action))\n # print(now_pos % logic.disjoin(prev_poses))\n return(now_pos % logic.disjoin(prev_poses))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test module hotspots2006.py by downloading hotspots2006.csv and checking that the extracted data has 10 rows and 6 columns.
def test_hotspots2006(): test_path = tempfile.mkdtemp() x_train, metadata = hotspots2006(test_path) try: assert x_train.shape == (10, 6) except: shutil.rmtree(test_path) raise()
[ "def test_read_short_data() -> None:\n fo = open('data/short_data.csv', \"r\")\n e = Election(date(2000, 2, 8))\n e.read_results(fo)", "def get_features(year):", "def import_data():\n PROJECT_DIR = path.dirname( path.dirname( path.dirname( __file__ ) ) )\n \n geoplaces = pd.read_csv( \n filepath_or_buffer = path.join( PROJECT_DIR, 'data', 'geoplaces2.csv' ), \n usecols = ['placeID','smoking_area', 'dress_code', 'accessibility', 'price', 'other_services'], \n error_bad_lines = False \n ).dropna()\n \n parking = pd.read_csv( \n filepath_or_buffer = path.join( PROJECT_DIR, 'data', 'chefmozparking.csv' ), \n usecols = ['placeID','parking_lot'], \n error_bad_lines = False \n ).dropna()\n \n rating = pd.read_csv( \n filepath_or_buffer = path.join( PROJECT_DIR, 'data', 'rating_final.csv' ), \n usecols = ['placeID', 'userID', 'rating'], \n error_bad_lines = False \n ).dropna()\n \n # Remove duplicate ratings from the same user about the same restaurant if any and drop userID\n rating = rating.drop_duplicates( ['placeID', 'userID'] ).drop( 'userID', axis=1 )\n \n # INNER JOIN tables on placeID to make a duplicate row for each client rating and parking type\n data = pd.merge( pd.merge( geoplaces, parking, on = 'placeID' ), rating, on = 'placeID' )\n \n return data.drop( 'placeID', axis=1 )", "def test_read_nshort(self):\n\t\t# NUMBER_OF_VALUES = 1889\n\t\t# NUMBER_OF_KEYS = 10\n\t\t# key, value = csv.reader(open(os.path.join(data_path,\n\t\t# \t\"Neighborhoods-Short.csv\")))\n\n\t\t# self.assertEqual(NUMBER_OF_VALUES,)\n\t\t\n\t\t\n\t\t#data type\n\t\t#dataframe dimensions", "def main():\n # The following 6 countries have hotels in the Hotel_Reviews.csv data set. This was found while\n # doing an exploratory analysis of that data set.\n countries = ['france', 'italy', 'spain', 'united kingdom', 'austria', 'netherlands']\n with jsonlines.open('../Data/Cleaned/google_places_cleaned.jsonl', mode='w') as writer:\n with open('../Data/Original/places.original.json', 'r') as testfile:\n # fields: 'name', 'price', 'address', 'hours', 'phone', 'closed', 'gPlusPlaceId', 'gps'\n for line in tqdm(testfile):\n normalised_dict = ast.literal_eval(line)\n joined_address = ', '.join(normalised_dict.get('address'))\n if any(country in joined_address.lower() for country in countries):\n # Country found in the list, find which country it is.\n # GeoText module only detects the city/country if it is capitalised\n geo_address = GeoText(joined_address.title())\n try:\n matching_country = geo_address.countries[0]\n except IndexError:\n matching_country = [country for country in countries if country in joined_address.lower()][0].title()\n try:\n #If it matches 2 cities for some reason, take the one closer to the end of the string.\n matching_city = geo_address.cities[-1]\n except IndexError:\n matching_city = None\n normalised_dict['country'] = matching_country\n normalised_dict['city'] = matching_city\n try:\n # We get a TypeError if gps is None. 
These records can be discarded as we will use\n # latitude and longitude to join the googl places and Hotel reviews files.\n normalised_dict['latitude'] = normalised_dict.get('gps')[0]\n normalised_dict['longitude'] = normalised_dict.get('gps')[1]\n except TypeError:\n continue\n opening_hours_dict = process_hours(normalised_dict.get('hours'))\n # Add the new keys (Monday, Tuesday, ...)\n normalised_dict.update(opening_hours_dict)\n del normalised_dict['closed']\n del normalised_dict['gps']\n del normalised_dict['hours']\n\n # Write to output jsonl file\n writer.write(normalised_dict)", "def geo_info(database, keywords):\r\n count, idlist = search(database, keywords)\r\n\r\n flag = 0\r\n with open(\"idlist.txt\", \"w\", encoding=\"utf-8\") as f:\r\n f.write(str(idlist))\r\n\r\n df_allsamples=pd.DataFrame()\r\n df_allgeos=pd.DataFrame()\r\n print(\"Getting detials of each entry\")\r\n #loop every entry related to the keyword\r\n desc = \"Retriving\"\r\n n_download=int(input (\"{} entries are available, how many to download?\".format(count)))\r\n pbar = tqdm(\r\n total=int(n_download),\r\n initial=0,\r\n unit=\" entry\",\r\n unit_scale=False,\r\n desc=desc,\r\n )\r\n\r\n for id in idlist[0:n_download]:\r\n flag += 1\r\n geo_id, title, summary,link,df_samples, df_geo = get_summary(database, id)\r\n df_allsamples=df_allsamples.append(df_samples,ignore_index=True)\r\n df_allgeos=df_allgeos.append(df_geo,ignore_index=True)\r\n pbar.update(1)\r\n\r\n #save csv files\r\n print(\"Saving all GEO entries\")\r\n df_allgeos.to_csv(\"geos_{}.csv\".format(keywords))\r\n\r\n print(\"Saving all samples\")\r\n df_allsamples.to_csv(\"geosamples_{}.csv\".format(keywords))", "def csv_to_vw(loc_csv, loc_output, train=True):\n start = datetime.now()\n print(\"\\nTurning %s into %s. Is_train_set? 
%s\"%(loc_csv,loc_output,train))\n \n with open(loc_output,\"wb\") as outfile:\n for e, row in enumerate( DictReader(open(loc_csv)) ):\n\t\n\t #Creating the features\n numerical_features = \"\"\n categorical_features = \"\"\n for k,v in row.items():\n if k not in [\"Label\",\"Id\"]:\n if \"I\" in k: # numerical feature, example: I5\n if len(str(v)) > 0: #check for empty values\n numerical_features += \" %s:%s\" % (k,v)\n if \"C\" in k: # categorical feature, example: C2\n if len(str(v)) > 0:\n categorical_features += \" %s\" % v\n\t\t\t \n\t #Creating the labels\t\t \n if train: #we care about labels\n if row['Label'] == \"1\":\n label = 1\n else:\n label = -1 #we set negative label to -1\n outfile.write( \"%s '%s |i%s |c%s\\n\" % (label,row['Id'],numerical_features,categorical_features) )\n\t\t\n else: #we dont care about labels\n outfile.write( \"1 '%s |i%s |c%s\\n\" % (row['Id'],numerical_features,categorical_features) )\n \n\t #Reporting progress\n if e % 1000000 == 0:\n print(\"%s\\t%s\"%(e, str(datetime.now() - start)))\n\n print(\"\\n %s Task execution time:\\n\\t%s\"%(e, str(datetime.now() - start)))", "def load_test_data():\n testDataFile = \"../data/traindata_test.txt\"\n wordToVecDictFile = \"../data/glove/glove.6B.50d.txt\"\n X = FeatureProcessing.testFeatureProcess(testDataFile,wordToVecDictFile,window_size)\n return X", "def get_iterators(self, filename, batch_size, seed, device):\n data_field = [(None, None), (\"overview\", self.text), (\"genre\", self.label)]\n dataset = data.TabularDataset(path=filename, format='csv', fields=data_field,\n skip_header=True)\n train_data, val_data = dataset.split(split_ratio=0.8, random_state=random.seed(seed))\n\n self.text.build_vocab(train_data)\n self.text.vocab.load_vectors('glove.6B.50d')\n\n train_iterator, valid_iterator = data.BucketIterator.splits(\n (train_data, val_data),\n batch_size=batch_size,\n sort_key=lambda x: len(x.overview),\n sort_within_batch=True,\n device=device)\n\n return train_iterator, valid_iterator", "def test_CSV_It(self):\n # data_dir = os.path.join(os.getcwd(), \"VIIRS_Sample\")\n base_instance = CSV_It(input_dir=data_dir, TSdata=\"S*rade9*.tif\", Observations=\"S*cvg*.tif\", Mask=\"S*cvg*.tif\", DateLoc=\"10:18\", BandNum=\"\")\n output = os.path.join(data_dir, 'Test_Raster_List2.csv')\n base_instance.to_csv(output)\n base_instance = pd.read_csv(os.path.join(data_dir, \"Test_Raster_List2.csv\"))\n gdf = pd.read_csv(os.path.join(data_dir, \"Test_Raster_List.csv\"))\n gdf = gdf.sort_values(by=['date'])\n base_instance = base_instance.sort_values(by=['date'])\n base_instance = base_instance[['date', 'extent']]\n gdf = gdf[['date', 'extent']]\n print(gdf['extent'])\n print(base_instance['extent'])\n\n # base_instance.set_index('date')\n # gdf.set_index('date')\n # gdf = gdf.loc[:, ~gdf.columns.str.match('Unnamed')]\n # base_instance = base_instance.loc[:, ~base_instance.columns.str.match('Unnamed')]\n # assert base_instance.ground_truth_sindex.bounds == gdf.sindex.bounds\n # assert base_instance.equals(gpd.GeoDataFrame([]))\n pd.testing.assert_frame_equal(base_instance.reset_index(drop=True), gdf.reset_index(drop=True))\n # assert gdf['extent'].equals(base_instance['extent'])", "def build_solution():\r\n\r\n \"\"\"Reads CSV file and splits in into four datasets: Train, Kaggle public\r\n eaderboard, Kaggle private leaderboard (i.e the test set), and unused.\"\"\"\r\n df = pd.read_csv(\"data/atlas-higgs-challenge-2014-v2.csv\", sep=',')\r\n df_test = df[df['KaggleSet'].isin(('b', 'v'))]\r\n assert(len(df_test) == 
550_000)\r\n df_test = df_test.drop('Weight', axis='columns')\r\n df_test = df_test.rename(columns={'KaggleWeight': 'Weight', 'Label': 'Class'})\r\n df_test.to_csv('data/solution_from_cern.csv', columns=['EventId', 'Class', 'Weight'], index=False)", "def main():\n df = pd.read_csv(\"HW_07_SHOPPING_CART_v137.csv\", header=0)\n df.index = df.ID\n del df['ID']\n global points\n points = {}\n for index, row in df.iterrows():\n # if(index <):\n points[index] = row.tolist()\n global all_clusters, clusters, cluster_number, total_number_of_features\n\n total_number_of_features = len(points[1])\n all_clusters = []\n for index, point in points.items():\n all_clusters.append(Cluster(index))\n all_clusters[index - 1].mean = point\n all_clusters[index - 1].guest_ids.append(index)\n\n cluster_number[len(all_clusters)] = all_clusters\n perform_clustering()\n smallest_cluster()", "def get_recommendation(knn_data):\r\n recommend = hotel[['hotel_id', 'hotel_name']] \r\n recommend['first recommendation'] = '0'\r\n recommend['distance 1'] = 0.000\r\n recommend['second recommendation'] = '0'\r\n recommend['distance 2'] = 0.000\r\n recommend['thrid recommendation'] = '0'\r\n recommend['distance 3'] = 0.000\r\n for i in range(NUM_HOTELS):\r\n each_hotel = knn_data[i]\r\n recommend.at[i, 'first recommendation'] = hotel_id_to_name[num_to_hotel_id[each_hotel[0][1]]]\r\n recommend.at[i, 'distance 1'] = each_hotel[1][1]\r\n recommend.at[i, 'second recommendation'] = hotel_id_to_name[num_to_hotel_id[each_hotel[0][2]]]\r\n recommend.at[i, 'distance 2'] = each_hotel[1][2]\r\n recommend.at[i, 'thrid recommendation'] = hotel_id_to_name[num_to_hotel_id[each_hotel[0][3]]]\r\n recommend.at[i, 'distance 3'] = each_hotel[1][3]\r\n recommend.to_csv('recommend.csv')", "def extract_from_file(filename):\n\n data_table = pd.DataFrame.from_csv(\"data.csv\")\n\n #This is line is just extracting the raw twitter posts\n raw_body_text = data_table['raw_body_text']\n \n #sentiment_category = data_table['sentiment']\n author_follower_count = data_table['author_followers_count']\n is_reshare = data_table['is_reshare']\n loc = data_table['location']\n return raw_body_text,author_follower_count, is_reshare, loc", "def create_pre_match_features(row):\n\n\tv=[] #vector to be populated\n\tv.append(row[\"tourney_date\"])\n\tv.append(row[\"tourney_name\"])\n\n\t#print(\"creating pre-match features for {} vs {}\".format(row[\"winner_name\"],row[\"loser_name\"]))\n\n\tdate=row[\"tourney_date\"]\n\tr=row[\"round\"]\n\tsur=row[\"surface\"]\n\n\t#We consider only matches of present year + past 5 years! So we need the year.\n\tyear=int(str(date)[0:4])\n\n\t#player1 (randomly assigned!)\n\tplayer1=random.choice([row[\"winner_name\"],row[\"loser_name\"]])\n\t#player2\n\tif player1==row[\"winner_name\"]:\n\t\tplayer2=row[\"loser_name\"]\n\telse:\n\t\tplayer2=row[\"winner_name\"]\n\n\tv.append(player1)\n\tv.append(player2)\n\n\t#rank difference\n\t#v.append(row[\"winner_rank\"]-row[\"loser_rank\"]) #Execution for matches.csv had this, but it's WRONG!!! It sets always the winner as player 1! 
\n\t# (Corrected directly in final_csv)\n\tif player1==row[\"winner_name\"]:\n\t\tv.append(row[\"winner_rank\"]-row[\"loser_rank\"])\n\telse:\n\t\tv.append(row[\"loser_rank\"]-row[\"winner_rank\"])\n\n\t#the function retrieve_player_stats should return a dataframe with the average stats of player against each common opponent with the other player\n\tavg_p1=retrieve_player_stats(player1,player2,date,r,sur,year)\n\tavg_p2=retrieve_player_stats(player2,player1,date,r,sur,year)\n\n\t#print(avg_p1)\n\n\t#print(avg_p2)\n\n\t#overall uncertainty of the data at disposal for the past matches of p1 and p2 against their common opponents\n\tif ((avg_p1.shape[0]>0) and (avg_p2.shape[0]>0)):\n\t\ts=0\n\t\t#uncertainty on the match\n\t\tfor i in range(avg_p1.shape[0]):\n\t\t\ts+=(avg_p1.iloc[i][\"data_amount\"]*avg_p2.iloc[i][\"data_amount\"])\n\t\t\t#print(\"Uncertainty for {}: {} x {} \".format(avg_p2.iloc[i][\"opponent\"],avg_p1.iloc[i][\"uncertainty\"],avg_p2.iloc[i][\"uncertainty\"]))\n\t\tu=1/s #u is the overall uncertainty of our feature vector for the match!\n\t\t#print(\"Overall uncertainty: {}\".format(u))\n\n\t\t#mean stats\n\t\tstats_p1=list(avg_p1.mean(axis=0,numeric_only=True)[0:13])\n\t\tstats_p2=list(avg_p2.mean(axis=0,numeric_only=True)[0:13])\n\n\t\t#WEIGHTED mean stats\n\t\t#we need to take mean value of each column to get average player performances against the list of common opponents\n\t\t#weight opponents by measure of data_amount at disposal?\n\t\t#No: this would make, for ex, a player look worse if he played lots of times against Novak Djokovic!\n\t\t#sum_unc_1=avg_p1[\"data_amount\"].sum()\n\t\t#avg_p1[\"weight\"]=avg_p1.apply(lambda row: (row[\"data_amount\"]/sum_unc_1),axis=1)\n\t\t#print(stats_p1)\n\n\t\tdiffs=list(np.subtract(stats_p1,stats_p2))\n\n\t\tv.extend(diffs)\n\n\t\tv.append(round(stats_p1[3]*stats_p1[4]-stats_p2[3]*stats_p2[4],4)) #complete\n\t\tv.append((stats_p1[3]-stats_p2[4])-(stats_p2[3]-stats_p1[4])) #serveadv\n\n\t\t#h2h\n\t\th2h_1=df[((df[\"winner_name\"]==player1) & (df[\"loser_name\"]==player2)) & ((df[\"tourney_date\"]<date) | (\\\n\t\t\t(df[\"tourney_date\"]==date) & (df[\"round\"]<r)) & (year-df[\"year\"]<=5))].shape[0]\n\t\th2h_2=df[((df[\"winner_name\"]==player2) & (df[\"loser_name\"]==player1)) & ((df[\"tourney_date\"]<date) | (\\\n\t\t\t(df[\"tourney_date\"]==date) & (df[\"round\"]<r)) & (year-df[\"year\"]<=5))].shape[0]\n\t\tif (h2h_1+h2h_2)>0:\n\t\t\tv.append(round((h2h_1/(h2h_1+h2h_2))-(h2h_2/(h2h_1+h2h_2)),4))\n\t\telse:\n\t\t\tv.append(0) #dummy value\n\n\t\t#fatigue feature\n\t\t# NOTE we don't have the data of each match, but only the starting date of the tournament;\n\t\t# therefore, the only thing that can be done is counting the num. 
of games played since the beginning of the tournament and give a % difference btw the 2 players.\n\t\t# This does not take into account the exact distance in days of previous matches from the current one, nor matches of the previous tournament,\n\t\t# but from our perspective it seems quite impossible to do differently.\n\t\ttourney_p1=df[(((df[\"winner_name\"]==player1) | (df[\"loser_name\"]==player1)) & ((df[\"tourney_date\"]==date) & (df[\"round\"]<r)))]\n\t\tp1_games=tourney_p1[\"tot_games\"].sum()\n\t\ttourney_p2=df[(((df[\"winner_name\"]==player2) | (df[\"loser_name\"]==player2)) & ((df[\"tourney_date\"]==date) & (df[\"round\"]<r)))]\n\t\tp2_games=tourney_p2[\"tot_games\"].sum()\n\n\t\tif np.isnan(p1_games):\n\t\t\tp1_games=0\n\t\tif np.isnan(p2_games):\n\t\t\tp2_games=0\n\n\t\tif p1_games==0 and p2_games==0:\n\t\t\tv.append(0) #no games played by either player, we put zero\n\t\telse:\n\t\t\tv.append(round((p1_games/(p1_games+p2_games))-(p2_games/(p1_games+p2_games)),4))\n\n\t\tv.append(u) #append uncertainty!\n\n\t\tif player1==row[\"winner_name\"]:\n\t\t\tv.append(0)\n\t\telse:\n\t\t\tv.append(1)\n\n\t\treturn v\n\telse:\n\t\treturn False", "def dataset():\n\n dataset_path = os.environ['LONGWOOD_DATASET']\n img_dir_path = os.path.join(os.path.dirname(dataset_path), 'img/')\n\n with open(dataset_path, 'rbU') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=\",\", quotechar='\"')\n reader.next()\n for row in reader:\n\n year = row['ACC_NUM'][0:4]\n accession_number = row['ACC_NUM']\n species = row['NAME']\n country = row['COUNTRY_FULL']\n locality = row['LOCALITY']\n latlon = normalize_latlon(row['LAT_DEGREE'], row['LAT_DIR'],\n row['LONG_DEGREE'], row['LONG_DIR'],\n row['HumanLat'], row['HumanLong'])\n\n _possible_img = os.path.join(img_dir_path,\n '{}.jpg'.format(accession_number))\n has_picture = os.path.isfile(_possible_img)\n\n yield Specimen(year, accession_number, species, country, locality,\n latlon, has_picture)", "def feature_extractor(X_train, X_test):\n \n hog_train = []\n hog_test = []\n sift_train = []\n sift_test = []\n hog = cv2.HOGDescriptor()\n #HOGFeatureExtractor()\n \n winSize = (64,64)\n blockSize = (16,16)\n blockStride = (8,8)\n cellSize = (8,8)\n nbins = 9\n derivAperture = 1\n winSigma = 4.\n histogramNormType = 0\n L2HysThreshold = 2.0000000000000001e-01\n gammaCorrection = 0\n nlevels = 64\n hog = cv2.HOGDescriptor(winSize,blockSize,blockStride,cellSize,nbins,derivAperture,winSigma,\n histogramNormType,L2HysThreshold,gammaCorrection,nlevels)\n winStride = (8,8)\n padding = (8,8)\n locations = ((10,20),)\n \n for img in X_train:\n kps, descs = sift(img)\n #if len(img.shape) == 2 :\n #img = img[:,:,numpy.newaxis]\n hog_train.append(hog.compute(img,winStride,padding,locations))\n if descs is None:\n sift_train.append([])\n else:\n sift_train.append(descs)\n i += 1\n if i%1000 == 0:\n print(i,datetime.now()-t)\n\n for img in X_test: \n kps, descs = sift(img)\n #if len(img.shape) == 2 :\n #img = img[:,:,numpy.newaxis]\n hog_test.append(hog.compute(img,winStride,padding,locations))\n if descs is None:\n sift_test.append([])\n else:\n sift_test.append(descs)\n \n return hog_train, hog_test, sift_train, sift_test", "def data_retriever(self):\n\n sql_st = '''\n SELECT *\n FROM geo_expense_data\n '''\n cur = self.conn.cursor()\n geo_comp_data = cur.execute(sql_st).fetchall()\n\n for record in geo_comp_data:\n geo_expense_id = record[0]\n year = record[2]\n month = record[3]\n day = record[4]\n comp_name = record[5]\n country = record[6]\n city = record[7]\n 
state = record[9]\n lat = record[11]\n lng = record[12]\n goog_details = self.company_type(comp_name,lat,lng)\n\n locations = self.locations_visited(year,month,day)\n # Find distance matrix between locations and visited places\n # Generate matrix n_location by n_goog_records\n # Should probably look to add some heuristic for name match\n if len(goog_details) == 0:\n sql_record = (geo_expense_id,'','','','','','')\n elif type(goog_details[0]) == dict:\n dist_array = np.zeros((len(locations),len(goog_details)))\n for i in range(len(locations)):\n loc_lat = locations[i][0]\n loc_lng = locations[i][1]\n for j in range(len(goog_details)):\n goog_lat = goog_details[j]['geometry']['location']['lat']\n goog_lng = goog_details[j]['geometry']['location']['lng']\n\n dist_array[i,j] = self.distance(goog_lat,goog_lng,loc_lat,loc_lng)\n\n min_dist_idx = np.argmin(dist_array)\n m = min_dist_idx // dist_array.shape[1]\n n = min_dist_idx - dist_array.shape[1] * m\n comp_type = goog_details[n]['types'][0]\n goog_name = goog_details[n]['name']\n address = goog_details[n]['formatted_address']\n placeid = goog_details[n]['place_id']\n goog_lat = goog_details[n]['geometry']['location']['lat']\n goog_lng = goog_details[n]['geometry']['location']['lng']\n\n sql_record = (geo_expense_id,goog_name,comp_type,address,placeid,goog_lat,goog_lng)\n else:\n comp_type = goog_details[0]\n sql_record = (geo_expense_id,'',comp_type,'','','','')\n\n self.data_writer(sql_record)", "def test_run(filename, count=10):\n for i, (tweet, valid) in enumerate(us_geocoded_tweets(filename)):\n if i > count:\n break\n try:\n t = extract(tweet)\n except:\n #print(repr(tweet))\n raise\n else:\n print(t, end='\\n\\n')\n i += 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether ``ScheduledEventEntityMetadataLocation.copy`` works as intended.
def test__ScheduledEventEntityMetadataLocation__copy(): location = 'Koishi WonderLand' entity_metadata = ScheduledEventEntityMetadataLocation( location = location, ) copy = entity_metadata.copy() _assert_fields_set(copy) vampytest.assert_eq(copy, entity_metadata) vampytest.assert_is_not(copy, entity_metadata)
[ "def test__ScheduledEventEntityMetadataLocation__copy_with__0():\n location = 'Koishi WonderLand'\n \n entity_metadata = ScheduledEventEntityMetadataLocation(\n location = location,\n )\n copy = entity_metadata.copy_with()\n \n _assert_fields_set(copy)\n vampytest.assert_eq(copy, entity_metadata)\n vampytest.assert_is_not(copy, entity_metadata)", "def test__ScheduledEventEntityMetadataLocation__copy_with__1():\n old_location = 'Koishi WonderLand'\n \n new_location = 'Orin\\'s dance house'\n \n entity_metadata = ScheduledEventEntityMetadataLocation(\n location = old_location,\n )\n copy = entity_metadata.copy_with(\n location = new_location,\n )\n \n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, entity_metadata)\n vampytest.assert_eq(copy.location, new_location)", "def test__ScheduledEventEntityMetadataLocation__copy_with_keyword_parameters__0():\n location = 'Koishi WonderLand'\n \n entity_metadata = ScheduledEventEntityMetadataLocation(\n location = location,\n )\n copy = entity_metadata.copy_with_keyword_parameters({})\n \n _assert_fields_set(copy)\n vampytest.assert_eq(copy, entity_metadata)\n vampytest.assert_is_not(copy, entity_metadata)", "def test__ScheduledEventEntityMetadataLocation__copy_with_keyword_parameters__1():\n old_location = 'Koishi WonderLand'\n \n new_location = 'Orin\\'s dance house'\n \n entity_metadata = ScheduledEventEntityMetadataLocation(\n location = old_location,\n )\n copy = entity_metadata.copy_with_keyword_parameters({\n 'location': new_location,\n })\n \n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, entity_metadata)\n vampytest.assert_eq(copy.location, new_location)", "def testCopy(self):\n impl = self.impl\n ws_name = self.ws_name\n conf = self.conf\n ws_meta = self.ws_meta\n\n test_object5 = {\n \"id\": \"test_object_id5\",\n \"type\": \"Genome\",\n \"data\": {\"name\":\"testgenome5\", \"string\":\"ACACGATTACA\"},\n \"workspace\": ws_name,\n \"command\": \"something\",\n \"metadata\": {\"origin\":\"shreyas\"},\n \"auth\": self.__class__.token\n }\n obj_meta5 = impl.save_object(test_object5)\n\n\n ws_name2 = \"testWS_%s\" % datetime.utcnow().strftime('%s')\n conf2 = {\"workspace\": ws_name2,\"default_permission\": \"a\", \"auth\": self.__class__.token }\n ws_meta2 = self.impl.create_workspace(conf2)\n\n impl.copy_object({\n \"new_id\": \"new_object_id5\",\n \"new_workspace\": ws_name2,\n \"source_id\": \"test_object_id5\",\n \"source_workspace\": ws_name,\n \"type\": \"Genome\",\n \"auth\": self.__class__.token\n })\n\n has_object = impl.has_object({\n \"id\": \"new_object_id5\",\n \"workspace\": ws_name2,\n \"type\": \"Genome\",\n \"auth\": self.__class__.token\n })\n self.assertTrue(has_object)", "def test_copy_story(self):\n\n # FIXME", "def test_copy(self):\n atom = self.atom.copy()\n self.assertEqual(self.atom.element.symbol, atom.element.symbol)\n self.assertEqual(self.atom.atomtype, atom.atomtype)\n self.assertEqual(self.atom.radical_electrons, atom.radical_electrons)\n self.assertEqual(self.atom.charge, atom.charge)\n self.assertEqual(self.atom.label, atom.label)", "def test_copy_run(self):\n pass", "def test_copy_meeting_schedule(self):\n meeting = make_meeting_test_data()\n self.client.login(username=\"secretary\", password=\"secretary+password\")\n\n url = urlreverse(\"ietf.meeting.views.new_meeting_schedule\", kwargs=dict(num=meeting.number, owner=meeting.schedule.owner_email(), name=meeting.schedule.name))\n r = self.client.get(url)\n self.assertEqual(r.status_code, 200)\n\n r = self.client.post(url, {\n 'name': 
\"copy\",\n 'public': \"on\",\n 'notes': \"New copy\",\n 'base': meeting.schedule.base_id,\n })\n self.assertNoFormPostErrors(r)\n\n new_schedule = Schedule.objects.get(meeting=meeting, owner__user__username='secretary', name='copy')\n self.assertEqual(new_schedule.public, True)\n self.assertEqual(new_schedule.visible, False)\n self.assertEqual(new_schedule.notes, \"New copy\")\n self.assertEqual(new_schedule.origin, meeting.schedule)\n self.assertEqual(new_schedule.base_id, meeting.schedule.base_id)\n\n old_assignments = {(a.session_id, a.timeslot_id) for a in SchedTimeSessAssignment.objects.filter(schedule=meeting.schedule)}\n for a in SchedTimeSessAssignment.objects.filter(schedule=new_schedule):\n self.assertIn((a.session_id, a.timeslot_id), old_assignments)", "def test_copy_ntm(self):\n new_ntm = self.ntm1.copy()\n self.assert_is_copy(new_ntm, self.ntm1)", "def _should_copy(src, dest, logger=None):\n if not os.path.exists(dest):\n return True\n if os.stat(src).st_mtime - os.stat(dest).st_mtime > 1e-6: # noqa\n # we add a fudge factor to work around a bug in python 2.x\n # that was fixed in python 3.x: https://bugs.python.org/issue12904\n if logger:\n logger.warning(\"Out of date: %s\" % dest)\n return True\n if logger:\n logger.info(\"Up to date: %s\" % dest)\n return False", "def test_copy(self):\n src_path = self.work_path('src', True)\n dst_path = self.work_path('dst', True)\n\n locker0 = Locker.create(src_path, self.content_path(), b'01234567')\n locker1 = locker0.copy(dst_path)\n\n self.assertEqual(locker1.path, os.path.join(dst_path, Locker.filename))", "def testCopy(self):\n\t\tfor atomType in atomTypes:\n\t\t\tfor electronState in electronStates:\n\t\t\t\tatom1 = Atom(atomType, electronState, -1, '1*')\n\t\t\t\tatom2 = atom1.copy()\n\t\t\t\tself.assertTrue(atom2 is not None)\n\t\t\t\tself.assertTrue(atom1 is not atom2)\n\t\t\t\tself.assertTrue(atom1.atomType == atom2.atomType)\n\t\t\t\tself.assertTrue(atom1.electronState == atom2.electronState)\n\t\t\t\tself.assertTrue(atom1.charge == atom2.charge)\n\t\t\t\tself.assertTrue(atom1.label == atom2.label)", "def test_copy_no_snapshot(ac_dc_network):\n snapshot = ac_dc_network.snapshots[2]\n copied_network = ac_dc_network.copy(with_time=False, snapshots=snapshot)\n\n assert copied_network.snapshots.size == 1\n assert snapshot not in copied_network.snapshots", "def test_copy_default_behavior(ac_dc_network):\n snapshot = ac_dc_network.snapshots[2]\n copied_network = ac_dc_network.copy()\n\n loads = ac_dc_network.loads.index.tolist()\n generators = ac_dc_network.generators.index.tolist()\n copied_loads = copied_network.loads.index.tolist()\n copied_generators = copied_network.generators.index.tolist()\n\n assert loads == copied_loads\n assert generators == copied_generators\n assert not copied_network.snapshots.empty\n assert snapshot in copied_network.snapshots", "def canCopy(self) -> bool:\n ...", "def test_existing_location(self):\n cm = get_camera_by_location(self.lat, self.lon)\n self.assertEqual(self.st, cm.start_time)\n self.assertEqual(self.et, cm.end_time)", "def test__ChannelMetadataBase__copy():\n channel_metadata = ChannelMetadataBase()\n \n copy = channel_metadata.copy()\n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, channel_metadata)\n \n vampytest.assert_eq(copy, channel_metadata)", "def test10_copy_of_sut(self):\n \n test_config = ResourceConfig(r'configs/resource/happy_path_template.xml', \n r'../src/bespoke/xsd/resource_config.xsd')\n \n actual_template_1 = test_config['BVT-2k3-R2-32']\n \n 
self.assertIsNot(actual_template_1, test_config._content['BVT-2k3-R2-32'])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether ``ScheduledEventEntityMetadataLocation.copy_with`` works as intended.
def test__ScheduledEventEntityMetadataLocation__copy_with__0():
    location = 'Koishi WonderLand'
    
    entity_metadata = ScheduledEventEntityMetadataLocation(
        location = location,
    )
    copy = entity_metadata.copy_with()
    
    _assert_fields_set(copy)
    vampytest.assert_eq(copy, entity_metadata)
    vampytest.assert_is_not(copy, entity_metadata)
[ "def test__ScheduledEventEntityMetadataLocation__copy_with__1():\n old_location = 'Koishi WonderLand'\n \n new_location = 'Orin\\'s dance house'\n \n entity_metadata = ScheduledEventEntityMetadataLocation(\n location = old_location,\n )\n copy = entity_metadata.copy_with(\n location = new_location,\n )\n \n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, entity_metadata)\n vampytest.assert_eq(copy.location, new_location)", "def test__ScheduledEventEntityMetadataLocation__copy():\n location = 'Koishi WonderLand'\n \n entity_metadata = ScheduledEventEntityMetadataLocation(\n location = location,\n )\n copy = entity_metadata.copy()\n \n _assert_fields_set(copy)\n vampytest.assert_eq(copy, entity_metadata)\n vampytest.assert_is_not(copy, entity_metadata)", "def test__ScheduledEventEntityMetadataLocation__copy_with_keyword_parameters__0():\n location = 'Koishi WonderLand'\n \n entity_metadata = ScheduledEventEntityMetadataLocation(\n location = location,\n )\n copy = entity_metadata.copy_with_keyword_parameters({})\n \n _assert_fields_set(copy)\n vampytest.assert_eq(copy, entity_metadata)\n vampytest.assert_is_not(copy, entity_metadata)", "def test__ScheduledEventEntityMetadataLocation__copy_with_keyword_parameters__1():\n old_location = 'Koishi WonderLand'\n \n new_location = 'Orin\\'s dance house'\n \n entity_metadata = ScheduledEventEntityMetadataLocation(\n location = old_location,\n )\n copy = entity_metadata.copy_with_keyword_parameters({\n 'location': new_location,\n })\n \n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, entity_metadata)\n vampytest.assert_eq(copy.location, new_location)", "def testCopy(self):\n impl = self.impl\n ws_name = self.ws_name\n conf = self.conf\n ws_meta = self.ws_meta\n\n test_object5 = {\n \"id\": \"test_object_id5\",\n \"type\": \"Genome\",\n \"data\": {\"name\":\"testgenome5\", \"string\":\"ACACGATTACA\"},\n \"workspace\": ws_name,\n \"command\": \"something\",\n \"metadata\": {\"origin\":\"shreyas\"},\n \"auth\": self.__class__.token\n }\n obj_meta5 = impl.save_object(test_object5)\n\n\n ws_name2 = \"testWS_%s\" % datetime.utcnow().strftime('%s')\n conf2 = {\"workspace\": ws_name2,\"default_permission\": \"a\", \"auth\": self.__class__.token }\n ws_meta2 = self.impl.create_workspace(conf2)\n\n impl.copy_object({\n \"new_id\": \"new_object_id5\",\n \"new_workspace\": ws_name2,\n \"source_id\": \"test_object_id5\",\n \"source_workspace\": ws_name,\n \"type\": \"Genome\",\n \"auth\": self.__class__.token\n })\n\n has_object = impl.has_object({\n \"id\": \"new_object_id5\",\n \"workspace\": ws_name2,\n \"type\": \"Genome\",\n \"auth\": self.__class__.token\n })\n self.assertTrue(has_object)", "def test_copy_story(self):\n\n # FIXME", "def test_copy_run(self):\n pass", "def test_copy_meeting_schedule(self):\n meeting = make_meeting_test_data()\n self.client.login(username=\"secretary\", password=\"secretary+password\")\n\n url = urlreverse(\"ietf.meeting.views.new_meeting_schedule\", kwargs=dict(num=meeting.number, owner=meeting.schedule.owner_email(), name=meeting.schedule.name))\n r = self.client.get(url)\n self.assertEqual(r.status_code, 200)\n\n r = self.client.post(url, {\n 'name': \"copy\",\n 'public': \"on\",\n 'notes': \"New copy\",\n 'base': meeting.schedule.base_id,\n })\n self.assertNoFormPostErrors(r)\n\n new_schedule = Schedule.objects.get(meeting=meeting, owner__user__username='secretary', name='copy')\n self.assertEqual(new_schedule.public, True)\n self.assertEqual(new_schedule.visible, False)\n 
self.assertEqual(new_schedule.notes, \"New copy\")\n self.assertEqual(new_schedule.origin, meeting.schedule)\n self.assertEqual(new_schedule.base_id, meeting.schedule.base_id)\n\n old_assignments = {(a.session_id, a.timeslot_id) for a in SchedTimeSessAssignment.objects.filter(schedule=meeting.schedule)}\n for a in SchedTimeSessAssignment.objects.filter(schedule=new_schedule):\n self.assertIn((a.session_id, a.timeslot_id), old_assignments)", "def test_copy(self):\n src_path = self.work_path('src', True)\n dst_path = self.work_path('dst', True)\n\n locker0 = Locker.create(src_path, self.content_path(), b'01234567')\n locker1 = locker0.copy(dst_path)\n\n self.assertEqual(locker1.path, os.path.join(dst_path, Locker.filename))", "def test_copy(self):\n atom = self.atom.copy()\n self.assertEqual(self.atom.element.symbol, atom.element.symbol)\n self.assertEqual(self.atom.atomtype, atom.atomtype)\n self.assertEqual(self.atom.radical_electrons, atom.radical_electrons)\n self.assertEqual(self.atom.charge, atom.charge)\n self.assertEqual(self.atom.label, atom.label)", "def test_copy_ntm(self):\n new_ntm = self.ntm1.copy()\n self.assert_is_copy(new_ntm, self.ntm1)", "def _should_copy(src, dest, logger=None):\n if not os.path.exists(dest):\n return True\n if os.stat(src).st_mtime - os.stat(dest).st_mtime > 1e-6: # noqa\n # we add a fudge factor to work around a bug in python 2.x\n # that was fixed in python 3.x: https://bugs.python.org/issue12904\n if logger:\n logger.warning(\"Out of date: %s\" % dest)\n return True\n if logger:\n logger.info(\"Up to date: %s\" % dest)\n return False", "def test__ChannelMetadataBase__copy_with__0():\n channel_metadata = ChannelMetadataBase()\n \n copy = channel_metadata.copy_with()\n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, channel_metadata)\n \n vampytest.assert_eq(copy, channel_metadata)", "def test__ChannelMetadataGuildAnnouncements__copy_with__1():\n old_name = 'alice'\n old_parent_id = 202304130060\n old_permission_overwrites = [\n PermissionOverwrite(202304130061, target_type = PermissionOverwriteTargetType.user)\n ]\n old_position = 7\n old_default_thread_auto_archive_after = 86400\n old_default_thread_slowmode = 60\n old_nsfw = True\n old_slowmode = 30\n old_topic = 'rin'\n \n new_name = 'emotion'\n new_parent_id = 202304130062\n new_permission_overwrites = [\n PermissionOverwrite(202304130063, target_type = PermissionOverwriteTargetType.role)\n ]\n new_position = 5\n new_default_thread_auto_archive_after = 3600\n new_default_thread_slowmode = 69\n new_nsfw = False\n new_slowmode = 33\n new_topic = 'orin'\n \n channel_metadata = ChannelMetadataGuildAnnouncements(\n name = old_name,\n parent_id = old_parent_id,\n permission_overwrites = old_permission_overwrites,\n position = old_position,\n default_thread_auto_archive_after = old_default_thread_auto_archive_after,\n default_thread_slowmode = old_default_thread_slowmode,\n nsfw = old_nsfw,\n slowmode = old_slowmode,\n topic = old_topic,\n )\n \n copy = channel_metadata.copy_with(\n name = new_name,\n parent_id = new_parent_id,\n permission_overwrites = new_permission_overwrites,\n position = new_position,\n default_thread_auto_archive_after = new_default_thread_auto_archive_after,\n default_thread_slowmode = new_default_thread_slowmode,\n nsfw = new_nsfw,\n slowmode = new_slowmode,\n topic = new_topic,\n )\n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, channel_metadata)\n \n vampytest.assert_eq(copy.name, new_name)\n vampytest.assert_eq(copy.parent_id, new_parent_id)\n 
vampytest.assert_eq(\n copy.permission_overwrites,\n {permission_overwrite.target_id: permission_overwrite for permission_overwrite in new_permission_overwrites},\n )\n vampytest.assert_eq(copy.position, new_position)\n vampytest.assert_eq(copy.default_thread_auto_archive_after, new_default_thread_auto_archive_after)\n vampytest.assert_eq(copy.default_thread_slowmode, new_default_thread_slowmode)\n vampytest.assert_eq(copy.nsfw, new_nsfw)\n vampytest.assert_eq(copy.slowmode, new_slowmode)\n vampytest.assert_eq(copy.topic, new_topic)", "def test10_copy_of_sut(self):\n \n test_config = ResourceConfig(r'configs/resource/happy_path_template.xml', \n r'../src/bespoke/xsd/resource_config.xsd')\n \n actual_template_1 = test_config['BVT-2k3-R2-32']\n \n self.assertIsNot(actual_template_1, test_config._content['BVT-2k3-R2-32'])", "def canCopy(self) -> bool:\n ...", "def test_copy_feature_flag(self):\n pass", "def test_copy_temp_narrative(self):\n ws = self.getWsClient()\n\n source_nar_info = self.getImpl().create_new_narrative(self.getContext(), {})[0]\n source_ws_id = source_nar_info['workspaceInfo']['id']\n source_nar_id = source_nar_info['narrativeInfo']['id']\n\n # first, make a copy from the source as-is and make sure it has the temp tag.\n copied_nar_info = self.getImpl().copy_narrative(self.getContext(), {\n 'workspaceRef': '{}/{}'.format(source_ws_id, source_nar_id),\n 'newName': 'Untitled'\n })[0]\n copied_nar = ws.get_objects2({\n 'objects': [{\n 'ref': '{}/{}'.format(copied_nar_info['newWsId'], copied_nar_info['newNarId'])\n }]\n })\n copied_ws_info = ws.get_workspace_info({'id': copied_nar_info['newWsId']})\n\n # second, tweak the source to remove its 'is_temporary' field all together. then\n # copy and test it.\n source_nar = ws.get_objects2({\n 'objects': [{\n 'ref': '{}/{}'.format(source_ws_id, source_nar_id)\n }]\n })['data'][0]\n if 'is_temporary' in source_nar['info'][10]:\n del source_nar['info'][10]['is_temporary']\n ws.save_objects({'workspace': source_nar_info['workspaceInfo']['name'], 'objects':\n [{'type': self.NARRATIVE_TYPE,\n 'data': source_nar['data'],\n 'name': source_nar['info'][1],\n 'meta': source_nar['info'][10]}]})\n copied_nar_info2 = self.getImpl().copy_narrative(self.getContext(), {\n 'workspaceRef': '{}/{}'.format(source_ws_id, source_nar_id),\n 'newName': 'Untitled'\n })[0]\n copied_nar2 = ws.get_objects2({\n 'objects': [{\n 'ref': '{}/{}'.format(copied_nar_info2['newWsId'], copied_nar_info2['newNarId'])\n }]\n })\n copied_ws_info2 = ws.get_workspace_info({'id': copied_nar_info2['newWsId']})\n\n # Finally, create a new non-temporary narrative, copy it, and ensure everything has\n # 'is_temporary': 'false' in the right metadata places.\n source_nar_info2 = self.getImpl().create_new_narrative(self.getContext(), {\n 'title': 'Not Temporary'\n })[0]\n source_ws_id2 = source_nar_info2['workspaceInfo']['id']\n source_nar_id2 = source_nar_info2['narrativeInfo']['id']\n copied_nar_info3 = self.getImpl().copy_narrative(self.getContext(), {\n 'workspaceRef': '{}/{}'.format(source_ws_id2, source_nar_id2),\n 'newName': 'Still Not Temporary'\n })[0]\n copied_nar3 = ws.get_objects2({\n 'objects': [{\n 'ref': '{}/{}'.format(copied_nar_info3['newWsId'], copied_nar_info3['newNarId'])\n }]\n })\n copied_ws_info3 = ws.get_workspace_info({'id': copied_nar_info3['newWsId']})\n\n try:\n self.assertEqual(source_nar_info['workspaceInfo']['metadata']['is_temporary'], 'true')\n self.assertEqual(source_nar_info['narrativeInfo']['metadata']['is_temporary'], 'true')\n 
self.assertEqual(copied_nar['data'][0]['info'][10]['is_temporary'], 'true')\n self.assertEqual(copied_ws_info[8]['is_temporary'], 'true')\n self.assertEqual(copied_nar2['data'][0]['info'][10]['is_temporary'], 'true')\n self.assertEqual(copied_ws_info2[8]['is_temporary'], 'true')\n self.assertEqual(source_nar_info2['workspaceInfo']['metadata']['is_temporary'], 'false')\n self.assertEqual(source_nar_info2['narrativeInfo']['metadata']['is_temporary'], 'false')\n self.assertEqual(copied_nar3['data'][0]['info'][10]['is_temporary'], 'false')\n self.assertEqual(copied_ws_info3[8]['is_temporary'], 'false')\n finally:\n self.getWsClient().delete_workspace({'id': source_ws_id})\n self.getWsClient().delete_workspace({'id': source_ws_id2})\n self.getWsClient().delete_workspace({'id': copied_nar_info['newWsId']})\n self.getWsClient().delete_workspace({'id': copied_nar_info2['newWsId']})\n self.getWsClient().delete_workspace({'id': copied_nar_info3['newWsId']})", "def canCopySpecial(self) -> bool:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether ``ScheduledEventEntityMetadataLocation.copy_with`` works as intended.
def test__ScheduledEventEntityMetadataLocation__copy_with__1():
    old_location = 'Koishi WonderLand'
    
    new_location = 'Orin\'s dance house'
    
    entity_metadata = ScheduledEventEntityMetadataLocation(
        location = old_location,
    )
    copy = entity_metadata.copy_with(
        location = new_location,
    )
    
    _assert_fields_set(copy)
    vampytest.assert_is_not(copy, entity_metadata)
    vampytest.assert_eq(copy.location, new_location)
[ "def test__ScheduledEventEntityMetadataLocation__copy_with__0():\n location = 'Koishi WonderLand'\n \n entity_metadata = ScheduledEventEntityMetadataLocation(\n location = location,\n )\n copy = entity_metadata.copy_with()\n \n _assert_fields_set(copy)\n vampytest.assert_eq(copy, entity_metadata)\n vampytest.assert_is_not(copy, entity_metadata)", "def test__ScheduledEventEntityMetadataLocation__copy():\n location = 'Koishi WonderLand'\n \n entity_metadata = ScheduledEventEntityMetadataLocation(\n location = location,\n )\n copy = entity_metadata.copy()\n \n _assert_fields_set(copy)\n vampytest.assert_eq(copy, entity_metadata)\n vampytest.assert_is_not(copy, entity_metadata)", "def test__ScheduledEventEntityMetadataLocation__copy_with_keyword_parameters__0():\n location = 'Koishi WonderLand'\n \n entity_metadata = ScheduledEventEntityMetadataLocation(\n location = location,\n )\n copy = entity_metadata.copy_with_keyword_parameters({})\n \n _assert_fields_set(copy)\n vampytest.assert_eq(copy, entity_metadata)\n vampytest.assert_is_not(copy, entity_metadata)", "def test__ScheduledEventEntityMetadataLocation__copy_with_keyword_parameters__1():\n old_location = 'Koishi WonderLand'\n \n new_location = 'Orin\\'s dance house'\n \n entity_metadata = ScheduledEventEntityMetadataLocation(\n location = old_location,\n )\n copy = entity_metadata.copy_with_keyword_parameters({\n 'location': new_location,\n })\n \n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, entity_metadata)\n vampytest.assert_eq(copy.location, new_location)", "def testCopy(self):\n impl = self.impl\n ws_name = self.ws_name\n conf = self.conf\n ws_meta = self.ws_meta\n\n test_object5 = {\n \"id\": \"test_object_id5\",\n \"type\": \"Genome\",\n \"data\": {\"name\":\"testgenome5\", \"string\":\"ACACGATTACA\"},\n \"workspace\": ws_name,\n \"command\": \"something\",\n \"metadata\": {\"origin\":\"shreyas\"},\n \"auth\": self.__class__.token\n }\n obj_meta5 = impl.save_object(test_object5)\n\n\n ws_name2 = \"testWS_%s\" % datetime.utcnow().strftime('%s')\n conf2 = {\"workspace\": ws_name2,\"default_permission\": \"a\", \"auth\": self.__class__.token }\n ws_meta2 = self.impl.create_workspace(conf2)\n\n impl.copy_object({\n \"new_id\": \"new_object_id5\",\n \"new_workspace\": ws_name2,\n \"source_id\": \"test_object_id5\",\n \"source_workspace\": ws_name,\n \"type\": \"Genome\",\n \"auth\": self.__class__.token\n })\n\n has_object = impl.has_object({\n \"id\": \"new_object_id5\",\n \"workspace\": ws_name2,\n \"type\": \"Genome\",\n \"auth\": self.__class__.token\n })\n self.assertTrue(has_object)", "def test_copy_story(self):\n\n # FIXME", "def test_copy_run(self):\n pass", "def test_copy_meeting_schedule(self):\n meeting = make_meeting_test_data()\n self.client.login(username=\"secretary\", password=\"secretary+password\")\n\n url = urlreverse(\"ietf.meeting.views.new_meeting_schedule\", kwargs=dict(num=meeting.number, owner=meeting.schedule.owner_email(), name=meeting.schedule.name))\n r = self.client.get(url)\n self.assertEqual(r.status_code, 200)\n\n r = self.client.post(url, {\n 'name': \"copy\",\n 'public': \"on\",\n 'notes': \"New copy\",\n 'base': meeting.schedule.base_id,\n })\n self.assertNoFormPostErrors(r)\n\n new_schedule = Schedule.objects.get(meeting=meeting, owner__user__username='secretary', name='copy')\n self.assertEqual(new_schedule.public, True)\n self.assertEqual(new_schedule.visible, False)\n self.assertEqual(new_schedule.notes, \"New copy\")\n self.assertEqual(new_schedule.origin, meeting.schedule)\n 
self.assertEqual(new_schedule.base_id, meeting.schedule.base_id)\n\n old_assignments = {(a.session_id, a.timeslot_id) for a in SchedTimeSessAssignment.objects.filter(schedule=meeting.schedule)}\n for a in SchedTimeSessAssignment.objects.filter(schedule=new_schedule):\n self.assertIn((a.session_id, a.timeslot_id), old_assignments)", "def test_copy(self):\n src_path = self.work_path('src', True)\n dst_path = self.work_path('dst', True)\n\n locker0 = Locker.create(src_path, self.content_path(), b'01234567')\n locker1 = locker0.copy(dst_path)\n\n self.assertEqual(locker1.path, os.path.join(dst_path, Locker.filename))", "def test_copy(self):\n atom = self.atom.copy()\n self.assertEqual(self.atom.element.symbol, atom.element.symbol)\n self.assertEqual(self.atom.atomtype, atom.atomtype)\n self.assertEqual(self.atom.radical_electrons, atom.radical_electrons)\n self.assertEqual(self.atom.charge, atom.charge)\n self.assertEqual(self.atom.label, atom.label)", "def test_copy_ntm(self):\n new_ntm = self.ntm1.copy()\n self.assert_is_copy(new_ntm, self.ntm1)", "def _should_copy(src, dest, logger=None):\n if not os.path.exists(dest):\n return True\n if os.stat(src).st_mtime - os.stat(dest).st_mtime > 1e-6: # noqa\n # we add a fudge factor to work around a bug in python 2.x\n # that was fixed in python 3.x: https://bugs.python.org/issue12904\n if logger:\n logger.warning(\"Out of date: %s\" % dest)\n return True\n if logger:\n logger.info(\"Up to date: %s\" % dest)\n return False", "def test__ChannelMetadataBase__copy_with__0():\n channel_metadata = ChannelMetadataBase()\n \n copy = channel_metadata.copy_with()\n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, channel_metadata)\n \n vampytest.assert_eq(copy, channel_metadata)", "def test__ChannelMetadataGuildAnnouncements__copy_with__1():\n old_name = 'alice'\n old_parent_id = 202304130060\n old_permission_overwrites = [\n PermissionOverwrite(202304130061, target_type = PermissionOverwriteTargetType.user)\n ]\n old_position = 7\n old_default_thread_auto_archive_after = 86400\n old_default_thread_slowmode = 60\n old_nsfw = True\n old_slowmode = 30\n old_topic = 'rin'\n \n new_name = 'emotion'\n new_parent_id = 202304130062\n new_permission_overwrites = [\n PermissionOverwrite(202304130063, target_type = PermissionOverwriteTargetType.role)\n ]\n new_position = 5\n new_default_thread_auto_archive_after = 3600\n new_default_thread_slowmode = 69\n new_nsfw = False\n new_slowmode = 33\n new_topic = 'orin'\n \n channel_metadata = ChannelMetadataGuildAnnouncements(\n name = old_name,\n parent_id = old_parent_id,\n permission_overwrites = old_permission_overwrites,\n position = old_position,\n default_thread_auto_archive_after = old_default_thread_auto_archive_after,\n default_thread_slowmode = old_default_thread_slowmode,\n nsfw = old_nsfw,\n slowmode = old_slowmode,\n topic = old_topic,\n )\n \n copy = channel_metadata.copy_with(\n name = new_name,\n parent_id = new_parent_id,\n permission_overwrites = new_permission_overwrites,\n position = new_position,\n default_thread_auto_archive_after = new_default_thread_auto_archive_after,\n default_thread_slowmode = new_default_thread_slowmode,\n nsfw = new_nsfw,\n slowmode = new_slowmode,\n topic = new_topic,\n )\n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, channel_metadata)\n \n vampytest.assert_eq(copy.name, new_name)\n vampytest.assert_eq(copy.parent_id, new_parent_id)\n vampytest.assert_eq(\n copy.permission_overwrites,\n {permission_overwrite.target_id: permission_overwrite for 
permission_overwrite in new_permission_overwrites},\n )\n vampytest.assert_eq(copy.position, new_position)\n vampytest.assert_eq(copy.default_thread_auto_archive_after, new_default_thread_auto_archive_after)\n vampytest.assert_eq(copy.default_thread_slowmode, new_default_thread_slowmode)\n vampytest.assert_eq(copy.nsfw, new_nsfw)\n vampytest.assert_eq(copy.slowmode, new_slowmode)\n vampytest.assert_eq(copy.topic, new_topic)", "def test10_copy_of_sut(self):\n \n test_config = ResourceConfig(r'configs/resource/happy_path_template.xml', \n r'../src/bespoke/xsd/resource_config.xsd')\n \n actual_template_1 = test_config['BVT-2k3-R2-32']\n \n self.assertIsNot(actual_template_1, test_config._content['BVT-2k3-R2-32'])", "def canCopy(self) -> bool:\n ...", "def test_copy_feature_flag(self):\n pass", "def test_copy_temp_narrative(self):\n ws = self.getWsClient()\n\n source_nar_info = self.getImpl().create_new_narrative(self.getContext(), {})[0]\n source_ws_id = source_nar_info['workspaceInfo']['id']\n source_nar_id = source_nar_info['narrativeInfo']['id']\n\n # first, make a copy from the source as-is and make sure it has the temp tag.\n copied_nar_info = self.getImpl().copy_narrative(self.getContext(), {\n 'workspaceRef': '{}/{}'.format(source_ws_id, source_nar_id),\n 'newName': 'Untitled'\n })[0]\n copied_nar = ws.get_objects2({\n 'objects': [{\n 'ref': '{}/{}'.format(copied_nar_info['newWsId'], copied_nar_info['newNarId'])\n }]\n })\n copied_ws_info = ws.get_workspace_info({'id': copied_nar_info['newWsId']})\n\n # second, tweak the source to remove its 'is_temporary' field all together. then\n # copy and test it.\n source_nar = ws.get_objects2({\n 'objects': [{\n 'ref': '{}/{}'.format(source_ws_id, source_nar_id)\n }]\n })['data'][0]\n if 'is_temporary' in source_nar['info'][10]:\n del source_nar['info'][10]['is_temporary']\n ws.save_objects({'workspace': source_nar_info['workspaceInfo']['name'], 'objects':\n [{'type': self.NARRATIVE_TYPE,\n 'data': source_nar['data'],\n 'name': source_nar['info'][1],\n 'meta': source_nar['info'][10]}]})\n copied_nar_info2 = self.getImpl().copy_narrative(self.getContext(), {\n 'workspaceRef': '{}/{}'.format(source_ws_id, source_nar_id),\n 'newName': 'Untitled'\n })[0]\n copied_nar2 = ws.get_objects2({\n 'objects': [{\n 'ref': '{}/{}'.format(copied_nar_info2['newWsId'], copied_nar_info2['newNarId'])\n }]\n })\n copied_ws_info2 = ws.get_workspace_info({'id': copied_nar_info2['newWsId']})\n\n # Finally, create a new non-temporary narrative, copy it, and ensure everything has\n # 'is_temporary': 'false' in the right metadata places.\n source_nar_info2 = self.getImpl().create_new_narrative(self.getContext(), {\n 'title': 'Not Temporary'\n })[0]\n source_ws_id2 = source_nar_info2['workspaceInfo']['id']\n source_nar_id2 = source_nar_info2['narrativeInfo']['id']\n copied_nar_info3 = self.getImpl().copy_narrative(self.getContext(), {\n 'workspaceRef': '{}/{}'.format(source_ws_id2, source_nar_id2),\n 'newName': 'Still Not Temporary'\n })[0]\n copied_nar3 = ws.get_objects2({\n 'objects': [{\n 'ref': '{}/{}'.format(copied_nar_info3['newWsId'], copied_nar_info3['newNarId'])\n }]\n })\n copied_ws_info3 = ws.get_workspace_info({'id': copied_nar_info3['newWsId']})\n\n try:\n self.assertEqual(source_nar_info['workspaceInfo']['metadata']['is_temporary'], 'true')\n self.assertEqual(source_nar_info['narrativeInfo']['metadata']['is_temporary'], 'true')\n self.assertEqual(copied_nar['data'][0]['info'][10]['is_temporary'], 'true')\n self.assertEqual(copied_ws_info[8]['is_temporary'], 
'true')\n self.assertEqual(copied_nar2['data'][0]['info'][10]['is_temporary'], 'true')\n self.assertEqual(copied_ws_info2[8]['is_temporary'], 'true')\n self.assertEqual(source_nar_info2['workspaceInfo']['metadata']['is_temporary'], 'false')\n self.assertEqual(source_nar_info2['narrativeInfo']['metadata']['is_temporary'], 'false')\n self.assertEqual(copied_nar3['data'][0]['info'][10]['is_temporary'], 'false')\n self.assertEqual(copied_ws_info3[8]['is_temporary'], 'false')\n finally:\n self.getWsClient().delete_workspace({'id': source_ws_id})\n self.getWsClient().delete_workspace({'id': source_ws_id2})\n self.getWsClient().delete_workspace({'id': copied_nar_info['newWsId']})\n self.getWsClient().delete_workspace({'id': copied_nar_info2['newWsId']})\n self.getWsClient().delete_workspace({'id': copied_nar_info3['newWsId']})", "def canCopySpecial(self) -> bool:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether ``ScheduledEventEntityMetadataLocation.copy_with_keyword_parameters`` works as intended.
def test__ScheduledEventEntityMetadataLocation__copy_with_keyword_parameters__0():
    location = 'Koishi WonderLand'
    
    entity_metadata = ScheduledEventEntityMetadataLocation(
        location = location,
    )
    copy = entity_metadata.copy_with_keyword_parameters({})
    
    _assert_fields_set(copy)
    vampytest.assert_eq(copy, entity_metadata)
    vampytest.assert_is_not(copy, entity_metadata)
[ "def test__ScheduledEventEntityMetadataLocation__copy_with_keyword_parameters__1():\n old_location = 'Koishi WonderLand'\n \n new_location = 'Orin\\'s dance house'\n \n entity_metadata = ScheduledEventEntityMetadataLocation(\n location = old_location,\n )\n copy = entity_metadata.copy_with_keyword_parameters({\n 'location': new_location,\n })\n \n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, entity_metadata)\n vampytest.assert_eq(copy.location, new_location)", "def test__ChannelMetadataBase__copy_with_keyword_parameters__0():\n channel_metadata = ChannelMetadataBase()\n \n keyword_parameters = {}\n copy = channel_metadata.copy_with_keyword_parameters(keyword_parameters)\n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, channel_metadata)\n vampytest.assert_eq(keyword_parameters, {})\n \n vampytest.assert_eq(copy, channel_metadata)", "def test__ChannelMetadataGuildAnnouncements__copy_with_keyword_parameters__0():\n name = 'alice'\n parent_id = 202304130064\n permission_overwrites = [\n PermissionOverwrite(202304130065, target_type = PermissionOverwriteTargetType.user)\n ]\n position = 7\n default_thread_auto_archive_after = 86400\n default_thread_slowmode = 60\n nsfw = True\n slowmode = 30\n topic = 'rin'\n \n channel_metadata = ChannelMetadataGuildAnnouncements(\n name = name,\n parent_id = parent_id,\n permission_overwrites = permission_overwrites,\n position = position,\n default_thread_auto_archive_after = default_thread_auto_archive_after,\n default_thread_slowmode = default_thread_slowmode,\n nsfw = nsfw,\n slowmode = slowmode,\n topic = topic,\n )\n \n keyword_parameters = {}\n copy = channel_metadata.copy_with_keyword_parameters(keyword_parameters)\n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, channel_metadata)\n vampytest.assert_eq(keyword_parameters, {})\n \n vampytest.assert_eq(copy, channel_metadata)", "def test__ActivityMetadataBase__from_keyword_parameters__1():\n keyword_parameters = {'name': 'ara'}\n keyword_parameters_copy = keyword_parameters.copy()\n \n activity_metadata = ActivityMetadataBase.from_keyword_parameters(keyword_parameters)\n _assert_fields_set(activity_metadata)\n \n vampytest.assert_eq(keyword_parameters, keyword_parameters_copy)", "def test__ChannelMetadataGuildAnnouncements__copy_with_keyword_parameters__1():\n old_name = 'alice'\n old_parent_id = 202304130066\n old_permission_overwrites = [\n PermissionOverwrite(202304130067, target_type = PermissionOverwriteTargetType.user)\n ]\n old_position = 7\n old_default_thread_auto_archive_after = 86400\n old_default_thread_slowmode = 60\n old_nsfw = True\n old_slowmode = 30\n old_topic = 'rin'\n \n new_name = 'emotion'\n new_parent_id = 202304130068\n new_permission_overwrites = [\n PermissionOverwrite(202304130069, target_type = PermissionOverwriteTargetType.role)\n ]\n new_position = 5\n new_default_thread_auto_archive_after = 3600\n new_default_thread_slowmode = 69\n new_nsfw = False\n new_slowmode = 33\n new_topic = 'orin'\n \n channel_metadata = ChannelMetadataGuildAnnouncements(\n name = old_name,\n parent_id = old_parent_id,\n permission_overwrites = old_permission_overwrites,\n position = old_position,\n default_thread_auto_archive_after = old_default_thread_auto_archive_after,\n default_thread_slowmode = old_default_thread_slowmode,\n nsfw = old_nsfw,\n slowmode = old_slowmode,\n topic = old_topic,\n )\n \n keyword_parameters = {\n 'name': new_name,\n 'parent_id': new_parent_id,\n 'permission_overwrites': new_permission_overwrites,\n 'position': new_position,\n 
'default_thread_auto_archive_after': new_default_thread_auto_archive_after,\n 'default_thread_slowmode': new_default_thread_slowmode,\n 'nsfw': new_nsfw,\n 'slowmode': new_slowmode,\n 'topic': new_topic,\n }\n \n copy = channel_metadata.copy_with_keyword_parameters(keyword_parameters)\n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, channel_metadata)\n vampytest.assert_eq(keyword_parameters, {})\n \n vampytest.assert_eq(copy.name, new_name)\n vampytest.assert_eq(copy.parent_id, new_parent_id)\n vampytest.assert_eq(\n copy.permission_overwrites,\n {permission_overwrite.target_id: permission_overwrite for permission_overwrite in new_permission_overwrites},\n )\n vampytest.assert_eq(copy.position, new_position)\n vampytest.assert_eq(copy.default_thread_auto_archive_after, new_default_thread_auto_archive_after)\n vampytest.assert_eq(copy.default_thread_slowmode, new_default_thread_slowmode)\n vampytest.assert_eq(copy.nsfw, new_nsfw)\n vampytest.assert_eq(copy.slowmode, new_slowmode)\n vampytest.assert_eq(copy.topic, new_topic)", "def test__ChannelMetadataGuildThreadAnnouncements__copy_with_keyword_parameters__1():\n old_name = 'alice'\n old_parent_id = 202304120071\n old_created_at = DateTime(2016, 4, 4)\n old_archived = False\n old_archived_at = DateTime(2017, 4, 4)\n old_auto_archive_after = 3600\n old_open = True\n old_owner_id = 202304120072\n old_slowmode = 30\n \n new_name = 'emotion'\n new_parent_id = 202304120073\n new_created_at = DateTime(2016, 4, 5)\n new_archived = True\n new_archived_at = DateTime(2017, 4, 5)\n new_auto_archive_after = 604800\n new_open = False\n new_owner_id = 202304120074\n new_slowmode = 31\n \n channel_metadata = ChannelMetadataGuildThreadAnnouncements(\n name = old_name,\n parent_id = old_parent_id,\n created_at = old_created_at,\n archived = old_archived,\n archived_at = old_archived_at,\n auto_archive_after = old_auto_archive_after,\n open = old_open,\n owner_id = old_owner_id,\n slowmode = old_slowmode,\n )\n \n keyword_parameters = {\n 'name': new_name,\n 'parent_id': new_parent_id,\n 'created_at': new_created_at,\n 'archived': new_archived,\n 'archived_at': new_archived_at,\n 'auto_archive_after': new_auto_archive_after,\n 'open': new_open,\n 'owner_id': new_owner_id,\n 'slowmode': new_slowmode,\n }\n \n copy = channel_metadata.copy_with_keyword_parameters(keyword_parameters)\n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, channel_metadata)\n vampytest.assert_eq(keyword_parameters, {})\n \n vampytest.assert_eq(copy.name, new_name)\n vampytest.assert_eq(copy.parent_id, new_parent_id)\n vampytest.assert_eq(copy._created_at, new_created_at)\n vampytest.assert_eq(copy.archived, new_archived)\n vampytest.assert_eq(copy.archived_at, new_archived_at)\n vampytest.assert_eq(copy.auto_archive_after, new_auto_archive_after)\n vampytest.assert_eq(copy.open, new_open)\n vampytest.assert_eq(copy.owner_id, new_owner_id)\n vampytest.assert_eq(copy.slowmode, new_slowmode)", "def test__ChannelMetadataGuildThreadAnnouncements__copy_with_keyword_parameters__0():\n name = 'alice'\n parent_id = 202304120069\n created_at = DateTime(2016, 4, 4)\n archived = False\n archived_at = DateTime(2017, 4, 4)\n auto_archive_after = 3600\n open_ = True\n owner_id = 202304120070\n slowmode = 30\n \n channel_metadata = ChannelMetadataGuildThreadAnnouncements(\n name = name,\n parent_id = parent_id,\n created_at = created_at,\n archived = archived,\n archived_at = archived_at,\n auto_archive_after = auto_archive_after,\n open = open_,\n owner_id = owner_id,\n slowmode = 
slowmode,\n )\n \n keyword_parameters = {}\n copy = channel_metadata.copy_with_keyword_parameters(keyword_parameters)\n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, channel_metadata)\n vampytest.assert_eq(keyword_parameters, {})\n \n vampytest.assert_eq(copy, channel_metadata)", "def test__ScheduledEventEntityMetadataLocation__copy_with__0():\n location = 'Koishi WonderLand'\n \n entity_metadata = ScheduledEventEntityMetadataLocation(\n location = location,\n )\n copy = entity_metadata.copy_with()\n \n _assert_fields_set(copy)\n vampytest.assert_eq(copy, entity_metadata)\n vampytest.assert_is_not(copy, entity_metadata)", "def test__ScheduledEventEntityMetadataLocation__copy_with__1():\n old_location = 'Koishi WonderLand'\n \n new_location = 'Orin\\'s dance house'\n \n entity_metadata = ScheduledEventEntityMetadataLocation(\n location = old_location,\n )\n copy = entity_metadata.copy_with(\n location = new_location,\n )\n \n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, entity_metadata)\n vampytest.assert_eq(copy.location, new_location)", "def test__ActivityMetadataBase__from_keyword_parameters__2():\n keyword_parameters = {'name': ''}\n \n activity_metadata = ActivityMetadataBase.from_keyword_parameters(keyword_parameters)\n _assert_fields_set(activity_metadata)\n \n vampytest.assert_eq(keyword_parameters, {})", "def test__ActivityMetadataRich__from_keyword_parameters__0():\n keyword_parameters = {}\n activity_metadata = ActivityMetadataRich.from_keyword_parameters(keyword_parameters)\n _assert_fields_set(activity_metadata)\n \n vampytest.assert_eq(keyword_parameters, {})", "def test__ScheduledEventEntityMetadataLocation__copy():\n location = 'Koishi WonderLand'\n \n entity_metadata = ScheduledEventEntityMetadataLocation(\n location = location,\n )\n copy = entity_metadata.copy()\n \n _assert_fields_set(copy)\n vampytest.assert_eq(copy, entity_metadata)\n vampytest.assert_is_not(copy, entity_metadata)", "def test__IntegrationMetadataSubscription__from_keyword_parameters__0():\n integration_metadata = IntegrationMetadataSubscription.from_keyword_parameters({})\n _assert_fields_set(integration_metadata)", "def test__ChannelMetadataGuildThreadPublic__from_keyword_parameters__1():\n keyword_parameters = {}\n \n channel_metadata = ChannelMetadataGuildThreadPublic.from_keyword_parameters(keyword_parameters)\n _assert_fields_set(channel_metadata)\n vampytest.assert_eq(keyword_parameters, {})", "def test__ChannelMetadataGuildDirectory__from_keyword_parameters__0():\n parent_id = 202304110005\n name = 'Armelyrics'\n permission_overwrites = [\n PermissionOverwrite(202304110006, target_type = PermissionOverwriteTargetType.user)\n ]\n position = 7\n \n keyword_parameters = {\n 'parent_id': parent_id,\n 'name': name,\n 'permission_overwrites': permission_overwrites,\n 'position': position,\n }\n channel_metadata = ChannelMetadataGuildDirectory.from_keyword_parameters(keyword_parameters)\n _assert_fields_set(channel_metadata)\n vampytest.assert_eq(keyword_parameters, {})\n \n vampytest.assert_eq(channel_metadata.parent_id, parent_id)\n vampytest.assert_eq(channel_metadata.name, name)\n vampytest.assert_eq(\n channel_metadata.permission_overwrites,\n {permission_overwrite.target_id: permission_overwrite for permission_overwrite in permission_overwrites},\n )\n vampytest.assert_eq(channel_metadata.position, position)", "def test__ChannelMetadataGuildDirectory__from_keyword_parameters__1():\n keyword_parameters = {}\n \n channel_metadata = 
ChannelMetadataGuildDirectory.from_keyword_parameters(keyword_parameters)\n _assert_fields_set(channel_metadata)\n vampytest.assert_eq(keyword_parameters, {})", "def test__ChannelMetadataGuildCategory__from_keyword_parameters__0():\n parent_id = 202304110003\n name = 'Armelyrics'\n permission_overwrites = [\n PermissionOverwrite(202304110004, target_type = PermissionOverwriteTargetType.user)\n ]\n position = 7\n \n keyword_parameters = {\n 'parent_id': parent_id,\n 'name': name,\n 'permission_overwrites': permission_overwrites,\n 'position': position,\n }\n channel_metadata = ChannelMetadataGuildCategory.from_keyword_parameters(keyword_parameters)\n _assert_fields_set(channel_metadata)\n vampytest.assert_eq(keyword_parameters, {})\n \n vampytest.assert_eq(channel_metadata.parent_id, parent_id)\n vampytest.assert_eq(channel_metadata.name, name)\n vampytest.assert_eq(\n channel_metadata.permission_overwrites,\n {permission_overwrite.target_id: permission_overwrite for permission_overwrite in permission_overwrites},\n )\n vampytest.assert_eq(channel_metadata.position, position)", "def test__IntegrationMetadataSubscription__from_keyword_parameters__1():\n account = IntegrationAccount('hello', 'hell')\n expire_behavior = IntegrationExpireBehavior.kick\n expire_grace_period = 7\n revoked = True\n role_id = 202304080008\n subscriber_count = 100\n synced_at = DateTime(2016, 9, 9)\n syncing = True\n \n keyword_parameters = {\n 'account': account,\n 'expire_behavior': expire_behavior,\n 'expire_grace_period': expire_grace_period,\n 'revoked': revoked,\n 'role_id': role_id,\n 'subscriber_count': subscriber_count,\n 'synced_at': synced_at,\n 'syncing': syncing,\n }\n \n integration_metadata = IntegrationMetadataSubscription.from_keyword_parameters(keyword_parameters)\n _assert_fields_set(integration_metadata)\n vampytest.assert_eq(keyword_parameters, {})\n \n vampytest.assert_eq(integration_metadata.account, account)\n vampytest.assert_is(integration_metadata.expire_behavior, expire_behavior)\n vampytest.assert_eq(integration_metadata.expire_grace_period, expire_grace_period)\n vampytest.assert_eq(integration_metadata.revoked, revoked)\n vampytest.assert_eq(integration_metadata.role_id, role_id)\n vampytest.assert_eq(integration_metadata.subscriber_count, subscriber_count)\n vampytest.assert_eq(integration_metadata.synced_at, synced_at)\n vampytest.assert_eq(integration_metadata.syncing, syncing)", "def check_partial_keywords_for_new_model(self, **input_dict):\n model = input_dict[self.get_model_descriptor_name()]\n actual_dict = self.get_model_dict(model)\n for key in input_dict:\n if key not in actual_dict:\n raise ie.InputArgumentsError(\n 'Input Arguments Error',\n input_dict,\n actual_dict)\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether ``ScheduledEventEntityMetadataLocation.copy_with_keyword_parameters`` works as intended.
def test__ScheduledEventEntityMetadataLocation__copy_with_keyword_parameters__1():
    old_location = 'Koishi WonderLand'
    
    new_location = 'Orin\'s dance house'
    
    entity_metadata = ScheduledEventEntityMetadataLocation(
        location = old_location,
    )
    copy = entity_metadata.copy_with_keyword_parameters({
        'location': new_location,
    })
    
    _assert_fields_set(copy)
    vampytest.assert_is_not(copy, entity_metadata)
    vampytest.assert_eq(copy.location, new_location)
[ "def test__ScheduledEventEntityMetadataLocation__copy_with_keyword_parameters__0():\n location = 'Koishi WonderLand'\n \n entity_metadata = ScheduledEventEntityMetadataLocation(\n location = location,\n )\n copy = entity_metadata.copy_with_keyword_parameters({})\n \n _assert_fields_set(copy)\n vampytest.assert_eq(copy, entity_metadata)\n vampytest.assert_is_not(copy, entity_metadata)", "def test__ChannelMetadataBase__copy_with_keyword_parameters__0():\n channel_metadata = ChannelMetadataBase()\n \n keyword_parameters = {}\n copy = channel_metadata.copy_with_keyword_parameters(keyword_parameters)\n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, channel_metadata)\n vampytest.assert_eq(keyword_parameters, {})\n \n vampytest.assert_eq(copy, channel_metadata)", "def test__ChannelMetadataGuildAnnouncements__copy_with_keyword_parameters__0():\n name = 'alice'\n parent_id = 202304130064\n permission_overwrites = [\n PermissionOverwrite(202304130065, target_type = PermissionOverwriteTargetType.user)\n ]\n position = 7\n default_thread_auto_archive_after = 86400\n default_thread_slowmode = 60\n nsfw = True\n slowmode = 30\n topic = 'rin'\n \n channel_metadata = ChannelMetadataGuildAnnouncements(\n name = name,\n parent_id = parent_id,\n permission_overwrites = permission_overwrites,\n position = position,\n default_thread_auto_archive_after = default_thread_auto_archive_after,\n default_thread_slowmode = default_thread_slowmode,\n nsfw = nsfw,\n slowmode = slowmode,\n topic = topic,\n )\n \n keyword_parameters = {}\n copy = channel_metadata.copy_with_keyword_parameters(keyword_parameters)\n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, channel_metadata)\n vampytest.assert_eq(keyword_parameters, {})\n \n vampytest.assert_eq(copy, channel_metadata)", "def test__ActivityMetadataBase__from_keyword_parameters__1():\n keyword_parameters = {'name': 'ara'}\n keyword_parameters_copy = keyword_parameters.copy()\n \n activity_metadata = ActivityMetadataBase.from_keyword_parameters(keyword_parameters)\n _assert_fields_set(activity_metadata)\n \n vampytest.assert_eq(keyword_parameters, keyword_parameters_copy)", "def test__ChannelMetadataGuildAnnouncements__copy_with_keyword_parameters__1():\n old_name = 'alice'\n old_parent_id = 202304130066\n old_permission_overwrites = [\n PermissionOverwrite(202304130067, target_type = PermissionOverwriteTargetType.user)\n ]\n old_position = 7\n old_default_thread_auto_archive_after = 86400\n old_default_thread_slowmode = 60\n old_nsfw = True\n old_slowmode = 30\n old_topic = 'rin'\n \n new_name = 'emotion'\n new_parent_id = 202304130068\n new_permission_overwrites = [\n PermissionOverwrite(202304130069, target_type = PermissionOverwriteTargetType.role)\n ]\n new_position = 5\n new_default_thread_auto_archive_after = 3600\n new_default_thread_slowmode = 69\n new_nsfw = False\n new_slowmode = 33\n new_topic = 'orin'\n \n channel_metadata = ChannelMetadataGuildAnnouncements(\n name = old_name,\n parent_id = old_parent_id,\n permission_overwrites = old_permission_overwrites,\n position = old_position,\n default_thread_auto_archive_after = old_default_thread_auto_archive_after,\n default_thread_slowmode = old_default_thread_slowmode,\n nsfw = old_nsfw,\n slowmode = old_slowmode,\n topic = old_topic,\n )\n \n keyword_parameters = {\n 'name': new_name,\n 'parent_id': new_parent_id,\n 'permission_overwrites': new_permission_overwrites,\n 'position': new_position,\n 'default_thread_auto_archive_after': new_default_thread_auto_archive_after,\n 
'default_thread_slowmode': new_default_thread_slowmode,\n 'nsfw': new_nsfw,\n 'slowmode': new_slowmode,\n 'topic': new_topic,\n }\n \n copy = channel_metadata.copy_with_keyword_parameters(keyword_parameters)\n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, channel_metadata)\n vampytest.assert_eq(keyword_parameters, {})\n \n vampytest.assert_eq(copy.name, new_name)\n vampytest.assert_eq(copy.parent_id, new_parent_id)\n vampytest.assert_eq(\n copy.permission_overwrites,\n {permission_overwrite.target_id: permission_overwrite for permission_overwrite in new_permission_overwrites},\n )\n vampytest.assert_eq(copy.position, new_position)\n vampytest.assert_eq(copy.default_thread_auto_archive_after, new_default_thread_auto_archive_after)\n vampytest.assert_eq(copy.default_thread_slowmode, new_default_thread_slowmode)\n vampytest.assert_eq(copy.nsfw, new_nsfw)\n vampytest.assert_eq(copy.slowmode, new_slowmode)\n vampytest.assert_eq(copy.topic, new_topic)", "def test__ChannelMetadataGuildThreadAnnouncements__copy_with_keyword_parameters__1():\n old_name = 'alice'\n old_parent_id = 202304120071\n old_created_at = DateTime(2016, 4, 4)\n old_archived = False\n old_archived_at = DateTime(2017, 4, 4)\n old_auto_archive_after = 3600\n old_open = True\n old_owner_id = 202304120072\n old_slowmode = 30\n \n new_name = 'emotion'\n new_parent_id = 202304120073\n new_created_at = DateTime(2016, 4, 5)\n new_archived = True\n new_archived_at = DateTime(2017, 4, 5)\n new_auto_archive_after = 604800\n new_open = False\n new_owner_id = 202304120074\n new_slowmode = 31\n \n channel_metadata = ChannelMetadataGuildThreadAnnouncements(\n name = old_name,\n parent_id = old_parent_id,\n created_at = old_created_at,\n archived = old_archived,\n archived_at = old_archived_at,\n auto_archive_after = old_auto_archive_after,\n open = old_open,\n owner_id = old_owner_id,\n slowmode = old_slowmode,\n )\n \n keyword_parameters = {\n 'name': new_name,\n 'parent_id': new_parent_id,\n 'created_at': new_created_at,\n 'archived': new_archived,\n 'archived_at': new_archived_at,\n 'auto_archive_after': new_auto_archive_after,\n 'open': new_open,\n 'owner_id': new_owner_id,\n 'slowmode': new_slowmode,\n }\n \n copy = channel_metadata.copy_with_keyword_parameters(keyword_parameters)\n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, channel_metadata)\n vampytest.assert_eq(keyword_parameters, {})\n \n vampytest.assert_eq(copy.name, new_name)\n vampytest.assert_eq(copy.parent_id, new_parent_id)\n vampytest.assert_eq(copy._created_at, new_created_at)\n vampytest.assert_eq(copy.archived, new_archived)\n vampytest.assert_eq(copy.archived_at, new_archived_at)\n vampytest.assert_eq(copy.auto_archive_after, new_auto_archive_after)\n vampytest.assert_eq(copy.open, new_open)\n vampytest.assert_eq(copy.owner_id, new_owner_id)\n vampytest.assert_eq(copy.slowmode, new_slowmode)", "def test__ChannelMetadataGuildThreadAnnouncements__copy_with_keyword_parameters__0():\n name = 'alice'\n parent_id = 202304120069\n created_at = DateTime(2016, 4, 4)\n archived = False\n archived_at = DateTime(2017, 4, 4)\n auto_archive_after = 3600\n open_ = True\n owner_id = 202304120070\n slowmode = 30\n \n channel_metadata = ChannelMetadataGuildThreadAnnouncements(\n name = name,\n parent_id = parent_id,\n created_at = created_at,\n archived = archived,\n archived_at = archived_at,\n auto_archive_after = auto_archive_after,\n open = open_,\n owner_id = owner_id,\n slowmode = slowmode,\n )\n \n keyword_parameters = {}\n copy = 
channel_metadata.copy_with_keyword_parameters(keyword_parameters)\n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, channel_metadata)\n vampytest.assert_eq(keyword_parameters, {})\n \n vampytest.assert_eq(copy, channel_metadata)", "def test__ScheduledEventEntityMetadataLocation__copy_with__0():\n location = 'Koishi WonderLand'\n \n entity_metadata = ScheduledEventEntityMetadataLocation(\n location = location,\n )\n copy = entity_metadata.copy_with()\n \n _assert_fields_set(copy)\n vampytest.assert_eq(copy, entity_metadata)\n vampytest.assert_is_not(copy, entity_metadata)", "def test__ScheduledEventEntityMetadataLocation__copy_with__1():\n old_location = 'Koishi WonderLand'\n \n new_location = 'Orin\\'s dance house'\n \n entity_metadata = ScheduledEventEntityMetadataLocation(\n location = old_location,\n )\n copy = entity_metadata.copy_with(\n location = new_location,\n )\n \n _assert_fields_set(copy)\n vampytest.assert_is_not(copy, entity_metadata)\n vampytest.assert_eq(copy.location, new_location)", "def test__ActivityMetadataBase__from_keyword_parameters__2():\n keyword_parameters = {'name': ''}\n \n activity_metadata = ActivityMetadataBase.from_keyword_parameters(keyword_parameters)\n _assert_fields_set(activity_metadata)\n \n vampytest.assert_eq(keyword_parameters, {})", "def test__ActivityMetadataRich__from_keyword_parameters__0():\n keyword_parameters = {}\n activity_metadata = ActivityMetadataRich.from_keyword_parameters(keyword_parameters)\n _assert_fields_set(activity_metadata)\n \n vampytest.assert_eq(keyword_parameters, {})", "def test__ScheduledEventEntityMetadataLocation__copy():\n location = 'Koishi WonderLand'\n \n entity_metadata = ScheduledEventEntityMetadataLocation(\n location = location,\n )\n copy = entity_metadata.copy()\n \n _assert_fields_set(copy)\n vampytest.assert_eq(copy, entity_metadata)\n vampytest.assert_is_not(copy, entity_metadata)", "def test__IntegrationMetadataSubscription__from_keyword_parameters__0():\n integration_metadata = IntegrationMetadataSubscription.from_keyword_parameters({})\n _assert_fields_set(integration_metadata)", "def test__ChannelMetadataGuildThreadPublic__from_keyword_parameters__1():\n keyword_parameters = {}\n \n channel_metadata = ChannelMetadataGuildThreadPublic.from_keyword_parameters(keyword_parameters)\n _assert_fields_set(channel_metadata)\n vampytest.assert_eq(keyword_parameters, {})", "def test__ChannelMetadataGuildDirectory__from_keyword_parameters__0():\n parent_id = 202304110005\n name = 'Armelyrics'\n permission_overwrites = [\n PermissionOverwrite(202304110006, target_type = PermissionOverwriteTargetType.user)\n ]\n position = 7\n \n keyword_parameters = {\n 'parent_id': parent_id,\n 'name': name,\n 'permission_overwrites': permission_overwrites,\n 'position': position,\n }\n channel_metadata = ChannelMetadataGuildDirectory.from_keyword_parameters(keyword_parameters)\n _assert_fields_set(channel_metadata)\n vampytest.assert_eq(keyword_parameters, {})\n \n vampytest.assert_eq(channel_metadata.parent_id, parent_id)\n vampytest.assert_eq(channel_metadata.name, name)\n vampytest.assert_eq(\n channel_metadata.permission_overwrites,\n {permission_overwrite.target_id: permission_overwrite for permission_overwrite in permission_overwrites},\n )\n vampytest.assert_eq(channel_metadata.position, position)", "def test__ChannelMetadataGuildDirectory__from_keyword_parameters__1():\n keyword_parameters = {}\n \n channel_metadata = ChannelMetadataGuildDirectory.from_keyword_parameters(keyword_parameters)\n 
_assert_fields_set(channel_metadata)\n vampytest.assert_eq(keyword_parameters, {})", "def test__ChannelMetadataGuildCategory__from_keyword_parameters__0():\n parent_id = 202304110003\n name = 'Armelyrics'\n permission_overwrites = [\n PermissionOverwrite(202304110004, target_type = PermissionOverwriteTargetType.user)\n ]\n position = 7\n \n keyword_parameters = {\n 'parent_id': parent_id,\n 'name': name,\n 'permission_overwrites': permission_overwrites,\n 'position': position,\n }\n channel_metadata = ChannelMetadataGuildCategory.from_keyword_parameters(keyword_parameters)\n _assert_fields_set(channel_metadata)\n vampytest.assert_eq(keyword_parameters, {})\n \n vampytest.assert_eq(channel_metadata.parent_id, parent_id)\n vampytest.assert_eq(channel_metadata.name, name)\n vampytest.assert_eq(\n channel_metadata.permission_overwrites,\n {permission_overwrite.target_id: permission_overwrite for permission_overwrite in permission_overwrites},\n )\n vampytest.assert_eq(channel_metadata.position, position)", "def test__IntegrationMetadataSubscription__from_keyword_parameters__1():\n account = IntegrationAccount('hello', 'hell')\n expire_behavior = IntegrationExpireBehavior.kick\n expire_grace_period = 7\n revoked = True\n role_id = 202304080008\n subscriber_count = 100\n synced_at = DateTime(2016, 9, 9)\n syncing = True\n \n keyword_parameters = {\n 'account': account,\n 'expire_behavior': expire_behavior,\n 'expire_grace_period': expire_grace_period,\n 'revoked': revoked,\n 'role_id': role_id,\n 'subscriber_count': subscriber_count,\n 'synced_at': synced_at,\n 'syncing': syncing,\n }\n \n integration_metadata = IntegrationMetadataSubscription.from_keyword_parameters(keyword_parameters)\n _assert_fields_set(integration_metadata)\n vampytest.assert_eq(keyword_parameters, {})\n \n vampytest.assert_eq(integration_metadata.account, account)\n vampytest.assert_is(integration_metadata.expire_behavior, expire_behavior)\n vampytest.assert_eq(integration_metadata.expire_grace_period, expire_grace_period)\n vampytest.assert_eq(integration_metadata.revoked, revoked)\n vampytest.assert_eq(integration_metadata.role_id, role_id)\n vampytest.assert_eq(integration_metadata.subscriber_count, subscriber_count)\n vampytest.assert_eq(integration_metadata.synced_at, synced_at)\n vampytest.assert_eq(integration_metadata.syncing, syncing)", "def check_partial_keywords_for_new_model(self, **input_dict):\n model = input_dict[self.get_model_descriptor_name()]\n actual_dict = self.get_model_dict(model)\n for key in input_dict:\n if key not in actual_dict:\n raise ie.InputArgumentsError(\n 'Input Arguments Error',\n input_dict,\n actual_dict)\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Locks a file using POSIX locks.
def LockFile(fd):
    try:
        fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as err:
        if err.errno == errno.EAGAIN:
            raise errors.LockError("File already locked")
        raise
[ "def lockfile(fileobj, blocking=True, exclusive=True):\n import fcntl, time, random\n if exclusive:\n flags = fcntl.LOCK_EX\n else:\n flags = fcntl.LOCK_SH\n\n if blocking:\n fcntl.lockf(fileobj.fileno(), flags)\n else:\n flags |= fcntl.LOCK_NB\n fcntl.lockf(fileobj.fileno(), flags)", "def lock_file(filename, mode='r+', blocking=False):\n # TODO(wickman) We should probably adopt the lockfile project here as has\n # a platform-independent file locking implementation.\n if not HAS_FCNTL:\n raise RuntimeError('Interpreter does not support fcntl!')\n\n try:\n fp = open(filename, mode)\n except IOError:\n return None\n\n try:\n fcntl.flock(fp, fcntl.LOCK_EX | fcntl.LOCK_NB if not blocking else fcntl.LOCK_EX)\n except IOError as e:\n if e.errno in (errno.EACCES, errno.EAGAIN):\n fp.close()\n return False\n\n return fp", "def lock_file(fileno: int):\n try:\n # Try to lock file exclusively and in non-blocking fashion\n fcntl.flock(fileno, fcntl.LOCK_EX | fcntl.LOCK_NB)\n except IOError:\n return False\n else:\n return True", "def _attempt_lock(lock_file):\n umask_original = os.umask(0)\n try:\n fp = os.open(lock_file, os.O_WRONLY | os.O_CREAT, stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)\n finally:\n os.umask(umask_original)\n\n try:\n fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)\n except IOError:\n return False\n\n return True", "def _lock( self, fileref=None, filehash=None ):\n\t\treturn self.__setlock( True, fileref=fileref, filehash=filehash )", "def do_lock(filename):\n try:\n file = open(filename, \"w\")\n file.write(\"locked\\n\")\n file.close()\n print_with_timestamp(\"Locked via file: %s\" % filename)\n return True\n except IOError as err:\n bail_with_message(\"I/O error({0}): {1}\".format(err.errno, err.strerror))", "def Shared(self, blocking=False, timeout=None):\n self._flock(fcntl.LOCK_SH, blocking, timeout,\n \"Failed to lock %s in shared mode\" % self.filename)", "def wait_and_lock():\n # Waits forever to get a lock on the lockfile\n # If an unrelated error occures a exception is raised \n self._f = open(self._filename, 'w')\n while true:\n try:\n fcntl.flock(filename, fcntl.LOCK_EX | dcnt.LOCK_NM)\n return\n except IOError as e:\n if e.errno == errno.EAGAIN:\n # Do not raise error when waiting to aquire lock\n time.sleep(0.1)\n else\n # Raise on all unrelated errors\n raise", "def lockfile(path, max_retries=10, retry_delay=1, shared=False, error=None):\n tries = 1\n max_tries = 1 + max_retries\n path = path + '.lock'\n\n lock = None\n while lock is None and tries <= max_tries:\n try:\n lock = LockFile(path, shared=shared)\n except LockError:\n tries += 1\n if tries <= max_tries:\n time.sleep(retry_delay)\n\n try:\n if error and lock is None:\n raise error\n yield lock\n finally:\n if lock is not None:\n lock.close()", "def acquire_flock(fname):\n\n lockfile = None\n\n try:\n lockfile = open(fname, 'wb')\n fcntl.flock(lockfile.fileno(), fcntl.LOCK_EX)\n except:\n # Note: caller is assumed to treat None as error\n if lockfile is not None:\n lockfile.close()\n lockfile = None\n\n return lockfile", "def testReadLocks1(t, env):\n c = env.c1\n c.init_connection()\n file = c.homedir + [t.code]\n # owner1 creates a file\n fh1, stateid1 = c.create_confirm('owner1', file,\n access=OPEN4_SHARE_ACCESS_BOTH,\n deny=OPEN4_SHARE_DENY_NONE)\n # owner2 opens the file\n fh2, stateid2 = c.open_confirm('owner2', file,\n access=OPEN4_SHARE_ACCESS_BOTH,\n deny=OPEN4_SHARE_DENY_NONE)\n # owner1 read locks the file\n res1 = c.lock_file('owner1', fh1, stateid1, type=READ_LT)\n check(res1)\n # owner2 
read locks the file\n res2 = c.lock_file('owner2', fh2, stateid2, type=READ_LT)\n check(res2, msg=\"Getting read lock when another owner has read lock\")\n # owner1 write locks the file, should fail\n res1 = c.unlock_file(1, fh1, res1.lockid)\n check(res1)\n res1 = c.relock_file(2, fh1, res1.lockid, type=WRITE_LT)\n check(res1, NFS4ERR_DENIED,\n \"Getting write lock when another owner has read lock\")", "def flock(self, op: int) -> None:\n\n fcntl.flock(self.fd, op)", "def fileLocked(self, the_file, ctx=None):\n pass", "def islocked(fileobj):\n import fcntl\n\n flags = fcntl.LOCK_NB | fcntl.LOCK_EX\n try:\n fcntl.lockf(fileobj.fileno(), flags)\n except IOError, e:\n if e.strerror == \"Resource temporarily unavailable\":\n return True\n \n return False", "def __enter__(self):\n self._rpc_lock()\n old_mask = os.umask(0o077)\n try:\n trial_count = 0\n while self._fid is None and trial_count < 2:\n if os.path.exists(self._LOCK_PATH):\n # Rename existing file if it is not secure\n is_secure_path(self._LOCK_PATH)\n self._fid = open(self._LOCK_PATH, 'a+')\n if not is_secure_file(self._LOCK_PATH, self._fid):\n # File is insecure and was renamed, try again\n self._fid.close()\n self._fid = None\n trial_count += 1\n finally:\n os.umask(old_mask)\n if self._fid == None:\n self._rpc_unlock()\n raise RuntimeError('Unable to open write lock securely after two tries')\n # Advisory lock protects against simultaneous multi-process\n # modifications to the file, although we expect only one geopmd\n # process using this class.\n fcntl.lockf(self._fid, fcntl.LOCK_EX)\n self._fid.seek(0)\n return self", "def Exclusive(self, blocking=False, timeout=None):\n self._flock(fcntl.LOCK_EX, blocking, timeout,\n \"Failed to lock %s in exclusive mode\" % self.filename)", "def svn_fs_lock(*args) -> \"svn_lock_t **\":\n return _fs.svn_fs_lock(*args)", "def relock_file(self, seqid, fh, stateid,\n offset=0, len=0xffffffffffffffff, type=WRITE_LT):\n ops = [self.putfh_op(fh)]\n existing_lock_owner = exist_lock_owner4(stateid, seqid)\n locker = locker4(FALSE, lock_owner=existing_lock_owner)\n ops += [self.lock_op(type, FALSE, offset, len, locker)]\n res = self.compound(ops)\n if res.status==NFS4_OK:\n res.lockid = res.resarray[-1].switch.switch.lock_stateid\n return res", "def test_write_locked(self):\n self.create_file_blank(self.FILENAME)\n self.lock_file(self.FILENAME)\n try:\n fileio.writeline(self.FILENAME, 1, \"data\")\n self.fail(\"Did not get expected exception\")\n except:\n pass # print(\"expected exception\")\n finally:\n self.unlock_file(self.FILENAME)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Locks the file in exclusive mode.
def Exclusive(self, blocking=False, timeout=None): self._flock(fcntl.LOCK_EX, blocking, timeout, "Failed to lock %s in exclusive mode" % self.filename)
[ "def lockfile(fileobj, blocking=True, exclusive=True):\n import fcntl, time, random\n if exclusive:\n flags = fcntl.LOCK_EX\n else:\n flags = fcntl.LOCK_SH\n\n if blocking:\n fcntl.lockf(fileobj.fileno(), flags)\n else:\n flags |= fcntl.LOCK_NB\n fcntl.lockf(fileobj.fileno(), flags)", "def fileLocked(self, the_file, ctx=None):\n pass", "def do_lock(filename):\n try:\n file = open(filename, \"w\")\n file.write(\"locked\\n\")\n file.close()\n print_with_timestamp(\"Locked via file: %s\" % filename)\n return True\n except IOError as err:\n bail_with_message(\"I/O error({0}): {1}\".format(err.errno, err.strerror))", "def _lock( self, fileref=None, filehash=None ):\n\t\treturn self.__setlock( True, fileref=fileref, filehash=filehash )", "def Shared(self, blocking=False, timeout=None):\n self._flock(fcntl.LOCK_SH, blocking, timeout,\n \"Failed to lock %s in shared mode\" % self.filename)", "def LockFile(fd):\n try:\n fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)\n except IOError as err:\n if err.errno == errno.EAGAIN:\n raise errors.LockError(\"File already locked\")\n raise", "def wait_and_lock():\n # Waits forever to get a lock on the lockfile\n # If an unrelated error occures a exception is raised \n self._f = open(self._filename, 'w')\n while true:\n try:\n fcntl.flock(filename, fcntl.LOCK_EX | dcnt.LOCK_NM)\n return\n except IOError as e:\n if e.errno == errno.EAGAIN:\n # Do not raise error when waiting to aquire lock\n time.sleep(0.1)\n else\n # Raise on all unrelated errors\n raise", "def _attempt_lock(lock_file):\n umask_original = os.umask(0)\n try:\n fp = os.open(lock_file, os.O_WRONLY | os.O_CREAT, stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)\n finally:\n os.umask(umask_original)\n\n try:\n fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)\n except IOError:\n return False\n\n return True", "def test_write_locked(self):\n self.create_file_blank(self.FILENAME)\n self.lock_file(self.FILENAME)\n try:\n fileio.writeline(self.FILENAME, 1, \"data\")\n self.fail(\"Did not get expected exception\")\n except:\n pass # print(\"expected exception\")\n finally:\n self.unlock_file(self.FILENAME)", "def lock_file(filename, mode='r+', blocking=False):\n # TODO(wickman) We should probably adopt the lockfile project here as has\n # a platform-independent file locking implementation.\n if not HAS_FCNTL:\n raise RuntimeError('Interpreter does not support fcntl!')\n\n try:\n fp = open(filename, mode)\n except IOError:\n return None\n\n try:\n fcntl.flock(fp, fcntl.LOCK_EX | fcntl.LOCK_NB if not blocking else fcntl.LOCK_EX)\n except IOError as e:\n if e.errno in (errno.EACCES, errno.EAGAIN):\n fp.close()\n return False\n\n return fp", "def lock_file(fileno: int):\n try:\n # Try to lock file exclusively and in non-blocking fashion\n fcntl.flock(fileno, fcntl.LOCK_EX | fcntl.LOCK_NB)\n except IOError:\n return False\n else:\n return True", "def CheckUnlockWithoutLock(self):\n self.manager.unlock(\"filename\", LOCK_NONE, \"client\")", "def __enter__(self):\n self._rpc_lock()\n old_mask = os.umask(0o077)\n try:\n trial_count = 0\n while self._fid is None and trial_count < 2:\n if os.path.exists(self._LOCK_PATH):\n # Rename existing file if it is not secure\n is_secure_path(self._LOCK_PATH)\n self._fid = open(self._LOCK_PATH, 'a+')\n if not is_secure_file(self._LOCK_PATH, self._fid):\n # File is insecure and was renamed, try again\n self._fid.close()\n self._fid = None\n trial_count += 1\n finally:\n os.umask(old_mask)\n if self._fid == None:\n self._rpc_unlock()\n raise RuntimeError('Unable to open write lock 
securely after two tries')\n # Advisory lock protects against simultaneous multi-process\n # modifications to the file, although we expect only one geopmd\n # process using this class.\n fcntl.lockf(self._fid, fcntl.LOCK_EX)\n self._fid.seek(0)\n return self", "def CheckExclusiveBlocksShared(self):\n self.manager.lock(self.lockfunc, \"filename\", LOCK_EXCLUSIVE, \"exclusive\")\n\n def shared_locker():\n self.manager.lock(self.lockfunc, \"filename\", LOCK_SHARED, \"shared\")\n t = threading.Thread(target=shared_locker)\n t.start()\n t.join(0.1)\n self._print(self.manager)\n self.assertTrue(t.is_alive())\n\n self.manager.unlock(\"filename\", LOCK_NONE, \"exclusive\")\n t.join()\n self._print(self.manager)\n self.assertFalse(self.manager.is_idle())", "def flock(self, op: int) -> None:\n\n fcntl.flock(self.fd, op)", "def lock(self):\n self.locked = True", "def test_LockExists():\n doc_id = 1111111111\n file_type = 'a'\n mode = 'w'\n\n # Lock the file\n file1 = File(doc_id, file_type, mode)\n file1.lock_wait_interval = 0.001\n file1.write('bbc')\n\n # Will delete lock and backup file\n file2 = File(doc_id, file_type, 'r')\n file2.lock_wait_interval = 0.001\n file2.read()\n file2.close()\n\n # # Clean test file\n # filename = file1.filename\n # file1._remove()\n # file1.close()\n # assert not os.path.exists(filename)", "def unlockOTL(filename):\n con = lite.connect(database)\n with con:\n cur = con.cursor()\n cur.execute(\"UPDATE otl_table SET locked=0, lockedby='' WHERE filename='\"+filename+\"'\")\n con.commit()\n con.close()", "def lock(self) -> None:\n self._locked = True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Locks the file in shared mode.
def Shared(self, blocking=False, timeout=None): self._flock(fcntl.LOCK_SH, blocking, timeout, "Failed to lock %s in shared mode" % self.filename)
[ "def _lock( self, fileref=None, filehash=None ):\n\t\treturn self.__setlock( True, fileref=fileref, filehash=filehash )", "def lockfile(fileobj, blocking=True, exclusive=True):\n import fcntl, time, random\n if exclusive:\n flags = fcntl.LOCK_EX\n else:\n flags = fcntl.LOCK_SH\n\n if blocking:\n fcntl.lockf(fileobj.fileno(), flags)\n else:\n flags |= fcntl.LOCK_NB\n fcntl.lockf(fileobj.fileno(), flags)", "def CheckExclusiveBlocksShared(self):\n self.manager.lock(self.lockfunc, \"filename\", LOCK_EXCLUSIVE, \"exclusive\")\n\n def shared_locker():\n self.manager.lock(self.lockfunc, \"filename\", LOCK_SHARED, \"shared\")\n t = threading.Thread(target=shared_locker)\n t.start()\n t.join(0.1)\n self._print(self.manager)\n self.assertTrue(t.is_alive())\n\n self.manager.unlock(\"filename\", LOCK_NONE, \"exclusive\")\n t.join()\n self._print(self.manager)\n self.assertFalse(self.manager.is_idle())", "def fileLocked(self, the_file, ctx=None):\n pass", "def __enter__(self):\n self._rpc_lock()\n old_mask = os.umask(0o077)\n try:\n trial_count = 0\n while self._fid is None and trial_count < 2:\n if os.path.exists(self._LOCK_PATH):\n # Rename existing file if it is not secure\n is_secure_path(self._LOCK_PATH)\n self._fid = open(self._LOCK_PATH, 'a+')\n if not is_secure_file(self._LOCK_PATH, self._fid):\n # File is insecure and was renamed, try again\n self._fid.close()\n self._fid = None\n trial_count += 1\n finally:\n os.umask(old_mask)\n if self._fid == None:\n self._rpc_unlock()\n raise RuntimeError('Unable to open write lock securely after two tries')\n # Advisory lock protects against simultaneous multi-process\n # modifications to the file, although we expect only one geopmd\n # process using this class.\n fcntl.lockf(self._fid, fcntl.LOCK_EX)\n self._fid.seek(0)\n return self", "def flock(self, op: int) -> None:\n\n fcntl.flock(self.fd, op)", "def LockFile(fd):\n try:\n fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)\n except IOError as err:\n if err.errno == errno.EAGAIN:\n raise errors.LockError(\"File already locked\")\n raise", "def CheckSharedLocks(self):\n for client in range(10):\n self.manager.lock(self.lockfunc, \"filename\", LOCK_SHARED, client)\n self._print(self.manager)\n for client in range(10):\n self.manager.unlock(\"filename\", LOCK_NONE, client)\n self._print(self.manager)\n self.assertTrue(self.manager.is_idle())", "def do_lock(filename):\n try:\n file = open(filename, \"w\")\n file.write(\"locked\\n\")\n file.close()\n print_with_timestamp(\"Locked via file: %s\" % filename)\n return True\n except IOError as err:\n bail_with_message(\"I/O error({0}): {1}\".format(err.errno, err.strerror))", "def open_shared(fn, mode=\"r\", buffering=-1):\n logger.debug(\"request to open shared file %s\", fn)\n base_dir = open_shared.base_dir\n\n if not validFilename(fn) or fn.startswith(\"/\"):\n raise NotFoundError(\"Invalid file name %s!\" % fn)\n\n to_open = os.path.join(base_dir, fn)\n logger.debug(\"request to open file %s\", to_open)\n return open(to_open, mode, buffering)", "def lock(self):\n self.locked = True", "def lock():\n ierr = c_int()\n lib.gmshFltkLock(\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshFltkLock returned non-zero error code: \",\n ierr.value)", "def testLockowner(self):\n self.fh, self.stateid = self.ncl.create_confirm()\n lockid = self.ncl.lock_file(self.fh, self.stateid, 25, 75)\n\n # Close file\n self.ncl.close_file(self.fh, self.stateid)\n\n # Attempt to keep using lockowner\n lockid = self.ncl.unlock_file(self.fh, lockid, 1, 25, 75,\n 
error=[NFS4ERR_BAD_STATEID])", "def lock_media(self):\n self._call(\"lockMedia\")", "def lock_file(filename, mode='r+', blocking=False):\n # TODO(wickman) We should probably adopt the lockfile project here as has\n # a platform-independent file locking implementation.\n if not HAS_FCNTL:\n raise RuntimeError('Interpreter does not support fcntl!')\n\n try:\n fp = open(filename, mode)\n except IOError:\n return None\n\n try:\n fcntl.flock(fp, fcntl.LOCK_EX | fcntl.LOCK_NB if not blocking else fcntl.LOCK_EX)\n except IOError as e:\n if e.errno in (errno.EACCES, errno.EAGAIN):\n fp.close()\n return False\n\n return fp", "def wait_and_lock():\n # Waits forever to get a lock on the lockfile\n # If an unrelated error occures a exception is raised \n self._f = open(self._filename, 'w')\n while true:\n try:\n fcntl.flock(filename, fcntl.LOCK_EX | dcnt.LOCK_NM)\n return\n except IOError as e:\n if e.errno == errno.EAGAIN:\n # Do not raise error when waiting to aquire lock\n time.sleep(0.1)\n else\n # Raise on all unrelated errors\n raise", "def _attempt_lock(lock_file):\n umask_original = os.umask(0)\n try:\n fp = os.open(lock_file, os.O_WRONLY | os.O_CREAT, stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)\n finally:\n os.umask(umask_original)\n\n try:\n fcntl.lockf(fp, fcntl.LOCK_EX | fcntl.LOCK_NB)\n except IOError:\n return False\n\n return True", "def __lock_catalog(self):\n\n # XXX need filesystem lock too?\n self.__lock.acquire()", "def testReadLocks1(t, env):\n c = env.c1\n c.init_connection()\n file = c.homedir + [t.code]\n # owner1 creates a file\n fh1, stateid1 = c.create_confirm('owner1', file,\n access=OPEN4_SHARE_ACCESS_BOTH,\n deny=OPEN4_SHARE_DENY_NONE)\n # owner2 opens the file\n fh2, stateid2 = c.open_confirm('owner2', file,\n access=OPEN4_SHARE_ACCESS_BOTH,\n deny=OPEN4_SHARE_DENY_NONE)\n # owner1 read locks the file\n res1 = c.lock_file('owner1', fh1, stateid1, type=READ_LT)\n check(res1)\n # owner2 read locks the file\n res2 = c.lock_file('owner2', fh2, stateid2, type=READ_LT)\n check(res2, msg=\"Getting read lock when another owner has read lock\")\n # owner1 write locks the file, should fail\n res1 = c.unlock_file(1, fh1, res1.lockid)\n check(res1)\n res1 = c.relock_file(2, fh1, res1.lockid, type=WRITE_LT)\n check(res1, NFS4ERR_DENIED,\n \"Getting write lock when another owner has read lock\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes the hat by creating a channel mask and returning all important information.
def initHat(channels): mcc = mcc118(0) options = OptionFlags.CONTINUOUS channel_mask = chan_list_to_mask(channels) numberOfChannels = len(channels) return mcc, channel_mask, options, numberOfChannels
[ "def _init_h(self):\n self.H = np.random.random((self._num_bases, self._num_samples)) + 0.2", "def _initialize_heatmap(self):\n return np.zeros(shape=self.shape).astype(np.uint8)", "def _hash_init(self):\r\n # Initialize the indices and data dependencies.\r\n self.rotor = 1\r\n self.ratchet = 3\r\n self.avalanche = 5\r\n self.last_plain = 7\r\n self.last_cipher = 11\r\n\r\n # Start with cards all in inverse order.\r\n self.cards = list(range(255, -1, -1))", "def prepare_hamachi(self):", "def __init__(self, channel):\n self.channel = channel\n\n ## Internal state management\n # True if CMD_FLASH_INITIAL has been run\n self._flash_init = False\n # True if the RAM kernel itself has been loaded.x\n self._kernel_init = False", "def initialize_from_detector(self, detector):\n detector.initialize_channel_data(self)", "def __init__(self, *args):\n _snap.TCh_swiginit(self,_snap.new_TCh(*args))", "def _initialize_layer_stack(self):\n self.layer_stack = [(l, (pl, nl)) if self.pdk[nl]['Direction'] == 'h' else (l, (nl, pl)) \\\n for l, (pl, nl) in self.pdk.get_via_stack() if l.startswith('V')]", "def __init__(self, dim):\n self.dim = dim\n self.orient = 0 # default to 0\n self.bit_index = 0\n self.fig = go.Figure(\n layout = go.Layout(title=\"3D Spatial Mapping of Randomly Generated 1D Bitarray using Hilbert's Space Filling Curve.\")\n )\n \n # entry check to hilberts_curve to ensure dim parameter is a power of 2\n if np.log2(self.dim) % 1 != 0:\n raise ValueError\n \n # Generate a 3D matrix of size dim that maps 1D bitarray indices to Hilberts space filling curve \n print(\"\\nGenerating Hilbert Curve...\")\n self.HC = np.zeros((self.dim,self.dim,self.dim), dtype=int)\n self.hilbert_curve(dim,0,0,0,1,0,0,0,1,0,0,0,1)\n print(bcolors.OKGREEN + \"Hilbert curve matrix (HC) attribute successfully initialized.\" + bcolors.ENDC)\n\n # dereference bit_index counter for HC\n del self.bit_index\n\n # construct anti-diagonal identity matrix J\n self.J = np.eye(self.dim)\n for i in range(int(self.dim/2)):\n self.J[:,[0+i,self.dim-1-i]] = self.J[:,[self.dim-1-i,0+i]]", "def __init__(self, p, verbose = 0):\n super(HadamardOpXi16, self).__init__(p, 0, verbose)\n # Next set sizes for Hadamard-like matrices\n self.reset_vars(self.LOG_INT_FIELDS + 2)\n # Next set positions where Hadamard operation muse be done\n self.hadamard_operations = 3 + (3 << self.LOG_INT_FIELDS)\n # set masks for diagonal operation\n self.MASKS = [self.smask(self.P, range(0,24,4))]\n self.MASKS.append(~self.MASKS[0] & self.smask(self.P, range(24)))\n # make tables and directives for code generation\n #self.tables.update(self.make_tables())\n self.directives.update(self.make_directives())", "def setup(self):\n d = self._setup_convergences()\n return d.addErrback(self.log.err, \"selfheal-setup-err\")", "def __init__(self):\n # Create a initialized state map where all tiles are assumed unknown\n self._state = [TileState.Unknown] * StateMap.TILE_NUMBER\n self._state.append(False) # isClaim bit\n self._state.append(False) # Claim action bit", "def __init__(self, deviceIndex, featureIndex):\n super(HiResWheel, self).__init__()\n\n self.deviceIndex = deviceIndex\n self.featureIndex = featureIndex", "def __init__(self, ctl1, ctl2, ctl3, ctl4, ctl5, ctl6, ctl7, tgt, circ=None):\n super().__init__(\"c7x\", [], [ctl1, ctl2, ctl3, ctl4, ctl5, ctl6, ctl7, tgt], circ)\n self.h(tgt)\n self.c7z(ctl1, ctl2, ctl3, ctl4, ctl5, ctl6, ctl7, tgt)\n self.h(tgt)", "def convert_to_hic_format(self):\n\n if self.cfg.tal_mode == \"wt\":\n hek_mat = 
pd.read_csv(self.hek_file, sep=\"\\t\")\n elif self.cfg.tal_mode == \"tal1_ko\":\n hek_mat = pd.read_csv(self.tal1ko_file, sep=\"\\t\")\n elif self.cfg.tal_mode == \"lmo2_ko\":\n hek_mat = pd.read_csv(self.lmo2ko_file, sep=\"\\t\")\n\n \"get positions\"\n index, chr_list = self.change_index(list(hek_mat.index))\n columns, _ = self.change_index(hek_mat.columns)\n\n \"assign rows, columns and chr\"\n hek_mat.index = index\n hek_mat.columns = columns\n hek_mat[\"chr\"] = chr_list\n\n \"get matrices for TAL1 and LMO2\"\n tal1_mat = hek_mat.loc[hek_mat[\"chr\"] == \"chr1\"]\n tal1_mat = tal1_mat.iloc[:, 0:285]\n lmo2_mat = hek_mat.loc[hek_mat[\"chr\"] == \"chr11\"]\n lmo2_mat = lmo2_mat.iloc[:, 286:632]\n tal1_mat = tal1_mat.groupby(level=0, axis=1).sum()\n tal1_mat = tal1_mat.groupby(level=0, axis=0).sum()\n lmo2_mat = lmo2_mat.groupby(level=0, axis=1).sum()\n lmo2_mat = lmo2_mat.groupby(level=0, axis=0).sum()\n\n \"prepare data in the form of Hi-C\"\n tal_i = list(tal1_mat.index)\n tal_j = tal1_mat.columns\n lmo2_i = list(lmo2_mat.index)\n lmo2_j = lmo2_mat.columns\n\n tal_df = pd.DataFrame(columns=[\"i\", \"j\", \"v\"])\n for i in tal_i:\n for j in tal_j:\n tal_df = tal_df.append({\"i\": i, \"j\": j, \"v\": tal1_mat.loc[i][j]}, ignore_index=True)\n\n lmo2_df = pd.DataFrame(columns=[\"i\", \"j\", \"v\"])\n for i in lmo2_i:\n for j in lmo2_j:\n lmo2_df = lmo2_df.append({\"i\": i, \"j\": j, \"v\": lmo2_mat.loc[i][j]}, ignore_index=True)\n\n \"save data\"\n if self.cfg.tal_mode == \"wt\":\n tal_df.to_csv(cfg.hic_path + cfg.cell + \"/tal_df.txt\", sep=\"\\t\")\n lmo2_df.to_csv(cfg.hic_path + cfg.cell + \"/lmo2_df.txt\", sep=\"\\t\")\n else:\n tal_df.to_csv(cfg.output_directory + \"tal1_ko.txt\", sep=\"\\t\")\n lmo2_df.to_csv(cfg.output_directory + \"lmo2_ko.txt\", sep=\"\\t\")", "def __init__(self, *args):\n _snap.TChA_swiginit(self,_snap.new_TChA(*args))", "def __init__(self):\n \n self.enh_lib = enhancement\n self.enh = None \n\n self.height = 1.e-2\n # height (m) of coolant duct\n self.mdot = 1.0\n # mass flow rate (kg/s) of coolant\n self.ducts = 2 # number of coolant ducts per hot duct\n self.geometry = 'parallel plates'\n self.c_p = 4.179\n # Specific heat (kJ/kg*K) of water at 325K \n self.mu = 5.3e-4\n # viscosity of water at 325K (Pa*s), WolframAlpha\n self.k = 0.646e-3\n # thermal conductivity of water at 325K (kW/m*K) through\n # cooling duct \n self.Pr = (7.01 + 5.43)/2 # Prandtl # of water from Engineering\n # Toolbox\n self.rho = 1000.\n # density (kg/m**3) of water\n self.Nu_coeff = 0.023\n self.enthalpy0 = 113.25\n # enthalpy (kJ/kg) of coolant at restricted dead state\n self.entropy0 = 0.437\n # entropy (kJ/kg*K) of coolant at restricted dead state\n self.sides = 1\n \n functions.bind_functions(self)", "async def hue_setup(self, ctx, ip):\n await self.config.ip.set(ip)\n self.bridge = Bridge(await self.config.ip())\n self.lights = self.bridge.lights", "def __init__(self, temp, eflux, spec_heat):\n self.spec_heat = spec_heat\n self.eflux = eflux\n self.temp = temp", "def initialise(self):\n\n self.__initialise_chomp_sound()\n self.__initialise_states()\n Character.initialise(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to take in a set of Baxter paths with joint angles ordered in MoveIt fashion, reorder all the waypoints in each path into the Baxter interface joint angle order, and normalize them using the joint ranges
def moveit_unscramble(self, paths): new_paths = [] for path in paths: new_path = np.zeros((path.shape[0], 7)) new_path[:, 0:2] = path[:, 0:2] #replace proper indices in each path new_path[:, 2:5] = path[:, 4:] new_path[:, 5:] = path[:, 2:4] new_path = np.divide(new_path, self.joint_range) #normalize with joint range new_paths.append(new_path) return new_paths
[ "def reorder_trajectory_joints(trajectory, joint_names):\n order = [trajectory.joint_names.index(j) for j in joint_names]\n new_points = []\n for point in trajectory.points:\n new_points.append(JointTrajectoryPoint(\n positions=[point.positions[i] for i in order],\n velocities=[point.velocities[i] for i in order] if point.velocities else [],\n accelerations=[point.accelerations[i] for i in order] if point.accelerations else [],\n time_from_start=point.time_from_start))\n trajectory.joint_names = joint_names\n trajectory.points = new_points", "def freezeOrientation(targetJoints):\n _str_funcName = \"metaFreezeJointOrientation\"\t\t\n #t1 = time.time()\n\n if type(targetJoints) not in [list,tuple]:targetJoints=[targetJoints]\n\n ml_targetJoints = cgmMeta.validateObjListArg(targetJoints,'cgmObject')\n\n #log.info(\"{0}>> meta'd: {1}\".format(_str_funcName, \"%0.3f seconds\"%(time.time() - t1)))\n #t1 = time.time()\t\t\t\t\n '''\n for i_jnt in ml_targetJoints:\n if i_jnt.getConstraintsTo():\n log.warning(\"freezeJointOrientation>> target joint has constraints. Can't change orientation. Culling from targets: '%s'\"%i_jnt.getShortName())\n return False\n '''\n #buffer parents and children of \n d_children = {}\n d_parent = {}\n mi_parent = cgmMeta.validateObjArg(ml_targetJoints[0].parent,noneValid=True)\n #log.info('validate')\n for i,i_jnt in enumerate(ml_targetJoints):\n _relatives = TRANS.children_get(i_jnt.mNode)\n log.debug(\"{0} relatives: {1}\".format(i,_relatives))\n d_children[i_jnt] = cgmMeta.validateObjListArg( _relatives ,'cgmObject',True) or []\n d_parent[i_jnt] = cgmMeta.validateObjArg(i_jnt.parent,noneValid=True)\n for i_jnt in ml_targetJoints:\n for i,i_c in enumerate(d_children[i_jnt]):\n #log.debug(i_c.getShortName())\n #log.debug(\"freezeJointOrientation>> parented '%s' to world to orient parent\"%i_c.mNode)\n i_c.parent = False\n if mi_parent:\n ml_targetJoints[0].parent = False\n\n #Orient\n t_loop = time.time()\n for i,i_jnt in enumerate(ml_targetJoints):\n \"\"\"\n So....jointOrient is always in xyz rotate order\n dup,rotate order\n Unparent, add rotate & joint rotate, push value, zero rotate, parent back, done\n \"\"\" \n log.debug(\"parent...\")\n if i != 0 and d_parent.get(i_jnt):\n i_jnt.parent = d_parent.get(i_jnt)#parent back first before duping\n\n #New method ----\n log.debug(\"loc...\")\n \n #mi_rootLoc = i_jnt.doLoc(fastMode = True)#Make some locs\n mi_zLoc = i_jnt.doLoc(fastMode = True)#Make some locs\n mi_yLoc = mi_zLoc.doDuplicate()\n\n log.debug(\"dup...\")\n i_dup = i_jnt.doDuplicate(parentOnly = True)\n\n i_dup.parent = i_jnt.parent\n i_dup.rotate = 0,0,0\n i_dup.rotateOrder = 0\n i_dup.jointOrient = 0,0,0\n\n log.debug(\"group...\")\n str_group = TRANS.group_me(i_jnt,False,True) #group for easy move\n \n mi_yLoc.parent = str_group\n mi_zLoc.parent = str_group\n\n mi_zLoc.tz = 1#Move\n mi_yLoc.ty = 1\n\n mc.makeIdentity(i_dup.mNode, apply = 1, jo = 1)#Freeze\n\n #Aim\n log.debug(\"constrain...\")\n\n str_const = mc.aimConstraint(mi_zLoc.mNode,\n i_dup.mNode,\n maintainOffset = False,\n weight = 1,\n aimVector = [0,0,1],\n upVector = [0,1,0],\n worldUpVector = [0,1,0],\n worldUpObject = mi_yLoc.mNode,\n worldUpType = 'object' )[0]\n \n\n \n i_jnt.rotate = [0,0,0] #Move to joint\n i_jnt.jointOrient = i_dup.rotate\n \n #log.info('{0} delete'.format(i))\n\n log.debug(\"delete...\")\t \n mc.delete([str_const,str_group])#Delete extra stuff str_group\n i_dup.delete()\n \n #reparent\n if mi_parent:\n try:ml_targetJoints[0].parent = mi_parent\n except 
Exception,error: raise StandardError,\"Failed to parent back %s\"%error\n for i,i_jnt in enumerate(ml_targetJoints):\n for ii,i_c in enumerate(d_children[i_jnt]):\n #log.info(\"{0} | {1}\".format(i,ii))\n #log.info(i_c)\n log.debug(\"freezeJointOrientation>> parented '%s' back\"%i_c.getShortName())\n i_c.parent = i_jnt.mNode \n cgmMeta.cgmAttr(i_c,\"inverseScale\").doConnectIn(\"%s.scale\"%i_jnt.mNode )\n #log.info('reparent')\n #log.info(\"{0}>> reparent: {1}\".format(_str_funcName, \"%0.3f seconds\"%(time.time() - t1)))\n #t1 = time.time()\t\t\t\t\n return True", "def placeBP(chain,branch,atmtypes,nbp=1,fillnbp=None,twist=0,dnawidth = 2,helang=140,connectmax=-1): \n\n atoms1 = []; atoms2 = []; atoms3 = []; axes=[]; count = 0\n bpcount = 0;\n for b in range(0,len(chain)-1):\n diff = [chain[b+1][i]-chain[b][i] for i in range(3)]\n ndiff = norm(diff)\n switchdir = (ndiff*twist > pi)\n switchdir = 0; \n\n if (atmtypes[b] == 'A' and atmtypes[b+1] == 'A'):\n nbase = nbp\n else:\n nbase = fillnbp\n \n isfiller = (atmtypes[b] == 'F')\n \n #do not place the last linker bead since it is redundant\n if (not isfiller and b < len(atmtypes) and atmtypes[b+1]=='F'):\n continue\n\n bpspace = 1.0/nbase\n \n # get the tangent and normalize\n tvec = diff\n tvec = normalize(tvec) \n # get the x axis from the branches\n # x axes at the beads\n xax1 = [branch[b][i] - chain[b][i] for i in range(3)]\n if b < len(chain)-2:\n xax2 = [branch[b+1][i] - chain[b+1][i] for i in range(3)]\n else:\n xax2[:] = xax1[:]\n yax1 = crossprod(tvec,xax1)\n xax1 = normalize(xax1); xax2 = normalize(xax2); yax1 = normalize(yax1)\n phi12 = dihedral(branch[b], chain[b], chain[b+1], branch[b+1])\n\n # align angle to be close to what we expect it to be\n if phi12 - ndiff*twist > pi:\n phi12 = phi12-2*pi\n elif phi12-ndiff*twist <-pi:\n phi12 = phi12 + 2*pi \n\n if switchdir:\n phi12 = 2*pi+phi12\n \n for bp in range(nbase):\n bpcount = bpcount + 1\n # get the center of the basepair\n if isfiller:\n center = [chain[b][i] for i in range(3)]\n else:\n center = [chain[b][i]+diff[i]*(bpspace/2+bp*bpspace) for i in range(3)] \n #center = [chain[b][i]+diff[i]*(bp*bpspace) for i in range(3)] \n \n count += 1\n if isfiller:\n name = 'F3'\n else:\n name = 'A3'\n\n #atoms3.append(Atom(coords=[chain[b][i]+diff[i]*(bp*bpspace) for i in range(3)],num=count,name=name))\n atoms3.append(Atom(coords=center,num=count,name=name))\n if len(atoms3) > 1: \n if (connectmax<0 or norm(array(atoms3[-1].coords)-array(atoms3[-2].coords)) < connectmax):\n atoms3[-1].conect.append(atoms3[-2])\n atoms3[-2].conect.append(atoms3[-1])\n \n # x axis at this particular basepair\n # need to interpolate in angle between bead x axes\n if (b+2 < len(atmtypes) and atmtypes[b] == 'A' and atmtypes[b+1] == 'A' and atmtypes[b+2] == 'F'):\n phi = (bp-(nbase-1)/2.0)*phi12/((nbp+fillnbp)/2)\n elif isfiller:\n phi = 0\n else:\n phi = (bp-(nbase-1)/2.0)*phi12/nbase #+ phi12/nbase/2\n\n xax = [cos(phi)*xax1[i] + sin(phi)*yax1[i] for i in range(3)]\n yax = crossprod(tvec,xax)\n yax = normalize(yax)\n \n # place two atoms in the x-y plane (first one on the x-axis) separated by angle helang \n count += 1\n theta = helang*pi/180 \n coords = [center[i] + xax[i] for i in range(3)]\n if isfiller:\n name = 'F1'\n else:\n name = 'A1'\n atoms1.append(Atom(coords=coords,num=count,name=name))\n\n count += 1 \n coords = [center[i] + dnawidth/2*(cos(theta)*xax[i] + sin(theta)*yax[i]) for i in range(3)]\n if isfiller:\n name = 'F2'\n else:\n name = 'A2'\n 
atoms2.append(Atom(coords=coords,num=count,name=name))\n\n atoms1[-1].resnum = bpcount\n atoms2[-1].resnum = bpcount\n atoms3[-1].resnum = bpcount \n\n # keep track of atom connections\n #atoms1[-1].conect.append(atoms2[-1])\n #atoms2[-1].conect.append(atoms1[-1])\n atoms1[-1].conect.append(atoms3[-1])\n atoms2[-1].conect.append(atoms3[-1])\n atoms3[-1].conect.extend([atoms1[-1],atoms2[-1]])\n\n if len(atoms1) > 1: \n if (connectmax<0 or norm(array(atoms1[-1].coords)-array(atoms1[-2].coords)) < connectmax):\n atoms1[-1].conect.append(atoms1[-2])\n atoms1[-2].conect.append(atoms1[-1])\n if len(atoms2) > 1:\n if (connectmax<0 or norm(array(atoms2[-1].coords)-array(atoms2[-2].coords)) < connectmax):\n atoms2[-1].conect.append(atoms2[-2])\n atoms2[-2].conect.append(atoms2[-1])\n\n \n return atoms1+atoms2+atoms3", "def rebuild_joint_chain(joint_list, name, net):\n\n log.info('rebuild_joint_chain: {}, {}, {}'.format(joint_list, name, net))\n\n new_joints = []\n\n for jnt in joint_list:\n\n info = naming_utils.ItemInfo(jnt)\n new_name = naming_utils.concatenate([info.side,\n info.base_name,\n info.joint_name,\n info.index,\n name])\n\n # Getting joint hierarchy.\n jnt_children = jnt.getChildren()\n jnt_parent = jnt.getParent()\n\n # Unparent joint.\n jnt.setParent(None)\n for child in jnt_children:\n child.setParent(None)\n\n # Make Joint\n new_jnt = pymel.duplicate(jnt, name=new_name)[0]\n pymel.select(None)\n\n # Re-Parent original\n jnt.setParent(jnt_parent)\n for child in jnt_children:\n child.setParent(jnt)\n\n new_joints.append(new_jnt)\n\n # Tags\n naming_utils.add_tags(new_jnt, tags={'Network': net.name(), 'Utility': name})\n\n # Rebuild Hierarchy\n if jnt_parent and jnt_parent in joint_list: # If a parent FK jnt exists, parent this fk jnt to it.\n new_parent_name = new_jnt.name().replace(jnt.name(), jnt_parent.name())\n\n # FK joints\n try:\n new_parent_jnt = pymel.PyNode(new_parent_name)\n new_jnt.setParent(new_parent_jnt)\n except pymel.MayaNodeError:\n pass # Couldn't find a parent. Move on.\n\n if jnt_children:\n for jnt_child in jnt_children:\n new_child_name = new_jnt.name().replace(jnt.name(), jnt_child.name())\n\n # FK joints\n try:\n new_child_jnt = pymel.PyNode(new_child_name)\n new_child_jnt.setParent(new_jnt)\n except pymel.MayaNodeError:\n pass # Couldn't find a parent. 
Move on.\n\n return new_joints", "def move_to_joints(self, joint_state):\n goal = control_msgs.msg.FollowJointTrajectoryGoal()\n goal.trajectory.joint_names.extend(ArmJoints.names())\n point = trajectory_msgs.msg.JointTrajectoryPoint()\n point.positions.extend(joint_state.values())\n point.time_from_start = rospy.Duration(TIME_FROM_START)\n goal.trajectory.points.append(point)\n self._joint_client.send_goal(goal)\n self._joint_client.wait_for_result(rospy.Duration(10))", "def joint_move(self, joints):\n if len(joints) != 4:\n rospy.logwarn(\"Invalid joint move target: \" + str(joints))\n return\n self.joint_move_srv.call(joints)", "def parallel_to_serial_joint_angles(joint_matrix):\n temp = joint_matrix\n temp[2, :] -= joint_matrix[1, :]\n return temp", "def _mergePathsOneStep(paths):\n groups = []\n # add first\n first = paths[0]\n rest = paths[1:]\n groups.append(first)\n\n for s in rest:\n createNew = True\n\n # sort into correct group\n for i in range(0, len(groups)):\n # start == start\n if _equals(groups[i][0], s[0]):\n # reverse s\n rev_s = s[::-1]\n # prepend without last\n groups[i] = rev_s[:-1] + groups[i]\n createNew = False\n break\n # start == end\n elif _equals(groups[i][0], s[-1]):\n # prepend without last\n groups[i] = s[:-1] + groups[i]\n createNew = False\n break\n # end == start\n elif _equals(groups[i][-1], s[0]):\n # append without first\n groups[i] = groups[i] + s[1:]\n createNew = False\n break\n # end == end\n elif _equals(groups[i][-1], s[-1]):\n # reverse s\n rev_s = s[::-1]\n # append without first\n groups[i] = groups[i] + rev_s[1:]\n createNew = False\n break\n # begin new group\n if createNew:\n groups.append(s)\n\n return groups", "def _move_obstacles(self):\n\n for obstacle_set in self.obstacle_manager:\n for obstacle in obstacle_set:\n obstacle.move()", "def move_all_boids_to_new_positions(self):\n for boid in self.boids_list:\n v1 = self.rule1(boid)\n v2 = self.rule2(boid)\n v3 = self.rule3(boid)\n\n boid.velocity = tuple(map(operator.add, boid.velocity, v1))\n boid.velocity = tuple(map(operator.add, boid.velocity, v2))\n boid.velocity = tuple(map(operator.add, boid.velocity, v3))\n boid.position = tuple(map(operator.add, boid.position, boid.velocity))", "def unite(self):\n\n if not self.input_paths:\n return\n else:\n self.path = self.input_paths[0]\n self.remaining_paths = self.input_paths[1:]\n\n nearest_path_info = self.extract_nearest_path()\n while nearest_path_info:\n (path, idx1, idx2) = nearest_path_info\n self.join_two_paths(path, idx1, idx2)\n nearest_path_info = self.extract_nearest_path()", "def update_arrows(ai_settings, screen, stats, sb, link, lynels, arrows, bombs):\n arrows.update()\n for arrow in arrows.copy():\n if arrow.rect.bottom <= 0:\n if stats.multiplier > 1:\n stats.multiplier //= 2 \n sb.prep_multiplier()\n arrows.remove(arrow)\n check_arrow_lynel_collisions(ai_settings, screen, stats, sb, link, lynels, arrows, bombs)", "def oriented_paths(paths: Iterable[BkPath2d]) -> tuple[list[BkPath2d], list[BkPath2d]]:\n from ezdxf.path import winding_deconstruction, make_polygon_structure\n\n polygons = make_polygon_structure(single_paths(paths))\n external_paths: list[BkPath2d]\n holes: list[BkPath2d]\n external_paths, holes = winding_deconstruction(polygons)\n for p in external_paths:\n p.counter_clockwise()\n for p in holes:\n p.clockwise()\n return external_paths, holes", "def Serial_open_chain_mapping(chain_a,chain_b,dir_graph):\n ori_a, pos_a = FK_MDH(chain_a,dir_graph)\n _, pos_b = FK_MDH(chain_b,dir_graph)\n\n pos_a= pos_a - 
np.array([chain_a[0][1][0][3],chain_a[0][1][1][3],chain_a[0][1][2][3]])\n pos_b= pos_b - np.array([chain_b[0][1][0][3],chain_b[0][1][1][3],chain_b[0][1][2][3]])\n \n #convert directed graph for use in nx\n dir_graph = nx.DiGraph(dir_graph)\n\n repos_a_EAS=[]\n reori_a_EAS=[]\n Euler_axis=[]\n Euler_axes=[]\n\n #I think I will need to create another function here for looping every solution in upchain joints to output the solution space for downchain joints\n for i in range(1,360,10): # discretised further here\n reori_a=copy.deepcopy(ori_a)\n repos_a=copy.deepcopy(pos_a)\n Euler_axis=[]\n for j in range(len(chain_a)):\n downchain = list(nx.nodes(nx.dfs_tree(dir_graph, j)))\n if j < len(chain_a)-1:\n ROTM= Vector_mapping_Euler_Axis_Space(repos_a[j+1]-repos_a[j], pos_b[j+1]-pos_b[j])[2][i]\n Euler_axis.append(Vector_mapping_Euler_Axis_Space(repos_a[j+1]-repos_a[j], pos_b[j+1]-pos_b[j])[0][i])\n for k in range(len(downchain)):\n if downchain[0] != downchain[-1]:\n reori_a[downchain[k]]= ROTM @ reori_a[downchain[k]]\n if downchain[k] != downchain[-1]:\n repos_a[downchain[k+1]]= repos_a[j] + ROTM @ (repos_a[downchain[k+1]]-repos_a[j])\n repos_a_EAS.append(repos_a)\n reori_a_EAS.append(reori_a)\n Euler_axes.append(Euler_axis)\n \n for i in range(len(repos_a_EAS)):\n repos_a_EAS[i]=repos_a_EAS[i]+np.array([chain_a[0][1][0][3],chain_a[0][1][1][3],chain_a[0][1][2][3]])\n\n\n return reori_a_EAS, repos_a_EAS, Euler_axes", "def _place_backbone_atoms(coords, angles, cis, aa_seq): \n for i in xrange(len(aa_seq)):\n current_res = index_to_three(aa_seq[i])\n current_atom = 'N'\n prev_atom = 'C'\n prev_prev_atom = 'CA'\n \n for j in xrange(backbone_atoms):\n index = i * atoms_per_residue + j\n if i == 0 and current_atom == 'N':\n coords[:,index] = 0\n elif i == 0 and current_atom == 'CA':\n coords[:,index] = 0\n coords[X][index] = get_bond_length(\n current_atom, prev_atom, current_res) \n else:\n v1 = _create_vector(coords, index, j, 1) \n v2 = _create_vector(coords, index, j, 2) \n \n if i == 0 and current_atom == 'C':\n v3 = Vector(0, -1.0, 0)\n else:\n v3 = _create_vector(coords, index, j, 3) \n \n angle = get_bond_angle(\n prev_prev_atom, prev_atom, current_atom, current_res) \n dihedral = _calculate_dihedral(prev_atom, angles, cis, i) \n bond_length = get_bond_length(prev_atom, current_atom, current_res) \n bond_length_prev = get_bond_length(\n prev_prev_atom, prev_atom, current_res) \n \n D = Vector(\n bond_length * cos(pi - angle),\n bond_length * cos(pi - dihedral) * sin(pi - angle),\n bond_length * sin(pi - dihedral) * sin(pi - angle),\n ) \n bc = (v1 - v2) / bond_length_prev\n n = ((v1 - v3) ** bc).normalized()\n nbc = bc ** n\n basis_change = numpy.array((bc, nbc, n)).transpose()\n D = D.left_multiply(basis_change) + v1 \n coords[:,index] = D\n \n prev_prev_atom = prev_atom\n prev_atom = current_atom\n current_atom = ATOMS[(ATOMS.index(current_atom) + 1) % backbone_atoms]", "def all_rail_pathless_to_roadway(self):\n\n for pathless_rail_od in self.fn.rail.iter_od_pairs(pathless=True):\n msg = \"pathless rail od is derived to roadway.\"\n print pathless_rail_od.id, msg\n self.od_pathless_to_roadway(pathless_rail_od)", "def list_to_brics_joints(joint_values, joint_names, time_stamp=None, unit=None):\n assert (type(joint_values) == list and type(joint_names) == list),\\\n \"''joint_values' and 'joint_names' must be lists.\"\n assert (len(joint_values) == len(joint_names)),\\\n \"''joint_values' and 'joint_names' must have the same dimension.\"\n\n configuration_out = 
brics_actuator.msg.JointPositions()\n for _ in joint_names:\n configuration_out.positions.append(brics_actuator.msg.JointValue())\n\n for joint, position, name in zip(\n configuration_out.positions, joint_values, joint_names\n ):\n joint.value = position\n joint.joint_uri = name\n if unit is not None:\n joint.unit = unit\n if time_stamp is not None:\n assert (\n (type(time_stamp) == std_msgs.msg.Time) or\n (isinstance(time_stamp, genpy.Time))\n ), \"'time_stamp' must be of type 'std_msgs.msg.Time' or 'genpy.Time'.\"\n\n joint.timeStamp = time_stamp\n\n return configuration_out", "def convert_joint_selection_to_loc():\n selection = cmds.ls(selection=True)\n joint_list = list()\n delete_list = list()\n\n for item in selection:\n if 'joint' in cmds.nodeType(item):\n cmds.select(clear=True)\n loc = cmds.spaceLocator(name=('{}_loc'.format(item)))[0]\n cmds.delete(cmds.pointConstraint(item, loc))\n selection.append(loc)\n joint_list.append(item)\n delete_list.append(loc)\n else:\n pass\n\n non_joint_list = list(set(selection) - set(joint_list))\n cmds.select(non_joint_list)\n\n return non_joint_list, delete_list", "def joints(self):\n raise NotImplementedError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import all paths from a file with paths from all environments, and return them in a single dictionary keyed by environment name. Unscramble the path data and normalize it in the process
def paths_import_all(self, path_fname): with open (path_fname, "rb") as paths_f: paths_dict = pickle.load(paths_f) # keyed by environment name unscrambled_dict = {} for key in paths_dict.keys(): unscrambled_dict[key] = self.moveit_unscramble(paths_dict[key]) return unscrambled_dict
[ "def paths_import_single(self, path_fname, env_name, single_env=False):\n\t\tif not single_env:\n\t\t\twith open (path_fname, \"rb\") as paths_f:\n\t\t\t\tpaths_dict = pickle.load(paths_f)\n\n\t\t\t# for non single environment, need to use the environment name as a dictionary key to get the right path list\n\t\t\tenv_paths = self.moveit_unscramble(paths_dict[env_name])\n\t\t\treturn env_paths\n\n\t\telse:\n\t\t\twith open (path_fname, \"rb\") as paths_f:\n\t\t\t\tpaths_list = pickle.load(paths_f)\n\n\t\t\tenv_paths = self.moveit_unscramble(paths_list)\n\t\t\treturn env_paths", "def load_env() -> None:\n for file in find_env():\n with file.open(\"r\") as f:\n for line in f.readlines():\n key, value = line.strip().rstrip().split(\"=\")\n key = re.sub(r\"[^A-Za-z0-9_]\", \"_\", key).upper()\n os.environ[key] = value", "def environments_import(self, envs_fname):\n\t\twith open (envs_fname, \"rb\") as env_f:\n\t\t\tenvs = pickle.load(env_f)\n\t\tenv_names = envs['poses'].keys() # also has obstacle meta data\n\t\treturn env_names", "def parse_import_data_file_path(path: str) -> Dict[str, str]:\n m = re.match(\n r\"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/importJobs/(?P<import_job>.+?)/importDataFiles/(?P<import_data_file>.+?)$\",\n path,\n )\n return m.groupdict() if m else {}", "def __loadResourceEnvPath(self):\n resourcePaths = os.environ.get(self.__resourceEnvName, '').split(os.pathsep)[::-1]\n\n # loading any python file under the resources path\n raiseOnResourceFail = os.environ.get(self.__resourceRaiseOnFailEnvName, '').lower() in ['1', 'true']\n for resourcePath in filter(os.path.exists, resourcePaths):\n for pythonFile in glob(os.path.join(resourcePath, '*.py')):\n try:\n self.__loadToRuntime(pythonFile, 'environment')\n except Exception as err:\n\n if raiseOnResourceFail:\n raise err\n\n # printing the stacktrace\n traceback.print_exc()", "def loaded(self, ignoreFromEnvironment=False):\n result = []\n for resourceFilePath, resourceSource in self.__loadedPaths.items():\n if not ignoreFromEnvironment or resourceSource != 'environment':\n result.append(resourceFilePath)\n\n return result", "def _crawl(\n self, key_path: List[str], env_vars: Mapping[str, Sequence[str]]\n ) -> Dict[str, Any]:\n new_vars: Dict[str, List[str]] = {}\n obj = self._path_get(key_path)\n # Sub-dict -> recurse\n if (\n hasattr(obj, \"keys\")\n and callable(obj.keys)\n and hasattr(obj, \"__getitem__\")\n ):\n for key in obj.keys():\n merged_vars = dict(env_vars, **new_vars)\n merged_path = key_path + [key]\n crawled = self._crawl(merged_path, merged_vars)\n # Handle conflicts\n for key in crawled:\n if key in new_vars:\n err = \"Found >1 source for {}\"\n raise AmbiguousEnvVar(err.format(key))\n # Merge and continue\n new_vars.update(crawled)\n # Other -> is leaf, no recursion\n else:\n new_vars[self._to_env_var(key_path)] = key_path\n return new_vars", "def read_env():\n try:\n with open(\".env\") as f:\n content = f.read()\n except (IOError, UnicodeDecodeError):\n content = \"\"\n\n for line in content.splitlines():\n m1 = re.match(r'\\A([A-Za-z_0-9]+)=(.*)\\Z', line)\n if m1:\n key, val = m1.group(1), m1.group(2)\n m2 = re.match(r'\\A\"(.*)\"\\Z', val)\n if m2:\n val = m2.group(1)\n m3 = re.match(r'\\A\"(.*)\"\\Z', val)\n if m3:\n val = re.sub(r'\\\\(.)\", r\"\\1', m3.group(1))\n os.environ.setdefault(key, val)", "def loadFromEnvironment(self):\n for argParser in self.argParsers:\n argParser.loadFromEnvironment()", "def environment_file(input_file):\n with open(input_file) as file_:\n return 
parse_environment(file_.read())", "def process_vars_dir(vars_dir):\n vars_dict = {}\n for root, subFolders, files in os.walk(vars_dir):\n for filename in files:\n full_path = os.path.join(root, filename)\n # remove the first first part:\n var_path = full_path.replace(vars_dir, '')\n # change / to _ (TODO this is OS-dependent)\n var_name = var_path.replace('/', '_')\n # get the extension out\n (var_prefix, extension) = os.path.splitext(var_name)\n\n if extension == '.json':\n vars_dict.update(read_json_file(var_prefix, full_path))\n elif var_path == '.gitignore':\n pass\n else:\n raise Exception(\"Don't know how to parse {}\".format(full_path))\n\n return vars_dict", "def environ_parse(env: _Environ = environ) -> dict:\n\n _return = {}\n\n for var in env:\n try:\n _return[var] = json_parse(env[var])\n except JSONDecodeError:\n _return[var] = str(env[var])\n\n return _return", "def load_environment_variables():\n config_json = json.load(open('settings/config.json'))\n\n for key in config_json.keys():\n if key not in os.environ:\n os.environ[key] = config_json[key]", "def _environment_variables() -> Dict[str, str]:\n return {key: value for key, value in os.environ.items() if _is_encodable(value)}", "def load_env_file( fileName ):\n logging.info( \"load environment variable file:%s\" % fileName )\n result = {}\n try:\n with open( fileName ) as f:\n for line in f:\n line = line.strip()\n if len( line ) > 0:\n if line[0] == '#':\n continue\n pos = line.find( '=')\n if pos != -1:\n name = line[0:pos].strip()\n value = line[pos+1:].strip()\n result[name] = value\n except Exception as ex:\n logging.error( \"get exception when loading environment file %s:%r\" %( fileName, ex ) )\n return result", "def get_environment_variables(prefix):\n\n env_var_path = os.path.join(prefix, \"etc\", \"conda\", \"activate.d\", \"env_vars.sh\")\n env_vars = {}\n try:\n with open(env_var_path, \"r\") as env_file:\n for var in env_file:\n var_array = var.strip().split()\n if len(var_array) >= 2:\n var_item_array = var_array[1].split(\"=\")\n if len(var_item_array) >= 1:\n env_vars[var_item_array[0]] = var_item_array[1]\n except (IOError, ValueError):\n return None\n\n ## Key = env_var, value = path\n return env_vars", "def expanded_env_dict():\n return generate_expanded_env_dict()", "def load_from_env(self):\n env_config = {}\n for env_var, value in os.environ.items():\n if env_var.startswith(\"IAMZERO_\"):\n key = env_var[8:].upper()\n env_config[key] = self._coerce_value(key, value)\n return env_config", "def load_environments(self):\n datasets = {}\n \n for mode in [\"train\", \"test\"]:\n \n filename = os.path.join(SOURCES,\"scenario-{}-{}-set.feather\".format(self.scenario, mode))\n if not os.path.exists(filename):\n self.setup_environment(mode=mode)\n \n datasets[mode] = pd.read_feather(filename)\n datasets[mode].set_index('index', inplace=True)\n \n \n return datasets[\"train\"], datasets[\"test\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import the paths from a single environment. The file to load from may contain data from many environments, or just a single one, as indicated by the single_env (True/False) flag
def paths_import_single(self, path_fname, env_name, single_env=False): if not single_env: with open (path_fname, "rb") as paths_f: paths_dict = pickle.load(paths_f) # for non single environment, need to use the environment name as a dictionary key to get the right path list env_paths = self.moveit_unscramble(paths_dict[env_name]) return env_paths else: with open (path_fname, "rb") as paths_f: paths_list = pickle.load(paths_f) env_paths = self.moveit_unscramble(paths_list) return env_paths
[ "def load_local(env_script, env):\n # pylint: disable=unused-argument\n SConscript(env_script, exports=['env'])", "def environments_import(self, envs_fname):\n\t\twith open (envs_fname, \"rb\") as env_f:\n\t\t\tenvs = pickle.load(env_f)\n\t\tenv_names = envs['poses'].keys() # also has obstacle meta data\n\t\treturn env_names", "def load_env() -> None:\n for file in find_env():\n with file.open(\"r\") as f:\n for line in f.readlines():\n key, value = line.strip().rstrip().split(\"=\")\n key = re.sub(r\"[^A-Za-z0-9_]\", \"_\", key).upper()\n os.environ[key] = value", "def __loadResourceEnvPath(self):\n resourcePaths = os.environ.get(self.__resourceEnvName, '').split(os.pathsep)[::-1]\n\n # loading any python file under the resources path\n raiseOnResourceFail = os.environ.get(self.__resourceRaiseOnFailEnvName, '').lower() in ['1', 'true']\n for resourcePath in filter(os.path.exists, resourcePaths):\n for pythonFile in glob(os.path.join(resourcePath, '*.py')):\n try:\n self.__loadToRuntime(pythonFile, 'environment')\n except Exception as err:\n\n if raiseOnResourceFail:\n raise err\n\n # printing the stacktrace\n traceback.print_exc()", "def loadFromEnvironment(self):\n for argParser in self.argParsers:\n argParser.loadFromEnvironment()", "def paths_import_all(self, path_fname):\n\t\twith open (path_fname, \"rb\") as paths_f:\n\t\t\tpaths_dict = pickle.load(paths_f)\n\n\t\t# keyed by environment name\n\t\tunscrambled_dict = {}\n\t\tfor key in paths_dict.keys():\n\t\t\tunscrambled_dict[key] = self.moveit_unscramble(paths_dict[key])\n\n\t\treturn unscrambled_dict", "def load_env():\n project_dir = dirname(dirname(__file__))\n dotenv.read_dotenv(join(project_dir, '.env'))", "def load_environment_variables():\n config_json = json.load(open('settings/config.json'))\n\n for key in config_json.keys():\n if key not in os.environ:\n os.environ[key] = config_json[key]", "def get_environment(paths: Optional[List[str]]) -> BaseEnvironment:\n return select_backend().Environment.from_paths(paths)", "def _load_specific_env_properties(env, prop_prefix=None):\n filename = '/etc/catalist/%s/env.properties' % env.strip().lower()\n prop_file = open(filename)\n return javaproperties.load(prop_file,\n object_pairs_hook=_filter_generator(\n prop_prefix))", "def load_config_from_env(self):\n app_envs = filter(\n lambda s: s.startswith(\n '{}_'.format(self.name.upper())), os.environ.keys())\n for env_key in app_envs:\n if os.environ[env_key]:\n self.config[env_key] = os.environ[env_key]", "def from_path(cls, path: Union[str, Path]) -> \"Platform\":\n path = Path(path).absolute()\n env: Dict[str, str] = {}\n for env_path in (path / \"env\").glob(\"*\"):\n if env_path.is_file():\n env[env_path.name] = env_path.read_text()\n return cls(path=path, env=MappingProxyType(env))", "def load_env(name, get_json=True):\n try:\n env = os.environ[name]\n except KeyError:\n return\n\n if get_json and env is not None:\n value = _load_json(env)\n else:\n value = env\n return value", "def loadenv(environment = ''):\n with open(config_dir + '/' + environment + '.yml', 'r') as f:\n env.config = yaml.load(f)\n env.roledefs = env.config['roledefs']\n env.user = env.config['user']\n env.password = env.config['password']", "def environment_file(input_file):\n with open(input_file) as file_:\n return parse_environment(file_.read())", "def load_dotenv_cwd():\n if dotenv is None:\n if os.path.isfile('.env.local') or os.path.isfile('.env'):\n print(\n ' * Tip: There are \".env.local\" or \".env\" files present.'\n ' Do \"pip install 
python-dotenv\" to use them.'\n )\n return\n\n for name in ('.env.local', '.env'):\n if os.path.isfile(name):\n dotenv.load_dotenv(name)", "def load_env(separator, line):\n env_key = line.rstrip().split(separator)[0].rstrip()\n # set the environment variable to the value with the start and\n # end quotes taken off.\n if len(line.rstrip().split(separator)) > 2:\n env_value = separator.join(line.rstrip().split(separator)[1:]).strip()\n else:\n env_value = line.rstrip().split(separator)[1].strip()\n if env_value:\n if env_value[0] == \"'\" or env_value[0] == '\"':\n env_value = env_value[1:-1]\n\n environ[env_key] = env_value", "def load_env_from_file(filename):\n if not os.path.exists(filename):\n raise FileNotFoundError(\"Environment file {} does not exist.\".format(filename))\n\n with open(filename) as f:\n for lineno, line in enumerate(f):\n line = line.strip()\n if not line or line.startswith(\"#\"):\n continue\n if \"=\" not in line:\n raise SyntaxError(\"Invalid environment file syntax in {} at line {}.\".format(filename, lineno + 1))\n\n name, value = parse_var(line)\n\n yield name, value", "def load():\n flask_env = os.environ[\"FLASK_ENV\"]\n\n with open(f\"settings.{flask_env}.json\", \"r\") as f:\n return Dict(json.load(f))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import the pointcloud data from a file, and flatten it into a vector
def pointcloud_import(self, pcd_fname): print('pointcloud filename:') print(pcd_fname) pc = pypcd.PointCloud.from_path(pcd_fname) # flatten into vector temp = [] temp.append(pc.pc_data['x'][~np.isnan(pc.pc_data['x'])]) temp.append(pc.pc_data['y'][~np.isnan(pc.pc_data['x'])]) temp.append(pc.pc_data['z'][~np.isnan(pc.pc_data['x'])]) temp = np.array(temp) print(temp.shape) obs_pc = temp.flatten('F') #flattened column wise, [x0, y0, z0, x1, y1, z1, x2, y2, ...] return obs_pc
[ "def load_vectors(filename):\n\n vectors = []\n with open(filename, 'r') as f:\n reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)\n for v in reader:\n vectors.append(v)\n return vectors", "def loadData(self, file_path: str): \n # Open the file, load the data and reshape it in order to be usable by \n # the model \n with h5py.File(file_path, 'r') as f:\n data_f = f['GlobalFeatureVectors'][:]\n number_of_samples = data_f.__len__()\n data_f = np.reshape(data_f, (number_of_samples, v.NUM_OF_FEATURES))\n data_d = f['Projection'][:]\n #data_d = np.reshape(data_d, (number_of_samples, 1))\n data_l = f['RadialVectorLength'][:]\n #data_l = np.reshape(data_l, (number_of_samples, 1))\n points = f['Cloud'][:]\n normals = f['Normals'][:]\n seeds = f['Seed'][:]\n seeds_normals = f['Seed_Normal'][:]\n # Set the input (X) and output (Y) data\n X = data_f \n Y = np.column_stack([data_d, data_l])\n # Scale the output data to be within the interval [0,1]\n Y , mins, maxs = getScaledArray(Y, high=1.0, low=0.0, mins=np.asarray([-103.75,0.07]), maxs=np.asarray([117.24,110.8]), bycolumn=True)\n self.updateNormalizationData(mins, maxs, high=1.0, low=0.0)\n return X, Y, points, normals, seeds, seeds_normals", "def loadFile(self, filename):\n f = open(filename, 'r')\n dataset = f.readlines()\n for data in dataset[1:]:\n d = data.split()\n self.lexiVector[d[0]] = np.array(d[1:], dtype=float)", "def point_cloud_from_fileobj(f):\n header = []\n while True:\n ln = f.readline().strip().decode('ascii')\n header.append(ln)\n if ln.startswith('DATA'):\n metadata = parse_header(header)\n dtype = _build_dtype(metadata)\n break\n if metadata['data'] == 'ascii':\n pc_data = parse_ascii_pc_data(f, dtype, metadata)\n elif metadata['data'] == 'binary':\n pc_data = parse_binary_pc_data(f, dtype, metadata)\n elif metadata['data'] == 'binary_compressed':\n pc_data = parse_binary_compressed_pc_data(f, dtype, metadata)\n else:\n print('DATA field is neither \"ascii\" or \"binary\" or\\\n \"binary_compressed\"')\n return PointCloud(metadata, pc_data)", "def read_vectors(in_file):\n vectors = np.loadtxt(in_file)\n if len(vectors) != 3:\n raise ValueError(\"The lattice vector file does not have 3 vectors\")\n return vectors", "def read_vasp(in_name):\n with open(in_name) as vasp_file:\n vasp_content = vasp_file.readlines()\n\n # lattice vectors\n\n vec1 = vasp_content[2].split()\n vec2 = vasp_content[3].split()\n vec3 = vasp_content[4].split()\n\n # matrix from vectors\n M = np.zeros((3, 3))\n M[0] = vec1\n M[1] = vec2\n M[2] = vec3\n\n # reads names of elements and amounts\n species = vasp_content[5].split()\n amounts_str = vasp_content[6].split()\n amounts = map(int, amounts_str)\n\n # make Atom objects from file\n atoms = []\n for element in species:\n\n # position of the first and last atom of one kind\n # in the vasp file\n firstAt = 8 + sum(amounts[:species.index(element)])\n lastAt = 8 + sum(amounts[:species.index(element) + 1])\n\n for line in vasp_content:\n if vasp_content.index(line) in range(firstAt, lastAt):\n xAtom, yAtom, zAtom = map(float, line.split())\n atoms.append(Atom(element, xAtom, yAtom, zAtom))\n return M, atoms", "def parse_points3d_file(colmap_sparse_plaintext_3dpoints_path):\n result = []\n\n # File format:\n # <point3d id> <x> <y> <z> <r> <g> <b> <error> <image id 1> <point2d idx 1> <image id 2> <point2d idx 2> ...\n # ...\n with open(colmap_sparse_plaintext_3dpoints_path, 'r') as f:\n while True:\n line = f.readline()\n\n if line == '':\n break\n\n line_trimmed = line.strip()\n\n if 
line_trimmed.startswith('#') or line_trimmed == '':\n continue\n\n splits = line_trimmed.split(' ', 8)\n\n point3d_id = int(splits[0])\n x = float(splits[1])\n y = float(splits[2])\n z = float(splits[3])\n r = int(splits[4])\n g = int(splits[5])\n b = int(splits[6])\n error = float(splits[7])\n\n point2d_list_str = splits[8]\n point2d_list_splits = point2d_list_str.split(' ')\n assert(len(point2d_list_splits) % 2 == 0)\n\n point2d_list = []\n for i in range(0, len(point2d_list_splits), 2):\n image_id = int(point2d_list_splits[i])\n point2d_idx = int(point2d_list_splits[i + 1])\n point2d_list.append((image_id, point2d_idx))\n\n point3d = Point3D(point_id=point3d_id, x=x, y=y, z=z, r=r, g=g, b=b, error=error, point2d_list=point2d_list)\n result.append(point3d)\n\n return result", "def load(file: str) -> PointData:\n data = np.load(file)\n point = data['point']\n point_data_data = data['point_data']\n\n point_data = PointData(point, point_data_data)\n return point_data", "def readdata(filename):\n\n infile = open(filename, \"r\")\n lines = infile.readlines()\n infile.close()\n \n xlist = []\n ylist = []\n for line in lines:\n coor = line.split()\n x = float(coor[1])\n y = float(coor[2])\n xlist.append(x)\n ylist.append(y)\n \n xarr = np.array(xlist)\n yarr = np.array(ylist)\n \n return xarr, yarr", "def load_xyz(self, filename):\n xyz = torch.Tensor(np.load(filename)).float()[:, :, :3]\n xyz = xyz.permute(2, 0, 1)\n return xyz", "def read_geometry_file(path_to_file):\n logger.info(\"Reading geometry file.\")\n with open(path_to_file) as f:\n lines = f.readlines()\n\n vec_x = lines[3].split()\n vec_y = lines[4].split()\n vec_z = lines[5].split()\n\n vec_x = [float(vec_x[i]) for i in range(1, len(vec_x))]\n vec_y = [float(vec_y[i]) for i in range(1, len(vec_y))]\n vec_z = [float(vec_z[i]) for i in range(1, len(vec_z))]\n\n vectors = [vec_x, vec_y, vec_z]\n uc_atoms = []\n for i in range(6, len(lines)):\n sl = lines[i].split()\n x = float(sl[1])\n y = float(sl[2])\n z = float(sl[3])\n t = sl[4]\n\n if sl[4] == \"Ga\":\n c = ga_mass\n elif sl[4] == \"Al\":\n c = al_mass\n elif sl[4] == \"In\":\n c = in_mass\n elif sl[4] == \"O\":\n c = o_mass\n\n global_atom_types[t] = global_atom_types[t] + 1\n\n a = Atom(x, y, z, t, c)\n uc_atoms.append(a)\n logger.info(\"Geomtery file read.\")\n # uc_atoms = UCAtoms(uc_atoms)\n\n return vectors, uc_atoms", "def load_pca_file(self, file_name='data_pca.txt'):\n fic_data_pca = open(self.param.path_dictionary + '/' + file_name, 'r')\n mean_data_list = fic_data_pca.readline().split(',')\n eig_pairs_list = fic_data_pca.readline().split(',')\n fic_data_pca.close()\n\n mean_data_vect = []\n for val in mean_data_list:\n mean_data_vect.append([float(val)])\n mean_data_vect = np.asarray(mean_data_vect)\n\n eig_pairs_vect = []\n for pair in eig_pairs_list:\n eig_val_str, eig_vect_str = pair.split(';')\n eig_vect_str = eig_vect_str.split(' ')\n # eig_vect_str[-1] = eig_vect_str[-1][:-1]\n eig_vect = []\n for i, v in enumerate(eig_vect_str):\n if v != '' and v != '\\n':\n eig_vect.append(float(v))\n eig_pairs_vect.append((float(eig_val_str), eig_vect))\n\n return mean_data_vect, eig_pairs_vect", "def img2vect(filename):\n\n\treturnVect = zeros((1, 1024))\n\tfr = open(filename)\n\n\t# transform the matirx to a vector\n\tfor i in range(32):\n\t\tlineStr = fr.readline()\n\t\tfor j in range (32):\n\t\t\treturnVect[0, 32 * i + j] = int(lineStr[j])\n\treturn returnVect", "def retrieveData(inputFile):\r\n df_points = pd.read_csv(\r\n inputFile, header=None, names=[\"x\", \"y\"], 
sep=\" \")\r\n if len(df_points.index) < 1:\r\n raise Exception(\"The input file is empty!\")\r\n data = [tuple(row) for row in df_points.values]\r\n points = np.array([data_point for data_point in data])\r\n return points", "def extract(filepath):\r\n with open(filepath, \"r\") as f:\r\n dataset = f.readlines()\r\n dataset = map(lambda i: i.strip('\\n').split(';'), dataset)\r\n dataset = np.array(list(dataset))\r\n return dataset", "def load_dataset (fileName):\n # open(fileName).readline(): '1.000000 0.067732 3.176513'\n # numFeat = 2\n # numFeat = len(open(fileName).readline().split('\\t')) - 1\n fr = open(fileName)\n xArr, yArr = [], []\n\n for line in fr.readlines():\n lineArr = []\n # eg: ['1.000000', '0.067732', '3.176513']\n currentLine = line.strip().split('\\t') \n # X = [ [x0, x1], [x0, x1], .... ] (str -> float)\n for i in range(len(currentLine) - 1):\n lineArr.append(float(currentLine[i]))\n \n xArr.append(lineArr)\n # Y = [y1, y2, ... ]\n yArr.append(float(currentLine[-1]))\n\n return xArr, yArr", "def get_points_from_file():\n input_file = open('input.txt', 'r')\n size = int(input_file.readline())\n\n points = []\n for i in range(size):\n x, y = input_file.readline().strip('\\n').split(' ')\n points.append(Point(int(x), int(y)))\n\n return points", "def extract_point_coordinates(file_path: str, custom_file_name: str=None) -> str:\n # open laz file to extract points\n las = pylas.read(file_path)\n # to get dimension names\n point_format = las.point_format\n # get length point list\n print(len(las.points))\n print(point_format.dimension_names)\n\n if not custom_file_name:\n # name for new file for numpy array points\n new_file_name = '3d_points.pkl'\n else:\n new_file_name = custom_file_name\n # extract points from oened file\n points = las.points\n # empty list to create numpy array with \n point_list = []\n #iterate through list, convert from np.void to list, append to empty list\n for index, item in enumerate(points):\n point = list(item)[:3]\n point_list.append(point)\n print(f'Processing {len(points)} points: {round(((index/len(points)) * 100), 3)}% complete.')\n\n # create 3D numpy array of points\n np_point_list = np.array(point_list)\n # dump subset of data into new file, pickled for numpy array object\n with open( new_file_name, 'wb') as file:\n pickle.dump(np_point_list, file)\n logging.info(f'Points saved to: {new_file_name}')\n return new_file_name", "def read_stl(filepath):\n import time\n start_time = time.process_time()\n\n tris, tri_nors, pts = [], [], ListDict()\n\n with open(filepath, 'rb') as data:\n # check for ascii or binary\n gen = _ascii_read if _is_ascii_file(data) else _binary_read\n\n for nor, pt in gen(data):\n # Add the triangle and the point.\n # If the point is already in the list of points, the\n # index returned by pts.add() will be the one from the\n # first equal point inserted.\n tris.append([pts.add(p) for p in pt])\n tri_nors.append(nor)\n\n print('Import finished in %.4f sec.' % (time.process_time() - start_time))\n\n return tris, tri_nors, pts.list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get number of points in the pointcloud file pcd_fname
def pontcloud_length_check(self, pcd_fname):
    pc = self.pointcloud_import(pcd_fname)
    return pc.shape[0]
[ "def compute_number_of_geometries(file_name: str | os.PathLike[str]) -> int:\n with open(file_name, 'r') as f:\n numat = int(f.readline())\n\n cmd = f\"wc -l {os.fspath(file_name)}\"\n wc = subprocess.getoutput(cmd).split()[0]\n\n lines_per_geometry = numat + 2\n\n return int(wc) // lines_per_geometry", "def num_points_per_cloud(self) -> torch.Tensor:\n return self._num_points_per_cloud", "def pointcloud_import(self, pcd_fname):\n\t\tprint('pointcloud filename:')\n\t\tprint(pcd_fname)\n\t\tpc = pypcd.PointCloud.from_path(pcd_fname)\n\n\t\t# flatten into vector\n\t\ttemp = []\n\t\ttemp.append(pc.pc_data['x'][~np.isnan(pc.pc_data['x'])])\n\t\ttemp.append(pc.pc_data['y'][~np.isnan(pc.pc_data['x'])])\n\t\ttemp.append(pc.pc_data['z'][~np.isnan(pc.pc_data['x'])])\n\t\ttemp = np.array(temp)\n\t\tprint(temp.shape)\n\t\tobs_pc = temp.flatten('F') #flattened column wise, [x0, y0, z0, x1, y1, z1, x2, y2, ...]\n\n\t\treturn obs_pc", "def npoints(self):\n return len(self.data)", "def count_observation(data_name):\n #filename = str(data_name)\n with open(data_name) as file: \n num_lines = 0\n for line in file: \n num_lines = num_lines + 1\n num_obs = num_lines/3\n return(int(num_obs))", "def getPointCount(self) -> \"int\":\n return _coin.SoGetPrimitiveCountAction_getPointCount(self)", "def vcf_info(file):\n for line in file: \n if line[:2]!='##':\n data=line.strip().split('\\t')\n numberofsamples=len(data[9:])\n break\n numberofSNPs=len(MYDATA.index)\n return numberofsamples,numberofSNPs", "def get_total_num_points(data_fields):\n\n n_points = 0\n for data_field in data_fields.values():\n n_points += data_field.shape[0]\n return n_points", "def nPoints(self):\n return _cantera.domain_nPoints(self._hndl)", "def CheckFeatureCount_Shapefile(shapefileFolder, fileName):\n \n d = ogr.GetDriverByName('ESRI Shapefile')\n ds = d.Open(shapefileFolder + '\\\\' + fileName + '.shp', 0)\n l = ds.GetLayer()\n feat_num = l.GetFeatureCount()\n del l\n del ds\n del d\n \n return feat_num", "def countfile(filename):\n generator = block_blob_service.list_blobs('travelimperial', filename)\n count = 0\n for blob in generator:\n count += 1\n print(blob.name)\n return count", "def num_perpception_layer_points(layer):\n return (layer + 1) * 4", "def point_attrib_count(self):\n return self.attributeCounts[AttributeOwner.POINT]", "def getNumPoints(self) -> \"int\":\n return _coin.SoFaceDetail_getNumPoints(self)", "def file_count():\n\n corpus = Corpus.from_env()\n click.echo(corpus.file_count)", "def get_total_number_of_variants_of_file(self):\n var_counter = 0\n with open(file_1) as my_vcf_fh:\n vcf_reader = vcf.Reader(my_vcf_fh)\n for record in vcf_reader:\n var_counter += 1\n return var_counter", "def count_lines(filename):\n pass", "def get_number_of_snvs(self):\n snv_counter = 0\n with open(file_1) as my_vcf_fh:\n vcf_reader = vcf.Reader(my_vcf_fh)\n for record in vcf_reader:\n if record.is_snp:\n snv_counter += 1\n return snv_counter", "def mount_point_count(self) -> int:\n return pulumi.get(self, \"mount_point_count\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import environments from files with description of where obstacles reside, dictionary keyed by 'poses' and 'obsData'. This function uses the poses key, which has the positions of all the environment obstacles
def environments_import(self, envs_fname):
    with open(envs_fname, "rb") as env_f:
        envs = pickle.load(env_f)
    env_names = envs['poses'].keys()  # also has obstacle meta data
    return env_names
[ "def edit_world(file_name, poses):\n\n # Open world file\n tree = ET.parse(file_name)\n\n root = tree.getroot()\n root_state = root.findall('world')[0].findall('state')[0]\n\n objects_pose = poses\n models = {} \n\n for model in root_state.iter('model'):\n if model.attrib['name'] in objects_pose:\n models[model.attrib['name']] = model\n\n # Edit pose elements\n for model_name, element in models.iteritems():\n element.findall('pose')[0].text = objects_pose[model_name]\n \n # Write world file\n tree.write(file_name)", "def load_poses(dir_im, dir_joints):\n \n dict_joints = {}\n dict_joints_SR_destrorso = {}\n for posa in tqdm(os.listdir(dir_im)):\n posa1 = posa[:-13]\n posa = posa1 + \"_keypoints.json\"\n try:\n file = dir_joints+'/'+posa\n if os.path.isfile(file):\n with open(file) as f:\n data = json.load(f)\n prova = data['people'][0]['pose_keypoints_2d']\n punti = []\n punti1 = []\n i = 0\n max_y = 0.0\n while i < 75:\n x = prova[i]\n y1 = prova[i+1]\n y2 = -prova[i+1]\n punto = [x,y1]\n punto1 = [x,y2]\n punti.append(punto)\n punti1.append(punto1)\n i += 3\n if np.abs(y1)> max_y:\n max_y = np.abs(y1)\n punti = punti[:15]\n punti1 = punti1[:15]\n for i in range(len(punti1)):\n punti1[i][1] += max_y\n\n dict_joints[posa1] = punti\n dict_joints_SR_destrorso[posa1] = punti1\n except:\n print(posa)\n return dict_joints, dict_joints_SR_destrorso", "def load_env() -> None:\n for file in find_env():\n with file.open(\"r\") as f:\n for line in f.readlines():\n key, value = line.strip().rstrip().split(\"=\")\n key = re.sub(r\"[^A-Za-z0-9_]\", \"_\", key).upper()\n os.environ[key] = value", "def load_game():\n\n try:\n with open('character.json') as file_object:\n char = json.load(file_object)\n character.set_hp(char['hp'])\n character.set_coordinates(char['column'], char['row'])\n\n except FileNotFoundError:\n char = {\"hp\": 10, \"column\": 29, \"row\": 16}\n character.set_hp(char['hp'])\n character.set_coordinates(char['column'], char['row'])", "def load_pos():\n # Load the point positions\n filename_point_pos = os.path.join(dir_np, 'point_pos.npy')\n point_pos = np.load(filename_point_pos)\n\n # Load the cell positions\n filename_cell_pos = os.path.join(dir_np, 'cell_pos.npy')\n cell_pos = np.load(filename_cell_pos)\n\n # Load the cell volumes\n filename_cell_vol = os.path.join(dir_np, 'cell_vol.npy')\n cell_vol= np.load(filename_cell_vol)\n\n # Return the three position arrays\n return point_pos, cell_pos, cell_vol", "def load_exposures(self):\n if not self._loading_session:\n nisp_exposures_json_file, _ = QFileDialog.getOpenFileName(self.main,\n caption='Open NISP Exposures',\n filter='*.json')\n else:\n nisp_exposures_json_file = self._session['exposures']\n if nisp_exposures_json_file == '':\n return\n\n if not os.path.isfile(nisp_exposures_json_file):\n return\n\n with open(nisp_exposures_json_file) as f:\n nisp_exposure_filenames = json.load(f)\n\n self.exposures = {} # {dither: {detector: image}}\n\n fits_magic = 'SIMPLE = T'\n\n for exposure_name in nisp_exposure_filenames:\n full_path = os.path.join(os.path.dirname(nisp_exposures_json_file), 'data', exposure_name)\n print(f\"loading {full_path}\")\n try:\n f = open(full_path)\n magic = f.read(30)\n except:\n magic = ''\n if magic != fits_magic:\n message = QMessageBox(0, 'File Format Error', f'{exposure_name} is not a FITS file.')\n message.exec()\n self.exposures = None\n f.close()\n return\n if not f.closed:\n f.close()\n exposure = fits.open(full_path, memmap=True)\n dither = exposure[0].header['DITHSEQ']\n self.exposures[dither] 
= {}\n for detector in NISP_DETECTOR_MAP:\n self.exposures[dither][detector] = exposure[f'DET{NISP_DETECTOR_MAP[detector]}.SCI']\n\n for view_tab in self.view_tab:\n view_tab.init_view()\n\n self._session['exposures'] = nisp_exposures_json_file", "def load_maps(cat,maps=None):\n\n if not hasattr(maps, '__len__'):\n if cat.release=='y1':\n maps=np.array(list(config.map_name_y1.keys()))\n elif cat.release=='sv':\n maps=np.array(list(config.map_name_sv.keys()))\n print maps\n for i,x in enumerate(maps):\n print i,x\n if x=='ebv':\n setattr(cat,x,split_methods.get_maps(cat.ra,cat.dec,x,release=cat.release,nside=2048,map=True))\n else:\n setattr(cat,x,split_methods.get_maps(cat.ra,cat.dec,x,release=cat.release))\n\n return", "def load_gnss_poses_file(\n poses_file_path: str,\n sensor_id: str\n) -> kapture.Trajectories:\n pose_table = np.loadtxt(poses_file_path, delimiter=',', skiprows=1)\n timestamps_ns = pose_table[:, 0].astype(int)\n season_poses = pose_table[:, 1:8]\n trajectories = kapture.Trajectories()\n for timestamp_ns, (tx, ty, tz, qx, qy, qz, qw) in zip(timestamps_ns, season_poses):\n timestamp_ns = int(timestamp_ns)\n car_from_world = kapture.PoseTransform(r=[qw, qx, qy, qz], t=[tx, ty, tz]).inverse()\n trajectories[timestamp_ns, sensor_id] = car_from_world\n return trajectories", "def load_environments(self):\n datasets = {}\n \n for mode in [\"train\", \"test\"]:\n \n filename = os.path.join(SOURCES,\"scenario-{}-{}-set.feather\".format(self.scenario, mode))\n if not os.path.exists(filename):\n self.setup_environment(mode=mode)\n \n datasets[mode] = pd.read_feather(filename)\n datasets[mode].set_index('index', inplace=True)\n \n \n return datasets[\"train\"], datasets[\"test\"]", "def load_game():\n file = shelve.open('savegame', 'r')\n var.game_map = file['game_map']\n var.game_objects = file['game_objects']\n var.player = var.game_objects[file['player_index']] #get index of player in objects list and access it\n var.stairs = var.game_objects[file['stairs_index']]\n var.dungeon_level = file['dungeon_level']\n var.inventory = file['inventory']\n var.game_msgs = file['game_msgs']\n var.game_state = file['game_state']\n file.close()\n\n initialize_fov()", "def load_poses(pose_file_arg):\n pose_file = os.path.join(pose_file_arg)\n\n # Read and parse the poses\n poses = []\n try:\n with open(pose_file, 'r') as f:\n lines = f.readlines()\n\n for line in lines:\n T_w_cam0 = np.fromstring(line, dtype=float, sep=' ')\n T_w_cam0 = T_w_cam0.reshape(3, 4)\n T_w_cam0 = np.vstack((T_w_cam0, [0, 0, 0, 1]))\n poses.append(T_w_cam0)\n\n except FileNotFoundError:\n print('Poses are not avaialble for sequence ' +\n pose_file_arg)\n\n return poses", "def read_micromanager_stage_positions(filename):\n with open(filename, 'r') as stage_positions:\n pos = json.load(stage_positions)\n\n positions = []\n\n # iterate over each position in the list, grab the devices and positions\n for p in pos['map']['StagePositions']['array']:\n devs = p['DevicePositions']['array']\n sp = {d['Device']['scalar']: d['Position_um']['array'] for d in devs}\n positions.append(sp)\n\n return positions", "def load_env(state, eid, socket, env_path=DEFAULT_ENV_PATH):\n env = {}\n if eid in state:\n env = state.get(eid)\n elif env_path is not None:\n p = os.path.join(env_path, eid.strip(), \".json\")\n if os.path.exists(p):\n with open(p, \"r\") as fn:\n env = tornado.escape.json_decode(fn.read())\n state[eid] = env\n\n if \"reload\" in env:\n socket.write_message(json.dumps({\"command\": \"reload\", \"data\": 
env[\"reload\"]}))\n\n jsons = list(env.get(\"jsons\", {}).values())\n windows = sorted(jsons, key=lambda k: (\"i\" not in k, k.get(\"i\", None)))\n for v in windows:\n socket.write_message(v)\n\n socket.write_message(json.dumps({\"command\": \"layout\"}))\n socket.eid = eid", "def read_obstacles_function(obstacles_file):\n \"\"\"\n An obstacle has fields (x,y,R) defining its spatial extent\n The obstacles files should be located somewhere near\n the following location : ./resources/obstacles.obs\n\n TODO: for moving obstacles, add fields vx, vy\"\"\"\n\n # parse xml file and get root element\n tree = et.parse(obstacles_file)\n root = tree.getroot()\n\n # obstacles = 1D vector containing all the info about the obstacles\n # for static obstacles : x, y, R -- size(obstacles)%3 == 0\n # for moving obstacles : add vx, vy -- size(obstacles)%5 == 0\n # no test is made in that sense for now\n obstacles_list = []\n\n for index in range(0, len(root)):\n this_obstacle = root[index].attrib\n # conversion from string to float\n # can be bypassed if the xml is defined with full tags\n x = float(this_obstacle['x'])\n y = float(this_obstacle['y'])\n r = float(this_obstacle['R'])\n\n obstacles_list.append(x)\n obstacles_list.append(y)\n obstacles_list.append(r)\n\n return obstacles_list, 3", "def load_breakpoints(args):\n break_dict = {}\n if args.breakpoints:\n with open(args.breakpoints, 'r') as in_handle:\n for line in in_handle:\n if line[0] != '#':\n line = line.strip().split('\\t')\n seq = line[0]\n coords = [int(coord)-1 for coord in line[1:]] # 0-based\n break_dict[seq] = coords\n return break_dict", "def readObjs():\n for source_key, wspace_key in self.cfg['obj'].items():\n if source_key in self.process.sourcemanager.keys():\n self.cfg['source'][source_key] = self.process.sourcemanager.get(source_key)\n else:\n obj = self.wspace.obj(wspace_key)\n if obj == None:\n self.logger.logWARNING(\"No Variable {0} is found.\".format(wspace_key))\n else:\n self.cfg['source'][source_key] = obj", "def get_positions(self) -> None:\n\n spawn_positions = dict()\n # Iterate through each floorplan scene.\n for scene in [1, 2, 4, 5]:\n spawn_positions[scene] = dict()\n\n # Load the scene and get environment data.\n resp = self.communicate([self.get_add_scene(scene_name=f\"floorplan_{scene}a\"),\n {\"$type\": \"send_environments\"}])\n envs = Environments(resp=resp)\n\n # Load an occupancy map.\n occ = np.load(str(OCCUPANCY_MAP_DIRECTORY.joinpath(f\"{scene}_0.npy\").resolve()))\n rooms = np.full(occ.shape, -1, dtype=int)\n for ix, iy in np.ndindex(occ.shape):\n # Ignore positions outside of the scene.\n if occ[ix, iy] == 2:\n continue\n # Get the room that this position is in.\n for i, env in enumerate(envs.envs):\n x = envs.x_min + (ix * OCCUPANCY_CELL_SIZE)\n z = envs.z_min + (iy * OCCUPANCY_CELL_SIZE)\n if env.is_inside(x, z):\n rooms[ix, iy] = i\n break\n np.save(str(ROOM_MAP_DIRECTORY.joinpath(str(scene)).resolve()), np.array(rooms))\n self.communicate({\"$type\": \"terminate\"})", "def load_exo_dict():\n pandexo_input = {\n \"star\":{\n \"type\" : \"user or phoenix\", \n \"starpath\" : \"file path\",\n \"w_unit\": \"Angs,cm,um,cm or Hz\",\n \"f_unit\": \"W/m2/um, FLAM, Jy, or erg/s/cm2/Hz\",\n \"mag\": \"magnitude\",\n \"ref_wave\": \"corresponding ref wave\",\n \"temp\": \"Only if phoenix, (in K)\",\n \"metal\": \"in log Fe/H\",\n \"logg\": \"cgs\"\n },\n\n \"planet\" :{\n\t \"type\": \"user\",\n \"exopath\" : \"file path\",\n \"w_unit\": \"Angs,cm,um,cm or Hz\",\n \"f_unit\": \"rp/r* or fp/f*\"\n },\n\n 
\"observation\": {\n \"sat_level\": \"in % sat level\",\n \"transit_duration\": \"in seconds\",\n \"noccultations\": \"num transits\",\n \"wave_bin\": \"in micron\",\n \"fraction\": \"time in/out\", \n \"noise_floor\":\"constant number or file name\"\n }\n }\n print(\"Replace all inputs before calling run_pandexo.\")\n return pandexo_input", "def paths_import_all(self, path_fname):\n\t\twith open (path_fname, \"rb\") as paths_f:\n\t\t\tpaths_dict = pickle.load(paths_f)\n\n\t\t# keyed by environment name\n\t\tunscrambled_dict = {}\n\t\tfor key in paths_dict.keys():\n\t\t\tunscrambled_dict[key] = self.moveit_unscramble(paths_dict[key])\n\n\t\treturn unscrambled_dict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads in a file, f, using a configurable block size, block_size. Returns an md5 hash of the content of the file
def md5_for_file(f, block_size=2**20):
    m = hashlib.md5()
    with open(f, "rb") as f:
        while True:
            buf = f.read(block_size)
            if not buf:
                break
            m.update(buf)
    return m.hexdigest()
[ "def checksum_md5 (filename) :\n fname = filename\n block_size = 0x10000\n fd = open(fname, \"rb\")\n try:\n block = [ fd.read(block_size) ]\n while len(block[-1]) > 0 :\n block.append ( fd.read(block_size) )\n contents = block\n zero = hashlib.md5()\n i = 0 \n for el in contents :\n i += 1\n zero.update( el )\n m = zero\n return m.hexdigest()\n finally:\n fd.close()\n return None", "def calculate_md5_checksum(filename):\n\n length = io.DEFAULT_BUFFER_SIZE\n md5 = hashlib.md5()\n\n with io.open(filename, mode=\"rb\") as fd:\n for chunk in iter(lambda: fd.read(length), b''):\n md5.update(chunk)\n\n return md5.hexdigest()", "def md5_file_chunk(file_name, chunk_size=4096):\n hash_md5 = hashlib.md5()\n with open(file_name, \"rb\") as f:\n for chunk in iter(lambda: f.read(chunk_size), b\"\"):\n hash_md5.update(chunk)\n return base64.b64encode(hash_md5.digest()).decode()", "def digest_file(f, a):\n h = hashlib.new(a)\n chunk_size = 1024 * 10\n data = f.read(chunk_size)\n while data:\n h.update(data)\n data = f.read(chunk_size)\n if hasattr(f, 'name'):\n log.debug('hashed %s with %s to be %s', f.name, a, h.hexdigest())\n else:\n log.debug('hashed a file with %s to be %s', a, h.hexdigest())\n return h.hexdigest()", "def calc_md5( path_filename ):\n hash_md5 = hashlib.md5()\n with open( path_filename , \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()", "def compute_md5(file):\n md5 = hashlib.md5()\n while True:\n buf = file.read(8192)\n if not buf:\n break\n md5.update(buf)\n return md5", "def calcFileMd5sum(filename): \n\n m = hashlib.md5()\n\n # Read file in as 128 byte chunks\n with open(filename) as f: m.update(f.read(128))\n \n return m.hexdigest()", "def get_checksum(content, encoding=\"utf8\", block_size=8192):\r\n md = hashlib.md5()\r\n\r\n def safe_update(txt):\r\n try:\r\n md.update(txt)\r\n except UnicodeEncodeError:\r\n md.update(txt.encode(encoding))\r\n\r\n try:\r\n isfile = os.path.isfile(content)\r\n except TypeError:\r\n # Will happen with binary content.\r\n isfile = False\r\n if isfile:\r\n with open(content, \"rb\") as ff:\r\n txt = ff.read(block_size)\r\n while txt:\r\n safe_update(txt)\r\n txt = ff.read(block_size)\r\n elif hasattr(content, \"read\"):\r\n pos = content.tell()\r\n content.seek(0)\r\n txt = content.read(block_size)\r\n while txt:\r\n safe_update(txt)\r\n txt = content.read(block_size)\r\n content.seek(pos)\r\n else:\r\n safe_update(content)\r\n return md.hexdigest()", "def filehash(filepath, blocksize=4096):\n sha = hashlib.sha256()\n with open(filepath, 'rb') as fp:\n while True:\n data = fp.read(blocksize)\n if data:\n sha.update(data)\n else:\n break\n return sha.hexdigest()", "def _get_file_md5sum(file_name):\n hash_obj = hashlib.md5()\n with open(file_name, 'rb') as f:\n hash_obj.update(f.read())\n return hash_obj.hexdigest().encode('utf-8')", "def block_hash_read(self, f):\n return self._readline(f)", "def filehash(file):\n hasher = hashlib.md5()\n f = open(file, 'rb')\n buf = f.read()\n hasher.update(buf)\n return hasher.hexdigest()", "def md5sum_file(filename):\n import hashlib\n \n infile = open(filename, 'rb')\n content = infile.read()\n infile.close()\n m = hashlib.md5() \n m.update(content)\n md5 = m.hexdigest() # now the md5 variable contains the MD5 sum\n \n return md5", "def md5sum():\r\n hashSum = None\r\n try:\r\n # Open as read binary\r\n config = _ConfigFile._open('rb', yaml=False)\r\n\r\n # pipe contents of the file through\r\n hashSum = md5(config).hexdigest()\r\n 
except:\r\n hashSum = 'none'\r\n\r\n return hashSum", "def file_md5(filename):\r\n file_o = read_file(filename)\r\n file_str = file_o.read()\r\n file_o.close()\r\n return string_md5(file_str)", "def hashfile(afile, hasher=hashlib.sha256(), blocksize=65536):\n buf = afile.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(blocksize)\n return hasher.hexdigest()", "def checksum (self, filelist, block_size = 512):\n ret = []\n\n for f in filelist:\n h = hashlib.sha256 ()\n\n try:\n fd = open (f, \"rb\")\n except IOError:\n print \"Can't open :\", f\n else:\n while True:\n data = fd.read (block_size)\n\n if not data:\n break\n\n h.update (data)\n\n fd.close ()\n ret.append ((f, h.hexdigest ()))\n\n return ret", "def md5_filelike(filelike):\n m = hashlib.md5()\n while True:\n s = filelike.read()\n if len(s) == 0:\n break\n else:\n m.update(s)\n return m.hexdigest()", "def compute_file_hash(file_path, alg='md5'):\n if alg == 'md5':\n md5_obj = hashlib.md5()\n block_size = 65536\n # read chunk by chunk for big file\n with open(file_path, 'r+b') as f:\n for block in iter(lambda: f.read(block_size), \"\"):\n md5_obj.update(block)\n local_md5 = md5_obj.hexdigest()\n file_hash = local_md5\n\n else:\n raise NotImplementedError(\"ALGORITHM {0} NOT IMPLEMENTED!\".format(alg))\n return file_hash" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Recursively walks the target directory and removes all but one copy of files that share an md5 hash
def deduplicate_directory(target):
    dedupe_walk = os.walk(target)
    dupes = {}
    for w in dedupe_walk:
        src_files = product([w[0]], w[2])
        for file in src_files:
            target_f = join(file[0], file[1])
            hash = md5_for_file(join(file[0], file[1]))
            try:
                dupes[hash].append((target_f, os.stat(target_f).st_size))
            except KeyError:
                dupes[hash] = []
                dupes[hash].append((target_f, os.stat(target_f).st_size))
    for key in dupes.keys():
        for file, size in dupes[key][1:]:
            os.remove(file)
    return dupes
[ "def remove_file_hash(f_path):\n\tf_path = stringutil.normalize_file(f_path)\n\twith lock('w'), closing(conn.cursor()) as cur:\n\t\tcur.execute('DELETE FROM hashes WHERE file_path=:fp',{'fp':f_path})\n\t\tconn.commit()", "def remove_dup(self):\n duplicates = []\n hash_keys = dict()\n\n os.chdir(self.folderpath)\n file_list = os.listdir()\n print('number of images before removing duplicates', len(file_list))\n\n for index, filename in enumerate(os.listdir('.')): # listdir('.') = current directory\n if os.path.isfile(filename):\n with open(filename, 'rb') as f:\n filehash = hashlib.md5(f.read()).hexdigest()\n if filehash not in hash_keys:\n hash_keys[filehash] = index\n else:\n duplicates.append((index, hash_keys[filehash]))\n\n print('number of duplicates', len(duplicates))\n print('number of images after removing', len(file_list) - len(duplicates))\n\n for index in duplicates:\n os.remove(file_list[index[0]])\n\n print('Remove duplicates images from', self.folderpath)", "def purgeSharefiles(dir):\n for file in glob(f\"{dir}/share-*.ctxt\"):\n os.remove(file)", "def recursiveRemove(path):", "def hash_tree(root):\n file_list = []\n for root_directory, directories, files in os.walk(root):\n for file in files:\n file_list.append(os.path.join(root_directory, file))\n sorted_file_list = sorted(file_list)\n sha256 = hashlib.sha256()\n for file in sorted_file_list:\n _update_sha256(file, sha256)\n return sha256.hexdigest()", "def rm_files_into_dir(top):\n\tfor root, dirs, files in os.walk(top, topdown=False):\n\t\tfor name in files:\t\t\t\n\t\t\tif not name.endswith( tuple(file_type) + (\"srt\",) ) :\n\t\t\t\tpath = os.path.join(root, name)\n\t\t\t\tos.chmod(path, stat.S_IWUSR)\n\t\t\t\tos.remove(path)\n\t\t\t\tprint \"remove :\",path", "def cleanup(base_dir):\n for root, dirs, files in os.walk(base_dir, topdown=False):\n for name in files:\n fname = os.path.join(root, name)\n if fname not in __FETCHED:\n print('{}Deleting: {}{}'.format(YELLOW, RESET, fname))\n os.unlink(fname)\n\n for name in dirs:\n dname = os.path.join(root, name)\n if not os.listdir(dname):\n print('{}Deleting: {}{}'.format(YELLOW, RESET, dname))\n os.rmdir(dname)", "def md5files(files):\n m = hashlib.md5()\n for key, path in sorted(files, key=lambda x: x[0]):\n m.update(six.b(key))\n if os.path.isdir(path):\n m.update(md5files([\n (os.path.join(key, filename), os.path.join(path, filename))\n for filename in os.listdir(path)\n if not filename.startswith('.')]))\n else:\n with open(path, 'rb') as f:\n m.update(f.read())\n return m.hexdigest()", "def remove_files(self):\n flag = False\n _, _, files = next(os.walk(self.dest_path), (self.dest_path, [], []))\n for each in files:\n file_path = os.path.join(self.dest_path, each)\n try:\n os.remove(file_path)\n except PermissionError:\n flag = True\n print(\"Zipper: File: {} is being used by another process\".format(each))\n # Will not create and store the zip in todecode folder until all files are removed.\n if flag:\n self.remove_files()", "def remove_uuid_recursive(folder, dry=False):\n for fn in Path(folder).rglob('*.*'):\n print(remove_uuid_file(fn, dry=False))", "def clean_file_in_dir(dirname, filename):\n for parent, _, filenames in os.walk(dirname):\n for name in filenames:\n if name == filename:\n os.remove(os.path.join(parent, name))", "def remove_checksum(filepath):\n file_obj = file_factory(filepath)\n return file_obj.remove_checksum()", "def clean_files_pdbbind(pdb_files_dir='../data/input/pdbbind/refined-set/'):\n pdb_ids = [os.path.join(pdb_files_dir, x) for x in 
os.listdir(pdb_files_dir) if x[0] != '.']\n for pdb_id in tqdm(pdb_ids):\n files = [os.path.join(pdb_id, f) for f in os.listdir(pdb_id) if f[0] != '.']\n for f in files:\n if f[-4:] == '.sdf':\n subprocess.call(['rm', f])", "def test_clean():\n\n for deldir in [\"srcdata\", \"newdata\"]:\n path = Path(deldir)\n for name in path.glob(\"*.json\"):\n name.unlink()\n for name in path.glob(\"*.pickle\"):\n name.unlink()\n path.rmdir()\n\n rmtree(\"sys\")", "def master_list(start):\n flist = []\n oldcwd = os.getcwd()\n os.chdir(start)\n # Collect all files under start\n for root, dirs, files in os.walk(\".\"):\n for fname in files:\n # Only keep the topmost hash file\n if fname == hashfile and root != \".\":\n log(\"REMOVING\", op.join(root,fname))\n os.remove(op.join(root,fname))\n else:\n flist.append(op.join(root[2:],fname))\n os.chdir(oldcwd)\n return flist", "def __deduplicate(self, path, stat_info, fingerprint, file_obj):\n\n # No need to deduplicate empty files\n if stat_info.st_size == 0:\n return\n\n # Check modify time\n if self.__config[\"trust_modify_time\"]:\n prev_info = self.__prev_files.get(path)\n\n if prev_info is not None:\n prev_hash, prev_fingerprint = prev_info\n\n if fingerprint == prev_fingerprint:\n LOG.debug(\n \"File '%s' hasn't been changed. Make it an extern file with %s hash.\",\n path, prev_hash)\n\n return prev_hash\n\n # Find files with the same hash -->\n file_size = 0\n\n while file_size < stat_info.st_size:\n data = file_obj.read(\n min(psys.BUFSIZE, stat_info.st_size - file_size))\n\n if data:\n file_size += len(data)\n elif file_size == stat_info.st_size:\n break\n else:\n raise Error(\"The file has been truncated during the backup.\")\n\n file_hash = file_obj.hexdigest()\n file_obj.reset()\n\n if file_hash in self.__hashes:\n LOG.debug(\"Make '%s' an extern file with %s hash.\", path, file_hash)\n return file_hash\n # Find files with the same hash <--", "def scrubMetadata(self, autoRefresh=True):\r\n if autoRefresh:\r\n # we need to make sure we have the most current data first\r\n self.refresh(recursive=True)\r\n\r\n # built a set of all currently existing file and directory hashes\r\n hashes = set()\r\n for f in self.all(recursive=True):\r\n hashes.add(f.hash())\r\n\r\n # build a list of all hashes currently in self._md that are NOT in the\r\n # current set of hashes\r\n outdatedHashes = []\r\n for h in self._md.keys():\r\n if h not in hashes:\r\n outdatedHashes.append(h)\r\n\r\n # delete every entry in self._md that is outdated\r\n for h in outdatedHashes:\r\n del self._md[h]", "def delete_duplicates(image_dir, hash_size, highfreq_factor, skip_strict):\n hash_size, highfreq_factor = int(hash_size), int(highfreq_factor)\n if not skip_strict:\n delete_strict_duplicates(image_dir)\n counter = 0\n print(f\"[hash_size={hash_size}]\"\n f\"[highfreq_factor={highfreq_factor}]\")\n images = {}\n for ind, image in enumerate(sorted(os.listdir(image_dir))):\n print(ind)\n if os.path.isfile(os.path.join(image_dir, image)):\n images[image] = imagehash.phash(\n Image.open(os.path.join(image_dir, image)),\n hash_size=hash_size, highfreq_factor=highfreq_factor)\n flipped = {}\n duplicates_path = os.path.join(image_dir, 'duplicates')\n os.makedirs(duplicates_path, exist_ok=True)\n for key, value in images.items():\n if value not in flipped:\n flipped[value] = key\n else:\n counter += 1\n print(f\"[hash_size={hash_size}][highfreq_factor=\"\n f\"{highfreq_factor}][no. 
{counter}] Similar images:\"\n f\" {flipped[value]} {key}\")\n shutil.copy(os.path.join(image_dir, flipped[value]),\n os.path.join(\n duplicates_path,\n f'DUPLICATE_{counter}_ORIG_'\n + flipped[value]))\n shutil.move(os.path.join(image_dir, key),\n os.path.join(\n duplicates_path,\n f'DUPLICATE_{counter}_DUPL_' + key))", "def md5_filelike(filelike):\n m = hashlib.md5()\n while True:\n s = filelike.read()\n if len(s) == 0:\n break\n else:\n m.update(s)\n return m.hexdigest()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve files generated by PureCN_Dx
def _get_purecn_dx_files(paired, out):
    out_base = "%s-dx" % utils.splitext_plus(out["rds"])[0]
    all_files = []
    for key, ext in [[("mutation_burden",), "_mutation_burden.csv"],
                     [("plot", "signatures"), "_signatures.pdf"],
                     [("signatures",), "_signatures.csv"]]:
        cur_file = "%s%s" % (out_base, ext)
        out = tz.update_in(out, key, lambda x: cur_file)
        all_files.append(os.path.basename(cur_file))
    return out_base, out, all_files
[ "def get_files_to_generate(self):\r\n pass", "def getFiles(self) -> List[ghidra.framework.model.DomainFile]:\n ...", "def generate_files(self) -> List[Tuple[str, str, str]]:\n raise NotImplementedError() # NOQA", "def _get_purecn_files(paired, work_dir, require_exist=False):\n out_base = os.path.join(work_dir, \"%s-purecn\" % (dd.get_sample_name(paired.tumor_data)))\n out = {\"plot\": {}}\n all_files = []\n for plot in [\"chromosomes\", \"local_optima\", \"segmentation\", \"summary\"]:\n if plot == \"summary\":\n cur_file = \"%s.pdf\" % out_base\n else:\n cur_file = \"%s_%s.pdf\" % (out_base, plot)\n if not require_exist or os.path.exists(cur_file):\n out[\"plot\"][plot] = cur_file\n all_files.append(os.path.basename(cur_file))\n for key, ext in [[\"hetsummary\", \".csv\"], [\"dnacopy\", \"_dnacopy.seg\"], [\"genes\", \"_genes.csv\"],\n [\"log\", \".log\"], [\"loh\", \"_loh.csv\"], [\"rds\", \".rds\"],\n [\"variants\", \"_variants.csv\"]]:\n cur_file = \"%s%s\" % (out_base, ext)\n if not require_exist or os.path.exists(cur_file):\n out[key] = cur_file\n all_files.append(os.path.basename(cur_file))\n return out_base, out, all_files", "def get_catalog_files():\n #Get path and all file names\n data_root = os.getenv(\"DATA_ROOT\")\n catalog_path = data_root + \"/catalog/\"\n file_names = os.listdir(catalog_path)\n\n #Extract the csv files\n catalog_files = []\n for file_name in file_names:\n if file_name.endswith(\".csv\"):\n catalog_files.append(catalog_path + file_name)\n \n #Results\n return catalog_files", "def get_data_files ():\n installpath = os.path.join (\"share\", \"ocempgui\")\n path = \"data\"\n dirs = get_directory_list (path)\n filedict = {}\n for path in dirs:\n files = glob.glob (os.path.join (path, \"*.*\"))\n if files:\n filedict[path] = files\n return get_installation_files (\"data\", installpath, filedict)", "def files(self):\n return map(os.path.basename,template.files_from_results(self.results))", "def extension_files(self, ):\n return extension_files(self.spec_dir, self.group_name)", "def list_from_dico(self):\n rflist = [' --- raw files ---']\n pflist = [' --- processed ---']\n flist = []\n for i,ftype in self.file_dico.items():\n if ftype in self.accept:\n iP = Path(i)\n if ftype in ('fid', 'FID', 'ser') and self.dotd:\n rflist.append(MSfile(iP, iP.relative_to(self.base), ftype)) \n elif self.msh5 and i.endswith('.msh5'):\n pflist.append(MSfile(iP, iP.relative_to(self.base), ftype))\n if self.dotd:\n flist = rflist\n if self.msh5:\n flist += pflist\n return flist", "def get_hi_files():\n proj_dirs = swp.project_info()\n hi_path = os.path.join(proj_dirs['data'], \"hi1a\")\n hi_path = os.path.join(hi_path, '*.fts')\n out_files = glob.glob(hi_path)\n return out_files", "def get_files(self, ext=\"xml\"):\n f = glob.glob(self.path+\"/*{}\".format(ext))\n return f", "def files(site):\n build_path = os.path.join(PATH, '.build')\n print fileList(build_path, relative=True)\n return [File(site, p) for p in fileList(build_path, relative=True)]", "def _get_files(self):\n\n glob_path = os.path.join(self.path, self.mask)\n return glob.glob(glob_path)", "def acquire_files():\n sample_measurements = []\n sample_names = []\n dir_path = os.getcwd()\n for file in os.listdir(dir_path):\n if file.lower().endswith(\".spe\"):\n \"Ignore the background and reference spectra\"\n if file == \"USS_Independence_Background.Spe\":\n pass\n elif file == \"UCB018_Soil_Sample010_2.Spe\":\n pass\n else:\n sample_measurements.append(file)\n name = os.path.splitext(file)[0].replace(\"_\", \" \")\n 
sample_names.append(str(name))\n return sample_measurements, sample_names", "def get_documentation_files ():\n installpath = os.path.join (\"share\", \"doc\", \"ocempgui\")\n docpaths = get_directory_list (\"doc\")\n\n # Traverse all the directories in the docpath an get the needed files.\n # Every file installed from the docs will have a suffix.\n filedict = {}\n for path in docpaths:\n files = glob.glob (os.path.join (path, \"*.*\"))\n if files:\n filedict[path] = files\n return get_installation_files (\"doc\", installpath, filedict)", "def data_files(self):\n #tf_record_pattern = os.path.join(FLAGS.data_dir, '%s-*' % self.subset)\n #tf_record_pattern = os.path.join(FLAGS.data_dir, 'test_*')\n tf_record_pattern = FLAGS.data_dir\n #tf_record_pattern = os.path.join(FLAGS.data_dir, FLAGS.ImageSet_basename + '*')\n print(\"tf_record_pattern:\")\n print(tf_record_pattern)\n data_files = tf.gfile.Glob(tf_record_pattern)\n print(data_files)\n if not data_files:\n print('No files found for dataset %s/%s at %s' % (self.name,\n self.subset,\n FLAGS.data_dir))\n\n self.download_message()\n exit(-1)\n return data_files", "def getOutputTextureFiles(self):\r\n return _osgDB.Output_getOutputTextureFiles(self)", "def returnFiles(self):\n return self.files", "def data_files():\r\n data_files = []\r\n path = get_data_path(media=\"media\")\r\n for f in findall(path):\r\n data_files.append(('media/models_media', [f]))\r\n return data_files" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run PureCN.R wrapper with presegmented CNVkit or GATK4 inputs.
def _run_purecn(paired, work_dir):
    segfns = {"cnvkit": _segment_normalized_cnvkit, "gatk-cnv": _segment_normalized_gatk}
    out_base, out, all_files = _get_purecn_files(paired, work_dir)
    failed_file = out_base + "-failed.log"
    cnr_file = tz.get_in(["depth", "bins", "normalized"], paired.tumor_data)
    if not utils.file_uptodate(out["rds"], cnr_file) and not utils.file_exists(failed_file):
        cnr_file, seg_file = segfns[cnvkit.bin_approach(paired.tumor_data)](cnr_file, work_dir, paired)
        from bcbio import heterogeneity
        vcf_file = heterogeneity.get_variants(paired.tumor_data, include_germline=False)[0]["vrn_file"]
        vcf_file = germline.filter_to_pass_and_reject(vcf_file, paired, out_dir=work_dir)
        with file_transaction(paired.tumor_data, out_base) as tx_out_base:
            # Use UCSC style naming for human builds to support BSgenome
            genome = ("hg19" if dd.get_genome_build(paired.tumor_data) in ["GRCh37", "hg19"]
                      else dd.get_genome_build(paired.tumor_data))
            cmd = ["PureCN.R", "--seed", "42", "--out", tx_out_base, "--rds", "%s.rds" % tx_out_base,
                   "--sampleid", dd.get_sample_name(paired.tumor_data),
                   "--genome", genome, "--vcf", vcf_file, "--tumor", cnr_file,
                   "--segfile", seg_file, "--funsegmentation", "Hclust", "--maxnonclonal", "0.3"]
            if dd.get_num_cores(paired.tumor_data) > 1:
                cmd += ["--cores", str(dd.get_num_cores(paired.tumor_data))]
            try:
                cmd = "export R_LIBS_USER=%s && %s && %s" % (utils.R_sitelib(), utils.get_R_exports(),
                                                             " ".join([str(x) for x in cmd]))
                do.run(cmd, "PureCN copy number calling")
            except subprocess.CalledProcessError as msg:
                if _allowed_errors(str(msg)):
                    logger.info("PureCN failed to find solution for %s: skipping" %
                                dd.get_sample_name(paired.tumor_data))
                    with open(failed_file, "w") as out_handle:
                        out_handle.write(str(msg))
                else:
                    logger.exception()
                    raise
            for f in all_files:
                if os.path.exists(os.path.join(os.path.dirname(tx_out_base), f)):
                    shutil.move(os.path.join(os.path.dirname(tx_out_base), f),
                                os.path.join(os.path.dirname(out_base), f))
    out = _get_purecn_files(paired, work_dir, require_exist=True)[1]
    return out if (out.get("rds") and os.path.exists(out["rds"])) else None
[ "def main(args):\n model, ensemble = setup_gnina_model(args.cnn, args.dimension, args.resolution)\n model.eval() # Ensure models are in evaluation mode!\n\n device = utils.set_device(args.gpu)\n model.to(device)\n\n example_provider = setup.setup_example_provider(args.input, args, training=False)\n grid_maker = setup.setup_grid_maker(args)\n\n # TODO: Allow average over different rotations\n loader = dataloaders.GriddedExamplesLoader(\n example_provider=example_provider,\n grid_maker=grid_maker,\n random_translation=0.0, # No random translations for inference\n random_rotation=False, # No random rotations for inference\n device=device,\n grids_only=True,\n )\n\n for batch in loader:\n if not ensemble:\n log_pose, affinity = model(batch)\n else:\n log_pose, affinity, affinity_var = model(batch)\n\n pose = torch.exp(log_pose[:, -1])\n\n for i, (p, a) in enumerate(zip(pose, affinity)):\n print(f\"CNNscore: {p:.5f}\")\n print(f\"CNNaffinity: {a:.5f}\")\n if ensemble:\n print(f\"CNNvariance: {affinity_var[i]:.5f}\")\n print(\"\")", "def _run(input_ucn_file_name, input_image_dir_name, first_date_string,\n last_date_string, num_examples_to_keep, output_dir_name):\n\n # Read upconvnet and CNN.\n ucn_metafile_name = short_course.find_model_metafile(\n model_file_name=input_ucn_file_name, raise_error_if_missing=True)\n\n print('Reading trained upconvnet from: \"{0:s}\"...'.format(\n input_ucn_file_name))\n ucn_model_object = short_course.read_keras_model(input_ucn_file_name)\n\n print('Reading upconvnet metadata from: \"{0:s}\"...'.format(\n ucn_metafile_name))\n ucn_metadata_dict = short_course.read_model_metadata(ucn_metafile_name)\n\n cnn_file_name = ucn_metadata_dict[short_course.CNN_FILE_KEY]\n cnn_metafile_name = short_course.find_model_metafile(\n model_file_name=cnn_file_name, raise_error_if_missing=True)\n\n print('Reading trained CNN from: \"{0:s}\"...'.format(cnn_file_name))\n cnn_model_object = short_course.read_keras_model(cnn_file_name)\n\n print('Reading CNN metadata from: \"{0:s}\"...'.format(cnn_metafile_name))\n cnn_metadata_dict = short_course.read_model_metadata(cnn_metafile_name)\n print(SEPARATOR_STRING)\n\n # Read images.\n image_file_names = short_course.find_many_image_files(\n first_date_string=first_date_string, last_date_string=last_date_string,\n image_dir_name=input_image_dir_name)\n\n image_dict = short_course.read_many_image_files(image_file_names)\n print(SEPARATOR_STRING)\n\n # Decide which images to keep.\n num_examples = len(image_dict[short_course.STORM_IDS_KEY])\n num_examples_to_keep = min([num_examples_to_keep, num_examples])\n\n example_indices = numpy.linspace(\n 0, num_examples - 1, num=num_examples, dtype=int)\n example_indices = numpy.random.choice(\n example_indices, size=num_examples_to_keep, replace=False)\n\n storm_ids = numpy.round(\n image_dict[short_course.STORM_IDS_KEY][example_indices]\n ).astype(int)\n storm_steps = numpy.round(\n image_dict[short_course.STORM_STEPS_KEY][example_indices]\n ).astype(int)\n image_matrix = image_dict[short_course.PREDICTOR_MATRIX_KEY][\n example_indices, ...]\n\n # Reconstruct images.\n predictor_names = image_dict[short_course.PREDICTOR_NAMES_KEY]\n\n print('Normalizing {0:d} images...'.format(num_examples_to_keep))\n image_matrix_norm, _ = short_course.normalize_images(\n predictor_matrix=image_matrix + 0., predictor_names=predictor_names,\n normalization_dict=cnn_metadata_dict[\n short_course.NORMALIZATION_DICT_KEY]\n )\n\n print('Applying CNN to create scalar features...')\n feature_matrix = 
short_course._apply_cnn(\n cnn_model_object=cnn_model_object, predictor_matrix=image_matrix_norm,\n output_layer_name=ucn_metadata_dict[short_course.CNN_FEATURE_LAYER_KEY],\n verbose=False)\n\n print('Applying upconvnet to reconstruct images from scalar features...')\n reconstructed_image_matrix_norm = ucn_model_object.predict(\n feature_matrix, batch_size=num_examples_to_keep)\n\n print('Denormalizing reconstructed images...\\n')\n reconstructed_image_matrix = short_course.denormalize_images(\n predictor_matrix=reconstructed_image_matrix_norm,\n predictor_names=predictor_names,\n normalization_dict=cnn_metadata_dict[\n short_course.NORMALIZATION_DICT_KEY]\n )\n\n # Plot reconstructed images.\n actual_image_dir_name = '{0:s}/actual_images'.format(output_dir_name)\n reconstructed_image_dir_name = '{0:s}/reconstructed_images'.format(\n output_dir_name)\n\n short_course._create_directory(actual_image_dir_name)\n short_course._create_directory(reconstructed_image_dir_name)\n\n temperature_index = predictor_names.index(short_course.TEMPERATURE_NAME)\n\n for i in range(num_examples_to_keep):\n this_temp_matrix_kelvins = numpy.concatenate(\n (image_matrix[i, ..., temperature_index],\n reconstructed_image_matrix[i, ..., temperature_index]),\n axis=0)\n\n this_min_temp_kelvins = numpy.percentile(this_temp_matrix_kelvins, 1)\n this_max_temp_kelvins = numpy.percentile(this_temp_matrix_kelvins, 99)\n\n short_course.plot_many_predictors_with_barbs(\n predictor_matrix=image_matrix[i, ...],\n predictor_names=predictor_names,\n min_colour_temp_kelvins=this_min_temp_kelvins,\n max_colour_temp_kelvins=this_max_temp_kelvins)\n\n this_figure_file_name = (\n '{0:s}/storm={1:06d}_step={2:06d}_actual.jpg'\n ).format(actual_image_dir_name, storm_ids[i], storm_steps[i])\n\n print('Saving figure to file: \"{0:s}\"...'.format(this_figure_file_name))\n pyplot.savefig(this_figure_file_name, dpi=FIGURE_RESOLUTION_DPI)\n pyplot.close()\n\n short_course.plot_many_predictors_with_barbs(\n predictor_matrix=reconstructed_image_matrix[i, ...],\n predictor_names=predictor_names,\n min_colour_temp_kelvins=this_min_temp_kelvins,\n max_colour_temp_kelvins=this_max_temp_kelvins)\n\n this_figure_file_name = (\n '{0:s}/storm={1:06d}_step={2:06d}_reconstructed.jpg'\n ).format(reconstructed_image_dir_name, storm_ids[i], storm_steps[i])\n\n print('Saving figure to file: \"{0:s}\"...'.format(this_figure_file_name))\n pyplot.savefig(this_figure_file_name, dpi=FIGURE_RESOLUTION_DPI)\n pyplot.close()", "def _run_xlnet_classifier(self):\n run_classifier.main(unused_argv=None)", "def cifar_demo():\n mpi.mkdir(FLAGS.output_dir)\n logging.info('Loading cifar data...')\n cifar = visiondata.CifarDataset(FLAGS.root, is_training=True)\n cifar_test = visiondata.CifarDataset(FLAGS.root, is_training=False)\n\n # try: use sub images\n #cifar = datasets.SubImageSet(cifar, [28,28], 1)\n #cifar_test = datasets.CenterRegionSet(cifar_test, [28,28])\n\n conv = pipeline.ConvLayer([\n pipeline.PatchExtractor([6,6], 1), # extracts patches\n pipeline.MeanvarNormalizer({'reg': 10}), # normalizes the patches\n pipeline.LinearEncoder({},\n trainer = pipeline.ZcaTrainer({'reg': 0.1})), # Does whitening\n pipeline.ThresholdEncoder({'alpha': 0.25, 'twoside': True},\n trainer = pipeline.OMPTrainer(\n {'k': 1600, 'max_iter':100})), # does encoding\n pipeline.SpatialPooler({'grid': (4,4), 'method': 'max'}) # average pool\n ])\n logging.info('Training the pipeline...')\n conv.train(cifar, 400000)\n logging.info('Dumping the pipeline...')\n if mpi.is_root():\n with 
open(os.path.join(FLAGS.output_dir, FLAGS.model_file),'w') as fid:\n pickle.dump(conv, fid)\n fid.close()\n logging.info('Extracting features...')\n Xtrain = conv.process_dataset(cifar, as_2d = True)\n mpi.dump_matrix_multi(Xtrain,\n os.path.join(FLAGS.output_dir, \n FLAGS.feature_file+'_train'))\n Ytrain = cifar.labels().astype(np.int)\n Xtest = conv.process_dataset(cifar_test, as_2d = True)\n mpi.dump_matrix_multi(Xtest,\n os.path.join(FLAGS.output_dir, \n FLAGS.feature_file+'_test'))\n Ytest = cifar_test.labels().astype(np.int)\n # normalization\n m, std = classifier.feature_meanstd(Xtrain)\n Xtrain -= m\n Xtrain /= std\n Xtest -= m\n Xtest /= std\n \n w, b = classifier.l2svm_onevsall(Xtrain, Ytrain, 0.005)\n if mpi.is_root():\n with open(os.path.join(FLAGS.output_dir, FLAGS.svm_file), 'w') as fid:\n pickle.dump({'m': m, 'std': std, 'w': w, 'b': b}, fid)\n accu = np.sum(Ytrain == (np.dot(Xtrain,w)+b).argmax(axis=1)) \\\n / float(len(Ytrain))\n accu_test = np.sum(Ytest == (np.dot(Xtest,w)+b).argmax(axis=1)) \\\n / float(len(Ytest))\n \n logging.info('Training accuracy: %f' % accu)\n logging.info('Testing accuracy: %f' % accu_test)", "def run_classifier_onnx():\n if args_opt.eval_data_file_path == \"\":\n raise ValueError(\"'eval_data_file_path' must be set when do onnx evaluation task\")\n assessment_method = args_opt.assessment_method.lower()\n ds = create_classification_dataset(batch_size=args_opt.eval_batch_size,\n assessment_method=assessment_method,\n data_file_path=args_opt.eval_data_file_path,\n schema_file_path=args_opt.schema_file_path,\n dataset_format=args_opt.dataset_format,\n do_shuffle=(args_opt.eval_data_shuffle.lower() == \"true\"))\n do_eval_onnx(ds, args_opt.num_class, assessment_method)", "def main():\n\n config = SimCLRConfig.parse_arguments()\n os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([str(gpu) for gpu in config.gpus])\n num_gpus_per_node = len(config.gpus)\n world_size = config.num_nodes * num_gpus_per_node\n distributed = world_size > 1\n setattr(config, 'num_gpus_per_node', num_gpus_per_node)\n setattr(config, 'world_size', world_size)\n setattr(config, 'distributed', distributed)\n \n rich.print(config.__dict__)\n config.save()\n\n if config.distributed:\n rich.print(f\"Distributed training on {world_size} GPUs.\")\n mp.spawn(\n main_worker,\n nprocs=config.num_gpus_per_node,\n args=(config, )\n )\n else:\n rich.print(f\"Single GPU training.\")\n main_worker(0, config=config) # single machine, single gpu", "def _Run(benchmark_spec, rank):\n vm = benchmark_spec.vms[rank]\n master = benchmark_spec.vms[0]\n nccl_env = []\n if FLAGS.nccl_cuda_visible_devices:\n nccl_env.append('CUDA_VISIBLE_DEVICES={}'\n .format(FLAGS.nccl_cuda_visible_devices))\n nccl_env.extend(FLAGS.nccl_extra_params)\n\n prof_cmd = ''\n if benchmark_spec.profiler:\n prof_cmd = (r'{}/bin/nvprof --profile-child-processes '\n r'-o /tmp/pkb/%h.%p.nvprof'.format(cuda_toolkit.CUDA_HOME))\n\n distributed_cmd = (\n 'torch.distributed.launch '\n '--nproc_per_node={nproc_per_node} '\n '--nnodes={num_vms} '\n '--node_rank={rank} '\n '--master_addr={addr} '\n '--master_port=2222'\n .format(num_vms=benchmark_spec.num_vms,\n rank=rank,\n addr=master.internal_ip,\n nproc_per_node=benchmark_spec.nproc_per_node))\n\n cmd_flags = {\n 'adam-betas': \"'(0.9, 0.98)'\",\n 'adam-eps': 1e-06,\n 'arch': 'roberta_large',\n 'attention-dropout': 0.1,\n 'clip-norm': 1.0,\n 'criterion': 'masked_lm',\n 'disable-validation': '',\n 'distributed-no-spawn': '',\n 'dropout': 0.1,\n 'fast-stat-sync': '',\n 'log-format': 
'simple',\n 'lr': 0.0004,\n 'lr-scheduler': 'polynomial_decay',\n 'max-tokens': 6000,\n 'max-update': 1500000,\n 'memory-efficient-fp16': '',\n 'multilang-sampling-alpha': 0.7,\n 'num-workers': 4,\n 'no-epoch-checkpoints': '',\n 'no-save': '',\n 'optimizer': 'adam',\n 'sample-break-mode': 'complete',\n 'save-interval-updates': 3000,\n 'task': 'multilingual_masked_lm',\n 'tokens-per-sample': 512,\n 'total-num-update': 1500000,\n 'train-subset': 'train',\n 'valid-subset': 'valid',\n 'warmup-updates': 15000,\n 'weight-decay': 0.01,\n }\n\n cmd_flags.update({\n 'log-interval': benchmark_spec.log_interval,\n 'max-sentences': benchmark_spec.max_sentences,\n 'update-freq': benchmark_spec.update_freq,\n 'max-epoch': benchmark_spec.max_epoch,\n })\n roberta_benchmark_flags = ' '.join(\n f'--{key}={value}' if value else f'--{key}'\n for key, value in sorted(cmd_flags.items()))\n roberta_benchmark_cmd = (\n f'{FLAGS.torch_env} DGXSYSTEM=DGX1 NEXP=1 PULL=0 LOGDIR=/tmp/robertammlm '\n f'{\" \".join(nccl_env)} {prof_cmd} python3 -m {distributed_cmd} '\n f'$HOME/fairseq/train.py {DATA_PATH}/data-bin/mlm-w103 '\n f'{roberta_benchmark_flags}')\n metadata = _CreateMetadataDict(benchmark_spec)\n stdout, _ = vm.RobustRemoteCommand(roberta_benchmark_cmd)\n return MakeSamplesFromOutput(metadata, stdout) if master == vm else []", "def run_pipeline(base, cpu):\n\n URF_cmd = f'snakemake --snakefile {URF_loc} --directory {d.base} --use-singularity --singularity-args \"--bind {d.base}:/data,{blast_nt_loc}:/blast/blastdb\" --singularity-prefix {singularity_loc} --cores {cpu}'\n\n print('''\\n* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\nYou have selected to run the UPHL Reference-free (URF) Pipeline.\nAttempting to build directory structure.''')\n\n create_dirs_and_moveFs(d, cpu)\n print(' ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~\\n')\n print(f'Will now run the following command:\\n {URF_cmd}')\n print(' ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~\\n')\n time.sleep(3)\n subprocess.run(f'source activate {snkmk_env} && {URF_cmd} && conda deactivate', shell = True)\n\n print('\\n ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~\\n')\n print('Creating \"tree_building\" directory for follow-on analyses.')\n try:\n os.mkdir(d.treebld) # Create /tree_building subdirectory in base dir\n except FileExistsError:\n print('Subdirectory \"tree_building\" already exists.')", "def main():\n set_cuda_device(ARGS.cuda_device_id)\n float_formatter = lambda x: \"%.3f\" % x\n np.set_printoptions(formatter={\"float_kind\": float_formatter})\n\n # Setup logging in base_dir/log.txt\n log_file = os.path.join(ARGS.result_dir, ARGS.experiment_name, \"log.txt\")\n setup_logging(level=ARGS.log_level, filename=log_file)\n logger.info(\" -- Cifar -- Started \")\n print(\"Result dir: \", ARGS.result_dir)\n print(\"Log file: \", log_file)\n\n # Save commandline arguments\n tstart = time.time()\n try:\n if not ARGS.cuda:\n # Set number of CPU threads\n torch.set_num_threads(ARGS.njobs)\n else:\n ARGS.cuda_device_id = ARGS.cuda_device_id[0]\n\n if ARGS.reuse_base_dir is not None:\n base_dir = ARGS.reuse_base_dir\n else:\n base_dir = generate_run_base_dir(\n suffix=\"debug\",\n experiment=\"multilabel-mnist\",\n result_dir=ARGS.result_dir,\n timestamp=tstart,\n )\n exp_dir = generate_experiment_dir(base_dir, ARGS.net, \"test\")\n save_args(ARGS, exp_dir)\n # Create and run experiment\n run_cifar(ARGS, exp_dir)\n except Exception as e:\n logger.exception(\"Experiment crashed.\")\n 
logger.exception(\"Exception: %s\", str(e))\n\n # Measure time\n tstr = time_delta_now(tstart)\n logger.info(\" -- CIFAR -- Finished, took %s\", tstr)", "def main():\n\n args = parse_arguments()\n\n\n crnn = None\n \n if crnn is None:\n crnn = CRNN(\n args.iteration_count,\n args.batch_size,\n args.model_path,\n args.examples_path,\n args.max_image_width,\n 0, #train/test ratio here train rate is 0\n args.restore,\n 1\n )\n\n predict_result = crnn.test()\n f = open(args.output_path,'w')\n for str in predict_result:\n str1 = str.split(':')[0]\n str2 = str.split(':')[1]\n str2 = str2.strip('_')\n f.writelines(str1+':'+str2)\n f.close()", "def main():\n env = os.environ.copy()\n db_root = util.get_db_root()\n assert db_root\n part = util.get_part()\n assert part\n\n information = util.get_part_information(db_root, part)\n\n valid_devices = []\n for name, device in util.get_devices(db_root).items():\n if device['fabric'] == information['device']:\n valid_devices.append(name)\n\n for part, data in util.get_parts(db_root).items():\n if data['device'] in valid_devices:\n command = \"make roi_only\"\n env['XRAY_PART'] = part\n cwd = os.getenv('XRAY_FUZZERS_DIR')\n subprocess.run(command.split(' '), check=True, env=env, cwd=cwd)", "def run_CNN(X):\r\n\r\n print(\"Making CNN predictions\")\r\n model_path = resource_filename(__name__, \"models/model_CNN.json\")\r\n weights_path = resource_filename(__name__, \"models/model_CNN_weights.hdf5\")\r\n\r\n with open(model_path, 'rt') as model_json_file:\r\n model_json = model_json_file.read()\r\n\r\n model = model_from_json(model_json, custom_objects={EmbeddingWithDropout.__name__: EmbeddingWithDropout})\r\n model.load_weights(weights_path)\r\n model.compile(loss='binary_crossentropy', optimizer='adam')\r\n result = model.predict(X).flatten()\r\n\r\n return result", "def main():\n # Read command line arguments\n args = get_input_arguments()\n # Unpack dictionary into keyword arguments\n # Unused arguments should be ignored silently.\n ppn.run(**args)", "def train(args):\n # ce\n if args.enable_ce:\n SEED = 102\n fluid.default_main_program().random_seed = SEED\n fluid.default_startup_program().random_seed = SEED\n\n cat_feat_dims_dict = OrderedDict()\n for line in open(args.cat_feat_num):\n spls = line.strip().split()\n assert len(spls) == 2\n cat_feat_dims_dict[spls[0]] = int(spls[1])\n dcn_model = DCN(args.cross_num, args.dnn_hidden_units, args.l2_reg_cross,\n args.use_bn, args.clip_by_norm, cat_feat_dims_dict,\n args.is_sparse)\n dcn_model.build_network()\n dcn_model.backward(args.lr)\n\n # config dataset\n dataset = fluid.DatasetFactory().create_dataset()\n dataset.set_use_var(dcn_model.data_list)\n pipe_command = 'python reader.py {}'.format(args.vocab_dir)\n dataset.set_pipe_command(pipe_command)\n dataset.set_batch_size(args.batch_size)\n dataset.set_thread(args.num_thread)\n train_filelist = [\n os.path.join(args.train_data_dir, fname)\n for fname in next(os.walk(args.train_data_dir))[2]\n ]\n dataset.set_filelist(train_filelist)\n num_epoch = args.num_epoch\n if args.steps:\n epoch = args.steps * args.batch_size / 41000000\n full_epoch = int(epoch // 1)\n last_epoch = epoch % 1\n train_filelists = [train_filelist for _ in range(full_epoch)] + [\n random.sample(train_filelist, int(\n len(train_filelist) * last_epoch))\n ]\n num_epoch = full_epoch + 1\n print(\"train epoch: {}\".format(num_epoch))\n\n # Executor\n exe = fluid.Executor(fluid.CPUPlace())\n exe.run(fluid.default_startup_program())\n\n for epoch_id in range(num_epoch):\n start = 
time.time()\n sys.stderr.write('\\nepoch%d start ...\\n' % (epoch_id + 1))\n dataset.set_filelist(train_filelists[epoch_id])\n exe.train_from_dataset(\n program=fluid.default_main_program(),\n dataset=dataset,\n fetch_list=[\n dcn_model.loss, dcn_model.avg_logloss, dcn_model.auc_var\n ],\n fetch_info=['total_loss', 'avg_logloss', 'auc'],\n debug=False,\n print_period=args.print_steps)\n model_dir = os.path.join(args.model_output_dir,\n 'epoch_' + str(epoch_id + 1), \"checkpoint\")\n sys.stderr.write('epoch%d is finished and takes %f s\\n' % (\n (epoch_id + 1), time.time() - start))\n fluid.save(fluid.default_main_program(), model_dir)", "def segm_fasterrcnn(param_list):\n #segmentation after detecting ROI using Faster R-CNN \n # load a model pre-trained pre-trained on COCO\n model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)\n\n # replace the classifier with a new one, that has\n # num_classes which is user-defined\n num_classes = 3 # 2 class (menisci) + background\n # get number of input features for the classifier\n in_features = model.roi_heads.box_predictor.cls_score.in_features\n # replace the pre-trained head with a new one\n model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)\n\n # load a pre-trained model for classification and return\n # only the features\n backbone = torchvision.models.mobilenet_v2(pretrained=True).features\n # FasterRCNN needs to know the number of\n # output channels in a backbone. For mobilenet_v2, it's 1280\n # so we need to add it here\n backbone.out_channels = 1280\n\n # let's make the RPN generate 5 x 3 anchors per spatial\n # location, with 5 different sizes and 3 different aspect\n # ratios. We have a Tuple[Tuple[int]] because each feature\n # map could potentially have different sizes and\n # aspect ratios\n anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),\n aspect_ratios=((0.5, 1.0, 2.0),))\n\n # let's define what are the feature maps that we will\n # use to perform the region of interest cropping, as well as\n # the size of the crop after rescaling.\n roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],\n output_size=7,\n sampling_ratio=2)\n\n # put the pieces together inside a FasterRCNN model\n model = FasterRCNN(backbone,\n num_classes=3,\n rpn_anchor_generator=anchor_generator,\n box_roi_pool=roi_pooler)\n\n\n output_path = r\"/home/students/thampi/PycharmProjects/MA_Praise/outputs\"\n data_loc = r\"/home/students/thampi/PycharmProjects/meniscus_data/filt_data\"#559 elements in filt data\n hdf5file = r\"/home/students/thampi/PycharmProjects/meniscus_data/segm.hdf5\"\n _, num_data_img, _, num_test_show, valid_after, no_epo, _, datafrom, in_chann, crop_size,experiment_name, if_hdf5, batch_size, pretrained, _ = param_list\n\n # experiment_name = \"fasterrcnn_test\"\n expt_outpath = os.path.join(output_path, experiment_name)\n if not os.path.exists(expt_outpath):\n os.makedirs(expt_outpath)\n\n\n dataset = MeniscusDataForDetect(datafrom, num_img=num_data_img, in_channels=in_chann, #if_crop=True,\n crop_size=crop_size,\n if_hdf5=if_hdf5,\n )\n model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n model = model.to(device)\n data_len = len(dataset)\n ts, vs = int(0.8*data_len), int(0.1*data_len)#training set size, validation set size\n train_set, val_set, test_set = torch.utils.data.random_split(dataset, [ts, vs, data_len-(ts+vs)])\n data_loader = 
torch.utils.data.DataLoader(\n train_set, batch_size=batch_size,#, shuffle=True)\n # , num_workers=4,\n collate_fn=collate_fn)\n # For Training\n images,targets,index = next(iter(data_loader))\n images = list(image for image in images)\n\n tar = []\n\n loss_hist = Averager()\n itr = 1\n params = [p for p in model.parameters() if p.requires_grad]\n optimizer = torch.optim.SGD(params, lr=0.005, momentum=0.9, weight_decay=0.0005)\n lr_scheduler = None\n\n\n model = model.to(device)\n val_loader = torch.utils.data.DataLoader(\n val_set, batch_size=batch_size,#, shuffle=True)\n # , num_workers=4,\n collate_fn=collate_fn)\n\n model = train_general(train_set, expt_outpath, expt_name=experiment_name, val_set_perc=0.15, test_set_perc=0, device=device,\n num_epochs=no_epo, is_naive_train=True, mode=\"ele\", val_set=val_set, test_set=None, valid_after=valid_after, \n net_model=model, batch_size=batch_size)\n model = get_saved_model(model, expt_outpath, with_edge=False)\n\n model2_path = os.path.join(r\"/home/students/thampi/PycharmProjects/MA_Praise/outputs\", \"segm_roi\")\n model2 = UNetSimple(in_classes=1, channelscale=128, out_classes=2)\n model2 = get_saved_model(model2, model2_path, with_edge=False)\n test_loader = torch.utils.data.DataLoader(\n test_set, batch_size=batch_size,#, shuffle=True)\n # , num_workers=4,\n collate_fn=collate_fn)\n x, y , pred_segm = test_pred(model, model2, test_loader, experiment_name=\"\",\n loss_func=None, sgm_train=True, crop_size=crop_size, allow_faulty=True)\n save_pred_one(x, y, pred_segm, expt_outpath, bat_one=False, fig_name=\"res\", nosft=True, channelcnt=3)\n save_test_img_grid(x, y, pred_segm, expt_outpath, nosft=True, channel_cnt=3, fig_name=experiment_name)", "def test_run_detext_libert_binary_classification(self):\n output = os.path.join(DataSetup.out_dir, \"cls_libert_model\")\n args = self.base_args + [\n \"--task_type\", TaskType.BINARY_CLASSIFICATION,\n \"--ftr_ext\", \"bert\",\n \"--lr_bert\", \"0.00001\",\n \"--bert_hub_url\", DataSetup.libert_sp_hub_url,\n \"--num_units\", \"16\",\n f\"--{InputFtrType.LABEL_COLUMN_NAME}\", \"label\",\n f\"--{InputFtrType.DENSE_FTRS_COLUMN_NAMES}\", \"dense_ftrs\",\n \"--nums_dense_ftrs\", \"8\",\n f\"--{InputFtrType.SPARSE_FTRS_COLUMN_NAMES}\", \"sparse_ftrs\",\n \"--nums_sparse_ftrs\", \"30\",\n f\"--{InputFtrType.SHALLOW_TOWER_SPARSE_FTRS_COLUMN_NAMES}\", \"sparse_ftrs\",\n \"--nums_shallow_tower_sparse_ftrs\", \"30\",\n \"--pmetric\", \"auc\",\n \"--all_metrics\", \"accuracy\", \"auc\",\n \"--test_file\", DataSetup.binary_cls_data_dir,\n \"--dev_file\", DataSetup.binary_cls_data_dir,\n \"--train_file\", DataSetup.binary_cls_data_dir,\n \"--out_dir\", output]\n sys.argv[1:] = args\n main(sys.argv)\n self._cleanUp(output)", "def cli(raw_args: Optional[list[str]] = None) -> None:\n if not raw_args:\n raw_args = sys.argv[1:]\n\n parser = configure_argument_parser()\n args = parser.parse_args(raw_args)\n VerbosityConfiguration.set(args)\n CLIAnnotationContext.register(args)\n\n context = get_genomic_context()\n pipeline = CLIAnnotationContext.get_pipeline(context)\n grr = CLIAnnotationContext.get_genomic_resources_repository(context)\n\n if args.output:\n output = args.output\n else:\n output = os.path.basename(args.input).split(\".\")[0] + \"_annotated.vcf\"\n\n if not os.path.exists(args.work_dir):\n os.mkdir(args.work_dir)\n\n\n task_graph = TaskGraph()\n\n task_graph.input_files.append(args.input)\n task_graph.input_files.append(args.pipeline)\n if args.reannotate:\n 
task_graph.input_files.append(args.reannotate)\n\n if not tabix_index_filename(args.input):\n # annotate(args.input, None, pipeline.get_info(),\n # grr.definition, output, args.reannotate)\n assert grr is not None\n task_graph.create_task(\n \"all_variants_annotate\",\n annotate,\n [args.input, None, pipeline.get_info(),\n grr.definition, output, args.reannotate],\n []\n )\n else:\n with closing(TabixFile(args.input)) as pysam_file:\n regions = produce_regions(pysam_file, args.region_size)\n file_paths = produce_partfile_paths(args.input, regions, args.work_dir)\n region_tasks = []\n for index, (region, file_path) in enumerate(zip(regions, file_paths)):\n assert grr is not None\n region_tasks.append(task_graph.create_task(\n f\"part-{index}\",\n annotate,\n [args.input, region,\n pipeline.get_info(), grr.definition,\n file_path, args.reannotate],\n []\n ))\n\n assert grr is not None\n task_graph.create_task(\n \"combine\",\n combine,\n [args.input, pipeline.get_info(),\n grr.definition, file_paths, output],\n region_tasks\n )\n\n args.task_status_dir = os.path.join(args.work_dir, \".tasks-status\")\n args.log_dir = os.path.join(args.work_dir, \".tasks-log\")\n\n TaskGraphCli.process_graph(task_graph, **vars(args))", "def test_run_detext_cnn_ranking(self):\n output = os.path.join(DataSetup.out_dir, \"cnn_model\")\n self._cleanUp(output)\n args = self.ranking_args + [\"--out_dir\", output]\n sys.argv[1:] = args\n main(sys.argv)\n self._cleanUp(output)", "def test_compileNxModel(self):\n\n inputShape = (7, 7, 1)\n inputLayer = NxInputLayer(inputShape)\n outputLayer = NxConv2D(2, 3)(inputLayer.input)\n model = NxModel(inputLayer.input, outputLayer)\n model.clearTemp()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Segmentation of normalized inputs using GATK4, converting into standard input formats.
def _segment_normalized_gatk(cnr_file, work_dir, paired):
    work_dir = utils.safe_makedir(os.path.join(work_dir, "gatk-cnv"))
    seg_file = gatkcnv.model_segments(cnr_file, work_dir, paired)["seg"]
    std_seg_file = seg_file.replace(".cr.seg", ".seg")
    if not utils.file_uptodate(std_seg_file, seg_file):
        with file_transaction(std_seg_file) as tx_out_file:
            df = pd.read_csv(seg_file, sep="\t", comment="@", header=0,
                             names=["chrom", "loc.start", "loc.end", "num.mark", "seg.mean"])
            df.insert(0, "ID", [dd.get_sample_name(paired.tumor_data)] * len(df))
            df.to_csv(tx_out_file, sep="\t", header=True, index=False)
    std_cnr_file = os.path.join(work_dir, "%s.cnr" % dd.get_sample_name(paired.tumor_data))
    if not utils.file_uptodate(std_cnr_file, cnr_file):
        with file_transaction(std_cnr_file) as tx_out_file:
            logdf = pd.read_csv(cnr_file, sep="\t", comment="@", header=0,
                                names=["chrom", "start", "end", "log2"])
            covdf = pd.read_csv(tz.get_in(["depth", "bins", "antitarget"], paired.tumor_data),
                                sep="\t", header=None,
                                names=["chrom", "start", "end", "orig.name", "depth", "gene"])
            df = pd.merge(logdf, covdf, on=["chrom", "start", "end"])
            del df["orig.name"]
            df = df[["chrom", "start", "end", "gene", "log2", "depth"]]
            df.insert(6, "weight", [1.0] * len(df))
            df.to_csv(tx_out_file, sep="\t", header=True, index=False)
    return std_cnr_file, std_seg_file
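Illustrative sketch (not part of the dataset): the seg-file standardization step above in isolation, assuming a GATK ModelSegments `.cr.seg` input and a hypothetical sample name and file paths. It shows how the `@`-prefixed header lines are skipped and an `ID` column is prepended before writing the DNAcopy-style `.seg` file.

import pandas as pd

def standardize_seg(seg_file, out_file, sample_name):
    # Skip GATK "@" header lines and apply DNAcopy-style column names
    df = pd.read_csv(seg_file, sep="\t", comment="@", header=0,
                     names=["chrom", "loc.start", "loc.end", "num.mark", "seg.mean"])
    # Prepend the sample identifier expected by downstream copy number tools
    df.insert(0, "ID", [sample_name] * len(df))
    df.to_csv(out_file, sep="\t", header=True, index=False)

standardize_seg("tumor.cr.seg", "tumor.seg", "tumor_sample")  # hypothetical paths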
[ "def normalise(self):\n if not self.inputs:\n self.auto_detect_inputs()\n max_r = self.depth() - 1\n if max_r <= 2: \n for o in self.outputs:\n self.set_row(o,4)\n max_r = self.depth() -1\n claimed = []\n for q,i in enumerate(sorted(self.inputs, key=self.qubit)):\n self.set_row(i,0)\n self.set_qubit(i,q)\n #q = self.qubit(i)\n n = list(self.neighbours(i))[0]\n if self.type(n) in (1,2):\n claimed.append(n)\n self.set_row(n,1)\n self.set_qubit(n, q)\n else: #directly connected to output\n e = self.edge(i, n)\n t = self.edge_type(e)\n self.remove_edge(e)\n v = self.add_vertex(1,q,1)\n self.add_edge((i,v),3-t)\n self.add_edge((v,n), 2)\n claimed.append(v)\n for q, o in enumerate(sorted(self.outputs,key=self.qubit)):\n #q = self.qubit(o)\n self.set_row(o,max_r+1)\n self.set_qubit(o,q)\n n = list(self.neighbours(o))[0]\n if n not in claimed:\n self.set_row(n,max_r)\n self.set_qubit(n, q)\n else:\n e = self.edge(o, n)\n t = self.edge_type(e)\n self.remove_edge(e)\n v = self.add_vertex(1,q,max_r)\n self.add_edge((o,v),3-t)\n self.add_edge((v,n), 2)\n\n self.pack_circuit_rows()", "def processFlat(self):\n raise NotImplementedError(\"This method does not return flat \"\n \"segmentations.\")", "def inputprojections(goldseg,predseg):\n outWords = segUtils.words(predseg)\n predtext = predseg.replace(\" \",\"\").replace(\".\",\"\")\n goldVector = segUtils.getBVec(goldseg)\n res = []\n for (l,r) in sorted(outWords,lambda x,y:cmp(x[0],y[0])):\n outP = predtext[l:r]\n lp = l-1 if l>0 else l\n while lp>-1 and goldVector[lp]!=\"b\":\n lp-=1\n rp = r-1\n while rp<len(predtext)-1 and goldVector[rp]!=\"b\":\n rp+=1\n inpP = segUtils.segment(predtext[lp+1:rp+1],goldVector[lp+1:rp])\n res.append((outP,inpP))\n return res", "def get_model_inputsegs(self):\r\n\r\n # Get all the different cell models used in the slice\r\n input_models = set()\r\n for cells in self.glom_cells.values():\r\n for cell in cells:\r\n input_models.add(cell[:cell.find('[')])\r\n\r\n # Get each model's input segments (in the tuft)\r\n model_inputsegs = {m.class_name: m.tufted_dend_root\r\n for m in CellModel \\\r\n .select(CellModel.class_name, CellModel.tufted_dend_root) \\\r\n .where(CellModel.class_name.in_(list(input_models)))}\r\n\r\n return model_inputsegs", "def separate_unconnected_segments(self, scale_input, sigma_input, min_size_input):\r\n\r\n\r\n image = self.image\r\n # Label the matrix with different connected components\r\n labeled_matrix, num_cropped = measure.label(image, background=0, connectivity=1, return_num=True)\r\n\r\n # decleare premerged_set as an empty list\r\n self.premerged_set = []\r\n \r\n # Loop through all connected components\r\n # range list is different in python. 
numbers from 1 to 5 is range(1, 5+1), (last number is not included)\r\n for i in range(1, num_cropped + 1):\r\n \r\n # Get the coordinates of current labels\r\n x = np.array(np.where(labeled_matrix == i))\r\n\r\n # Eliminate case of noise, tuneable\r\n # 'continue' skips everything under the if statement in the for loop\r\n if x.shape[1] < self.min_shape: continue\r\n \r\n # We have down > up and right > left # To find corners of image\r\n up = x[0][0]\r\n down = x[0][-1]\r\n left = np.amin(x[1])\r\n right = np.amax(x[1])\r\n\r\n # Essential if there is noise, because it will be counted as one conencted component\r\n if down - up < self.min_height or right - left < self.min_width: continue\r\n\r\n # Buffering zone: 2 (To exapnd image), tuneable\r\n # Crop the image of current connected component with buffer\r\n cropped = image[up-self.buffer_zone:down+self.buffer_zone, left-self.buffer_zone:right+self.buffer_zone]\r\n\r\n # Convert to RGB --> selective search requires RGB\r\n temp = np.zeros([cropped.shape[0], cropped.shape[1], 3])\r\n temp[:, :, 0] = cropped\r\n temp[:, :, 1] = cropped\r\n temp[:, :, 2] = cropped\r\n cropped = temp\r\n if cropped.shape[1]<=0: continue\r\n # perform selective search on cropped region\r\n self.selective_search(cropped,left,up, scale_input, sigma_input, min_size_input)", "def parse_normalizer():\n\n if FLAGS.batch_norm:\n normalizer_fn=tf.contrib.layers.batch_norm\n else:\n return None, None\n \n scale_term = None\n if FLAGS.activation_function is 'relu':\n scale_term = False\n else:\n scale_term = True\n \n normalizer_params = {\n 'is_training': None,\n # 0.9, 0.99, 0.999 or 0.9999 ...\n # According to TF performance guide: lower it if training is ok and validation/test is performing worse\n # A.Geron suggest to try higher values for large datasets and small batch sizes \n 'decay': 0.9,\n 'updates_collections': None,\n # If we don't use activation functions --> scale:true\n 'scale': scale_term,\n # The 'fused parameter' allows better performance according to the TF performance guide\n 'fused': True\n \n # Try zero_debias_moving_mean=True for improved stability\n # 'zero_debias_moving_mean':True\n\n\n }\n \n return normalizer_fn, normalizer_params", "def preprocess_inputs(self, state, goal):\n #state, goal = self.clip_states_goals(state, goal)\n state_norm = self.state_normalizer.normalize(state)\n goal_norm = self.goal_normalizer.normalize(goal)\n inputs = np.concatenate([state_norm, goal_norm])\n return torch.tensor(inputs, dtype=torch.float32).unsqueeze(0)", "def preprocessing():", "def put_in_standard_form(self):\n self._group = self.boolean_gaussian_elimination(self._group)", "def test_g_normalizer(self):\n #3' shuffling\n self.assertEqual(str(self.norm.normalize(self.hp.parse_hgvs_variant(\"NC_000006.11:g.49917122_49917123insA\"))),\n \"NC_000006.11:g.49917127dupA\")\n self.assertEqual(str(self.norm.normalize(self.hp.parse_hgvs_variant(\"NC_000006.11:g.49917121_49917122insGA\"))),\n \"NC_000006.11:g.49917122_49917123dupGA\")\n self.assertEqual(str(self.norm.normalize(self.hp.parse_hgvs_variant(\"NC_000006.11:g.49917122_49917123dup\"))),\n \"NC_000006.11:g.49917122_49917123dupGA\")\n self.assertEqual(str(self.norm.normalize(self.hp.parse_hgvs_variant(\"NC_000006.11:g.49917122_49917123dupGA\"))),\n \"NC_000006.11:g.49917122_49917123dupGA\")\n self.assertEqual(str(self.norm.normalize(self.hp.parse_hgvs_variant(\"NC_000006.11:g.49917098delC\"))),\n \"NC_000006.11:g.49917099delC\")\n self.assertEqual(str(self.norm.normalize(self.hp.parse_hgvs_variant(\n 
\"NC_000006.11:g.49917151_49917156delinsTCTAAA\"))), \"NC_000006.11:g.49917154_49917155delTCinsAA\")\n\n #5' shuffling\n self.assertEqual(str(self.norm5.normalize(self.hp.parse_hgvs_variant(\"NC_000006.11:g.49917122_49917123insA\"))),\n \"NC_000006.11:g.49917123dupA\")\n self.assertEqual(str(self.norm5.normalize(self.hp.parse_hgvs_variant(\"NC_000006.11:g.49917121_49917122insGA\"))),\n \"NC_000006.11:g.49917121_49917122dupAG\")\n self.assertEqual(str(self.norm5.normalize(self.hp.parse_hgvs_variant(\"NC_000006.11:g.49917122_49917123dup\"))),\n \"NC_000006.11:g.49917121_49917122dupAG\")\n self.assertEqual(str(self.norm5.normalize(self.hp.parse_hgvs_variant(\"NC_000006.11:g.49917122_49917123dupGA\"))),\n \"NC_000006.11:g.49917121_49917122dupAG\")\n self.assertEqual(str(self.norm5.normalize(self.hp.parse_hgvs_variant(\"NC_000006.11:g.49917099delC\"))),\n \"NC_000006.11:g.49917098delC\")\n self.assertEqual(str(self.norm5.normalize(self.hp.parse_hgvs_variant(\n \"NC_000006.11:g.49917151_49917156delinsTCTAAA\"))), \"NC_000006.11:g.49917154_49917155delTCinsAA\")\n\n self.assertEqual(str(self.norm.normalize(self.hp.parse_hgvs_variant(\n \"NC_000009.11:g.36233991_36233992delCAinsTG\"))), \"NC_000009.11:g.36233991_36233992inv\")\n with self.assertRaises(HGVSInvalidVariantError):\n self.norm.normalize(self.hp.parse_hgvs_variant(\"NG_032871.1:g.32476_53457delinsAATTAAGGTATA\"))\n\n with self.assertRaises(HGVSInvalidVariantError):\n self.norm.normalize(self.hp.parse_hgvs_variant(\"NG_032871.1:g.32476_53457delinsAATTAAGGTATA\"))", "def normalize(input_, options=None):\n return JsonLdProcessor().normalize(input_, options)", "def format_input(self):\n # to get the weight matrix for the embedding layer\n self.get_weights_matrix()\n\n try:\n shutil.rmtree('./data/inputs/word2vec')\n except:\n pass\n os.mkdir('./data/inputs/word2vec')\n\n self.path_sentences = './data/inputs/sentences.txt'\n self.path_labels = './data/inputs/labels.txt'\n self.path_sentences_output = './data/inputs/word2vec/sentences.npy'\n self.path_labels_output = './data/inputs/word2vec/labels.npy'\n\n with open(self.path_sentences, 'r+') as f:\n lines = f.readlines()\n max_lenght = max([len(line.split()) for line in lines])\n sentences = np.zeros((len(lines), max_lenght)) # size = samples x max lenght of sentences\n i = 0\n nb_unknown = 0\n nb_token = 0\n for line in lines:\n sentence_formated = []\n for word in line.split():\n nb_token += 1\n try:\n sentence_formated.append(self.index_dict[word.decode('utf8')])\n except:\n sentence_formated.append(0)\n nb_unknown += 1\n lenght = len(sentence_formated)\n sentences[i, :lenght] = sentence_formated[:lenght]\n i += 1\n print('there was', nb_unknown, 'unknown tokens out of', nb_token, 'total tokens, which account for', int((float(nb_unknown) / float(nb_token))*100), '% of all tokens')\n\n with open(self.path_labels, 'r+') as f:\n lines = f.readlines()\n lines = map(int, lines)\n lb = LabelBinarizer()\n labels = lb.fit_transform(lines)\n # labels = np.zeros((len(lines), 1))\n # i = 0\n # for line in lines:\n # labels[i] = line\n # i += 1\n\n with open(self.path_sentences_output, 'wb') as f:\n np.save(f, sentences)\n with open(self.path_labels_output, 'wb') as f:\n np.save(f, labels)\n\n print('shape of sentences (nb_sample, max_len):', sentences.shape)\n print('shape of labels (nb_sample):', labels.shape)\n return sentences, labels", "def segment():\n\n\t# read input images from 'in' directory\n\timg = \"data/in.jpg\"\n\n\t# read image, prepare it by resizing it to fixed height and converting 
it to grayscale\n\timg = prepareImg(cv2.imread(img), 50)\n\t\n\t# execute segmentation with given parameters\n\t# -kernelSize: size of filter kernel (odd integer)\n\t# -sigma: standard deviation of Gaussian function used for filter kernel\n\t# -theta: approximated width/height ratio of words, filter function is distorted by this factor\n\t# - minArea: ignore word candidates smaller than specified area\n\tres = wordSegmentation(img, kernelSize=25, sigma=11, theta=7, minArea=100)\n\t\n\t#delete all files in segmeted directory\n\tfiles = glob.glob('segmented/*')\n\tfor f in files:\n\t\tos.remove(f)\n\n\t#delete all files in contrast directory\n\tfiles = glob.glob('contrast/*')\n\tfor f in files:\n\t\tos.remove(f)\n\n\t# iterate over all segmented words\n\tprint('Segmented into %d words'%len(res))\n\tfor (j, w) in enumerate(res):\n\t\t(wordBox, wordImg) = w\n\t\t(x, y, w, h) = wordBox\n\t\tcv2.imwrite('segmented/%d.png'%(j), wordImg) # save word\n\t\tcv2.rectangle(img,(x,y),(x+w,y+h),0,1) # draw bounding box in summary image\n\t\n\t# output summary image with bounding boxes around words\n\tcv2.imwrite('summary/summary.png', img)", "def normalize(self):\n list(map(lambda normalization: normalization[0](self.entry,normalization[1]), self.normalizations))", "def normalise(self) -> None:\n _ma.stochastify_d(self.plast)\n _ma.stochastify_d(self.initial)", "def preprocess(run_config):\n plain_gan.preprocess(run_config)\n\n true_ssn_options = run_config.setdefault('true_ssn_options', {})\n # Default to \"new\" JDS:\n for key in ['J', 'D', 'S']:\n true_ssn_options.setdefault(key, new_JDS[key].tolist())\n if run_config.get('ssn_type') == 'heteroin':\n true_ssn_options.setdefault('V', [0.3, 0])\n elif run_config.get('ssn_type') == 'deg-heteroin':\n true_ssn_options.setdefault('V', 0.5)", "def normalize_and_segment(self, text: str) -> str:\n return super().normalize_and_segment(text)", "def postprocess(nii_seg, options):\n\n def threshold(nii_seg, thr):\n \"\"\"Threshold the prediction. For no threshold, set 'thr' to 0.\"\"\"\n logger.info(\"Threshold: {}\".format(thr))\n if thr:\n nii_seg = imed.postprocessing.threshold_predictions(nii_seg, thr)\n return nii_seg\n\n def keep_largest_objects(nii_seg, n_objects):\n \"\"\"Only keep the n largest objects.\"\"\"\n logger.info(\"Keep largest objects: {}\".format(n_objects))\n if n_objects > 1:\n # TODO: implement the thing below.\n NotImplementedError(\"For now, the algorithm can only remove the largest object, no more than that.\")\n # Make sure input is binary. If not, skip with verbose.\n if np.array_equal(nii_seg.get_fdata(), nii_seg.get_fdata().astype(bool)):\n # Fetch axis corresponding to superior-inferior direction\n # TODO: move that code in image\n affine = nii_seg.get_header().get_best_affine()\n code = nib.orientations.aff2axcodes(affine)\n if 'I' in code:\n axis_infsup = code.index('I')\n elif 'S' in code:\n axis_infsup = code.index('S')\n else:\n raise ValueError(\n \"Neither I nor S is present in code: {}, for affine matrix: {}\".format(code, affine))\n nii_seg = imed.postprocessing.keep_largest_object_per_slice(nii_seg, axis=axis_infsup)\n else:\n logger.warning(\"Algorithm 'keep largest object' can only be run on binary segmentation. Skipping.\")\n return nii_seg\n\n def fill_holes(nii_seg):\n \"\"\"Fill holes\"\"\"\n logger.info(\"Fill holes\")\n # Make sure input is binary. 
If not, skip with verbose.\n if np.array_equal(nii_seg.get_fdata(), nii_seg.get_fdata().astype(bool)):\n nii_seg = imed.postprocessing.fill_holes(nii_seg)\n else:\n logger.warning(\"Algorithm 'fill holes' can only be run on binary segmentation. Skipping.\")\n return nii_seg\n\n logger.info(\"\\nProcessing segmentation\\n\" + \"-\" * 23)\n if options['thr']:\n nii_seg = threshold(nii_seg, options['thr'])\n if options['largest']:\n nii_seg = keep_largest_objects(nii_seg, options['largest'])\n if options['fill_holes']:\n nii_seg = fill_holes(nii_seg)\n return nii_seg", "def normalize(train_data):\n\t# Keep track for feature and mean, std\n\tnormalize_np = np.zeros((len(train_data), 2))\n\tfor i in range(1, len(train_data)):\n\n\t\trow_mean = np.mean(train_data[i])\n\t\trow_std = np.std(train_data[i])\n\t\ttrain_data[i] = (train_data[i]-row_mean)/row_std\n\n\t\tnormalize_np[i, 0], normalize_np[i, 1] = np.copy(row_mean), np.copy(row_std)\n\n\tnormalize_np[0, 1] = 1\n\treturn train_data, normalize_np" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve organized structure of PureCN output files.
def _get_purecn_files(paired, work_dir, require_exist=False):
    out_base = os.path.join(work_dir, "%s-purecn" % (dd.get_sample_name(paired.tumor_data)))
    out = {"plot": {}}
    all_files = []
    for plot in ["chromosomes", "local_optima", "segmentation", "summary"]:
        if plot == "summary":
            cur_file = "%s.pdf" % out_base
        else:
            cur_file = "%s_%s.pdf" % (out_base, plot)
        if not require_exist or os.path.exists(cur_file):
            out["plot"][plot] = cur_file
            all_files.append(os.path.basename(cur_file))
    for key, ext in [["hetsummary", ".csv"], ["dnacopy", "_dnacopy.seg"], ["genes", "_genes.csv"],
                     ["log", ".log"], ["loh", "_loh.csv"], ["rds", ".rds"],
                     ["variants", "_variants.csv"]]:
        cur_file = "%s%s" % (out_base, ext)
        if not require_exist or os.path.exists(cur_file):
            out[key] = cur_file
            all_files.append(os.path.basename(cur_file))
    return out_base, out, all_files
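Illustrative sketch (not part of the dataset): how the returned structure could be consumed, assuming a hypothetical `paired` sample and work directory.

# Hypothetical usage of the returned structure
out_base, out, all_files = _get_purecn_files(paired, "/work/purecn")
seg_file = out["dnacopy"]             # "<out_base>_dnacopy.seg"
summary_pdf = out["plot"]["summary"]  # "<out_base>.pdf"
print(all_files)                      # basenames of every expected PureCN output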
[ "def output_components(output_dir):\n if output_dir is None:\n return\n\n component = 0\n paths_by_start = {}\n for path in Read.known_paths:\n if path[0] not in paths_by_start:\n paths_by_start[path[0]] = []\n paths_by_start[path[0]].append(path)\n\n with open(output_dir + '/single_nodes.txt', 'w', 0) as single_file:\n single_file.write(\"ID\\tBases\\tCopycount\\tNormalization\\n\")\n\n for source_node in Node.nodes:\n if hasattr(source_node, 'destroyed'):\n continue\n with open(output_dir + '/nodes'+str(component)+'.txt', 'w', 0) as nodefile, \\\n open(output_dir + '/edges'+str(component)+'.txt', 'w', 0) as edgefile, \\\n open(output_dir + '/paths'+str(component)+'.txt', 'w', 0) as pathfile:\n component_nodes, component_edges = source_node.add_component()\n component_nodes = Node.topological_sort(component_nodes)\n\n if len(component_nodes) == 1:\n source_node.hash = -1\n single_file.write(source_node.to_string())\n source_node.destroyed = True\n continue\n\n node_hash = 0\n nodefile.write(\"ID\\tBases\\tCopycount\\tNormalization\\n\")\n pathfile.write(\"ID1\\tID2\\tEtc.\\n\")\n for node in component_nodes:\n node.hash = node_hash\n node_hash += 1\n nodefile.write(node.to_string())\n node.destroyed = True\n\n for node in component_nodes:\n if node not in paths_by_start: continue\n paths = paths_by_start[node]\n for path in paths_by_start[node]:\n path = [str(n.hash) for n in path]\n pathfile.write(\"\\t\".join(path) + \"\\n\")\n\n edgefile.write(\"InID\\tOutID\\tWeight\\tCopycount\\tNormalization\\n\")\n for edge in component_edges:\n #np = tuple([edge.in_node,edge.out_node]) #node-pair\n if edge.copy_count > 0: #either the edge has a copy count or edge weight >= Read.K\n #edge.copy_count = max(Read.known_edges.get(np,0),1)/max(Read.L - edge.weight - 1, 1)\n edgefile.write(edge.to_string())\n component += 1", "def process_output(output):\n if os.path.isdir(output):\n\n dir_list = [directory for directory in os.listdir(output)\n if os.path.isdir(directory)]\n\n for directory in dir_list:\n\n print(\"Processing output in \" +\n os.path.join(directory, OUTPUT_FILE) +\n \"...\")\n out = nwchem.NwOutput(os.path.join(directory, OUTPUT_FILE))\n\n try:\n error = False\n for output in out.data:\n if output['has_error']:\n error = True\n\n if error:\n print(\"File: \" + os.path.join(directory, OUTPUT_FILE) +\n \" contains errors!\")\n\n elif out.data[-1]['task_time'] == 0:\n print('No timing information found in ' +\n os.path.join(directory, OUTPUT_FILE) + \".\")\n\n else:\n out.to_file(os.path.join(directory, 'data.json'))\n\n except NameError:\n\n print(\"No data found in file. \")\n\n except IndexError:\n\n print(\"Data is empty!\")\n\n else:\n\n output = os.path.abspath(output)\n print('Processing output in ' + output)\n\n try:\n out = nwchem.NwOutput(output)\n except:\n raise IOError('Could not find proper nwchem output file.')\n\n try:\n error = False\n for output in out.data:\n if output['has_error']:\n error = True\n\n if error:\n print(\"File: \" + output + \" contains errors!\")\n\n elif out.data[-1]['task_time'] == 0:\n print('No timing information found in ' + output + \".\")\n\n else:\n out.to_file(os.path.join(os.path.dirname(output),\n 'data.json'))\n\n except NameError:\n\n print(\"No data found in file. 
\")\n\n except IndexError:\n\n print(\"Data is empty!\")\n\n out.to_file(os.path.join(os.path.dirname(output), 'data.json'))", "def _get_purecn_dx_files(paired, out):\n out_base = \"%s-dx\" % utils.splitext_plus(out[\"rds\"])[0]\n all_files = []\n for key, ext in [[(\"mutation_burden\",), \"_mutation_burden.csv\"],\n [(\"plot\", \"signatures\"), \"_signatures.pdf\"],\n [(\"signatures\",), \"_signatures.csv\"]]:\n cur_file = \"%s%s\" % (out_base, ext)\n out = tz.update_in(out, key, lambda x: cur_file)\n all_files.append(os.path.basename(cur_file))\n return out_base, out, all_files", "def collect_formats():\n # collects file names\n # runs through all dirs, starting with root dir\n slice_a = home_path.find(\"root/\")\n belongs = home_path[slice_a + 5:-1]\n print(\"{}'s file formats:\\n\".format(belongs))\n\n for (dirpath, dirnames, filenames) in os.walk(home_path):\n # runs through files in current iter dir\n for filename in filenames:\n dot_i = filename.rfind('.') + 1\n file_format = filename[dot_i:] \n #print(file_format)\n if file_format == 'ini':\n continue\n\n if file_format not in dirs_and_file_coll.keys():\n if len(file_format) <= 3:\n dirs_and_file_coll[file_format] = 1\n else:\n dirs_and_file_coll[file_format] += 1", "def find_structure_file(folder):\n import glob\n \n structure_file = glob.glob(folder + '/*.cif') + \\\n glob.glob(folder + '/*.gen') + \\\n glob.glob(folder + '/*.sdf') + \\\n glob.glob(folder + '/*.xyz')\n print(structure_file)\n structure_file = structure_file[0]\n\n return structure_file", "def get_output_directories(self):\r\n pass", "def _generate_outputs(self):\n # Copy NEB files.\n for file_name in os.listdir(self.ref_dir_output):\n full_file_name = os.path.join(self.ref_dir_output, file_name)\n if os.path.isfile(full_file_name):\n shutil.copy(full_file_name, os.getcwd())\n\n # Copy NEB sub-files.\n for u_dir, r_dir in zip(self.user_sdir, self.ref_sdir_output):\n for file_name in os.listdir(r_dir):\n full_file_name = os.path.join(r_dir, file_name)\n if os.path.isfile(full_file_name):\n shutil.copy(full_file_name, u_dir)", "def list_cp_output():\n\n if request.args.get(\"ready\") == \"true\":\n output_dir = CONF.paths.cp_ready_output_dir\n else:\n output_dir = CONF.paths.cp_output_dir\n\n try:\n results = [name for name in os.listdir(output_dir)\n if name.endswith(\".yml\")]\n return jsonify(results)\n except OSError:\n LOG.error(\"Unable to read %s directory\", output_dir)\n abort(404)", "def _read_output(self):\n output = {\n os.path.splitext(filename)[0]: clean_up_df(\n read_df(os.path.join(self.stage_cache_dir, filename))\n )\n for filename in os.listdir(self.stage_cache_dir)\n if filename.endswith(\".tsv\")\n }\n self.logger.info(\n f\"Reading {self.stage_type.__name__} output:\\n\"\n f\"{pformat(list(output.keys()))}\"\n )\n\n return output", "def get_files_to_generate(self):\r\n pass", "def prepareOutput():\r\n\r\n os.removedirs(\"output\")\r\n os.mkdir(\"output\")", "def get_sorted_export_files():\n ...", "def create_outfile_names(outroot):\n outTableFile = \"jaccard.txt\"\n outPCAFile = \"jaccard_PCA.pdf\"\n outHeatmapFile = \"jaccard_heatmap.pdf\"\n if outroot != \"\":\n if outroot[-1] == \"/\":\n outTableFile= outroot + outTableFile\n outPCAFile = outroot + outPCAFile\n outHeatmapFile = outroot + outHeatmapFile\n else:\n outTableFile= outroot + \"_\" + outTableFile\n outPCAFile = outroot + \".\" + outPCAFile\n outHeatmapFile = outroot + \".\" + outHeatmapFile\n return(outTableFile, outPCAFile, outHeatmapFile)", "def getAstroNetFiles(baseDir, sector, 
anoPath='ffi/run/', prefix='prediction_', returnPath = False\n):\n path = os.path.join(baseDir,sector,anoPath)\n try:\n pathList = os.listdir(path)\n except Exception as e:\n print(e)\n print('Sector '+sector+' has no astroNet Output Files')\n return []\n\n anoFiles = [item for item in pathList if prefix in item]\n if returnPath:\n return [path+anoFile for anoFile in anoFiles if 'cut' not in anoFile]\n return anoFiles", "def print_input_files():\n print(\"## Available input images:\")\n files = listdir(\"input/\")\n for inp in files:\n print(\"## \", files.index(inp) + 1, \". \", inp, \" \", sep=\"\")\n print(\"##\")\n return files", "def aggregate_info(dir='.', write=True):\n import csv\n\n # rootnode = os.path.abspath(dir)\n # loopinfo = {}\n # for root, subFolders, files in os.walk(rootnode,followlinks=True):\n # [loopinfo.update((f,shape(os.path.join(root,f)))) for f in files]\n\n # if write:\n # f = open('nii_info.csv','w')\n # w = csv.writer(f)\n # niifiles = sorted(loopinfo.keys())\n # for nii in niifiles:\n # w.writerow(niifiles[nii].items())\n \n # f.flush()\n # f.close()\n\n # return loopinfo", "def run(inputFile,outputDirectory):\n structures=compute(inputFile)\n\n family=inputFile.split('/').pop().split('.')[0]+'.fasta'\n outfile = os.path.join(outputDirectory,family)\n with open (outfile,'w') as OUTPUT:\n for name,sequences in structures.items():\n print >>OUTPUT,name\n print >>OUTPUT,sequences[0].strip()\n print >>OUTPUT,sequences[1].strip()\n OUTPUT.closed\n return outfile", "def OutputFileSection(self):\n out = []\n out.append(\"Output data files:\")\n out.append('')\n for file in self.outputfiles:\n out.append(\"%s%s\" % (self.INDENT, file))\n out.append('')\n return out", "def Examine_Outputs(arcpy, in_zip, out_folder, skipfiles=[]):\n\n # Set environments and create scratch workspace\n arcpy.env.overwriteOutput = True\n out_sfolder = arcpy.CreateScratchName(\"temp\", data_type=\"Folder\", workspace=arcpy.env.scratchFolder)\n os.mkdir(out_sfolder)\n\n # Unzip to a known location (make sure no other nc files live here)\n ZipCompat(in_zip).extractall(out_sfolder)\n\n dellist = [] # Keep a directory of files to delete\n\n # Iterate through unzipped files and copy to output directory as necessary\n for dirpath, dirnames, filenames in os.walk(out_sfolder):\n for file in filenames:\n infile = os.path.join(dirpath, file)\n dellist.append(infile)\n\n # Copy skipped files over to new directory\n if file in skipfiles:\n shutil.copy2(infile, out_folder)\n arcpy.AddMessage(' File Copied: %s' %file)\n del file\n\n if file.endswith('.nc'):\n\n # Trap to eliminate Parameter tables in NC format from this extraction\n if file.endswith(LK_nc) or file.endswith(RT_nc):\n shutil.copy2(infile, out_folder)\n arcpy.AddMessage(' File Created: %s' %file)\n del file\n continue\n\n # Establish an object for reading the input NetCDF file\n rootgrp = netCDF4.Dataset(infile, 'r')\n\n ### Find 2D variables (y,x)\n ##ncVariableNames_2D = [variable for variable, ncvar in rootgrp.variables.iteritems() if ncvar.dimensions==('y', 'x')]\n ##rootgrp.close()\n ##\n ### Loop through global variables in NetCDF file to gather projection information\n ##for variablename in ncVariableNames_2D:\n ## outRasterLayer = variablename\n ## arcpy.MakeNetCDFRasterLayer_md(infile, variablename, 'x', 'y', outRasterLayer, \"\", \"\", \"BY_VALUE\")\n ## RasterObj = arcpy.Raster(outRasterLayer)\n ## RasterObj.save(os.path.join(out_folder, outRasterLayer))\n ## #arcpy.Raster(outRasterLayer).save(outRasterLayer + '.tif')\n ## 
arcpy.AddMessage(' File Created: %s' %outRasterLayer)\n ## arcpy.Delete_management(outRasterLayer)\n ## del RasterObj, variablename, outRasterLayer\n ##del file, infile, rootgrp, ncVariableNames_2D, variable, ncvar\n\n # Using netCDF4 library - still need point2, DX, DY\n GT = rootgrp.variables['ProjectionCoordinateSystem'].GeoTransform.split(\" \")\n PE_string = rootgrp.variables['ProjectionCoordinateSystem'].esri_pe_string\n arcpy.AddMessage(' GeoTransform: %s' %GT)\n DX = float(GT[1])\n DY = abs(float(GT[5])) # In GeoTransform, Y is usually negative\n arcpy.AddMessage(' DX: %s' %DX)\n arcpy.AddMessage(' DY: %s' %DY)\n sr = arcpy.SpatialReference()\n sr.loadFromString(PE_string.replace('\"', \"'\"))\n point = arcpy.Point(float(GT[0]), float(GT[3]) - float(DY*len(rootgrp.dimensions['y']))) # Calculate LLCorner value from GeoTransform (ULCorner)\n arcpy.env.outputCoordinateSystem = sr\n for variablename, ncvar in rootgrp.variables.iteritems():\n if ncvar.dimensions==('y', 'x'):\n outRasterLayer = variablename\n nc_raster = arcpy.NumPyArrayToRaster(ncvar[:], point, DX, DY)\n arcpy.CalculateStatistics_management(nc_raster)\n arcpy.DefineProjection_management(nc_raster, sr)\n nc_raster.save(os.path.join(out_folder, outRasterLayer))\n arcpy.AddMessage(' File Created: %s' %outRasterLayer)\n del nc_raster, variablename, outRasterLayer\n rootgrp.close()\n del file, infile, rootgrp, ncvar\n continue\n\n if file.endswith('.txt'):\n outRasterLayer = os.path.basename(infile)[:10] + 'txt'\n outRaster = os.path.join(out_folder, outRasterLayer)\n #outRaster = os.path.join(out_folder, outRasterLayer + '.tif')\n arcpy.ASCIIToRaster_conversion(infile, outRaster, 'INTEGER')\n arcpy.AddMessage(' File Created: %s' %file) # %outRaster\n del file, infile, outRaster, outRasterLayer\n continue\n\n if file.endswith('.shp'):\n newshp = os.path.join(out_folder, file)\n arcpy.CopyFeatures_management(infile, newshp)\n arcpy.AddMessage(' File Created: %s' %str(file))\n del file, infile, newshp\n continue\n\n if file.endswith('.csv') or file.endswith('.TBL'):\n shutil.copy2(infile, out_folder)\n arcpy.AddMessage(' File Created: %s' %file)\n del file, infile\n continue\n else:\n continue\n del dirpath, dirnames, filenames\n\n # Remove each file from the temporary extraction directory\n for infile in dellist:\n os.remove(infile)\n arcpy.AddMessage('Extraction of WRF routing grids completed.')\n return out_sfolder" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert LOH output into standardized VCF.
def _loh_to_vcf(cur):
    cn = int(float(cur["C"]))
    minor_cn = int(float(cur["M"]))
    if cur["type"].find("LOH") >= 0:
        svtype = "LOH"
    elif cn > 2:
        svtype = "DUP"
    elif cn < 1:
        svtype = "DEL"
    else:
        svtype = None
    if svtype:
        info = ["SVTYPE=%s" % svtype, "END=%s" % cur["end"],
                "SVLEN=%s" % (int(cur["end"]) - int(cur["start"])),
                "CN=%s" % cn, "MajorCN=%s" % (cn - minor_cn), "MinorCN=%s" % minor_cn]
        return [cur["chr"], cur["start"], ".", "N", "<%s>" % svtype, ".", ".",
                ";".join(info), "GT", "0/1"]
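Illustrative sketch (not part of the dataset): a worked example with a hypothetical PureCN LOH row, assuming the substring check reads `find("LOH") >= 0` as above.

row = {"chr": "17", "start": "7500000", "end": "7600000",
       "type": "LOH", "C": "2.0", "M": "0.0"}
print(_loh_to_vcf(row))
# ['17', '7500000', '.', 'N', '<LOH>', '.', '.',
#  'SVTYPE=LOH;END=7600000;SVLEN=100000;CN=2;MajorCN=2;MinorCN=0', 'GT', '0/1']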
[ "def parse_haplotype_to_vcf(haplotype_to_vcf): \n haplotype_to_vcf.add_argument(\n \"-haplotypeFormat\", default = 'iupac',\n help = \"report which format (numeric vs. iupac) the haplotype file is in.\\n\" \n \"Default = iupac\", \n metavar = '')", "def _convert_output(self):\n pass", "def to_vcf(self, header=False):\n ret = BytesIO()\n if header:\n ret.write((\n '##INFO=<ID=NS,Number=1,Type=Integer,Description=\"Number of Samples\">\\n'\n '##INFO=<ID=SVLEN,Number=1,Type=Integer,Description=\"Length of SV\">\\n'\n '##INFO=<ID=SVTYPE,Number=1,Type=String,Description=\"Structural Variant Type\">\\n'\n '##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">\\n'\n '##FORMAT=<ID=GQ,Number=1,Type=Float,Description=\"Genotype Quality\">\\n'\n '##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\"Sample Depth\">\\n'\n '##FORMAT=<ID=AD,Number=.,Type=Integer,Description=\"Allelic depths for the ref and alt alleles in the order listed\">\\n'\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\\tFORMAT\\tSAMPLE\\n'))\n\n for var in self.variants:\n if var.left_contig != var.right_contig:\n raise ValueError(\"Assembly variant to vcf does not currently support translocations\")\n chrom = var.left_contig\n start = var.left_position\n end = var.right_position\n ref_seq = str(self.ref_range.sequence[start - self.ref_range.start + 1: end - self.ref_range.start])\n alt_seq = str(var.assembly_sequence[var.assembly_begin:var.assembly_end])\n if len(ref_seq) != len(alt_seq):\n anchor_base = self.ref_range.sequence[start - self.ref_range.start]\n else:\n anchor_base = \"\"\n alt_depth = sum(var.depths) / len(var.depths)\n rcov = self.coverage[start - self.ref_range.start + 1: end - self.ref_range.start]\n ref_depth = sum(rcov) / len(rcov)\n genotype, genoqual = genotyper(alt_depth + ref_depth, alt_depth)\n svtype = \"\"\n svlen = \"\"\n if var.is_structural:\n svtype = \"SVTYPE=DEL;\" if len(ref_seq) > len(alt_seq) else \"SVTYPE=INS;\"\n svlen = \"SVLEN=%d;\" % (len(ref_seq) - len(alt_seq))\n ret.write(\"{chrom}\\t{pos}\\t.\\t{ref}\\t{alt}\\t.\\tPASS\\tNS=1;{svtype}{svlen}\\tGT:GQ:DP:AD\\t{gt}:{gq:.2f}:{dp}:{rd},{ad}\\n\".format(\n chrom=chrom, pos=start, ref=anchor_base + ref_seq, alt=anchor_base + alt_seq, svtype=svtype, svlen=svlen, gt=genotype,\n gq=genoqual, dp=alt_depth + ref_depth, rd=ref_depth, ad=alt_depth))\n ret.seek(0)\n return ret", "def to_vcf(self, output_filename):\n with open(output_filename, \"w\") as fp:\n print(self._bcf.header, end=\"\", file=fp)\n for variant in self.variants:\n print(variant, end=\"\", file=fp)", "def convertVCF2FLATJSON(self):\n if self.input_type not in ['vcf','vcf.gz'] or self.output_type != 'json':\n msg = \"Error: vcf files (possibly gzipped) must be given as input files, and a json file should be given as output file.\"\n status = \"failed\"\n raise generalException(msg)\n\n f = open(self.input_file)\n o = open(self.output_file,'w')\n vcf_reader = vcf.Reader(f)\n #cc = 1\n for record in vcf_reader:\n #for i in [1]:\n record = vcf_reader.next()\n for s in record.samples:\n if hasattr(s.data,'DP'):\n call_DP = s.data.DP\n else:\n call_DP = \"NA\"\n if len(uniqueInList(s.data.GT.split('|'))) > 1:\n call_het = \"Heterozygous\"\n else:\n call_het = \"Homozygous\"\n if isinstance(record.ALT, list):\n ALT = '|'.join([str(a) for a in record.ALT])\n else:\n ALT = record.ALT\n if isinstance(record.FILTER, list):\n FILTER = '|'.join([str(a) for a in record.FILTER])\n else:\n FILTER = str(record.FILTER)\n \n linedic = {\n \"variants_info_num_genes\" : \"NA\", \n 
\"variants_quality\" : str(record.QUAL),\n \"variants_info_allele_num\": \"NA\",\n \"variants_calls_info_zygosity\": call_het,\n \"variants_info_short_tandem_repeat\": \"NA\",\n \"readGroupSets_readGroups_experiment_sequencingCenter\": \"NA\",\n \"readGroupSets_readGroups_info_patient\": s.sample,\n \"variants_info_change_type\": record.var_type,\n \"variants_calls_info_read_depth\": str(call_DP),\n \"variants_info_other_effects\": \"NA\",\n \"variants_referenceBases\": record.REF,\n \"variants_info_is_scSNV_Ensembl\": \"NA\",\n \"readGroupSets_readGroups_experiment_libraryId\": \"NA\",\n \"variants_info_dbsnp_id_137\": \"NA\",\n \"variants_info_lof_tolerant_or_recessive_gene\": \"NA\",\n \"variants_info_is_scSNV_RefSeq\": \"NA\",\n \"variants_filters\": FILTER,\n \"readGroupSets_readGroups_sampleID\": s.sample,\n \"variants_start\": str(record.POS),\n \"variants_info_downsampled\": \"NA\",\n \"variants_referenceName\": record.CHROM,\n \"variants_alternateBases\": ALT,\n \"variants_calls_genotype\" : s.data.GT\n }\n o.write(json.dumps(linedic, ensure_ascii=False) + \"\\n\")\n\n o.close()\n f.close()\n\n status = \"succeeded\"\n return(status)\n # #sampleIdList = \n # varDic = {{\"Callset\": {\"id\" : , \"sampleId\" : , \"variantSetIds\" : [] }},\n # # {\"ReadGroupSets\" :\n # # {\"ReadGroups\" : {\"sampleId\" : }, {\"sampleId\" : }}\n # # },\n # {\"Variants\" :\n # {\"variantSetId\" : \"\",\n # \"referenceName\" : \"\",\n # \"start\" : \"\",\n # \"end\" : \"\",\n # \"referenceBases\" :\n # \"alternateBases\" :\n # \"quality\" :\n # \"filter\" :\n # },\n # \"calls\" :\n # { \"callSetId\": ,\n # \"genotype\" : []\n # }\n # },\n # { \"Variantsets\" { \"id\" : }}\n \n \n \n # jsonline = json.dumps(varDic, ensure_ascii=False)\n # cc += 1", "def test_output_hff(self):\n args = parse_args('convert {} -o file.hff'.format(self.test_data_file), use_shlex=True)\n # assertions\n self.assertEqual(args.output, 'file.hff')", "def convert_data(hdu, vslr):\n wave = hdu[1].data['Wavelength']\n wave = air2vac(wave)\n return wave * (1+vslr/c.c)", "def toHLS(self):\n retVal = self.getEmpty()\n if( self._colorSpace == ColorSpace.BGR or\n self._colorSpace == ColorSpace.UNKNOWN ):\n cv.CvtColor(self.getBitmap(), retVal, cv.CV_BGR2HLS)\n elif( self._colorSpace == ColorSpace.RGB):\n cv.CvtColor(self.getBitmap(), retVal, cv.CV_RGB2HLS)\n elif( self._colorSpace == ColorSpace.HSV ):\n cv.CvtColor(self.getBitmap(), retVal, cv.CV_HSV2RGB)\n cv.CvtColor(retVal, retVal, cv.CV_RGB2HLS)\n elif( self._colorSpace == ColorSpace.XYZ ):\n cv.CvtColor(self.getBitmap(), retVal, cv.CV_XYZ2RGB)\n cv.CvtColor(retVal, retVal, cv.CV_RGB2HLS)\n elif( self._colorSpace == ColorSpace.HLS ):\n retVal = self.getBitmap() \n else:\n warnings.warn(\"Image.toHSL: There is no supported conversion to HSL colorspace\")\n return None\n return Image(retVal, colorSpace = ColorSpace.HLS )", "def read_normalization_transform(handle): \n return NormalizationTransform(handle.read(52))", "def process_vcf_post_l_merge(l_merge_output_vcf_path, processed_vcf_path):\n with open(l_merge_output_vcf_path) as l_merge_output_fh:\n with open(processed_vcf_path, 'w') as processed_vcf_fh:\n vcf_reader = vcf.Reader(l_merge_output_fh)\n\n # Fix info strings.\n _update_info_string_number(vcf_reader, 'SVTYPE', -1)\n _update_info_string_number(vcf_reader, 'SVLEN', -1)\n\n # Fix format header.\n orig = vcf_reader.formats['SU']\n vcf_reader.formats['DP'] = vcf.parser._Format(\n 'DP', orig.num, orig.type, orig.desc)\n del vcf_reader.formats['SU']\n\n # Make column headers 
match what's expected by vcf_parser.\n # l_merge output is missing FORMAT column header, and columns\n # for each sample.\n if not 'FORMAT' in vcf_reader._column_headers:\n vcf_reader._column_headers.append('FORMAT')\n vcf_reader.samples = [\n x['ID'] for x in vcf_reader.metadata['SAMPLE']]\n\n # Writer object using Reader as template.\n vcf_writer = vcf.Writer(processed_vcf_fh, vcf_reader)\n\n # Format each record with correct setting.\n for record in vcf_reader:\n # Filter when insufficient support.\n if int(record.INFO['SU'][0]) < 10:\n continue\n\n # Per-sample values.\n record.FORMAT = 'GT:DP'\n\n # vcf.model._Call requires data as a hashable type so follow\n # vcf internal code pattern of making a tuple.\n calldata_tuple_type = vcf.model.make_calldata_tuple(\n record.FORMAT.split(':'))\n\n samples_with_sv = [\n x.split(':')[0] for x in record.INFO['SNAME']]\n\n if 'SULIST' in record.INFO:\n dp_list = [x.split(':')[0] for x in record.INFO['SULIST']]\n else:\n dp_list = record.INFO['SU']\n\n # Parse the record\n record_samples = []\n for sample_id in vcf_reader.samples:\n try:\n sample_idx = samples_with_sv.index(sample_id)\n\n sample_data = calldata_tuple_type(\n GT='1/1',\n DP=dp_list[sample_idx])\n except ValueError:\n sample_data = calldata_tuple_type(GT='./.', DP=0)\n record_samples.append(\n vcf.model._Call(record, sample_id, sample_data))\n record.samples = record_samples\n\n # update METHOD field\n record.__dict__['INFO']['METHOD'] = 'LUMPY'\n\n vcf_writer.write_record(record)", "def itkVectorMagnitudeImageFilterICVF32IUC2_cast(obj: 'itkLightObject') -> \"itkVectorMagnitudeImageFilterICVF32IUC2 *\":\n return _itkVectorMagnitudeImageFilterPython.itkVectorMagnitudeImageFilterICVF32IUC2_cast(obj)", "def test_sff_default_output_hff(self):\n args = parse_args('convert {}'.format(self.test_sff_file), use_shlex=True)\n self.assertEqual(args.output, self.test_hff_file)", "def convert_to_hic_format(self):\n\n if self.cfg.tal_mode == \"wt\":\n hek_mat = pd.read_csv(self.hek_file, sep=\"\\t\")\n elif self.cfg.tal_mode == \"tal1_ko\":\n hek_mat = pd.read_csv(self.tal1ko_file, sep=\"\\t\")\n elif self.cfg.tal_mode == \"lmo2_ko\":\n hek_mat = pd.read_csv(self.lmo2ko_file, sep=\"\\t\")\n\n \"get positions\"\n index, chr_list = self.change_index(list(hek_mat.index))\n columns, _ = self.change_index(hek_mat.columns)\n\n \"assign rows, columns and chr\"\n hek_mat.index = index\n hek_mat.columns = columns\n hek_mat[\"chr\"] = chr_list\n\n \"get matrices for TAL1 and LMO2\"\n tal1_mat = hek_mat.loc[hek_mat[\"chr\"] == \"chr1\"]\n tal1_mat = tal1_mat.iloc[:, 0:285]\n lmo2_mat = hek_mat.loc[hek_mat[\"chr\"] == \"chr11\"]\n lmo2_mat = lmo2_mat.iloc[:, 286:632]\n tal1_mat = tal1_mat.groupby(level=0, axis=1).sum()\n tal1_mat = tal1_mat.groupby(level=0, axis=0).sum()\n lmo2_mat = lmo2_mat.groupby(level=0, axis=1).sum()\n lmo2_mat = lmo2_mat.groupby(level=0, axis=0).sum()\n\n \"prepare data in the form of Hi-C\"\n tal_i = list(tal1_mat.index)\n tal_j = tal1_mat.columns\n lmo2_i = list(lmo2_mat.index)\n lmo2_j = lmo2_mat.columns\n\n tal_df = pd.DataFrame(columns=[\"i\", \"j\", \"v\"])\n for i in tal_i:\n for j in tal_j:\n tal_df = tal_df.append({\"i\": i, \"j\": j, \"v\": tal1_mat.loc[i][j]}, ignore_index=True)\n\n lmo2_df = pd.DataFrame(columns=[\"i\", \"j\", \"v\"])\n for i in lmo2_i:\n for j in lmo2_j:\n lmo2_df = lmo2_df.append({\"i\": i, \"j\": j, \"v\": lmo2_mat.loc[i][j]}, ignore_index=True)\n\n \"save data\"\n if self.cfg.tal_mode == \"wt\":\n tal_df.to_csv(cfg.hic_path + cfg.cell + 
\"/tal_df.txt\", sep=\"\\t\")\n lmo2_df.to_csv(cfg.hic_path + cfg.cell + \"/lmo2_df.txt\", sep=\"\\t\")\n else:\n tal_df.to_csv(cfg.output_directory + \"tal1_ko.txt\", sep=\"\\t\")\n lmo2_df.to_csv(cfg.output_directory + \"lmo2_ko.txt\", sep=\"\\t\")", "def _convertToVcf(self, beagleFile, chrom, origVcf):\r\n \r\n vcfFile = VcfFile.VcfFile(origVcf.pool, origVcf.pool.outputDir + chrom + \"_\" + os.path.basename(origVcf.fileName), chrom=chrom)\r\n cmd = (\"java -jar -Xmx30g \" + Program.config.getPath(\"gatk\") + \r\n \" -R \" + Program.config.getPath(\"refGenome\") + \r\n \" -T BeagleOutputToVCF\" +\r\n \" -V \" + origVcf.fileName +\r\n \" -beagleR2:BEAGLE \" + beagleFile.rTwoFile +\r\n \" -beaglePhased:BEAGLE \" + beagleFile.getFile(\"phased\") +\r\n \" -beagleProbs:BEAGLE \" + beagleFile.getFile(\"gprobs\") +\r\n \" -o \" + vcfFile.fileName +\r\n \" --unsafe LENIENT_VCF_PROCESSING\")\r\n self.execute(cmd, \"gatk\", vcfFile)\r\n beagleFile.pool.vcf[chrom] = vcfFile\r\n vcfFile.phased = True\r\n return vcfFile", "def format_vcf(self):\n # Create a list of information for a VCF file\n info = [key + '=' + ','.join(value) for key, value in self._info.items()]\n info.extend(self._flags)\n vcf_line = [\n self._contig,\n str(self._position),\n self._snpid,\n self._reference,\n self._alternate,\n '.',\n '.',\n ';'.join(info)\n ]\n return '\\t'.join(vcf_line) # Join everything together with a tab", "def tcf2vrt(self,tcf):\n p = etree.Element(self.element)\n self.transform(tcf, p)\n return p", "def itkVectorMagnitudeImageFilterICVF22IUC2_cast(obj: 'itkLightObject') -> \"itkVectorMagnitudeImageFilterICVF22IUC2 *\":\n return _itkVectorMagnitudeImageFilterPython.itkVectorMagnitudeImageFilterICVF22IUC2_cast(obj)", "def convert_lav_to_psl(self):\n self.update_log(\"Converting the .lav to .psl\", \"3\",\n datetime.datetime.now())\n # Name for psl file\n # Call kentUtils tool lavToPsl to perform the format conversion\n call([\"/hps/nobackup/goldman/conor/1k_genomes/template_switching_sm/tools/lavToPsl\", self.output_file,\n self.out_psl_file], stdout=self.FNULL, stderr=STDOUT)\n # Delete the lav file\n #remove(self.output_file)", "def parse_vcf2(input_vcf: Union[str, Path], *args: str) -> DataFrame:\n\n def join_split_info(df):\n df_sub = df.INFO.str.split(\";\", expand=True)\n df_sub.columns = [x[: x.find(\"=\")] for x in df_sub.loc[0]]\n df_sub = df_sub.apply(lambda series: [x[x.find(\"=\") + 1 :] for x in series])\n index = pd.MultiIndex.from_product(\n [[\"INFO\"], df_sub.columns], names=[\"first\", \"second\"]\n )\n df_sub.columns = index\n del df[(\"INFO\", (\"\"))]\n df = df.join(df_sub)\n return df\n\n def split_samples(df):\n fixed = [\"CHROM\", \"POS\", \"ID\", \"REF\", \"ALT\", \"QUAL\", \"FILTER\", \"FORMAT\", \"INFO\"]\n samples = [x for x in df.columns.get_level_values(\"first\") if x not in fixed]\n if len(samples) > 0:\n # optional genotype fields present\n second = df.FORMAT[0].split(\":\")\n for sample in samples:\n df_sub = df[sample].str.split(\":\", expand=True)\n index = pd.MultiIndex.from_product(\n [[sample], second], names=[\"first\", \"second\"]\n )\n df_sub.columns = index\n del df[(sample, (\"\"))]\n df = df.join(df_sub)\n return df\n\n dfs = []\n input_vcfs = [Path(input_vcf)]\n if len(args) > 0:\n input_vcfs += [Path(x) for x in args]\n for input_vcf in input_vcfs:\n comments = []\n to_df = []\n print(input_vcf)\n with input_vcf.open(\"r\") as inp:\n # unfortunately, pandas does not take multiple char comment indicators\n for line in inp.readlines():\n if 
line.startswith(\"#\"):\n comments.append(line)\n else:\n to_df.append(line[:-1].split(\"\\t\"))\n if len(comments) == 0:\n raise ValueError(f\"{input_vcf} is not in valid vcf format.\")\n\n columns = comments[-1][1:-1].split(\"\\t\")\n mcolumns = pd.MultiIndex.from_product(\n [columns, [\"\"]], names=[\"first\", \"second\"]\n )\n df = pd.DataFrame(to_df, columns=mcolumns)\n # split INFO\n df = join_split_info(df)\n # split samples\n df = split_samples(df)\n dfs.append(df)\n df = pd.concat(dfs)\n return df" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Binary-encode categorical column `target_colname` separated by `value_sep` in data frame `dfm` into multiple binary columns with the same prefix `dest_colname_prefix`. MySQL returns `GROUP_CONCAT(tf.name)` in the `tfName` column as a comma-separated string, e.g. `ARID3A,ATF1,ATF2` stands for 3 TFs. This function splits that string on commas, and the 3 new columns `tf_ARID3A`, `tf_ATF1` and `tf_ATF2` would be 1 for this SNP.
def binary_encode_tfbs(dfm, target_colname="tfName", value_sep=',', dest_colname_prefix=None):
    dummies = dfm.loc[:, target_colname].str.get_dummies(sep=value_sep)
    if dest_colname_prefix is not None:
        # Add a prefix to all column names
        dummies = dummies.add_prefix(dest_colname_prefix)
    dfm = pd.concat([dfm, dummies], axis=1).drop(target_colname, axis=1)
    return dfm
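Illustrative sketch (not part of the dataset): a usage example matching the TF example in the query; the data frame contents are hypothetical.

import pandas as pd

dfm = pd.DataFrame({"snp": ["rs1", "rs2"],
                    "tfName": ["ARID3A,ATF1,ATF2", "ATF1"]})
encoded = binary_encode_tfbs(dfm, target_colname="tfName", dest_colname_prefix="tf_")
print(encoded)
#    snp  tf_ARID3A  tf_ATF1  tf_ATF2
# 0  rs1          1        1        1
# 1  rs2          0        1        0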
[ "def replace_labels(dataframe, id_col_name='FBbt_id', label_col_name='FBbt_name', sep='|'):\n col_order = dataframe.columns\n dataframe['converted_ids'] = dataframe.loc[:,id_col_name].apply(\n lambda x: (str(x).replace(':', '_')).split(sep))\n FBbt_list = list(dataframe.loc[:,'converted_ids'])\n flat_FBbt_list = list(set([item for sublist in FBbt_list for item in sublist]))\n\n # [str(x).replace(':', '_') for x in set(\n # dataframe[dataframe[id_col_name].notnull()][id_col_name])]\n\n query = (\"MATCH (c:Class) WHERE c.short_form IN %s \"\n \"RETURN c.short_form AS ID, c.label AS label\"\n % flat_FBbt_list)\n\n q = nc.commit_list([query])\n labels = dict_cursor(q)\n\n labels_df = pd.DataFrame(labels).set_index('ID')\n\n# allow label column not to be present in original spreadsheet (and add to columns to return if not there)\n try:\n dataframe = dataframe.drop(label_col_name, axis=1)\n except KeyError:\n ID_col_loc = col_order.get_loc(id_col_name)\n col_order = col_order.insert(ID_col_loc + 1, label_col_name)\n\n def label_lookup(ID_list):\n \"\"\"Looks up labels of items of a list of IDs in labels_df and returns list of labels.\"\"\"\n label_list = []\n try:\n label_list = [labels_df.loc[i, 'label'] for i in ID_list]\n except KeyError:\n pass\n return label_list\n\n# make column of lists of labels from column of lists of IDs\n dataframe['label_lists'] = dataframe.loc[:,'converted_ids'].apply(\n lambda x: label_lookup(x))\n# convert lists to strings with separator\n dataframe[label_col_name] = dataframe.loc[:,'label_lists'].apply(\n lambda x: sep.join(x) if type(x) == list else x)\n\n dataframe = dataframe[col_order]\n return dataframe", "def _one_hot_encode_targets(\n message_categories_df: pd.DataFrame,\n raw_cat_col: str\n) -> pd.DataFrame:\n cat_names = _get_category_names(message_categories_df[raw_cat_col].iloc[0], ';')\n cat_df = message_categories_df[raw_cat_col].str.split(';', expand=True)\n cat_df.columns = cat_names\n for cat_name in cat_names:\n cat_df[cat_name] = cat_df[cat_name].str.split('-').str[-1].astype(int)\n message_categories_df = message_categories_df.drop(raw_cat_col, axis=1)\n return message_categories_df.join(cat_df)", "def replace_ids(dataframe, id_col_name='FBbt_id', label_col_name='FBbt_name'):\n col_order = dataframe.columns\n label_list = list(set(dataframe[dataframe[label_col_name].notnull()][label_col_name].tolist()))\n\n query = \"MATCH (c:Class) WHERE c.label IN %s \\\n RETURN c.short_form AS %s, c.label AS %s\" \\\n % (label_list, id_col_name, label_col_name)\n\n q = nc.commit_list([query])\n ids = dict_cursor(q)\n\n ids_df = pd.DataFrame(ids)\n ids_df = ids_df.applymap(lambda x: x.replace('_', ':'))\n\n existing_column = True\n try:\n dataframe = dataframe.drop(id_col_name, axis=1)\n except KeyError:\n existing_column = False\n dataframe = pd.merge(left=dataframe, right=ids_df, how='left', on=label_col_name)\n if existing_column:\n dataframe = dataframe[col_order]\n\n return dataframe", "def prepare_data(df, num_features, cat_features, target=None):\r\n\r\n\talgo_df = pd.DataFrame()\r\n\t\r\n\tfor feature in num_features:\r\n\t\talgo_df[feature] = df[feature]\r\n\t\r\n\tfor f in cat_features:\r\n\t\tdf_dummy = pd.get_dummies(df[f], prefix=f)\r\n\t\talgo_df = pd.concat((algo_df, df_dummy), axis=1)\r\n\r\n\treturn algo_df", "def map_categorical_encoding(df, x, y=None, max_lvls = 100, min_percent = 0.025):\n\n import pandas as pd\n import numpy as np\n\n out = []\n\n for feat in x:\n if y is not None: # can only do target encoding if target is provided\n\n 
target_lvls = len(df[y].unique()) # get uniques in target\n if target_lvls < max_lvls: # decide if classification or regression\n df[y] = df[y].astype('category').cat.codes\n\n tmp=df[[feat,y]].groupby([feat]).agg({feat:'count', y:'sum'})\n tmp['feature'] = feat\n tmp['level']=tmp.index\n tmp=tmp.reset_index(level=0, drop=True).reset_index()\n tmp.rename(columns={feat: 'count', y: 'sum'}, inplace=True)\n\n else:\n \n tmp=df[[feat]].groupby([feat]).agg({feat:'count'})\n tmp['feature'] = feat\n tmp['level']=tmp.index\n tmp=tmp.reset_index(level=0, drop=True).reset_index()\n tmp.rename(columns={feat: 'count'}, inplace=True)\n\n tmp['proportional_encode'] = tmp['count'] / tmp['count'].sum()\n tmp['flag_low_prop'] = np.where(tmp['proportional_encode'] < min_percent , 1, 0) \n tmp = tmp.sort_values('proportional_encode', ascending=False) # order for ordinal encoding\n tmp['ordinal_encode'] = ((tmp['proportional_encode'].cumsum() - 0.5) * tmp['proportional_encode']) / tmp['proportional_encode'].sum()\n tmp['onehot'] = np.where(tmp['flag_low_prop'] == 1, 'ALL_OTHER', tmp['level'])\n\n if y is not None:\n noise = np.random.rand(len(tmp['level']))\n glb = tmp['sum'].sum() / tmp['count'].sum()\n lmda = 1 / (1 + np.exp((tmp['count'] - 20) / 10 * -1))\n tmp['target_encode_weighted'] = ((1 - lmda) * glb) + (lmda * tmp['sum'] / tmp['count'])\n tmp['target_encode_noise'] = (tmp['sum'] / tmp['count']) + (noise * 2 *0.01 - 0.01)\n tmp['target_encode_mean'] = (tmp['target_encode_weighted'] + tmp['target_encode_noise']) / 2\n \n if y is not None:\n tmp = tmp.drop(columns=['index', 'count','sum'])\n else:\n tmp = tmp.drop(columns=['index', 'count'])\n \n out.append(tmp)\n out = pd.concat(out)\n return out", "def col_labels (df, col_list):\n for tf in col_list:\n df[tf] = 0\n # Create a column for if enhancer overlaps transposable element\n df[\"enhancer_actual\"] = 0", "def feature_rfm_encode(self):\n if self._is_rfm_encode is False:\n return\n\n self._rfm_encoder, df_encoded = \\\n p5_util.df_rfm_one_hot_encode(self.df_invoice, 'RFM')\n \n #-------------------------------------------------------------------------\n # Encoded columns are renamed with root name = w_rfm_\n #-------------------------------------------------------------------------\n df_encoded, list_col_unchanged \\\n = p5_util.df_rename_columns(df_encoded, \"w_rfm_\")\n\n #-------------------------------------------------------------------------\n # New features issue from encoded RFM are aggregated to data sample\n #-------------------------------------------------------------------------\n self.df_invoice = pd.concat([self.df_invoice, df_encoded] , axis=1)\n del(df_encoded)\n return", "def one_hot_encode(self, columns, prefix):\n\n self.df = pd.get_dummies(self.df, columns=columns, prefix=prefix)", "def transform_target(self):\n transformed_frame = self.validation_data.melt(\n id_vars=[\"ID\"], value_vars=[\"m1\", \"m2\", \"m3\", \"m4\", \"m5\", \"m6\"], var_name=\"temp\", value_name=\"Target\"\n )\n transformed_frame[\"ID\"] = transformed_frame[[\"ID\", \"temp\"]].agg(\" x \".join, axis=1)\n transformed_frame.drop(columns=[\"temp\"], inplace=True)\n transformed_frame.sort_values(by=[\"ID\"], ascending=True, inplace=True)\n\n return transformed_frame", "def rename_cols_with_feat_name_prefix(feat_code, colnames, df, idxcol=\"SK_ID_CURR\"):\n df.set_index(idxcol, drop=True, inplace=True)\n\n # FEAT_CODE = \"CCB\"\n rename_cols = {}\n for colname in colnames:\n rename_cols[colname] = \"{}_{}\".format(feat_code, colname)\n\n df.rename(index=int, 
inplace=True, columns=rename_cols)\n\n df.reset_index(inplace=True)\n\n return list(rename_cols.values())", "def encode_onehot(df, cols):\n vec = DictVectorizer()\n vec_data = pd.DataFrame(vec.fit_transform(df[cols].to_dict(orient='records')).toarray())\n vec_data.columns = vec.get_feature_names()\n vec_data.index = df.index\n\n df = df.drop(cols, axis=1)\n df = df.join(vec_data)\n return df", "def generate_polynomial_feature(dataframe, features=[], target='', one_hot_encode=True):\r\n NUMBER2NOMINAL_NUM = 30\r\n NUMBER2NOMINAL_RATIO = 1/10\r\n df = dataframe\r\n\r\n # convert nominal type feature to number type feature\r\n if len(features)==0:\r\n features = df.columns\r\n if one_hot_encode == True:\r\n# cat_feats = [f for f in features if df[f].dtype == object and f!=target]\r\n cat_feats = df.select_dtypes('object').columns.tolist()\r\n cat_feats = list(set(cat_feats).intersection(set(features)))\r\n if target in cat_feats:\r\n cat_feats.remove(target)\r\n\r\n df,new_cols = one_hot_encoder(dataframe, True)\r\n df = pd.concat([df, dataframe[cat_feats]],axis=1)\r\n # convert number type feature to nominal type feature\r\n# numeric_feats = [f for f in features if df[f].dtype != object \\\r\n# and f not in new_cols and f!=target]\r\n numeric_feats = df.select_dtypes('number').columns.tolist()\r\n numeric_feats = list(set(numeric_feats).intersection(set(features)))\r\n if target in numeric_feats:\r\n numeric_feats.remove(target)\r\n numeric_feats = list(set(numeric_feats).difference(set(new_cols))) \r\n \r\n if target in cat_feats:\r\n cat_feats.remove(target)\r\n \r\n unique = df[numeric_feats].nunique()\r\n for f_ in numeric_feats:\r\n if unique[f_] <= NUMBER2NOMINAL_NUM \\\r\n and unique[f_]/df.shape[0] <= NUMBER2NOMINAL_RATIO:\r\n df[f_+'_cat'] = df[f_].astype(str)\r\n\r\n# cat_feats = [f for f in features if df[f].dtype == object and f!=target]\r\n# cat_feats2 = [f for f in features if df[f].dtype == object and f!=target]\r\n cat_feats = df.select_dtypes('object').columns.tolist()\r\n cat_feats = list(set(cat_feats).intersection(set(features)))\r\n if target in cat_feats:\r\n cat_feats.remove(target)\r\n cat_feats2 = cat_feats[:]\r\n\r\n for f_1 in cat_feats:\r\n for f_2 in cat_feats2:\r\n if f_1!=f_2:\r\n df[f_1+'_'+f_2] =df[f_1]+'_'+df[f_2]\r\n cat_feats2.remove(f_1)\r\n \r\n# numeric_feats = [f for f in features if df[f].dtype != object]\r\n# numeric_feats2 = [f for f in features if df[f].dtype != object]\r\n numeric_feats = df.select_dtypes('number').columns.tolist()\r\n numeric_feats = list(set(numeric_feats).intersection(set(features)))\r\n if target in numeric_feats:\r\n numeric_feats.remove(target)\r\n numeric_feats2 = numeric_feats[:] \r\n for f_1 in numeric_feats:\r\n for f_2 in numeric_feats2:\r\n df[f_1+'x'+f_2] = np.multiply(df[f_1],df[f_2])\r\n if f_1 != f_2:\r\n df[f_1+'/'+f_2] = np.divide(df[f_1], df[f_2])\r\n numeric_feats2.remove(f_1)\r\n\r\n return df", "def addBinColumn(df):\n binaries = []\n columns = df.columns\n dff = df.copy()\n for col in dff.columns:\n dff = dff.rename(columns={col: str(col)})\n for _, row in dff.iterrows():\n binary = '0b'\n for col in dff.columns:\n binary = binary + str(int(row[col]))\n binaries.append(int(binary, 2))\n df[cn.VALUE] = binaries", "def binary_feature(df, feat_col, value, binary_feature_col_name=None, concat=False):\n # If binary_feature_col_name is none use this instead\n if not binary_feature_col_name:\n binary_feature_col_name = feat_col+'_is_'+str(value)\n\n def is_value_present(s, value):\n \"\"\"\n Given a series and a 
value, return a binary feature 1 if present and 0 if otherwise\n \"\"\"\n if s[feat_col] == value:\n return 1\n else:\n return 0\n # Return binary feature series\n binary_feature = df.apply(lambda s: is_value_present(s, value), axis=1)\n # Set series name\n binary_feature.name = binary_feature_col_name\n if concat:\n return pandas.concat([df, binary_feature], axis=1)\n return binary_feature", "def assemble_features_labels_to_json(self, dst_json_file_path=None):\n # initialize\n self.assembled_data_json = []\n\n # meta_data\n self.assembled_data_json.append({\n 'start_index_cat': 0,\n 'end_index_cat': self._idx,\n 'feat_name_num': self.numerical_feature_name_,\n 'label_name': self.label_name_\n })\n\n for item in self._item_samples.keys():\n feat_label = {}\n self.assembled_data_json.append(feat_label)\n\n # label\n feat_label.update({self.label_name_: self._Y[self._item_samples[item][0]]})\n\n # categorical features\n for feat_index in self._item_samples.get(item):\n feat = self._X_cat[feat_index]\n for i in xrange(0, len(feat)):\n feat_label.update({str(feat[i]): True})\n\n # numerical features\n feat_index = self._item_samples[item][0]\n feat = self._X_num[feat_index]\n for i in xrange(0, len(feat)):\n feat_label.update({self.numerical_feature_name_[i]: feat[i]})\n\n if dst_json_file_path is not None:\n with open(dst_json_file_path, 'w') as f:\n json.dump(self.assembled_data_json, f)\n\n return self.assembled_data_json", "def create_fields_meta_cols(col_fam, src_name, feature_key, fields):\n row_key = gen_hash(src_name + feature_key + \"meta\")\n col_fam.insert(row_key, fields)\n return col_fam.get(row_key)", "def apply_imputation_encoding(df, mapping_table, tracking_flags=True):\n\n import pandas as pd\n import numpy as np\n\n categorical = df.select_dtypes(include=['object','category'])\n\n for feature in mapping_table['feature']:\n\n flag_feature = feature + '_flag_missing'\n tmp_df = mapping_table.loc[mapping_table['feature'] == feature]\n\n if feature in categorical.columns:\n value = tmp_df['mode']\n else:\n value = tmp_df['median']\n\n if tracking_flags == True:\n if tmp_df['create_flag'].item() == 1:\n df[flag_feature] = np.where(df[feature].isnull(), 1, 0)\n\n df[feature] = df[feature] = np.where(df[feature].isnull(), value, df[feature])\n\n return df", "def replace_ids_in_file(filename, id_col_name='FBbt_id', label_col_name='FBbt_name'):\n input_dataframe = pd.read_csv(filename, sep='\\t')\n output_dataframe = replace_ids(input_dataframe, id_col_name, label_col_name)\n\n output_dataframe.to_csv(filename, sep='\\t', index=False)", "def encodeFeatures(self, feature_list):\n enc_str = \"\"\n for feature in self.all_features:\n if feature in feature_list:\n enc_str += \"1\"\n else:\n enc_str += \"0\"\n return enc_str" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set fixed parameter values. Examples >>> from cymr import parameters >>> param_def = parameters.Parameters() >>> param_def.set_fixed(a=1, b=2)
def set_fixed(self, *args: dict[str, float], **kwargs: float): self.fixed.update(*args, **kwargs)
[ "def set_fixed(self, section, name, value):\n i = self.parameter_index(section, name)\n self.parameters[i].limits = (value, value)\n self.reset_fixed_varied_parameters()", "def set_parameter_fixed(parameter_dictionary, param_name, fix):\n if 'parameter_info' not in parameter_dictionary:\n print(\"Provided parameter dictionary did not have 'parameter_info' as a key.\")\n return\n if type(fix)!=bool:\n print(\"The 'fixed' parameter must be either True or False.\")\n return\n param_info = parameter_dictionary['parameter_info']\n list_of_paramnames = []\n which_parameter = -1\n for i,param in enumerate(param_info):\n list_of_paramnames.append(param['parname'])\n if param_name==param['parname']:\n which_parameter = i\n if param_name in list_of_paramnames:\n parameter_dictionary['parameter_info'][which_parameter]['fixed'] = fix\n if fix:\n print(f\"Parameter '{param_name}' will be fixed.\")\n else:\n print(f\"Parameter '{param_name}' will be allowed to vary.\")", "def set_varied(self, section, name, lower, upper):\n i = self.parameter_index(section, name)\n self.parameters[i].limits = (lower,upper)\n self.reset_fixed_varied_parameters()", "def fix_params(self, fix):\n\n self._modify_parameters(fix, set_fix=True)\n self._update()", "def set_fixed(self, num):\n self.fixed[num] = True", "def _set_parameter(self, par, val):\n self._parchk(par)\n setattr(self, par, float(val))", "def set_param(self, param, value=None, min=None, max=None, vary=None):\n if value is not None:\n self.params[param].value = value\n if min is not None:\n self.params[param].min = min\n if max is not None:\n self.params[param].max = max\n if vary is not None:\n self.params[param].vary = vary", "def reset_fixed_varied_parameters(self):\n self.varied_params = [param for param in self.parameters\n if param.is_varied()]\n self.fixed_params = [param for param in self.parameters\n if param.is_fixed()]", "def fixed(arg_name, fix=True):\n # TODO is this a toggleable 'lock' on the parameter's value?\n\n def decorator(func):\n _quick_set(func, 'fixed', arg_name, fix, {})\n return func\n\n return decorator", "def set(value,force=False):", "def set_parameters(self, params, **kargs):\n self._solver.set_parameters(params, **kargs)", "def set_parameters(cls, params):\n # cls.params_dict.update(params)\n for parameter in params:\n # cls.params_dict.update(params)\n if parameter in cls.params_dict:\n if params[parameter] < 0:\n raise ValueError(f\"{parameter} cannot be negative.\")\n if parameter == \"DeltaPhiMax\" and params[parameter] <= 0:\n raise ValueError(\"DeltaPhiMax must be larger than zero\")\n if parameter == \"eta\" and not 0 <= params[parameter] <= 1:\n raise ValueError(\"Eta must be greater than zero and smaller than one\")\n cls.params_dict.update(params)\n else:\n raise ValueError(\"Parameter not defined for this animal\")", "def setParams(self, params):\n return _core.CSumLinear_setParams(self, params)", "def test_get_set_parameters(self):\n self.assert_enter_command_mode()\n\n # verify we can set read/write parameters\n constraints = ParameterConstraints.dict()\n parameters = Parameter.dict()\n for key in constraints:\n if self._driver_parameters[parameters[key]][self.READONLY]:\n continue\n _, _, maximum = constraints[key]\n self.assert_set_parameter(parameters[key], maximum)", "def setValue(self, parameterValue: cern.japc.value.ParameterValue) -> None:\n ...", "def do_set(self, args):\n\n split_args = args.split()\n if len(split_args) < 1:\n module_logger.error(\"You must provide at least one argument\".format(args))\n 
elif len(split_args) == 1:\n if split_args[0] == \"iface\":\n iface = interface.get_first_interface()\n\n if iface is not None:\n self._params.iface = iface\n else:\n module_logger.error(\"There are no wireless interfaces available.\")\n elif split_args[0] == 'macs':\n self._params.macs = []\n else:\n module_logger.error(\"Parameters require a value\".format(split_args[0]))\n elif split_args[0] in meta.Params.VALID_PARAMS:\n try:\n param = split_args[0]\n value = split_args[1]\n # Validate certain parameters\n if split_args[0] == \"iface\":\n self._params.iface = value\n elif param == \"duration\":\n self._params.duration = value\n elif param == \"degrees\":\n self._params.degrees = value\n elif param == \"bearing\":\n self._params.bearing_magnetic = value\n elif param == \"hop_int\":\n self._params.hop_int = value\n elif param == \"hop_dist\":\n self._params.hop_dist = value\n elif param == \"mac\":\n self._params.add_mac(value)\n elif param == \"macs\":\n # Load macs from provided file\n self._params.add_mac(localizer.load_macs(value))\n elif param == \"channel\":\n self._params.channel = value\n elif param == \"capture\":\n self._params.capture = value\n\n print(\"Parameter '{}' set to '{}'\".format(param, value))\n\n except (ValueError, FileNotFoundError) as e:\n module_logger.error(e)\n else:\n module_logger.error(\"Invalid parameter '{}'\".format(split_args[0]))\n\n self._update_prompt()", "def set( i, value ):\n if i in __param: __param[i] = value", "def test_set_parameters_fusion(backend):\n c = Circuit(2)\n c.add(gates.RX(0, theta=0.1234))\n c.add(gates.RX(1, theta=0.1234))\n c.add(gates.CNOT(0, 1))\n c.add(gates.RY(0, theta=0.1234))\n c.add(gates.RY(1, theta=0.1234))\n fused_c = c.fuse()\n np.testing.assert_allclose(fused_c(), c())\n\n c.set_parameters(4 * [0.4321])\n fused_c.set_parameters(4 * [0.4321])\n np.testing.assert_allclose(fused_c(), c())", "def setparam(self, param, value):\n\t\treturn self.__command(\"param.set %s %s\" % (param, value))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set free parameter ranges. Examples >>> from cymr import parameters >>> param_def = parameters.Parameters() >>> param_def.set_free(a=[0, 1], b=[1, 10])
def set_free( self, *args: dict[str, Iterable[float]], **kwargs: Iterable[float] ) -> None: self.free.update(*args, **kwargs)
[ "def setRegularizationParameter(self, beta) -> None:\n ...", "def set_free(self, num):\n self.fixed[num] = False", "def set_params_range(self):\n pass", "def _validate_and_set_permitted_range(self, params):\r\n self.permitted_range = None\r\n if 'permitted_range' in params:\r\n self.permitted_range = params['permitted_range']\r\n # PY: checks and set the features range for the rest of the parameters for\r\n # which the range is not provided.\r\n if not self.check_features_range():\r\n raise ValueError(\r\n \"permitted range of features should be within their original range\")\r\n else:\r\n self.permitted_range, feature_ranges_orig = self.get_features_range(self.permitted_range)", "def SetFreeSliding(self, *args):\n return _FairCurve.FairCurve_Batten_SetFreeSliding(self, *args)", "def setRange(x='0.0', oldmin='0.0', oldmax='1.0', newmin='0.0', newmax='1.0'):\n\n pass", "def set_fixed(self, section, name, value):\n i = self.parameter_index(section, name)\n self.parameters[i].limits = (value, value)\n self.reset_fixed_varied_parameters()", "def bind_params(self, binding):\n for k, v in binding.items():\n temp = self.free_params.get(k) # it's a name\n if temp:\n temp.val = v\n elif k in self.free_params.values(): # it's a parameter\n k.val = v\n else:\n raise ParameterError(\"Unknown free parameter '{}'\".format(k))", "def set_varied(self, section, name, lower, upper):\n i = self.parameter_index(section, name)\n self.parameters[i].limits = (lower,upper)\n self.reset_fixed_varied_parameters()", "def test_set_parameters_fusion(backend):\n c = Circuit(2)\n c.add(gates.RX(0, theta=0.1234))\n c.add(gates.RX(1, theta=0.1234))\n c.add(gates.CNOT(0, 1))\n c.add(gates.RY(0, theta=0.1234))\n c.add(gates.RY(1, theta=0.1234))\n fused_c = c.fuse()\n np.testing.assert_allclose(fused_c(), c())\n\n c.set_parameters(4 * [0.4321])\n fused_c.set_parameters(4 * [0.4321])\n np.testing.assert_allclose(fused_c(), c())", "def freeze_to(self, n) :\n if n >= len(self.opt.param_groups) :\n raise ValueError(f'The optimizer only has {len(self.opt.param_groups)} parameter groups')\n \n for g in self.opt.param_groups[:n]:\n for l in g['params']:\n l.requires_grad=False\n for g in self.opt.param_groups[n:]: \n for l in g['params']:\n l.requires_grad=True", "def free_credits(self, free_credits):\n \n self._free_credits = free_credits", "def set_active_constraints(self, lagrange_multipliers):\n self.active_constraints_set = True\n self.active_constraints_index = lagrange_multipliers != 0.\n return", "def set_boundary(self, parameter, new_boundaries):\n\n obj = self._model if parameter in self._model.fittingParameters \\\n else self._observed\n name, latex, fget, fset, mode, to_fit, bounds = \\\n obj.fittingParameters[parameter]\n\n bounds = new_boundaries\n\n obj.fittingParameters[parameter] = (\n name, latex, fget, fset, mode, to_fit, bounds)", "def SetPointBounds(self, p_float, p_float_1, p_float_2, p_float_3, p_float_4, p_float_5):\n ...", "def f_bfree(self, f_bfree):\n \n self._f_bfree = f_bfree", "def setVoltageRange(self, lower, upper):\n self.instr.write(\"VOLT:LIM %f\" % float(lower))\n self.instr.write(\"VOLT:RANG %f\" % float(upper))", "def set_memory_range(self, ranges: List[AddrRange]) -> None:\n raise NotImplementedError", "def gluten_free(self, gluten_free):\n if gluten_free is None:\n raise ValueError(\"Invalid value for `gluten_free`, must not be `None`\") # noqa: E501\n\n self._gluten_free = gluten_free" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set dependent parameters in terms of other parameters. Examples >>> from cymr import parameters >>> param_def = parameters.Parameters() >>> param_def.set_dependent(a='exp(b * 2)')
def set_dependent(self, *args: dict[str, str], **kwargs: str) -> None: self.dependent.update(*args, **kwargs)
[ "def eval_dependent(self, param: dict[str, float]) -> dict[str, float]:\n return set_dependent(param, self.dependent)", "def test_dependent(param_def):\n param = {'a': 1, 'b': 2}\n param = param_def.eval_dependent(param)\n assert param == {'a': 1, 'b': 2, 'd': 3.5}", "def set_dependency(a, b):\n d = dependencies.get(a)\n if d == None:\n d = set()\n d.add(b)\n dependencies[a] = d", "def test_setattr_new_dependent_in_specific_set(self):\n self.record.curve_set_values.cs1.dependent.new_curve = [100, 200, 300]\n new_curve = self.record.curve_sets['cs1']['dependent']['new_curve']\n self.assertListEqual([100, 200, 300], new_curve['value'])\n self.assertNotIn('tags', new_curve)\n self.assertNotIn('units', new_curve)", "def set_parameter(self, parameter_str, val):\n\n parameter, index = self.params.lookup(parameter_str)\n self.params.pop(index)\n\n for i, ode in enumerate(self.odes):\n self.odes[i] = cs.substitute(ode, parameter, cs.ssym(val))", "def test_setattr_new_dependent(self):\n self.record.curve_set_values.cs1.new_curve = [100, 200, 300]\n new_curve = self.record.curve_sets['cs1']['dependent']['new_curve']\n self.assertListEqual([100, 200, 300], new_curve['value'])\n self.assertNotIn('tags', new_curve)\n self.assertNotIn('units', new_curve)", "def set_params(self, **values):\n pc, pe = {}, {}\n for k, v in values.items():\n if k.startswith('e_'):\n pe[k[2:]] = v\n elif k.startswith('c_'):\n pc[k[2:]] = v\n else:\n raise ValueError( # pragma: no cover\n f\"Unexpected parameter name '{k}'\")\n self.clus.set_params(**pc)\n self.estimator.set_params(**pe)", "def setup_parameter(self, parameter, value):\n self.__dict__[parameter] = value", "def set_parameters(self, params, **kargs):\n self._solver.set_parameters(params, **kargs)", "def set_param_values(self, x):\n\n for name, value in x.iteritems():\n self.theano_params[name].set_value(value)", "def set(**kwargs):\n init()\n \n global tree, variable, weight, cuts, categories, dataset, name, title\n global variables\n \n\n ## require that tree and variable must be set\n if (not (tree or 'tree' in kwargs) or \n (not (variable or 'variable' in kwargs) and not 'variables' in kwargs) or\n (not (variable or 'variable' in kwargs) and not kwargs['variables'])):\n raise RuntimeError, \"Must provide tree and variable.\"\n\n for arg in ('tree variable weight cuts categories dataset name '\n 'title variables').split():\n if arg in kwargs.keys():\n setattr( sys.modules[__name__], arg, kwargs[arg] )\n del kwargs[arg]\n if kwargs.keys():\n raise RuntimeError, \"Unknown argument(s): %s\" % repr( kwargs.keys() )\n\n if name != 'data' and title == 'data':\n title = name\n\n if variable and not variable in variables:\n variables.append(variable)\n \n if not 'variable' in kwargs:\n variable = variables[0]", "def setParams(self, params):\n return _core.CSumLinear_setParams(self, params)", "def _set_parameter(self, par, val):\n self._parchk(par)\n setattr(self, par, float(val))", "def _signalDependencyComplete(self,dependent,dependency,strReplace=None):\n \n self.actions[dependent].setDependencyAsSatisfied(dependency,self.exeFuncs\n ,self.clients,self.osc,self.options,strReplace=strReplace)", "def set_varied(self, section, name, lower, upper):\n i = self.parameter_index(section, name)\n self.parameters[i].limits = (lower,upper)\n self.reset_fixed_varied_parameters()", "def set_parameter(dictionary, where, parameter):\n if len(where) == 1:\n dictionary[where[0]] = parameter\n else:\n set_parameter(dictionary[where[0]], where[1:], parameter)", "def 
set_dynamics(self, **kwargs):\n self._physics_client.changeDynamics(self.uid, -1, **kwargs)", "def set_param(self, param, value=None, min=None, max=None, vary=None):\n if value is not None:\n self.params[param].value = value\n if min is not None:\n self.params[param].min = min\n if max is not None:\n self.params[param].max = max\n if vary is not None:\n self.params[param].vary = vary", "def set_param_values(self, input_file_name, param_names, param_values):\n\t\t\n # Write the input file.\n bi.write_tmp_input_file(input_file_name, param_names, param_values)\n\n\t\t# Parameters\n self.tau = self._param[0]\n\t\t\n # self.time = self._x\n # self.T_0 = self._param[1] \n # self.T = self.T_0 + self.time * self.tau\n # self.T_end = self.T[-1]\n\t\t\n self.T = self._x \n self.T_0 = self._x[0]\n self.time = (self.T - self.T_0)/(self.tau/60)\n self.T_end = self._x[-1]\n\t\t\n self.n_T_steps = len(self._x)\n\t\t\n # Initialize pyrolysis model \n self.pyro_model = PyrolysisCompetitive(temp_0=self.T_0, temp_end=self.T_end, time=self.time, beta=self.tau, n_points=self.n_T_steps)\n\t\t\n\t\t# Read the parameters from the temporary file \n self.pyro_model.react_reader(\"tmp_proc_0_\"+input_file_name)\n self.pyro_model.param_reader(\"tmp_proc_0_\"+input_file_name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Evaluate dependent parameters based on input parameters.
def eval_dependent(self, param: dict[str, float]) -> dict[str, float]: return set_dependent(param, self.dependent)
[ "def test_dependent(param_def):\n param = {'a': 1, 'b': 2}\n param = param_def.eval_dependent(param)\n assert param == {'a': 1, 'b': 2, 'd': 3.5}", "def evaluate(self,*args,**kwargs):\n \n \n params = self.params.deepcopy()\n \n if len(args)>0 and len(kwargs)>0: raise ValueError(\"Expected either *args or **kwargs but not both.\")\n \n if len(args)==0:\n missing = [k for k in self.get_sampled() if k not in kwargs]\n if len(missing)>0:\n raise ValueError(\"Missing the following parameters: %s\"%missing)\n for k,v in kwargs.items(): params[k]=v\n elif len(args)!=len(self.get_sampled()):\n raise ValueError(\"Expected %i parameters, only got %i.\"%(len(self.get_sampled()),len(args)))\n else:\n for k,v in zip(self.get_sampled().keys(),args): params[k]=v\n \n return params(), params", "def test_params_module():\n # Get the inputs required by the Scales object\n (profile, disp_phases, z0) = get_sim_data()\n\n\n # Test that the governing parameters are computed correctly\n # First, test a single dispersed phase\n model = params.Scales(profile, disp_phases[1])\n check_get_variables(model, z0, 0.15, 0.21724144538674975,\n 0.001724100901081246, 0.22611661456807244, 0.15)\n\n # Second, try a list of dispersed phases, where the dominant phase is\n # not the first one\n particles = [disp_phases[1], disp_phases[0], disp_phases[2]]\n model = params.Scales(profile, particles)\n check_get_variables(model, z0, 0.15, 1.1015134610748201,\n 0.001724100901081246, 0.33764577808309032, 0.15)\n\n # Third, make sure we get the same answer as the previous case if the\n # particles are in a different order (i.e., the original order)\n model = params.Scales(profile, disp_phases)\n check_get_variables(model, z0, 0.15, 1.1015134610748201,\n 0.001724100901081246, 0.33764577808309032, 0.15)\n\n # Using the latest Scales object, check that the other methods return\n # the correct results. 
Since these methods only depend on the values\n # of B, N, and us computed by the get_variables() method, only one case\n # needs to be tested\n assert_approx_equal(model.h_T(z0), 346.40139518559153, significant=6)\n assert_approx_equal(model.h_P(z0), 627.57408319500291, significant=6)\n assert_approx_equal(model.h_S(z0, 0.15), 295.45365120553163,\n significant=6)\n assert_approx_equal(model.lambda_1(z0, 0), 0.74523735215223819,\n significant=6)\n assert_approx_equal(model.u_inf_crit(z0), 0.063723667111426671,\n significant=6)", "def Evaluate(self, , *float):\n ...", "def evaluate(self, variables,functions):\r\n pass", "def test_dynamic(param_def, split_data):\n param = {'c': 2}\n param = param_def.eval_dynamic(param, study=split_data['study'])\n np.testing.assert_array_equal(param['e'][0], np.array([0.5, 1, 1.5]))\n np.testing.assert_array_equal(param['e'][1], np.array([1.5, 1, 0.5]))", "def fit(self, init_params=None, update_params=True, **kwargs):\n if init_params is None:\n init_params = self.params\n\n self.fit_result = self._dofit(init_params, **kwargs)\n print(self.fit_result)\n\n if True or self.fit_result.success and update_params:\n for par, value in zip([p for p in init_params if init_params[p].vary], self.fit_result.x):\n self.params[par].value = value\n\n hess = self.fit_result.hess_inv(self.fit_result.x) if callable(self.fit_result.hess_inv) else np.diag(self.fit_result.hess_inv)\n\n # make sure we only get the finite parameter errors\n self.param_error = np.zeros(len(self.params))\n self.param_error[hess>0] = hess[hess>0] ** 0.5\n\n self.process_fit_results(self.fit_result, self.params)", "def evaluate(self, parameters, config):\n\n # Update local model with global parameters\n self.model.set_weights(parameters)\n\n # Evaluate global model parameters on the local test data and return results\n loss, accuracy = self.model.evaluate(self.x_test, self.y_test)\n num_examples_test = len(self.x_test)\n return loss, num_examples_test, {\"accuracy\": accuracy}", "def test_get_dynamic(param_def, split_data):\n param = {'c': 2}\n param = param_def.eval_dynamic(param, study=split_data['study'])\n\n param1 = param_def.get_dynamic(param, 0)\n np.testing.assert_array_equal(param1['e'], np.array([0.5, 1, 1.5]))\n param2 = param_def.get_dynamic(param, 1)\n np.testing.assert_array_equal(param2['e'], np.array([1.5, 1, 0.5]))", "def evaluate(self, parameters, config):\n\n # Update local model with global parameters\n self.model.set_weights(parameters)\n\n # Get config values\n #steps: int = config[\"val_steps\"]\n\n # Evaluate global model parameters on the local test data and return results\n loss, accuracy = self.model.evaluate(self.x_test, self.y_test, 24)\n num_examples_test = len(self.x_test)\n return loss, num_examples_test, {\"accuracy of global model on local test data (client 2)\": accuracy}", "def evaluator(population, data_to_fit, config):\n is_parametric = config[\"model_generation\"][\"is_parametric\"]\n maximum_param_number = int(config[\"model_generation\"][\"maximum_param_number\"])\n maximum_complexity = int(config[\"model_generation\"][\"maximum_complexity\"])\n\n # split given data on dependent variables and independent one\n independent_var = data_to_fit[:,1:]\n independent_var = tuple(independent_var[:,column] for column in range(independent_var.shape[1]))\n #independent_var = (independent_var[:,0], independent_var[:,1])\n dependent_var = data_to_fit[:,0]\n\n for model in population:\n if (not hasattr(model, \"def_statement\")):\n def_repr = 
DefConstructor.def_constructor(model)\n setattr(model, \"def_statement\", def_repr)\n if (model.number_of_parameters > maximum_param_number or len(model) > maximum_complexity):\n setattr(model, \"is_deprecated\", True)\n continue\n\n import warnings\n\n def fxn():\n warnings.warn(\"deprecated\", DeprecationWarning)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n fxn()\n if (is_parametric == 'True' and (not hasattr(model, \"optimal_params\")) and model.number_of_parameters > 0):\n is_multistart = eval(config[\"model_generation\"][\"multistart\"])\n bounds_included = eval(config[\"model_generation\"][\"bounds_included\"])\n if is_multistart:\n number_of_iterations = eval(config[\"model_generation\"][\"iterations_multistart\"])\n else:\n number_of_iterations = 1\n\n best_fit_params = []\n best_MSE = inf\n for i in range(number_of_iterations):\n try:\n #, model.curve_fit_init_params, model.curve_fit_bounds\n if is_multistart:\n model.curve_fit_init_params = 2 * random.rand(len(model.curve_fit_init_params)) - 1\n if bounds_included:\n popt, _ = curve_fit(model.def_statement, independent_var, dependent_var,\\\n p0 = model.curve_fit_init_params, bounds=model.curve_fit_bounds, \\\n ftol=0.01, xtol=0.01)\n else:\n try:\n popt, _ = curve_fit(model.def_statement, independent_var, dependent_var,\\\n p0 = model.curve_fit_init_params, \\\n ftol=0.01, xtol=0.01)\n except TypeError:\n print(model)\n raise\n\n except RuntimeError:\n popt = [nan for i in range(model.number_of_parameters)]\n except RuntimeWarning:\n popt = [nan for i in range(model.number_of_parameters)]\n except OptimizeWarning:\n popt = [nan for i in range(model.number_of_parameters)]\n except ZeroDivisionError:\n popt = [nan for i in range(model.number_of_parameters)]\n except ValueError:\n popt = [nan for i in range(model.number_of_parameters)]\n except IndexError:\n if hasattr(model, \"backup_handle\"):\n print(\"problem with simplification:\")\n print(model.backup_handle,'-->',model.handle)\n else:\n print(\"problem NOT with simplification\")\n print(model)\n raise\n setattr(model, \"optimal_params\", popt)\n QualityEstimator.quality_estimator([model], data_to_fit, config)\n if not isnan(model.MSE) and best_MSE > model.MSE:\n best_MSE = model.MSE\n best_fit_params = popt\n setattr(model, \"optimal_params\", best_fit_params)\n continue\n else:\n if not hasattr(model, \"optimal_params\"):\n setattr(model, \"optimal_params\", ones(model.number_of_parameters))\n\n if model.number_of_parameters == 0:\n model.def_statement_param = model.def_statement\n\n return population", "def get_params(self):\n # evaluate code if it hasn't already been evaluated\n if not self._evaluated:\n self.evaluate(self._serialization)\n self._evaluated = True\n return self._env", "def eval_dual_grad(x):\n self._param_eta = x[0]\n self._param_v = x[1:]\n dual_opt_input_values = self._dual_opt_input_values(episodes)\n grad = self._f_dual_grad(*dual_opt_input_values)\n eta_grad = np.float(grad[0])\n v_grad = grad[1]\n return np.hstack([eta_grad, v_grad])", "def _eval_fx(fx, x, pinfo, udat, auto_derivatives = False, get_J = False):\n \n nPar = x.size\n xtmp = np.copy(x)\n \n status = ScalePars(xtmp, pinfo)\n\n #print(\"kk\", xtmp.size)\n #print(xtmp)\n \n if(get_J):\n\n #\n # The user does not provide a drivatives engine, compute them\n # automatically. 
Keep your fingers crossed\n #\n if(auto_derivatives):\n dpar = 0.001\n syn = fx(xtmp, udat)\n \n nObs = syn.size\n J = np.zeros((nPar, nObs), dtype='float64')\n\n for ii in range(nPar):\n xtmp = np.copy(x)\n xtmp[ii] += dpar\n status = ScalePars(xtmp, pinfo)\n left = fx(xtmp, udat)\n \n xtmp = np.copy(x)\n xtmp[ii] -= dpar\n status = ScalePars(xtmp, pinfo)\n right = fx(xtmp, udat)\n\n J[ii] = (left - right) / (2*dpar*pinfo[ii].scl)\n \n else: # The user provides derivatives\n syn, J = fx(xtmp, udat, get_J=get_J)\n return syn, J\n\n else:\n #\n # No derivatives are requested\n #\n return fx(xtmp, udat)", "def update_optimizable_vars(self, params):\n if hasattr(params, 'get'):\n inBoth = set(self.optimizableVars.keys())\n inBoth = inBoth.intersection(set(params.keys()))\n for id in inBoth:\n self.set_var_ic(id, params.get(id), update_constants=False)\n elif len(params) == len(self.optimizableVars):\n for ii, id in enumerate(self.optimizableVars.keys()):\n self.set_var_ic(id, params[ii], update_constants=False)\n else:\n raise ValueError('Passed in parameter set does not have the proper '\n 'length!')\n\n self.constantVarValues = [self.evaluate_expr(var.value) for var in\n list(self.constantVars.values())]\n self.constantVarValues = np.array(self.constantVarValues)", "def create_f_compute_paral(X, y):\n def f_compute_paral(pars):\n \"\"\"Function which computes the different scores of sklearn models given\n the parameters.\"\"\"\n return application_sklearn_models(copy.copy(X), copy.copy(y), pars)\n return f_compute_paral", "def test_param(param_def):\n assert param_def.fixed == {'a': 1, 'b': 2, 'c': 3}\n assert param_def.free == {'f': [0, 1]}\n assert param_def.dependent == {'d': '2 + mean([a, b])'}\n assert param_def.dynamic == {'study': {'e': 'distract / c'}}", "def eval(self, symbols=None, weights=None):\n if self._contains_sympy:\n concrete_params = self.param_substitution(symbols, weights)\n return self.post_selected_circuit(concrete_params)\n else:\n return self.post_selected_circuit([torch.cat(p) if len(p) > 0\n else p for p in self.params])", "def _var_change_helper(self, vars_change: bool, inputs: tuple, params: list = None):\n if params is None:\n # get a list of params that are allowed to change\n params = [np for np in self._model.named_parameters() if np[1].requires_grad]\n\n # take a copy\n initial_params = [(name, p.clone()) for (name, p) in params]\n\n # run a training step\n self._train_step(inputs)\n\n # check if variables have changed\n for (_, p0), (name, p1) in zip(initial_params, params):\n try:\n if vars_change:\n assert not torch.equal(p0, p1)\n else:\n assert torch.equal(p0, p1)\n except AssertionError:\n raise ValueError( # error message\n \"{var_name} {msg}\".format(\n var_name=name,\n msg='did not change' if vars_change else 'changed'\n )\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Evaluate dynamic parameters based on data fields.
def eval_dynamic( self, param: dict[str, Any], study: Optional[dict[str, list[ArrayLike]]] = None, recall: Optional[dict[str, list[ArrayLike]]] = None ) -> dict[str, Union[float, list[ArrayLike]]]: if 'study' in self.dynamic and study is not None: param = set_dynamic(param, study, self.dynamic['study']) if 'recall' in self.dynamic and recall is not None: param = set_dynamic(param, recall, self.dynamic['recall']) return param
[ "def test_dynamic(param_def, split_data):\n param = {'c': 2}\n param = param_def.eval_dynamic(param, study=split_data['study'])\n np.testing.assert_array_equal(param['e'][0], np.array([0.5, 1, 1.5]))\n np.testing.assert_array_equal(param['e'][1], np.array([1.5, 1, 0.5]))", "def test_get_dynamic(param_def, split_data):\n param = {'c': 2}\n param = param_def.eval_dynamic(param, study=split_data['study'])\n\n param1 = param_def.get_dynamic(param, 0)\n np.testing.assert_array_equal(param1['e'], np.array([0.5, 1, 1.5]))\n param2 = param_def.get_dynamic(param, 1)\n np.testing.assert_array_equal(param2['e'], np.array([1.5, 1, 0.5]))", "def evaluate(field):\n pass", "def extract_data(val, field_evals):\n\tif not field_evals:\n\t\treturn val\n\tfor f in field_evals:\n\t\tval = f(val)\n\treturn val", "def generate_field_evals(fields):\n\ttry:\n\t\tevals = []\n\t\tfields = [f for f in fields.split('/') if f]\n\t\tfor f in fields:\n\t\t\tif '[' in f:\n\t\t\t\tfield_name, rest = f.split('[')\n\t\t\t\tslot_num = string.atoi(rest[:rest.find(']')])\n\t\t\t\tevals.append(_array_eval(field_name, slot_num))\n\t\t\telse:\n\t\t\t\tevals.append(_field_eval(f))\n\t\treturn evals\n\texcept Exception, e:\n\t\traise Exception(\"cannot parse field reference [%s]: %s\" % (fields, str(e)))", "def evaluate(self,*args,**kwargs):\n \n \n params = self.params.deepcopy()\n \n if len(args)>0 and len(kwargs)>0: raise ValueError(\"Expected either *args or **kwargs but not both.\")\n \n if len(args)==0:\n missing = [k for k in self.get_sampled() if k not in kwargs]\n if len(missing)>0:\n raise ValueError(\"Missing the following parameters: %s\"%missing)\n for k,v in kwargs.items(): params[k]=v\n elif len(args)!=len(self.get_sampled()):\n raise ValueError(\"Expected %i parameters, only got %i.\"%(len(self.get_sampled()),len(args)))\n else:\n for k,v in zip(self.get_sampled().keys(),args): params[k]=v\n \n return params(), params", "def _evaluate_all(self, func, data, data_type, model):\n\n #\n # Cast to the necessary argument type expected by the function\n #\n if data_type == 'ndarray':\n data_arg = data.values\n data_arg.reshape(-1, 1)\n elif (isinstance(data, pd.DataFrame) and len(data.columns) == 1):\n # data_arg = data\n data_arg = data[data.columns[0]]\n else:\n data_arg = data\n\n if isinstance(model, dict):\n out = func(data_arg, **model) # Model as keyword arguments\n elif isinstance(model, (list, tuple)):\n out = func(data_arg, *model) # Model as positional arguments\n else:\n out = func(data_arg, model) # Model as an arbitrary object\n\n return out", "def test_param(param_def):\n assert param_def.fixed == {'a': 1, 'b': 2, 'c': 3}\n assert param_def.free == {'f': [0, 1]}\n assert param_def.dependent == {'d': '2 + mean([a, b])'}\n assert param_def.dynamic == {'study': {'e': 'distract / c'}}", "def handle_unvalidated_param_values_helper( self, inputs, input_values, app, context=None, prefix=\"\" ):\n context = ExpressionContext( input_values, context )\n for input in inputs.itervalues():\n if isinstance( input, Repeat ): \n for i, d in enumerate( input_values[ input.name ] ):\n rep_prefix = prefix + \"%s %d > \" % ( input.title, i + 1 )\n self.handle_unvalidated_param_values_helper( input.inputs, d, app, context, rep_prefix )\n elif isinstance( input, Conditional ):\n values = input_values[ input.name ]\n current = values[\"__current_case__\"]\n # NOTE: The test param doesn't need to be checked since\n # there would be no way to tell what case to use at\n # workflow build time. 
However I'm not sure if we are\n # actually preventing such a case explicately.\n self.handle_unvalidated_param_values_helper( input.cases[current].inputs, values, app, context, prefix )\n else:\n # Regular tool parameter\n value = input_values[ input.name ]\n if isinstance( value, UnvalidatedValue ):\n try:\n # Convert from html representation\n if value.value is None:\n # If value.value is None, it could not have been\n # submited via html form and therefore .from_html\n # can't be guaranteed to work\n value = None\n else:\n value = input.from_html( value.value, None, context )\n # Do any further validation on the value\n input.validate( value, None )\n except Exception, e:\n # Wrap an re-raise any generated error so we can\n # generate a more informative message\n v = input.value_to_display_text( value, self.app )\n message = \"Failed runtime validation of %s%s (%s)\" \\\n % ( prefix, input.label, e )\n raise LateValidationError( message )\n input_values[ input.name ] = value", "def check_and_update_param_values_helper( self, inputs, values, trans, messages, context=None, prefix=\"\" ):\n context = ExpressionContext( values, context )\n for input in inputs.itervalues():\n # No value, insert the default\n if input.name not in values:\n messages.append( prefix + input.label )\n values[input.name] = input.get_initial_value( trans, context )\n # Value, visit recursively as usual\n else:\n if isinstance( input, Repeat ):\n for i, d in enumerate( values[ input.name ] ):\n rep_prefix = prefix + \"%s %d > \" % ( input.title, i + 1 )\n self.check_and_update_param_values_helper( input.inputs, d, trans, messages, context, rep_prefix )\n elif isinstance( input, Conditional ):\n group_values = values[ input.name ]\n current = group_values[\"__current_case__\"]\n self.check_and_update_param_values_helper( input.cases[current].inputs, group_values, trans, messages, context, prefix )\n else:\n # Regular tool parameter, no recursion needed\n pass", "def evaluate(self, variables,functions):\r\n pass", "def test_default_is_dynamic_return_value(self):\n for c in (*self.case_data, *self.case_data_extras):\n with self.subTest(msg=repr(c)):\n self.assertEqual(\n c.is_dynamic,\n utils.default_is_dynamic(\n element_default=c.q_default, element_type=c.q_type\n ),\n )", "def assign_dynamic_attributes(self, parsed_response):\n for field, value in parsed_response.items():\n func = self.schema.get(field)\n if not func:\n raise ValueError(\n 'Unknown field \"{}\" for class {}'\n ''.format(field, self.__class__.__name__))\n self[field] = func(value)", "def process_epidemic_parameters(self):", "def _params_validate_and_generate(self) -> None:\n # default params\n if \"d\" not in self.params:\n self.params[\"d\"] = 3\n\n # calculated params\n self.params[\"T\"] = -1 # -1 until a stabilizer round is added!\n self.params[\"num_readout\"] = -1 # -1 until a logical readout is performed!\n self.params[\n \"num_lattice_readout\"\n ] = -1 # -1 until a lattice readout is performed!\n self.params[\"num_data\"] = self.params[\"d\"]\n self.params[\"num_syn\"] = self.params[\"d\"] - 1", "def validate_numeric_entity(values: List[Dict], invalid_trigger: str = None, key: str = None,\n support_multiple: bool = True, pick_first: bool = False, constraint=None, var_name=None,\n **kwargs) -> SlotValidationResult:\n \n values_length = len(values)\n count = 0\n filled=False\n partially_filled=False\n trigger=\"\"\n default_response=(False,False,trigger,{})\n\n params={key:[]}\n \n\n if values_length==0:\n \n return 
build_response(default_response)\n \n\n for doc in values:\n \n try: \n exp = constraint.replace(var_name,str(doc[\"value\"]))\n result = eval(exp)\n\n if result:\n count+=1\n params[key].append(doc[\"value\"])\n \n else:\n trigger=invalid_trigger\n\n except NameError:\n # if the var_name is not found in the constraint then return default response\n return build_response(default_response)\n\n\n \n \n \n if count==values_length:\n # if all the values matches the constraints\n filled = True\n partially_filled = False\n else:\n partially_filled=True\n\n \n if len(params[key])==0:\n params={}\n elif pick_first and len(params[key])>0:\n #pick the first element in list of params\n params[key]=params[key][0]\n \n response = (filled,partially_filled,trigger,params)\n \n return build_response(response)", "def check_dyn_prop(records):\n return [check_dyn_prop_record(record) for record in records]", "def forcing_dynamic_equations(forcingEquations, parameters, qDict, uDict):\n\n F_full = forcingEquations.subs(uDict).subs(qDict).subs(parameters).expand()\n \n dynamicEquations = F_full[4:7]\n \n return dynamicEquations", "def test_dependent(param_def):\n param = {'a': 1, 'b': 2}\n param = param_def.eval_dependent(param)\n assert param == {'a': 1, 'b': 2, 'd': 3.5}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests downloads multiple files based on file names strings.
def test_downloading_two_named_files(self): files = ("Gp_xr_1m.txt", "Gp_xr_5m.txt") expected = [call(files[0], self.fmanager.remote), call(files[1], self.fmanager.remote)] self.fmanager.download(files) self.assertEqual(expected, self.mocked_method.call_args_list)
[ "def check_files_by_urls(tmpdir, base_url, url_iterable):\n # checking files by url\n download_dir = tmpdir.mkdir(\"download_url\")\n for url in url_iterable:\n _, filename = os.path.split(os.path.relpath(url, base_url))\n download_file = download_dir.join(filename)\n wget.download(url, str(download_file))\n with open(str(download_file)) as f:\n assert f.read() == filename\n download_dir.remove()", "def download_files(directory, url_list):\n\n for url in url_list:\n file = directory + url.split(\"/\", -1)[-1]\n try:\n urlreq.urlretrieve(url, file)\n except URLError as e:\n print(e)", "def download_files(urls, save_dir=\"tmp/\"):\n for url in urls:\n download_file(url, save_dir, None)", "def test_downloading_two_files_with_template(self): \n strings = ('1', '2')\n expected = [call('Gp_xr_1m.txt', self.fmanager.remote), \n call('Gp_xr_2m.txt', self.fmanager.remote)]\n self.fmanager.download_by_template(strings)\n self.assertEqual(expected, self.mocked_method.call_args_list)", "def test_url_download_tar_file(eld):\n path = eld.get_data(url=\"https://ndownloader.figshare.com/files/14615411\")\n assert \"abc.txt\" in os.listdir(path)", "def testGetFiles(self):\n fldr = path.join(thispath, \"res\")\n fns = getfiles(fldr, \"NamedL\", True)\n self.assertEqual(\"NamedList.xlsx\", fns[0], \"the only excel file there\")\n fns = getfiles(fldr, \"List\")\n self.assertEqual(\n appathsep(fldr) + \"NamedList.xlsx\", fns[0],\n \"the only excel file there\")\n fns = getfiles(fldr, nameonly=True)\n fnx = listdir(fldr)\n self.assertEqual(len(fnx), len(fns), \"the count of files\")\n fns = set(iter(fns))\n self.assertTrue(u\"厉害为国為幗.txt\" in fns,\n \"utf-8 based system can return mixing charset\")", "def download_files():\n log.info('Getting files on the web')\n response = urllib2.urlopen(full_url)\n page = response.read()\n\n # get all files' download urls\n soup = BeautifulSoup(page, 'html.parser')\n files_urls = [href for href in [a.get('href') for a in soup.find_all('a')]\n if href and href.find('xlsb') >= 0]\n\n base, _ = os.path.split(files_urls[0])\n files_urls = [url for url in files_urls\n if url.find(base) == 0 >= 0]\n\n if not files_urls:\n raise CannotFindFileToDownload('Cannot find any file')\n\n if not os.path.isdir(tmp_dir):\n os.mkdir(tmp_dir)\n\n for url in files_urls: # download all xlsb files\n f_in = urllib2.urlopen('%s' % url)\n _, filename = os.path.split(url)\n if os.path.exists(filename.split('.')[0] + \".csv\"):\n with open(os.path.join(tmp_dir, filename), 'wb') as f_out:\n f_out.write(f_in.read())\n os.system(\"export HOME=/tmp && libreoffice --headless --convert-to csv\"\n \" %s/%s --outdir %s --infilter=CSV:44,34,UTF8\"\n % (tmp_dir, filename, tmp_dir)) # convert from xlsb to csv format with UTF-8 encoding\n os.system(\"rm -f %s/%s\" % (tmp_dir, filename)) # delete original xlsb files\n log.info('File %s downloaded and converted', filename)\n\n csv_files = os.listdir(tmp_dir)\n\n return csv_files", "def download_files(filenames=None,\n instruments=None, \n list_files=False,\n level='l2', \n insitu=True, \n iuvs=False, \n new_files=False, \n start_date='2014-01-01', \n end_date='2020-01-01', \n update_prefs=False,\n only_update_prefs=False, \n exclude_orbit_file=False,\n local_dir=None,\n unittest=False,\n crustal_download=True,\n auto_yes=False):\n \n import os\n\n # Check for orbit num rather than time string\n if isinstance(start_date, int) and isinstance(end_date, int):\n start_date, end_date = orbit_time(start_date, end_date)\n start_date = parse(start_date)\n end_date = 
parse(end_date)\n start_date = start_date.replace(hour=0, minute=0, second=0)\n end_date = end_date.replace(day=end_date.day + 1, hour=0, minute=0, second=0)\n start_date = start_date.strftime('%Y-%m-%d')\n end_date = end_date.strftime('%Y-%m-%d')\n \n if update_prefs or only_update_prefs:\n utils.set_root_data_dir()\n if only_update_prefs:\n return\n \n public = utils.get_access()\n if not public:\n utils.get_uname_and_password()\n\n if filenames is None:\n if insitu and iuvs:\n print(\"Can't request both INSITU and IUVS in one query.\")\n return\n if not insitu and not iuvs:\n print(\"If not specifying filename(s) to download, Must specify either insitu=True or iuvs=True.\")\n return\n \n if instruments is None:\n instruments = ['kp']\n if insitu:\n level = 'insitu'\n if iuvs:\n level = 'iuvs'\n \n for instrument in instruments:\n # Build the query to the website\n query_args = []\n query_args.append(\"instrument=\" + instrument)\n query_args.append(\"level=\" + level)\n if filenames is not None:\n query_args.append(\"file=\" + filenames)\n query_args.append(\"start_date=\" + start_date)\n query_args.append(\"end_date=\" + end_date)\n if level == 'iuvs':\n query_args.append(\"file_extension=tab\")\n if local_dir is None:\n mvn_root_data_dir = utils.get_root_data_dir()\n else:\n mvn_root_data_dir = local_dir\n \n data_dir = os.path.join(mvn_root_data_dir, 'maven', 'data', 'sci', instrument, level)\n \n query = '&'.join(query_args)\n \n s = utils.get_filenames(query, public)\n \n if not s:\n print(\"No files found for {}.\".format(instrument))\n\n if s:\n s = str(s)\n s = s.split(',')\n\n if not crustal_download:\n s = [f for f in s if 'crustal' not in f]\n \n if list_files:\n for f in s:\n print(f)\n continue\n \n if new_files:\n s = utils.get_new_files(s, data_dir, instrument, level)\n\n if not auto_yes:\n if not unittest:\n print(\"Your request will download a total of: \" + str(len(s)) + \" files for instrument \" +\n str(instrument))\n print('Would you like to proceed with the download? ')\n valid_response = False\n cancel = False\n while not valid_response:\n response = (input('(y/n) > '))\n if response == 'y' or response == 'Y':\n valid_response = True\n elif response == 'n' or response == 'N':\n print('Cancelled download. Returning...')\n valid_response = True\n cancel = True\n else:\n print('Invalid input. 
Please answer with y or n.')\n\n if cancel:\n continue\n\n if not exclude_orbit_file:\n print(\"Before downloading data files, checking for updated orbit # file from naif.jpl.nasa.gov\")\n print(\"\")\n utils.get_orbit_files()\n \n i = 0\n utils.display_progress(i, len(s))\n for f in s:\n i += 1\n full_path = utils.create_dir_if_needed(f, data_dir, level)\n utils.get_file_from_site(f, public, full_path)\n utils.display_progress(i, len(s))\n\n return", "def download_multiple_file(*url):\r\n # from multiprocessing.pool import ThreadPool\r\n # NU MERGE\r\n path, url = url\r\n r = requests.get(url, stream=True)\r\n with open(path, 'wb') as f:\r\n for ch in r:\r\n f.write(ch)", "def download(all):\n print(\"Downloading\")", "def test_miscellaneous_album_download_in_multiple_dirs(self):\n self.add_mp3(artist='Artist', title='Title 1',\n filename='song1.mp3', path='Artist/Tracks')\n self.add_mp3(artist='Artist', title='Title 2',\n filename='song2.mp3', path='Artist/MoreTracks')\n self.run_add()\n\n self.assertEqual(Album.objects.count(), 1)\n album = Album.objects.get()\n\n self.assertEqual(Song.objects.count(), 2)\n\n response = self.client.get(reverse('exordium:albumdownload', args=(album.pk,)))\n self.assertEqual(response.status_code, 200)\n self.assertIn('filenames', response.context)\n self.assertIn('zip_file', response.context)\n self.assertIn('zip_url', response.context)\n self.assertEqual(\n sorted(response.context['filenames']),\n sorted(['Artist/Tracks/song1.mp3', 'Artist/MoreTracks/song2.mp3'])\n )\n self.assertEqual(response.context['zip_file'], 'Artist_-_%s.zip' % (App.norm_filename(album.name)))\n self.assertContains(response, 'Artist/Tracks/song1.mp3<')\n self.assertContains(response, 'Artist/MoreTracks/song2.mp3<')\n self.assertContains(response, response.context['zip_file'])\n self.assertContains(response, response.context['zip_url'])\n self.assertContains(response, 'meta http-equiv')\n zip_file = os.path.join(self.zipfile_path, response.context['zip_file'])\n self.assertEqual(os.path.exists(zip_file), True)\n\n with zipfile.ZipFile(zip_file, 'r') as zf:\n self.assertEqual(\n sorted(zf.namelist()),\n sorted(['Artist/Tracks/song1.mp3', 'Artist/MoreTracks/song2.mp3'])\n )", "async def download(urls=URLS):\n result = {}\n\n # download all files in parallel and wait for all results\n http_client = AsyncHTTPClient()\n files = await multi({\n key: http_client.fetch(url)\n for key, url in map(raw_url, urls)\n })\n\n # process all downloaded files sequentially\n for key, response in files.items():\n result[key] = parse_response(response)\n\n return result", "def files_present(url, fileglobs, _all=True):\n any_present = False\n all_present = True\n fileglobsstr = ','.join(fileglobs)\n if fileglobs:\n cmd = (f'wget -r -l 10 -nd -np --spider --accept={fileglobsstr} {url}')\n reply, err, rc = sub_proc_exec(cmd)\n err = err.replace('%2B', '+')\n if rc == 0:\n for fileglob in fileglobs:\n regx = fileglob_to_regx(fileglob)\n res = re.findall(regx, err)\n any_present = any_present or res != []\n all_present = all_present and res != []\n if not fileglobs:\n return True\n if _all:\n return all_present\n else:\n return any_present", "def download_results(download_path, trackers='all'):\n print('Using download path ''{}'''.format(download_path))\n\n os.makedirs(download_path, exist_ok=True)\n\n if isinstance(trackers, str):\n if trackers == 'all':\n trackers = {k: 'all' for k in results_link_dict.keys()}\n elif trackers in results_link_dict:\n trackers = {trackers: 'all'}\n else:\n raise 
Exception('tracker_list must be set to ''all'', a tracker name, or be a dict')\n elif isinstance(trackers, dict):\n pass\n else:\n raise Exception('tracker_list must be set to ''all'', or be a dict')\n\n for trk, runfiles in trackers.items():\n trk_path = os.path.join(download_path, trk)\n if not os.path.exists(trk_path):\n os.makedirs(trk_path)\n\n if runfiles == 'all':\n for params, fileid in results_link_dict[trk].items():\n print('Downloading: {}/{}'.format(trk, params))\n _download_file(fileid, os.path.join(trk_path, params))\n elif isinstance(runfiles, (list, tuple)):\n for p in runfiles:\n for params, fileid in results_link_dict[trk].items():\n if re.match(r'{}(|_(\\d\\d\\d)).zip'.format(p), params) is not None:\n print('Downloading: {}/{}'.format(trk, params))\n _download_file(fileid, os.path.join(trk_path, params))\n\n else:\n raise Exception('tracker_list values must either be set to ''all'', or be a list of param names')", "def select_data_files(file_names):\n\n new_file_names = []\n for name in file_names:\n if name.endswith('.csv'):\n new_file_names.append(name)\n elif name.endswith('.xlsx'):\n new_file_names.append(name)\n else:\n pass\n return new_file_names", "def test_with_files(self):\n self.handler = HighfiveHandlerMock(\n Payload({}), repo_config=self.fakes['config']['individual_files']\n ).handler\n (chosen_reviewers, mentions) = self.choose_reviewers(\n self.fakes['diff']['travis-yml'], \"nikomatsakis\"\n )\n assert set([\"pnkfelix\", \"nrc\", \"aturon\"]) == chosen_reviewers\n assert set() == mentions", "def test_multiple_files(self):\n np = self.compile_test(['multiple_files.sv', 'include_a/include_a.sv', 'include_b/include_b.sv'])\n path = np.get_any_path(Waypoints('data_i', 'data_o'))\n self.assertTrue(not path.empty())\n\n np = self.compile_test(['include_a/include_a.sv', 'include_b/include_b.sv', 'multiple_files.sv'])\n path = np.get_any_path(Waypoints('data_i', 'data_o'))\n self.assertTrue(not path.empty())", "def download_urls(urls, savedir):\n results = []\n if len(urls) != 0:\n\n for u in urls:\n if os.path.exists(savedir+u.split(\"/\")[-1]):\n results.append(savedir+u.split(\"/\")[-1])\n continue\n try:\n res = urllib.request.urlretrieve(u, savedir+u.split(\"/\")[-1])\n results.append(res[0])\n print(\"download success {:s}\".format(res[0]))\n except:\n print(\"couldn't download {:s}\".format(u))\n\n return results", "def test_correct_request_returns_file_download(self):\n response = self.client.get(get_url())\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Disposition'],\n f'attachment; filename=\"filtered-testdump\"')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests downloads a single file based on a template generated name.
def test_downloading_a_single_file_with_template(self): self.fmanager.download_by_template('1') self.mocked_method.assert_called_once_with('Gp_xr_1m.txt', self.fmanager.remote)
[ "def test_download_file(self):\r\n purchase_url = '/' + self.purchase_uuid\r\n response = self.app.get(purchase_url)\r\n assert response.data == 'Test content\\n'\r\n assert response.status_code == 200", "def test_correct_request_returns_file_download(self):\n response = self.client.get(get_url())\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Disposition'],\n f'attachment; filename=\"filtered-testdump\"')", "def test_downloading_two_files_with_template(self): \n strings = ('1', '2')\n expected = [call('Gp_xr_1m.txt', self.fmanager.remote), \n call('Gp_xr_2m.txt', self.fmanager.remote)]\n self.fmanager.download_by_template(strings)\n self.assertEqual(expected, self.mocked_method.call_args_list)", "def test_arbitrary_url_file_download(eld):\n file = eld.get_data(url=\"http://www.google.com/robots.txt\")\n assert os.path.isfile(file)", "def test_url_download_tar_file(eld):\n path = eld.get_data(url=\"https://ndownloader.figshare.com/files/14615411\")\n assert \"abc.txt\" in os.listdir(path)", "def test_download_and_keep_file():\n file_name = download_and_read_file(\n \"https://dcc.ligo.org/public/0157/P1800370/005/GW170817_GWTC-1.hdf5\",\n outdir=\".\", read_file=False\n )\n assert os.path.isfile(file_name)", "def download_template(self):\n return", "def test_url_download_txt_file_with_content_disposition(eld):\n path = eld.get_data(url=\"https://ndownloader.figshare.com/files/7275959\")\n assert path.endswith(\"example.csv\") and os.path.isfile(path)", "def test_get_file_temporary_url(self):\n response = self.client.open(\n '/api/v1/user_uploads/{realm_id_str}/{filename}'.format(realm_id_str=1, filename='4e/m2A3MSqFnWRLUf9SaPzQ0Up_/zulip.txt'),\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_get_local_file_for_report(self):\n downloader = self.downloader\n report_name = FAKE.file_path()\n local_name = downloader.get_local_file_for_report(report_name)\n self.assertEqual(local_name, report_name)", "def test_get_translation_file(self):\r\n self.test_resource_edit()\r\n url = reverse('download_for_translation', args=[self.project.slug, self.resource.slug, self.language.code])\r\n resp = self.client['maintainer'].post(url)\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertTrue('project1_resource1_pt_BR.po' in resp['Content-Disposition'])", "def test_get_local_file_for_report(self):\n downloader = self.create_gcp_downloader_with_mocked_values()\n report_name = FAKE.file_path()\n local_name = downloader.get_local_file_for_report(report_name)\n self.assertEqual(local_name, report_name)", "def test_fetch_tarball_and_keep_single_file():\n file_name = fetch_open_samples(\n \"GW190424_180648\", read_file=False, outdir=\".\", unpack=True,\n path=\"GW190424_180648.h5\", catalog=\"GWTC-2\",\n download_kwargs={\"timeout\": 60}\n )\n assert os.path.isfile(\"./GW190424_180648.h5\")\n assert os.path.isfile(file_name)", "def maybe_download(filename, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urlretrieve(url + filename, filename)\n else:\n statinfo = os.stat(filename)\n print(\"Using existing file of size %d\" % (statinfo.st_size))\n '''\n if statinfo.st_size == expected_bytes:\n print('Found and verified %s' % filename)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filename + '. 
Can you get to it with a browser?')\n '''\n return filename", "def download_fixture_file(source_url, local_path):\n if os.path.exists(local_path):\n return\n with open(local_path, \"wb\") as f:\n response = requests.get(source_url, stream=True)\n assert (\n response.status_code == 200\n ), \"Fixture file with url: {} not found\".format(source_url)\n for chunk in response.iter_content(chunk_size=1048576):\n f.write(chunk)\n f.flush()\n f.close()", "def download():\n return render_template(\"meta/download.html\")", "def test02DownloadFileNotOnServer(self):\n with self.assertRaises(ObtainError):\n self.om.download_file(\"INTL.IVYDB.{}D.zip\".format(self.bad_day))", "def test_get_pot_file(self):\r\n self.test_resource_edit()\r\n url = reverse('download_pot', args=[self.project.slug, self.resource.slug])\r\n resp = self.client['registered'].get(url, follow=True)\r\n self.assertContains(resp, 'msgid', status_code=200)", "def download_url():" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests downloads multiple files based on template generated names.
def test_downloading_two_files_with_template(self): strings = ('1', '2') expected = [call('Gp_xr_1m.txt', self.fmanager.remote), call('Gp_xr_2m.txt', self.fmanager.remote)] self.fmanager.download_by_template(strings) self.assertEqual(expected, self.mocked_method.call_args_list)
[ "def test_downloading_a_single_file_with_template(self): \n self.fmanager.download_by_template('1')\n self.mocked_method.assert_called_once_with('Gp_xr_1m.txt', \n self.fmanager.remote)", "def pressurcooker_test_files():\n return download_fixture_files(PRESSURECOOKER_SUBS_FIXTURES)", "def test_downloading_two_named_files(self): \n files = (\"Gp_xr_1m.txt\", \"Gp_xr_5m.txt\")\n expected = [call(files[0], self.fmanager.remote), \n call(files[1], self.fmanager.remote)]\n self.fmanager.download(files)\n self.assertEqual(expected, self.mocked_method.call_args_list)", "def check_files_by_urls(tmpdir, base_url, url_iterable):\n # checking files by url\n download_dir = tmpdir.mkdir(\"download_url\")\n for url in url_iterable:\n _, filename = os.path.split(os.path.relpath(url, base_url))\n download_file = download_dir.join(filename)\n wget.download(url, str(download_file))\n with open(str(download_file)) as f:\n assert f.read() == filename\n download_dir.remove()", "def get_files_to_generate(self):\r\n pass", "def test_url_download_tar_file(eld):\n path = eld.get_data(url=\"https://ndownloader.figshare.com/files/14615411\")\n assert \"abc.txt\" in os.listdir(path)", "def test_files_exist(self):\n for filename in template_files:\n print(filename)\n self.assertTrue(\n os.path.exists(os.path.join(self.builtdir, filename))\n )", "def test_download_file(self):\r\n purchase_url = '/' + self.purchase_uuid\r\n response = self.app.get(purchase_url)\r\n assert response.data == 'Test content\\n'\r\n assert response.status_code == 200", "def download(all):\n print(\"Downloading\")", "def test_multiple_files(self):\n np = self.compile_test(['multiple_files.sv', 'include_a/include_a.sv', 'include_b/include_b.sv'])\n path = np.get_any_path(Waypoints('data_i', 'data_o'))\n self.assertTrue(not path.empty())\n\n np = self.compile_test(['include_a/include_a.sv', 'include_b/include_b.sv', 'multiple_files.sv'])\n path = np.get_any_path(Waypoints('data_i', 'data_o'))\n self.assertTrue(not path.empty())", "def GenerateTestURLs(self):\n for fz_key, fz_val in GenerateFuzzedHeaders():\n headers = {}\n headers.update(STATIC_RESPONSE_HEADERS)\n headers.update({fz_key: [fz_val]})\n json_headers = json.dumps(headers)\n b64_headers = base64.b64encode(json_headers)\n url = \"http://%s/default?respBody=%s&respHeader=%s\" % (TEST_SERVER,\n base64.b64encode(STATIC_RESPONSE_BODY), b64_headers)\n yield (json_headers, url)", "def download_geonames():\n DATA_ROOT.mkdir(exist_ok=True)\n for url, name in FILES:\n path = (DATA_ROOT / name)\n if path.exists():\n continue\n print(\"downloading {}\".format(url))\n path.write_bytes(requests.get(url).content)", "def test_get_file_multiple(coordination_args):\n accession = \"GCA_testmultiple\"\n get_genbank_annotations.get_genbank_file(\n accession, coordination_args[\"args\"],\n )", "def _download_datasets():\n def filepath(*args):\n return abspath(join(dirname(__file__), *args))\n for name in DATASETS_TO_DOWNLOAD:\n data = Dataset(name)\n url = data.url\n filename = filepath(data.filename)\n print(\"retrieving data {0} -> {1}\".format(url, filename))\n urlretrieve(url, filename)\n with open(filepath('listing.txt'), 'w') as f:\n f.write('\\n'.join(DATASETS_TO_DOWNLOAD) + '\\n')", "def test_correct_request_returns_file_download(self):\n response = self.client.get(get_url())\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Disposition'],\n f'attachment; filename=\"filtered-testdump\"')", "def module_files(self, module_name, field_name):\n for sample in 
self.grp.get_samples():\n try:\n ar = sample.analysis_result(module_name).get()\n arf = ar.field(field_name).get()\n local_path = join(TMP_DIR, basename(arf.get_referenced_filename()))\n if not isfile(local_path):\n local_path = arf.download_file(filename=local_path)\n except HTTPError1:\n continue\n except Exception:\n continue\n yield sample.name, local_path", "def test_miscellaneous_album_download_in_multiple_dirs(self):\n self.add_mp3(artist='Artist', title='Title 1',\n filename='song1.mp3', path='Artist/Tracks')\n self.add_mp3(artist='Artist', title='Title 2',\n filename='song2.mp3', path='Artist/MoreTracks')\n self.run_add()\n\n self.assertEqual(Album.objects.count(), 1)\n album = Album.objects.get()\n\n self.assertEqual(Song.objects.count(), 2)\n\n response = self.client.get(reverse('exordium:albumdownload', args=(album.pk,)))\n self.assertEqual(response.status_code, 200)\n self.assertIn('filenames', response.context)\n self.assertIn('zip_file', response.context)\n self.assertIn('zip_url', response.context)\n self.assertEqual(\n sorted(response.context['filenames']),\n sorted(['Artist/Tracks/song1.mp3', 'Artist/MoreTracks/song2.mp3'])\n )\n self.assertEqual(response.context['zip_file'], 'Artist_-_%s.zip' % (App.norm_filename(album.name)))\n self.assertContains(response, 'Artist/Tracks/song1.mp3<')\n self.assertContains(response, 'Artist/MoreTracks/song2.mp3<')\n self.assertContains(response, response.context['zip_file'])\n self.assertContains(response, response.context['zip_url'])\n self.assertContains(response, 'meta http-equiv')\n zip_file = os.path.join(self.zipfile_path, response.context['zip_file'])\n self.assertEqual(os.path.exists(zip_file), True)\n\n with zipfile.ZipFile(zip_file, 'r') as zf:\n self.assertEqual(\n sorted(zf.namelist()),\n sorted(['Artist/Tracks/song1.mp3', 'Artist/MoreTracks/song2.mp3'])\n )", "def download_files(urls, save_dir=\"tmp/\"):\n for url in urls:\n download_file(url, save_dir, None)", "def test_api_v3_files_get(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method called when unhooking this callback.
def doUnhook(self, handler): pass
[ "def unhook(self):\n raise NotImplementedError", "def remove_callback(self):\n\n\t\tself.callback = None\n\n\t\treturn", "def onDeinit(self):", "def __disconnect_hook(self, hook_name):\n p = getattr(self, \"__%s\" % hook_name, None)\n if p:\n GPS.Hook(hook_name).remove(p)\n p = getattr(self, hook_name, None)\n if p:\n GPS.Hook(hook_name).remove(p)", "def unbind(self, event, callback):\r\n self._emitter.unsubscribe(event, callback)", "def unsubscribe(self, handle, callback=None):\r\n pass", "def unregister(self, callback):\n for n, reg in enumerate(self.__registry):\n if reg[\"callback\"] == callback:\n del self.__registry[n]\n self.driftwood.log.info(\"Tick\", \"unregistered\",\n callback.__qualname__)", "def removeEventCallback(self, *args) -> \"void\":\n return _coin.SoEventCallback_removeEventCallback(self, *args)", "def rem_active_handler(self,handler):\r\n self.active_handlers.remove(handler)", "def remove_update_callback(self, token):\n assert self.raw_ptr is not None\n\n lib.srl__consul__remove_update_callback(self.raw_ptr, token)\n\n self.update_callbacks.pop(token, None)", "def removeEventCallback(self, *args):\n return _coin.SoEventCallback_removeEventCallback(self, *args)", "def unloadExtension(self):\n # type: () -> ()", "def after_unmarshal(self):\n # don't place any code here, just catch call if not overridden by\n # subclass\n pass", "def clear_default_callback(self):\n self.default_callback = None", "def remove(self, callback):\n self._listeners.remove(callback)", "def removeCallback(self, f: 'SoCallbackListCB *', userdata: 'void *'=None) -> \"void\":\n return _coin.SoCallbackList_removeCallback(self, f, userdata)", "def reset(self):\n with self.lock:\n self.hooks = dict()", "def stop(self):\n if self._query_hook:\n sqla_event.remove(self._connectable, 'before_cursor_execute', self._query_hook)\n self._query_hook = None\n if self._replace_new_patch_aliases_config:\n self._replace_new_patch_aliases_config.__exit__(None, None, None)\n self._replace_new_patch_aliases_config = None", "def unset_hook(f: Callable[[Any], Any]) -> Callable[[Any], Any]:\n\n @wraps(f)\n def unset_hook_wrapper(self, **kwargs):\n f(self, **kwargs)\n self.attribution_model.is_hooked = False\n\n return unset_hook_wrapper", "def removeCallback(self, *args):\n return _coin.SoCallbackList_removeCallback(self, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the path of the tests directory.
def tests_dir(): return Path(os.path.realpath(__file__)).parent
[ "def get_tests_dir_path(): \n fmod_path = ctbto.tests.__path__\n \n test_dir = \"%s/conf_tests\" % fmod_path[0]\n \n return test_dir", "def unit_test_dir(self):\n return os.path.join(self.output_dir, 'unit_tests')", "def tests_root_directory(path: Optional[PathOrString] = None) -> Path:\n root = Path(os.path.realpath(__file__)).parent.parent.parent / \"Tests\"\n return root / path if path else root", "def data_test_dir():\n return Path(__file__).absolute().parent.parent.parent / \"test_data\"\n # return os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), \"test_data\")", "def get_test_configuration_path() -> Path:\n return get_project_root() / '.test_configuration'", "def _get_test_template_dir():\n return os.path.join(os.path.dirname(\n os.path.abspath(__file__)), 'test_templates/')", "def __get_testfile_path(self, path):\n path = os.path.relpath(\n path, os.path.join(self.__data_path, os.pardir))\n return path", "def test_data_dir():\n # Test dir.\n test_data_dir_ = join(dirname(__file__), __TEST_DATA_SUBDIR)\n return test_data_dir_", "def _GetTestPath(self, configuration):\n return os.path.join(\n self._build_dir, configuration, '%s.exe' % self._raw_name)", "def menpobench_dir():\n from pathlib import Path # to avoid cluttering the menpo.base namespace\n import os\n return Path(os.path.abspath(__file__)).parent", "def fixture_dir() -> str:\n return os.path.join(os.path.dirname(os.path.realpath(__file__)), \"fixtures\")", "def get_scarlett_os_dir():\n tests_dir = os.path.dirname(os.path.abspath(__file__))\n scarlett_os_dir = os.path.join(tests_dir, os.path.pardir)\n return os.path.abspath(scarlett_os_dir)", "def fixture_fixtures_dir() -> Path:\n return Path(\"tests/fixtures/\")", "def get_test_paths(self):\n return self.test_paths", "def ktest_path(self):\r\n\t\treturn self.__pathstub + \".ktest\"", "def get_install_dir():\n return os.path.join(os.environ['TEST_TMPDIR'], 'installation')", "def PathToBuiltTest(self, configuration, target):\n target_name = target.split(':')[-1]\n path = os.path.join(\n self.BuildOutputRootDir(), configuration, target_name + '.app')\n return path", "def script_path(script, test_name=__name__):\n return '{test_path}.{script}'.format(test_path=test_name, script=script)", "def get_working_directory(self, root_path):\n\n roots = tuple(item.replace('.', os.path.sep) for item in self._test_roots)\n if len(roots) > 0:\n for item in roots:\n if root_path.endswith(item):\n return root_path.replace(item, '').rstrip(os.path.sep)\n\n return root_path" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a generator object which will assist the tests object creation.
def generator(mocker): return Generator(mocker)
[ "def prepare_example_generator(self):\n generator = self.example_iterator_type()\n generator.configure(self)\n return generator;", "def test_custom_global_generator_multiple():\n c = TestClient()\n for num in range(3):\n generator = textwrap.dedent(f\"\"\"\n class MyGenerator{num}:\n def __init__(self, conanfile):\n self._conanfile = conanfile\n def generate(self):\n self._conanfile.output.info(f\"MyGenerator{num}!!\")\n \"\"\")\n save(os.path.join(c.cache.custom_generators_path, f\"mygen{num}.py\"), generator)\n conanfile = textwrap.dedent(\"\"\"\n [requires]\n pkg/0.1\n\n [generators]\n MyGenerator0\n MyGenerator1\n MyGenerator2\n \"\"\")\n c.save({\"pkg/conanfile.py\": GenConanfile(\"pkg\", \"0.1\"),\n \"conanfile.txt\": conanfile})\n c.run(\"create pkg\")\n c.run(\"install .\")\n assert \"conanfile.txt: Generator 'MyGenerator0' calling 'generate()'\" in c.out\n assert \"conanfile.txt: Generator 'MyGenerator1' calling 'generate()'\" in c.out\n assert \"conanfile.txt: Generator 'MyGenerator2' calling 'generate()'\" in c.out\n assert \"conanfile.txt: MyGenerator0!!\" in c.out\n assert \"conanfile.txt: MyGenerator1!!\" in c.out\n assert \"conanfile.txt: MyGenerator2!!\" in c.out", "def pytest_generate_tests(metafunc):\n if \"instance_generator\" in metafunc.fixturenames:\n all_instance_generators = (\n ecole.instance.SetCoverGenerator(n_rows=100, n_cols=200),\n ecole.instance.CombinatorialAuctionGenerator(n_items=50, n_bids=150),\n ecole.instance.CapacitatedFacilityLocationGenerator(n_customers=60, n_facilities=50),\n ecole.instance.IndependentSetGenerator(n_nodes=100),\n )\n metafunc.parametrize(\"instance_generator\", all_instance_generators)", "def create_test_generators(self):\n \n test_datagen = ImageDataGenerator( \n preprocessing_function = self.preprocess) \n\n\n test_generator = test_datagen.flow_from_directory(\n self.test_path,\n target_size=(self.image_size,self.image_size),\n batch_size= self.batch_size,\n class_mode='categorical', \n shuffle=False) # keep data in same order as labels\n \n return test_generator", "def test_generator(self):\n def generate(a: int):\n assert check_argument_types()\n yield a\n yield a + 1\n\n gen = generate(1)\n next(gen)", "def pytest_pycollect_makeitem(collector, name, obj):\n if inspect.isgeneratorfunction(obj):\n tests = []\n for number, yielded in enumerate(obj()):\n if isinstance(yielded, unittest.TestSuite):\n tests.extend(_yield_unittest_case(collector, name, item)\n for item in yielded)\n elif isinstance(yielded, unittest.TestCase):\n tests.append(_yield_unittest_case(collector, name, yielded))\n else:\n index, call, args = _split_yielded_test(yielded, number)\n test = pytest.Function(name+index, collector, args=args, callobj=call)\n tests.append(test)\n return tests", "def create_generator(instr_info):\n from visa_generator import VisaGenerator\n from anritsu_generator import AnritsuGenerator\n \n # check if instrument is proper or simulated\n if instr_info['type'] == 'sim':\n rm = visa.ResourceManager('@sim')\n else:\n rm = visa.ResourceManager('@py')\n \n # try to connect to instrument\n try:\n instr = rm.open_resource(instr_info['connection'])\n except socket.error:\n print(\"Unable to connect to instrument \" + instr_info['connection'])\n exit()\n\n # create the proper generator object with the correct inctruction keywords\n if instr_info['type'] == 'visa':\n return VisaGenerator(instr, instr_info)\n elif instr_info['type'] == 'anritsu':\n return AnritsuGenerator(instr, instr_info)\n else: # default to visa\n return 
VisaGenerator(instr, instr_info)", "def create_generators(args):\r\n common_args = {\r\n 'batch_size': args.batch_size,\r\n 'config': args.config,\r\n 'image_min_side': args.image_min_side,\r\n 'image_max_side': args.image_max_side,\r\n # 'preprocess_image': preprocess_image,\r\n }\r\n\r\n # create random transform generator for augmenting training data\r\n # if args.random_transform:\r\n # transform_generator = random_transform_generator(\r\n # min_rotation=-0.1,\r\n # max_rotation=0.1,\r\n # min_translation=(-0.1, -0.1),\r\n # max_translation=(0.1, 0.1),\r\n # min_shear=-0.1,\r\n # max_shear=0.1,\r\n # min_scaling=(0.9, 0.9),\r\n # max_scaling=(1.1, 1.1),\r\n # flip_x_chance=0.5,\r\n # flip_y_chance=0.5,\r\n # )\r\n # else:\r\n # transform_generator = random_transform_generator(flip_x_chance=0.5)\r\n\r\n if args.dataset_type == 'csv':\r\n train_generator = DataGenerator(\r\n args.annotations,\r\n shuffle=True,\r\n is_train=True,\r\n # args.classes,\r\n # transform_generator=transform_generator,\r\n **common_args\r\n )\r\n if args.val_annotations:\r\n validation_generator = DataGenerator(\r\n args.val_annotations,\r\n shuffle=True,\r\n is_train=False,\r\n **common_args\r\n )\r\n else:\r\n validation_generator = None\r\n else:\r\n raise ValueError(\r\n 'Invalid data type received: {}'.format(args.dataset_type))\r\n\r\n return train_generator, validation_generator\r\n # return train_generator\r", "def new_generator(self):\n return self.generator_function(**self.arguments)", "def build_generator(self, **kwargs):\n if \"type\" in kwargs:\n warnings.warn(\"Using type to configure generators is now deprecated. Please use module_name and class_name\"\n \"instead.\")\n type_ = kwargs.pop(\"type\")\n generator_class = self._get_generator_class_from_type(type_)\n kwargs.update({\n \"class_name\": generator_class.__name__\n })\n generator = instantiate_class_from_config(\n config=kwargs,\n runtime_config={\n \"datasource\": self\n },\n config_defaults={\n \"module_name\": \"great_expectations.datasource.generator\"\n }\n )\n return generator", "def test_custom_global_generator_imports():\n c = TestClient()\n generator = textwrap.dedent(\"\"\"\n from _myfunc import mygenerate\n\n class MyCustomGenerator:\n def __init__(self, conanfile):\n self._conanfile = conanfile\n def generate(self):\n mygenerate(self._conanfile)\n \"\"\")\n myaux = textwrap.dedent(\"\"\"\n def mygenerate(conanfile):\n conanfile.output.info(\"MYGENERATE WORKS!!\")\n \"\"\")\n save(os.path.join(c.cache.custom_generators_path, \"mygen.py\"), generator)\n save(os.path.join(c.cache.custom_generators_path, \"_myfunc.py\"), myaux)\n\n c.save({\"conanfile.txt\": \"\"})\n c.run(\"install . 
-g MyCustomGenerator\")\n assert \"conanfile.txt: Generator 'MyCustomGenerator' calling 'generate()'\" in c.out\n assert \"conanfile.txt: MYGENERATE WORKS!!\" in c.out", "def __init__(self, a, b):\n self.a = make_generator(a)\n self.b = make_generator(b)", "def testGenerators():\n topo = generateCompleteTopology(5)\n assert isinstance(topo, Topology)\n assert isinstance(topo.getGraph(), networkx.DiGraph)\n assert topo.getNumNodes() == 5\n\n topo = generateChainTopology(10)\n assert isinstance(topo, Topology)\n assert isinstance(topo.getGraph(), networkx.DiGraph)", "def generate(code, generators=None, include=None, output=None):\n pass", "def create_generators(args, preprocess_image):\n common_args = {\n 'batch_size': args.batch_size,\n 'config': args.config,\n 'image_min_side': args.image_min_side,\n 'image_max_side': args.image_max_side,\n 'preprocess_image': preprocess_image,\n }\n\n # create random transform generator for augmenting training data\n if args.random_transform:\n transform_generator = random_transform_generator(\n min_rotation=-0.1,\n max_rotation=0.1,\n min_translation=(-0.1, -0.1),\n max_translation=(0.1, 0.1),\n min_shear=-0.1,\n max_shear=0.1,\n min_scaling=(0.9, 0.9),\n max_scaling=(1.1, 1.1),\n flip_x_chance=0.5,\n flip_y_chance=0.5,\n )\n else:\n transform_generator = random_transform_generator(flip_x_chance=0.5)\n\n if args.dataset_type == 'coco':\n # import here to prevent unnecessary dependency on cocoapi\n from ..preprocessing.coco import CocoGenerator\n\n train_generator = CocoGenerator(\n args.coco_path,\n 'train2017',\n transform_generator=transform_generator,\n **common_args\n )\n\n validation_generator = CocoGenerator(\n args.coco_path,\n 'val2017',\n shuffle_groups=False,\n **common_args\n )\n elif args.dataset_type == 'pascal':\n train_generator = PascalVocGenerator(\n args.pascal_path,\n 'trainval',\n transform_generator=transform_generator,\n **common_args\n )\n\n validation_generator = PascalVocGenerator(\n\n args.pascal_path,\n 'test',\n shuffle_groups=False,\n **common_args\n )\n elif args.dataset_type == 'csv':\n train_generator = CSVGenerator(\n args.annotations,\n args.classes,\n transform_generator=transform_generator,\n **common_args\n )\n\n if args.val_annotations:\n validation_generator = CSVGenerator(\n args.val_annotations,\n args.classes,\n shuffle_groups=False,\n **common_args\n )\n else:\n validation_generator = None\n elif args.dataset_type == 'oid':\n train_generator = OpenImagesGenerator(\n args.main_dir,\n subset='train',\n version=args.version,\n labels_filter=args.labels_filter,\n annotation_cache_dir=args.annotation_cache_dir,\n parent_label=args.parent_label,\n transform_generator=transform_generator,\n **common_args\n )\n\n validation_generator = OpenImagesGenerator(\n args.main_dir,\n subset='validation',\n version=args.version,\n labels_filter=args.labels_filter,\n annotation_cache_dir=args.annotation_cache_dir,\n parent_label=args.parent_label,\n shuffle_groups=False,\n **common_args\n )\n elif args.dataset_type == 'kitti':\n train_generator = KittiGenerator(\n args.kitti_path,\n subset='train',\n transform_generator=transform_generator,\n **common_args\n )\n\n validation_generator = KittiGenerator(\n args.kitti_path,\n subset='val',\n shuffle_groups=False,\n **common_args\n )\n else:\n raise ValueError('Invalid data type received: {}'.format(args.dataset_type))\n\n return train_generator, validation_generator", "def make_generator(object, t0=0.0):\n if object == None:\n return None\n \n if isinstance(object, Function):\n 
return Freezer(object, t0)\n \n if isinstance(object, Generator):\n return object\n \n return ConstantGenerator(object)", "def test_init():\n _badgegen = badgegen.BadgeGenerator()\n assert isinstance(_badgegen, badgegen.BadgeGenerator)", "def gen_tests(test_class):\n for class_attr in dir(test_class):\n if class_attr.startswith('test_'):\n yield class_attr", "def set_generator(self):\n self.generator = IntrospectionGeneratorModule(self.model_config, self.generator_classifier)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
decr(self, size_t n = 1) -> SwigPyIterator
def decr(self, n = 1): return _digital_swig.SwigPyIterator_decr(self, n)
[ "def decr(self, n=1):\r\n return _osgDB.SwigPyIterator_decr(self, n)", "def advance(self, n):\n return _core.SwigPyIterator_advance(self, n)", "def advance(self, n):\n return _almathswig.SwigPyIterator_advance(self, n)", "def incr(self, n=1):\r\n return _osgDB.SwigPyIterator_incr(self, n)", "def incr(self, n=1):\n return _core.SwigPyIterator_incr(self, n)", "def __sub__(self, *args):\n return _decomp.SwigPyIterator___sub__(self, *args)", "def Drop(iterable, n):\n it = iter(iterable)\n it >> Take(n) >> Consume()\n return it", "def skip(sequence, n):\n _iterator = iter(sequence)\n for _ in range(n):\n next(_iterator)\n return _iterator", "def skip(self, n):\n return Enumerable3(itertools.islice(self, n, None, 1))", "def drop(Py_ssize_t_n, seq): # real signature unknown; restored from __doc__\n pass", "def __delslice__(*args):\n return _Field.vectormats___delslice__(*args)", "def dec(self, ix: int, value: V) -> None:\n self.inc(ix, -value)", "def __isub__(self, n):\n return _almathswig.SwigPyIterator___isub__(self, n)", "def __iadd__(self, n):\n return _core.SwigPyIterator___iadd__(self, n)", "def __isub__(self, n):\n return _core.SwigPyIterator___isub__(self, n)", "def __add__(self, n):\n return _core.SwigPyIterator___add__(self, n)", "def __iadd__(self, n):\n return _almathswig.SwigPyIterator___iadd__(self, n)", "def peekn(Py_ssize_t_n, seq): # real signature unknown; restored from __doc__\n pass", "def index_iter(iter, n):\n return next(itertools.islice(iter, n, None))", "def v9_tail(iterable, n):\n if n <= 0:\n return []\n items = deque(maxlen=n)\n for item in iterable:\n items.append(item)\n return list(items)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) -> digital_chunks_to_symbols_bf_sptr __init__(self, p) -> digital_chunks_to_symbols_bf_sptr
def __init__(self, *args): this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args) try: self.this.append(this) except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_ic_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_if_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, s: ghidra.program.model.symbol.Symbol, row: int, charOffset: int):\n ...", "def prepare_symbols(self):", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, Unit_Cell, Na, Nb, Nc, spaceGroup):\n\n self.unit_cell = Unit_Cell\n \n #This is a list of all the crystallographic unit cells that make up\n #this magnetic unit cell\n self.AllUnitCells = [Unit_Cell]\n \n #bonds (interactions) are stored in this class since it is treated\n #as the cutoff cell.(The cell containing all unique bonds)\n self.Bonds = []\n \n self.space_Group = spaceGroup\n \n self.Na = Na\n self.Nb = Nb\n self.Nc = Nc\n \n #generate the magnetic Cell by translating the unit cell\n for i in range(0, Na):\n for j in range(0, Nb):\n for k in range(0, Nc):\n if i !=0 or j != 0 or k != 0: #to not duplicate original unit cell\n self.AllUnitCells.append(self.unit_cell.translateCell(i,j,k))\n \n\n #Recording bonds that are mapped back onto themselves by a symOp\n self.bondConstraints = []", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n\n self.symbolings = open(\n artifact_manager.get_temp_file(\n config.SYMBOL_OPENINGS_CLOSINGS_SORTED),\n 'r')\n # The offsets_db is really small, and we need to read and write\n # from it a fair bit, so suck it into memory\n offsets_db = file(\n artifact_manager.get_temp_file(config.SYMBOL_OFFSETS_DB), 'rb')\n # A map from symbol_id to offset. 
The values of this map are\n # incremented as the openings and closings for a symbol are\n # consumed.\n self.offsets = cPickle.load(offsets_db)\n offsets_db.close()", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def chunks_to_symbols_bf(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_bf(*args, **kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def chunks_to_symbols_bc(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_bc(*args, **kwargs)", "def __init__(self, symbol_group):\n self.symbol_group = symbol_group\n self.print_positions()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
chunks_to_symbols_bf(__dummy_4__ symbol_table, int D = 1) -> digital_chunks_to_symbols_bf_sptr Map a stream of symbol indexes (unpacked bytes or shorts) to stream of float or complex constellation points in D dimensions (D = 1 by default)
def chunks_to_symbols_bf(*args, **kwargs): return _digital_swig.chunks_to_symbols_bf(*args, **kwargs)
[ "def chunks_to_symbols_sf(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_sf(*args, **kwargs)", "def chunks_to_symbols_ic(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_ic(*args, **kwargs)", "def chunks_to_symbols_bc(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_bc(*args, **kwargs)", "def chunks_to_symbols_sc(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_sc(*args, **kwargs)", "def modulate(self, input_bits):\n mapfunc = vectorize(lambda i:\n self._constellation[bitarray2dec(input_bits[i:i + self.num_bits_symbol])])\n\n baseband_symbols = mapfunc(arange(0, len(input_bits), self.num_bits_symbol))\n\n return baseband_symbols", "def insert_snp500_symbols(df_symbol):\n\n # connect to the MySQL instance\n db_host = 'localhost'\n db_user = 'xiao'\n db_pass = 'Wx3921786!'\n db_name = 'securities_master'\n connect = mdb.connect(host=db_host, user=db_user, password=db_pass, db=db_name)\n mysql_cursor = connect.cursor()\n\n # create req strings\n table_name = 'symbol'\n columns = ','.join(df_symbol.columns.values)\n values = (\"%s, \" * 7)[:-2]\n req = \"\"\"INSERT INTO %s (%s) VALUES (%s)\"\"\" % (table_name, columns, values)\n\n # insert to MySQL with max chunk_size = 1000\n chunk_size = 1000\n for i in range(0, len(df_symbol.index), chunk_size):\n chunk_df = df_symbol.iloc[i: i+chunk_size]\n data = [tuple(x) for x in chunk_df.values.tolist()]\n mysql_cursor.executemany(req, data)\n connect.commit()\n\n mysql_cursor.close()", "def setup_symbols_for_species_pKs(self, sid_list):\n new_variable_index = 0\n self.variable_vector_dict = {}\n for species_id in sid_list:\n pK_data_val = self.get_pK_val(species_id) \n self.variable_vector_dict[species_id] = [symbols('x[%d]'%new_variable_index), pK_data_val]\n new_variable_index += 1\n #for each species_id, set up the sequence of species that eventually lead to least protonated state, for binding constant calculation\n self.compounds_species_id_sequence = {}\n for species_id in self.compounds_data_dict.keys():\n self.compounds_species_id_sequence[species_id] = self.get_sequence_of_species_ids(species_id)", "def ofdm_modulate(self, num_data_symb, freq_bin_data):\n min_pow = 1e-30\n time_ofdm_symbols = zeros((self.num_ant, num_data_symb * self.OFDMsymb_len), dtype=complex)\n for symb in range(num_data_symb):\n freq_data_start = symb * self.num_data_bins\n freq_data_end = freq_data_start + self.num_data_bins\n\n time_symb_start = symb * self.OFDMsymb_len\n time_symb_end = time_symb_start + self.OFDMsymb_len\n\n P = 0\n for ant in range(self.num_ant):\n\n ofdm_symb = zeros(self.NFFT, dtype=complex)\n ofdm_symb[self.used_data_bins] = freq_bin_data[ant, freq_data_start:freq_data_end]\n # plt.stem(array(range(-int(self.NFFT/2), int(self.NFFT/2))), abs(ofdm_symb))\n # plt.show()\n data_ifft = ifft(ofdm_symb, self.NFFT)\n cyclic_prefix = data_ifft[-self.CP:]\n data_time = concatenate((cyclic_prefix, data_ifft)) # add CP\n\n sig_energy = abs(dot(data_time, conj(data_time).T))\n # power scaling to normalize to 1\n if sig_energy > min_pow and ant == 0:\n scale_factor = sqrt(len(data_time) / sig_energy)\n else:\n scale_factor = 1\n data_time *= scale_factor\n P += var(data_time)\n time_ofdm_symbols[ant, time_symb_start: time_symb_end] = data_time\n\n for ant in range(self.num_ant):\n time_ofdm_symbols[ant, time_symb_start: time_symb_end] *= (1 / sqrt(P))\n\n return time_ofdm_symbols", "def bytes_to_syms():\n return _digital_swig.bytes_to_syms()", "def ExtractSymbols(self, native_heaps, sym_paths):\n raise 
NotImplementedError()", "def decode(symbols, blocks_quantity):\n\n symbols_n = len(symbols)\n assert symbols_n > 0, \"There are no symbols to decode.\"\n\n # We keep `blocks_n` notation and create the empty list\n blocks_n = blocks_quantity\n blocks = [None] * blocks_n\n\n # Recover the degrees and associated neighbors using the seed (the index, cf. encoding).\n symbols = recover_graph(symbols, blocks_n)\n # print(\"Graph built back. Ready for decoding.\", flush=True)\n \n solved_blocks_count = 0\n iteration_solved_count = 0\n start_time = time.time()\n # print(len(symbols))\n\n while iteration_solved_count > 0 or solved_blocks_count == 0:\n # print(1)\n iteration_solved_count = 0\n # Search for solvable symbols\n for i, symbol in enumerate(symbols):\n # Check the current degree. If it's 1 then we can recover data\n if symbol.degree == 1: \n\n iteration_solved_count += 1 \n block_index = next(iter(symbol.neighbors)) \n symbols.pop(i)\n\n # This symbol is redundant: another already helped decoding the same block\n if blocks[block_index] is not None:\n continue\n\n blocks[block_index] = symbol.data\n\n if VERBOSE:\n print(\"Solved block_{} with symbol_{}\".format(block_index, symbol.index))\n \n # Update the count and log the processing\n solved_blocks_count += 1\n log(\"Decoding\", solved_blocks_count, blocks_n, start_time)\n\n # Reduce the degrees of other symbols that contains the solved block as neighbor \n reduce_neighbors(block_index, blocks, symbols)\n\n print(\"\\n----- Solved Blocks {:2}/{:2} --\".format(solved_blocks_count, blocks_n))\n\n return np.asarray(blocks), solved_blocks_count", "def get_symbol_map():\n functions = {}\n for ea in Segments():\n for funcea in Functions(SegStart(ea), SegEnd(ea)):\n size = FindFuncEnd(funcea) - funcea\n functions[funcea] = (GetFunctionName(funcea), size)\n # It may not be necessary to sort by ea, but be safe...\n output_lines = []\n for i, (ea, (name, size)) in enumerate(sorted(functions.items())):\n if len(name) > 255:\n print \"ClemSym: truncating name\", name\n name = name[:255]\n line = \"%d: %s @ %07x %d\" % (i, name, ea, size)\n output_lines.append(line)\n return '\\n'.join(output_lines)", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def fstrmsymbols_cmd(in_fst=None, out_fst=None, in_symbols=0,\n out_symbols=0, pipe=True, **kwargs):\n\n cmd = ''\n if not in_symbols and not out_symbols:\n cmd += 'cat'\n\n if in_symbols:\n cmd = 'fstrmsymbols'\n in_symbols = in_symbols if isinstance(in_symbols, (list, tuple))\\\n else [in_symbols]\n fmt = ' \"echo ' + '{} ' * len(in_symbols) + '|\"'\n cmd += fmt.format(*in_symbols)\n if out_symbols:\n cmd += _add_input_output(in_fst, pipe=True)\n in_fst = None\n\n if out_symbols:\n cmd += 'fstrmsymbols --remove-from-output=true'\n out_symbols = out_symbols if isinstance(out_symbols, (list, tuple))\\\n else [out_symbols]\n fmt = ' \"echo ' + '{} ' * len(out_symbols) + '|\"'\n cmd += fmt.format(*out_symbols)\n\n cmd += _add_input_output(in_fst)\n cmd += fstpostprocess_cmd(None, out_fst, pipe=pipe, **kwargs)\n\n return cmd", "def get_raw_smiles(file_name, smiles_char_dict, open_fn, extract_fn) -> List[str]:\n data = []\n # open the gzipped chembl filegzip.open\n with open_fn(file_name, 'rt') as f:\n\n line_count = 0\n for line in f:\n\n line_count 
+= 1\n # extract the canonical smiles column\n if platform.system() == \"Windows\":\n line = line.decode(\"utf-8\")\n\n # smiles = line.split('\\t')[1]\n\n smiles = extract_fn(line)\n\n # only keep reasonably sized molecules\n if 5 <= len(smiles) <= 200:\n\n smiles = split_charged_mol(smiles)\n\n if smiles_char_dict.allowed(smiles):\n # check whether the molecular graph consists of\n # multiple connected components (eg. in salts)\n # if so, just keep the largest one\n\n data.append(smiles)\n\n print(f'Processed {len(data)} molecules from {line_count} lines in the input file.')\n\n return data", "def bandlimited_dirac(N_sph, d, w_n=None):\n d = utils.asarray_1d(d)\n if w_n is None:\n w_n = np.ones(N_sph + 1)\n assert(len(w_n) == N_sph + 1), \"Provide weight per order.\"\n g_n = np.zeros([(N_sph + 1)**2, len(d)])\n for n, i in enumerate(range(N_sph + 1)):\n g_n[i, :] = w_n[i] * (2 * n + 1) / (4 * np.pi) * \\\n scyspecial.eval_legendre(n, np.cos(d))\n dirac = np.sum(g_n, axis=0)\n return dirac", "def getSymbol(id):", "def _get_symbols_to_logits_fn(self, max_decode_length):\n\n timing_signal = utils.get_position_encoding(\n max_decode_length + 1, self.params[\"hidden_size\"],\n )\n decoder_self_attention_bias = utils.get_decoder_self_attention_bias(\n max_decode_length, dtype = tf.float32\n # dtype=self._params[\"dtype\"]\n )\n\n def symbols_to_logits_fn(ids, i, cache):\n \"\"\"Generate logits for next potential IDs.\n\n Args:\n ids: Current decoded sequences.\n int tensor with shape [batch_size * beam_size, i + 1]\n i: Loop index\n cache: dictionary of values storing the encoder output, encoder-decoder\n attention bias, and previous decoder attention values.\n\n Returns:\n Tuple of\n (logits with shape [batch_size * beam_size, vocab_size],\n updated cache values)\n \"\"\"\n # Set decoder input to the last generated IDs\n decoder_input = ids[:, -1:]\n\n # Preprocess decoder input by getting embeddings and adding timing signal.\n decoder_input = self.embedding_softmax_layer(decoder_input)\n decoder_input += tf.cast(x=timing_signal[i:i + 1],\n dtype=decoder_input.dtype)\n\n self_attention_bias = decoder_self_attention_bias[:, :, i:i + 1, :i + 1]\n\n decoder_outputs = self._call(\n decoder_input, cache.get(\"encoder_outputs\"), self_attention_bias,\n cache.get(\"encoder_decoder_attention_bias\"), cache,\n )\n logits = self.embedding_softmax_layer.linear(decoder_outputs)\n logits = tf.squeeze(logits, axis=[1])\n return tf.cast(logits, tf.float32), cache\n\n return symbols_to_logits_fn" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) -> digital_chunks_to_symbols_bc_sptr __init__(self, p) -> digital_chunks_to_symbols_bc_sptr
def __init__(self, *args): this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args) try: self.this.append(this) except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_ic_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_if_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, s: ghidra.program.model.symbol.Symbol, row: int, charOffset: int):\n ...", "def prepare_symbols(self):", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def chunks_to_symbols_bc(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_bc(*args, **kwargs)", "def __init__(self, Unit_Cell, Na, Nb, Nc, spaceGroup):\n\n self.unit_cell = Unit_Cell\n \n #This is a list of all the crystallographic unit cells that make up\n #this magnetic unit cell\n self.AllUnitCells = [Unit_Cell]\n \n #bonds (interactions) are stored in this class since it is treated\n #as the cutoff cell.(The cell containing all unique bonds)\n self.Bonds = []\n \n self.space_Group = spaceGroup\n \n self.Na = Na\n self.Nb = Nb\n self.Nc = Nc\n \n #generate the magnetic Cell by translating the unit cell\n for i in range(0, Na):\n for j in range(0, Nb):\n for k in range(0, Nc):\n if i !=0 or j != 0 or k != 0: #to not duplicate original unit cell\n self.AllUnitCells.append(self.unit_cell.translateCell(i,j,k))\n \n\n #Recording bonds that are mapped back onto themselves by a symOp\n self.bondConstraints = []", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n\n self.symbolings = open(\n artifact_manager.get_temp_file(\n config.SYMBOL_OPENINGS_CLOSINGS_SORTED),\n 'r')\n # The offsets_db is really small, and we need to read and write\n # from it a fair bit, so suck it into memory\n offsets_db = file(\n artifact_manager.get_temp_file(config.SYMBOL_OFFSETS_DB), 'rb')\n # A map from symbol_id to offset. 
The values of this map are\n # incremented as the openings and closings for a symbol are\n # consumed.\n self.offsets = cPickle.load(offsets_db)\n offsets_db.close()", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_charp()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def chunks_to_symbols_bf(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_bf(*args, **kwargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
chunks_to_symbols_bc(gr_complex_vector symbol_table, int D = 1) -> digital_chunks_to_symbols_bc_sptr Map a stream of symbol indexes (unpacked bytes or shorts) to stream of float or complex constellation points in D dimensions (D = 1 by default)
def chunks_to_symbols_bc(*args, **kwargs): return _digital_swig.chunks_to_symbols_bc(*args, **kwargs)
[ "def chunks_to_symbols_bf(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_bf(*args, **kwargs)", "def chunks_to_symbols_sc(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_sc(*args, **kwargs)", "def chunks_to_symbols_ic(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_ic(*args, **kwargs)", "def chunks_to_symbols_sf(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_sf(*args, **kwargs)", "def modulate(self, input_bits):\n mapfunc = vectorize(lambda i:\n self._constellation[bitarray2dec(input_bits[i:i + self.num_bits_symbol])])\n\n baseband_symbols = mapfunc(arange(0, len(input_bits), self.num_bits_symbol))\n\n return baseband_symbols", "def bandlimited_dirac(N_sph, d, w_n=None):\n d = utils.asarray_1d(d)\n if w_n is None:\n w_n = np.ones(N_sph + 1)\n assert(len(w_n) == N_sph + 1), \"Provide weight per order.\"\n g_n = np.zeros([(N_sph + 1)**2, len(d)])\n for n, i in enumerate(range(N_sph + 1)):\n g_n[i, :] = w_n[i] * (2 * n + 1) / (4 * np.pi) * \\\n scyspecial.eval_legendre(n, np.cos(d))\n dirac = np.sum(g_n, axis=0)\n return dirac", "def setup_symbols_for_species_pKs(self, sid_list):\n new_variable_index = 0\n self.variable_vector_dict = {}\n for species_id in sid_list:\n pK_data_val = self.get_pK_val(species_id) \n self.variable_vector_dict[species_id] = [symbols('x[%d]'%new_variable_index), pK_data_val]\n new_variable_index += 1\n #for each species_id, set up the sequence of species that eventually lead to least protonated state, for binding constant calculation\n self.compounds_species_id_sequence = {}\n for species_id in self.compounds_data_dict.keys():\n self.compounds_species_id_sequence[species_id] = self.get_sequence_of_species_ids(species_id)", "def insert_snp500_symbols(df_symbol):\n\n # connect to the MySQL instance\n db_host = 'localhost'\n db_user = 'xiao'\n db_pass = 'Wx3921786!'\n db_name = 'securities_master'\n connect = mdb.connect(host=db_host, user=db_user, password=db_pass, db=db_name)\n mysql_cursor = connect.cursor()\n\n # create req strings\n table_name = 'symbol'\n columns = ','.join(df_symbol.columns.values)\n values = (\"%s, \" * 7)[:-2]\n req = \"\"\"INSERT INTO %s (%s) VALUES (%s)\"\"\" % (table_name, columns, values)\n\n # insert to MySQL with max chunk_size = 1000\n chunk_size = 1000\n for i in range(0, len(df_symbol.index), chunk_size):\n chunk_df = df_symbol.iloc[i: i+chunk_size]\n data = [tuple(x) for x in chunk_df.values.tolist()]\n mysql_cursor.executemany(req, data)\n connect.commit()\n\n mysql_cursor.close()", "def expand_bonds(s,new_D):\r\n assert all(new_D>=s.D)\r\n for i in range(1,s.N):\r\n A=copy.copy(s.A[i])\r\n s.A[i]=np.zeros((s.q[i],new_D[i-1],new_D[i]),dtype=complex)\r\n s.A[i][:,0:s.D[i-1],0:s.D[i]]=A", "def x_HC2GC(s, l, b, d=-8.5):\n if isinstance(l, np.ndarray):\n return s*cos(l)*cos(b) + d*np.ones(l.shape[0])\n elif isinstance(b, np.ndarray):\n return s*cos(l)*cos(b) + d*np.ones(b.shape[0])\n elif isinstance(s, np.ndarray):\n return s*cos(l)*cos(b) + d*np.ones(s.shape[0])\n else:\n return s*cos(l)*cos(b) + d", "def encode_whole_bonds(x, x_format=\"coords\", embedd_info={},\n needed_info = {\"cutoffs\": [2,5,10],\n \"bond_scales\": [.5, 1, 2]},\n free_mem=False, eps=1e-7): \n device, precise = x.device, x.type()\n # convert to 3d coords if passed as preds\n if x_format == \"encode\":\n pred_x = from_encode_to_pred(x, embedd_info=embedd_info, needed_info=needed_info)\n x = pred_x[:, :3] * pred_x[:, 3:4]\n\n # encode bonds\n\n # 1. 
BONDS: find the covalent bond_indices - allow arg -> DRY\n if \"prot_covalent_bond\" in needed_info.keys():\n native_bond_idxs = needed_info[\"covalent_bond\"]\n else:\n native_bond_idxs = prot_covalent_bond(needed_info[\"seq\"])\n\n native_bond_idxs = native_bond_idxs.to(device)\n # determine kind of cutoff (hard distance threhsold or closest points)\n closest = None\n cutoffs = needed_info[\"cutoffs\"].copy() \n if sum( isinstance(ci, str) for ci in cutoffs ) > 0:\n cutoffs = [-1e-3] # negative so no bond is taken \n closest = True\n\n # points under cutoff = d(i - j) < X \n cutoffs = torch.tensor(cutoffs, device=device).type(precise)\n dist_mat = torch.cdist(x, x, p=2)\n # do the base case for hard-distance threshold (bonds exist if below X)\n bond_buckets = torch.bucketize(dist_mat, cutoffs) \n # assign native bonds the extra token - don't repeat them\n bond_buckets[native_bond_idxs[0], native_bond_idxs[1]] = cutoffs.shape[0]\n # find the indexes - symmetric and we dont want the diag\n bond_buckets += len(cutoffs) * torch.eye(bond_buckets.shape[0], device=device).long()\n close_bond_idxs = ( bond_buckets < len(cutoffs) ).nonzero().t()\n\n # the K closest (covalent bonds excluded) are considered bonds \n if closest:\n k = int( needed_info[\"cutoffs\"][0].split(\"_\")[0] ) \n # copy dist_mat and mask the covalent bonds out\n masked_dist_mat = dist_mat.clone()\n masked_dist_mat += torch.eye(masked_dist_mat.shape[0]) * torch.amax(masked_dist_mat)\n masked_dist_mat[close_bond_idxs[0], close_bond_idxs[1]] = masked_dist_mat[0,0]\n # argsort by distance\n _, sorted_col_idxs = torch.topk(masked_dist_mat, k=k, dim=-1)\n # cat idxs and repeat row idx to match number of column idx\n sorted_col_idxs = torch.cat(sorted_idxs[:, :k], dim=-1)\n sorted_row_idxs = torch.repeat_interleave( torch.arange(dist_mat.shape[0]).long(), repeats=k )\n close_bond_idxs = torch.stack([ sorted_row_idxs, sorted_col_idxs ], dim=0)\n # dont pick rest of bonds, except the k closest || overwrites the previous bond_buckets\n bond_buckets = torch.ones_like(dist_mat) * len(cutoffs)\n bond_buckets[close_bond_idxs[0], close_bond_idxs[1]] = len(cutoffs)-1\n\n # merge all bonds\n if close_bond_idxs.shape[0] > 0:\n whole_bond_idxs = torch.cat([native_bond_idxs, close_bond_idxs], dim=-1)\n else:\n whole_bond_idxs = native_bond_idxs\n\n # 2. 
ATTRS: encode bond -> attrs\n bond_norms = dist_mat[ whole_bond_idxs[0] , whole_bond_idxs[1] ]\n bond_vecs = x[ whole_bond_idxs[0] ] - x[ whole_bond_idxs[1] ]\n bond_vecs /= (bond_norms + eps).unsqueeze(-1)\n bond_norms_enc = encode_dist(bond_norms, scales=needed_info[\"bond_scales\"]).squeeze()\n\n # pack scalars and vectors - extra token for covalent bonds\n bond_n_vectors = 1\n bond_n_scalars = (2 * len(needed_info[\"bond_scales\"]) + 1) + 1 # last one is an embedd of size 1+len(cutoffs)\n whole_bond_enc = torch.cat([bond_vecs, # 1 vector - no need of reverse - we do 2x bonds (symmetry)\n # scalars\n bond_norms_enc, # 2 * len(scales)\n bond_buckets[ whole_bond_idxs[0], whole_bond_idxs[1] ].unsqueeze(-1) # 1\n ], dim=-1) \n # free gpu mem\n if free_mem:\n del bond_buckets, bond_norms_enc, bond_vecs, dist_mat,\\\n close_bond_idxs, native_bond_idxs\n if closest: \n del masked_dist_mat, sorted_col_idxs, sorted_row_idxs\n\n embedd_info = {\"bond_n_vectors\": bond_n_vectors, \n \"bond_n_scalars\": bond_n_scalars, \n \"bond_embedding_nums\": [ len(cutoffs) + 1 ]} # extra one for covalent (default)\n\n return whole_bond_enc, whole_bond_idxs, embedd_info", "def SSpcGroup(SGData,SSymbol):\n \n def fixMonoOrtho():\n mod = ''.join(modsym).replace('1/2','0').replace('1','0')\n if SGData['SGPtGrp'] in ['2','m']: #OK\n if mod in ['a00','0b0','00g']:\n result = [i*-1 for i in SGData['SSGKl']]\n else:\n result = SGData['SSGKl'][:]\n if '/' in mod:\n return [i*-1 for i in result]\n else:\n return result\n elif SGData['SGPtGrp'] == '2/m': #OK\n if mod in ['a00','0b0','00g']:\n result = SGData['SSGKl'][:]\n else:\n result = [i*-1 for i in SGData['SSGKl']]\n if '/' in mod:\n return [i*-1 for i in result]\n else:\n return result\n else: #orthorhombic\n return [-SSGKl[i] if mod[i] in ['a','b','g'] else SSGKl[i] for i in range(3)]\n \n def extendSSGOps(SSGOps):\n for OpA in SSGOps:\n OpAtxt = SSMT2text(OpA)\n if 't' not in OpAtxt:\n continue\n for OpB in SSGOps:\n OpBtxt = SSMT2text(OpB)\n if 't' not in OpBtxt:\n continue\n OpC = list(SGProd(OpB,OpA))\n OpC[1] %= 1.\n OpCtxt = SSMT2text(OpC)\n# print OpAtxt.replace(' ','')+' * '+OpBtxt.replace(' ','')+' = '+OpCtxt.replace(' ','')\n for k,OpD in enumerate(SSGOps):\n OpDtxt = SSMT2text(OpD)\n OpDtxt2 = ''\n if SGData['SGGray']: \n OpDtxt2 = SSMT2text([OpD[0],OpD[1]+np.array([0.,0.,0.,.5])])\n# print ' ('+OpCtxt.replace(' ','')+' = ? 
'+OpDtxt.replace(' ','')+')'\n if OpCtxt == OpDtxt:\n continue\n elif OpCtxt == OpDtxt2:\n continue\n elif OpCtxt.split(',')[:3] == OpDtxt.split(',')[:3]:\n if 't' not in OpDtxt:\n SSGOps[k] = OpC\n# print k,' new:',OpCtxt.replace(' ','')\n break\n else:\n OpCtxt = OpCtxt.replace(' ','')\n OpDtxt = OpDtxt.replace(' ','')\n Txt = OpCtxt+' conflicts with '+OpDtxt\n# print (Txt)\n return False,Txt\n return True,SSGOps\n \n def findMod(modSym):\n for a in ['a','b','g']:\n if a in modSym:\n return a\n \n def genSSGOps():\n SSGOps = SSGData['SSGOps'][:]\n iFrac = {}\n for i,frac in enumerate(SSGData['modSymb']):\n if frac in ['1/2','1/3','1/4','1/6','1']:\n iFrac[i] = frac+'.'\n# print SGData['SpGrp']+SSymbol\n# print 'SSGKl',SSGKl,'genQ',genQ,'iFrac',iFrac,'modSymb',SSGData['modSymb']\n# set identity & 1,-1; triclinic\n SSGOps[0][0][3,3] = 1.\n## expand if centrosymmetric\n# if SGData['SGInv']:\n# SSGOps += [[-1*M,V] for M,V in SSGOps[:]]\n# monoclinic - all done & all checked\n if SGData['SGPtGrp'] in ['2','m']: #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n for i in iFrac:\n SSGOps[1][0][3,i] = -SSGKl[0]\n elif SGData['SGPtGrp'] == '2/m': #OK\n SSGOps[1][0][3,3] = SSGKl[1]\n if 's' in gensym:\n SSGOps[1][1][3] = 0.5\n for i in iFrac:\n SSGOps[1][0][3,i] = SSGKl[0]\n \n# orthorhombic - all OK not fully checked\n elif SGData['SGPtGrp'] in ['222','mm2','m2m','2mm']: #OK\n if SGData['SGPtGrp'] == '222':\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[1,2],2:[1,3]},'b':{2:[3,2],0:[1,2]}} #OK\n elif SGData['SGPtGrp'] == 'mm2':\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]}} #OK\n elif SGData['SGPtGrp'] == 'm2m':\n OrOps = {'b':{0:[1,2],2:[3,2]},'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]}} #OK\n elif SGData['SGPtGrp'] == '2mm':\n OrOps = {'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]},'g':{0:[1,3],1:[2,3]}} #OK\n a = findMod(SSGData['modSymb'])\n OrFrac = OrOps[a]\n for j in iFrac:\n for i in OrFrac[j]:\n SSGOps[i][0][3,j] = -2.*eval(iFrac[j])*SSGKl[i-1]\n for i in [0,1,2]:\n SSGOps[i+1][0][3,3] = SSGKl[i]\n SSGOps[i+1][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] == 'mmm': #OK\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]}} \n a = findMod(SSGData['modSymb'])\n if a == 'g':\n SSkl = [1,1,1]\n elif a == 'a':\n SSkl = [-1,1,-1]\n else:\n SSkl = [1,-1,-1]\n OrFrac = OrOps[a]\n for j in iFrac:\n for i in OrFrac[j]:\n SSGOps[i][0][3,j] = -2.*eval(iFrac[j])*SSkl[i-1]\n for i in [0,1,2]:\n SSGOps[i+1][0][3,3] = SSkl[i]\n SSGOps[i+1][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps \n# tetragonal - all done & checked\n elif SGData['SGPtGrp'] == '4': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n elif SGData['SGPtGrp'] == '-4': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = 1\n elif SGData['SGPtGrp'] in ['4/m',]: #OK\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -SSGKl[0]\n for i,j in enumerate([1,3]):\n SSGOps[j][0][3,3] = 1\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] in ['422','4mm','-42m','-4m2',]: #OK\n iGens = [1,4,5]\n if SGData['SGPtGrp'] in ['4mm','-4m2',]:\n iGens = [1,6,7]\n for i,j in enumerate(iGens):\n if '1/2' in SSGData['modSymb'] and i < 2:\n SSGOps[j][0][3,1] = SSGKl[i]\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n if 
's' in gensym and j == 6:\n SSGOps[j][1][3] = -genQ[i]\n else:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] in ['4/mmm',]:#OK\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -SSGKl[0]\n SSGOps[6][0][3,1] = SSGKl[1]\n if modsym:\n SSGOps[1][1][3] = -genQ[3]\n for i,j in enumerate([1,2,6,7]):\n SSGOps[j][0][3,3] = 1\n SSGOps[j][1][3] = genQ[i]\n E,Result = extendSSGOps(SSGOps)\n if not E:\n return E,Result\n else:\n SSGOps = Result\n \n# trigonal - all done & checked\n elif SGData['SGPtGrp'] == '3': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] == '-3': #OK\n SSGOps[1][0][3,3] = -SSGKl[0]\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] in ['312','3m','-3m','-3m1','3m1']: #OK\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n for i,j in enumerate([1,5]):\n if SGData['SGPtGrp'] in ['3m','-3m']:\n SSGOps[j][0][3,3] = 1\n else: \n SSGOps[j][0][3,3] = SSGKl[i+1]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n elif SGData['SGPtGrp'] in ['321','32']: #OK\n for i,j in enumerate([1,4]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n elif SGData['SGPtGrp'] in ['31m','-31m']: #OK\n ids = [1,3]\n if SGData['SGPtGrp'] == '-31m':\n ids = [1,3]\n if '1/3' in SSGData['modSymb']:\n SSGOps[ids[0]][0][3,1] = -SSGKl[0]\n for i,j in enumerate(ids):\n SSGOps[j][0][3,3] = 1\n if genQ[i+1]:\n SSGOps[j][1][3] = genQ[i+1]\n \n# hexagonal all done & checked\n elif SGData['SGPtGrp'] == '6': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] == '-6': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n elif SGData['SGPtGrp'] in ['6/m',]: #OK\n SSGOps[1][0][3,3] = -SSGKl[1]\n SSGOps[1][1][3] = genQ[0]\n SSGOps[2][1][3] = genQ[1]\n elif SGData['SGPtGrp'] in ['622',]: #OK\n for i,j in enumerate([1,9,8]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = -genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n \n elif SGData['SGPtGrp'] in ['6mm','-62m','-6m2',]: #OK\n for i,j in enumerate([1,6,7]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n elif SGData['SGPtGrp'] in ['6/mmm',]: # OK\n for i,j in enumerate([1,2,10,11]):\n SSGOps[j][0][3,3] = 1\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n elif SGData['SGPtGrp'] in ['1','-1']: #triclinic - done\n return True,SSGOps\n E,SSGOps = extendSSGOps(SSGOps)\n return E,SSGOps\n \n def specialGen(gensym,modsym):\n sym = ''.join(gensym)\n if SGData['SGPtGrp'] in ['2/m',] and 'n' in SGData['SpGrp']:\n if 's' in sym:\n gensym = 'ss'\n if SGData['SGPtGrp'] in ['-62m',] and sym == '00s':\n gensym = '0ss'\n elif SGData['SGPtGrp'] in ['222',]:\n if sym == '00s':\n gensym = '0ss'\n elif sym == '0s0':\n gensym = 'ss0'\n elif sym == 's00':\n gensym = 's0s'\n elif SGData['SGPtGrp'] in ['mmm',]:\n if 'g' in modsym:\n if sym == 's00':\n gensym = 's0s'\n elif sym == '0s0':\n gensym = '0ss'\n elif 'a' in modsym:\n if sym == '0s0':\n gensym = 'ss0'\n elif sym == '00s':\n gensym = 's0s'\n elif 'b' in modsym:\n if sym == '00s':\n gensym = '0ss'\n elif sym == 's00':\n gensym = 'ss0'\n return gensym\n \n Fracs = {'1/2':0.5,'1/3':1./3,'1':1.0,'0':0.,'s':.5,'t':1./3,'q':.25,'h':-1./6,'a':0.,'b':0.,'g':0.}\n if SGData['SGLaue'] in ['m3','m3m']:\n return '(3+1) superlattices not defined for cubic space groups',None\n 
elif SGData['SGLaue'] in ['3R','3mR']:\n return '(3+1) superlattices not defined for rhombohedral settings - use hexagonal setting',None\n try:\n modsym,gensym = splitSSsym(SSymbol)\n except ValueError:\n return 'Error in superspace symbol '+SSymbol,None\n modQ = [Fracs[mod] for mod in modsym]\n SSGKl = SGData['SSGKl'][:]\n if SGData['SGLaue'] in ['2/m','mmm']:\n SSGKl = fixMonoOrtho()\n Ngen = len(gensym)\n if SGData.get('SGGray',False):\n Ngen -= 1\n if len(gensym) and Ngen != len(SSGKl):\n return 'Wrong number of items in generator symbol '+''.join(gensym),None\n gensym = specialGen(gensym[:Ngen],modsym)\n genQ = [Fracs[mod] for mod in gensym[:Ngen]]\n if not genQ:\n genQ = [0,0,0,0]\n SSgSpc = SGData['SpGrp']+SSymbol\n if SGData['SGGray']:\n SSgSpc = SSgSpc.replace('(',\" 1'(\")\n SSGData = {'SSpGrp':SSgSpc,'modQ':modQ,'modSymb':modsym,'SSGKl':SSGKl}\n SSCen = np.zeros((len(SGData['SGCen']),4))\n for icen,cen in enumerate(SGData['SGCen']):\n SSCen[icen,0:3] = cen\n if 'BNSlattsym' in SGData and '_' in SGData['BNSlattsym'][0]:\n Ncen = len(SGData['SGCen'])\n for icen in range(Ncen//2,Ncen):\n SSCen[icen,3] = 0.5\n SSGData['SSGCen'] = SSCen%1.\n SSGData['SSGOps'] = []\n for iop,op in enumerate(SGData['SGOps']):\n T = np.zeros(4)\n ssop = np.zeros((4,4))\n ssop[:3,:3] = op[0]\n T[:3] = op[1]\n SSGData['SSGOps'].append([ssop,T])\n E,Result = genSSGOps()\n if E:\n SSGData['SSGOps'] = Result\n if DEBUG:\n print ('Super spacegroup operators for '+SSGData['SSpGrp'])\n for Op in Result:\n print (SSMT2text(Op).replace(' ',''))\n if SGData['SGInv']: \n for Op in Result:\n Op = [-Op[0],-Op[1]%1.]\n print (SSMT2text(Op).replace(' ','')) \n return None,SSGData\n else:\n return Result+'\\nOperator conflict - incorrect superspace symbol',None", "def make_dom_map(pmt_directions, values, nside=512, d=0.2, smoothing=0.1):\n import healpy as hp\n\n discs = [hp.query_disc(nside, dir, 0.2) for dir in pmt_directions]\n npix = hp.nside2npix(nside)\n pixels = np.zeros(npix)\n for disc, value in zip(discs, values):\n for d in disc:\n pixels[d] = value\n if smoothing > 0:\n return hp.sphtfunc.smoothing(pixels, fwhm=smoothing, iter=1)\n return pixels", "def basis_format(basis_set_name,\n atomic_numbers,\n atomic_symbols,\n shell_type,\n n_primitives,\n atom_map,\n p_exponents,\n c_coefficients,\n p_c_coefficients):\n\n class TypeList:\n def __getitem__(self, item):\n typeList = {'0': ['s', 1],\n '1': ['p', 3],\n '2': ['d', 6],\n '3': ['f', 10],\n '4': ['g', 15],\n '-1': ['sp', 4],\n '-2': ['d_', 5],\n '-3': ['f_', 7],\n '-4': ['g_', 9]}\n if item in typeList:\n return typeList[item]\n else:\n raise ParserError('Basis function of type {} not implemented'.format(item), __name__, '')\n\n type_list = TypeList()\n\n atomic_numbers = [int(an) for an in atomic_numbers]\n atom_map = np.array(atom_map, dtype=int)\n # print(atom_map)\n basis_set = {'name': basis_set_name,\n 'primitive_type': 'gaussian'}\n\n shell_type_index = [0] + np.cumsum([type_list['{}'.format(s)][1]\n for s in shell_type]).tolist()\n prim_from_shell_index = [0] + np.cumsum(np.array(n_primitives, dtype=int)).tolist()\n\n # print(shell_type_index)\n # print(prim_from_shell_index)\n\n atoms_data = []\n for iatom, atomic_number in enumerate(atomic_numbers):\n symbol = str(atomic_symbols[iatom])\n\n shell_from_atom_counts = np.unique(atom_map, return_counts=True)[1]\n shell_from_atom_index = np.unique(atom_map, return_index=True)[1]\n # print(shell_from_atom_counts)\n # print('atom_indexes', shell_from_atom_index)\n # print('atom_number', iatom)\n # 
print('shells index', shell_from_atom_index[iatom])\n # print('number of shells', shell_from_atom_counts[iatom])\n\n shells_data = []\n for ishell in range(shell_from_atom_counts[iatom]):\n st = type_list['{}'.format(shell_type[shell_from_atom_index[iatom] + ishell])]\n # print(st, ishell)\n ini_prim = prim_from_shell_index[shell_from_atom_index[iatom] + ishell]\n fin_prim = prim_from_shell_index[shell_from_atom_index[iatom] + ishell+1]\n # print(ini_prim)\n # print(fin_prim)\n\n shells_data.append({\n 'shell_type': st[0],\n 'functions': st[1],\n 'p_exponents': p_exponents[ini_prim: fin_prim],\n 'con_coefficients': c_coefficients[ini_prim: fin_prim],\n 'p_con_coefficients': p_c_coefficients[ini_prim: fin_prim],\n })\n\n atoms_data.append({'shells': shells_data,\n 'symbol': symbol,\n 'atomic_number': atomic_number})\n\n basis_set['atoms'] = atoms_data\n\n return basis_set", "def calculate_discrete_NPHT_2d(binary_cubical_complex: numpy.array,\n number_of_directions)->list:\n\n binary_cubical_complex = binary_cubical_complex.astype(bool)\n\n if binary_cubical_complex.ndim != 2:\n raise ValueError(\"binary_cubical_complex must have dimension 2.\")\n\n vertices = [v for v, b in numpy.ndenumerate(binary_cubical_complex) if b]\n\n return_value = []\n # Spherical coordinates without PI as multiplicative factor\n spherical_coordinates = numpy.linspace(0, 2, number_of_directions + 1)[:-1]\n # _snap_zero_one guarantees that (1, 0), (-1, 0), (0, 1), (0, -1) are in cartesian_coordiantes.\n cartesian_coordinates = [(_snap_zero_one(numpy.cos(t*numpy.pi)),\n _snap_zero_one(numpy.sin(t*numpy.pi)))\n for t in spherical_coordinates]\n\n for v_cart in cartesian_coordinates:\n\n filtration = NormalizedBarycentricHeightFiltration(vertices, v_cart)\n\n filtrated_complex = numpy.empty(binary_cubical_complex.shape)\n filtrated_complex.fill(float('inf'))\n\n f_values = []\n for v in vertices:\n f_v = filtration(v)\n f_values.append(f_v)\n filtrated_complex[v] = f_v\n\n f_max = max(f_values)\n\n dgms = persistence_diagrams_of_filtrated_cubical_complex(filtrated_complex)\n dgms = [de_essentialize(dgm, f_max) for dgm in dgms]\n\n return_value.append(dgms)\n return return_value", "def ofdm_modulate(self, num_data_symb, freq_bin_data):\n min_pow = 1e-30\n time_ofdm_symbols = zeros((self.num_ant, num_data_symb * self.OFDMsymb_len), dtype=complex)\n for symb in range(num_data_symb):\n freq_data_start = symb * self.num_data_bins\n freq_data_end = freq_data_start + self.num_data_bins\n\n time_symb_start = symb * self.OFDMsymb_len\n time_symb_end = time_symb_start + self.OFDMsymb_len\n\n P = 0\n for ant in range(self.num_ant):\n\n ofdm_symb = zeros(self.NFFT, dtype=complex)\n ofdm_symb[self.used_data_bins] = freq_bin_data[ant, freq_data_start:freq_data_end]\n # plt.stem(array(range(-int(self.NFFT/2), int(self.NFFT/2))), abs(ofdm_symb))\n # plt.show()\n data_ifft = ifft(ofdm_symb, self.NFFT)\n cyclic_prefix = data_ifft[-self.CP:]\n data_time = concatenate((cyclic_prefix, data_ifft)) # add CP\n\n sig_energy = abs(dot(data_time, conj(data_time).T))\n # power scaling to normalize to 1\n if sig_energy > min_pow and ant == 0:\n scale_factor = sqrt(len(data_time) / sig_energy)\n else:\n scale_factor = 1\n data_time *= scale_factor\n P += var(data_time)\n time_ofdm_symbols[ant, time_symb_start: time_symb_end] = data_time\n\n for ant in range(self.num_ant):\n time_ofdm_symbols[ant, time_symb_start: time_symb_end] *= (1 / sqrt(P))\n\n return time_ofdm_symbols", "def process_bdes(label: str,\n species_dict: dict,\n ) -> dict:\n source 
= species_dict[label]\n bde_report = dict()\n if source.e0 is None:\n logger.error(f'Cannot calculate BDEs without E0 for {label}. Make sure freq and sp jobs ran successfully '\n f'for this species.')\n return bde_report\n for bde_indices in source.bdes:\n found_a_label, cyclic = False, False\n # Index 0 of the tuple:\n if source.mol.atoms[bde_indices[0] - 1].is_hydrogen():\n e1 = species_dict['H'].e0\n else:\n bde_label = f'{label}_BDE_{bde_indices[0]}_{bde_indices[1]}_A'\n bde_cyclic_label = f'{label}_BDE_{bde_indices[0]}_{bde_indices[1]}_cyclic'\n cyclic = bde_cyclic_label in species_dict.keys()\n if not cyclic and bde_label not in species_dict.keys():\n logger.error(f'Could not find BDE species {bde_label} for generating a BDE report for {label}. '\n f'Not generating a BDE report for this species.')\n return dict()\n found_a_label = True\n e1 = species_dict[bde_cyclic_label if cyclic else bde_label].e0\n # Index 1 of the tuple:\n if cyclic:\n e2 = 0\n elif source.mol.atoms[bde_indices[1] - 1].is_hydrogen():\n e2 = species_dict['H'].e0\n else:\n letter = 'B' if found_a_label else 'A'\n bde_label = f'{label}_BDE_{bde_indices[0]}_{bde_indices[1]}_{letter}'\n if bde_label not in species_dict.keys():\n logger.error(f'Could not find BDE species {bde_label} for generating a BDE report for {label}. '\n f'Not generating a BDE report for this species.')\n return dict()\n e2 = species_dict[bde_label].e0\n if e1 is not None and e2 is not None:\n bde_report[bde_indices] = e1 + e2 - source.e0 # products - reactant\n else:\n bde_report[bde_indices] = 'N/A'\n logger.error(f'Could not calculate BDE for {label} between atoms '\n f'{bde_indices[0]} ({source.mol.atoms[bde_indices[0] - 1].element.symbol}) '\n f'and {bde_indices[1]} ({source.mol.atoms[bde_indices[1] - 1].element.symbol})')\n return bde_report", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def symbols(self):\n def _iter_symbols(symbol_values):\n # The initial charset doesn't matter, as the start codes have the same symbol values in all charsets.\n charset = 'A'\n\n shift_charset = None\n for symbol_value in symbol_values:\n if shift_charset:\n symbol = self._val2sym[shift_charset][symbol_value]\n shift_charset = None\n else:\n symbol = self._val2sym[charset][symbol_value]\n\n if symbol in (self.Special.START_A, self.Special.CODE_A):\n charset = 'A'\n elif symbol in (self.Special.START_B, self.Special.CODE_B):\n charset = 'B'\n elif symbol in (self.Special.START_C, self.Special.CODE_C):\n charset = 'C'\n elif symbol in (self.Special.SHIFT_A,):\n shift_charset = 'A'\n elif symbol in (self.Special.SHIFT_B,):\n shift_charset = 'B'\n\n yield symbol\n\n return list(_iter_symbols(self.symbol_values))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_chunks_to_symbols_sf_sptr
__init__(self, p) > digital_chunks_to_symbols_sf_sptr
def __init__(self, *args):
        this = _digital_swig.new_digital_chunks_to_symbols_sf_sptr(*args)
        try: self.this.append(this)
        except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_ic_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_if_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, s: ghidra.program.model.symbol.Symbol, row: int, charOffset: int):\n ...", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n\n self.symbolings = open(\n artifact_manager.get_temp_file(\n config.SYMBOL_OPENINGS_CLOSINGS_SORTED),\n 'r')\n # The offsets_db is really small, and we need to read and write\n # from it a fair bit, so suck it into memory\n offsets_db = file(\n artifact_manager.get_temp_file(config.SYMBOL_OFFSETS_DB), 'rb')\n # A map from symbol_id to offset. The values of this map are\n # incremented as the openings and closings for a symbol are\n # consumed.\n self.offsets = cPickle.load(offsets_db)\n offsets_db.close()", "def chunks_to_symbols_sf(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_sf(*args, **kwargs)", "def prepare_symbols(self):", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, symbol_group):\n self.symbol_group = symbol_group\n self.print_positions()", "def chunks_to_symbols_sc(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_sc(*args, **kwargs)", "def __init__(self):\n self.pts = []", "def __init__(self):\n this = _coin.new_SoSFVec2s()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoSFBox2s()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoSFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
chunks_to_symbols_sf(__dummy_4__ symbol_table, int D = 1) > digital_chunks_to_symbols_sf_sptr

Map a stream of symbol indexes (unpacked bytes or shorts) to stream of float or complex constellation points in D dimensions (D = 1 by default)
def chunks_to_symbols_sf(*args, **kwargs):
    return _digital_swig.chunks_to_symbols_sf(*args, **kwargs)
[ "def chunks_to_symbols_sc(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_sc(*args, **kwargs)", "def chunks_to_symbols_ic(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_ic(*args, **kwargs)", "def chunks_to_symbols_bf(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_bf(*args, **kwargs)", "def chunks_to_symbols_bc(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_bc(*args, **kwargs)", "def setup_symbols_for_species_pKs(self, sid_list):\n new_variable_index = 0\n self.variable_vector_dict = {}\n for species_id in sid_list:\n pK_data_val = self.get_pK_val(species_id) \n self.variable_vector_dict[species_id] = [symbols('x[%d]'%new_variable_index), pK_data_val]\n new_variable_index += 1\n #for each species_id, set up the sequence of species that eventually lead to least protonated state, for binding constant calculation\n self.compounds_species_id_sequence = {}\n for species_id in self.compounds_data_dict.keys():\n self.compounds_species_id_sequence[species_id] = self.get_sequence_of_species_ids(species_id)", "def insert_snp500_symbols(df_symbol):\n\n # connect to the MySQL instance\n db_host = 'localhost'\n db_user = 'xiao'\n db_pass = 'Wx3921786!'\n db_name = 'securities_master'\n connect = mdb.connect(host=db_host, user=db_user, password=db_pass, db=db_name)\n mysql_cursor = connect.cursor()\n\n # create req strings\n table_name = 'symbol'\n columns = ','.join(df_symbol.columns.values)\n values = (\"%s, \" * 7)[:-2]\n req = \"\"\"INSERT INTO %s (%s) VALUES (%s)\"\"\" % (table_name, columns, values)\n\n # insert to MySQL with max chunk_size = 1000\n chunk_size = 1000\n for i in range(0, len(df_symbol.index), chunk_size):\n chunk_df = df_symbol.iloc[i: i+chunk_size]\n data = [tuple(x) for x in chunk_df.values.tolist()]\n mysql_cursor.executemany(req, data)\n connect.commit()\n\n mysql_cursor.close()", "def get_symbol_map():\n functions = {}\n for ea in Segments():\n for funcea in Functions(SegStart(ea), SegEnd(ea)):\n size = FindFuncEnd(funcea) - funcea\n functions[funcea] = (GetFunctionName(funcea), size)\n # It may not be necessary to sort by ea, but be safe...\n output_lines = []\n for i, (ea, (name, size)) in enumerate(sorted(functions.items())):\n if len(name) > 255:\n print \"ClemSym: truncating name\", name\n name = name[:255]\n line = \"%d: %s @ %07x %d\" % (i, name, ea, size)\n output_lines.append(line)\n return '\\n'.join(output_lines)", "def get_raw_smiles(file_name, smiles_char_dict, open_fn, extract_fn) -> List[str]:\n data = []\n # open the gzipped chembl filegzip.open\n with open_fn(file_name, 'rt') as f:\n\n line_count = 0\n for line in f:\n\n line_count += 1\n # extract the canonical smiles column\n if platform.system() == \"Windows\":\n line = line.decode(\"utf-8\")\n\n # smiles = line.split('\\t')[1]\n\n smiles = extract_fn(line)\n\n # only keep reasonably sized molecules\n if 5 <= len(smiles) <= 200:\n\n smiles = split_charged_mol(smiles)\n\n if smiles_char_dict.allowed(smiles):\n # check whether the molecular graph consists of\n # multiple connected components (eg. 
in salts)\n # if so, just keep the largest one\n\n data.append(smiles)\n\n print(f'Processed {len(data)} molecules from {line_count} lines in the input file.')\n\n return data", "def ExtractSymbols(self, native_heaps, sym_paths):\n raise NotImplementedError()", "def fstrmsymbols_cmd(in_fst=None, out_fst=None, in_symbols=0,\n out_symbols=0, pipe=True, **kwargs):\n\n cmd = ''\n if not in_symbols and not out_symbols:\n cmd += 'cat'\n\n if in_symbols:\n cmd = 'fstrmsymbols'\n in_symbols = in_symbols if isinstance(in_symbols, (list, tuple))\\\n else [in_symbols]\n fmt = ' \"echo ' + '{} ' * len(in_symbols) + '|\"'\n cmd += fmt.format(*in_symbols)\n if out_symbols:\n cmd += _add_input_output(in_fst, pipe=True)\n in_fst = None\n\n if out_symbols:\n cmd += 'fstrmsymbols --remove-from-output=true'\n out_symbols = out_symbols if isinstance(out_symbols, (list, tuple))\\\n else [out_symbols]\n fmt = ' \"echo ' + '{} ' * len(out_symbols) + '|\"'\n cmd += fmt.format(*out_symbols)\n\n cmd += _add_input_output(in_fst)\n cmd += fstpostprocess_cmd(None, out_fst, pipe=pipe, **kwargs)\n\n return cmd", "def make_dom_map(pmt_directions, values, nside=512, d=0.2, smoothing=0.1):\n import healpy as hp\n\n discs = [hp.query_disc(nside, dir, 0.2) for dir in pmt_directions]\n npix = hp.nside2npix(nside)\n pixels = np.zeros(npix)\n for disc, value in zip(discs, values):\n for d in disc:\n pixels[d] = value\n if smoothing > 0:\n return hp.sphtfunc.smoothing(pixels, fwhm=smoothing, iter=1)\n return pixels", "def Sym(s, symbol_table={}):\n if s not in symbol_table: symbol_table[s] = Symbol(s)\n return symbol_table[s]", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def dump_symbol_set(fp, ss):\n first = True\n fp.write(\"{\")\n\n # Make each iteration produce uniform result\n ss = list(ss)\n ss.sort()\n\n for i in ss:\n # Must be a symbol element\n assert (i.is_symbol() is True)\n if first is False:\n fp.write(\", \")\n else:\n first = False\n\n fp.write(i.name)\n\n fp.write(\"}\")\n\n return", "def init_SDFITS(DSS,tablesize,time_column=False):\n # create the primary HDU and extension headers\n prihdu = pyfits.PrimaryHDU()\n hdr = pyfits.CardList()\n cols = make_basic_columns(tablesize,time_column)\n \n # add telescope location data to the table header\n logger.debug(\"DSS: %s\", DSS)\n if type(DSS) == list:\n # This may seem odd but in the most general case there could be two or\n # more antennas, like in an interferometer. In that case, however,\n # \"single dish\" FITS format doesn't apply. 
We'll just assume a list of\n # length 1.\n dss = DSS[0]\n else:\n dss = DSS\n if dss !=0 :\n hdr.append(pyfits.Card('telescop', dss.name))\n hdr.append(pyfits.Card('sitelong', dss['longitude']))\n hdr.append(pyfits.Card('sitelat', dss['latitude']))\n hdr.append(pyfits.Card('siteelev', dss['elevation']))\n hdr.append(pyfits.Card('obsgeo-x', dss['geo-x']))\n hdr.append(pyfits.Card('obsgeo-y', dss['geo-y']))\n hdr.append(pyfits.Card('obsgeo-z', dss['geo-z']))\n hdr.append(pyfits.Card('TIMESYS', 'UTC'))\n \n # there will always be four axes in the data array\n hdr.append(pyfits.Card('MAXIS',4))\n # we will always have the first data axis with frequency in the\n # from of the observatory, or time-delay for correlation functions\n # (cannot set MAXIS1 until we know the size of the spectrum)\n # hdr.append(pyfits.Card('MAXIS1',?))\n hdr.append(pyfits.Card('CTYPE1','FREQ-OBS'))\n \n # the second and third axes will be right ascension and declination\n hdr.append(pyfits.Card('MAXIS2',1))\n hdr.append(pyfits.Card('CTYPE2','RA---GLS'))\n \n hdr.append(pyfits.Card('MAXIS3',1))\n hdr.append(pyfits.Card('CTYPE3','DEC--GLS'))\n\n # the fourth axis is polarization. As a default\n hdr.append(pyfits.Card('MAXIS4',1))\n hdr.append(pyfits.Card('CTYPE4','STOKES'))\n\n if time_column:\n # the optional fifth data axis will be time\n # (cannot set MAXIS5 until we know the number of spectra)\n # hdr.append(pyfits.Card('MAXIS4',?))\n hdr.append(pyfits.Card('CTYPE5','TIME'))\n \n return prihdu, hdr, cols", "def output_shp_segmented(self):\n ofn = \"{}_{}_tracks_segmented\".format(\n self.year,\n \"ATL\" if list(self.tc.keys())[0][:2] == \"AL\" else \"PAC\"\n )\n c = itertools.count(0)\n with shapefile.Writer(ofn,shapeType=3) as gis:\n gis.field(\"ID\",\"N\",\"3\")\n gis.field(\"ATCFID\",\"C\",\"8\")\n gis.field(\"ENTRY_INDEX\",\"N\")\n gis.field(\"NAME\",\"C\",\"10\")\n gis.field(\"ENTRY_TIME\",\"C\",\"16\")\n gis.field(\"NEXT_ENTRY_TIME\",\"C\",\"16\")\n gis.field(\"LAT\",\"N\",decimal=1)\n gis.field(\"LON\",\"N\",decimal=1)\n gis.field(\"NEXT_ENTRY_LAT\",\"N\",decimal=1)\n gis.field(\"NEXT_ENTRY_LON\",\"N\",decimal=1)\n gis.field(\"STATUS\",\"C\",\"3\")\n gis.field(\"PEAK_WIND\",\"N\",\"3\")\n gis.field(\"MIN_MSLP\",\"N\",\"4\")\n for TC in [t[1] for t in self.tc.items()]:\n for track in range(len(TC.entry)):\n gis.record(\n next(c),\n TC.atcfid,\n track,\n TC.name,\n TC.entry[track].entrytime.isoformat(),\n TC.entry[track+1].entrytime.isoformat() if track != len(TC.entry)-1 else None,\n TC.entry[track].location[0],\n TC.entry[track].location[1],\n TC.entry[track+1].location[0] if track != len(TC.entry)-1 else None,\n TC.entry[track+1].location[1] if track != len(TC.entry)-1 else None,\n TC.entry[track].status,\n TC.entry[track].wind if TC.entry[track].wind > 0 else \"\",\n TC.entry[track].mslp if TC.entry[track].mslp != None else \"\"\n )\n if track != len(TC.entry)-1:\n gis.line([[TC.entry[track].location_reversed,TC.entry[track+1].location_reversed]])\n else:\n gis.null()", "def SSpcGroup(SGData,SSymbol):\n \n def fixMonoOrtho():\n mod = ''.join(modsym).replace('1/2','0').replace('1','0')\n if SGData['SGPtGrp'] in ['2','m']: #OK\n if mod in ['a00','0b0','00g']:\n result = [i*-1 for i in SGData['SSGKl']]\n else:\n result = SGData['SSGKl'][:]\n if '/' in mod:\n return [i*-1 for i in result]\n else:\n return result\n elif SGData['SGPtGrp'] == '2/m': #OK\n if mod in ['a00','0b0','00g']:\n result = SGData['SSGKl'][:]\n else:\n result = [i*-1 for i in SGData['SSGKl']]\n if '/' in mod:\n return [i*-1 for i in 
result]\n else:\n return result\n else: #orthorhombic\n return [-SSGKl[i] if mod[i] in ['a','b','g'] else SSGKl[i] for i in range(3)]\n \n def extendSSGOps(SSGOps):\n for OpA in SSGOps:\n OpAtxt = SSMT2text(OpA)\n if 't' not in OpAtxt:\n continue\n for OpB in SSGOps:\n OpBtxt = SSMT2text(OpB)\n if 't' not in OpBtxt:\n continue\n OpC = list(SGProd(OpB,OpA))\n OpC[1] %= 1.\n OpCtxt = SSMT2text(OpC)\n# print OpAtxt.replace(' ','')+' * '+OpBtxt.replace(' ','')+' = '+OpCtxt.replace(' ','')\n for k,OpD in enumerate(SSGOps):\n OpDtxt = SSMT2text(OpD)\n OpDtxt2 = ''\n if SGData['SGGray']: \n OpDtxt2 = SSMT2text([OpD[0],OpD[1]+np.array([0.,0.,0.,.5])])\n# print ' ('+OpCtxt.replace(' ','')+' = ? '+OpDtxt.replace(' ','')+')'\n if OpCtxt == OpDtxt:\n continue\n elif OpCtxt == OpDtxt2:\n continue\n elif OpCtxt.split(',')[:3] == OpDtxt.split(',')[:3]:\n if 't' not in OpDtxt:\n SSGOps[k] = OpC\n# print k,' new:',OpCtxt.replace(' ','')\n break\n else:\n OpCtxt = OpCtxt.replace(' ','')\n OpDtxt = OpDtxt.replace(' ','')\n Txt = OpCtxt+' conflicts with '+OpDtxt\n# print (Txt)\n return False,Txt\n return True,SSGOps\n \n def findMod(modSym):\n for a in ['a','b','g']:\n if a in modSym:\n return a\n \n def genSSGOps():\n SSGOps = SSGData['SSGOps'][:]\n iFrac = {}\n for i,frac in enumerate(SSGData['modSymb']):\n if frac in ['1/2','1/3','1/4','1/6','1']:\n iFrac[i] = frac+'.'\n# print SGData['SpGrp']+SSymbol\n# print 'SSGKl',SSGKl,'genQ',genQ,'iFrac',iFrac,'modSymb',SSGData['modSymb']\n# set identity & 1,-1; triclinic\n SSGOps[0][0][3,3] = 1.\n## expand if centrosymmetric\n# if SGData['SGInv']:\n# SSGOps += [[-1*M,V] for M,V in SSGOps[:]]\n# monoclinic - all done & all checked\n if SGData['SGPtGrp'] in ['2','m']: #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n for i in iFrac:\n SSGOps[1][0][3,i] = -SSGKl[0]\n elif SGData['SGPtGrp'] == '2/m': #OK\n SSGOps[1][0][3,3] = SSGKl[1]\n if 's' in gensym:\n SSGOps[1][1][3] = 0.5\n for i in iFrac:\n SSGOps[1][0][3,i] = SSGKl[0]\n \n# orthorhombic - all OK not fully checked\n elif SGData['SGPtGrp'] in ['222','mm2','m2m','2mm']: #OK\n if SGData['SGPtGrp'] == '222':\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[1,2],2:[1,3]},'b':{2:[3,2],0:[1,2]}} #OK\n elif SGData['SGPtGrp'] == 'mm2':\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]}} #OK\n elif SGData['SGPtGrp'] == 'm2m':\n OrOps = {'b':{0:[1,2],2:[3,2]},'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]}} #OK\n elif SGData['SGPtGrp'] == '2mm':\n OrOps = {'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]},'g':{0:[1,3],1:[2,3]}} #OK\n a = findMod(SSGData['modSymb'])\n OrFrac = OrOps[a]\n for j in iFrac:\n for i in OrFrac[j]:\n SSGOps[i][0][3,j] = -2.*eval(iFrac[j])*SSGKl[i-1]\n for i in [0,1,2]:\n SSGOps[i+1][0][3,3] = SSGKl[i]\n SSGOps[i+1][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] == 'mmm': #OK\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]}} \n a = findMod(SSGData['modSymb'])\n if a == 'g':\n SSkl = [1,1,1]\n elif a == 'a':\n SSkl = [-1,1,-1]\n else:\n SSkl = [1,-1,-1]\n OrFrac = OrOps[a]\n for j in iFrac:\n for i in OrFrac[j]:\n SSGOps[i][0][3,j] = -2.*eval(iFrac[j])*SSkl[i-1]\n for i in [0,1,2]:\n SSGOps[i+1][0][3,3] = SSkl[i]\n SSGOps[i+1][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps \n# tetragonal - all done & checked\n elif SGData['SGPtGrp'] == '4': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = 
-1\n elif SGData['SGPtGrp'] == '-4': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = 1\n elif SGData['SGPtGrp'] in ['4/m',]: #OK\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -SSGKl[0]\n for i,j in enumerate([1,3]):\n SSGOps[j][0][3,3] = 1\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] in ['422','4mm','-42m','-4m2',]: #OK\n iGens = [1,4,5]\n if SGData['SGPtGrp'] in ['4mm','-4m2',]:\n iGens = [1,6,7]\n for i,j in enumerate(iGens):\n if '1/2' in SSGData['modSymb'] and i < 2:\n SSGOps[j][0][3,1] = SSGKl[i]\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n if 's' in gensym and j == 6:\n SSGOps[j][1][3] = -genQ[i]\n else:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] in ['4/mmm',]:#OK\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -SSGKl[0]\n SSGOps[6][0][3,1] = SSGKl[1]\n if modsym:\n SSGOps[1][1][3] = -genQ[3]\n for i,j in enumerate([1,2,6,7]):\n SSGOps[j][0][3,3] = 1\n SSGOps[j][1][3] = genQ[i]\n E,Result = extendSSGOps(SSGOps)\n if not E:\n return E,Result\n else:\n SSGOps = Result\n \n# trigonal - all done & checked\n elif SGData['SGPtGrp'] == '3': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] == '-3': #OK\n SSGOps[1][0][3,3] = -SSGKl[0]\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] in ['312','3m','-3m','-3m1','3m1']: #OK\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n for i,j in enumerate([1,5]):\n if SGData['SGPtGrp'] in ['3m','-3m']:\n SSGOps[j][0][3,3] = 1\n else: \n SSGOps[j][0][3,3] = SSGKl[i+1]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n elif SGData['SGPtGrp'] in ['321','32']: #OK\n for i,j in enumerate([1,4]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n elif SGData['SGPtGrp'] in ['31m','-31m']: #OK\n ids = [1,3]\n if SGData['SGPtGrp'] == '-31m':\n ids = [1,3]\n if '1/3' in SSGData['modSymb']:\n SSGOps[ids[0]][0][3,1] = -SSGKl[0]\n for i,j in enumerate(ids):\n SSGOps[j][0][3,3] = 1\n if genQ[i+1]:\n SSGOps[j][1][3] = genQ[i+1]\n \n# hexagonal all done & checked\n elif SGData['SGPtGrp'] == '6': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] == '-6': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n elif SGData['SGPtGrp'] in ['6/m',]: #OK\n SSGOps[1][0][3,3] = -SSGKl[1]\n SSGOps[1][1][3] = genQ[0]\n SSGOps[2][1][3] = genQ[1]\n elif SGData['SGPtGrp'] in ['622',]: #OK\n for i,j in enumerate([1,9,8]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = -genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n \n elif SGData['SGPtGrp'] in ['6mm','-62m','-6m2',]: #OK\n for i,j in enumerate([1,6,7]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n elif SGData['SGPtGrp'] in ['6/mmm',]: # OK\n for i,j in enumerate([1,2,10,11]):\n SSGOps[j][0][3,3] = 1\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n elif SGData['SGPtGrp'] in ['1','-1']: #triclinic - done\n return True,SSGOps\n E,SSGOps = extendSSGOps(SSGOps)\n return E,SSGOps\n \n def specialGen(gensym,modsym):\n sym = ''.join(gensym)\n if SGData['SGPtGrp'] in ['2/m',] and 'n' in SGData['SpGrp']:\n if 's' in sym:\n gensym = 'ss'\n if SGData['SGPtGrp'] in ['-62m',] and sym == '00s':\n gensym = '0ss'\n elif 
SGData['SGPtGrp'] in ['222',]:\n if sym == '00s':\n gensym = '0ss'\n elif sym == '0s0':\n gensym = 'ss0'\n elif sym == 's00':\n gensym = 's0s'\n elif SGData['SGPtGrp'] in ['mmm',]:\n if 'g' in modsym:\n if sym == 's00':\n gensym = 's0s'\n elif sym == '0s0':\n gensym = '0ss'\n elif 'a' in modsym:\n if sym == '0s0':\n gensym = 'ss0'\n elif sym == '00s':\n gensym = 's0s'\n elif 'b' in modsym:\n if sym == '00s':\n gensym = '0ss'\n elif sym == 's00':\n gensym = 'ss0'\n return gensym\n \n Fracs = {'1/2':0.5,'1/3':1./3,'1':1.0,'0':0.,'s':.5,'t':1./3,'q':.25,'h':-1./6,'a':0.,'b':0.,'g':0.}\n if SGData['SGLaue'] in ['m3','m3m']:\n return '(3+1) superlattices not defined for cubic space groups',None\n elif SGData['SGLaue'] in ['3R','3mR']:\n return '(3+1) superlattices not defined for rhombohedral settings - use hexagonal setting',None\n try:\n modsym,gensym = splitSSsym(SSymbol)\n except ValueError:\n return 'Error in superspace symbol '+SSymbol,None\n modQ = [Fracs[mod] for mod in modsym]\n SSGKl = SGData['SSGKl'][:]\n if SGData['SGLaue'] in ['2/m','mmm']:\n SSGKl = fixMonoOrtho()\n Ngen = len(gensym)\n if SGData.get('SGGray',False):\n Ngen -= 1\n if len(gensym) and Ngen != len(SSGKl):\n return 'Wrong number of items in generator symbol '+''.join(gensym),None\n gensym = specialGen(gensym[:Ngen],modsym)\n genQ = [Fracs[mod] for mod in gensym[:Ngen]]\n if not genQ:\n genQ = [0,0,0,0]\n SSgSpc = SGData['SpGrp']+SSymbol\n if SGData['SGGray']:\n SSgSpc = SSgSpc.replace('(',\" 1'(\")\n SSGData = {'SSpGrp':SSgSpc,'modQ':modQ,'modSymb':modsym,'SSGKl':SSGKl}\n SSCen = np.zeros((len(SGData['SGCen']),4))\n for icen,cen in enumerate(SGData['SGCen']):\n SSCen[icen,0:3] = cen\n if 'BNSlattsym' in SGData and '_' in SGData['BNSlattsym'][0]:\n Ncen = len(SGData['SGCen'])\n for icen in range(Ncen//2,Ncen):\n SSCen[icen,3] = 0.5\n SSGData['SSGCen'] = SSCen%1.\n SSGData['SSGOps'] = []\n for iop,op in enumerate(SGData['SGOps']):\n T = np.zeros(4)\n ssop = np.zeros((4,4))\n ssop[:3,:3] = op[0]\n T[:3] = op[1]\n SSGData['SSGOps'].append([ssop,T])\n E,Result = genSSGOps()\n if E:\n SSGData['SSGOps'] = Result\n if DEBUG:\n print ('Super spacegroup operators for '+SSGData['SSpGrp'])\n for Op in Result:\n print (SSMT2text(Op).replace(' ',''))\n if SGData['SGInv']: \n for Op in Result:\n Op = [-Op[0],-Op[1]%1.]\n print (SSMT2text(Op).replace(' ','')) \n return None,SSGData\n else:\n return Result+'\\nOperator conflict - incorrect superspace symbol',None", "def getDens(s3d, sC=1):\n\n siia, siiae = s3d.extractPlane(line='siia', sC=sC, meth='sum')\n siib, siibe = s3d.extractPlane(line='siib', sC=sC, meth='sum')\n nemap = calcDens(siia, siib)\n snmap = (1./(1./(siia/siiae)**2 + 1./(siib/siibe)**2))**0.5\n return nemap, snmap", "def getSymbol(id):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) > digital_chunks_to_symbols_sc_sptr
__init__(self, p) > digital_chunks_to_symbols_sc_sptr
def __init__(self, *args):
        this = _digital_swig.new_digital_chunks_to_symbols_sc_sptr(*args)
        try: self.this.append(this)
        except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_ic_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_if_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, s: ghidra.program.model.symbol.Symbol, row: int, charOffset: int):\n ...", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def prepare_symbols(self):", "def __init__(self):\n\n self.symbolings = open(\n artifact_manager.get_temp_file(\n config.SYMBOL_OPENINGS_CLOSINGS_SORTED),\n 'r')\n # The offsets_db is really small, and we need to read and write\n # from it a fair bit, so suck it into memory\n offsets_db = file(\n artifact_manager.get_temp_file(config.SYMBOL_OFFSETS_DB), 'rb')\n # A map from symbol_id to offset. The values of this map are\n # incremented as the openings and closings for a symbol are\n # consumed.\n self.offsets = cPickle.load(offsets_db)\n offsets_db.close()", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, symbol_group):\n self.symbol_group = symbol_group\n self.print_positions()", "def chunks_to_symbols_sc(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_sc(*args, **kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, Unit_Cell, Na, Nb, Nc, spaceGroup):\n\n self.unit_cell = Unit_Cell\n \n #This is a list of all the crystallographic unit cells that make up\n #this magnetic unit cell\n self.AllUnitCells = [Unit_Cell]\n \n #bonds (interactions) are stored in this class since it is treated\n #as the cutoff cell.(The cell containing all unique bonds)\n self.Bonds = []\n \n self.space_Group = spaceGroup\n \n self.Na = Na\n self.Nb = Nb\n self.Nc = Nc\n \n #generate the magnetic Cell by translating the unit cell\n for i in range(0, Na):\n for j in range(0, Nb):\n for k in range(0, Nc):\n if i !=0 or j != 0 or k != 0: #to not duplicate original unit cell\n self.AllUnitCells.append(self.unit_cell.translateCell(i,j,k))\n \n\n #Recording bonds that are mapped back onto themselves by a symOp\n self.bondConstraints = []", "def __init__(self):\n this = _coin.new_charp()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, symbolsDict):\n Command.__init__(self, symbolsDict)\n self.__symbol = None", "def __init__(self, grammar):\r\n for name, symbol in grammar.symbol2number.items():\r\n setattr(self, name, symbol)", "def __init__(\n self,\n symbol,\n order_type,\n quantity,\n direction\n ):\n self.symbol = symbol\n self.order_type = order_type\n 
self.quantity = quantity\n self.direction = direction" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
chunks_to_symbols_sc(gr_complex_vector symbol_table, int D = 1) > digital_chunks_to_symbols_sc_sptr

Map a stream of symbol indexes (unpacked bytes or shorts) to stream of float or complex constellation points in D dimensions (D = 1 by default)
def chunks_to_symbols_sc(*args, **kwargs):
    return _digital_swig.chunks_to_symbols_sc(*args, **kwargs)
[ "def chunks_to_symbols_sf(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_sf(*args, **kwargs)", "def chunks_to_symbols_ic(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_ic(*args, **kwargs)", "def setup_symbols_for_species_pKs(self, sid_list):\n new_variable_index = 0\n self.variable_vector_dict = {}\n for species_id in sid_list:\n pK_data_val = self.get_pK_val(species_id) \n self.variable_vector_dict[species_id] = [symbols('x[%d]'%new_variable_index), pK_data_val]\n new_variable_index += 1\n #for each species_id, set up the sequence of species that eventually lead to least protonated state, for binding constant calculation\n self.compounds_species_id_sequence = {}\n for species_id in self.compounds_data_dict.keys():\n self.compounds_species_id_sequence[species_id] = self.get_sequence_of_species_ids(species_id)", "def chunks_to_symbols_bc(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_bc(*args, **kwargs)", "def chunks_to_symbols_bf(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_bf(*args, **kwargs)", "def SSpcGroup(SGData,SSymbol):\n \n def fixMonoOrtho():\n mod = ''.join(modsym).replace('1/2','0').replace('1','0')\n if SGData['SGPtGrp'] in ['2','m']: #OK\n if mod in ['a00','0b0','00g']:\n result = [i*-1 for i in SGData['SSGKl']]\n else:\n result = SGData['SSGKl'][:]\n if '/' in mod:\n return [i*-1 for i in result]\n else:\n return result\n elif SGData['SGPtGrp'] == '2/m': #OK\n if mod in ['a00','0b0','00g']:\n result = SGData['SSGKl'][:]\n else:\n result = [i*-1 for i in SGData['SSGKl']]\n if '/' in mod:\n return [i*-1 for i in result]\n else:\n return result\n else: #orthorhombic\n return [-SSGKl[i] if mod[i] in ['a','b','g'] else SSGKl[i] for i in range(3)]\n \n def extendSSGOps(SSGOps):\n for OpA in SSGOps:\n OpAtxt = SSMT2text(OpA)\n if 't' not in OpAtxt:\n continue\n for OpB in SSGOps:\n OpBtxt = SSMT2text(OpB)\n if 't' not in OpBtxt:\n continue\n OpC = list(SGProd(OpB,OpA))\n OpC[1] %= 1.\n OpCtxt = SSMT2text(OpC)\n# print OpAtxt.replace(' ','')+' * '+OpBtxt.replace(' ','')+' = '+OpCtxt.replace(' ','')\n for k,OpD in enumerate(SSGOps):\n OpDtxt = SSMT2text(OpD)\n OpDtxt2 = ''\n if SGData['SGGray']: \n OpDtxt2 = SSMT2text([OpD[0],OpD[1]+np.array([0.,0.,0.,.5])])\n# print ' ('+OpCtxt.replace(' ','')+' = ? 
'+OpDtxt.replace(' ','')+')'\n if OpCtxt == OpDtxt:\n continue\n elif OpCtxt == OpDtxt2:\n continue\n elif OpCtxt.split(',')[:3] == OpDtxt.split(',')[:3]:\n if 't' not in OpDtxt:\n SSGOps[k] = OpC\n# print k,' new:',OpCtxt.replace(' ','')\n break\n else:\n OpCtxt = OpCtxt.replace(' ','')\n OpDtxt = OpDtxt.replace(' ','')\n Txt = OpCtxt+' conflicts with '+OpDtxt\n# print (Txt)\n return False,Txt\n return True,SSGOps\n \n def findMod(modSym):\n for a in ['a','b','g']:\n if a in modSym:\n return a\n \n def genSSGOps():\n SSGOps = SSGData['SSGOps'][:]\n iFrac = {}\n for i,frac in enumerate(SSGData['modSymb']):\n if frac in ['1/2','1/3','1/4','1/6','1']:\n iFrac[i] = frac+'.'\n# print SGData['SpGrp']+SSymbol\n# print 'SSGKl',SSGKl,'genQ',genQ,'iFrac',iFrac,'modSymb',SSGData['modSymb']\n# set identity & 1,-1; triclinic\n SSGOps[0][0][3,3] = 1.\n## expand if centrosymmetric\n# if SGData['SGInv']:\n# SSGOps += [[-1*M,V] for M,V in SSGOps[:]]\n# monoclinic - all done & all checked\n if SGData['SGPtGrp'] in ['2','m']: #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n for i in iFrac:\n SSGOps[1][0][3,i] = -SSGKl[0]\n elif SGData['SGPtGrp'] == '2/m': #OK\n SSGOps[1][0][3,3] = SSGKl[1]\n if 's' in gensym:\n SSGOps[1][1][3] = 0.5\n for i in iFrac:\n SSGOps[1][0][3,i] = SSGKl[0]\n \n# orthorhombic - all OK not fully checked\n elif SGData['SGPtGrp'] in ['222','mm2','m2m','2mm']: #OK\n if SGData['SGPtGrp'] == '222':\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[1,2],2:[1,3]},'b':{2:[3,2],0:[1,2]}} #OK\n elif SGData['SGPtGrp'] == 'mm2':\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]}} #OK\n elif SGData['SGPtGrp'] == 'm2m':\n OrOps = {'b':{0:[1,2],2:[3,2]},'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]}} #OK\n elif SGData['SGPtGrp'] == '2mm':\n OrOps = {'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]},'g':{0:[1,3],1:[2,3]}} #OK\n a = findMod(SSGData['modSymb'])\n OrFrac = OrOps[a]\n for j in iFrac:\n for i in OrFrac[j]:\n SSGOps[i][0][3,j] = -2.*eval(iFrac[j])*SSGKl[i-1]\n for i in [0,1,2]:\n SSGOps[i+1][0][3,3] = SSGKl[i]\n SSGOps[i+1][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] == 'mmm': #OK\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]}} \n a = findMod(SSGData['modSymb'])\n if a == 'g':\n SSkl = [1,1,1]\n elif a == 'a':\n SSkl = [-1,1,-1]\n else:\n SSkl = [1,-1,-1]\n OrFrac = OrOps[a]\n for j in iFrac:\n for i in OrFrac[j]:\n SSGOps[i][0][3,j] = -2.*eval(iFrac[j])*SSkl[i-1]\n for i in [0,1,2]:\n SSGOps[i+1][0][3,3] = SSkl[i]\n SSGOps[i+1][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps \n# tetragonal - all done & checked\n elif SGData['SGPtGrp'] == '4': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n elif SGData['SGPtGrp'] == '-4': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = 1\n elif SGData['SGPtGrp'] in ['4/m',]: #OK\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -SSGKl[0]\n for i,j in enumerate([1,3]):\n SSGOps[j][0][3,3] = 1\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] in ['422','4mm','-42m','-4m2',]: #OK\n iGens = [1,4,5]\n if SGData['SGPtGrp'] in ['4mm','-4m2',]:\n iGens = [1,6,7]\n for i,j in enumerate(iGens):\n if '1/2' in SSGData['modSymb'] and i < 2:\n SSGOps[j][0][3,1] = SSGKl[i]\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n if 
's' in gensym and j == 6:\n SSGOps[j][1][3] = -genQ[i]\n else:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] in ['4/mmm',]:#OK\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -SSGKl[0]\n SSGOps[6][0][3,1] = SSGKl[1]\n if modsym:\n SSGOps[1][1][3] = -genQ[3]\n for i,j in enumerate([1,2,6,7]):\n SSGOps[j][0][3,3] = 1\n SSGOps[j][1][3] = genQ[i]\n E,Result = extendSSGOps(SSGOps)\n if not E:\n return E,Result\n else:\n SSGOps = Result\n \n# trigonal - all done & checked\n elif SGData['SGPtGrp'] == '3': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] == '-3': #OK\n SSGOps[1][0][3,3] = -SSGKl[0]\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] in ['312','3m','-3m','-3m1','3m1']: #OK\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n for i,j in enumerate([1,5]):\n if SGData['SGPtGrp'] in ['3m','-3m']:\n SSGOps[j][0][3,3] = 1\n else: \n SSGOps[j][0][3,3] = SSGKl[i+1]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n elif SGData['SGPtGrp'] in ['321','32']: #OK\n for i,j in enumerate([1,4]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n elif SGData['SGPtGrp'] in ['31m','-31m']: #OK\n ids = [1,3]\n if SGData['SGPtGrp'] == '-31m':\n ids = [1,3]\n if '1/3' in SSGData['modSymb']:\n SSGOps[ids[0]][0][3,1] = -SSGKl[0]\n for i,j in enumerate(ids):\n SSGOps[j][0][3,3] = 1\n if genQ[i+1]:\n SSGOps[j][1][3] = genQ[i+1]\n \n# hexagonal all done & checked\n elif SGData['SGPtGrp'] == '6': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] == '-6': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n elif SGData['SGPtGrp'] in ['6/m',]: #OK\n SSGOps[1][0][3,3] = -SSGKl[1]\n SSGOps[1][1][3] = genQ[0]\n SSGOps[2][1][3] = genQ[1]\n elif SGData['SGPtGrp'] in ['622',]: #OK\n for i,j in enumerate([1,9,8]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = -genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n \n elif SGData['SGPtGrp'] in ['6mm','-62m','-6m2',]: #OK\n for i,j in enumerate([1,6,7]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n elif SGData['SGPtGrp'] in ['6/mmm',]: # OK\n for i,j in enumerate([1,2,10,11]):\n SSGOps[j][0][3,3] = 1\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n elif SGData['SGPtGrp'] in ['1','-1']: #triclinic - done\n return True,SSGOps\n E,SSGOps = extendSSGOps(SSGOps)\n return E,SSGOps\n \n def specialGen(gensym,modsym):\n sym = ''.join(gensym)\n if SGData['SGPtGrp'] in ['2/m',] and 'n' in SGData['SpGrp']:\n if 's' in sym:\n gensym = 'ss'\n if SGData['SGPtGrp'] in ['-62m',] and sym == '00s':\n gensym = '0ss'\n elif SGData['SGPtGrp'] in ['222',]:\n if sym == '00s':\n gensym = '0ss'\n elif sym == '0s0':\n gensym = 'ss0'\n elif sym == 's00':\n gensym = 's0s'\n elif SGData['SGPtGrp'] in ['mmm',]:\n if 'g' in modsym:\n if sym == 's00':\n gensym = 's0s'\n elif sym == '0s0':\n gensym = '0ss'\n elif 'a' in modsym:\n if sym == '0s0':\n gensym = 'ss0'\n elif sym == '00s':\n gensym = 's0s'\n elif 'b' in modsym:\n if sym == '00s':\n gensym = '0ss'\n elif sym == 's00':\n gensym = 'ss0'\n return gensym\n \n Fracs = {'1/2':0.5,'1/3':1./3,'1':1.0,'0':0.,'s':.5,'t':1./3,'q':.25,'h':-1./6,'a':0.,'b':0.,'g':0.}\n if SGData['SGLaue'] in ['m3','m3m']:\n return '(3+1) superlattices not defined for cubic space groups',None\n 
elif SGData['SGLaue'] in ['3R','3mR']:\n return '(3+1) superlattices not defined for rhombohedral settings - use hexagonal setting',None\n try:\n modsym,gensym = splitSSsym(SSymbol)\n except ValueError:\n return 'Error in superspace symbol '+SSymbol,None\n modQ = [Fracs[mod] for mod in modsym]\n SSGKl = SGData['SSGKl'][:]\n if SGData['SGLaue'] in ['2/m','mmm']:\n SSGKl = fixMonoOrtho()\n Ngen = len(gensym)\n if SGData.get('SGGray',False):\n Ngen -= 1\n if len(gensym) and Ngen != len(SSGKl):\n return 'Wrong number of items in generator symbol '+''.join(gensym),None\n gensym = specialGen(gensym[:Ngen],modsym)\n genQ = [Fracs[mod] for mod in gensym[:Ngen]]\n if not genQ:\n genQ = [0,0,0,0]\n SSgSpc = SGData['SpGrp']+SSymbol\n if SGData['SGGray']:\n SSgSpc = SSgSpc.replace('(',\" 1'(\")\n SSGData = {'SSpGrp':SSgSpc,'modQ':modQ,'modSymb':modsym,'SSGKl':SSGKl}\n SSCen = np.zeros((len(SGData['SGCen']),4))\n for icen,cen in enumerate(SGData['SGCen']):\n SSCen[icen,0:3] = cen\n if 'BNSlattsym' in SGData and '_' in SGData['BNSlattsym'][0]:\n Ncen = len(SGData['SGCen'])\n for icen in range(Ncen//2,Ncen):\n SSCen[icen,3] = 0.5\n SSGData['SSGCen'] = SSCen%1.\n SSGData['SSGOps'] = []\n for iop,op in enumerate(SGData['SGOps']):\n T = np.zeros(4)\n ssop = np.zeros((4,4))\n ssop[:3,:3] = op[0]\n T[:3] = op[1]\n SSGData['SSGOps'].append([ssop,T])\n E,Result = genSSGOps()\n if E:\n SSGData['SSGOps'] = Result\n if DEBUG:\n print ('Super spacegroup operators for '+SSGData['SSpGrp'])\n for Op in Result:\n print (SSMT2text(Op).replace(' ',''))\n if SGData['SGInv']: \n for Op in Result:\n Op = [-Op[0],-Op[1]%1.]\n print (SSMT2text(Op).replace(' ','')) \n return None,SSGData\n else:\n return Result+'\\nOperator conflict - incorrect superspace symbol',None", "def SpcGroup(SGSymbol):\n LaueSym = ('-1','2/m','mmm','4/m','4/mmm','3R','3mR','3','3m1','31m','6/m','6/mmm','m3','m3m')\n LattSym = ('P','A','B','C','I','F','R')\n UniqSym = ('','','a','b','c','',)\n SysSym = ('triclinic','monoclinic','orthorhombic','tetragonal','rhombohedral','trigonal','hexagonal','cubic')\n SGData = {}\n if len(SGSymbol.split()) < 2:\n return SGErrors(0),SGData\n if ':R' in SGSymbol:\n SGSymbol = SGSymbol.replace(':',' ') #get rid of ':' in R space group symbols from some cif files\n SGData['SGGray'] = False\n if \"1'\" in SGSymbol: #set for incommensurate magnetic\n SGData['SGGray'] = True\n SGSymbol = SGSymbol.replace(\"1'\",'')\n SGSymbol = SGSymbol.split(':')[0] #remove :1/2 setting symbol from some cif files\n if '-2' in SGSymbol: #replace bad but legal symbols with correct equivalents\n SGSymbol = SGSymbol.replace('-2','m')\n if SGSymbol.split()[1] =='3/m':\n SGSymbol = SGSymbol.replace('3/m','-6')\n import pyspg\n SGInfo = pyspg.sgforpy(SGSymbol)\n SGData['SpGrp'] = SGSymbol.strip().lower().capitalize()\n SGData['SGLaue'] = LaueSym[SGInfo[0]-1]\n SGData['SGInv'] = bool(SGInfo[1])\n SGData['SGLatt'] = LattSym[SGInfo[2]-1]\n SGData['SGUniq'] = UniqSym[SGInfo[3]+1]\n SGData['SGFixed'] = False\n SGData['SGOps'] = []\n SGData['SGGen'] = []\n for i in range(SGInfo[5]):\n Mat = np.array(SGInfo[6][i])\n Trns = np.array(SGInfo[7][i])\n SGData['SGOps'].append([Mat,Trns])\n if 'array' in str(type(SGInfo[8])): #patch for old fortran bin?\n SGData['SGGen'].append(int(SGInfo[8][i]))\n SGData['BNSlattsym'] = [LattSym[SGInfo[2]-1],[0,0,0]]\n lattSpin = []\n if SGData['SGLatt'] == 'P':\n SGData['SGCen'] = np.array(([0,0,0],))\n elif SGData['SGLatt'] == 'A':\n SGData['SGCen'] = np.array(([0,0,0],[0,.5,.5]))\n lattSpin += [1,]\n elif SGData['SGLatt'] 
== 'B':\n SGData['SGCen'] = np.array(([0,0,0],[.5,0,.5]))\n lattSpin += [1,]\n elif SGData['SGLatt'] == 'C':\n SGData['SGCen'] = np.array(([0,0,0],[.5,.5,0,]))\n lattSpin += [1,]\n elif SGData['SGLatt'] == 'I':\n SGData['SGCen'] = np.array(([0,0,0],[.5,.5,.5]))\n lattSpin += [1,]\n elif SGData['SGLatt'] == 'F':\n SGData['SGCen'] = np.array(([0,0,0],[0,.5,.5],[.5,0,.5],[.5,.5,0,]))\n lattSpin += [1,1,1,1]\n elif SGData['SGLatt'] == 'R':\n SGData['SGCen'] = np.array(([0,0,0],[2./3,1./3,1./3],[1./3,2./3,2./3]))\n\n if SGData['SGInv']:\n if SGData['SGLaue'] in ['-1','2/m','mmm']:\n Ibar = 7\n elif SGData['SGLaue'] in ['4/m','4/mmm']:\n Ibar = 1\n elif SGData['SGLaue'] in ['3R','3mR','3','3m1','31m','6/m','6/mmm']:\n Ibar = 15 #8+4+2+1\n else:\n Ibar = 4\n Ibarx = Ibar&14\n else:\n Ibarx = 8\n if SGData['SGLaue'] in ['-1','2/m','mmm','m3','m3m']:\n Ibarx = 0\n moregen = []\n for i,gen in enumerate(SGData['SGGen']):\n if SGData['SGLaue'] in ['m3','m3m']:\n if gen in [1,2,4]:\n SGData['SGGen'][i] = 4\n elif gen < 7:\n SGData['SGGen'][i] = 0\n elif SGData['SGLaue'] in ['4/m','4/mmm','3R','3mR','3','3m1','31m','6/m','6/mmm']:\n if gen == 2:\n SGData['SGGen'][i] = 4\n elif gen in [3,5]:\n SGData['SGGen'][i] = 3\n elif gen == 6:\n if SGData['SGLaue'] in ['4/m','4/mmm']:\n SGData['SGGen'][i] = 128\n else:\n SGData['SGGen'][i] = 16\n elif not SGData['SGInv'] and gen == 12:\n SGData['SGGen'][i] = 8\n elif (not SGData['SGInv']) and (SGData['SGLaue'] in ['3','3m1','31m','6/m','6/mmm']) and (gen == 1):\n SGData['SGGen'][i] = 24\n gen = SGData['SGGen'][i]\n if gen == 99:\n gen = 8\n if SGData['SGLaue'] in ['3m1','31m','6/m','6/mmm']:\n gen = 3\n elif SGData['SGLaue'] == 'm3m':\n gen = 12\n SGData['SGGen'][i] = gen\n elif gen == 98:\n gen = 8\n if SGData['SGLaue'] in ['3m1','31m','6/m','6/mmm']:\n gen = 4\n SGData['SGGen'][i] = gen\n elif not SGData['SGInv'] and gen in [23,] and SGData['SGLaue'] in ['m3','m3m']:\n SGData['SGGen'][i] = 24\n elif gen >= 16 and gen != 128:\n if not SGData['SGInv']:\n gen = 31\n else:\n gen ^= Ibarx \n SGData['SGGen'][i] = gen\n if SGData['SGInv']:\n if gen < 128:\n moregen.append(SGData['SGGen'][i]^Ibar)\n else:\n moregen.append(1)\n SGData['SGGen'] += moregen\n if SGData['SGLaue'] in '-1':\n SGData['SGSys'] = SysSym[0]\n elif SGData['SGLaue'] in '2/m':\n SGData['SGSys'] = SysSym[1]\n elif SGData['SGLaue'] in 'mmm':\n SGData['SGSys'] = SysSym[2]\n elif SGData['SGLaue'] in ['4/m','4/mmm']:\n SGData['SGSys'] = SysSym[3]\n elif SGData['SGLaue'] in ['3R','3mR']:\n SGData['SGSys'] = SysSym[4]\n elif SGData['SGLaue'] in ['3','3m1','31m']:\n SGData['SGSys'] = SysSym[5]\n elif SGData['SGLaue'] in ['6/m','6/mmm']:\n SGData['SGSys'] = SysSym[6]\n elif SGData['SGLaue'] in ['m3','m3m']:\n SGData['SGSys'] = SysSym[7]\n SGData['SGPolax'] = SGpolar(SGData)\n SGData['SGPtGrp'],SGData['SSGKl'] = SGPtGroup(SGData)\n\n if SGData['SGLatt'] == 'R':\n if SGData['SGPtGrp'] in ['3',]:\n SGData['SGSpin'] = 3*[1,]\n elif SGData['SGPtGrp'] in ['-3','32','3m']:\n SGData['SGSpin'] = 4*[1,]\n elif SGData['SGPtGrp'] in ['-3m',]:\n SGData['SGSpin'] = 5*[1,]\n \n else:\n if SGData['SGPtGrp'] in ['1','3','23',]:\n SGData['SGSpin'] = lattSpin+[1,]\n elif SGData['SGPtGrp'] in ['-1','2','m','4','-4','-3','312','321','3m1','31m','6','-6','432','-43m']:\n SGData['SGSpin'] = lattSpin+[1,1,]\n elif SGData['SGPtGrp'] in ['2/m','4/m','422','4mm','-42m','-4m2','-3m1','-31m',\n '6/m','622','6mm','-6m2','-62m','m3','m3m']:\n SGData['SGSpin'] = lattSpin+[1,1,1,]\n else: #'222'-'mmm','4/mmm','6/mmm'\n SGData['SGSpin'] = 
lattSpin+[1,1,1,1,]\n return SGInfo[-1],SGData", "def render_synthetic_surface(size, samples, rms=None, mask='circle', psd_fcn=abc_psd, **psd_fcn_kwargs): # NOQA\n # compute the grid and PSD\n sample_spacing = size / (samples - 1)\n nu_x = nu_y = forward_ft_unit(sample_spacing, samples)\n center = samples // 2 # some bullshit here to gloss over zeros for ab_psd\n nu_x[center] = nu_x[center+1] / 10\n nu_y[center] = nu_y[center+1] / 10\n nu_xx, nu_yy = np.meshgrid(nu_x, nu_y)\n\n nu_r, _ = cart_to_polar(nu_xx, nu_yy)\n psd = psd_fcn(nu_r, **psd_fcn_kwargs)\n\n # synthesize a surface from the PSD\n x, y, z = synthesize_surface_from_psd(psd, nu_x, nu_y)\n\n # mask\n mask = mcache(mask, samples)\n z[mask == 0] = np.nan\n\n # possibly scale RMS\n if rms is not None:\n z_rms = globals()['rms'](z) # rms function is shadowed by rms kwarg\n scale_factor = rms / z_rms\n z *= scale_factor\n\n return x, y, z", "def make_dom_map(pmt_directions, values, nside=512, d=0.2, smoothing=0.1):\n import healpy as hp\n\n discs = [hp.query_disc(nside, dir, 0.2) for dir in pmt_directions]\n npix = hp.nside2npix(nside)\n pixels = np.zeros(npix)\n for disc, value in zip(discs, values):\n for d in disc:\n pixels[d] = value\n if smoothing > 0:\n return hp.sphtfunc.smoothing(pixels, fwhm=smoothing, iter=1)\n return pixels", "def Sym(s, symbol_table={}):\n if s not in symbol_table: symbol_table[s] = Symbol(s)\n return symbol_table[s]", "def x_HC2GC(s, l, b, d=-8.5):\n if isinstance(l, np.ndarray):\n return s*cos(l)*cos(b) + d*np.ones(l.shape[0])\n elif isinstance(b, np.ndarray):\n return s*cos(l)*cos(b) + d*np.ones(b.shape[0])\n elif isinstance(s, np.ndarray):\n return s*cos(l)*cos(b) + d*np.ones(s.shape[0])\n else:\n return s*cos(l)*cos(b) + d", "def getDens(s3d, sC=1):\n\n siia, siiae = s3d.extractPlane(line='siia', sC=sC, meth='sum')\n siib, siibe = s3d.extractPlane(line='siib', sC=sC, meth='sum')\n nemap = calcDens(siia, siib)\n snmap = (1./(1./(siia/siiae)**2 + 1./(siib/siibe)**2))**0.5\n return nemap, snmap", "def get_symbol_map():\n functions = {}\n for ea in Segments():\n for funcea in Functions(SegStart(ea), SegEnd(ea)):\n size = FindFuncEnd(funcea) - funcea\n functions[funcea] = (GetFunctionName(funcea), size)\n # It may not be necessary to sort by ea, but be safe...\n output_lines = []\n for i, (ea, (name, size)) in enumerate(sorted(functions.items())):\n if len(name) > 255:\n print \"ClemSym: truncating name\", name\n name = name[:255]\n line = \"%d: %s @ %07x %d\" % (i, name, ea, size)\n output_lines.append(line)\n return '\\n'.join(output_lines)", "def bandlimited_dirac(N_sph, d, w_n=None):\n d = utils.asarray_1d(d)\n if w_n is None:\n w_n = np.ones(N_sph + 1)\n assert(len(w_n) == N_sph + 1), \"Provide weight per order.\"\n g_n = np.zeros([(N_sph + 1)**2, len(d)])\n for n, i in enumerate(range(N_sph + 1)):\n g_n[i, :] = w_n[i] * (2 * n + 1) / (4 * np.pi) * \\\n scyspecial.eval_legendre(n, np.cos(d))\n dirac = np.sum(g_n, axis=0)\n return dirac", "def sample_symbolic_table(symtable, size, strategy=\"diversity\"):\n\n if size > len(symtable.values):\n size = len(symtable.values)\n\n if strategy == \"uniform\":\n chosen_indices = np.random.choice(list(range(len(symtable.values))), size, replace=False)\n elif strategy == \"diversity\":\n indices = set(range(len(symtable.values)))\n chosen_indices = set()\n for i in range(size):\n pool = indices - chosen_indices\n candidate_size = min([20, len(pool)])\n candidates = np.random.choice(list(pool), size=candidate_size, replace=False)\n index = 
pick_diverse_candidate_index(candidates, chosen_indices, symtable.values)\n chosen_indices.add(index)\n\n sample_values = [symtable.values[i] for i in chosen_indices]\n symtable_sample = SymTable(sample_values)\n return symtable_sample", "def insert_snp500_symbols(df_symbol):\n\n # connect to the MySQL instance\n db_host = 'localhost'\n db_user = 'xiao'\n db_pass = 'Wx3921786!'\n db_name = 'securities_master'\n connect = mdb.connect(host=db_host, user=db_user, password=db_pass, db=db_name)\n mysql_cursor = connect.cursor()\n\n # create req strings\n table_name = 'symbol'\n columns = ','.join(df_symbol.columns.values)\n values = (\"%s, \" * 7)[:-2]\n req = \"\"\"INSERT INTO %s (%s) VALUES (%s)\"\"\" % (table_name, columns, values)\n\n # insert to MySQL with max chunk_size = 1000\n chunk_size = 1000\n for i in range(0, len(df_symbol.index), chunk_size):\n chunk_df = df_symbol.iloc[i: i+chunk_size]\n data = [tuple(x) for x in chunk_df.values.tolist()]\n mysql_cursor.executemany(req, data)\n connect.commit()\n\n mysql_cursor.close()", "def modulate(self, input_bits):\n mapfunc = vectorize(lambda i:\n self._constellation[bitarray2dec(input_bits[i:i + self.num_bits_symbol])])\n\n baseband_symbols = mapfunc(arange(0, len(input_bits), self.num_bits_symbol))\n\n return baseband_symbols", "def getSymbol(id):", "def _rdkit_smiles(cursor, table):\n cursor.execute(\n sql.SQL(\"\"\"\n SELECT reaction_id,\n m,\n morganbv_fp(m) AS mfp2 \n INTO {} FROM (\n SELECT reaction_id, \n mol_from_smiles(smiles::cstring) AS m\n FROM {}) tmp\n WHERE m IS NOT NULL\"\"\").format(\n sql.Identifier(interface.RDKIT_SCHEMA, table),\n sql.Identifier(table)))\n cursor.execute(\n sql.SQL('CREATE INDEX {} ON {} USING gist(m)').format(\n sql.Identifier(f'{table}_m'),\n sql.Identifier(interface.RDKIT_SCHEMA, table)))\n cursor.execute(\n sql.SQL('CREATE INDEX {} ON {} USING gist(mfp2)').format(\n sql.Identifier(f'{table}_mfp2'),\n sql.Identifier(interface.RDKIT_SCHEMA, table)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) -> digital_chunks_to_symbols_if_sptr
__init__(self, p) -> digital_chunks_to_symbols_if_sptr
def __init__(self, *args):
    this = _digital_swig.new_digital_chunks_to_symbols_if_sptr(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_ic_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, s: ghidra.program.model.symbol.Symbol, row: int, charOffset: int):\n ...", "def prepare_symbols(self):", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n\n self.symbolings = open(\n artifact_manager.get_temp_file(\n config.SYMBOL_OPENINGS_CLOSINGS_SORTED),\n 'r')\n # The offsets_db is really small, and we need to read and write\n # from it a fair bit, so suck it into memory\n offsets_db = file(\n artifact_manager.get_temp_file(config.SYMBOL_OFFSETS_DB), 'rb')\n # A map from symbol_id to offset. The values of this map are\n # incremented as the openings and closings for a symbol are\n # consumed.\n self.offsets = cPickle.load(offsets_db)\n offsets_db.close()", "def __init__(self, Unit_Cell, Na, Nb, Nc, spaceGroup):\n\n self.unit_cell = Unit_Cell\n \n #This is a list of all the crystallographic unit cells that make up\n #this magnetic unit cell\n self.AllUnitCells = [Unit_Cell]\n \n #bonds (interactions) are stored in this class since it is treated\n #as the cutoff cell.(The cell containing all unique bonds)\n self.Bonds = []\n \n self.space_Group = spaceGroup\n \n self.Na = Na\n self.Nb = Nb\n self.Nc = Nc\n \n #generate the magnetic Cell by translating the unit cell\n for i in range(0, Na):\n for j in range(0, Nb):\n for k in range(0, Nc):\n if i !=0 or j != 0 or k != 0: #to not duplicate original unit cell\n self.AllUnitCells.append(self.unit_cell.translateCell(i,j,k))\n \n\n #Recording bonds that are mapped back onto themselves by a symOp\n self.bondConstraints = []", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, symbol_group):\n self.symbol_group = symbol_group\n self.print_positions()", "def __init__(self):\n this = _coin.new_charp()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, grammar):\r\n for name, symbol in grammar.symbol2number.items():\r\n setattr(self, name, symbol)", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def chunks_to_symbols_sc(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_sc(*args, **kwargs)", "def __init__(self):\n # Main character id\n self.name = None\n self.p1 = None\n self.p1_is = None\n self.p2 = None\n self.p3 = None", "def __init__(self,name,first_instr):\n instr_t.__init__(self)\n self.name = name\n 
self.first_instr = first_instr" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) -> digital_chunks_to_symbols_ic_sptr
__init__(self, p) -> digital_chunks_to_symbols_ic_sptr
def __init__(self, *args):
    this = _digital_swig.new_digital_chunks_to_symbols_ic_sptr(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_if_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, s: ghidra.program.model.symbol.Symbol, row: int, charOffset: int):\n ...", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def prepare_symbols(self):", "def __init__(self):\n\n self.symbolings = open(\n artifact_manager.get_temp_file(\n config.SYMBOL_OPENINGS_CLOSINGS_SORTED),\n 'r')\n # The offsets_db is really small, and we need to read and write\n # from it a fair bit, so suck it into memory\n offsets_db = file(\n artifact_manager.get_temp_file(config.SYMBOL_OFFSETS_DB), 'rb')\n # A map from symbol_id to offset. The values of this map are\n # incremented as the openings and closings for a symbol are\n # consumed.\n self.offsets = cPickle.load(offsets_db)\n offsets_db.close()", "def chunks_to_symbols_ic(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_ic(*args, **kwargs)", "def __init__(self, Unit_Cell, Na, Nb, Nc, spaceGroup):\n\n self.unit_cell = Unit_Cell\n \n #This is a list of all the crystallographic unit cells that make up\n #this magnetic unit cell\n self.AllUnitCells = [Unit_Cell]\n \n #bonds (interactions) are stored in this class since it is treated\n #as the cutoff cell.(The cell containing all unique bonds)\n self.Bonds = []\n \n self.space_Group = spaceGroup\n \n self.Na = Na\n self.Nb = Nb\n self.Nc = Nc\n \n #generate the magnetic Cell by translating the unit cell\n for i in range(0, Na):\n for j in range(0, Nb):\n for k in range(0, Nc):\n if i !=0 or j != 0 or k != 0: #to not duplicate original unit cell\n self.AllUnitCells.append(self.unit_cell.translateCell(i,j,k))\n \n\n #Recording bonds that are mapped back onto themselves by a symOp\n self.bondConstraints = []", "def chunks_to_symbols_sc(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_sc(*args, **kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, grammar):\r\n for name, symbol in grammar.symbol2number.items():\r\n setattr(self, name, symbol)", "def __init__(self, symbol_group):\n self.symbol_group = symbol_group\n self.print_positions()", "def __init__(self):\n this = _coin.new_charp()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n # Main character id\n self.name = None\n self.p1 = None\n self.p1_is = None\n self.p2 = None\n self.p3 
= None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
chunks_to_symbols_ic(gr_complex_vector symbol_table, int D = 1) -> digital_chunks_to_symbols_ic_sptr

Map a stream of symbol indexes (unpacked bytes or shorts) to stream of float or complex constellation points in D dimensions (D = 1 by default)
def chunks_to_symbols_ic(*args, **kwargs):
    return _digital_swig.chunks_to_symbols_ic(*args, **kwargs)
[ "def chunks_to_symbols_sc(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_sc(*args, **kwargs)", "def chunks_to_symbols_sf(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_sf(*args, **kwargs)", "def chunks_to_symbols_bc(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_bc(*args, **kwargs)", "def chunks_to_symbols_bf(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_bf(*args, **kwargs)", "def modulate(self, input_bits):\n mapfunc = vectorize(lambda i:\n self._constellation[bitarray2dec(input_bits[i:i + self.num_bits_symbol])])\n\n baseband_symbols = mapfunc(arange(0, len(input_bits), self.num_bits_symbol))\n\n return baseband_symbols", "def setup_symbols_for_species_pKs(self, sid_list):\n new_variable_index = 0\n self.variable_vector_dict = {}\n for species_id in sid_list:\n pK_data_val = self.get_pK_val(species_id) \n self.variable_vector_dict[species_id] = [symbols('x[%d]'%new_variable_index), pK_data_val]\n new_variable_index += 1\n #for each species_id, set up the sequence of species that eventually lead to least protonated state, for binding constant calculation\n self.compounds_species_id_sequence = {}\n for species_id in self.compounds_data_dict.keys():\n self.compounds_species_id_sequence[species_id] = self.get_sequence_of_species_ids(species_id)", "def make_dom_map(pmt_directions, values, nside=512, d=0.2, smoothing=0.1):\n import healpy as hp\n\n discs = [hp.query_disc(nside, dir, 0.2) for dir in pmt_directions]\n npix = hp.nside2npix(nside)\n pixels = np.zeros(npix)\n for disc, value in zip(discs, values):\n for d in disc:\n pixels[d] = value\n if smoothing > 0:\n return hp.sphtfunc.smoothing(pixels, fwhm=smoothing, iter=1)\n return pixels", "def getSymbol(id):", "def bandlimited_dirac(N_sph, d, w_n=None):\n d = utils.asarray_1d(d)\n if w_n is None:\n w_n = np.ones(N_sph + 1)\n assert(len(w_n) == N_sph + 1), \"Provide weight per order.\"\n g_n = np.zeros([(N_sph + 1)**2, len(d)])\n for n, i in enumerate(range(N_sph + 1)):\n g_n[i, :] = w_n[i] * (2 * n + 1) / (4 * np.pi) * \\\n scyspecial.eval_legendre(n, np.cos(d))\n dirac = np.sum(g_n, axis=0)\n return dirac", "def ExtractSymbols(self, native_heaps, sym_paths):\n raise NotImplementedError()", "def SSpcGroup(SGData,SSymbol):\n \n def fixMonoOrtho():\n mod = ''.join(modsym).replace('1/2','0').replace('1','0')\n if SGData['SGPtGrp'] in ['2','m']: #OK\n if mod in ['a00','0b0','00g']:\n result = [i*-1 for i in SGData['SSGKl']]\n else:\n result = SGData['SSGKl'][:]\n if '/' in mod:\n return [i*-1 for i in result]\n else:\n return result\n elif SGData['SGPtGrp'] == '2/m': #OK\n if mod in ['a00','0b0','00g']:\n result = SGData['SSGKl'][:]\n else:\n result = [i*-1 for i in SGData['SSGKl']]\n if '/' in mod:\n return [i*-1 for i in result]\n else:\n return result\n else: #orthorhombic\n return [-SSGKl[i] if mod[i] in ['a','b','g'] else SSGKl[i] for i in range(3)]\n \n def extendSSGOps(SSGOps):\n for OpA in SSGOps:\n OpAtxt = SSMT2text(OpA)\n if 't' not in OpAtxt:\n continue\n for OpB in SSGOps:\n OpBtxt = SSMT2text(OpB)\n if 't' not in OpBtxt:\n continue\n OpC = list(SGProd(OpB,OpA))\n OpC[1] %= 1.\n OpCtxt = SSMT2text(OpC)\n# print OpAtxt.replace(' ','')+' * '+OpBtxt.replace(' ','')+' = '+OpCtxt.replace(' ','')\n for k,OpD in enumerate(SSGOps):\n OpDtxt = SSMT2text(OpD)\n OpDtxt2 = ''\n if SGData['SGGray']: \n OpDtxt2 = SSMT2text([OpD[0],OpD[1]+np.array([0.,0.,0.,.5])])\n# print ' ('+OpCtxt.replace(' ','')+' = ? 
'+OpDtxt.replace(' ','')+')'\n if OpCtxt == OpDtxt:\n continue\n elif OpCtxt == OpDtxt2:\n continue\n elif OpCtxt.split(',')[:3] == OpDtxt.split(',')[:3]:\n if 't' not in OpDtxt:\n SSGOps[k] = OpC\n# print k,' new:',OpCtxt.replace(' ','')\n break\n else:\n OpCtxt = OpCtxt.replace(' ','')\n OpDtxt = OpDtxt.replace(' ','')\n Txt = OpCtxt+' conflicts with '+OpDtxt\n# print (Txt)\n return False,Txt\n return True,SSGOps\n \n def findMod(modSym):\n for a in ['a','b','g']:\n if a in modSym:\n return a\n \n def genSSGOps():\n SSGOps = SSGData['SSGOps'][:]\n iFrac = {}\n for i,frac in enumerate(SSGData['modSymb']):\n if frac in ['1/2','1/3','1/4','1/6','1']:\n iFrac[i] = frac+'.'\n# print SGData['SpGrp']+SSymbol\n# print 'SSGKl',SSGKl,'genQ',genQ,'iFrac',iFrac,'modSymb',SSGData['modSymb']\n# set identity & 1,-1; triclinic\n SSGOps[0][0][3,3] = 1.\n## expand if centrosymmetric\n# if SGData['SGInv']:\n# SSGOps += [[-1*M,V] for M,V in SSGOps[:]]\n# monoclinic - all done & all checked\n if SGData['SGPtGrp'] in ['2','m']: #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n for i in iFrac:\n SSGOps[1][0][3,i] = -SSGKl[0]\n elif SGData['SGPtGrp'] == '2/m': #OK\n SSGOps[1][0][3,3] = SSGKl[1]\n if 's' in gensym:\n SSGOps[1][1][3] = 0.5\n for i in iFrac:\n SSGOps[1][0][3,i] = SSGKl[0]\n \n# orthorhombic - all OK not fully checked\n elif SGData['SGPtGrp'] in ['222','mm2','m2m','2mm']: #OK\n if SGData['SGPtGrp'] == '222':\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[1,2],2:[1,3]},'b':{2:[3,2],0:[1,2]}} #OK\n elif SGData['SGPtGrp'] == 'mm2':\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]}} #OK\n elif SGData['SGPtGrp'] == 'm2m':\n OrOps = {'b':{0:[1,2],2:[3,2]},'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]}} #OK\n elif SGData['SGPtGrp'] == '2mm':\n OrOps = {'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]},'g':{0:[1,3],1:[2,3]}} #OK\n a = findMod(SSGData['modSymb'])\n OrFrac = OrOps[a]\n for j in iFrac:\n for i in OrFrac[j]:\n SSGOps[i][0][3,j] = -2.*eval(iFrac[j])*SSGKl[i-1]\n for i in [0,1,2]:\n SSGOps[i+1][0][3,3] = SSGKl[i]\n SSGOps[i+1][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] == 'mmm': #OK\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]}} \n a = findMod(SSGData['modSymb'])\n if a == 'g':\n SSkl = [1,1,1]\n elif a == 'a':\n SSkl = [-1,1,-1]\n else:\n SSkl = [1,-1,-1]\n OrFrac = OrOps[a]\n for j in iFrac:\n for i in OrFrac[j]:\n SSGOps[i][0][3,j] = -2.*eval(iFrac[j])*SSkl[i-1]\n for i in [0,1,2]:\n SSGOps[i+1][0][3,3] = SSkl[i]\n SSGOps[i+1][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps \n# tetragonal - all done & checked\n elif SGData['SGPtGrp'] == '4': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n elif SGData['SGPtGrp'] == '-4': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = 1\n elif SGData['SGPtGrp'] in ['4/m',]: #OK\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -SSGKl[0]\n for i,j in enumerate([1,3]):\n SSGOps[j][0][3,3] = 1\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] in ['422','4mm','-42m','-4m2',]: #OK\n iGens = [1,4,5]\n if SGData['SGPtGrp'] in ['4mm','-4m2',]:\n iGens = [1,6,7]\n for i,j in enumerate(iGens):\n if '1/2' in SSGData['modSymb'] and i < 2:\n SSGOps[j][0][3,1] = SSGKl[i]\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n if 
's' in gensym and j == 6:\n SSGOps[j][1][3] = -genQ[i]\n else:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] in ['4/mmm',]:#OK\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -SSGKl[0]\n SSGOps[6][0][3,1] = SSGKl[1]\n if modsym:\n SSGOps[1][1][3] = -genQ[3]\n for i,j in enumerate([1,2,6,7]):\n SSGOps[j][0][3,3] = 1\n SSGOps[j][1][3] = genQ[i]\n E,Result = extendSSGOps(SSGOps)\n if not E:\n return E,Result\n else:\n SSGOps = Result\n \n# trigonal - all done & checked\n elif SGData['SGPtGrp'] == '3': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] == '-3': #OK\n SSGOps[1][0][3,3] = -SSGKl[0]\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] in ['312','3m','-3m','-3m1','3m1']: #OK\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n for i,j in enumerate([1,5]):\n if SGData['SGPtGrp'] in ['3m','-3m']:\n SSGOps[j][0][3,3] = 1\n else: \n SSGOps[j][0][3,3] = SSGKl[i+1]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n elif SGData['SGPtGrp'] in ['321','32']: #OK\n for i,j in enumerate([1,4]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n elif SGData['SGPtGrp'] in ['31m','-31m']: #OK\n ids = [1,3]\n if SGData['SGPtGrp'] == '-31m':\n ids = [1,3]\n if '1/3' in SSGData['modSymb']:\n SSGOps[ids[0]][0][3,1] = -SSGKl[0]\n for i,j in enumerate(ids):\n SSGOps[j][0][3,3] = 1\n if genQ[i+1]:\n SSGOps[j][1][3] = genQ[i+1]\n \n# hexagonal all done & checked\n elif SGData['SGPtGrp'] == '6': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] == '-6': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n elif SGData['SGPtGrp'] in ['6/m',]: #OK\n SSGOps[1][0][3,3] = -SSGKl[1]\n SSGOps[1][1][3] = genQ[0]\n SSGOps[2][1][3] = genQ[1]\n elif SGData['SGPtGrp'] in ['622',]: #OK\n for i,j in enumerate([1,9,8]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = -genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n \n elif SGData['SGPtGrp'] in ['6mm','-62m','-6m2',]: #OK\n for i,j in enumerate([1,6,7]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n elif SGData['SGPtGrp'] in ['6/mmm',]: # OK\n for i,j in enumerate([1,2,10,11]):\n SSGOps[j][0][3,3] = 1\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n elif SGData['SGPtGrp'] in ['1','-1']: #triclinic - done\n return True,SSGOps\n E,SSGOps = extendSSGOps(SSGOps)\n return E,SSGOps\n \n def specialGen(gensym,modsym):\n sym = ''.join(gensym)\n if SGData['SGPtGrp'] in ['2/m',] and 'n' in SGData['SpGrp']:\n if 's' in sym:\n gensym = 'ss'\n if SGData['SGPtGrp'] in ['-62m',] and sym == '00s':\n gensym = '0ss'\n elif SGData['SGPtGrp'] in ['222',]:\n if sym == '00s':\n gensym = '0ss'\n elif sym == '0s0':\n gensym = 'ss0'\n elif sym == 's00':\n gensym = 's0s'\n elif SGData['SGPtGrp'] in ['mmm',]:\n if 'g' in modsym:\n if sym == 's00':\n gensym = 's0s'\n elif sym == '0s0':\n gensym = '0ss'\n elif 'a' in modsym:\n if sym == '0s0':\n gensym = 'ss0'\n elif sym == '00s':\n gensym = 's0s'\n elif 'b' in modsym:\n if sym == '00s':\n gensym = '0ss'\n elif sym == 's00':\n gensym = 'ss0'\n return gensym\n \n Fracs = {'1/2':0.5,'1/3':1./3,'1':1.0,'0':0.,'s':.5,'t':1./3,'q':.25,'h':-1./6,'a':0.,'b':0.,'g':0.}\n if SGData['SGLaue'] in ['m3','m3m']:\n return '(3+1) superlattices not defined for cubic space groups',None\n 
elif SGData['SGLaue'] in ['3R','3mR']:\n return '(3+1) superlattices not defined for rhombohedral settings - use hexagonal setting',None\n try:\n modsym,gensym = splitSSsym(SSymbol)\n except ValueError:\n return 'Error in superspace symbol '+SSymbol,None\n modQ = [Fracs[mod] for mod in modsym]\n SSGKl = SGData['SSGKl'][:]\n if SGData['SGLaue'] in ['2/m','mmm']:\n SSGKl = fixMonoOrtho()\n Ngen = len(gensym)\n if SGData.get('SGGray',False):\n Ngen -= 1\n if len(gensym) and Ngen != len(SSGKl):\n return 'Wrong number of items in generator symbol '+''.join(gensym),None\n gensym = specialGen(gensym[:Ngen],modsym)\n genQ = [Fracs[mod] for mod in gensym[:Ngen]]\n if not genQ:\n genQ = [0,0,0,0]\n SSgSpc = SGData['SpGrp']+SSymbol\n if SGData['SGGray']:\n SSgSpc = SSgSpc.replace('(',\" 1'(\")\n SSGData = {'SSpGrp':SSgSpc,'modQ':modQ,'modSymb':modsym,'SSGKl':SSGKl}\n SSCen = np.zeros((len(SGData['SGCen']),4))\n for icen,cen in enumerate(SGData['SGCen']):\n SSCen[icen,0:3] = cen\n if 'BNSlattsym' in SGData and '_' in SGData['BNSlattsym'][0]:\n Ncen = len(SGData['SGCen'])\n for icen in range(Ncen//2,Ncen):\n SSCen[icen,3] = 0.5\n SSGData['SSGCen'] = SSCen%1.\n SSGData['SSGOps'] = []\n for iop,op in enumerate(SGData['SGOps']):\n T = np.zeros(4)\n ssop = np.zeros((4,4))\n ssop[:3,:3] = op[0]\n T[:3] = op[1]\n SSGData['SSGOps'].append([ssop,T])\n E,Result = genSSGOps()\n if E:\n SSGData['SSGOps'] = Result\n if DEBUG:\n print ('Super spacegroup operators for '+SSGData['SSpGrp'])\n for Op in Result:\n print (SSMT2text(Op).replace(' ',''))\n if SGData['SGInv']: \n for Op in Result:\n Op = [-Op[0],-Op[1]%1.]\n print (SSMT2text(Op).replace(' ','')) \n return None,SSGData\n else:\n return Result+'\\nOperator conflict - incorrect superspace symbol',None", "def SpcGroup(SGSymbol):\n LaueSym = ('-1','2/m','mmm','4/m','4/mmm','3R','3mR','3','3m1','31m','6/m','6/mmm','m3','m3m')\n LattSym = ('P','A','B','C','I','F','R')\n UniqSym = ('','','a','b','c','',)\n SysSym = ('triclinic','monoclinic','orthorhombic','tetragonal','rhombohedral','trigonal','hexagonal','cubic')\n SGData = {}\n if len(SGSymbol.split()) < 2:\n return SGErrors(0),SGData\n if ':R' in SGSymbol:\n SGSymbol = SGSymbol.replace(':',' ') #get rid of ':' in R space group symbols from some cif files\n SGData['SGGray'] = False\n if \"1'\" in SGSymbol: #set for incommensurate magnetic\n SGData['SGGray'] = True\n SGSymbol = SGSymbol.replace(\"1'\",'')\n SGSymbol = SGSymbol.split(':')[0] #remove :1/2 setting symbol from some cif files\n if '-2' in SGSymbol: #replace bad but legal symbols with correct equivalents\n SGSymbol = SGSymbol.replace('-2','m')\n if SGSymbol.split()[1] =='3/m':\n SGSymbol = SGSymbol.replace('3/m','-6')\n import pyspg\n SGInfo = pyspg.sgforpy(SGSymbol)\n SGData['SpGrp'] = SGSymbol.strip().lower().capitalize()\n SGData['SGLaue'] = LaueSym[SGInfo[0]-1]\n SGData['SGInv'] = bool(SGInfo[1])\n SGData['SGLatt'] = LattSym[SGInfo[2]-1]\n SGData['SGUniq'] = UniqSym[SGInfo[3]+1]\n SGData['SGFixed'] = False\n SGData['SGOps'] = []\n SGData['SGGen'] = []\n for i in range(SGInfo[5]):\n Mat = np.array(SGInfo[6][i])\n Trns = np.array(SGInfo[7][i])\n SGData['SGOps'].append([Mat,Trns])\n if 'array' in str(type(SGInfo[8])): #patch for old fortran bin?\n SGData['SGGen'].append(int(SGInfo[8][i]))\n SGData['BNSlattsym'] = [LattSym[SGInfo[2]-1],[0,0,0]]\n lattSpin = []\n if SGData['SGLatt'] == 'P':\n SGData['SGCen'] = np.array(([0,0,0],))\n elif SGData['SGLatt'] == 'A':\n SGData['SGCen'] = np.array(([0,0,0],[0,.5,.5]))\n lattSpin += [1,]\n elif SGData['SGLatt'] 
== 'B':\n SGData['SGCen'] = np.array(([0,0,0],[.5,0,.5]))\n lattSpin += [1,]\n elif SGData['SGLatt'] == 'C':\n SGData['SGCen'] = np.array(([0,0,0],[.5,.5,0,]))\n lattSpin += [1,]\n elif SGData['SGLatt'] == 'I':\n SGData['SGCen'] = np.array(([0,0,0],[.5,.5,.5]))\n lattSpin += [1,]\n elif SGData['SGLatt'] == 'F':\n SGData['SGCen'] = np.array(([0,0,0],[0,.5,.5],[.5,0,.5],[.5,.5,0,]))\n lattSpin += [1,1,1,1]\n elif SGData['SGLatt'] == 'R':\n SGData['SGCen'] = np.array(([0,0,0],[2./3,1./3,1./3],[1./3,2./3,2./3]))\n\n if SGData['SGInv']:\n if SGData['SGLaue'] in ['-1','2/m','mmm']:\n Ibar = 7\n elif SGData['SGLaue'] in ['4/m','4/mmm']:\n Ibar = 1\n elif SGData['SGLaue'] in ['3R','3mR','3','3m1','31m','6/m','6/mmm']:\n Ibar = 15 #8+4+2+1\n else:\n Ibar = 4\n Ibarx = Ibar&14\n else:\n Ibarx = 8\n if SGData['SGLaue'] in ['-1','2/m','mmm','m3','m3m']:\n Ibarx = 0\n moregen = []\n for i,gen in enumerate(SGData['SGGen']):\n if SGData['SGLaue'] in ['m3','m3m']:\n if gen in [1,2,4]:\n SGData['SGGen'][i] = 4\n elif gen < 7:\n SGData['SGGen'][i] = 0\n elif SGData['SGLaue'] in ['4/m','4/mmm','3R','3mR','3','3m1','31m','6/m','6/mmm']:\n if gen == 2:\n SGData['SGGen'][i] = 4\n elif gen in [3,5]:\n SGData['SGGen'][i] = 3\n elif gen == 6:\n if SGData['SGLaue'] in ['4/m','4/mmm']:\n SGData['SGGen'][i] = 128\n else:\n SGData['SGGen'][i] = 16\n elif not SGData['SGInv'] and gen == 12:\n SGData['SGGen'][i] = 8\n elif (not SGData['SGInv']) and (SGData['SGLaue'] in ['3','3m1','31m','6/m','6/mmm']) and (gen == 1):\n SGData['SGGen'][i] = 24\n gen = SGData['SGGen'][i]\n if gen == 99:\n gen = 8\n if SGData['SGLaue'] in ['3m1','31m','6/m','6/mmm']:\n gen = 3\n elif SGData['SGLaue'] == 'm3m':\n gen = 12\n SGData['SGGen'][i] = gen\n elif gen == 98:\n gen = 8\n if SGData['SGLaue'] in ['3m1','31m','6/m','6/mmm']:\n gen = 4\n SGData['SGGen'][i] = gen\n elif not SGData['SGInv'] and gen in [23,] and SGData['SGLaue'] in ['m3','m3m']:\n SGData['SGGen'][i] = 24\n elif gen >= 16 and gen != 128:\n if not SGData['SGInv']:\n gen = 31\n else:\n gen ^= Ibarx \n SGData['SGGen'][i] = gen\n if SGData['SGInv']:\n if gen < 128:\n moregen.append(SGData['SGGen'][i]^Ibar)\n else:\n moregen.append(1)\n SGData['SGGen'] += moregen\n if SGData['SGLaue'] in '-1':\n SGData['SGSys'] = SysSym[0]\n elif SGData['SGLaue'] in '2/m':\n SGData['SGSys'] = SysSym[1]\n elif SGData['SGLaue'] in 'mmm':\n SGData['SGSys'] = SysSym[2]\n elif SGData['SGLaue'] in ['4/m','4/mmm']:\n SGData['SGSys'] = SysSym[3]\n elif SGData['SGLaue'] in ['3R','3mR']:\n SGData['SGSys'] = SysSym[4]\n elif SGData['SGLaue'] in ['3','3m1','31m']:\n SGData['SGSys'] = SysSym[5]\n elif SGData['SGLaue'] in ['6/m','6/mmm']:\n SGData['SGSys'] = SysSym[6]\n elif SGData['SGLaue'] in ['m3','m3m']:\n SGData['SGSys'] = SysSym[7]\n SGData['SGPolax'] = SGpolar(SGData)\n SGData['SGPtGrp'],SGData['SSGKl'] = SGPtGroup(SGData)\n\n if SGData['SGLatt'] == 'R':\n if SGData['SGPtGrp'] in ['3',]:\n SGData['SGSpin'] = 3*[1,]\n elif SGData['SGPtGrp'] in ['-3','32','3m']:\n SGData['SGSpin'] = 4*[1,]\n elif SGData['SGPtGrp'] in ['-3m',]:\n SGData['SGSpin'] = 5*[1,]\n \n else:\n if SGData['SGPtGrp'] in ['1','3','23',]:\n SGData['SGSpin'] = lattSpin+[1,]\n elif SGData['SGPtGrp'] in ['-1','2','m','4','-4','-3','312','321','3m1','31m','6','-6','432','-43m']:\n SGData['SGSpin'] = lattSpin+[1,1,]\n elif SGData['SGPtGrp'] in ['2/m','4/m','422','4mm','-42m','-4m2','-3m1','-31m',\n '6/m','622','6mm','-6m2','-62m','m3','m3m']:\n SGData['SGSpin'] = lattSpin+[1,1,1,]\n else: #'222'-'mmm','4/mmm','6/mmm'\n SGData['SGSpin'] = 
lattSpin+[1,1,1,1,]\n return SGInfo[-1],SGData", "def _assign_symbols_from_groups(cmap_ops):\n\n cmap_ops.phase('assign symbols from groups')\n with open('codepoint_groups.txt', 'r') as f:\n for lineix, line in enumerate(f):\n ix = line.find('#')\n if ix >= 0:\n line = line[:ix]\n line = line.strip()\n if not line:\n continue\n\n cols = [s.strip() for s in line.split(';')]\n if not len(cols) == 3:\n print ('incorrect cols on line %d \"%s\"' % (lineix, line))\n if cols[0] == '':\n # no assignments for this line\n continue\n\n add, remove = [], []\n for s in cols[0].split():\n if s.startswith('-'):\n remove.append(s[1:])\n else:\n add.append(s)\n name = cols[1]\n\n # We use parens to delimit parts of the ranges that are 'for\n # reference' but should not impact codepoint assignment.\n # since parse_int_ranges doesn't understand these, strip\n # out the parenthesized sections. These don't nest but we\n # don't check for this, only that open ranges are closed.\n ranges = cols[2]\n parts = None\n ix = 0\n while ix < len(ranges):\n open_p = ranges.find('(', ix)\n if open_p < 0:\n if parts is not None:\n parts.append(ranges[ix:].strip())\n break\n close_p = ranges.find(')', open_p+1)\n if close_p < 0:\n raise Exception(\n 'unclosed paren in ranges on line %d \"%s\"' % (lineix, line))\n if parts is None:\n parts = []\n parts.append(ranges[ix:open_p])\n ix = close_p + 1\n if parts:\n ranges = ' '.join(parts)\n\n try:\n cps = tool_utils.parse_int_ranges(ranges)\n except Exception as err:\n sys.stderr.write(err + '\\n')\n sys.stderr.write(cols[2] + '\\n')\n sys.stderr.write('problem on %d \"%s\"\\n' % (lineix, line))\n raise\n if len(cps) > 50:\n sys.stderr.write('large range (%d) on %d \"%s\"\\n' % (\n len(cps), lineix, line))\n\n cmap_ops.log('group: %s (%d)' % (name, len(cps)))\n if add:\n cmap_ops.add_all_to_all(cps, add)\n if remove:\n cmap_ops.remove_all_from_all(cps, remove)", "def get_symbol_map():\n functions = {}\n for ea in Segments():\n for funcea in Functions(SegStart(ea), SegEnd(ea)):\n size = FindFuncEnd(funcea) - funcea\n functions[funcea] = (GetFunctionName(funcea), size)\n # It may not be necessary to sort by ea, but be safe...\n output_lines = []\n for i, (ea, (name, size)) in enumerate(sorted(functions.items())):\n if len(name) > 255:\n print \"ClemSym: truncating name\", name\n name = name[:255]\n line = \"%d: %s @ %07x %d\" % (i, name, ea, size)\n output_lines.append(line)\n return '\\n'.join(output_lines)", "def extract_and_render_all_symbol_masks(self, raw_data_directory: str, destination_directory: str):\n print(\"Extracting Symbols from Muscima++ Dataset...\")\n\n xml_files = self.get_all_xml_file_paths(raw_data_directory)\n crop_objects = self.load_crop_objects_from_xml_files(xml_files)\n self.render_masks_of_crop_objects_into_image(crop_objects, destination_directory)", "def x_HC2GC(s, l, b, d=-8.5):\n if isinstance(l, np.ndarray):\n return s*cos(l)*cos(b) + d*np.ones(l.shape[0])\n elif isinstance(b, np.ndarray):\n return s*cos(l)*cos(b) + d*np.ones(b.shape[0])\n elif isinstance(s, np.ndarray):\n return s*cos(l)*cos(b) + d*np.ones(s.shape[0])\n else:\n return s*cos(l)*cos(b) + d", "def Symbol2DotArray(self,symbol='A'): ## The function to convert a charecter to a DOT ARRAY\n\n ## input is the character\n list_points =self.data_base.get(symbol)\n list_vector = list([])\n for x in list_points:## for converting a sequence of numbers to list of positions\n cord_x = x % 5\n cord_y = (x - cord_x) / 5\n list_vector.append([cord_x, cord_y])\n list_vector = np.array(list_vector)\n 
list_vector = (np.transpose(list_vector)) - 2\n return list_vector", "def symbols(self):\n def _iter_symbols(symbol_values):\n # The initial charset doesn't matter, as the start codes have the same symbol values in all charsets.\n charset = 'A'\n\n shift_charset = None\n for symbol_value in symbol_values:\n if shift_charset:\n symbol = self._val2sym[shift_charset][symbol_value]\n shift_charset = None\n else:\n symbol = self._val2sym[charset][symbol_value]\n\n if symbol in (self.Special.START_A, self.Special.CODE_A):\n charset = 'A'\n elif symbol in (self.Special.START_B, self.Special.CODE_B):\n charset = 'B'\n elif symbol in (self.Special.START_C, self.Special.CODE_C):\n charset = 'C'\n elif symbol in (self.Special.SHIFT_A,):\n shift_charset = 'A'\n elif symbol in (self.Special.SHIFT_B,):\n shift_charset = 'B'\n\n yield symbol\n\n return list(_iter_symbols(self.symbol_values))", "def parseFastPhon2IDF(inputfilename='phon.out',\n polarizationsfile='polarizations.idf',\n omega2sfile='energies.idf',\n D=3):\n from idf.Polarizations import write as writePols\n from idf.Omega2 import write as writeOmega2s\n try:\n infile = open(inputfilename, 'r')\n #polfile = open(polarizationsfile, 'w')\n #om2file = open(omega2sfile, 'w')\n except IOError, (errno, strerror):\n print \"I/O error(%s): %s\" % (errno, strerror)\n numkpts = 0\n res = []\n line=''\n # store eigenvals and eigenvecs into lists\n # these will be cast into numpy arrays,\n # once we know the dimensions of k-point grid and number of atoms\n eigvals = []\n eigvecs = []\n # !!! this does not work if Phon read the k-point grid from file !!!\n # Instead the number of k-points has to be passed explicitely\n # to parseFastPhon2IDF by the user.\n ## we want to determine the dimensions of the calculation:\n ## seek to the line defining the k-point grid:\n ##while line[0:10]!= 'Generating':\n ## line=infile.readline()\n ##line = line.lstrip('Generating IBZ points ....')\n ##line = line.strip()\n ##griddims = [int(x) for x in line.split()]\n ##print 'Found a %s x %s x %s k-point grid.\\n' % (griddims[0], griddims[1], griddims[2])\n\n # seek to first dynamical matrix (skip symmetry outputs):\n while line.strip() != 'Integrating frequencies...':\n line=infile.readline()\n infile.readline() # skip blank line\n #infile.readline() # skip 'Using ...'\n line = infile.readline() # read 'Using...' number of k-points\n stuff = line.split()\n numkpts = int(stuff[1])\n print 'Reading phonon modes at %s k-points.' 
% numkpts\n infile.readline() # skip blank line\n # we are now at the first dynamical matrix.\n infile.readline() # skip 'Dynamical matrix'\n numatoms = 1\n linecount = 0\n while line.strip() != 'Calling zheev.':\n linecount +=1\n line=infile.readline()\n linecount -= 1 # we read the extra 'Calling zheev' line.\n numatoms = int(np.sqrt(linecount/4)) # dyn mat is 3N * 3N, and each block is labelled\n print 'Found %s atom(s) \\n' % numatoms\n # now we can read all eigenvalues and eigenvectors for all k-points:\n for ikpt in range(numkpts):\n # we just read in 'Calling zheev.'\n # loop over number of modes:\n modevals = []\n modevecs = []\n for modeindex in range(3*numatoms):\n infile.readline() # skip 'Eigenvalue N'\n line = infile.readline() # read eigenvalue\n modevals.append(float(line.strip()))\n infile.readline() # skip 'Eigenvector'\n vec = []\n for atomindex in range(numatoms):\n infile.readline() # skip 'Atom'\n atomvec = []\n for x in range(3):\n vxstring = infile.readline()\n vxlist = [float(x) for x in vxstring.strip().split()]\n vx = complex(vxlist[0] + 1j * vxlist[1])\n atomvec.append(vx)\n vec.append(atomvec)\n modevecs.append(vec)\n # we finished reading eigenvals and eigenvecs at current k-point\n eigvals.append(modevals)\n eigvecs.append(modevecs)\n #print \"eigen-values:\", eigvals\n # now skip next dynamical matrix:\n while ((line.strip() != 'Calling zheev.') and (line != '\\n')):\n line=infile.readline()\n # write IDF files:\n omega2s = np.array(eigvals)\n pols = np.array(eigvecs)\n writeOmega2s(omega2s,\n filename=omega2sfile,\n comment='Parsed from'+inputfilename,\n D=3)\n writePols(pols,\n filename=polarizationsfile,\n comment='Parsed from'+inputfilename)\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
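The chunks_to_symbols docstring in the row above describes mapping a stream of symbol indexes to constellation points drawn from a symbol table. The following minimal NumPy sketch illustrates that idea only; it is not the GNU Radio implementation, and the function name and QPSK-style table are assumptions made for the example.

import numpy as np

def map_chunks_to_symbols(indices, symbol_table, D=1):
    # Each input index i selects the D consecutive table entries starting at i*D.
    table = np.asarray(symbol_table)
    return np.concatenate([table[i * D:i * D + D] for i in indices])

# Example with an illustrative QPSK-style table: one complex point per index (D = 1).
qpsk_table = [1 + 1j, -1 + 1j, -1 - 1j, 1 - 1j]
print(map_chunks_to_symbols([0, 3, 1, 2], qpsk_table))
# [ 1.+1.j  1.-1.j -1.+1.j -1.-1.j]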
__init__(self) -> digital_additive_scrambler_bb_sptr
__init__(self, p) -> digital_additive_scrambler_bb_sptr
def __init__(self, *args):
    this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def scrambler_bb(*args, **kwargs):\n return _digital_swig.scrambler_bb(*args, **kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def additive_scrambler_bb(*args, **kwargs):\n return _digital_swig.additive_scrambler_bb(*args, **kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def descrambler_bb(*args, **kwargs):\n return _digital_swig.descrambler_bb(*args, **kwargs)", "def __init__(self):\n this = _coin.new_SoBlinker()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, a, b):\n self.a = make_generator(a)\n self.b = make_generator(b)", "def __init__(self, *args):\n this = _coin.new_SbRotation(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__( self, public_key, secret_multiplier ):\n\n self.public_key = public_key\n self.secret_multiplier = secret_multiplier", "def __init__(self, algorithm: GeneratorAlgorithm) -> None:\n self.algorithm = algorithm", "def __init__(self):\n this = _coin.new_SoMFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, name, smarts, score) -> None:\n ...", "def __init__(self, *args):\n this = _coin.new_SbDPLine(*args)\n try: self.this.append(this)\n except: self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
additive_scrambler_bb(int mask, int seed, int len, int count = 0) -> digital_additive_scrambler_bb_sptr

Scramble an input stream using an LFSR. This block works on the LSB only of the input data stream, i.e., on an "unpacked binary" stream, and produces the same format on its output.

The scrambler works by XORing the incoming bit stream by the output of the LFSR. Optionally, after 'count' bits have been processed, the shift register is reset to the seed value. This allows processing fixed length vectors of samples.
def additive_scrambler_bb(*args, **kwargs):
    return _digital_swig.additive_scrambler_bb(*args, **kwargs)
[ "def scrambler_bb(*args, **kwargs):\n return _digital_swig.scrambler_bb(*args, **kwargs)", "def descrambler_bb(*args, **kwargs):\n return _digital_swig.descrambler_bb(*args, **kwargs)", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def blockmix_salsa8(BY, Yi, r):\n\n start = (2 * r - 1) * 16\n X = BY[start:start+16] # BlockMix - 1\n tmp = [0]*16\n\n for i in range(2 * r): # BlockMix - 2\n #blockxor(BY, i * 16, X, 0, 16) # BlockMix - 3(inner)\n salsa20_8(X, tmp, BY, i * 16, BY, Yi + i*16) # BlockMix - 3(outer)\n #array_overwrite(X, 0, BY, Yi + (i * 16), 16) # BlockMix - 4\n\n for i in range(r): # BlockMix - 6\n BY[i * 16:(i * 16)+(16)] = BY[Yi + (i * 2) * 16:(Yi + (i * 2) * 16)+(16)]\n BY[(i + r) * 16:((i + r) * 16)+(16)] = BY[Yi + (i*2 + 1) * 16:(Yi + (i*2 + 1) * 16)+(16)]", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def bpsk(input_bits, noise):\n modulator = Modulator()\n demodulator = Demodulator()\n channel = Channel()\n signal = modulator.make_bpsk_mod(input_bits)\n\n signal = channel.send_signal(signal, noise)\n\n result_bits = demodulator.make_bpsk_demod(signal, channel)\n return result_bits", "def apply_rubberband(infile, time_stretching_ratio=1.0, pitch_shifting_semitones=1):\n fs1, x = monoWavRead(filename=infile)\n\n tmp_file_1 = tmp_path('x')\n tmp_file_2 = tmp_path('y')\n if not os.path.exists('tmp'):\n os.makedirs('tmp')\n \n write(filename = tmp_file_1, rate = fs1, data = x)\n cmd = \"rubberband -c 1 -t {0} -p {1} {2} {3}\".format(\n time_stretching_ratio,\n pitch_shifting_semitones,\n tmp_file_1,\n tmp_file_2)\n #print(cmd)\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n out, err = p.communicate()\n if p.returncode != 0:print (\"ERROR!\")\n\n fs2, y = monoWavRead(filename=tmp_file_2)\n\n #Change the output file name to suit your requirements here\n outfile_name = os.path.basename(infile).split(\".\")[0] + (\"_timestr%s_pitchshift%s.wav\" % (str(time_stretching_ratio),str(pitch_shifting_semitones)))\n outfile = os.path.join(outfile_path, outfile_name)\n write(filename = outfile, rate = fs1, data = y)\n \n if (FILE_DELETION):\n extractFeaturesAndDelete(outfile)", "def addBroadcastBits( iAdr, bitCount ):\n # set the broadcast values\n for idx in range( 32-bitCount ):\n iAdr = iAdr | (1 << idx)\n return iAdr", "def server_side_sfsa_reconstruct_single_self_mask_indexed(client_index, fedsubavg_b_shares, submodel_shape, perturbed_itemIDs_size, \\\r\n perturbed_cateIDs_size, fedsubavg_security_para_dict):\r\n # Load parameters and reconstruct seed for PRNG\r\n seed_len = fedsubavg_security_para_dict['seed_len']\r\n security_strength = fedsubavg_security_para_dict['security_strength']\r\n modulo_model_r_len = fedsubavg_security_para_dict['modulo_model_r_len']\r\n modulo_count_r_len = fedsubavg_security_para_dict['modulo_count_r_len']\r\n fedsubavg_b = SecretSharer.recover_secret(fedsubavg_b_shares)\r\n fedsubavg_b_entropy = int2bytes(fedsubavg_b, seed_len / 8)\r\n # PRNG for self mask\r\n fedsubavg_DRBG_b = HMAC_DRBG(fedsubavg_b_entropy, security_strength)\r\n\r\n # First, reconstruct\r\n weighted_delta_submodel_b_mask = [np.zeros(para_shape, dtype='int64') for para_shape in submodel_shape]\r\n for layer, para_shape in enumerate(submodel_shape):\r\n if layer == 0: # Do not perform any mask for the embedding layer of user ID\r\n 
continue # TO keep submodel structure\r\n else:\r\n vector_len = 1\r\n for dim in para_shape:\r\n vector_len *= dim\r\n b_mask_one_layer = prng(fedsubavg_DRBG_b, modulo_model_r_len, security_strength, vector_len)\r\n b_mask_one_layer = b_mask_one_layer.astype('int64')\r\n b_mask_one_layer = b_mask_one_layer.reshape(para_shape)\r\n weighted_delta_submodel_b_mask[layer] = b_mask_one_layer\r\n perturbed_itemIDs_count_b_mask = prng(fedsubavg_DRBG_b, modulo_count_r_len, security_strength, perturbed_itemIDs_size)\r\n perturbed_cateIDs_count_b_mask = prng(fedsubavg_DRBG_b, modulo_count_r_len, security_strength, perturbed_cateIDs_size)\r\n\r\n # Return b_mask as dictionary\r\n fedsubavg_b_mask_dict = dict()\r\n fedsubavg_b_mask_dict['weighted_delta_submodel'] = weighted_delta_submodel_b_mask\r\n fedsubavg_b_mask_dict['perturbed_itemIDs_count'] = perturbed_itemIDs_count_b_mask.astype('int64')\r\n fedsubavg_b_mask_dict['perturbed_cateIDs_count'] = perturbed_cateIDs_count_b_mask.astype('int64')\r\n return client_index, fedsubavg_b_mask_dict", "def rle_to_binary_mask(rle):\n binary_array = np.zeros(np.prod(rle.get('size')), dtype=bool)\n counts = rle.get('counts')\n \n start = 0\n for i in range(len(counts)-1):\n start += counts[i] \n end = start + counts[i+1] \n binary_array[start:end] = (i + 1) % 2\n \n binary_mask = binary_array.reshape(*rle.get('size'), order='F')\n\n return binary_mask", "def radamsa_gen(seed_payload,res_count):\n\tsubprocess.call(\"radamsa\",\"-n \"+res_count)", "def makeonlybagswithtwostraddle(n_clusters,\n straddle_inclusion_first,\n straddle_inclusion_second,\n tail_inclusion,\n p_law_param,\n n_straddle,\n n_tail,\n trainfile,\n cluster_dir,\n option,\n directory_to_read,\n random_seed=None,\n numpy_seed=None):\n # pylint: disable=unused-argument\n\n random.seed(random_seed)\n\n np.random.seed(numpy_seed)\n\n train_df = pd.read_csv(trainfile)\n\n train_df.reset_index(drop=True, inplace=True)\n\n # ###Reading instead of creating\n cluster_to_indices_list = pickle.load(\n open(directory_to_read + \"cluster_indices\", \"rb\"))\n\n for cluster_label in range(n_clusters):\n print(\"size of cluster \", cluster_label, \" is \",\n len(cluster_to_indices_list[cluster_label]))\n\n # All Bags\n all_bags_list = []\n\n # #create the first straddle bags\n straddle_bags_first = []\n\n for _ in range(n_straddle):\n\n this_bag = []\n for cluster_label in range(n_clusters - 1):\n no_of_indices = len(cluster_to_indices_list[cluster_label])\n\n no_of_sampled_indices = np.random.binomial(\n n=no_of_indices, p=straddle_inclusion_first[cluster_label])\n this_bag = this_bag + random.sample(\n cluster_to_indices_list[cluster_label], no_of_sampled_indices)\n straddle_bags_first.append(this_bag)\n print(\"A straddle bag created\")\n all_bags_list.append(this_bag)\n\n straddle_bags_file = cluster_dir + \"straddle_bags_first\"\n with open(straddle_bags_file, \"wb\") as writing_to_straddle_bags_file:\n pickle.dump(straddle_bags_first, writing_to_straddle_bags_file)\n\n # #create the second straddle bags\n straddle_bags_second = []\n\n for _ in range(n_straddle):\n\n this_bag = []\n for cluster_label in range(1, n_clusters):\n no_of_indices = len(cluster_to_indices_list[cluster_label])\n\n no_of_sampled_indices = np.random.binomial(\n n=no_of_indices, p=straddle_inclusion_second[cluster_label - 1])\n this_bag = this_bag + random.sample(\n cluster_to_indices_list[cluster_label], no_of_sampled_indices)\n straddle_bags_second.append(this_bag)\n print(\"A straddle bag created\")\n 
all_bags_list.append(this_bag)\n\n straddle_bags_file = cluster_dir + \"straddle_bags_second\"\n with open(straddle_bags_file, \"wb\") as writing_to_straddle_bags_file:\n pickle.dump(straddle_bags_second, writing_to_straddle_bags_file)\n\n # create the tail bags\n cluster_label_to_tail_bags_list = []\n\n for cluster_label in range(n_clusters):\n this_cluster_tail_bags = []\n no_of_indices = len(cluster_to_indices_list[cluster_label])\n for _ in range(n_tail):\n no_of_sampled_indices = np.random.binomial(\n n=no_of_indices, p=tail_inclusion[cluster_label])\n this_bag = random.sample(cluster_to_indices_list[cluster_label],\n no_of_sampled_indices)\n this_bag.sort()\n this_cluster_tail_bags.append(this_bag)\n all_bags_list.append(this_bag)\n cluster_label_to_tail_bags_list.append(this_cluster_tail_bags)\n tail_bags_file = cluster_dir + \"tail_bags_\" + str(cluster_label)\n with open(tail_bags_file, \"wb\") as writing_to_tail_bags_file:\n pickle.dump(this_cluster_tail_bags, writing_to_tail_bags_file)\n\n # write all bags\n all_bags_file = cluster_dir + \"all_bags\"\n with open(all_bags_file, \"wb\") as writing_to_all_bags_file:\n pickle.dump(all_bags_list, writing_to_all_bags_file)\n\n # create the raw training set using all bags\n\n new_train_df = pd.DataFrame()\n\n bag_no = 1\n\n for bag_list in all_bags_list:\n if not bag_list:\n continue\n this_bag_df = train_df.iloc[bag_list].copy()\n this_bag_df[\"bag\"] = bag_no\n new_train_df = new_train_df.append(this_bag_df, ignore_index=True)\n bag_no = bag_no + 1\n\n new_train_df = new_train_df.sample(frac=1)\n new_train_df.to_csv(cluster_dir + \"full_train.csv\", index=False)", "def make_buffer_from_bit_pattern(pattern, DATASIZE, freqs, off_freq):\n # the key's middle value is the bit's value and the left and right bits are the bits before and after\n # the buffers are enveloped to cleanly blend into each other\n\n last_bit = pattern[-1]\n output_buffer = []\n offset = 0\n counter = 1\n\n for i in range(len(pattern)):\n bit = pattern[i]\n if i < len(pattern) - 1:\n next_bit = pattern[i+1]\n else:\n next_bit = pattern[0]\n\n freq = freqs[counter] if bit == '1' else off_freq\n tone = ttone(freq, DATASIZE, offset=offset)\n # output_buffer += envelope(tone, left=last_bit=='0', right=next_bit=='0')\n output_buffer.append(tone)\n # offset += DATASIZE\n last_bit = bit\n\n if counter == 8:\n counter = 1\n else:\n counter += 1\n\n output_buffer = [struct.pack('f'*len(frame), *frame) for frame in output_buffer]\n # print output_buffer\n\n # return struct.pack('s'*len(output_buffer), *output_buffer)\n return output_buffer", "def _handle_sb_linemode_slc(self, buf):\n assert 0 == len(buf) % 3, ('SLC buffer must be byte triplets')\n self._slc_start()\n while len(buf):\n func = buf.popleft()\n flag = buf.popleft()\n value = buf.popleft()\n self._slc_process(func, SLC_definition(flag, value))\n self._slc_end()\n self.request_forwardmask()", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def get_ULRB_scramble():\n return _MEGA_SCRAMBLER.call(\"megaScrambler.getSkewbULRBScramble\")", "def _get_lsh(self,sig,b,r):\n lsh = []\n for i,band in enumerate(range(b)):\n lsh.append(hash(tuple(sig[i*r:i*r+r])))\n #logging.debug('hashed signature: %s\\n[get_lsh]\\tto bins: %s',sig,lsh)\n return lsh", "def set_burst_count(self, count):\n if count <= 0 or not type(count) == int:\n raise ValueError(\"Burst count must be a positive, non-zero integer!\")\n\n response = 
self._send_command(\"BC \" + str(count))\n if response == b\"ok\\r\\n\":\n self.burstCount = count\n self.burstDuration = self.burstCount / self.repRate\n return True\n raise LaserCommandError(Laser.get_error_code_description(response))", "def hrsbias(rawpath, outpath, link=False, mem_limit=1e9, sdb=None, clobber=True):\n if not os.path.isdir(rawpath): return \n\n image_list = ImageFileCollection(rawpath)\n if len(image_list.files)==0: return\n\n #make output directory\n if not os.path.isdir(outpath): os.mkdir(outpath)\n \n \n obsdate=get_obsdate(image_list.summary['file'][0])\n \n\n #process the red bias frames\n matches = (image_list.summary['obstype'] == 'Bias') * (image_list.summary['detnam'] == 'HRDET')\n rbias_list = []\n for fname in image_list.summary['file'][matches]:\n ccd = red_process(rawpath+fname)\n rbias_list.append(ccd)\n if sdb is not None: dq_ccd_insert(rawpath + fname, sdb)\n\n if rbias_list:\n if os.path.isfile(\"{0}/RBIAS_{1}.fits\".format(outpath, obsdate)) and clobber: \n os.remove(\"{0}/RBIAS_{1}.fits\".format(outpath, obsdate))\n rbias = ccdproc.combine(rbias_list, method='median', output_file=\"{0}/RBIAS_{1}.fits\".format(outpath, obsdate), mem_limit=mem_limit)\n del rbias_list\n\n #process the red bias frames\n matches = (image_list.summary['obstype'] == 'Bias') * (image_list.summary['detnam'] == 'HBDET')\n hbias_list = []\n for fname in image_list.summary['file'][matches]:\n ccd = blue_process(rawpath+fname)\n hbias_list.append(ccd)\n if sdb is not None: dq_ccd_insert(rawpath + fname, sdb)\n\n if hbias_list:\n if os.path.isfile(\"{0}/HBIAS_{1}.fits\".format(outpath, obsdate)) and clobber: \n os.remove(\"{0}/HBIAS_{1}.fits\".format(outpath, obsdate))\n hbias = ccdproc.combine(hbias_list, method='median', output_file=\"{0}/HBIAS_{1}.fits\".format(outpath, obsdate), mem_limit=mem_limit)\n del hbias_list\n\n\n #provide the link to the bias frame\n if link:\n ldir = '/salt/HRS_Cals/CAL_BIAS/{0}/{1}/'.format(obsdate[0:4], obsdate[4:8])\n if not os.path.isdir(ldir): os.mkdir(ldir)\n ldir = '/salt/HRS_Cals/CAL_BIAS/{0}/{1}/product'.format(obsdate[0:4], obsdate[4:8])\n if not os.path.isdir(ldir): os.mkdir(ldir)\n \n infile=\"{0}/RBIAS_{1}.fits\".format(outpath, obsdate)\n link='/salt/HRS_Cals/CAL_BIAS/{0}/{1}/product/RBIAS_{2}.fits'.format(obsdate[0:4], obsdate[4:8], obsdate)\n if os.path.islink(link) and clobber: os.remove(link)\n os.symlink(infile, link)\n infile=\"{0}/HBIAS_{1}.fits\".format(outpath, obsdate)\n link='/salt/HRS_Cals/CAL_BIAS/{0}/{1}/product/HBIAS_{2}.fits'.format(obsdate[0:4], obsdate[4:8], obsdate)\n if os.path.islink(link) and clobber: os.remove(link)\n os.symlink(infile, link)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
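The additive_scrambler_bb docstring above explains the algorithm: XOR the incoming unpacked bit stream with the output of an LFSR and, optionally, reset the shift register to the seed after every 'count' bits. Below is a minimal pure-Python sketch of that idea; the Fibonacci-style register update and the example mask/seed values are assumptions for illustration and are not guaranteed to match GNU Radio's LFSR bit ordering. Because the LFSR runs independently of the data, applying the same function twice recovers the original bits.

def additive_scramble(bits, mask, seed, reg_len, count=0):
    state = seed
    out = []
    for n, b in enumerate(bits):
        if count and n and n % count == 0:
            state = seed                              # periodic reset to the seed value
        out.append((b & 1) ^ (state & 1))             # XOR data bit with LFSR output bit
        feedback = bin(state & mask).count("1") & 1   # parity of the masked taps
        state = ((state >> 1) | (feedback << (reg_len - 1))) & ((1 << reg_len) - 1)
    return out

data = [1, 0, 1, 1, 0, 0, 1, 0] * 4
scrambled = additive_scramble(data, mask=0x8A, seed=0x7F, reg_len=8)
assert additive_scramble(scrambled, mask=0x8A, seed=0x7F, reg_len=8) == data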
__init__(self) -> digital_bytes_to_syms_sptr
__init__(self, p) -> digital_bytes_to_syms_sptr
def __init__(self, *args):
    this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def bytes_to_syms():\n return _digital_swig.bytes_to_syms()", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_if_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_ic_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, s: ghidra.program.model.symbol.Symbol, row: int, charOffset: int):\n ...", "def __init__(self):\n _snap.TPairHashImpl2_swiginit(self,_snap.new_TPairHashImpl2())", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, byte):\n self.byte = byte", "def __init__(self):\n _snap.TStrHashF_DJB_swiginit(self,_snap.new_TStrHashF_DJB())", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n _snap.TPairHashImpl1_swiginit(self,_snap.new_TPairHashImpl1())", "def __init__(self,size,randbytes=None,hash=sha1,mgf=MGF1_SHA1,saltlen=8):\n if randbytes is None:\n randbytes = load_urandom()\n super(PSSPadder,self).__init__(size,randbytes,hash,mgf,saltlen)", "def __init__(self, key):\n # self.key = key.decode(\"hex\") # Python 2\n self.key = bytes.fromhex(key)", "def __init__(self, converter=BinasciiConverter, **kwargs):\n self.mykey = [] # check\n self.keypc1 = [] # check\n self.c = [] # check\n self.d = [] # check\n self.my_IP = [] # check\n self.Expandsion = [] # check\n self.S_Box_B = [] # check\n self.L = []\n self.R = []\n self.A = [] # check\n self.F = [] # check\n self.PC2 = []\n self.IP = []\n self.converter = converter\n if kwargs.get('key'):\n self._set_key(kwargs['key'], kwargs.get('hexkey', False))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
bytes_to_syms() -> digital_bytes_to_syms_sptr Convert stream of bytes to stream of +/- 1 symbols
def bytes_to_syms(): return _digital_swig.bytes_to_syms()
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def chunks_to_symbols_bc(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_bc(*args, **kwargs)", "def chunks_to_symbols_sc(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_sc(*args, **kwargs)", "def chunks_to_symbols_bf(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_bf(*args, **kwargs)", "def list2sym(lst):\n ...", "def chunks_to_symbols_sf(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_sf(*args, **kwargs)", "def chunks_to_symbols_ic(*args, **kwargs):\n return _digital_swig.chunks_to_symbols_ic(*args, **kwargs)", "def decode_sfp(self, buffer):\n result = []\n if buffer[0:1] != '\\xd4':\n return result\n buflen = struct.unpack('>H', buffer[1:3])[0] + 3\n result.append(struct.unpack('b', buffer[3:4])[0])\n pointer = 4\n args = []\n while pointer < buflen:\n argtype = ord(buffer[pointer:pointer + 1])\n pointer += 1\n if argtype < 64: # short int\n args.append(argtype)\n elif argtype < 128: # short str\n arglen = argtype & 0x3f\n args.append(buffer[pointer:pointer + arglen])\n pointer += arglen\n else:\n arglen = argtype & 0x0f\n if arglen < 4: # decoding integers\n if arglen == 0:\n args.append(ord(buffer[pointer:pointer + 1]))\n elif arglen == 1:\n args.append(struct.unpack('>H', buffer[pointer:pointer + 2])[0])\n elif arglen == 2:\n args.append(struct.unpack('>I', '\\x00' + buffer[pointer:pointer + 3])[0])\n elif arglen == 3:\n args.append(struct.unpack('>I', buffer[pointer:pointer + 4])[0])\n pointer += arglen + 1\n else:\n if argtype == 0xc4: # decoding strings\n arglen = ord(buffer[pointer:pointer + 1])\n elif argtype == 0xc5:\n arglen = struct.unpack('>H', buffer[pointer:pointer + 2])[0]\n pointer += 1\n pointer += 1\n args.append(buffer[pointer:pointer + arglen])\n pointer += arglen\n result.append(args)\n return result", "def bytes_to_hex(byteseq: bytes) -> str:\n return byteseq.hex()", "def bytes_to_hex(data):\n\n #from binascii import hexlify\n #return hex_string\n #hex_string = hexlify(data)\n return ''.join([\"%02X \" % ord(x) for x in data]).strip()", "def Hamming_7_4_dec(stream):\n\n if (len(stream) % 7) != 0:\n exit(\"Aborted decoding: non valid number of bits\")\n\n synd_tab = {\"000\" : np.array([0, 0, 0, 0]),\n \"001\" : np.array([0, 0, 0, 0]),\n \"010\" : np.array([0, 0, 0, 0]),\n \"100\" : np.array([0, 0, 0, 0]),\n \"101\" : np.array([1, 0, 0, 0]),\n \"110\" : np.array([0, 1, 0, 0]),\n \"111\" : np.array([0, 0, 1, 0]),\n \"011\" : np.array([0, 0, 0, 1])\n }\n\n G = np.array([[1, 0, 0, 0, 1, 0, 1], [0, 1, 0, 0, 1, 1, 0], [0, 0, 1, 0, 1, 1, 1], [0, 0, 0, 1, 0, 1, 1]])\n stream = [stream[k:k+7] for k in range(0, len(stream), 7)]\n for i, word in enumerate(stream):\n sig = word[0:4]\n par = word[4:]\n\n sig = np.asarray([int(i) for i in sig])\n par = np.asarray([int(i) for i in par])\n\n code_word = sig.dot(G) % 2\n par_word = code_word[4:]\n\n syndrom = (par + par_word) % 2\n synd = ''\n for bit in syndrom:\n synd += str(bit)\n\n mpep = synd_tab[synd]\n stream[i] = (sig + mpep) % 2\n\n return stream", "def convert_symbol(prototxt_fname):\n sym, output_name, input_dim = _parse_proto(prototxt_fname)\n exec(sym) # pylint: disable=exec-used\n _locals = locals()\n exec(\"ret = \" + output_name, globals(), _locals) # pylint: disable=exec-used\n ret = _locals['ret']\n return ret, input_dim", "def get_data_from_word(self, word):\n return struct.pack('<H', word)", "def 
decode_trading_pair(pair_string):", "def transform_instructions(lines, symbolic_table):\n address_count = 0\n for lnum in range(len(lines)):\n line = lines[lnum]\n field = parse_line(line)\n if field[\"kind\"] == AsmSrcKind.SYM:\n lines[lnum] = build_resolved(symbolic_table, field, address_count)\n if field[\"kind\"] != AsmSrcKind.COMMENT:\n address_count += 1", "def get_el_sym(self) :\n with open(self.filename, 'r') as f :\n for line in f :\n self.el_sym.append(line.split()[1])\n return self.el_sym", "def unpack(source):\n payload, symtab, radix, count = _filterargs(source)\n\n if radix != 62:\n raise UnpackingError('Unknown p.a.c.k.e.r. encoding.')\n\n if count != len(symtab):\n raise UnpackingError('Malformed p.a.c.k.e.r. symtab.')\n\n def lookup(match):\n \"\"\"Look up symbols in the synthetic symtab.\"\"\"\n word = match.group(0)\n return symtab[unbase62(word)] or word\n\n source = re.sub(r'\\b\\w+\\b', lookup, payload)\n return _replacestrings(source)", "def hex_to_hexstr(input_bytes):\n return helpers.hex_str(input_bytes)", "def hexlify(data: Any, sep: Any = ...) -> bytes:\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) -> digital_binary_slicer_fb_sptr __init__(self, p) -> digital_binary_slicer_fb_sptr
def __init__(self, *args): this = _digital_swig.new_digital_binary_slicer_fb_sptr(*args) try: self.this.append(this) except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_probe_density_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, position, spectrum, brightness):\n pass", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_if_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_ic_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_glfsr_source_b_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_sf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _coin.new_SbImage(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_simple_framer_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoMFVec2b()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
binary_slicer_fb() -> digital_binary_slicer_fb_sptr slice float binary symbol outputting 1 bit output: x < 0 -> 0, x >= 0 -> 1
def binary_slicer_fb(): return _digital_swig.binary_slicer_fb()
[ "def linear_fb(fn, sr, filter_num):\n # build the triangle filter bank\n f = (sr / 2) * torch.linspace(0, 1, fn//2+1)\n filter_bands = torch.linspace(min(f), max(f), filter_num+2)\n \n filter_bank = torch.zeros([fn//2+1, filter_num])\n for idx in range(filter_num):\n filter_bank[:, idx] = trimf(\n f, [filter_bands[idx], \n filter_bands[idx+1], \n filter_bands[idx+2]])\n return filter_bank", "def spread_to_binary(spread, cutoff):\n return spread > cutoff", "def handle_slice_ff(self):\n\n allowed_cell_types = [\"FDRE\", \"FDSE\", \"FDCE\", \"FDPE\", \"LDCE\", \"LDPE\"]\n allowed_site_types = [\"SLICEL\", \"SLICEM\"]\n\n for cell_instance, cell_data in self.physical_cells_instances.items():\n cell_type = cell_data.cell_type\n if cell_type not in allowed_cell_types:\n continue\n\n site_name = cell_data.site_name\n site_type = cell_data.site_type\n\n if site_type not in allowed_site_types:\n continue\n\n tile_name = cell_data.tile_name\n tile_type = cell_data.tile_type\n slice_site = self.get_slice_prefix(site_name, tile_type)\n\n bel = cell_data.bel\n\n if cell_type in [\"FDRE\", \"FDCE\", \"LDCE\"]:\n self.add_cell_feature((tile_name, slice_site, bel, \"ZRST\"))\n\n if cell_type.startswith(\"LD\"):\n self.add_cell_feature((tile_name, slice_site, \"LATCH\"))\n\n if cell_type in [\"FDRE\", \"FDSE\"]:\n self.add_cell_feature((tile_name, slice_site, \"FFSYNC\"))\n\n init_param = self.device_resources.get_parameter_definition(\n cell_data.cell_type, \"INIT\")\n init_value = init_param.decode_integer(\n cell_data.attributes[\"INIT\"])\n\n if init_value == 0:\n self.add_cell_feature((tile_name, slice_site, bel, \"ZINI\"))", "def __write_plot3D_block_binary(f,B:Block):\n '''\n https://docs.python.org/3/library/struct.html\n '''\n def write_var(V:np.ndarray):\n for k in range(B.KMAX):\n for j in range(B.JMAX):\n for i in range(B.IMAX):\n f.write(struct.pack('f',V[i,j,k]))\n write_var(B.X)\n write_var(B.Y)\n write_var(B.Z)", "def handle_slice_ff(self):\n\n allowed_cell_types = [\"FDRE\", \"FDSE\", \"FDCE\", \"FDPE\", \"LDCE\", \"LDPE\"]\n allowed_site_types = [\"SLICEL\", \"SLICEM\"]\n\n for cell_instance, cell_data in self.physical_cells_instances.items():\n cell_type = cell_data.cell_type\n if cell_type not in allowed_cell_types:\n continue\n\n site_name = cell_data.site_name\n site_type = cell_data.site_type\n\n if site_type not in allowed_site_types:\n continue\n\n tile_name = cell_data.tile_name\n tile_type = cell_data.tile_type\n slice_site = self.get_slice_prefix(site_name, tile_type)\n\n bel = cell_data.bel\n\n if cell_type in [\"FDRE\", \"FDCE\", \"LDCE\"]:\n self.add_cell_feature((tile_name, slice_site, bel, \"ZRST\"))\n\n if cell_type.startswith(\"LD\"):\n self.add_cell_feature((tile_name, slice_site, \"LATCH\"))\n\n if cell_type in [\"FDRE\", \"FDCE\"]:\n self.add_cell_feature((tile_name, slice_site, \"FFSYNC\"))\n\n init_param = self.device_resources.get_parameter_definition(\n cell_data.cell_type, \"INIT\")\n init_value = init_param.decode_integer(\n cell_data.attributes[\"INIT\"])\n\n if init_value == 0:\n self.add_cell_feature((tile_name, slice_site, bel, \"ZINI\"))", "def sep_fir_filtering(x, S, ht, hv, hmimo, b, kernel=\"naive\"):\n\n B, N, T, C = x.get_shape() # B: number of samples in batch, N: number of nodes, T: temporal length, C: channels\n K, F = hv.get_shape() # K: Length vertex filter, F: Number of filters\n M, F = ht.get_shape() # M: Length time filter, F: Number of filters\n C, F = hmimo.get_shape() # M: Length time filter, F: Number of filters\n\n x = tf.transpose(x, 
perm=[0, 1, 3, 2]) # BxNxCxT\n x = tf.expand_dims(x, axis=4) # BxNxCxTx1\n x = tf.reshape(x, shape=[-1, T, 1]) # BNCxTx1\n\n x_convt = tf.nn.conv1d(x, tf.expand_dims(ht, axis=1), stride=1, padding=\"SAME\", data_format=\"NHWC\") # BNCxTxF\n x_convt = tf.reshape(x_convt, shape=[-1, N, C, T, F]) # BxNxCxTxF\n x_convt = tf.transpose(x_convt, perm=[0, 1, 3, 2, 4])\n\n with tf.name_scope(\"kernel_creation\"):\n if kernel == \"naive\":\n SK = _vertex_fir_kernel(S, K) # KxNxN\n elif kernel == \"chebyshev\":\n SK = _chebyshev_kernel(S, K)\n else:\n raise ValueError(\"Specified kernel type {} is not valid.\" % kernel)\n\n # KxNxN, BxNxTxCxF -> BxKxNxTxCxF\n # a b c d c e f g -> d a b e f g\n SKx = tf.einsum(\"abc,dcefg->dabefg\", SK, x_convt) # BxKxNxTxCxF\n print(SKx.shape)\n # KxF BxKxNxTxCxF -> BxNxTxCxF\n # a b c a e f g b -> c e f g b\n Yunmixed = tf.einsum(\"ab,caefgb->cefgb\", hv, SKx) # BxNxTxCxF\n print(Yunmixed.shape)\n # CxF BxNxTxCxF -> BxNxTxF\n # a b c d e a b -> c d e b\n Ymimo = tf.einsum(\"ab,cdeab->cdeb\", hmimo, Yunmixed)\n print(Ymimo.shape)\n\n if b is not None:\n Ymimo += b\n return Ymimo", "def binaryzeFNF(fnf_filename):\n fnf_handler = GTiffHandler()\n fnf_handler.readFile(fnf_filename)\n\n if fnf_handler.src_filehandler is not None:\n fnf_np_array = np.array(fnf_handler.src_Z)\n\n # accommodating fnf array values so forest is 1, no forest is 0\n fnf_np_array[fnf_np_array > 1] = 0\n\n unique, counts = np.unique(fnf_np_array, return_counts=True)\n print('After accommodating values the distribution of no forest (0) and forest (1) is: ',\n dict(zip(unique, counts)))\n\n print('Changing color table...')\n colorTable = fnf_handler.src_band1.GetColorTable()\n\n zero_colorEntry = colorTable.GetColorEntry(2)\n null_colorEntry = colorTable.GetColorEntry(0)\n\n colorTable.SetColorEntry(0, zero_colorEntry)\n colorTable.SetColorEntry(2, null_colorEntry)\n colorTable.SetColorEntry(3, null_colorEntry)\n\n fnf_handler.src_filehandler.GetRasterBand(1).SetColorTable(colorTable)\n\n print('Saving changes and storing...')\n fnf_handler.src_Z = fnf_np_array\n fnf_handler.writeSrcFile()\n fnf_handler.closeFile()\n\n print('All done')", "def _recover_bfloat16(x):\n if hasattr(x, \"dtype\") and x.dtype.type is np.void:\n assert x.itemsize == 2, \"Unknown dtype!\"\n return x.view(jnp.bfloat16)\n else:\n return x", "def __filtering(data,low,high,freq):\n bplowcut = low/(freq*0.5)\n bphighcut = high/(freq*0.5)\n [b,a] = sig.butter(N=3,Wn=[bplowcut,bphighcut],btype='bandpass')\n filtered = sig.filtfilt(b,a,data)\n\n return filtered", "def floatToChans(x,dpxBool):\n if dpxBool == False:\n return nodpxFloatToChans(x)\n else:\n return dpxFloatToChans(x)", "def opt_lf_num_bits(lf_params, min_bits, max_bits, rms_filt_error=0.1, noise_figure=1,\n sim_steps=1000, fpoints=512, mode=\"tdc\", sigma_ph=0.1):\n print(\"\\n********************************************************\")\n print(\"Optimizing loop filter digital direct form-I implementation for\")\n print(\"number of bits in fixed point data words utilized\")\n sign_bits = 1\n # fint number of integer bits needed\n int_bits = n_int_bits(lf_params)\n print(\"\\n* Integer bits = %d\"%int_bits)\n\n \"\"\" Optimization for quantization noise\n \"\"\"\n print(\"\\n* Optimizing for quantization noise:\")\n # find optimal number of bits for quantization noise\n lf_ideal = LoopFilterIIRPhase(ignore_clk=True, **lf_params)\n w = np.floor(np.random.normal(0, 0.1*lf_params[\"m\"], sim_steps))\n pow_ntdc_post_lf = var_ntdc_post_lf(lf_params, mode=mode) # variance of 
TDC noise at loop filter\n\n x_ideal = np.zeros(sim_steps)\n for n in range(sim_steps):\n x_ideal[n] = lf_ideal.update(w[n], 0)\n\n mses = []\n bit_range = range(min_bits-int_bits-1, max_bits-int_bits)\n for frac_bits in bit_range:\n # use a large number of int bits to avoid overflow. Tuning here is with frac bits as\n lf_quant = LoopFilterIIRPhase(ignore_clk=True, int_bits=32, frac_bits=frac_bits, quant_filt=False, **lf_params)\n x_quant = np.zeros(sim_steps)\n for n in range(sim_steps):\n x_quant[n] = lf_quant.update(w[n], 0)\n mse = np.var(x_ideal-x_quant)\n print(\"\\tN bits = %d\\tQuant noise power = %E LSB^2\"%(frac_bits+int_bits+sign_bits, mse))\n mses.append(mse)\n n = len(mses)-1\n threshold = (10**(noise_figure/10.0) - 1)*pow_ntdc_post_lf\n print(\"!&!&&!\", threshold, pow_ntdc_post_lf)\n while n>=0:\n if mses[n] > threshold:\n n = n+1 if n < len(mses) - 1 else len(mses) - 1\n break\n n -= 1\n opt_frac_bits_qn = bit_range[n]\n print(\"* Optimum int bits = %d, frac bits = %d, sign bits = 1, quant noise = %.3f LSB^2\"%(int_bits, opt_frac_bits_qn, mses[n]))\n\n \"\"\" Optimization for filter accuracy\n \"\"\"\n print(\"\\n* Optimizing for filter design accuracy:\")\n fmin = 1e2\n fclk = lf_params[\"fclk\"]\n\n a = [lf_params[\"a0\"], lf_params[\"a1\"]]\n b = [lf_params[\"b0\"], lf_params[\"b1\"], lf_params[\"b2\"]]\n f, h_ideal = scipy.signal.freqz(a, b, np.geomspace(fmin, fclk/2, fpoints), fs=fclk)\n s = 2j*np.pi*f\n l = (lf_params[\"m\"]/lf_params[\"n\"])*lf_params[\"kdco\"]*h_ideal/s\n g = l/(1+l)\n bit_range = range(min_bits-int_bits-1, max_bits-int_bits)\n mses = []\n for frac_bits in bit_range:\n _lf_params = quant_lf_params(lf_params, int_bits, frac_bits)\n a = [_lf_params[\"a0\"], _lf_params[\"a1\"]]\n b = [_lf_params[\"b0\"], _lf_params[\"b1\"], _lf_params[\"b2\"]]\n f, h = scipy.signal.freqz(a, b, np.geomspace(fmin, fclk/2, fpoints), fs=fclk)\n s = 2j*np.pi*f\n l = (_lf_params[\"m\"]/_lf_params[\"n\"])*_lf_params[\"kdco\"]*h/s\n g = l/(1+l)\n # w, h = scipy.signal.freqz(a, b, points)\n mses.append(np.var(20*np.log10(np.abs(h[1:]))-20*np.log10(np.abs(h_ideal[1:]))))\n print(\"\\tN bits = %d\\tMSE = %E dB^2\"%(frac_bits+int_bits+sign_bits, mses[-1]))\n n = len(mses)-1\n while n>=0:\n if mses[n] > rms_filt_error**2:\n n = n+1 if n < len(mses) - 1 else len(mses) - 1\n break\n n -= 1\n opt_frac_bits_filt_acc = bit_range[n]\n print(\"* Optimum int bits = %d, frac bits = %d, sign_bits=1, quant noise = %E LSB^2\"%(int_bits, opt_frac_bits_filt_acc, mses[n]))\n\n frac_bits = max(opt_frac_bits_qn, opt_frac_bits_filt_acc)\n print(\"\\n* Optimization complete:\")\n print(\"\\tInt bits = %d, frac bits = %d, sign bits = 1\"%(int_bits, frac_bits))\n print(\"\\tTotal number bits = %d\"%(int_bits+frac_bits+sign_bits))\n return int_bits, frac_bits", "def _to_binary_mask(self, array):\n # check where the transparency is not zero\n return (array[..., -1] > 0).astype(self.raster_dtype) * self.raster_value", "def fn_buildFilters(params, fs):\n bandPassRange = params.bpRanges\n params.filtType = 'bandpass'\n params.filterSignal = True\n \n # Handle different filter cases:\n # 1) low pass\n if params.bpRanges[0] == 0:\n # they only specified a top freqency cutoff, so we need a low pass\n # filter\n bandPassRange = params.bpRanges[1]\n params.filtType = 'low'\n if bandpassRange == fs/2:\n # they didn't specify any cutoffs, so we need no filter\n params.filterSignal = False\n \n # 2) High passs\n if params.bpRanges[1] == fs/2 and params.filterSignal:\n # they only specified a lower freqency cutoff, 
so we need a high pass\n # filter\n bandPassRange = params.bpRanges[0]\n params.filtType = 'high'\n \n if params.filterSignal:\n params.fB, params.fA = signal.butter(params.filterOrder, bandPassRange/(fs/2),btype=params.filtType)\n \n # filtTaps = length(fB)\n previousFs = fs\n \n params.fftSize = int(math.ceil(fs * params.frameLengthUs / 10**6))\n if params.fftSize % 2 == 1:\n params.fftSize = params.fftSize - 1 # Avoid odd length of fft\n\n params.fftWindow = signal.windows.hann(params.fftSize)\n\n lowSpecIdx = int(params.bpRanges[0]/fs*params.fftSize)\n highSpecIdx = int(params.bpRanges[1]/fs*params.fftSize)\n\n params.specRange = np.arange(lowSpecIdx, highSpecIdx+1)\n params.binWidth_Hz = fs / params.fftSize\n params.binWidth_kHz = params.binWidth_Hz / 1000\n params.freq_kHz = params.specRange*params.binWidth_kHz # calculate frequency axis\n return previousFs, params", "def raw_fp_field_extraction(optree):\n size = optree.get_precision().get_base_format().get_bit_size()\n field_size = optree.get_precision().get_base_format().get_field_size()\n return TypeCast(\n SubSignalSelection(\n TypeCast(optree, precision=ML_StdLogicVectorFormat(size)),\n 0,\n field_size - 1\n ),\n precision=fixed_point(field_size, 0, signed=False)\n )", "def bfloat16(self: T) -> T:\n return self.to(torch.bfloat16)", "def analysis_filter_bank(x: np.ndarray, n0: int, n1: int) -> Tuple[np.ndarray, np.ndarray]:\n x = np.array(x)\n n = x.shape[0] # len(x)\n\n p = int((n-n1) / 2) # pass-band\n t = int((n0 + n1 - n) / 2 - 1) # transition-band\n s = int((n - n0) / 2) # stop-band\n\n # transition band function\n v = np.arange(start=1, stop=t+1) / (t+1) * np.pi\n transit_band = (1 + np.cos(v)) * np.sqrt(2 - np.cos(v)) / 2.0\n\n # low-pass subband\n lp_subband = np.zeros(n0, dtype=x.dtype)\n lp_subband[0] = x[0] # DC-term\n lp_subband[1:p+1] = x[1:p + 1] # pass-band\n lp_subband[1+p:p+t+1] = x[1 + p:p + t + 1] * transit_band # transition-band\n lp_subband[int(n0 / 2)] = 0 # nyquist\n lp_subband[n0-p-t:n0-p] = x[n - p - t:n - p] * np.flip(transit_band) # transition-band (negative frequencies)\n lp_subband[n0-p:] = x[n - p:] # pass-band (negative frequencies)\n\n # high-pass subband\n hp_subband = np.zeros(n1, dtype=x.dtype)\n hp_subband[0] = 0 # DC-term\n hp_subband[1:t+1] = x[1 + p:t + p + 1] * np.flip(transit_band) # transition-band\n hp_subband[t+1:s+1+t] = x[p + t + 1:p + t + s + 1] # pass-band\n if n // 2 == 0: # nyquist if N is even\n hp_subband[n1/2] = x[n / 2]\n hp_subband[n1-t-s-1:n1-t] = x[n - p - t - s - 1:n - p - t] # pass-band (negative frequencies)\n hp_subband[n1-t:n1] = x[n - p - t:n - p] * transit_band # transition-band (negative frequencies)\n\n return lp_subband, hp_subband", "def half(self: T) -> T:\n return self.to(torch.float16)", "def get_band_pass_filter(fs, wave, low_cut_fs, high_cut_fs):\n order = 4\n b, a = signal.butter(order, [low_cut_fs, high_cut_fs], 'bandpass', analog=False, fs=fs)\n y = signal.filtfilt(b, a, wave, axis=0)\n return y", "def filter_bank_1D(wname):\n\t# returns analysis and synthesis filters concat-ed\n\tfb = torch.tensor(pywt.Wavelet(wname).filter_bank).float()\n\twa, ws = fb[:2,:], fb[2:,:]\n\treturn wa, ws", "def conv_single_step(image_slice, filt, bias):\r\n\r\n # Element-wise product between a_slice_prev and W\r\n s = np.multiply(image_slice, filt)\r\n # Sum over all entries of the volume s\r\n Z = np.sum(s)\r\n # Adding bias b to Z\r\n Z = Z + bias\r\n\r\n return Z" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) -> digital_clock_recovery_mm_cc_sptr __init__(self, p) -> digital_clock_recovery_mm_cc_sptr
def __init__(self, *args): this = _digital_swig.new_digital_clock_recovery_mm_cc_sptr(*args) try: self.this.append(this) except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_fff_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pn_correlator_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n\t\tself._address = MCP79410address\n\t\tlocaltime = time.localtime(time.time())\n\t\trtc_time=RTCC_Struct(localtime.tm_sec,localtime.tm_min,localtime.tm_hour,localtime.tm_wday,localtime.tm_mday,localtime.tm_mon,(localtime.tm_year-2000))\n\n\t\tself.SetHourFormat(24)\n\t\tself.EnableVbat()\t\t\t\t#Enable the battery back-up\t\n\t\tself.EnableOscillator()\t\t\t#Start RTC clock\n\t\tself.SetTime(rtc_time)", "def __init__(self):\n this = _coin.new_SoMFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, process_chain, showWarnings=1, maxsec_rttrace=7200, analysis_overlap=0): # ppc\n self.process_chain = process_chain\n super(PadGenerator, self).__init__(showWarnings)\n self.show_warnings = showWarnings\n self.maxsec_rttrace = maxsec_rttrace # in seconds for EACH (x,y,z) rt_trace\n #self.scale_factor = scale_factor # ppc\n self.analysis_interval = self.process_chain.analysis_interval # ppc\n self.analysis_overlap = analysis_overlap\n self.analysis_samples = None\n self.starttime = None\n if showWarnings:\n self.warnfiltstr = 'always'\n else:\n self.warnfiltstr = 'ignore'", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, mem, inp, outp):\n self.pc = 0\n self.mem = mem\n self.inp = inp\n self.outp = outp", "def __init__(self, pin, freq, dc_left, dc_right):\n\n pin = machine.Pin(pin)\n self.pwm = machine.PWM(pin, freq=freq)\n self.left, self.right = dc_left, dc_right", "def __init__(self):\r\n\r\n # bytes to begin and finish a command to the charging point\r\n self.start = 0x02\r\n self.stop = 0x03\r\n\r\n # Addresses\r\n self.modem_adr = \"80\" # address of the master modem to call to manage the charging power\r\n self.manager_adr = \"A0\" # address of the Energy manager that send the commands\r\n # Broadcast = \"BC\"\r\n self.cmd = \"69\" # The only existing command for EV-Box charging points\r\n self.adr = self.modem_adr + self.manager_adr\r\n self.rien = 0", "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_chunks_to_symbols_bc_sptr(*args)\n try: 
self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self,hh=0,mm=0,ss=0):\r\n self.__hh = hh\r\n self.__mm = mm\r\n self.__ss = ss", "def __init__(self, p, i, d, get_current_time, get_feedback_value):\r\n # p, i, and d constants\r\n self.p, self.i, self.d = p, i, d\r\n\r\n # saves the functions that return the time and the feedback\r\n self.get_current_time = get_current_time\r\n self.get_feedback_value = get_feedback_value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
clock_recovery_mm_cc(float omega, float gain_omega, float mu, float gain_mu, float omega_relative_limit) -> digital_clock_recovery_mm_cc_sptr Mueller and Müller (M&M) based clock recovery block with complex input, complex output. This implements the Mueller and Müller (M&M) discrete-time error-tracking synchronizer.
def clock_recovery_mm_cc(*args, **kwargs): return _digital_swig.clock_recovery_mm_cc(*args, **kwargs)
[ "def DMFNeuFluxMCDet(ch,DMm,DMsig,param):\n import os\n # FIX SCALING\n ## include years\n DM_annihilation_rate_Sun = DMSunAnnihilationRate(DMm,DMsig,param) # [eV]\n #DM_annihilation_rate_Sun = 1.6e21/param.sec\n normalization = np.sum((DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))) # [eV^3]\n \n ## BEGIN CREATING BINS ##\n # assuming neutrino binnum = 30\n nu_bin_num = 30\n point_num = 1000.0\n Emin = 1.0\n Emax = 1000.0\n \n E_nu_list = gt.LogSpaceEnergies(Emin,Emax,binnum = nu_bin_num)\n E_bin_width = [E_nu_list[i+1]-E_nu_list[i] for i in range(len(E_nu_list)-1)]\n E_nu_hpl = gt.MidPoint(gt.LogSpaceEnergies(Emin,Emax,binnum = nu_bin_num)) \n E_nu_bin = [0.0]*nu_bin_num # neutrino bins\n E_anu_bin = [0.0]*nu_bin_num # antineutrino bins\n E_bin_ratio = E_nu_list[1]/E_nu_list[0]\n ## END CREATING BINS ##\n \n for ineu in range(3):\n ## BEGIN READING DATA FROM MC ## \n \n MCdatapath = \"../data/myMC/trials/legion_ineu_\"+str(ineu)+\"_\"+param.name+\"/\"\n rparam = PC.PhysicsConstants()\n \n files = []\n for filename in os.listdir(MCdatapath):\n files.append(filename)\n \n # load all events\n evt = []\n for filename in files :\n file = open(MCdatapath+filename,'r')\n data = []\n gt.hreadfilev4(file,data,rparam)\n if gt.Compareparams(param,rparam):\n print \"Using : \"+filename\n for e in data :\n for ee in e:\n evt.append(ee)\n \n #del e,ee,data\n \n ## END READING DATA FROM MC ##\n \n # GET DARK MATTER DISTRIBUTION \n DM_pdf = DM_distribution(ch,DMm/param.GeV,ineu)\n \n for i,e in enumerate(evt):\n if len(e) > 4:\n neutrino = True\n \n family = e[0]\n try:\n next_family = evt[i+1]\n if family == next_family and e[1] != 2 :\n neutrino = False\n except:\n pass\n \n E_nu_in = e[2]\n E_nu_out = e[3]\n i = int(np.log(E_nu_out/E_nu_list[0])/np.log(E_bin_ratio))\n j = int(np.log(E_nu_in/E_nu_list[0])/np.log(E_bin_ratio))\n if neutrino:\n E_nu_bin[i] = E_nu_bin[i] + e[5]*(float(DM_pdf.PDF(E_nu_in)/DM_pdf.DMm)*E_bin_width[j]/(np.log(E_nu_list[i])-np.log(E_nu_list[i-1]))) # change to initial neutrino bin width\n #E_nu_bin[i] = E_nu_bin[i] + e[5]*(float(DM_pdf.PDF(E_nu_in)/DM_pdf.DMm))\n else :\n E_anu_bin[i] = E_anu_bin[i] + e[5]*(float(DM_pdf.PDF(E_nu_in)/DM_pdf.DMm)*E_bin_width[i])\n #E_anu_bin[i] = E_anu_bin[i] + e[5]*(float(DM_pdf.PDF(E_nu_in)/DM_pdf.DMm))\n \n #int_weight = integrate.quad(lambda E: PDF.PDF(E)/PDF.DMm,Emin,Emax)[0]\n # rescale\n E_nu_bin = [normalization*x/(point_num) for x in E_nu_bin]\n E_anu_bin = [normalization*x/(point_num) for x in E_anu_bin] \n \n inter_neu = interpolate.InterpolatedUnivariateSpline(E_nu_hpl,E_nu_bin)\n inter_aneu = interpolate.InterpolatedUnivariateSpline(E_nu_hpl,E_anu_bin)\n \n return [inter_neu, inter_aneu]", "def mag_gain(self, gain=0x20):\n self._mag_gain = gain\n self.i2c.writeto_mem(self.ADDRESS_MAG, self.REGISTER_MAG_CRB_REG_M, self._mag_gain)\n if self._mag_gain == MAGGAIN_1_3:\n self._lsm303mag_gauss_lsb_xy = 1100.0\n self._lsm303mag_gauss_lsb_z = 980.0\n elif self._mag_gain == MAGGAIN_1_9:\n self._lsm303mag_gauss_lsb_xy = 855.0\n self._lsm303mag_gauss_lsb_z = 760.0\n elif self._mag_gain == MAGGAIN_2_5:\n self._lsm303mag_gauss_lsb_xy = 670.0\n self._lsm303mag_gauss_lsb_z = 600.0\n elif self._mag_gain == MAGGAIN_4_0:\n self._lsm303mag_gauss_lsb_xy = 450.0\n self._lsm303mag_gauss_lsb_z = 400.0\n elif self._mag_gain == MAGGAIN_4_7:\n self._lsm303mag_gauss_lsb_xy = 400.0\n self._lsm303mag_gauss_lsb_z = 355.0\n elif self._mag_gain == MAGGAIN_5_6:\n self._lsm303mag_gauss_lsb_xy = 330.0\n self._lsm303mag_gauss_lsb_z = 295.0\n elif self._mag_gain 
== MAGGAIN_8_1:\n self._lsm303mag_gauss_lsb_xy = 230.0\n self._lsm303mag_gauss_lsb_z = 205.0", "def run(self):\n\n mu_low=self.ip['mb_at_mb']\n\n\n #-------------#\n # The running #\n #-------------#\n\n MZ = self.ip['Mz']\n alpha_at_mb = 1/self.ip['aMZinv']\n as_MZ = self.ip['asMZ']\n as_mb = self.ip['as_at_mb']\n\n if self.DM_type == \"D\" or self.DM_type == \"M\" or self.DM_type == \"C\":\n adm_eff = self.ADM_eff\n else:\n pass\n\n evolve1 = rge.RGE(self.gamma_QCD, 5)\n evolve2 = rge.RGE(self.gamma_QCD2, 5)\n if self.DM_type == \"D\" or self.DM_type == \"M\" or self.DM_type == \"C\":\n evolve8 = rge.RGE(adm_eff, 5)\n else:\n pass\n\n # Mixing in the dim.6 DM-SM sector\n #\n # Strictly speaking, MZ and mb should be defined at the same scale\n # (however, this is a higher-order difference)\n C_at_mb_QCD = np.dot(evolve2.U0_as2(as_MZ, as_mb),\\\n np.dot(evolve1.U0(as_MZ, as_mb),\\\n self.coeff_list_dm_dim5_dim6_dim7))\n C_at_mb_QED = np.dot(self.coeff_list_dm_dim5_dim6_dim7, self.gamma_QED)\\\n * np.log(mu_low/MZ) * alpha_at_mb/(4*np.pi)\\\n + np.dot(self.coeff_list_dm_dim5_dim6_dim7, self.gamma_QED2)\\\n * np.log(mu_low/MZ) * (alpha_at_mb/(4*np.pi))**2\n\n if self.DM_type == \"D\" or self.DM_type == \"M\" or self.DM_type == \"C\":\n # Mixing in the dim.6 SM-SM and dim.8 DM-SM sector\n\n DIM6_DIM8_init = np.hstack((self.coeff_list_sm_dim6, self.coeff_list_dm_dim8))\n\n DIM6_DIM8_at_mb = np.dot(evolve8.U0(as_MZ, as_mb), DIM6_DIM8_init)\n\n\n # Revert back to dictionary\n\n dict_coeff_mb = list_to_dict(C_at_mb_QCD + C_at_mb_QED, self.wc_name_list)\n if self.DM_type == \"D\" or self.DM_type == \"M\" or self.DM_type == \"C\":\n dict_dm_dim8 = list_to_dict(np.delete(DIM6_DIM8_at_mb, np.s_[0:100]), self.wc8_name_list)\n dict_sm_dim6 = list_to_dict(np.delete(DIM6_DIM8_at_mb, np.s_[100:112]), self.sm_name_list)\n dict_sm_lepton_dim6 = list_to_dict(self.coeff_list_sm_lepton_dim6, self.sm_lepton_name_list)\n\n dict_coeff_mb.update(dict_dm_dim8)\n dict_coeff_mb.update(dict_sm_dim6)\n dict_coeff_mb.update(dict_sm_lepton_dim6)\n\n return dict_coeff_mb", "def m16_large_moment0():\n # Use the big CII map. Needs some cleaning but generally much larger than the original I was using before.\n\n # line_stub = 'cii'\n # line_stub = '12co32'\n line_stub = 'c18o10'\n\n if line_stub in large_map_filenames:\n # Use the custom filename rather than the default\n filename = large_map_filenames[line_stub]\n else:\n # Use default filename from cube_utils (many of these are centered around Pillars)\n filename = line_stub\n\n velocity_intervals = [ # what I have for now.. 
will add later\n (10, 17), (17, 21), (23.5, 27), (27, 33), (30, 35),\n ]\n\n cube_obj = cube_utils.CubeData(filename).convert_to_K().convert_to_kms()\n\n # Can always remove this loop\n for i in range(len(velocity_intervals)):\n\n vel_lims = tuple(x*kms for x in velocity_intervals[i])\n mom0 = cube_obj.data.spectral_slab(*vel_lims).moment0()\n\n fig = plt.figure()\n ax = plt.subplot(111, projection=cube_obj.wcs_flat)\n im = ax.imshow(mom0.to_value(), origin='lower', vmin=0, cmap='plasma')\n fig.colorbar(im, ax=ax, label=f\"{cube_utils.cubenames[line_stub]} {mom0.unit.to_string('latex_inline')}\")\n vel_stub_simple = make_simple_vel_stub(vel_lims)\n savename = f\"/home/ramsey/Pictures/2023-04-25/mom0_{line_stub}_{vel_stub_simple}.png\"\n if not os.path.exists(os.path.dirname(savename)):\n os.makedirs(os.path.dirname(savename))\n print(\"Created\", os.path.dirname(savename))\n fig.savefig(savename, metadata=catalog.utils.create_png_metadata(title=make_vel_stub(vel_lims),\n file=__file__, func='m16_large_moment0'))", "def RJMCMC(Model_parameters, midpoint_age, delta_age, intensity, delta_intensity, stratification, Return_info ):\n \n# Seed the generator so we get the same values every time\n np.random.seed(seed = 1) \n \n# set the best K to -10, in order that it is obvious if it is never updated. \n k_best = -10\n \n k_max_array_bound = Model_parameters['K_max'] + 1;\n# Num_samples_to_store = int(np.ceil((Model_parameters['Nsamples']-Model_parameters['Burn_in'])/Model_parameters['thinning']))\n \n \n# Calculate number of collected samples for credible intervals -- if we are collecting.\n if Model_parameters['Calc_credible']:\n Num_samples_credible=int(np.ceil((Model_parameters['Nsamples']-Model_parameters['Burn_in'])*((100 - Model_parameters['credible'])/200.0)/Model_parameters['thinning'])) \n print('Collecting credible interval data' )\n# Define an equally-spaced grid to define the model:\n X = np.linspace(Model_parameters['X_min'], Model_parameters['X_max'],Model_parameters['discretise_size'])\n\n# predefine arrays to keep track of the credible intervals\n val_min, val_max = np.zeros(Model_parameters['discretise_size']), np.zeros(Model_parameters['discretise_size'])\n ind_min, ind_max = np.zeros(Model_parameters['discretise_size'],dtype=int), np.zeros(Model_parameters['discretise_size'],dtype=int)\n MINI, MAXI = np.zeros((Model_parameters['discretise_size'], Num_samples_credible)), np.zeros((Model_parameters['discretise_size'], Num_samples_credible))\n \n# predefine other arrays \n age, age_prop = np.zeros(len(midpoint_age)), np.zeros(len(midpoint_age))\n pt, pt_prop, pt_best = np.zeros( (k_max_array_bound, 2)), np.zeros( (k_max_array_bound, 2)), np.zeros( (k_max_array_bound, 2))\n endpt = np.zeros(2)\n\n# initialise working variables\n b = bb = AB = AD = PB = PD = ACV = PCV = AP = PP = PA = AA = 0\n\n# Initialize - Define randomly the first model of the chain\n k = np.random.randint(Model_parameters['K_min'],high=Model_parameters['K_max']+1)\n\n# set the data ages to be the given nominal age (i.e. discount any age error). 
\n# This is so datasets with stratification are valid for the initial model.\n# If we randomised the ages, we'd have to check that stratification was satifisfied, \n# and it could take a while before we find a valid model.\n\n age = midpoint_age.copy() #make a copy of the midpoint age.\n\n# Check to ensure that the stratification constraints (if any) are satisifed\n if not check_stratification(age, stratification):\n print( 'INITIAL DATA SET IS NOT CONSISTENT WITH GIVEN STRATIFICATION CONSTRAINTS')\n sys.exit(0)\n\n# Check to make sure that the ages do not extend past the model ends. For then we can't compute the likelihood.\n# This only happens with normally distributed ages, for which the age can be any value with prob > 0.\n# age = np.array( [ max( Model_parameters['X_min'], min(a, Model_parameters['X_max'])) for a in age] )\n for i in range(len(age)):\n age[i] = max( Model_parameters['X_min'], min(age[i], Model_parameters['X_max']))\n\n for i in range(k):\n pt[i,0] = Model_parameters['X_min'] + np.random.rand() * (Model_parameters['X_max'] - Model_parameters['X_min']) #position of internal vertex\n pt[i,1] = Model_parameters['I_min'] + np.random.rand() * (Model_parameters['I_max'] - Model_parameters['I_min']) #magnitude of internal vertex\n \n endpt[0] = Model_parameters['I_min'] + np.random.rand() * (Model_parameters['I_max'] - Model_parameters['I_min'])\n endpt[1] = Model_parameters['I_min'] + np.random.rand() * (Model_parameters['I_max'] - Model_parameters['I_min'])\n \n# make sure the positions are sorted in ascending order based on age.\n #print(pt)\n #print('*')\n pt[0:k] = pt[pt[0:k,0].argsort()]\n #np.ndarray.sort(pt, axis = 0)\n #print(pt)\n \n# COMPUTE INITIAL MISFIT\n# suppress exp overflow warnings - this can happen at the early stages of the algorithm\n trash = np.seterr(over = 'ignore')\n \n like=0;\n interpolated_signal = Find_linear_interpolated_values( Model_parameters['X_min'], Model_parameters['X_max'], pt[0:k,:], endpt, age )\n #print(delta_intensity)\n \n #print( len(age))\n #print( intensity[81] )\n #print( interpolated_signal[81] )\n #print( delta_intensity[81] )\n #q = (intensity - interpolated_signal)**2 / (2.0 * delta_intensity**2)\n #print(q[81])\n if Model_parameters['running_mode'] == 1:\n like = np.sum( (intensity - interpolated_signal)**2 / (2.0 * delta_intensity**2) )\n else:\n like = 1.0\n \n\n like_best=like\n like_init=like\n print('Initial likelihood is %s' % like)\n\n# setup output for model data\n if Model_parameters['output_model_freq'] > 0:\n output_models = open(os.path.join(Model_parameters['outputs_directory'],Model_parameters['output_model_name']), 'w')\n output_models.write('%d\\n' % Model_parameters['discretise_size'])\n for i in range(Model_parameters['discretise_size']):\n output_models.write('%10.3f ' % X[i] )\n output_models.write('\\n')\n\n# setup output for joint distribution data\n if Model_parameters['output_joint_distribution_freq'] > 0:\n joint_distribution_directory = os.path.join(Model_parameters['outputs_directory'],'Joint_distribution_data')\n if not os.path.exists(joint_distribution_directory): os.makedirs(joint_distribution_directory)\n\n joint_dist = [0] * len(age)\n for i in range(len(age)):\n joint_dist[i] = open(os.path.join(joint_distribution_directory,'Sample_%04d.dat'% (i+1)),'w')\n \n# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n#%%%%%%%%%%%%%%%%% START RJ-MCMC SAMPLING %%%%%%%%%%%%%%%%%\n# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n for s in 
range(1,Model_parameters['Nsamples']+1):\n \n# Print statistics of the chain. \n if np.mod(s,Model_parameters['show'])==0 and s > Model_parameters['Burn_in']:\n print( 'Samples %d, Vertices %d, Acceptance: change F %7.2f, change age %7.2f, birth %7.2f, death %7.2f resample ages %7.2f, likelihood %8.2f' % (s,k,100.0*ACV/PCV if PCV != 0 else np.NaN,100.0*AP/PP if PP != 0 else np.NaN, 100.0*AB/PB if PB != 0 else np.NaN, 100.0*AD/PD if PD != 0 else np.NaN, 100.0*AA/PA if PA != 0 else np.NaN, like) )\n \n birth=move=death=change_age = change_value = 0\n \n# initialise the proposed model with the current \n age_prop = age.copy()\n pt_prop=pt.copy()\n endpt_prop = endpt.copy()\n like_prop = like\n k_prop = k\n prob = 1.0\n out = 1\n \n#----------------------------------------------------------------------\n# Every 3rd iteration, propose a new value\n if np.mod(s,3)==0:\n if s>Model_parameters['Burn_in']: PCV +=1\n change_value = 1\n k_prop = k\n ind = np.random.randint(0,high=k+2) #generate a random integer between 0 and k+1\n \n# choose which interior point to change, and check bounds to see if outside prior\n\n if ind == k: # change left end point\n endpt_prop[0] = endpt[0] + np.random.randn() * Model_parameters['sigma_change']\n if endpt_prop[0] < Model_parameters['I_min'] or endpt_prop[0] > Model_parameters['I_max']: out = 0\n \n elif ind == k+1: # change right end point\n endpt_prop[1] = endpt[1] + np.random.randn()*Model_parameters['sigma_change']\n if endpt_prop[1] < Model_parameters['I_min'] or endpt_prop[1] > Model_parameters['I_max']: out = 0\n \n else: # change interior point\n #print(pt_prop[ind,1], pt[0,1])\n pt_prop[ind,1] += np.random.randn(1)*Model_parameters['sigma_change']\n #pt_prop[ind,1] = pt_prop[ind,1] + np.random.randn(1)*Model_parameters['sigma_change']\n #print(pt_prop[ind,1], pt[0,1])\n if pt_prop[ind,1] < Model_parameters['I_min'] or pt_prop[ind,1] > Model_parameters['I_max']: out = 0\n\n# Every 3rd iteration iteration change the vertex positions\n elif np.mod(s,3)==1: # Change age position\n u = np.random.randint(0,high=3) #choose randomly between 3 operations:\n \n if u == 0: # BIRTH ++++++++++++++++++++++++++++++++++++++\n birth=1\n if s> Model_parameters['Burn_in']: PB += 1\n k_prop = k+1\n #print(np.size(pt_prop), k_prop)\n pt_prop[k_prop-1,0]=Model_parameters['X_min'] + np.random.rand()*(Model_parameters['X_max']-Model_parameters['X_min'])\n # Ensure that the new age is different to all the others - if it is, set out = 0 and abandon this model\n if pt_prop[k_prop-1,0] in pt_prop[0:k_prop-1,0]: out = 0 \n if k_prop > Model_parameters['K_max']: out=0\n\n# interpolate to find magnitude as inferred by current state\n if out == 1:\n interpolated_signal = Find_linear_interpolated_values( Model_parameters['X_min'], \n Model_parameters['X_max'], pt[0:k,:], endpt, pt_prop[k_prop-1,0] )\n \n pt_prop[k_prop-1,1]=interpolated_signal+np.random.randn()*Model_parameters['sigma_birth']\n \n# Get probability\n prob=(1.0/(Model_parameters['sigma_birth']*np.sqrt( 2.0 * np.pi )) *\n np.exp(-(interpolated_signal-pt_prop[k_prop-1,1])**2/(2.0*Model_parameters['sigma_birth']**2)) )\n \n# Check BOUNDS to see if outside prior\n \n if pt_prop[k_prop-1,1] > Model_parameters['I_max'] or pt_prop[k_prop-1,1] < Model_parameters['I_min']: out=0\n if pt_prop[k_prop-1,0] > Model_parameters['X_max'] or pt_prop[k_prop-1,0] < Model_parameters['X_min']: out=0\n\n# make sure the positions are sorted in ascending order.\n pt_prop[0:k_prop] = pt_prop[pt_prop[0:k_prop,0].argsort()]\n \n elif u == 1: # ! 
DEATH +++++++++++++++++++++++++++++++++++++++++\n death=1\n if s> Model_parameters['Burn_in']: PD += 1\n \n k_prop = k-1\n if k_prop < Model_parameters['K_min']: out=0\n \n if out == 1:\n ind = np.random.randint(0,high=k) # choose a vertex between 0 and k-1\n pt_death = pt[ind,:]\n pt_prop = pt.copy()\n pt_prop = np.delete(pt_prop,ind,axis=0) # remove point to be deleted\n pt_prop = np.append( pt_prop, [[0,0]],axis=0) #add row of zeros to end to make sure the shape doesn't change.\n \n# Get prob - interpolate \n interpolated_signal = Find_linear_interpolated_values( Model_parameters['X_min'], \n Model_parameters['X_max'], pt_prop[0:k_prop,:], endpt_prop, pt_death[0] )\n prob=( 1.0/(Model_parameters['sigma_birth']*np.sqrt(2.0*np.pi)) * \n np.exp(-(interpolated_signal -pt_death[1])**2/(2.0*Model_parameters['sigma_birth']**2)) )\n \n\n else: # MOVE +++++++++++++++++++++++++++++++++++++++++++++++++++++++\n if s> Model_parameters['Burn_in']: PP += 1\n move=1\n k_prop = k\n if k == 0: out = 0 #If there are no points to move, then we can't move any\n \n if out == 1: \n ind = np.random.randint(0,high=k) # choose a vertex between 0 and k-1\n pt_prop[ind,0] = pt[ind,0]+np.random.randn()*Model_parameters['sigma_move'] #Normal distribution of move destination\n if pt_prop[ind,0] < Model_parameters['X_min'] or pt_prop[ind,0] > Model_parameters['X_max']: out = 0 \n \n# Ensure that the new age is different to all the others - if it is, set out = 0 and abandon this model\n if pt_prop[ind,0] in np.delete(pt[0:k],ind,axis=0): out = 0 \n\n\n# make sure the positions are sorted in ascending order.\n pt_prop[0:k_prop] = pt_prop[pt_prop[0:k_prop,0].argsort()]\n\n else: # every 3rd iteration change the ages\n# select ages at random\n\n if s> Model_parameters['Burn_in']: PA += 1\n change_age = 1 \n num_age_changes = int(np.floor(len(age)/float(Model_parameters['age_frac'])))\n random_indices = np.random.randint(0,len(age),num_age_changes)\n for i in random_indices: #choose num_age_changes from the set of ages and perturb\n if Model_parameters['Age_distribution'] == 'U':\n age_prop[i] = midpoint_age[i] + 2.0 * (np.random.rand(1)-0.5) * delta_age[i]\n else:\n age_prop[i] = midpoint_age[i] + np.random.randn() * delta_age[i]\n if age_prop[i] < Model_parameters['X_min'] or age_prop[i] > Model_parameters['X_max']: out = 0\n \n\n# Check to ensure that the stratification constraints (if any) are satisifed\n if not check_stratification(age_prop, stratification): out = 0\n \n# end: decide on what proposal to make\n\n# COMPUTE MISFIT OF THE PROPOSED MODEL \n\n if out==1:\n like_prop=0;\n interpolated_signal = Find_linear_interpolated_values( Model_parameters['X_min'], Model_parameters['X_max'], \n pt_prop[0:k_prop,:], endpt_prop, age_prop )\n if Model_parameters['running_mode'] == 1:\n like_prop = np.sum( (intensity - interpolated_signal)**2 / (2.0 * delta_intensity**2) )\n else:\n like_prop = 1.0\n \n \n \n# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n# SEE WHETHER MODEL IS ACCEPTED\n \n accept=0\n alpha = 0\n# The acceptance term takes different values according the the proposal that has been made.\n if out == 1:\n if birth==1:\n \n alpha = ((1.0/((Model_parameters['I_max']-Model_parameters['I_min'])*prob))*np.exp(-like_prop+like))\n if np.random.rand() <alpha:\n accept=1\n if s>Model_parameters['Burn_in']: AB += 1\n \n elif death==1:\n alpha = ((Model_parameters['I_max']-Model_parameters['I_min'])*prob)*np.exp(-like_prop+like)\n if np.random.rand() <alpha:\n accept=1\n if s>Model_parameters['Burn_in']: AD+=1\n \n 
else: # NO JUMP, i.e no change in dimension\n alpha = np.exp(-like_prop+like)\n if np.random.rand() <alpha:\n accept=1\n if s>Model_parameters['Burn_in']: \n if change_value == 1:\n ACV += 1\n elif move == 1:\n AP += 1\n elif change_age ==1:\n AA += 1\n else:\n print('FATAL ERROR 1'); sys.exit(0)\n \n#If accept, update the values\n if accept==1:\n k=k_prop\n pt=pt_prop.copy()\n like=like_prop\n endpt = endpt_prop.copy()\n age = age_prop.copy()\n \n\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n# Collect samples for the ensemble solution\n\n if s>Model_parameters['Burn_in'] and np.mod( s-Model_parameters['Burn_in'],Model_parameters['thinning'])==0:\n b+=1\n\n # if Model_parameters['joint_distribution_freq'] > 0:\n # write joint distribution data\n \n\n # CALL Find_linear_interpolated_values( k, x_min, x_max, pt, endpt, discretise_size, x(1:discretise_size), interpolated_signal)\n \n #IF( FREQ_WRITE_MODELS > 0) then\n #if( s>burn_in .AND. mod(s-burn_in,thin * FREQ_WRITE_MODELS) == 0) WRITE(15,format_descriptor) interpolated_signal(1:discretise_size)\n #ENDIF\n\n\n# CALL Find_linear_interpolated_values( k, x_min, x_max, pt, endpt, discretise_size, x(1:discretise_size), interpolated_signal)\n interpolated_signal = Find_linear_interpolated_values( Model_parameters['X_min'], Model_parameters['X_max'], \n pt[0:k,:], endpt, X )\n# DO THE AVERAGE\n Return_info['Av'] += interpolated_signal[:]\n\n \n# build marginal distribution for ages:\n for i in range(len(age)):\n if Model_parameters['Age_distribution'] == 'U':\n bin_index = int( np.floor( (age[i]-(midpoint_age[i]-delta_age[i])) / (delta_age[i] * 2.0) * Model_parameters['Nbins_age_marginal']))\n else:\n bin_index = int( np.floor( (age[i]-(midpoint_age[i]-2.0 * delta_age[i])) / (delta_age[i] * 4.0) * Model_parameters['Nbins_age_marginal']))\n# For normally distributed ages, bin centred on mean with a 2*standard deviation range each side.\n# Should a value fall outside this range, then simply add to either the 1st or last bin.\n bin_index = max(bin_index, 0)\n bin_index = min(bin_index, Model_parameters['Nbins_age_marginal']-1)\n\n Return_info['Marginal_ages'][i,bin_index] += 1\n\n# write model data to disk\n\n if Model_parameters['output_model_freq'] > 0:\n if np.mod( s-Model_parameters['Burn_in'], Model_parameters['thinning'] * Model_parameters['output_model_freq']) == 0:\n for i in range(Model_parameters['discretise_size']):\n output_models.write('%10.3f \\n' % interpolated_signal[i] )\n\n# collect joint distribution data\n if Model_parameters['output_joint_distribution_freq'] > 0 and np.mod( s-Model_parameters['Burn_in'], Model_parameters['thinning'] * Model_parameters['output_joint_distribution_freq']) == 0:\n interpolated_samples = Find_linear_interpolated_values( Model_parameters['X_min'], Model_parameters['X_max'], pt[0:k,:], endpt, age )\n for i in range(len(age)):\n joint_dist[i].write('%15.3f %15.3f\\n' % (age[i],interpolated_samples[i]) )\n \n# build marginal intensity density\n for i in range(len(X)):\n bin_index = int(np.floor( (interpolated_signal[i]-Model_parameters['I_min'])/ (Model_parameters['I_max']-Model_parameters['I_min']) * Model_parameters['Nbins']))\n if bin_index <0 or bin_index > Model_parameters['Nbins']-1:\n print('FATAL ERROR, BIN_INDEX IS OUT OF RANGE')\n print('MODEL POINT %s VALUE %s' %(i,interpolated_signal[i]) )\n print('INTENSITY MIN/MAX %s %s ' %(Model_parameters['I_min'], Model_parameters['I_max'] ))\n print('Model is %s %s %s' % (k,endpt,pt[0:k,:]))\n print(age); print(''); print(interpolated_signal)\n 
sys.exit(0)\n Return_info['Intensity_density'][i,bin_index] += 1\n\n \n# Do (e.g.) the 95% credible interval by keeping the lowest and greatest 2.5% of\n# all models at each sample point. We could either keep ALL the data and at\n# the end determine these regions (but this is very costly in terms of memory), or keep a running list of the\n# number of data points we need. At the end of the algorithm, simply take the\n# maximum of the smallest points, and the min of the largest, to get the\n# bounds on the credible intervals.\n# Method:\n# Num_samples_credible is the number of data points corresponding to 2.5% of the total number\n# of samples (after thinning).\n# Collect Num_samples_credible datapoints from the first Num_samples_credible samples.\n# For each subsequent sample, see if the value should actually be inside\n# the 2.5% tail. If it is, replace an existing value by the current value.\n# Repeat.\n\n if Model_parameters['Calc_credible']:\n for i in range(Model_parameters['discretise_size']):\n if b <= Num_samples_credible: \n #print(b-1, Num_samples_credible)\n MINI[i,b-1]=interpolated_signal[i]\n MAXI[i,b-1]=interpolated_signal[i]\n if b == Num_samples_credible:\n val_min[i] = MAXI[i,:].min(); ind_min[i] = MAXI[i,:].argmin()\n val_max[i] = MINI[i,:].max(); ind_max[i] = MINI[i,:].argmax()\n\n else: #we've already filled the tails, now compare each data point to see whether it should be included or not.\n if interpolated_signal[i] > val_min[i]:\n MAXI[i,ind_min[i]] = interpolated_signal[i]\n val_min[i] = MAXI[i,:].min(); ind_min[i] = MAXI[i,:].argmin()\n \n if interpolated_signal[i] < val_max[i]:\n MINI[i,ind_max[i]] = interpolated_signal[i]\n val_max[i] = MINI[i,:].max(); ind_max[i] = MINI[i,:].argmax()\n \n \n# Build histogram of number of changepoints: k\n Return_info['Changepoint_hist'][k] += 1\n\n# k can be zero here - I think there is mistake in the fortran: k can never be zero.\n\n# Do the histogram on change points\n for i in range(k):\n Return_info['Change_points'][bb]=pt[i,0]\n bb += 1\n\n# process ALL models now...\n Return_info['Misfit'][s-1] = like\n \n# Get the best model\n if like<like_best and accept == 1:\n pt_best = pt.copy()\n k_best = k\n endpt_best = endpt.copy()\n like_best = like\n age_best = age.copy()\n \n# ----------------------------- \n# end: the Sampling of the mcmc\n# ----------------------------\n\n Return_info['Change_points'] = Return_info['Change_points'][0:bb] #only return non-zero values.\n Return_info['Av'] = Return_info['Av']/b\n # print( Return_info['Intensity_density'][0,:], Return_info['Intensity_density'][10,:])\n\n# Compute the credible intervals:\n Return_info['Credible_Sup'] = np.min ( MAXI[:,:], axis = 1)\n Return_info['Credible_Inf'] = np.max ( MINI[:,:], axis = 1)\n\n# normalise marginal distributions\n Return_info['Intensity_density'][:,:] = np.array(Return_info['Intensity_density'][:,:]) / np.sum( Return_info['Intensity_density'][0,:] )\n \n# Compute the mode\n Return_info['Mode'] = (0.5 + np.argmax(Return_info['Intensity_density'], axis=1)) / Model_parameters['Nbins'] * (Model_parameters['I_max'] - Model_parameters['I_min']) + Model_parameters['I_min']\n\n# Compute the median. 
Get the first instance of the count from the left being greater than half the total:\n for i in range(Model_parameters['discretise_size']):\n for j in range(Model_parameters['Nbins']):\n if np.sum ( Return_info['Intensity_density'][i,0:j]) >= np.sum( Return_info['Intensity_density'][i,:] )/2.0:\n #print(j, np.sum ( Return_info['Intensity_density'][i,0:j]), np.sum( Return_info['Intensity_density'][i,:] )/2.0); \n Return_info['Median'][i] = (0.5 + j) / Model_parameters['Nbins'] * (Model_parameters['I_max'] - Model_parameters['I_min']) + Model_parameters['I_min']\n break\n\n\n# Calculate the \"best\" solution\n if k_best < 0:\n print('NO MINIMUM LIKELIHOOD SOLUTION FOUND')\n Return_info['Best'] = np.zeros(Model_parameters['discretise_size'])\n else:\n Return_info['Best'] = Find_linear_interpolated_values( Model_parameters['X_min'], Model_parameters['X_max'], \n pt_best[0:k_best,:], endpt_best, X )\n# close file of model data\n if Model_parameters['output_model_freq'] > 0:\n output_models.close()\n \n# close file for joint distributions\n if Model_parameters['output_joint_distribution_freq'] > 0:\n for i in range(len(age)):\n joint_dist[i].close()\n \n return", "def COM(z, M, **cosmo):\n # Check that z and M are arrays\n z = np.array(z, ndmin=1, dtype=float)\n M = np.array(M, ndmin=1, dtype=float)\n\n # Create array\n c_array = np.empty_like(z)\n sig_array = np.empty_like(z)\n nu_array = np.empty_like(z)\n zf_array = np.empty_like(z)\n \n for i_ind, (zval, Mval) in enumerate(zip(z, M)):\n # Evaluate the indices at each redshift and mass combination\n # that you want a concentration for, different to MAH which\n # uses one a_tilde and b_tilde at the starting redshift only\n a_tilde, b_tilde = calc_ab(zval, Mval, **cosmo)\n\n # Minimize equation to solve for 1 unknown, 'c'\n c = scipy.optimize.brentq(_minimize_c, 2, 1000,\n args=(zval, a_tilde, b_tilde,\n cosmo['A_scaling'], cosmo['omega_M_0'],\n cosmo['omega_lambda_0']))\n\n if np.isclose(c, 0):\n print(\"Error solving for concentration with given redshift and \"\n \"(probably) too small a mass\")\n c = -1\n sig = -1\n nu = -1\n zf = -1\n else:\n # Calculate formation redshift for this concentration,\n # redshift at which the scale radius = virial radius: z_-2\n zf = formationz(c, zval, Ascaling=cosmo['A_scaling'],\n omega_M_0=cosmo['omega_M_0'],\n omega_lambda_0=cosmo['omega_lambda_0'])\n\n R_Mass = cp.perturbation.mass_to_radius(Mval, **cosmo)\n\n sig, err_sig = cp.perturbation.sigma_r(R_Mass, 0, **cosmo)\n nu = 1.686/(sig*growthfactor(zval, norm=True, **cosmo))\n\n c_array[i_ind] = c\n sig_array[i_ind] = sig\n nu_array[i_ind] = nu\n zf_array[i_ind] = zf\n\n return(c_array, sig_array, nu_array, zf_array)", "def test_wrong_ref_power_mfcc():\n MFCC(file_struct, FeatureTypes.framesync, ref_power=\"caca\")", "def MC(spin_matrix, num_cycles, temperature):\n num_spins = len(spin_matrix)\n # Matrix for storing calculated expectation and variance values, five variables\n exp_values = np.zeros((int(num_cycles), 6))\n accepted = 0\n # Initial energy and magnetization\n E, M = initial_energy(spin_matrix, num_spins, temperature)\n # Looping over number of Monte Carlo cycles storing values for each step\n for i in range(1, num_cycles+1):\n # Repeat according to size of lattice for each cycle\n for j in range(num_spins*num_spins):\n # Picking a random lattice position\n ix = np.random.randint(num_spins)\n iy = np.random.randint(num_spins)\n # Finding the surrounding spins accordng to periodic boundary conditions\n left = spin_matrix[ix - 1, iy] if ix > 0 
else spin_matrix[num_spins - 1, iy]\n right = spin_matrix[ix + 1, iy] if ix < (num_spins - 1) else spin_matrix[0, iy]\n\n above = spin_matrix[ix, iy - 1] if iy > 0 else spin_matrix[ix, num_spins - 1]\n below = spin_matrix[ix, iy + 1] if iy < (num_spins - 1) else spin_matrix[ix, 0]\n # Calculating the energy change\n delta_energy = (2 * spin_matrix[ix, iy] * (left + right + above + below))\n # Evaluating the proposet new configuration\n if np.random.random() <= np.exp(-delta_energy / temperature):\n # Changing the configuration if accepted\n spin_matrix[ix, iy] *= -1.0\n E += delta_energy\n M += 2*spin_matrix[ix, iy]\n accepted += 1\n # Store values in output matrix\n exp_values[i-1,0] = E\n exp_values[i-1,1] = M\n exp_values[i-1,2] = E**2\n exp_values[i-1,3] = M**2\n exp_values[i-1,4] = np.abs(M)\n exp_values[i-1,5] = accepted\n\n return exp_values", "def makedkcorr(c, Mbincrit, logrbincrit, dropcrit, dmz0, dmz1, ddmz, drop, z0=3.5, z1=4.5):\n bincrit = logrbincrit & Mbincrit # to select objects in the mag & size bin\n zcrit = (c.z_input >= z0) & (c.z_input < z1)\n selcrit = (zcrit & bincrit & dropcrit) \n # to select all dropouts in this M1500 bin\n zarr = N.compress(selcrit, c.z_input) # the array of the redshifts of all dropouts in this M1500 bin\n karr = N.zeros(len(zarr)) \n # zcent: reference redshift (zcent = 4.0 for B-dropouts; zcent = 5.0 for V-dropouts)\n if drop == 'b':\n mc = mconvert('M1500_to_i.txt')\n zcent = 4.0\n elif drop == 'v':\n mc = mconvert('M1500_to_z.txt')\n zcent = 5.0\n #n_ztarget = sum(zcrit & bincrit & c.detect) # NOT including SE detection incompleteness\n n_ztarget = sum(zcrit & bincrit) # including SE detection incompleteness\n n_zselect = sum(zcrit & bincrit & dropcrit)\n \n # calculate completeness as follows:\n # completeness = (num. of objects that are detected and selected as dropouts) / \n # (num. 
of INPUT objects in this bin)\n if n_ztarget > 5: \n completeness = float(n_zselect) / float(n_ztarget)\n else: completeness = 0.\n # calculate the K-correction of all redshifts\n for i in range(len(zarr)):\n karr[i] = mc(zarr[i])\n dkarr = karr - mc(zcent) # the differential K-correction\n grid = N.arange(dmz0, dmz1+ddmz, ddmz) # the grid over which differential K-correction will be calculated\n if n_zselect > 4:\n h = KPDF.UPDFOptimumBandwidth(dkarr)\n p_dk = KPDF.UPDFEpanechnikov(dkarr, grid, h)\n if sum(p_dk) > 0:\n p_dk = p_dk / sum(p_dk)\n else: p_dk = N.zeros(len(grid))\n else:\n p_dk = N.zeros(len(grid))\n # include zarr in the kernel for inspection purposes\n return completeness, p_dk, n_ztarget, n_zselect, zarr", "def cmb_power_spectra(r, out_lmax=2200, camb_lmax=4000, lens_accuracy=2, out_file=None,\n H0=67.0, ombh2=0.022, omch2=0.12, omk=0.0, neutrino_hierarchy='degenerate',\n num_massive_neutrinos=1, mnu=0.06, nnu=3.046, YHe=None, meffsterile=0.0,\n standard_neutrino_neff=3.046, TCMB=2.7255, tau=None, deltazrei=None, bbn_predictor=None,\n As=2.0e-9, ns=0.96, nrun=0.0, nrunrun=0.0, nt=None, ntrun=0.0, pivot_scalar=0.05,\n pivot_tensor=0.05):\n\n pars = camb.CAMBparams()\n pars.set_cosmology(H0=H0, ombh2=ombh2, omch2=omch2, omk=omk, neutrino_hierarchy=neutrino_hierarchy, \n num_massive_neutrinos=num_massive_neutrinos, mnu=mnu, nnu=nnu, YHe=YHe, meffsterile=meffsterile,\n standard_neutrino_neff=standard_neutrino_neff, TCMB=TCMB, tau=tau, deltazrei=deltazrei,\n bbn_predictor=bbn_predictor)\n pars.WantTensors = True\n pars.InitPower.set_params(As=As, ns=ns, nrun=nrun, nrunrun=nrunrun, r=r, nt=nt, ntrun=ntrun,\n pivot_scalar=pivot_scalar, pivot_tensor=pivot_tensor)\n pars.set_for_lmax(lmax=camb_lmax, lens_potential_accuracy=lens_accuracy)\n \n results = camb.get_results(pars)\n\n powers = results.get_cmb_power_spectra(lmax=out_lmax, CMB_unit='muK')\n\n if out_file is not None:\n df = pd.from_dict(powers)\n df.to_csv(out_file)\n\n return powers", "def mms_feeps_getgyrophase(trange=['2017-07-11/22:30', '2017-07-11/22:35'], probe='2', data_rate='brst', level='l2', datatype='electron'):\n mec_vars = mms.mec(trange=trange, probe=probe, data_rate=data_rate)\n if mec_vars is None:\n logging.error('Problem loading MEC data for calculating FEEPS gyrophase angles')\n\n qeci2sm = get('mms'+probe+'_mec_quat_eci_to_sm', units=False)\n qeci2bcs = get('mms'+probe+'_mec_quat_eci_to_bcs', units=False)\n rsun = get('mms'+probe+'_mec_r_sun_de421_eci', units=False)\n\n rsunbcs = np.zeros((len(rsun.times), 3))\n rduskbcs = np.zeros((len(rsun.times), 3))\n rdusksm = [0, 1, 0]\n\n for i in range(len(rsun.times)):\n q = qeci2bcs.y[i, :]\n # Quaternion rotation matrix:\n s = 1 # these quaternions are unit-qs\n R = np.array([[1 - 2*s*(q[2]**2 + q[3]**2), 2*s*(q[1]*q[2] - q[3]*q[0]), 2*s*(q[1]*q[3] + q[2]*q[0])], # ECI to BCS\n [2*s*(q[1]*q[2] + q[3]*q[0]), 1 - 2*s*(q[1]**2 + q[3]**2), 2*s*(q[2]*q[3] - q[1]*q[0])],\n [2*s*(q[1]*q[3] - q[2]*q[0]), 2*s*(q[2]*q[3] + q[1]*q[0]), 1 - 2*s*(q[1]**2 + q[2]**2)]])\n R = R.T\n rsunbcs[i, :] = np.array([R[0,0]*rsun.y[i,0] + R[1,0]*rsun.y[i,1] + R[2,0]*rsun.y[i,2], R[0,1]*rsun.y[i,0] + R[1,1]*rsun.y[i,1] + R[2,1]*rsun.y[i,2], R[0,2]*rsun.y[i,0] + R[1,2]*rsun.y[i,1] + R[2,2]*rsun.y[i,2]])\n\n # now make second vector for gyroplane reference, dusk direction (+Y in SM)\n q = qeci2sm.y[i, :]\n # Quaternion rotation matrix:\n s = 1 # these quaternions are unit-qs\n R2 = np.array([[1 - 2*s*(q[2]**2 + q[3]**2), 2*s*(q[1]*q[2] - q[3]*q[0]), 2*s*(q[1]*q[3] + q[2]*q[0])], # ECI to 
SM\n [2*s*(q[1]*q[2] + q[3]*q[0]), 1 - 2*s*(q[1]**2 + q[3]**2), 2*s*(q[2]*q[3] - q[1]*q[0])],\n [2*s*(q[1]*q[3] - q[2]*q[0]), 2*s*(q[2]*q[3] + q[1]*q[0]), 1 - 2*s*(q[1]**2 + q[2]**2)]])\n # going from SM to ECI, so invert R:\n R2 = np.linalg.inv(R2) # SM to ECI\n R2 = R2.T\n rduskeci = [R2[0,0]*rdusksm[0] + R2[1,0]*rdusksm[1] + R2[2,0]*rdusksm[2], R2[0,1]*rdusksm[0] + R2[1,1]*rdusksm[1] + R2[2,1]*rdusksm[2], R2[0,2]*rdusksm[0] + R2[1,2]*rdusksm[1] + R2[2,2]*rdusksm[2]]\n # Now convert to BCS:\n rduskbcs[i, :] = np.array([R[0,0]*rduskeci[0] + R[1,0]*rduskeci[1] + R[2,0]*rduskeci[2], R[0,1]*rduskeci[0] + R[1,1]*rduskeci[1] + R[2,1]*rduskeci[2], R[0,2]*rduskeci[0] + R[1,2]*rduskeci[1] + R[2,2]*rduskeci[2]])\n \n saved = store('mms'+probe+'_mec_r_sun_bcs', data = {'x': rsun.times, 'y': rsunbcs})\n if not saved:\n logging.error('Problem saving r_sun_bcs')\n\n saved = store('mms'+probe+'_mec_r_dusk_bcs', data = {'x': rsun.times, 'y': rduskbcs})\n if not saved:\n logging.error('Problem saving r_dusk_bcs')\n\n # Rotation matrices for FEEPS coord system (FCS) into body coordinate system (BCS):\n Ttop = np.array([[1./np.sqrt(2.), -1./np.sqrt(2.), 0], [1./np.sqrt(2.), 1./np.sqrt(2.), 0], [0, 0, 1]]).T\n Tbot = np.array([[-1./np.sqrt(2.), -1./np.sqrt(2.), 0], [-1./np.sqrt(2.), 1./np.sqrt(2.), 0], [0, 0, -1]]).T\n\n # Telescope vectors in FCS:\n # Electrons\n V1fcs = [0.347, -0.837, 0.423]\n V2fcs = [0.347, -0.837, -0.423]\n V3fcs = [0.837, -0.347, 0.423]\n V4fcs = [0.837, -0.347, -0.423]\n V5fcs = [-0.087, 0.000, 0.996]\n V9fcs = [0.837, 0.347, 0.423]\n V10fcs = [0.837, 0.347, -0.423]\n V11fcs = [0.347, 0.837, 0.423]\n V12fcs = [0.347, 0.837, -0.423]\n # Ions\n V6fcs = [0.104, 0.180, 0.978]\n V7fcs = [0.654, -0.377, 0.656]\n V8fcs = [0.654, -0.377, -0.656]\n\n # Now telescope vectors in Body Coordinate System:\n # Factors of -1 account for 180 deg shift between particle velocity and telescope normal direction:\n # Top:\n Vt1bcs = [-1.*(Ttop[0,0]*V1fcs[0] + Ttop[1,0]*V1fcs[1] + Ttop[2,0]*V1fcs[2]), \n -1.*(Ttop[0,1]*V1fcs[0] + Ttop[1,1]*V1fcs[1] + Ttop[2,1]*V1fcs[2]), \n -1.*(Ttop[0,2]*V1fcs[0] + Ttop[1,2]*V1fcs[1] + Ttop[2,2]*V1fcs[2])]\n Vt2bcs = [-1.*(Ttop[0,0]*V2fcs[0] + Ttop[1,0]*V2fcs[1] + Ttop[2,0]*V2fcs[2]), \n -1.*(Ttop[0,1]*V2fcs[0] + Ttop[1,1]*V2fcs[1] + Ttop[2,1]*V2fcs[2]), \n -1.*(Ttop[0,2]*V2fcs[0] + Ttop[1,2]*V2fcs[1] + Ttop[2,2]*V2fcs[2])]\n Vt3bcs = [-1.*(Ttop[0,0]*V3fcs[0] + Ttop[1,0]*V3fcs[1] + Ttop[2,0]*V3fcs[2]), \n -1.*(Ttop[0,1]*V3fcs[0] + Ttop[1,1]*V3fcs[1] + Ttop[2,1]*V3fcs[2]), \n -1.*(Ttop[0,2]*V3fcs[0] + Ttop[1,2]*V3fcs[1] + Ttop[2,2]*V3fcs[2])]\n Vt4bcs = [-1.*(Ttop[0,0]*V4fcs[0] + Ttop[1,0]*V4fcs[1] + Ttop[2,0]*V4fcs[2]), \n -1.*(Ttop[0,1]*V4fcs[0] + Ttop[1,1]*V4fcs[1] + Ttop[2,1]*V4fcs[2]), \n -1.*(Ttop[0,2]*V4fcs[0] + Ttop[1,2]*V4fcs[1] + Ttop[2,2]*V4fcs[2])]\n Vt5bcs = [-1.*(Ttop[0,0]*V5fcs[0] + Ttop[1,0]*V5fcs[1] + Ttop[2,0]*V5fcs[2]), \n -1.*(Ttop[0,1]*V5fcs[0] + Ttop[1,1]*V5fcs[1] + Ttop[2,1]*V5fcs[2]), \n -1.*( Ttop[0,2]*V5fcs[0] + Ttop[1,2]*V5fcs[1] + Ttop[2,2]*V5fcs[2])]\n Vt6bcs = [-1.*(Ttop[0,0]*V6fcs[0] + Ttop[1,0]*V6fcs[1] + Ttop[2,0]*V6fcs[2]), \n -1.*(Ttop[0,1]*V6fcs[0] + Ttop[1,1]*V6fcs[1] + Ttop[2,1]*V6fcs[2]), \n -1.*(Ttop[0,2]*V6fcs[0] + Ttop[1,2]*V6fcs[1] + Ttop[2,2]*V6fcs[2])]\n Vt7bcs = [-1.*(Ttop[0,0]*V7fcs[0] + Ttop[1,0]*V7fcs[1] + Ttop[2,0]*V7fcs[2]), \n -1.*(Ttop[0,1]*V7fcs[0] + Ttop[1,1]*V7fcs[1] + Ttop[2,1]*V7fcs[2]), \n -1.*(Ttop[0,2]*V7fcs[0] + Ttop[1,2]*V7fcs[1] + Ttop[2,2]*V7fcs[2])]\n Vt8bcs = [-1.*(Ttop[0,0]*V8fcs[0] + Ttop[1,0]*V8fcs[1] + 
Ttop[2,0]*V8fcs[2]), \n -1.*( Ttop[0,1]*V8fcs[0] + Ttop[1,1]*V8fcs[1] + Ttop[2,1]*V8fcs[2]), \n -1.*(Ttop[0,2]*V8fcs[0] + Ttop[1,2]*V8fcs[1] + Ttop[2,2]*V8fcs[2])]\n Vt9bcs = [-1.*(Ttop[0,0]*V9fcs[0] + Ttop[1,0]*V9fcs[1] + Ttop[2,0]*V9fcs[2]), \n -1.*(Ttop[0,1]*V9fcs[0] + Ttop[1,1]*V9fcs[1] + Ttop[2,1]*V9fcs[2]), \n -1.*(Ttop[0,2]*V9fcs[0] + Ttop[1,2]*V9fcs[1] + Ttop[2,2]*V9fcs[2])]\n Vt10bcs = [-1.*(Ttop[0,0]*V10fcs[0] + Ttop[1,0]*V10fcs[1] + Ttop[2,0]*V10fcs[2]), \n -1.*(Ttop[0,1]*V10fcs[0] + Ttop[1,1]*V10fcs[1] + Ttop[2,1]*V10fcs[2]), \n -1.*(Ttop[0,2]*V10fcs[0] + Ttop[1,2]*V10fcs[1] + Ttop[2,2]*V10fcs[2])]\n Vt11bcs = [-1.*(Ttop[0,0]*V11fcs[0] + Ttop[1,0]*V11fcs[1] + Ttop[2,0]*V11fcs[2]), \n -1.*(Ttop[0,1]*V11fcs[0] + Ttop[1,1]*V11fcs[1] + Ttop[2,1]*V11fcs[2]), \n -1.*(Ttop[0,2]*V11fcs[0] + Ttop[1,2]*V11fcs[1] + Ttop[2,2]*V11fcs[2])]\n Vt12bcs = [-1.*(Ttop[0,0]*V12fcs[0] + Ttop[1,0]*V12fcs[1] + Ttop[2,0]*V12fcs[2]), \n -1.*(Ttop[0,1]*V12fcs[0] + Ttop[1,1]*V12fcs[1] + Ttop[2,1]*V12fcs[2]), \n -1.*(Ttop[0,2]*V12fcs[0] + Ttop[1,2]*V12fcs[1] + Ttop[2,2]*V12fcs[2])]\n # Bottom:\n Vb1bcs = [-1.*(Tbot[0,0]*V1fcs[0] + Tbot[1,0]*V1fcs[1] + Tbot[2,0]*V1fcs[2]), \n -1.*(Tbot[0,1]*V1fcs[0] + Tbot[1,1]*V1fcs[1] + Tbot[2,1]*V1fcs[2]), \n -1.*(Tbot[0,2]*V1fcs[0] + Tbot[1,2]*V1fcs[1] + Tbot[2,2]*V1fcs[2])]\n Vb2bcs = [-1.*(Tbot[0,0]*V2fcs[0] + Tbot[1,0]*V2fcs[1] + Tbot[2,0]*V2fcs[2]), \n -1.*(Tbot[0,1]*V2fcs[0] + Tbot[1,1]*V2fcs[1] + Tbot[2,1]*V2fcs[2]), \n -1.*(Tbot[0,2]*V2fcs[0] + Tbot[1,2]*V2fcs[1] + Tbot[2,2]*V2fcs[2])]\n Vb3bcs = [-1.*(Tbot[0,0]*V3fcs[0] + Tbot[1,0]*V3fcs[1] + Tbot[2,0]*V3fcs[2]), \n -1.*(Tbot[0,1]*V3fcs[0] + Tbot[1,1]*V3fcs[1] + Tbot[2,1]*V3fcs[2]), \n -1.*(Tbot[0,2]*V3fcs[0] + Tbot[1,2]*V3fcs[1] + Tbot[2,2]*V3fcs[2])]\n Vb4bcs = [-1.*(Tbot[0,0]*V4fcs[0] + Tbot[1,0]*V4fcs[1] + Tbot[2,0]*V4fcs[2]), \n -1.*(Tbot[0,1]*V4fcs[0] + Tbot[1,1]*V4fcs[1] + Tbot[2,1]*V4fcs[2]), \n -1.*(Tbot[0,2]*V4fcs[0] + Tbot[1,2]*V4fcs[1] + Tbot[2,2]*V4fcs[2])]\n Vb5bcs = [-1.*(Tbot[0,0]*V5fcs[0] + Tbot[1,0]*V5fcs[1] + Tbot[2,0]*V5fcs[2]), \n -1.*(Tbot[0,1]*V5fcs[0] + Tbot[1,1]*V5fcs[1] + Tbot[2,1]*V5fcs[2]), \n -1.*(Tbot[0,2]*V5fcs[0] + Tbot[1,2]*V5fcs[1] + Tbot[2,2]*V5fcs[2])]\n Vb6bcs = [-1.*(Tbot[0,0]*V6fcs[0] + Tbot[1,0]*V6fcs[1] + Tbot[2,0]*V6fcs[2]), \n -1.*(Tbot[0,1]*V6fcs[0] + Tbot[1,1]*V6fcs[1] + Tbot[2,1]*V6fcs[2]), \n -1.*( Tbot[0,2]*V6fcs[0] + Tbot[1,2]*V6fcs[1] + Tbot[2,2]*V6fcs[2])]\n Vb7bcs = [-1.*(Tbot[0,0]*V7fcs[0] + Tbot[1,0]*V7fcs[1] + Tbot[2,0]*V7fcs[2]), \n -1.*(Tbot[0,1]*V7fcs[0] + Tbot[1,1]*V7fcs[1] + Tbot[2,1]*V7fcs[2]), \n -1.*(Tbot[0,2]*V7fcs[0] + Tbot[1,2]*V7fcs[1] + Tbot[2,2]*V7fcs[2])]\n Vb8bcs = [-1.*(Tbot[0,0]*V8fcs[0] + Tbot[1,0]*V8fcs[1] + Tbot[2,0]*V8fcs[2]), \n -1.*(Tbot[0,1]*V8fcs[0] + Tbot[1,1]*V8fcs[1] + Tbot[2,1]*V8fcs[2]), \n -1.*(Tbot[0,2]*V8fcs[0] + Tbot[1,2]*V8fcs[1] + Tbot[2,2]*V8fcs[2])]\n Vb9bcs = [-1.*(Tbot[0,0]*V9fcs[0] + Tbot[1,0]*V9fcs[1] + Tbot[2,0]*V9fcs[2]), \n -1.*(Tbot[0,1]*V9fcs[0] + Tbot[1,1]*V9fcs[1] + Tbot[2,1]*V9fcs[2]), \n -1.*(Tbot[0,2]*V9fcs[0] + Tbot[1,2]*V9fcs[1] + Tbot[2,2]*V9fcs[2])]\n Vb10bcs = [-1.*(Tbot[0,0]*V10fcs[0] + Tbot[1,0]*V10fcs[1] + Tbot[2,0]*V10fcs[2]), \n -1.*(Tbot[0,1]*V10fcs[0] + Tbot[1,1]*V10fcs[1] + Tbot[2,1]*V10fcs[2]), \n -1.*(Tbot[0,2]*V10fcs[0] + Tbot[1,2]*V10fcs[1] + Tbot[2,2]*V10fcs[2])]\n Vb11bcs = [-1.*(Tbot[0,0]*V11fcs[0] + Tbot[1,0]*V11fcs[1] + Tbot[2,0]*V11fcs[2]), \n -1.*(Tbot[0,1]*V11fcs[0] + Tbot[1,1]*V11fcs[1] + Tbot[2,1]*V11fcs[2]), \n -1.*(Tbot[0,2]*V11fcs[0] + Tbot[1,2]*V11fcs[1] + 
Tbot[2,2]*V11fcs[2])]\n Vb12bcs = [-1.*(Tbot[0,0]*V12fcs[0] + Tbot[1,0]*V12fcs[1] + Tbot[2,0]*V12fcs[2]), \n -1.*(Tbot[0,1]*V12fcs[0] + Tbot[1,1]*V12fcs[1] + Tbot[2,1]*V12fcs[2]), \n -1.*(Tbot[0,2]*V12fcs[0] + Tbot[1,2]*V12fcs[1] + Tbot[2,2]*V12fcs[2])]\n\n fgm_vars = mms.fgm(trange=[time_double(trange[0])-600, time_double(trange[1])+600], probe=probe, data_rate='srvy')\n if fgm_vars is None:\n logging.error('Problem loading FGM vars for calculating FEEPS gyrophase angles')\n\n # interpolate the FGM var to the MEC var timestamps\n tinterpol('mms'+probe+'_fgm_b_bcs_srvy_l2_bvec', 'mms'+probe+'_mec_r_sun_bcs', newname='mms'+probe+'_fgm_b_bcs_srvy_l2_bvec_int')\n\n B = get('mms'+probe+'_fgm_b_bcs_srvy_l2_bvec_int')\n\n # Now calculate gyrophase\n # Telescope vectors perp to B:\n Tperp = np.zeros((len(rsunbcs[:, 0]), 3, 24))\n\n # Gyrophase:\n phi = np.zeros((len(rsunbcs[:, 0]), 24))\n\n for i in range(len(rsunbcs[:, 0])):\n uB = B.y[i,:]/np.sqrt(B.y[i,0]**2 + B.y[i,1]**2 + B.y[i,2]**2)\n # Sun vector perp to B:\n Sperp = np.cross(np.cross(uB, rsunbcs[i, :]/np.sqrt(np.nansum(rsunbcs[i, :]**2))), uB)\n # Dusk vector perp to B:\n Dperp = np.cross(np.cross(uB, rduskbcs[i, :]/np.sqrt(np.nansum(rduskbcs[i, :]**2))), uB)\n Tperp[i, :, 0] = np.cross(np.cross(uB, Vt1bcs), uB)\n Tperp[i, :, 1] = np.cross(np.cross(uB, Vt2bcs), uB)\n Tperp[i, :, 2] = np.cross(np.cross(uB, Vt3bcs), uB)\n Tperp[i, :, 3] = np.cross(np.cross(uB, Vt4bcs), uB)\n Tperp[i, :, 4] = np.cross(np.cross(uB, Vt5bcs), uB)\n Tperp[i, :, 5] = np.cross(np.cross(uB, Vt6bcs), uB)\n Tperp[i, :, 6] = np.cross(np.cross(uB, Vt7bcs), uB)\n Tperp[i, :, 7] = np.cross(np.cross(uB, Vt8bcs), uB)\n Tperp[i, :, 8] = np.cross(np.cross(uB, Vt9bcs), uB)\n Tperp[i, :, 9] = np.cross(np.cross(uB, Vt10bcs), uB)\n Tperp[i, :, 10] = np.cross(np.cross(uB, Vt11bcs), uB)\n Tperp[i, :, 11] = np.cross(np.cross(uB, Vt12bcs), uB)\n Tperp[i, :, 12] = np.cross(np.cross(uB, Vb1bcs), uB)\n Tperp[i, :, 13] = np.cross(np.cross(uB, Vb2bcs), uB)\n Tperp[i, :, 14] = np.cross(np.cross(uB, Vb3bcs), uB)\n Tperp[i, :, 15] = np.cross(np.cross(uB, Vb4bcs), uB)\n Tperp[i, :, 16] = np.cross(np.cross(uB, Vb5bcs), uB)\n Tperp[i, :, 17] = np.cross(np.cross(uB, Vb6bcs), uB)\n Tperp[i, :, 18] = np.cross(np.cross(uB, Vb7bcs), uB)\n Tperp[i, :, 19] = np.cross(np.cross(uB, Vb8bcs), uB)\n Tperp[i, :, 20] = np.cross(np.cross(uB, Vb9bcs), uB)\n Tperp[i, :, 21] = np.cross(np.cross(uB, Vb10bcs), uB)\n Tperp[i, :, 22] = np.cross(np.cross(uB, Vb11bcs), uB)\n Tperp[i, :, 23] = np.cross(np.cross(uB, Vb12bcs), uB)\n\n for j in range(24):\n th1 = np.arccos(np.nansum(Tperp[i,:,j] * Sperp)/(np.sqrt(np.nansum(Tperp[i,:,j]**2))*np.sqrt(np.nansum(Sperp**2))))\n th2 = np.arccos(np.nansum(Tperp[i,:,j] * Dperp)/(np.sqrt(np.nansum(Tperp[i,:,j]**2))*np.sqrt(np.nansum(Dperp**2))))\n # strip the units\n th1 = th1.value\n th2 = th2.value\n if th1 <= np.pi/2.0 and th2 < np.pi/2:\n phi[i, j] = 2*np.pi - th1\n if th1 < np.pi/2.0 and th2 >= np.pi/2.0:\n phi[i, j] = th1\n if th1 > np.pi/2.0 and th2 <= np.pi/2.0:\n phi[i, j] = 270.0*np.pi/180.0 - th2\n if th1 >= np.pi/2.0 and th2 > np.pi/2.0:\n phi[i, j] = th1\n \n saved = store('mms'+probe+'_epd_feeps_'+data_rate+'_gyrophase', data={'x': rsun.times, 'y': phi*180./np.pi})\n if not saved:\n logging.error('Problem saving gyrophase angles')\n return\n\n options('mms'+probe+'_epd_feeps_'+data_rate+'_gyrophase', 'yrange', [0, 360.0])\n\n # Gyrophase always returns on time stamps from MEC data, get those closest to FEEPS time stamps:\n eyes = mms_feeps_active_eyes(trange, 
probe, data_rate, datatype, level)\n sensor_types = ['top', 'bottom']\n\n feepst = get('mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_spinsectnum')\n\n indt = np.zeros(len(feepst.times), dtype='int32')\n gpd = get('mms'+probe+'_epd_feeps_'+data_rate+'_gyrophase')\n\n for i in range(len(feepst.times)):\n indt[i] = np.argwhere(np.abs(gpd.times - feepst.times[i]) == np.min(np.abs(gpd.times - feepst.times[i]))).flatten()[0]\n\n # Gyrophase always returns all 24 FEEPS telescopes, downselect based on species:\n iT = np.array([np.array(eyes[sensor_types[0]])-1, np.array(eyes[sensor_types[0]])+11]).flatten().tolist()\n gp_data = np.zeros((len(gpd.times[indt]), len(iT)))\n\n #return (iT, gp_data, gpd)\n for i in range(len(iT)):\n gp_data[:, i] = gpd.y[indt, iT[i]]\n \n saved = store('mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_gyrophase', data = {'x': gpd.times[indt], 'y': gp_data})\n\n if saved:\n options('mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_gyrophase', 'yrange', [0.0, 360.0])\n return 'mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_gyrophase'", "def mel_cepstrum(x2,K):\n\tprint \"Mel-Cepstrum\"\n\n\t# ZERO PADDING\n\tNFFT = int(pow(2, np.ceil(math.log(K, 2))))\n\n\tprint \"-> Data was padded: \",K,\" -> \",NFFT\n\n\t# Mel-Scaled Filterbank, full of magic numbers\n\tK20_filter = [ \n\t(0.0,154.759),\n\t(77.3795,249.2458),\n\t(163.3126, 354.1774),\n\t(258.745,470.7084),\n\t(364.7267,600.121),\n\t(482.4239,743.8391),\n\t(613.1315,903.4442),\n\t(758.2878,1080.6923),\n\t(919.4901,1277.5338),\n\t(1098.5119,1496.1345),\n\t(1297.3232,1738.8999),\n\t(1518.1115,2008.501),\n\t(1763.3063,2307.9044),\n\t(2035.6053,2640.4045),\n\t(2338.0049,3009.6599),\n\t(2673.8324,3419.7335),\n\t(3046.7829,3875.1375),\n\t(3460.9602,4380.8829),\n\t(3920.9215,4942.5344),\n\t(4431.728, 5566.272)\n\t]\n\n\t# ----------------------------------\n\t# The final Mel Filter Cepstral Coefficients\n\t# have len(K20_filter) coefficients (rows) and \n\t# the operation is performed on each window.\n\t# ----------------------------------\n\t\n\n\tNUM_WINDOWS = len(x2)\n\tprint \"NUM WINDOWS\",NUM_WINDOWS\n\n\tQ = 14\t\n\tMFCC = np.zeros((Q,NUM_WINDOWS))\n\n\tplt.subplot(222)\n\tplt.title(\"Mel cepstrum coefficients\")\n\n\twin_id = 0\n\tfor win in x2:\n\n\t\t# DFT\t\n\t\tX2 = np.absolute(scipy.fftpack.fft( win, NFFT ))\t\n\t\tfreq = scipy.fftpack.fftfreq(NFFT, 1.0/SAMPLE_RATE)\n\t\n\t\tX2 = X2[len(X2)/2:]\n\t\tfreq = freq[ freq.shape[-1]/2: ]\n\n\t\tdf = freq[1]-freq[0]\n\t\tK = len(K20_filter)\t\n\t\t\n\t\tmks = np.zeros(K)\n\t\tfor i in xrange(0,K):\n\t\t\tll,ul = K20_filter[i]\n\t\t\tmks[i] = mel_filter(X2, df, ll, ul, NFFT)\n\n\t\tplt.plot(mks)\n\n\t\tc = np.zeros(Q)\n\t\tinvc = np.zeros((Q,K))\n\t\tfor q in xrange(0,Q):\n\t\t\tfor k in range(0,K):\n\t\t\t\tc[q] += np.log(mks[k])*np.cos( (np.pi*q*(2*k+1)) / (2*K) )\n\t\t\t\tinvc[q,k] = np.cos( np.cos( (np.pi*q*(2*k+1)) / (2*K) ) )\n\t\t\n\t\t# IDCT\n\t\tMFCC[:,win_id] = c\n\t\twin_id += 1\n\n\treturn MFCC", "def band_fit_M1(band_definition,results_data,verbose=False):\n\n # helper functions\n def f_moment(K,J):\n \"\"\" Generate coefficient matrix row for moments.\n \"\"\"\n f0 = J\n f1 = K/(J+1)\n f2 = (-1)**(J-0.5) / (2*math.sqrt(2)) * (2*J+1)/(J+1)\n if (K == 0):\n # case K = 0\n coefficients = [f0]\n elif (K == 1/2):\n # case K = 1/2\n coefficients = [f0,f1,f2]\n else:\n # case K generic\n coefficients = [f0,f1]\n\n return coefficients\n def f_trans(K,J):\n \"\"\" Generate coefficient matrix row for transitions.\n 
\"\"\"\n f0 = 0\n f1 = -math.sqrt(3/(4*math.pi)) * math.sqrt((J**2-K**2)/J)\n f2 = (-1)**(J-0.5) / math.sqrt(2) * f1\n if (K == 0):\n # case K = 0\n coefficients = [f0]\n elif (K == 1/2):\n # case K = 1/2\n coefficients = [f0,f1,f2]\n else:\n # case K generic\n coefficients = [f0,f1]\n\n return coefficients\n\n # setup\n K = band_definition.K\n\n # accumulate moment entries\n A_moment = []\n b_moment = []\n for J in band_definition.J_list_M1_moment:\n A_moment.append(f_moment(K,J))\n b_moment.append(results_data.moments[(band_definition.members[J],\"M1\")])\n\n # accumulate transition entries\n A_trans = []\n b_trans = []\n for J in band_definition.J_list_M1_trans:\n A_trans.append(f_trans(K,J))\n Ji = J\n Jf = J - 1\n M = band_definition.M[Ji]\n values = np.array(results_data.get_rme(band_definition.members[Jf],band_definition.members[Ji],\"M1\",M))\n values *= band_definition.signs[(Ji,M)]*band_definition.signs[(Jf,M)]\n b_trans.append(values)\n\n # combine moment and transition arrays\n A = np.array(A_moment+A_trans,float)\n b = np.array(b_moment+b_trans,float)\n if (verbose):\n print(\"J_list_M1_moment:\",band_definition.J_list_M1_moment)\n print(\"J_list_M1_trans:\",band_definition.J_list_M1_trans)\n print(\"Coefficient matrix\")\n print(A)\n print(\"Ordinate matrix\")\n print(b)\n\n # trap blatantly insufficient system\n if ( not (\n ((K==0) and (len(b)>=1))\n or\n ((K==1/2) and (len(b)>=3))\n or\n ((K>1/2) and (len(b)>=2))\n ) ):\n parameters = np.nan*np.ones((3,4))\n return parameters\n\n # solve system\n parameters = np.linalg.lstsq(A,b,rcond=None)[0]\n\n # upgrade parameter matrix to three rows\n # zero pads for parameter a2 if not already present\n if (parameters.shape == (1,4)):\n parameters = np.append(parameters,[[0,0,0,0],[0,0,0,0]],axis=0)\n elif (parameters.shape == (2,4)):\n parameters = np.append(parameters,[[0,0,0,0]],axis=0)\n if (verbose):\n print(\"Parameter matrix\")\n print(parameters)\n\n return parameters", "def em(self, x, options, returnFlog=False):\n\n # Check that inputs are consistent\n errstring = self.consist('gmm', x)\n if errstring != None:\n raise Exception(errstring)\n \n\n ndata, xdim = x.shape\n\n # Sort out the options\n if options[13]:\n niters = options[13]\n else:\n niters = 100\n\n display = options[0]\n store = False\n if returnFlog:\n store = True\t# Store the error values to return them\n errlog = np.zeros(niters)\n test = False\n if options[2] > 0.0:\n test = True\t# Test log likelihood for termination\n\n check_covars = 0\n if options[4] >= 1:\n if display >= 0:\n print 'check_covars is on'\n check_covars = True\t# Ensure that covariances don't collapse\n MIN_COVAR = eps()\t# Minimum singular value of covariance matrix\n init_covars = self.covars\n\n # Main loop of algorithm\n for n in range(niters):\n \n # Calculate posteriors based on old parameters\n post, act = self.post(x)\n \n # Calculate error value if needed\n if display or store or test:\n prob = np.dot(act, self.priors)\n # Error value is negative log likelihood of data\n e = - np.sum(np.log(prob))\n if store:\n errlog[n] = e\n if display > 0:\n print 'Cycle ', n, ' Error ', e\n if test:\n if n > 0 and abs(e - eold) < options[2]:\n options[7] = e\n if returnFlog:\n return errlog\n else:\n return\n else:\n eold = e\n \n \n \n \n # Adjust the new estimates for the parameters\n new_pr = np.sum(post, 0)\n new_c = np.dot(post.T,x)\n \n # Now move new estimates to old parameter vectors\n self.priors = new_pr/ndata\n \n self.centres = new_c/new_pr.reshape(self.ncentres, 1)\n \n if 
self.covar_type == 'spherical':\n v = np.zeros(self.ncentres)\n n2 = dist2(x, self.centres)\n for j in range(self.ncentres):\n v[j] = np.dot(post[:,j].T, n2[:,j])\n self.covars = ((v/new_pr))/self.nin;\n if check_covars:\n # Ensure that no covariance is too small\n for j in range(self.ncentres):\n if self.covars[j] < MIN_COVAR:\n self.covars[j] = init_covars[j]\n elif self.covar_type == 'diag':\n for j in range(self.ncentres):\n diffs = x - self.centres[j,:]\n self.covars[j,:] = np.sum(np.multiply(np.multiply(diffs, diffs), post[:,j:j+1]), 0)/new_pr[j]\n if check_covars:\n # Ensure that no covariance is too small\n for j in range(self.ncentres):\n if np.min(self.covars[j,:]) < MIN_COVAR:\n self.covars[j,:] = init_covars[j,:]\n elif self.covar_type == 'full':\n for j in range(self.ncentres):\n diffs = x - self.centres[j,:];\n diffs = np.multiply(diffs, np.sqrt(post[:,j:j+1]))\n self.covars[:,:,j] = np.dot(diffs.T,diffs)/new_pr[j]\n if check_covars:\n # Ensure that no covariance is too small\n for j in range(self.ncentres):\n if np.min(la.svd(self.covars[:,:,j], compute_uv=False)) < MIN_COVAR:\n self.covars[:,:,j] = init_covars[:,:,j]\n elif self.covar_type == 'ppca':\n for j in range(self.ncentres):\n diffs = x - self.centres[j,:]\n diffs = np.multiply(diffs,np.sqrt(post[:,j:j+1]))\n tempcovars, tempU, templambda = ppca(np.dot(diffs.T,diffs)/new_pr[j], self.ppca_dim)\n if len(templambda) != self.ppca_dim:\n raise Exception('Unable to extract enough components')\n else: \n self.covars[j] = tempcovars\n self.U[:, :, j] = tempU\n self.lambd[j, :] = templambda\n \n if check_covars:\n if self.covars[j] < MIN_COVAR:\n self.covars[j] = init_covars[j]\n else:\n raise Exception('Unknown covariance type ' + self.covar_type)\n\n options[7] = -np.sum(np.log(self.prob(x)))\n if display >= 0:\n print maxitmess()\n if returnFlog:\n return errlog\n else:\n return", "def _my_cNR(self, DM_mass, NLO=None):\n if NLO is None:\n NLO = False\n\n ### Input parameters ####\n\n mpi = self.ip['mpi0']\n mp = self.ip['mproton']\n mn = self.ip['mneutron']\n mN = (mp+mn)/2\n\n alpha = 1/self.ip['alowinv']\n GF = self.ip['GF']\n\n as_2GeV = self.ip['as_at_2GeV']\n\n gs2_2GeV = 4*np.pi*as_2GeV\n\n # Quark masses at 2GeV\n mu = self.ip['mu_at_2GeV']\n md = self.ip['md_at_2GeV']\n ms = self.ip['ms_at_2GeV']\n mtilde = 1/(1/mu + 1/md + 1/ms)\n \n # Lepton masses\n me = self.ip['me']\n mmu = self.ip['mmu']\n mtau = self.ip['mtau']\n\n # Z boson mass\n MZ = self.ip['Mz']\n\n ### Numerical constants\n mproton = self.ip['mproton']\n mneutron = self.ip['mneutron']\n\n F1up = F1('u', 'p', self.ip).value_zero_mom()\n F1dp = F1('d', 'p', self.ip).value_zero_mom()\n F1sp = F1('s', 'p', self.ip).value_zero_mom()\n\n F1un = F1('u', 'n', self.ip).value_zero_mom()\n F1dn = F1('d', 'n', self.ip).value_zero_mom()\n F1sn = F1('s', 'n', self.ip).value_zero_mom()\n\n F1spslope = F1('s', 'p', self.ip).first_deriv_zero_mom()\n F1snslope = F1('s', 'n', self.ip).first_deriv_zero_mom()\n\n F2up = F2('u', 'p', self.ip).value_zero_mom()\n F2dp = F2('d', 'p', self.ip).value_zero_mom()\n F2sp = F2('s', 'p', self.ip).value_zero_mom()\n\n F2un = F2('u', 'n', self.ip).value_zero_mom()\n F2dn = F2('d', 'n', self.ip).value_zero_mom()\n F2sn = F2('s', 'n', self.ip).value_zero_mom()\n\n FAup = FA('u', 'p', self.ip).value_zero_mom()\n FAdp = FA('d', 'p', self.ip).value_zero_mom()\n FAsp = FA('s', 'p', self.ip).value_zero_mom()\n\n FAun = FA('u', 'n', self.ip).value_zero_mom()\n FAdn = FA('d', 'n', self.ip).value_zero_mom()\n FAsn = FA('s', 'n', 
self.ip).value_zero_mom()\n\n FPpup_pion = FPprimed('u', 'p', self.ip).value_pion_pole()\n FPpdp_pion = FPprimed('d', 'p', self.ip).value_pion_pole()\n FPpsp_pion = FPprimed('s', 'p', self.ip).value_pion_pole()\n\n FPpun_pion = FPprimed('u', 'n', self.ip).value_pion_pole()\n FPpdn_pion = FPprimed('d', 'n', self.ip).value_pion_pole()\n FPpsn_pion = FPprimed('s', 'n', self.ip).value_pion_pole()\n\n FPpup_eta = FPprimed('u', 'p', self.ip).value_eta_pole()\n FPpdp_eta = FPprimed('d', 'p', self.ip).value_eta_pole()\n FPpsp_eta = FPprimed('s', 'p', self.ip).value_eta_pole()\n\n FPpun_eta = FPprimed('u', 'n', self.ip).value_eta_pole()\n FPpdn_eta = FPprimed('d', 'n', self.ip).value_eta_pole()\n FPpsn_eta = FPprimed('s', 'n', self.ip).value_eta_pole()\n\n FSup = FS('u', 'p', self.ip).value_zero_mom()\n FSdp = FS('d', 'p', self.ip).value_zero_mom()\n FSsp = FS('s', 'p', self.ip).value_zero_mom()\n\n FSun = FS('u', 'n', self.ip).value_zero_mom()\n FSdn = FS('d', 'n', self.ip).value_zero_mom()\n FSsn = FS('s', 'n', self.ip).value_zero_mom()\n\n FPup_pion = FP('u', 'p', self.ip).value_pion_pole()\n FPdp_pion = FP('d', 'p', self.ip).value_pion_pole()\n FPsp_pion = FP('s', 'p', self.ip).value_pion_pole()\n\n FPun_pion = FP('u', 'n', self.ip).value_pion_pole()\n FPdn_pion = FP('d', 'n', self.ip).value_pion_pole()\n FPsn_pion = FP('s', 'n', self.ip).value_pion_pole()\n\n FPup_eta = FP('u', 'p', self.ip).value_eta_pole()\n FPdp_eta = FP('d', 'p', self.ip).value_eta_pole()\n FPsp_eta = FP('s', 'p', self.ip).value_eta_pole()\n\n FPun_eta = FP('u', 'n', self.ip).value_eta_pole()\n FPdn_eta = FP('d', 'n', self.ip).value_eta_pole()\n FPsn_eta = FP('s', 'n', self.ip).value_eta_pole()\n\n FGp = FG('p', self.ip).value_zero_mom()\n FGn = FG('n', self.ip).value_zero_mom()\n\n FGtildep = FGtilde('p', self.ip).value_zero_mom()\n FGtilden = FGtilde('n', self.ip).value_zero_mom()\n\n FGtildep_pion = FGtilde('p', self.ip).value_pion_pole()\n FGtilden_pion = FGtilde('n', self.ip).value_pion_pole()\n\n FGtildep_eta = FGtilde('p', self.ip).value_eta_pole()\n FGtilden_eta = FGtilde('n', self.ip).value_eta_pole()\n\n FT0up = FT0('u', 'p', self.ip).value_zero_mom()\n FT0dp = FT0('d', 'p', self.ip).value_zero_mom()\n FT0sp = FT0('s', 'p', self.ip).value_zero_mom()\n\n FT0un = FT0('u', 'n', self.ip).value_zero_mom()\n FT0dn = FT0('d', 'n', self.ip).value_zero_mom()\n FT0sn = FT0('s', 'n', self.ip).value_zero_mom()\n\n FT1up = FT1('u', 'p', self.ip).value_zero_mom()\n FT1dp = FT1('d', 'p', self.ip).value_zero_mom()\n FT1sp = FT1('s', 'p', self.ip).value_zero_mom()\n\n FT1un = FT1('u', 'n', self.ip).value_zero_mom()\n FT1dn = FT1('d', 'n', self.ip).value_zero_mom()\n FT1sn = FT1('s', 'n', self.ip).value_zero_mom()\n\n FTW2up = FTwist2('u', 'p', self.ip).value_zero_mom()\n FTW2dp = FTwist2('d', 'p', self.ip).value_zero_mom()\n FTW2sp = FTwist2('s', 'p', self.ip).value_zero_mom()\n\n FTW2gp = FTwist2('g', 'p', self.ip).value_zero_mom()\n\n FTW2un = FTwist2('u', 'n', self.ip).value_zero_mom()\n FTW2dn = FTwist2('d', 'n', self.ip).value_zero_mom()\n FTW2sn = FTwist2('s', 'n', self.ip).value_zero_mom()\n\n FTW2gn = FTwist2('g', 'n', self.ip).value_zero_mom()\n\n ### The coefficients ###\n #\n # Note that all dependence on q^2, 1/q^2, 1/(m^2-q^2), q^2/(m^2-q^2) is taken care of\n # by defining spurious operators.\n #\n # Therefore, we need to split some of the coefficients\n # into the \"pion part\" etc. 
with the q-dependence factored out,\n # and introduce a few spurious \"long-distance\" operators.\n #\n # The coefficients cNR1 -- cNR14 correspond to the operators in 1611.00368 and 1308.6288\n #\n # Therefore, we define O6pi = O6/(mpi^2+q^2); \n # O6eta = O6/(meta^2+q^2);\n # O6q2pi = O6*q^2/(mpi^2+q^2);\n # O6q2eta = O6*q^2/(meta^2+q^2);\n # O10pi = O10/(mpi^2+q^2);\n # O10eta = O10/(meta^2+q^2);\n # O10q2pi = O10*q^2/(mpi^2+q^2);\n # O10q2eta = O10*q^2/(meta^2+q^2);\n #\n # For the dipole interactions, these are the ones that have c2p1, c1N2, c2p2 as coefficients. \n # Therefore, we define O5bq2 = O5/q^2; \n # O6bq2 = O6/q^2.\n # O11bq2 = O11/q^2.\n # \n # For the tensors, O4 * q^2 appears as a leading contribution.\n # Therefore, we define O4q2 = O4 * q^2\n #\n # For the tensors, O1 * q^2 appears as a subleading contribution.\n # Therefore, we define O1q2 = O1 * q^2\n #\n # q^2 is here always the spatial part!!! \n #\n\n c3mu_dict = self.coeff_dict\n\n if self.DM_type == \"D\":\n my_cNR_dict = {\n 'cNR1p' : F1up*(c3mu_dict['C61u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C81u'])\\\n + F1dp*(c3mu_dict['C61d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C81d'])\\\n + FGp*c3mu_dict['C71']\\\n + FSup*c3mu_dict['C75u'] + FSdp*c3mu_dict['C75d'] + FSsp*c3mu_dict['C75s']\\\n - alpha/(2*np.pi*DM_mass)*c3mu_dict['C51']\\\n + FTW2up*c3mu_dict['C723u']\\\n + FTW2dp*c3mu_dict['C723d']\\\n + FTW2sp*c3mu_dict['C723s']\\\n + FTW2gp*c3mu_dict['C725'],\n 'cNR2p' : 0,\n 'cNR3p' : F2sp*(c3mu_dict['C61s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s']),\n 'cNR4p' : - 4*( FAup*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FAdp*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d'])\\\n + FAsp*(c3mu_dict['C64s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C84s']))\\\n - 2*alpha/np.pi * self.ip['mup']/mN * c3mu_dict['C51']\\\n + 8*(FT0up*c3mu_dict['C79u'] + FT0dp*c3mu_dict['C79d'] + FT0sp*c3mu_dict['C79s']),\n 'cNR5p' : - 2*mN * ( F1up*c3mu_dict['C715u']\\\n + F1dp*c3mu_dict['C715d']\\\n + F1sp*c3mu_dict['C715s']),\n 'cNR6p' : mN/DM_mass * FGtildep * c3mu_dict['C74']\\\n -2*mN*( (F1up+F2up)*c3mu_dict['C715u']\\\n + (F1dp+F2dp)*c3mu_dict['C715d']\\\n + (F1sp+F2sp)*c3mu_dict['C715s'])\\\n + mN/DM_mass*F2sp*(c3mu_dict['C61s']\n - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s']),\n 'cNR7p' : - 2*( FAup*(c3mu_dict['C63u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C83u'])\\\n + FAdp*(c3mu_dict['C63d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C83d'])\\\n + FAsp*(c3mu_dict['C63s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C83s'])),\n 'cNR8p' : 2*( F1up*(c3mu_dict['C62u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\\\n + F1dp*(c3mu_dict['C62d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C82d'])),\n 'cNR9p' : 2*( (F1up+F2up)*(c3mu_dict['C62u']\\\n - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\\\n + (F1dp+F2dp)*(c3mu_dict['C62d']\\\n - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C82d'])\\\n + (F1sp+F2sp)*(c3mu_dict['C62s']\\\n - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C82s']))\\\n + 2*mN*( FAup*(c3mu_dict['C63u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C83u'])\\\n + FAdp*(c3mu_dict['C63d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C83d'])\\\n + FAsp*(c3mu_dict['C63s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C83s']))\\\n /DM_mass\\\n - 4*mN * ( FAup*c3mu_dict['C717u']\\\n + FAdp*c3mu_dict['C717d']\\\n + FAsp*c3mu_dict['C717s']),\n 'cNR10p' : FGtildep * c3mu_dict['C73']\\\n -2*mN/DM_mass * 
(FT0up*c3mu_dict['C710u']\\\n + FT0dp*c3mu_dict['C710d']\\\n + FT0sp*c3mu_dict['C710s']),\n 'cNR11p' : - mN/DM_mass * (FSup*c3mu_dict['C76u']\\\n + FSdp*c3mu_dict['C76d']\\\n + FSsp*c3mu_dict['C76s'])\\\n - mN/DM_mass * FGp * c3mu_dict['C72']\\\n + 2*((FT0up-FT1up)*c3mu_dict['C710u']\\\n + (FT0dp-FT1dp)*c3mu_dict['C710d']\\\n + (FT0sp-FT1sp)*c3mu_dict['C710s'])\\\n - 2*mN * ( F1up*(c3mu_dict['C716u'])\\\n + F1dp*(c3mu_dict['C716d'])\\\n + F1sp*(c3mu_dict['C716s'])),\n 'cNR12p' : -8*(FT0up*c3mu_dict['C710u'] + FT0dp*c3mu_dict['C710d'] + FT0sp*c3mu_dict['C710s']),\n 'cNR13p' : 0.,\n 'cNR14p' : + 4*mN * ( FAup*(c3mu_dict['C718u'])\\\n + FAdp*(c3mu_dict['C718d'])\\\n + FAsp*(c3mu_dict['C718s'])),\n\n 'cNR6pip' : mN/DM_mass * (FPup_pion*c3mu_dict['C78u'] + FPdp_pion*c3mu_dict['C78d'])\\\n + FPpup_pion*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FPpdp_pion*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d']),\n 'cNR6etap' : mN/DM_mass * (FPup_eta*c3mu_dict['C78u']\\\n + FPdp_eta*c3mu_dict['C78d']\\\n + FPsp_eta*c3mu_dict['C78s'])\\\n + FPpup_eta*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FPpdp_eta*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d'])\\\n + FPpsp_eta*(c3mu_dict['C64s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C84s']),\n 'cNR6q2pip' : mN/DM_mass * FGtildep_pion * c3mu_dict['C74'],\n 'cNR6q2etap' : mN/DM_mass * FGtildep_eta * c3mu_dict['C74'],\n \n 'cNR10pip' : FPup_pion*c3mu_dict['C77u'] + FPdp_pion*c3mu_dict['C77d'],\n 'cNR10etap' : FPup_eta*c3mu_dict['C77u'] + FPdp_eta*c3mu_dict['C77d'] + FPsp_eta*c3mu_dict['C77s'],\n 'cNR10q2pip' : FGtildep_pion * c3mu_dict['C73'],\n 'cNR10q2etap' : FGtildep_eta * c3mu_dict['C73'],\n \n 'cNR5bq2p' : mN* (2*alpha/np.pi*c3mu_dict['C51']),\n 'cNR6bq2p' : -mN**2* (- 2*alpha/np.pi * self.ip['mup']/mN * c3mu_dict['C51']),\n 'cNR11bq2p' : mN* (2*alpha/np.pi*c3mu_dict['C52']),\n\n 'cNR1q2p' : ( F1up*c3mu_dict['C715u']\\\n + F1dp*c3mu_dict['C715d']\\\n + F1sp*c3mu_dict['C715s'])/(2*DM_mass)\\\n + (F1spslope - F2sp / mN**2/4)\n * (c3mu_dict['C61s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s']),\n 'cNR4q2p' : 2*( (F1up+F2up)*c3mu_dict['C715u']\\\n + (F1dp+F2dp)*c3mu_dict['C715d']\\\n + (F1sp+F2sp)*c3mu_dict['C715s'])/mN\\\n - 1/mN/DM_mass * F2sp\n * (c3mu_dict['C61s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s']),\n\n\n\n\n 'cNR1n' : F1un*(c3mu_dict['C61u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C81u'])\\\n + F1dn*(c3mu_dict['C61d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C81d'])\\\n + FGn*c3mu_dict['C71']\\\n + FSun*c3mu_dict['C75u'] + FSdn*c3mu_dict['C75d'] + FSsn*c3mu_dict['C75s']\\\n + FTW2un*c3mu_dict['C723u']\\\n + FTW2dn*c3mu_dict['C723d']\\\n + FTW2sn*c3mu_dict['C723s']\\\n + FTW2gn*c3mu_dict['C725'],\n 'cNR2n' : 0,\n 'cNR3n' : F2sn*(c3mu_dict['C61s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s']),\n 'cNR4n' : - 4*( FAun*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FAdn*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d'])\\\n + FAsn*(c3mu_dict['C64s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C84s']))\\\n - 2*alpha/np.pi * self.ip['mun']/mN * c3mu_dict['C51']\\\n + 8*(FT0un*c3mu_dict['C79u'] + FT0dn*c3mu_dict['C79d'] + FT0sn*c3mu_dict['C79s']),\n 'cNR5n' : - 2*mN * ( F1un*c3mu_dict['C715u']\\\n + F1dn*c3mu_dict['C715d']\\\n + F1sn*c3mu_dict['C715s']),\n 'cNR6n' : mN/DM_mass * FGtilden * c3mu_dict['C74']\\\n -2*mN*( 
(F1un+F2un)*c3mu_dict['C715u']\\\n + (F1dn+F2dn)*c3mu_dict['C715d']\\\n + (F1sn+F2sn)*c3mu_dict['C715s'])\\\n + mN/DM_mass * F2sn\n * (c3mu_dict['C61s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s']),\n 'cNR7n' : - 2*( FAun*(c3mu_dict['C63u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C83u'])\\\n + FAdn*(c3mu_dict['C63d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C83d'])\\\n + FAsn*(c3mu_dict['C63s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C83s'])),\n 'cNR8n' : 2*( F1un*(c3mu_dict['C62u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\\\n + F1dn*(c3mu_dict['C62d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C82d'])),\n 'cNR9n' : 2*( (F1un+F2un)*(c3mu_dict['C62u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\\\n + (F1dn+F2dn)*(c3mu_dict['C62d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C82d'])\\\n + (F1sn+F2sn)*(c3mu_dict['C62s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C82s']))\\\n + 2*mN*( FAun*(c3mu_dict['C63u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C83u'])\\\n + FAdn*(c3mu_dict['C63d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C83d'])\\\n + FAsn*(c3mu_dict['C63s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C83s']))\\\n /DM_mass\\\n - 4*mN * ( FAun*c3mu_dict['C717u']\\\n + FAdn*c3mu_dict['C717d']\\\n + FAsn*c3mu_dict['C717s']),\n 'cNR10n' : FGtilden * c3mu_dict['C73']\\\n -2*mN/DM_mass * (FT0un*c3mu_dict['C710u']\\\n + FT0dn*c3mu_dict['C710d']\\\n + FT0sn*c3mu_dict['C710s']),\n 'cNR11n' : - mN/DM_mass * (FSun*c3mu_dict['C76u']\\\n + FSdn*c3mu_dict['C76d']\\\n + FSsn*c3mu_dict['C76s'])\\\n - mN/DM_mass * FGn * c3mu_dict['C72']\\\n + 2*((FT0un-FT1un)*c3mu_dict['C710u']\\\n + (FT0dn-FT1dn)*c3mu_dict['C710d']\\\n + (FT0sn-FT1sn)*c3mu_dict['C710s'])\\\n - 2*mN * ( F1un*(c3mu_dict['C716u'])\\\n + F1dn*(c3mu_dict['C716d'])\\\n + F1sn*(c3mu_dict['C716s'])),\n 'cNR12n' : -8*(FT0un*c3mu_dict['C710u'] + FT0dn*c3mu_dict['C710d'] + FT0sn*c3mu_dict['C710s']),\n 'cNR13n' : 0.,\n 'cNR14n' : + 4*mN * ( FAun*(c3mu_dict['C718u'])\\\n + FAdn*(c3mu_dict['C718d'])\\\n + FAsn*(c3mu_dict['C718s'])),\n \n 'cNR6pin' : mN/DM_mass * (FPun_pion*c3mu_dict['C78u'] + FPdn_pion*c3mu_dict['C78d'])\\\n + FPpun_pion*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FPpdn_pion*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d']),\n 'cNR6etan' : mN/DM_mass * (FPun_eta*c3mu_dict['C78u']\\\n + FPdn_eta*c3mu_dict['C78d']\\\n + FPsn_eta*c3mu_dict['C78s'])\\\n + FPpun_eta*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FPpdn_eta*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d'])\\\n + FPpsn_eta*(c3mu_dict['C64s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C84s']),\n 'cNR6q2pin' : mN/DM_mass * FGtilden_pion * c3mu_dict['C74'],\n 'cNR6q2etan' : mN/DM_mass * FGtilden_eta * c3mu_dict['C74'],\n \n 'cNR10pin' : FPun_pion*c3mu_dict['C77u'] + FPdn_pion*c3mu_dict['C77d'],\n 'cNR10etan' : FPun_eta*c3mu_dict['C77u'] + FPdn_eta*c3mu_dict['C77d'] + FPsn_eta*c3mu_dict['C77s'],\n 'cNR10q2pin' : FGtilden_pion * c3mu_dict['C73'],\n 'cNR10q2etan' : FGtilden_eta * c3mu_dict['C73'],\n \n 'cNR5bq2n' : 0,\n 'cNR6bq2n' : -mN**2 * (- 2*alpha/np.pi * self.ip['mun']/mN * c3mu_dict['C51']),\n 'cNR11bq2n' : 0,\n\n 'cNR1q2n' : ( F1un*c3mu_dict['C715u']\\\n + F1dn*c3mu_dict['C715d']\\\n + F1sn*c3mu_dict['C715s'])/(2*DM_mass)\\\n + (F1snslope - F2sn / mN**2/4)\n * (c3mu_dict['C61s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s']),\n 'cNR4q2n' : 2*( (F1un+F2un)*c3mu_dict['C715u']\\\n + 
(F1dn+F2dn)*c3mu_dict['C715d']\\\n + (F1sn+F2sn)*c3mu_dict['C715s'])/mN\\\n - 1/mN/DM_mass * F2sn\n * (c3mu_dict['C61s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s'])\n }\n\n if NLO:\n my_cNR_dict['cNR5p'] = my_cNR_dict['cNR5p']\\\n + 2*( (FT0up-FT1up)*c3mu_dict['C79u']\\\n + (FT0dp-FT1dp)*c3mu_dict['C79d']\\\n + (FT0sp-FT1sp)*c3mu_dict['C79s'])\n my_cNR_dict['cNR1q2p'] = my_cNR_dict['cNR1q2p']\\\n - ( (FT0up-FT1up)*c3mu_dict['C79u']\\\n + (FT0dp-FT1dp)*c3mu_dict['C79d']\\\n + (FT0sp-FT1sp)*c3mu_dict['C79s'])/(2*DM_mass*mN)\n my_cNR_dict['cNR5n'] = my_cNR_dict['cNR5n']\\\n + 2*( (FT0un-FT1un)*c3mu_dict['C79u']\\\n + (FT0dn-FT1dn)*c3mu_dict['C79d']\\\n + (FT0sn-FT1sn)*c3mu_dict['C79s'])\n my_cNR_dict['cNR1q2n'] = my_cNR_dict['cNR1q2n']\\\n - ( (FT0un-FT1un)*c3mu_dict['C79u']\\\n + (FT0dn-FT1dn)*c3mu_dict['C79d']\\\n + (FT0sn-FT1sn)*c3mu_dict['C79s'])/(2*DM_mass*mN)\n\n\n if self.DM_type == \"M\":\n my_cNR_dict = {\n 'cNR1p' : FGp*c3mu_dict['C71']\\\n + FSup*c3mu_dict['C75u'] + FSdp*c3mu_dict['C75d'] + FSsp*c3mu_dict['C75s'],\n 'cNR2p' : 0.,\n 'cNR3p' : 0.,\n 'cNR4p' : - 4*( FAup*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FAdp*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d'])\\\n + FAsp*(c3mu_dict['C64s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C84s'])),\n 'cNR5p' : 0,\n 'cNR6p' : mN/DM_mass * FGtildep * c3mu_dict['C74'],\n 'cNR7p' : 0,\n 'cNR8p' : 2*( F1up*(c3mu_dict['C62u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\\\n + F1dp*(c3mu_dict['C62d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C82d'])),\n 'cNR9p' : 2*( (F1up+F2up)*(c3mu_dict['C62u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\\\n + (F1dp+F2dp)*(c3mu_dict['C62d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C82d'])\\\n + (F1sp+F2sp)*(c3mu_dict['C62s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C82s'])),\n 'cNR10p' : FGtildep * c3mu_dict['C73'],\n 'cNR11p' : - mN/DM_mass * (FSup*c3mu_dict['C76u']\\\n + FSdp*c3mu_dict['C76d']\\\n + FSsp*c3mu_dict['C76s'])\\\n - mN/DM_mass * FGp * c3mu_dict['C72'],\n 'cNR12p' : 0.,\n 'cNR13p' : 0.,\n 'cNR14p' : 0.,\n \n 'cNR6pip' : mN/DM_mass * (FPup_pion*c3mu_dict['C78u'] + FPdp_pion*c3mu_dict['C78d'])\\\n + FPpup_pion*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FPpdp_pion*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d']),\n 'cNR6etap' : mN/DM_mass * (FPup_eta*c3mu_dict['C78u']\\\n + FPdp_eta*c3mu_dict['C78d']\\\n + FPsp_eta*c3mu_dict['C78s'])\\\n + FPpup_eta*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FPpdp_eta*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d'])\\\n + FPpsp_eta*(c3mu_dict['C64s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C84s']),\n 'cNR6q2pip' : mN/DM_mass * FGtildep_pion * c3mu_dict['C74'],\n 'cNR6q2etap' : mN/DM_mass * FGtildep_eta * c3mu_dict['C74'],\n \n 'cNR10pip' : FPup_pion*c3mu_dict['C77u'] + FPdp_pion*c3mu_dict['C77d'],\n 'cNR10etap' : FPup_eta*c3mu_dict['C77u'] + FPdp_eta*c3mu_dict['C77d'] + FPsp_eta*c3mu_dict['C77s'],\n 'cNR10q2pip' : FGtildep_pion * c3mu_dict['C73'],\n 'cNR10q2etap' : FGtildep_eta * c3mu_dict['C73'],\n \n 'cNR5bq2p' : 0.,\n 'cNR6bq2p' : 0.,\n 'cNR11bq2p' : 0.,\n\n 'cNR1q2p' : 0.,\n 'cNR4q2p' : 0.,\n\n\n\n\n 'cNR1n' : FGn*c3mu_dict['C71']\\\n + FSun*c3mu_dict['C75u'] + FSdn*c3mu_dict['C75d'] + FSsn*c3mu_dict['C75s'],\n 'cNR2n' : 0.,\n 'cNR3n' : 0.,\n 'cNR4n' : - 4*( FAun*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * 
c3mu_dict['C84u'])\\\n + FAdn*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d'])\\\n + FAsn*(c3mu_dict['C64s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C84s'])),\n 'cNR5n' : 0.,\n 'cNR6n' : mN/DM_mass * FGtilden * c3mu_dict['C74'],\n 'cNR7n' : 0.,\n 'cNR8n' : 2*( F1un*(c3mu_dict['C62u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\\\n + F1dn*(c3mu_dict['C62d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C82d'])),\n 'cNR9n' : 2*( (F1un+F2un)*(c3mu_dict['C62u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\\\n + (F1dn+F2dn)*(c3mu_dict['C62d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C82d'])\\\n + (F1sn+F2sn)*(c3mu_dict['C62s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C82s'])),\n 'cNR10n' : FGtilden * c3mu_dict['C73'],\n 'cNR11n' : - mN/DM_mass * (FSun*c3mu_dict['C76u']\\\n + FSdn*c3mu_dict['C76d']\\\n + FSsn*c3mu_dict['C76s'])\\\n - mN/DM_mass * FGn * c3mu_dict['C72'],\n 'cNR12n' : 0.,\n 'cNR13n' : 0.,\n 'cNR14n' : 0.,\n \n 'cNR6pin' : mN/DM_mass * (FPun_pion*c3mu_dict['C78u'] + FPdn_pion*c3mu_dict['C78d'])\\\n + FPpun_pion*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FPpdn_pion*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d']),\n 'cNR6etan' : mN/DM_mass * (FPun_eta*c3mu_dict['C78u']\\\n + FPdn_eta*c3mu_dict['C78d']\\\n + FPsn_eta*c3mu_dict['C78s'])\\\n + FPpun_eta*(c3mu_dict['C64u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C84u'])\\\n + FPpdn_eta*(c3mu_dict['C64d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C84d'])\\\n + FPpsn_eta*(c3mu_dict['C64s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C84s']),\n 'cNR6q2pin' : mN/DM_mass * FGtilden_pion * c3mu_dict['C74'],\n 'cNR6q2etan' : mN/DM_mass * FGtilden_eta * c3mu_dict['C74'],\n \n 'cNR10pin' : FPun_pion*c3mu_dict['C77u'] + FPdn_pion*c3mu_dict['C77d'],\n 'cNR10etan' : FPun_eta*c3mu_dict['C77u'] + FPdn_eta*c3mu_dict['C77d'] + FPsn_eta*c3mu_dict['C77s'],\n 'cNR10q2pin' : FGtilden_pion * c3mu_dict['C73'],\n 'cNR10q2etan' : FGtilden_eta * c3mu_dict['C73'],\n \n 'cNR5bq2n' : 0.,\n 'cNR6bq2n' : 0.,\n 'cNR11bq2n' : 0.,\n\n 'cNR1q2n' : 0.,\n 'cNR4q2n' : 0.\n }\n\n\n if self.DM_type == \"C\":\n my_cNR_dict = {\n 'cNR1p' : F1up * (c3mu_dict['C61u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C81u'])\\\n + F1dp * (c3mu_dict['C61d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C81d'])\\\n + FGp*c3mu_dict['C65']/2/DM_mass\\\n + (FSup*c3mu_dict['C63u'] + FSdp*c3mu_dict['C63d'] + FSsp*c3mu_dict['C63s'])/2/DM_mass,\n 'cNR2p' : 0,\n 'cNR3p' : F2sp * (c3mu_dict['C61s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s']),\n 'cNR4p' : 0,\n 'cNR5p' : 0,\n 'cNR6p' : 0,\n 'cNR7p' : -2*( FAup * (c3mu_dict['C62u']\\\n - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\\\n + FAdp * (c3mu_dict['C62d']\\\n - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C82d'])\\\n + FAsp * (c3mu_dict['C62s']\\\n - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C82s'])),\n 'cNR8p' : 0,\n 'cNR9p' : 0,\n 'cNR10p' : FGtildep * c3mu_dict['C66']/2/DM_mass,\n 'cNR11p' : 0,\n 'cNR12p' : 0,\n 'cNR13p' : 0.,\n 'cNR14p' : 0,\n\n 'cNR6pip' : 0,\n 'cNR6etap' : 0,\n 'cNR6q2pip' : 0,\n 'cNR6q2etap' : 0,\n \n 'cNR10pip' : (FPup_pion*c3mu_dict['C64u'] + FPdp_pion*c3mu_dict['C64d'])/2/DM_mass,\n 'cNR10etap' : ( FPup_eta*c3mu_dict['C64u']\\\n + FPdp_eta*c3mu_dict['C64d']\\\n + FPsp_eta*c3mu_dict['C64s'])/2/DM_mass,\n 'cNR10q2pip' : FGtildep_pion * c3mu_dict['C66']/2/DM_mass,\n 'cNR10q2etap' : FGtildep_eta * c3mu_dict['C66']/2/DM_mass,\n \n 'cNR5bq2p' : 0,\n 'cNR6bq2p' : 0,\n 
'cNR11bq2p' : 0,\n\n 'cNR1q2p' : (F1spslope - 1/mN**2/4 * F2sp)\n * (c3mu_dict['C61s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s']),\n 'cNR4q2p' : 0,\n\n\n\n\n 'cNR1n' : F1un * (c3mu_dict['C61u'] - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C81u'])\\\n + F1dn * (c3mu_dict['C61d'] - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C81d'])\\\n + FGn*c3mu_dict['C65']/2/DM_mass\\\n + (FSun*c3mu_dict['C63u'] + FSdn*c3mu_dict['C63d'] + FSsn*c3mu_dict['C63s'])/2/DM_mass,\n 'cNR2n' : 0,\n 'cNR3n' : F2sp * (c3mu_dict['C61s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s']),\n 'cNR4n' : 0,\n 'cNR5n' : 0,\n 'cNR6n' : 0,\n 'cNR7n' : -2*( FAun * (c3mu_dict['C62u']\\\n - np.sqrt(2)*GF*mu**2 / gs2_2GeV * c3mu_dict['C82u'])\\\n + FAdn * (c3mu_dict['C62d']\\\n - np.sqrt(2)*GF*md**2 / gs2_2GeV * c3mu_dict['C82d'])\\\n + FAsn * (c3mu_dict['C62s']\\\n - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C82s'])),\n 'cNR8n' : 0,\n 'cNR9n' : 0,\n 'cNR10n' : FGtilden * c3mu_dict['C66']/2/DM_mass,\n 'cNR11n' : 0,\n 'cNR12n' : 0,\n 'cNR13p' : 0.,\n 'cNR14n' : 0,\n\n 'cNR6pin' : 0,\n 'cNR6etan' : 0,\n 'cNR6q2pin' : 0,\n 'cNR6q2etan' : 0,\n \n 'cNR10pin' : (FPun_pion*c3mu_dict['C64u'] + FPdn_pion*c3mu_dict['C64d'])/2/DM_mass,\n 'cNR10etan' : ( FPun_eta*c3mu_dict['C64u']\\\n + FPdn_eta*c3mu_dict['C64d']\\\n + FPsn_eta*c3mu_dict['C64s'])/2/DM_mass,\n 'cNR10q2pin' : FGtilden_pion * c3mu_dict['C66']/2/DM_mass,\n 'cNR10q2etan' : FGtilden_eta * c3mu_dict['C66']/2/DM_mass,\n \n 'cNR5bq2n' : 0,\n 'cNR6bq2n' : 0,\n 'cNR11bq2n' : 0,\n\n 'cNR1q2n' : (F1snslope - 1/mN**2/4 * F2sn)\n * (c3mu_dict['C61s'] - np.sqrt(2)*GF*ms**2 / gs2_2GeV * c3mu_dict['C81s']),\n 'cNR4q2n' : 0\n }\n\n\n if self.DM_type == \"R\":\n my_cNR_dict = {\n 'cNR1p' : FSup*c3mu_dict['C63u']/2/DM_mass\\\n + FSdp*c3mu_dict['C63d']/2/DM_mass\\\n + FSsp*c3mu_dict['C63s']/2/DM_mass\\\n + FGp*c3mu_dict['C65']/2/DM_mass,\n 'cNR2p' : 0,\n 'cNR3p' : 0,\n 'cNR4p' : 0,\n 'cNR5p' : 0,\n 'cNR6p' : 0,\n 'cNR7p' : 0,\n 'cNR8p' : 0,\n 'cNR9p' : 0,\n 'cNR10p' : FGtildep * c3mu_dict['C66']/2/DM_mass,\n 'cNR11p' : 0,\n 'cNR12p' : 0,\n 'cNR13p' : 0.,\n 'cNR14p' : 0,\n\n 'cNR6pip' : 0,\n 'cNR6etap' : 0,\n 'cNR6q2pip' : 0,\n 'cNR6q2etap' : 0,\n \n 'cNR10pip' : (FPup_pion*c3mu_dict['C64u'] + FPdp_pion*c3mu_dict['C64d'])/2/DM_mass,\n 'cNR10etap' : FPup_eta*c3mu_dict['C64u']/2/DM_mass\\\n + FPdp_eta*c3mu_dict['C64d']/2/DM_mass\\\n + FPsp_eta*c3mu_dict['C64s']/2/DM_mass,\n 'cNR10q2pip' : FGtildep_pion * c3mu_dict['C66']/2/DM_mass,\n 'cNR10q2etap' : FGtildep_eta * c3mu_dict['C66']/2/DM_mass,\n \n 'cNR5bq2p' : 0,\n 'cNR6bq2p' : 0,\n 'cNR11bq2p' : 0,\n\n 'cNR1q2p' : 0,\n 'cNR4q2p' : 0,\n\n\n\n\n 'cNR1n' : FSun*c3mu_dict['C63u']/2/DM_mass\\\n + FSdn*c3mu_dict['C63d']/2/DM_mass\\\n + FSsn*c3mu_dict['C63s']/2/DM_mass\\\n + FGn*c3mu_dict['C65']/2/DM_mass,\n 'cNR2n' : 0,\n 'cNR3n' : 0,\n 'cNR4n' : 0,\n 'cNR5n' : 0,\n 'cNR6n' : 0,\n 'cNR7n' : 0,\n 'cNR8n' : 0,\n 'cNR9n' : 0,\n 'cNR10n' : FGtilden * c3mu_dict['C66']/2/DM_mass,\n 'cNR11n' : 0,\n 'cNR12n' : 0,\n 'cNR13p' : 0.,\n 'cNR14n' : 0,\n\n 'cNR6pin' : 0,\n 'cNR6etan' : 0,\n 'cNR6q2pin' : 0,\n 'cNR6q2etan' : 0,\n \n 'cNR10pin' : (FPun_pion*c3mu_dict['C64u'] + FPdn_pion*c3mu_dict['C64d'])/2/DM_mass,\n 'cNR10etan' : FPun_eta*c3mu_dict['C64u']/2/DM_mass\\\n + FPdn_eta*c3mu_dict['C64d']/2/DM_mass\\\n + FPsn_eta*c3mu_dict['C64s']/2/DM_mass,\n 'cNR10q2pin' : FGtilden_pion * c3mu_dict['C66']/2/DM_mass,\n 'cNR10q2etan' : FGtilden_eta * c3mu_dict['C66']/2/DM_mass,\n \n 'cNR5bq2n' : 0,\n 'cNR6bq2n' : 0,\n 'cNR11bq2n' : 0,\n\n 'cNR1q2n' : 0,\n 
'cNR4q2n' : 0\n }\n\n\n return my_cNR_dict", "def test_cm2mm(self):\n v = 2.0\n r = conversion.cm2mm(v)\n self.assertTrue(r == v*10.0)", "def converged_MFS(mesh,plotx,ploty,plotz,k,incDir,incAmp=1.0,mintau=1,maxtau=20,steps=38):\n \n testx=plotx[0]\n testy=ploty[0]\n testz=plotz[0]\n taus=np.linspace(mintau,maxtau,steps+1)\n vals = np.zeros((steps+1,),dtype=np.complex)\n for i in xrange(steps+1):\n\tvals[i] = MFS(mesh,testx,testy,testz,k,incDir,incAmp,taus[i])[0]\n #vals=[MFS(mesh,testx,testy,testz,k,incDir,incAmp,tau) for tau in taus]\n vals = np.array([np.abs(vals[i]-vals[i+1]) for i in xrange(steps)])\n vals[np.where(vals==0)[0]]=100\n tau = taus[ np.where(vals==np.min(vals))[0][0] +1 ]\n print vals\n print \"MFS solution settled at tau: %.2f\" % (tau)\n return MFS(mesh,plotx,ploty,plotz,k,incDir,incAmp,tau)", "def magnetic_energy(self, dbe):\n #pylint: disable=C0103, R0914\n phase = dbe.phases[self.phase_name]\n param_search = dbe.search\n self.TC = self.curie_temperature = S.Zero\n self.BMAG = self.beta = S.Zero\n if 'ihj_magnetic_structure_factor' not in phase.model_hints:\n return S.Zero\n if 'ihj_magnetic_afm_factor' not in phase.model_hints:\n return S.Zero\n\n site_ratio_normalization = self._site_ratio_normalization\n # define basic variables\n afm_factor = phase.model_hints['ihj_magnetic_afm_factor']\n\n if afm_factor == 0:\n # Apply improved magnetic model which does not use AFM / Weiss factor\n return self.xiong_magnetic_energy(dbe)\n\n bm_param_query = (\n (where('phase_name') == phase.name) & \\\n (where('parameter_type') == 'BMAGN') & \\\n (where('constituent_array').test(self._array_validity))\n )\n tc_param_query = (\n (where('phase_name') == phase.name) & \\\n (where('parameter_type') == 'TC') & \\\n (where('constituent_array').test(self._array_validity))\n )\n\n mean_magnetic_moment = \\\n self.redlich_kister_sum(phase, param_search, bm_param_query)\n beta = mean_magnetic_moment / Piecewise(\n (afm_factor, mean_magnetic_moment <= 0),\n (1., True)\n )\n self.BMAG = self.beta = self.symbol_replace(beta, self._symbols)\n\n curie_temp = \\\n self.redlich_kister_sum(phase, param_search, tc_param_query)\n tc = curie_temp / Piecewise(\n (afm_factor, curie_temp <= 0),\n (1., True)\n )\n self.TC = self.curie_temperature = self.symbol_replace(tc, self._symbols)\n\n # Used to prevent singularity\n tau_positive_tc = v.T / (curie_temp + 1e-9)\n tau_negative_tc = v.T / ((curie_temp/afm_factor) + 1e-9)\n\n # define model parameters\n p = phase.model_hints['ihj_magnetic_structure_factor']\n A = 518/1125 + (11692/15975)*(1/p - 1)\n # factor when tau < 1 and tc < 0\n sub_tau_neg_tc = 1 - (1/A) * ((79/(140*p))*(tau_negative_tc**(-1)) + (474/497)*(1/p - 1) \\\n * ((tau_negative_tc**3)/6 + (tau_negative_tc**9)/135 + (tau_negative_tc**15)/600)\n )\n # factor when tau < 1 and tc > 0\n sub_tau_pos_tc = 1 - (1/A) * ((79/(140*p))*(tau_positive_tc**(-1)) + (474/497)*(1/p - 1) \\\n * ((tau_positive_tc**3)/6 + (tau_positive_tc**9)/135 + (tau_positive_tc**15)/600)\n )\n # factor when tau >= 1 and tc > 0\n super_tau_pos_tc = -(1/A) * ((tau_positive_tc**-5)/10 + (tau_positive_tc**-15)/315 + (tau_positive_tc**-25)/1500)\n # factor when tau >= 1 and tc < 0\n super_tau_neg_tc = -(1/A) * ((tau_negative_tc**-5)/10 + (tau_negative_tc**-15)/315 + (tau_negative_tc**-25)/1500)\n\n # This is an optimization to reduce the complexity of the compile-time expression\n expr_cond_pairs = [(sub_tau_neg_tc, curie_temp/afm_factor > v.T),\n (sub_tau_pos_tc, curie_temp > v.T),\n (super_tau_pos_tc, And(curie_temp < v.T, 
curie_temp > 0)),\n (super_tau_neg_tc, And(curie_temp/afm_factor < v.T, curie_temp < 0)),\n (0, True)\n ]\n g_term = Piecewise(*expr_cond_pairs)\n\n return v.R * v.T * log(beta+1) * \\\n g_term / site_ratio_normalization", "def omega_change_from_fmse_budget(temp_cont, z_cont, q_cont, q_ice_cont,\n u_cont, v_cont, ps_cont, temp_pert, z_pert,\n q_pert, q_ice_pert, u_pert, v_pert, ps_pert,\n bk, pk, horiz_thermo=False, min_dhdp=0.05):\n if horiz_thermo:\n advec_anom = fmse_horiz_advec_eta_upwind(\n temp_pert - temp_cont, z_pert - z_cont, q_pert - q_cont,\n q_ice_pert - q_ice_cont, u_cont, v_cont, ps_cont, bk, pk\n )\n else:\n advec_cont = fmse_horiz_advec_eta_upwind(\n temp_cont, z_cont, q_cont, q_ice_cont, u_cont, v_cont, ps_cont,\n bk, pk\n )\n advec_pert = fmse_horiz_advec_eta_upwind(\n temp_pert, z_pert, q_pert, q_ice_pert, u_pert, v_pert, ps_pert,\n bk, pk\n )\n advec_anom = advec_pert - advec_cont\n dh_dp = frozen_moist_static_stab(temp_cont, z_cont, q_cont, q_ice_cont,\n ps_cont, bk, pk)\n # Mask where denominator nearly zero.\n return -1*advec_anom / dh_dp.where(abs(dh_dp) > min_dhdp)", "def test_mm2cm(self):\n v = 2.0\n r = conversion.mm2cm(v)\n self.assertTrue(r == v*0.1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
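The metadata field above records how each row is meant to be consumed: the "paired" and "self" objectives are empty, and a single "triplet" objective is declared over (query, document, negatives) — presumably the query acts as the anchor, the document as its positive, and each entry of negatives as a negative. As a rough illustration only (the row_to_triplets helper and the abbreviated example_row literal below are hypothetical and not part of the dataset), a row with this schema could be unrolled into training triplets roughly like this:

def row_to_triplets(row):
    """Yield one (anchor, positive, negative) tuple per entry in row['negatives']."""
    for negative in row["negatives"]:
        yield (row["query"], row["document"], negative)

example_row = {
    # Field names match the dump's columns; the string values are abbreviated here.
    "query": "__init__(self) -> digital_clock_recovery_mm_ff_sptr",
    "document": "def __init__(self, *args): ...",
    "negatives": [
        "def __init__(self, *args): ... pfb_clock_sync_fff ...",
        "def __init__(self, *args): ... pfb_clock_sync_ccf ...",
    ],
    "metadata": {"objective": {"paired": [], "self": [],
                               "triplet": [["query", "document", "negatives"]]}},
}

for anchor, positive, negative in row_to_triplets(example_row):
    # Each iteration yields one triplet ready for a contrastive / triplet-loss setup.
    print(len(anchor), len(positive), len(negative))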
__init__(self) -> digital_clock_recovery_mm_ff_sptr
__init__(self, p) -> digital_clock_recovery_mm_ff_sptr
def __init__(self, *args):
    this = _digital_swig.new_digital_clock_recovery_mm_ff_sptr(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_fff_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, spi_rack, module, frequency=100e6):\n #def __init__(self, module, frequency=100e6):\n self.spi_rack = spi_rack\n self.module = module\n\n self.rf_frequency = frequency\n self.stepsize = 1e6\n self.ref_frequency = 10e6\n self.use_external = 0\n self.outputPower = None\n\n # These are the 6 registers present in the ADF4351\n self.registers = 6*[0]\n # In REG3: set ABP=1 (3 ns, INT-N) and CHARGE CANCEL=1\n self.registers[3] = (1<<22) | (1<<21) | 3\n # In REG5: set LD PIN MODE to 1 -> digital lock detect\n self.registers[5] = (1<<22) | (3<<19) | 5\n\n self.set_frequency(frequency)", "def __init__(self):\n this = _coin.new_SoMFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, pin, freq, dc_left, dc_right):\n\n pin = machine.Pin(pin)\n self.pwm = machine.PWM(pin, freq=freq)\n self.left, self.right = dc_left, dc_right", "def __init__(self, p, i, d, get_current_time, get_feedback_value):\r\n # p, i, and d constants\r\n self.p, self.i, self.d = p, i, d\r\n\r\n # saves the functions that return the time and the feedback\r\n self.get_current_time = get_current_time\r\n self.get_feedback_value = get_feedback_value", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, proportional_gain, integral_gain, differential_gain, stepper_motor, caliper, error_margin,\n steppermotor_frequency_limits, settling_time, name, setpoint_offset, interrupt_ignore_time):\n self.pid = PID(p=proportional_gain, i=integral_gain, d=differential_gain) # P I D controller\n self.steppermotor = stepper_motor # The stepper motor moving the load\n self.caliper = caliper # The caliper providing position feedback.\n self.stop_loop_event = threading.Event() # This is set when the control loop stops\n self.setpoint = None # Current setpoint\n self.error_margin = error_margin\n self.step_frequency_min, self.step_frequency_max = steppermotor_frequency_limits\n self.name = name\n self.settling_time = settling_time\n self.setpoint_offset = setpoint_offset\n self.interrupt_ignore_time = interrupt_ignore_time\n\n self.start_settling_time = None # timestamp when settling started\n self.settling = False # true if within allowed error band\n self.captured_data = [] # Stores captured data for visualization and debugging purposes", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, fft_length, cp_length, occupied_tones, snr, ks, carrier_map_bin, nc_filter, logging=False):\n\n\tgr.hier_block2.__init__(self, \"ofdm_receiver\",\n\t\t\t\tgr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature\n 
gr.io_signature2(2, 2, gr.sizeof_gr_complex*occupied_tones, gr.sizeof_char)) # Output signature\n\n bw = (float(occupied_tones) / float(fft_length)) / 2.0\n tb = bw*0.04\n print \"ofdm_receiver:__init__:occupied_tones %s fft_length %d \" % (occupied_tones, fft_length)\n \n chan_coeffs = filter.firdes.low_pass (1.0, # gain\n 1.0, # sampling rate\n bw+tb, # midpoint of trans. band\n tb, # width of trans. band\n filter.firdes.WIN_HAMMING) # filter type\n \n self.chan_filt = filter.fft_filter_ccc(1, chan_coeffs)\n\n # linklab, get ofdm parameters\n self._fft_length = fft_length\n self._occupied_tones = occupied_tones\n self._cp_length = cp_length\n self._nc_filter = nc_filter\n self._carrier_map_bin = carrier_map_bin\n \n win = [1 for i in range(fft_length)]\n \n # linklab, initialization function\n self.initialize(ks, self._carrier_map_bin)\n \n\n zeros_on_left = int(math.ceil((fft_length - occupied_tones)/2.0))\n ks0 = fft_length*[0,]\n ks0[zeros_on_left : zeros_on_left + occupied_tones] = ks[0]\n\n ks0 = np_fft.ifftshift(ks0)\n ks0time = np_fft.ifft(ks0)\n # ADD SCALING FACTOR\n ks0time = ks0time.tolist()\n\n SYNC = \"pn\"\n if SYNC == \"ml\":\n nco_sensitivity = -1.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_ml(fft_length,\n cp_length,\n snr,\n ks0time,\n logging)\n elif SYNC == \"pn\":\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_pn(fft_length,\n cp_length,\n logging)\n elif SYNC == \"pnac\":\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_pnac(fft_length,\n cp_length,\n ks0time,\n logging)\n # for testing only; do not user over the air\n # remove filter and filter delay for this\n elif SYNC == \"fixed\":\n self.chan_filt = gr.multiply_const_cc(1.0)\n nsymbols = 18 # enter the number of symbols per packet\n freq_offset = 0.0 # if you use a frequency offset, enter it here\n nco_sensitivity = -2.0/fft_length # correct for fine frequency\n self.ofdm_sync = ofdm_sync_fixed(fft_length,\n cp_length,\n nsymbols,\n freq_offset,\n logging)\n\n # Set up blocks\n\n # Create a delay line, linklab\n self.delay = blocks.delay(gr.sizeof_gr_complex, fft_length)\n\n self.nco = analog.frequency_modulator_fc(nco_sensitivity) # generate a signal proportional to frequency error of sync block\n self.sigmix = blocks.multiply_cc()\n self.sampler = gr_papyrus.ofdm_sampler(fft_length, fft_length+cp_length)\n self.fft_demod = gr_fft.fft_vcc(fft_length, True, win, True)\n self.ofdm_frame_acq = gr_papyrus.ofdm_frame_acquisition(occupied_tones,\n fft_length,\n cp_length, ks[0])\n # linklab, check current mode: non-contiguous OFDM or not\n if self._nc_filter:\n print '\\nMulti-band Filter Turned ON!'\n # linklab, non-contiguous filter\n self.ncofdm_filt = ncofdm_filt(self._fft_length, self._occupied_tones, self._carrier_map_bin)\n self.connect(self, self.chan_filt, self.ncofdm_filt)\n self.connect(self.ncofdm_filt, self.ofdm_sync) # into the synchronization alg.\n self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1)) # use sync freq. offset output to derotate input signal\n self.connect(self.ncofdm_filt, self.delay, (self.sigmix,0)) # signal to be derotated\n else :\n print '\\nMulti-band Filter Turned OFF!'\n self.connect(self, self.chan_filt)\n self.connect(self.chan_filt, self.ofdm_sync) # into the synchronization alg.\n self.connect((self.ofdm_sync,0), self.nco, (self.sigmix,1)) # use sync freq. 
offset output to derotate input signal\n self.connect(self.chan_filt, self.delay, (self.sigmix,0)) # signal to be derotated\n\n self.connect(self.sigmix, (self.sampler,0)) # sample off timing signal detected in sync alg\n self.connect((self.ofdm_sync,1), (self.sampler,1)) # timing signal to sample at\n\n self.connect((self.sampler,0), self.fft_demod) # send derotated sampled signal to FFT\n self.connect(self.fft_demod, (self.ofdm_frame_acq,0)) # find frame start and equalize signal\n self.connect((self.sampler,1), (self.ofdm_frame_acq,1)) # send timing signal to signal frame start\n self.connect((self.ofdm_frame_acq,0), (self,0)) # finished with fine/coarse freq correction,\n self.connect((self.ofdm_frame_acq,1), (self,1)) # frame and symbol timing, and equalization\n\n if logging:\n self.connect(self.chan_filt, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-chan_filt_c.dat\"))\n self.connect(self.fft_demod, gr.file_sink(gr.sizeof_gr_complex*fft_length, \"ofdm_receiver-fft_out_c.dat\"))\n self.connect(self.ofdm_frame_acq,\n gr.file_sink(gr.sizeof_gr_complex*occupied_tones, \"ofdm_receiver-frame_acq_c.dat\"))\n self.connect((self.ofdm_frame_acq,1), gr.file_sink(1, \"ofdm_receiver-found_corr_b.dat\"))\n self.connect(self.sampler, gr.file_sink(gr.sizeof_gr_complex*fft_length, \"ofdm_receiver-sampler_c.dat\"))\n self.connect(self.sigmix, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-sigmix_c.dat\"))\n self.connect(self.nco, gr.file_sink(gr.sizeof_gr_complex, \"ofdm_receiver-nco_c.dat\"))", "def __init__(self, process_chain, showWarnings=1, maxsec_rttrace=7200, analysis_overlap=0): # ppc\n self.process_chain = process_chain\n super(PadGenerator, self).__init__(showWarnings)\n self.show_warnings = showWarnings\n self.maxsec_rttrace = maxsec_rttrace # in seconds for EACH (x,y,z) rt_trace\n #self.scale_factor = scale_factor # ppc\n self.analysis_interval = self.process_chain.analysis_interval # ppc\n self.analysis_overlap = analysis_overlap\n self.analysis_samples = None\n self.starttime = None\n if showWarnings:\n self.warnfiltstr = 'always'\n else:\n self.warnfiltstr = 'ignore'", "def __init__(self):\r\n\r\n super(ElapsedTime, self).__init__()\r\n\r\n # Define private dictionary attributes.\r\n\r\n # Define private list attributes.\r\n self._lambdab_count = []\r\n\r\n # Define private scalar attributes.\r\n\r\n # Define public dictionary attributes.\r\n\r\n # Define public list attributes.\r\n\r\n # Define public scalar attributes.\r\n self.piT = 0.0 # Temperature stress pi factor.\r", "def __init__(self):\n this = _coin.new_SoMFBitMask()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self,hh=0,mm=0,ss=0):\r\n self.__hh = hh\r\n self.__mm = mm\r\n self.__ss = ss", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, limit):\r\n self.limit = limit\r\n self.clock = 0", "def __init__(self, mem, inp, outp):\n self.pc = 0\n self.mem = mem\n self.inp = inp\n self.outp = outp" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
clock_recovery_mm_ff(float omega, float gain_omega, float mu, float gain_mu, float omega_relative_limit = 0.001) -> digital_clock_recovery_mm_ff_sptr Mueller and Müller (M&M) based clock recovery block with float input, float output. This implements the Mueller and Müller (M&M) discrete-time error-tracking synchronizer.
def clock_recovery_mm_ff(*args, **kwargs): return _digital_swig.clock_recovery_mm_ff(*args, **kwargs)
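The query above documents the public factory for the M&M clock recovery block. Below is a minimal usage sketch, assuming the GNU Radio 3.7-era gr-digital Python API that this SWIG binding comes from; the input samples and loop gains are illustrative assumptions, not values taken from this dataset.

from gnuradio import gr, blocks, digital

class mm_sync_example(gr.top_block):
    def __init__(self, sps=4.0, gain_mu=0.175):
        gr.top_block.__init__(self, "mm_sync_example")
        # Toy float stream standing in for matched-filtered baseband samples.
        src = blocks.vector_source_f([0.5, -0.5] * 1000, False)
        # clock_recovery_mm_ff(omega, gain_omega, mu, gain_mu, omega_relative_limit)
        self.mm = digital.clock_recovery_mm_ff(
            sps,                       # omega: nominal samples per symbol
            0.25 * gain_mu * gain_mu,  # gain_omega: common critically damped choice
            0.5,                       # mu: initial fractional sample phase
            gain_mu,                   # gain_mu: timing-error loop gain
            0.005)                     # omega_relative_limit
        self.sink = blocks.vector_sink_f()
        self.connect(src, self.mm, self.sink)

if __name__ == "__main__":
    tb = mm_sync_example()
    tb.run()
    print(len(tb.sink.data()))  # roughly one output sample per recovered symbol

The block consumes about omega input samples per output sample, so the sink collects approximately one sample per recovered symbol.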
[ "def DMFNeuFluxMCDet(ch,DMm,DMsig,param):\n import os\n # FIX SCALING\n ## include years\n DM_annihilation_rate_Sun = DMSunAnnihilationRate(DMm,DMsig,param) # [eV]\n #DM_annihilation_rate_Sun = 1.6e21/param.sec\n normalization = np.sum((DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))) # [eV^3]\n \n ## BEGIN CREATING BINS ##\n # assuming neutrino binnum = 30\n nu_bin_num = 30\n point_num = 1000.0\n Emin = 1.0\n Emax = 1000.0\n \n E_nu_list = gt.LogSpaceEnergies(Emin,Emax,binnum = nu_bin_num)\n E_bin_width = [E_nu_list[i+1]-E_nu_list[i] for i in range(len(E_nu_list)-1)]\n E_nu_hpl = gt.MidPoint(gt.LogSpaceEnergies(Emin,Emax,binnum = nu_bin_num)) \n E_nu_bin = [0.0]*nu_bin_num # neutrino bins\n E_anu_bin = [0.0]*nu_bin_num # antineutrino bins\n E_bin_ratio = E_nu_list[1]/E_nu_list[0]\n ## END CREATING BINS ##\n \n for ineu in range(3):\n ## BEGIN READING DATA FROM MC ## \n \n MCdatapath = \"../data/myMC/trials/legion_ineu_\"+str(ineu)+\"_\"+param.name+\"/\"\n rparam = PC.PhysicsConstants()\n \n files = []\n for filename in os.listdir(MCdatapath):\n files.append(filename)\n \n # load all events\n evt = []\n for filename in files :\n file = open(MCdatapath+filename,'r')\n data = []\n gt.hreadfilev4(file,data,rparam)\n if gt.Compareparams(param,rparam):\n print \"Using : \"+filename\n for e in data :\n for ee in e:\n evt.append(ee)\n \n #del e,ee,data\n \n ## END READING DATA FROM MC ##\n \n # GET DARK MATTER DISTRIBUTION \n DM_pdf = DM_distribution(ch,DMm/param.GeV,ineu)\n \n for i,e in enumerate(evt):\n if len(e) > 4:\n neutrino = True\n \n family = e[0]\n try:\n next_family = evt[i+1]\n if family == next_family and e[1] != 2 :\n neutrino = False\n except:\n pass\n \n E_nu_in = e[2]\n E_nu_out = e[3]\n i = int(np.log(E_nu_out/E_nu_list[0])/np.log(E_bin_ratio))\n j = int(np.log(E_nu_in/E_nu_list[0])/np.log(E_bin_ratio))\n if neutrino:\n E_nu_bin[i] = E_nu_bin[i] + e[5]*(float(DM_pdf.PDF(E_nu_in)/DM_pdf.DMm)*E_bin_width[j]/(np.log(E_nu_list[i])-np.log(E_nu_list[i-1]))) # change to initial neutrino bin width\n #E_nu_bin[i] = E_nu_bin[i] + e[5]*(float(DM_pdf.PDF(E_nu_in)/DM_pdf.DMm))\n else :\n E_anu_bin[i] = E_anu_bin[i] + e[5]*(float(DM_pdf.PDF(E_nu_in)/DM_pdf.DMm)*E_bin_width[i])\n #E_anu_bin[i] = E_anu_bin[i] + e[5]*(float(DM_pdf.PDF(E_nu_in)/DM_pdf.DMm))\n \n #int_weight = integrate.quad(lambda E: PDF.PDF(E)/PDF.DMm,Emin,Emax)[0]\n # rescale\n E_nu_bin = [normalization*x/(point_num) for x in E_nu_bin]\n E_anu_bin = [normalization*x/(point_num) for x in E_anu_bin] \n \n inter_neu = interpolate.InterpolatedUnivariateSpline(E_nu_hpl,E_nu_bin)\n inter_aneu = interpolate.InterpolatedUnivariateSpline(E_nu_hpl,E_anu_bin)\n \n return [inter_neu, inter_aneu]", "def test_FEMM_Loss():\n # TODO stacking factor is disregarded for now but should be included\n\n # Reference values:\n rotor_speed = 4000 # RPM\n mechanical_power = 62.2952 # W\n rotor_core_loss = 0.0574995 # W\n stator_core_loss = 3.40587 # W\n prox_loss = 0.0585815 # W\n i_sqr_R_loss = 4.37018 # W\n magnet_loss = 1.38116 # W\n total_electromagnetic_losses = 9.27329 # W\n\n Id_ref = 0\n Iq_ref = 2 ** (1 / 2)\n\n n_step = 180\n Nrev = 1 / 2\n\n # readability\n machine = load(join(DATA_DIR, \"Machine\", \"SPMSM_020.json\"))\n machine.stator.winding.is_reverse_wind = True\n qs = machine.stator.winding.qs\n simu = Simu1(name=\"test_FEMM_Loss\", machine=machine)\n\n # Definition of the enforced output of the electrical module\n simu.input = InputCurrent(Id_ref=Id_ref, Iq_ref=Iq_ref, Na_tot=2048, N0=rotor_speed)\n\n # time discretization [s]\n # 
TODO without explicit time def. there is an error\n simu.input.time = ImportMatrixVal()\n simu.input.time.value = linspace(\n start=0, stop=60 / rotor_speed * Nrev, num=n_step, endpoint=False\n ) # n_step timesteps\n\n # Definition of the magnetic simulation: with periodicity\n simu.mag = MagFEMM(\n type_BH_stator=0, type_BH_rotor=0, is_periodicity_a=True, nb_worker=4\n )\n\n simu.mag.is_get_meshsolution = True # To get FEA mesh for latter post-procesing\n\n # --- Setup the Loss Model ------------------------------------------------------- #\n simu.loss = Loss()\n\n myIronLoss = LossModelBertotti()\n myWindingLoss = LossModelWinding()\n\n simu.loss.add_model(model=myIronLoss, part_label=\"Stator\")\n simu.loss.add_model(model=myWindingLoss, part_label=\"Stator\")\n\n # FEMM ex. Ch = 143 W / m³ / T² / Hz --> k_hy = 2.089 W / kg @ F_REF, B_REF\n # Ce = 0.53 W / m³ / T² / Hz² --> k_ed = 0.387 W / kg @ F_REF, B_REF\n\n # stator\n myIronLoss.name = \"Stator Iron Losses\"\n myIronLoss.k_hy = 2.089 # None\n myIronLoss.alpha_hy = 2\n myIronLoss.k_ed = 0.387 # None\n myIronLoss.alpha_ed = 2\n myIronLoss.k_ex = 0\n myIronLoss.alpha_ex = 1.5\n myIronLoss.group = \"core\" # this is the FEMM group name\n myIronLoss.get_meshsolution = True # to store loss density\n myIronLoss.N0 = [4000, 6000] # list of speed to override actual speed\n\n # rotor\n myRotorIronLoss = myIronLoss.copy()\n myRotorIronLoss.name = \"Rotor Iron Losses\"\n myRotorIronLoss.group = \"core\"\n\n simu.loss.add_model(model=myRotorIronLoss, part_label=\"Rotor\")\n\n # TODO load loss data with BH curve by default\n # TODO add M19 loss data to compare parameter estimates\n # LossData = ImportMatrixXls()\n # LossData.file_path = join(DATA_DIR, \"Material\", \"M400-50A.xlsx\")\n # LossData.is_transpose = False\n # LossData.sheet = \"LossData\"\n # LossData.skiprows = 2\n # LossData.usecols = None\n\n # machine.stator.mat_type.mag.LossData = LossData\n\n # --- Run the Loss Simulation ---------------------------------------------------- #\n out = Output(simu=simu)\n simu.run()\n\n loss = out.loss\n mshsol = loss.get_loss_dist(part_label=\"Stator\", index=0)\n\n # mshsol.plot_contour(label=\"LossDens\", itime=7)\n # mshsol.plot_contour(label=\"LossDensSum\", itime=0)\n\n P_mech = 2 * pi * rotor_speed / 60 * out.mag.Tem_av\n\n loss_stator_iron = loss.get_loss(part_label=\"Stator\", index=0)\n loss_rotor_iron = loss.get_loss(part_label=\"Rotor\", index=0)\n loss_stator_wind = loss.get_loss(part_label=\"Stator\", index=1)\n\n loss_st_iron = loss_stator_iron.get_along(\"Speed=4000\", \"time\")[\"Loss\"].mean()\n loss_ro_iron = loss_rotor_iron.get_along(\"Speed=4000\", \"time\")[\"Loss\"].mean()\n loss_st_wdg = loss_stator_wind.get_along(\"time\", \"phase\")[\"Loss\"].mean()\n\n print(f\"mechanical power = {P_mech} W\")\n print(f\"stator iron loss = {loss_st_iron} W\")\n print(f\"rotor iron loss = {loss_ro_iron} W\")\n print(f\"stator winding loss = {qs*loss_st_wdg} W\")\n\n delta = 5 / 100 # arbitary allowed relative difference\n\n assert mshsol is not None\n assert (abs(loss_st_iron - stator_core_loss) / stator_core_loss) <= delta\n # rotor loss is disregarded since absolute value seems to be too small\n # assert abs(loss_rotor_iron - rotor_core_loss)/rotor_core_loss <= delta\n assert loss_stator_wind is not None\n assert (abs(mechanical_power - P_mech) / mechanical_power) <= delta\n\n return out", "def test_ff_funcs():\n\n test_ray = construct_test_ray()\n\n nu = np.linspace(5, 30, 3)*1e9\n \n FFCalc = rt.BremCalculator()\n\n # The new 
functions that use the gaunt factor:\n j_nu = FFCalc.calc_j_nu_therm(test_ray, 1, nu)\n al = FFCalc.calc_al_BB(test_ray, 1, nu)\n therm = rt.B_nu(test_ray, nu)\n\n # related quantities for comparison:\n raltay = 2*C.C_LIGHT**-2 * C.K_B * np.outer(nu**2, T)\n source = j_nu/al\n\n # Qualitative and quantitative comparisons for expectation:\n diff = np.max(abs(therm - raltay)/therm)\n print('Max fractional difference between blackbody and Raleigh-Taylor: {:.2e}'.format(diff))\n\n diff = np.max(abs(therm - source)/therm)\n print('Max fractional difference between blackbody and j_nu/alpha: {:.2e}'.format(diff))\n\n plt.loglog( nu, therm[:,0])\n plt.loglog( nu, raltay[:,0], ls=':' )\n plt.loglog( nu, (j_nu/al)[:,0], ls='--')\n\n \n # Absorption should lead to alpha propto nu^-2.1 and flux propto nu^-0.1\n delta_al = al[:2,0]/al[1:,0]\n delta_nu = nu[:2]/nu[1:]\n al_slp = np.log10(delta_al)/np.log10(delta_nu)\n print('Power law slope for alpha propto nu^slope (expect -2.1): '+str(al_slp))\n\n tau = np.sum(al * (r2 - r1), axis=1)\n flux_abs = (therm.T*tau).T\n delta_f = flux_abs[:2,0]/flux_abs[1:,0]\n f_slp = np.log10(delta_f)/np.log10(delta_nu)\n print( 'Power law slope for flux propto nu^slope (expect -0.1): '+str(f_slp) )\n\n plt.show()", "def converged_MFS(mesh,plotx,ploty,plotz,k,incDir,incAmp=1.0,mintau=1,maxtau=20,steps=38):\n \n testx=plotx[0]\n testy=ploty[0]\n testz=plotz[0]\n taus=np.linspace(mintau,maxtau,steps+1)\n vals = np.zeros((steps+1,),dtype=np.complex)\n for i in xrange(steps+1):\n\tvals[i] = MFS(mesh,testx,testy,testz,k,incDir,incAmp,taus[i])[0]\n #vals=[MFS(mesh,testx,testy,testz,k,incDir,incAmp,tau) for tau in taus]\n vals = np.array([np.abs(vals[i]-vals[i+1]) for i in xrange(steps)])\n vals[np.where(vals==0)[0]]=100\n tau = taus[ np.where(vals==np.min(vals))[0][0] +1 ]\n print vals\n print \"MFS solution settled at tau: %.2f\" % (tau)\n return MFS(mesh,plotx,ploty,plotz,k,incDir,incAmp,tau)", "def mag_gain(self, gain=0x20):\n self._mag_gain = gain\n self.i2c.writeto_mem(self.ADDRESS_MAG, self.REGISTER_MAG_CRB_REG_M, self._mag_gain)\n if self._mag_gain == MAGGAIN_1_3:\n self._lsm303mag_gauss_lsb_xy = 1100.0\n self._lsm303mag_gauss_lsb_z = 980.0\n elif self._mag_gain == MAGGAIN_1_9:\n self._lsm303mag_gauss_lsb_xy = 855.0\n self._lsm303mag_gauss_lsb_z = 760.0\n elif self._mag_gain == MAGGAIN_2_5:\n self._lsm303mag_gauss_lsb_xy = 670.0\n self._lsm303mag_gauss_lsb_z = 600.0\n elif self._mag_gain == MAGGAIN_4_0:\n self._lsm303mag_gauss_lsb_xy = 450.0\n self._lsm303mag_gauss_lsb_z = 400.0\n elif self._mag_gain == MAGGAIN_4_7:\n self._lsm303mag_gauss_lsb_xy = 400.0\n self._lsm303mag_gauss_lsb_z = 355.0\n elif self._mag_gain == MAGGAIN_5_6:\n self._lsm303mag_gauss_lsb_xy = 330.0\n self._lsm303mag_gauss_lsb_z = 295.0\n elif self._mag_gain == MAGGAIN_8_1:\n self._lsm303mag_gauss_lsb_xy = 230.0\n self._lsm303mag_gauss_lsb_z = 205.0", "def mel_cepstrum(x2,K):\n\tprint \"Mel-Cepstrum\"\n\n\t# ZERO PADDING\n\tNFFT = int(pow(2, np.ceil(math.log(K, 2))))\n\n\tprint \"-> Data was padded: \",K,\" -> \",NFFT\n\n\t# Mel-Scaled Filterbank, full of magic numbers\n\tK20_filter = [ \n\t(0.0,154.759),\n\t(77.3795,249.2458),\n\t(163.3126, 
354.1774),\n\t(258.745,470.7084),\n\t(364.7267,600.121),\n\t(482.4239,743.8391),\n\t(613.1315,903.4442),\n\t(758.2878,1080.6923),\n\t(919.4901,1277.5338),\n\t(1098.5119,1496.1345),\n\t(1297.3232,1738.8999),\n\t(1518.1115,2008.501),\n\t(1763.3063,2307.9044),\n\t(2035.6053,2640.4045),\n\t(2338.0049,3009.6599),\n\t(2673.8324,3419.7335),\n\t(3046.7829,3875.1375),\n\t(3460.9602,4380.8829),\n\t(3920.9215,4942.5344),\n\t(4431.728, 5566.272)\n\t]\n\n\t# ----------------------------------\n\t# The final Mel Filter Cepstral Coefficients\n\t# have len(K20_filter) coefficients (rows) and \n\t# the operation is performed on each window.\n\t# ----------------------------------\n\t\n\n\tNUM_WINDOWS = len(x2)\n\tprint \"NUM WINDOWS\",NUM_WINDOWS\n\n\tQ = 14\t\n\tMFCC = np.zeros((Q,NUM_WINDOWS))\n\n\tplt.subplot(222)\n\tplt.title(\"Mel cepstrum coefficients\")\n\n\twin_id = 0\n\tfor win in x2:\n\n\t\t# DFT\t\n\t\tX2 = np.absolute(scipy.fftpack.fft( win, NFFT ))\t\n\t\tfreq = scipy.fftpack.fftfreq(NFFT, 1.0/SAMPLE_RATE)\n\t\n\t\tX2 = X2[len(X2)/2:]\n\t\tfreq = freq[ freq.shape[-1]/2: ]\n\n\t\tdf = freq[1]-freq[0]\n\t\tK = len(K20_filter)\t\n\t\t\n\t\tmks = np.zeros(K)\n\t\tfor i in xrange(0,K):\n\t\t\tll,ul = K20_filter[i]\n\t\t\tmks[i] = mel_filter(X2, df, ll, ul, NFFT)\n\n\t\tplt.plot(mks)\n\n\t\tc = np.zeros(Q)\n\t\tinvc = np.zeros((Q,K))\n\t\tfor q in xrange(0,Q):\n\t\t\tfor k in range(0,K):\n\t\t\t\tc[q] += np.log(mks[k])*np.cos( (np.pi*q*(2*k+1)) / (2*K) )\n\t\t\t\tinvc[q,k] = np.cos( np.cos( (np.pi*q*(2*k+1)) / (2*K) ) )\n\t\t\n\t\t# IDCT\n\t\tMFCC[:,win_id] = c\n\t\twin_id += 1\n\n\treturn MFCC", "def calc_fdm0thresh_reg(self, model):\n\n fdm0_thresh = model.vars.fdm0_thresh.value\n \n if fdm0_thresh == 0:\n reg = 0\n else:\n reg = int(fdm0_thresh / 8.0) - 1\n \n self._reg_write(model.vars.MODEM_TIMING_FDM0THRESH, reg)", "def frequency_to_mel(f):\n return 2595 * np.log10(1 + (f/2)/700.)", "def calc_fdm0thresh_val(self, model):\n\n timingbases = model.vars.timingbases_actual.value\n scale = model.vars.freq_gain_scale.value\n \n # only used in FDM0 mode which is active if timingbases = 0\n if timingbases > 0:\n model.vars.fdm0_thresh.value = 0\n return\n \n # nominal frequency deviation is +/- 64 we like to set this threshold\n # to half of that so 32 but if the FREQGAIN setting is scaled to avoid\n # saturation we need to scale this value accordingly\n fdm0_thresh = 32 * scale\n \n if fdm0_thresh < 8:\n fdm0_thresh = 8\n \n model.vars.fdm0_thresh.value = int(fdm0_thresh)", "def run(self):\n\n mu_low=self.ip['mb_at_mb']\n\n\n #-------------#\n # The running #\n #-------------#\n\n MZ = self.ip['Mz']\n alpha_at_mb = 1/self.ip['aMZinv']\n as_MZ = self.ip['asMZ']\n as_mb = self.ip['as_at_mb']\n\n if self.DM_type == \"D\" or self.DM_type == \"M\" or self.DM_type == \"C\":\n adm_eff = self.ADM_eff\n else:\n pass\n\n evolve1 = rge.RGE(self.gamma_QCD, 5)\n evolve2 = rge.RGE(self.gamma_QCD2, 5)\n if self.DM_type == \"D\" or self.DM_type == \"M\" or self.DM_type == \"C\":\n evolve8 = rge.RGE(adm_eff, 5)\n else:\n pass\n\n # Mixing in the dim.6 DM-SM sector\n #\n # Strictly speaking, MZ and mb should be defined at the same scale\n # (however, this is a higher-order difference)\n C_at_mb_QCD = np.dot(evolve2.U0_as2(as_MZ, as_mb),\\\n np.dot(evolve1.U0(as_MZ, as_mb),\\\n self.coeff_list_dm_dim5_dim6_dim7))\n C_at_mb_QED = np.dot(self.coeff_list_dm_dim5_dim6_dim7, self.gamma_QED)\\\n * np.log(mu_low/MZ) * alpha_at_mb/(4*np.pi)\\\n + np.dot(self.coeff_list_dm_dim5_dim6_dim7, self.gamma_QED2)\\\n * np.log(mu_low/MZ) 
* (alpha_at_mb/(4*np.pi))**2\n\n if self.DM_type == \"D\" or self.DM_type == \"M\" or self.DM_type == \"C\":\n # Mixing in the dim.6 SM-SM and dim.8 DM-SM sector\n\n DIM6_DIM8_init = np.hstack((self.coeff_list_sm_dim6, self.coeff_list_dm_dim8))\n\n DIM6_DIM8_at_mb = np.dot(evolve8.U0(as_MZ, as_mb), DIM6_DIM8_init)\n\n\n # Revert back to dictionary\n\n dict_coeff_mb = list_to_dict(C_at_mb_QCD + C_at_mb_QED, self.wc_name_list)\n if self.DM_type == \"D\" or self.DM_type == \"M\" or self.DM_type == \"C\":\n dict_dm_dim8 = list_to_dict(np.delete(DIM6_DIM8_at_mb, np.s_[0:100]), self.wc8_name_list)\n dict_sm_dim6 = list_to_dict(np.delete(DIM6_DIM8_at_mb, np.s_[100:112]), self.sm_name_list)\n dict_sm_lepton_dim6 = list_to_dict(self.coeff_list_sm_lepton_dim6, self.sm_lepton_name_list)\n\n dict_coeff_mb.update(dict_dm_dim8)\n dict_coeff_mb.update(dict_sm_dim6)\n dict_coeff_mb.update(dict_sm_lepton_dim6)\n\n return dict_coeff_mb", "def __init__(self, timestep=1.0 * unit.femtoseconds, tolerance=None, alpha=0.1, dt_max=10.0 * unit.femtoseconds, f_inc=1.1, f_dec=0.5, f_alpha=0.99, N_min=5):\n\n # Check input ranges.\n if not ((alpha > 0.0) and (alpha < 1.0)):\n raise Exception(\"alpha must be in the interval (0,1); specified alpha = %f\" % alpha)\n\n if tolerance is None:\n tolerance = 0 * unit.kilojoules_per_mole / unit.nanometers\n\n super(FIREMinimizationIntegrator, self).__init__(timestep)\n\n # Use high-precision constraints\n self.setConstraintTolerance(1.0e-8)\n\n self.addGlobalVariable(\"alpha\", alpha) # alpha\n self.addGlobalVariable(\"P\", 0) # P\n self.addGlobalVariable(\"N_neg\", 0.0)\n self.addGlobalVariable(\"fmag\", 0) # |f|\n self.addGlobalVariable(\"fmax\", 0) # max|f_i|\n self.addGlobalVariable(\"ndof\", 0) # number of degrees of freedom\n self.addGlobalVariable(\"ftol\", tolerance.value_in_unit_system(unit.md_unit_system)) # convergence tolerance\n self.addGlobalVariable(\"vmag\", 0) # |v|\n self.addGlobalVariable(\"converged\", 0) # 1 if convergence threshold reached, 0 otherwise\n self.addPerDofVariable(\"x0\", 0)\n self.addPerDofVariable(\"v0\", 0)\n self.addPerDofVariable(\"x1\", 0)\n self.addGlobalVariable(\"E0\", 0) # old energy associated with x0\n self.addGlobalVariable(\"dE\", 0)\n self.addGlobalVariable(\"restart\", 0)\n self.addGlobalVariable(\"delta_t\", timestep.value_in_unit_system(unit.md_unit_system))\n\n # Assess convergence\n # TODO: Can we more closely match the OpenMM criterion here?\n self.beginIfBlock('converged < 1')\n\n # Compute fmag = |f|\n #self.addComputeGlobal('fmag', '0.0')\n self.addComputeSum('fmag', 'f*f')\n self.addComputeGlobal('fmag', 'sqrt(fmag)')\n\n # Compute ndof\n self.addComputeSum('ndof', '1')\n\n self.addComputeSum('converged', 'step(ftol - fmag/ndof)')\n self.endBlock()\n\n # Enclose everything in a block that checks if we have already converged.\n self.beginIfBlock('converged < 1')\n\n # Store old positions and energy\n self.addComputePerDof('x0', 'x')\n self.addComputePerDof('v0', 'v')\n self.addComputeGlobal('E0', 'energy')\n\n # MD: Take a velocity Verlet step.\n self.addComputePerDof(\"v\", \"v+0.5*delta_t*f/m\")\n self.addComputePerDof(\"x\", \"x+delta_t*v\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v+0.5*delta_t*f/m+(x-x1)/delta_t\")\n self.addConstrainVelocities()\n\n self.addComputeGlobal('dE', 'energy - E0')\n\n # Compute fmag = |f|\n #self.addComputeGlobal('fmag', '0.0')\n self.addComputeSum('fmag', 'f*f')\n self.addComputeGlobal('fmag', 'sqrt(fmag)')\n # Compute vmag 
= |v|\n #self.addComputeGlobal('vmag', '0.0')\n self.addComputeSum('vmag', 'v*v')\n self.addComputeGlobal('vmag', 'sqrt(vmag)')\n\n # F1: Compute P = F.v\n self.addComputeSum('P', 'f*v')\n\n # F2: set v = (1-alpha) v + alpha \\hat{F}.|v|\n # Update velocities.\n # TODO: This must be corrected to be atomwise redirection of v magnitude along f\n self.addComputePerDof('v', '(1-alpha)*v + alpha*(f/fmag)*vmag')\n\n # Back up if the energy went up, protecing against NaNs\n self.addComputeGlobal('restart', '1')\n self.beginIfBlock('dE < 0')\n self.addComputeGlobal('restart', '0')\n self.endBlock()\n self.beginIfBlock('restart > 0')\n self.addComputePerDof('v', 'v0')\n self.addComputePerDof('x', 'x0')\n self.addComputeGlobal('P', '-1')\n self.endBlock()\n\n # If dt goes to zero, signal we've converged!\n dt_min = 1.0e-5 * timestep\n self.beginIfBlock('delta_t <= %f' % dt_min.value_in_unit_system(unit.md_unit_system))\n self.addComputeGlobal('converged', '1')\n self.endBlock()\n\n # F3: If P > 0 and the number of steps since P was negative > N_min,\n # Increase timestep dt = min(dt*f_inc, dt_max) and decrease alpha = alpha*f_alpha\n self.beginIfBlock('P > 0')\n # Update count of number of steps since P was negative.\n self.addComputeGlobal('N_neg', 'N_neg + 1')\n # If we have enough steps since P was negative, scale up timestep.\n self.beginIfBlock('N_neg > %d' % N_min)\n self.addComputeGlobal('delta_t', 'min(delta_t*%f, %f)' % (f_inc, dt_max.value_in_unit_system(unit.md_unit_system))) # TODO: Automatically convert dt_max to md units\n self.addComputeGlobal('alpha', 'alpha * %f' % f_alpha)\n self.endBlock()\n self.endBlock()\n\n # F4: If P < 0, decrease the timestep dt = dt*f_dec, freeze the system v=0,\n # and set alpha = alpha_start\n self.beginIfBlock('P < 0')\n self.addComputeGlobal('N_neg', '0.0')\n self.addComputeGlobal('delta_t', 'delta_t*%f' % f_dec)\n self.addComputePerDof('v', '0.0')\n self.addComputeGlobal('alpha', '%f' % alpha)\n self.endBlock()\n\n # Close block that checks for convergence.\n self.endBlock()", "def DMFluxneuDet(flavor,Enu,ch,DMm,DMsig,body,param,osc): \n ##B From Arxiv: 0506298 ec. 
21 & 24\n #DM_annihilation_rate_Earth = 1.0e14*(100*param.GeV/DMm)**2/param.sec #[annhilations/s]\n #DM_annihilation_rate_Sun = ((1.0*param.AU)/(param.EARTHRADIUS*param.km))**2*DM_annihilation_rate_Earth\n DM_annihilation_rate_Sun = float(np.sum(DMSunAnnihilationRate(DMm,DMsig,param)))# [eV]\n ##E\n \n flux = 0.0\n \n if param.neutype == \"neutrino\":\n if osc :\n for flv in range(3):\n #p = DMParameters(flv)\n #if param.name == \"STD\":\n flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flv*2,ch,DMm/param.GeV)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #flux = flux + (1.0/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #else :\n # flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flv*2,ch,DMm/param.GeV)*no.AvgNeuProb_RK(flv,flavor,Enu,param)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK(flv,flavor,Enu,param)\n else :\n #p = DMParameters(flavor)\n flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flavor*2,ch,DMm/param.GeV)\n #flux = flux + (1.0/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)\n return flux\n elif param.neutype == \"antineutrino\":\n if osc :\n for flv in range(3):\n #p = DMParameters(flv)\n #if param.name == \"STD\":\n flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flv*2+1,ch,DMm/param.GeV)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #flux = flux + (1.0/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK_STD(flv,flavor,Enu,param)\n #else :\n # flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flv*2+1,ch,DMm/param.GeV)*no.AvgNeuProb_RK(flv,flavor,Enu,param)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)*no.AvgNeuProb_RK(flv,flavor,Enu,param)\n else :\n #p = DMParameters(flavor)\n flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMSweFlux(Enu/param.GeV,flavor*2+1,ch,DMm/param.GeV)\n #flux = flux + (1.0/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)\n #flux = flux + (DM_annihilation_rate_Sun/(4.0*np.pi*param.AU**2))*DMFlux(Enu,DMm,ch,p)\n return flux\n else :\n print \"Wrong neutrino type.\"\n quit()", "def osher_kicking_bregman(A,f,mu,delta,tol=.00001,\n verbose=None):\n signal_length, num_codes = A.shape\n u = np.zeros(num_codes)\n v = np.zeros(num_codes)\n stop_tol = norm(f) * tol\n do_kick,done_kick=False,False\n num_iter = 0\n Au = np.dot(A,u)\n prev_error = np.inf \n cur_error = norm(Au - f)\n while prev_error - cur_error > stop_tol or do_kick:\n if verbose > 0:\n if num_iter % verbose == 0:\n print \"iteration: %d\" % num_iter\n print u\n num_iter += 1\n if do_kick:\n # update direction\n v_up = np.dot(A.T, f-Au)\n # zero indices\n I_0 = np.abs(u) < tol\n # number of steps to take to get out of stagnation\n if np.sum(I_0) > 0:\n s = np.min(np.ceil((mu * np.sign(v_up) - v)/ v_up)[I_0])\n v[I_0] = v[I_0] + s*v_up[I_0] \n else:\n v= v+ np.dot(A.T, f-Au)\n do_kick=False\n done_kick=True\n else: \n v= v+ np.dot(A.T, f-Au)\n done_kick=False\n u_new = 
delta*np.sign(v) * np.maximum(np.abs(v)-mu,0)\n # <= since we handle the case where we have a zero\n do_kick= norm(u_new - u) <= norm(u_new)*tol\n u = u_new\n Au = np.dot(A,u)\n prev_error = cur_error\n cur_error = norm(Au-f)\n if done_kick and do_kick:\n break\n return u, num_iter", "def mck2frf(f, M, C, K, indof=(0,), outdof=(0,), typefrf='v'):\r\n\r\n # Parse Input Parameters\r\n if typefrf.upper() == 'FLEXIBILITY' :\r\n typefrf = 'D'\r\n elif typefrf.upper() == 'MOBILITY' :\r\n typefrf = 'V'\r\n elif typefrf.upper() == 'ACCELERANCE' :\r\n typefrf = 'A'\r\n elif typefrf.upper() in ['D', 'V', 'A']:\r\n typefrf = typefrf.upper()\r\n else:\r\n raise Exception('Wrong input type!')\r\n\r\n # Find dimensions\r\n N = len(f)\r\n D = len(outdof)\r\n R = len(indof)\r\n\r\n # Allocate H MATRIX for output\r\n H = np.zeros((N,D,R), dtype=np.complex)\r\n\r\n # Main\r\n # Loop through frequencies and use inverse of system impedance matrix:\r\n # B(s)*X(s)=F(s) ==> B(s) in form of B=F/X\r\n # H(s) = inv(B(s)) ==> X(s)/F(s), so that H(s)*F(s)=X(s)\r\n\r\n for n in range(N): # Frequency index\r\n w = 2*pi*f[n] # Omega for this frequency\r\n Denom = -(w**2)*M+1j*w*C+K # Newton's equation in denominator of Hv\r\n Denom = np.matrix(Denom)\r\n InvDenom = inv(Denom); # Inverse denominator\r\n for r in range(R):\r\n W = np.ones_like(H[n,:,r])\r\n W.fill(w)\r\n if typefrf == 'D':\r\n H[n,:,r] = InvDenom[outdof,indof[r]]\r\n elif typefrf == 'V':\r\n H[n,:,r] = 1j*W*InvDenom[outdof,indof[r]]\r\n else:\r\n H[n,:,r] = -(W**2)*InvDenom[outdof,indof[r]]\r\n\r\n return H", "def osher_kicking_bregman(A,f,mu=1,delta,tol=.00001,\n verbose=None):\n signal_length, num_codes = A.shape\n u = np.zeros(num_codes)\n v = np.zeros(num_codes)\n stop_tol = norm(f) * tol\n do_kick,done_kick=False,False\n num_iter = 0\n Au = np.dot(A,u)\n prev_error = np.inf \n cur_error = norm(Au - f)\n while prev_error - cur_error > stop_tol or do_kick:\n if verbose > 0:\n if num_iter % verbose == 0:\n print \"iteration: %d\" % num_iter\n print u\n num_iter += 1\n if do_kick:\n # update direction\n v_up = np.dot(A.T, f-Au)\n # zero indices\n I_0 = np.abs(u) < tol\n # number of steps to take to get out of stagnation\n if np.sum(I_0) > 0:\n s = np.min(np.ceil((mu * np.sign(v_up) - v)/ v_up)[I_0])\n v[I_0] = v[I_0] + s*v_up[I_0] \n else:\n v= v+ np.dot(A.T, f-Au)\n do_kick=False\n done_kick=True\n else: \n v= v+ np.dot(A.T, f-Au)\n done_kick=False\n u_new = delta*np.sign(v) * np.maximum(np.abs(v)-mu,0)\n # <= since we handle the case where we have a zero\n do_kick= norm(u_new - u) <= norm(u_new)*tol\n u = u_new\n Au = np.dot(A,u)\n prev_error = cur_error\n cur_error = norm(Au-f)\n if done_kick and do_kick:\n break\n return u, num_iter", "def band_fit_M1(band_definition,results_data,verbose=False):\n\n # helper functions\n def f_moment(K,J):\n \"\"\" Generate coefficient matrix row for moments.\n \"\"\"\n f0 = J\n f1 = K/(J+1)\n f2 = (-1)**(J-0.5) / (2*math.sqrt(2)) * (2*J+1)/(J+1)\n if (K == 0):\n # case K = 0\n coefficients = [f0]\n elif (K == 1/2):\n # case K = 1/2\n coefficients = [f0,f1,f2]\n else:\n # case K generic\n coefficients = [f0,f1]\n\n return coefficients\n def f_trans(K,J):\n \"\"\" Generate coefficient matrix row for transitions.\n \"\"\"\n f0 = 0\n f1 = -math.sqrt(3/(4*math.pi)) * math.sqrt((J**2-K**2)/J)\n f2 = (-1)**(J-0.5) / math.sqrt(2) * f1\n if (K == 0):\n # case K = 0\n coefficients = [f0]\n elif (K == 1/2):\n # case K = 1/2\n coefficients = [f0,f1,f2]\n else:\n # case K generic\n coefficients = [f0,f1]\n\n return 
coefficients\n\n # setup\n K = band_definition.K\n\n # accumulate moment entries\n A_moment = []\n b_moment = []\n for J in band_definition.J_list_M1_moment:\n A_moment.append(f_moment(K,J))\n b_moment.append(results_data.moments[(band_definition.members[J],\"M1\")])\n\n # accumulate transition entries\n A_trans = []\n b_trans = []\n for J in band_definition.J_list_M1_trans:\n A_trans.append(f_trans(K,J))\n Ji = J\n Jf = J - 1\n M = band_definition.M[Ji]\n values = np.array(results_data.get_rme(band_definition.members[Jf],band_definition.members[Ji],\"M1\",M))\n values *= band_definition.signs[(Ji,M)]*band_definition.signs[(Jf,M)]\n b_trans.append(values)\n\n # combine moment and transition arrays\n A = np.array(A_moment+A_trans,float)\n b = np.array(b_moment+b_trans,float)\n if (verbose):\n print(\"J_list_M1_moment:\",band_definition.J_list_M1_moment)\n print(\"J_list_M1_trans:\",band_definition.J_list_M1_trans)\n print(\"Coefficient matrix\")\n print(A)\n print(\"Ordinate matrix\")\n print(b)\n\n # trap blatantly insufficient system\n if ( not (\n ((K==0) and (len(b)>=1))\n or\n ((K==1/2) and (len(b)>=3))\n or\n ((K>1/2) and (len(b)>=2))\n ) ):\n parameters = np.nan*np.ones((3,4))\n return parameters\n\n # solve system\n parameters = np.linalg.lstsq(A,b,rcond=None)[0]\n\n # upgrade parameter matrix to three rows\n # zero pads for parameter a2 if not already present\n if (parameters.shape == (1,4)):\n parameters = np.append(parameters,[[0,0,0,0],[0,0,0,0]],axis=0)\n elif (parameters.shape == (2,4)):\n parameters = np.append(parameters,[[0,0,0,0]],axis=0)\n if (verbose):\n print(\"Parameter matrix\")\n print(parameters)\n\n return parameters", "def mass_interpolation(teff, current_logg, fe_h, l_bol, e_lbol, e_teff=100, e_logg=0.5, e_fe_h=0.2, debug=False, show_todo=False):\r\n age, mass = age_mass_guess_parsec.do_age_mass_guess(\r\n np.array([teff, current_logg, l_bol, fe_h+0.1]), \r\n np.array([e_teff, e_logg, e_lbol, e_fe_h]), \r\n y,feh_iso\r\n )\r\n if debug:\r\n print('////////')\r\n print('MASS estimation:')\r\n print('teff, current_logg, fe_h+0.1, l_bol, e_lbol, e_teff,e_logg,e_fe_h')\r\n print(\"{:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f} {:.2f}\".format(teff, current_logg, fe_h+0.1, l_bol, e_lbol, e_teff,e_logg,e_fe_h))\r\n print('NB: Currently setting fe_h to fe_h+0.1 because of a known bias in the pipeline, underestimating [Fe/H] by ~0.1 dex')\r\n print('Mass: ',mass)\r\n print('Age: ',age)\r\n print('////////')\r\n\r\n return(mass)", "def test_invalid_fm_bias_frequency(self):\r\n with self.assertRaises(fygen.InvalidFrequencyError):\r\n self.fy.set_modulation(fm_bias_freq_hz=-0.1)\r\n with self.assertRaises(fygen.InvalidFrequencyError):\r\n self.fy.set_modulation(fm_bias_freq_uhz=-0.1)\r\n with self.assertRaises(fygen.InvalidFrequencyError):\r\n self.fy.set_modulation(fm_bias_freq_hz=1, fm_bias_freq_uhz=1)", "def test_wrong_ref_power_mfcc():\n MFCC(file_struct, FeatureTypes.framesync, ref_power=\"caca\")", "def mms_feeps_getgyrophase(trange=['2017-07-11/22:30', '2017-07-11/22:35'], probe='2', data_rate='brst', level='l2', datatype='electron'):\n mec_vars = mms.mec(trange=trange, probe=probe, data_rate=data_rate)\n if mec_vars is None:\n logging.error('Problem loading MEC data for calculating FEEPS gyrophase angles')\n\n qeci2sm = get('mms'+probe+'_mec_quat_eci_to_sm', units=False)\n qeci2bcs = get('mms'+probe+'_mec_quat_eci_to_bcs', units=False)\n rsun = get('mms'+probe+'_mec_r_sun_de421_eci', units=False)\n\n rsunbcs = np.zeros((len(rsun.times), 3))\n rduskbcs = 
np.zeros((len(rsun.times), 3))\n rdusksm = [0, 1, 0]\n\n for i in range(len(rsun.times)):\n q = qeci2bcs.y[i, :]\n # Quaternion rotation matrix:\n s = 1 # these quaternions are unit-qs\n R = np.array([[1 - 2*s*(q[2]**2 + q[3]**2), 2*s*(q[1]*q[2] - q[3]*q[0]), 2*s*(q[1]*q[3] + q[2]*q[0])], # ECI to BCS\n [2*s*(q[1]*q[2] + q[3]*q[0]), 1 - 2*s*(q[1]**2 + q[3]**2), 2*s*(q[2]*q[3] - q[1]*q[0])],\n [2*s*(q[1]*q[3] - q[2]*q[0]), 2*s*(q[2]*q[3] + q[1]*q[0]), 1 - 2*s*(q[1]**2 + q[2]**2)]])\n R = R.T\n rsunbcs[i, :] = np.array([R[0,0]*rsun.y[i,0] + R[1,0]*rsun.y[i,1] + R[2,0]*rsun.y[i,2], R[0,1]*rsun.y[i,0] + R[1,1]*rsun.y[i,1] + R[2,1]*rsun.y[i,2], R[0,2]*rsun.y[i,0] + R[1,2]*rsun.y[i,1] + R[2,2]*rsun.y[i,2]])\n\n # now make second vector for gyroplane reference, dusk direction (+Y in SM)\n q = qeci2sm.y[i, :]\n # Quaternion rotation matrix:\n s = 1 # these quaternions are unit-qs\n R2 = np.array([[1 - 2*s*(q[2]**2 + q[3]**2), 2*s*(q[1]*q[2] - q[3]*q[0]), 2*s*(q[1]*q[3] + q[2]*q[0])], # ECI to SM\n [2*s*(q[1]*q[2] + q[3]*q[0]), 1 - 2*s*(q[1]**2 + q[3]**2), 2*s*(q[2]*q[3] - q[1]*q[0])],\n [2*s*(q[1]*q[3] - q[2]*q[0]), 2*s*(q[2]*q[3] + q[1]*q[0]), 1 - 2*s*(q[1]**2 + q[2]**2)]])\n # going from SM to ECI, so invert R:\n R2 = np.linalg.inv(R2) # SM to ECI\n R2 = R2.T\n rduskeci = [R2[0,0]*rdusksm[0] + R2[1,0]*rdusksm[1] + R2[2,0]*rdusksm[2], R2[0,1]*rdusksm[0] + R2[1,1]*rdusksm[1] + R2[2,1]*rdusksm[2], R2[0,2]*rdusksm[0] + R2[1,2]*rdusksm[1] + R2[2,2]*rdusksm[2]]\n # Now convert to BCS:\n rduskbcs[i, :] = np.array([R[0,0]*rduskeci[0] + R[1,0]*rduskeci[1] + R[2,0]*rduskeci[2], R[0,1]*rduskeci[0] + R[1,1]*rduskeci[1] + R[2,1]*rduskeci[2], R[0,2]*rduskeci[0] + R[1,2]*rduskeci[1] + R[2,2]*rduskeci[2]])\n \n saved = store('mms'+probe+'_mec_r_sun_bcs', data = {'x': rsun.times, 'y': rsunbcs})\n if not saved:\n logging.error('Problem saving r_sun_bcs')\n\n saved = store('mms'+probe+'_mec_r_dusk_bcs', data = {'x': rsun.times, 'y': rduskbcs})\n if not saved:\n logging.error('Problem saving r_dusk_bcs')\n\n # Rotation matrices for FEEPS coord system (FCS) into body coordinate system (BCS):\n Ttop = np.array([[1./np.sqrt(2.), -1./np.sqrt(2.), 0], [1./np.sqrt(2.), 1./np.sqrt(2.), 0], [0, 0, 1]]).T\n Tbot = np.array([[-1./np.sqrt(2.), -1./np.sqrt(2.), 0], [-1./np.sqrt(2.), 1./np.sqrt(2.), 0], [0, 0, -1]]).T\n\n # Telescope vectors in FCS:\n # Electrons\n V1fcs = [0.347, -0.837, 0.423]\n V2fcs = [0.347, -0.837, -0.423]\n V3fcs = [0.837, -0.347, 0.423]\n V4fcs = [0.837, -0.347, -0.423]\n V5fcs = [-0.087, 0.000, 0.996]\n V9fcs = [0.837, 0.347, 0.423]\n V10fcs = [0.837, 0.347, -0.423]\n V11fcs = [0.347, 0.837, 0.423]\n V12fcs = [0.347, 0.837, -0.423]\n # Ions\n V6fcs = [0.104, 0.180, 0.978]\n V7fcs = [0.654, -0.377, 0.656]\n V8fcs = [0.654, -0.377, -0.656]\n\n # Now telescope vectors in Body Coordinate System:\n # Factors of -1 account for 180 deg shift between particle velocity and telescope normal direction:\n # Top:\n Vt1bcs = [-1.*(Ttop[0,0]*V1fcs[0] + Ttop[1,0]*V1fcs[1] + Ttop[2,0]*V1fcs[2]), \n -1.*(Ttop[0,1]*V1fcs[0] + Ttop[1,1]*V1fcs[1] + Ttop[2,1]*V1fcs[2]), \n -1.*(Ttop[0,2]*V1fcs[0] + Ttop[1,2]*V1fcs[1] + Ttop[2,2]*V1fcs[2])]\n Vt2bcs = [-1.*(Ttop[0,0]*V2fcs[0] + Ttop[1,0]*V2fcs[1] + Ttop[2,0]*V2fcs[2]), \n -1.*(Ttop[0,1]*V2fcs[0] + Ttop[1,1]*V2fcs[1] + Ttop[2,1]*V2fcs[2]), \n -1.*(Ttop[0,2]*V2fcs[0] + Ttop[1,2]*V2fcs[1] + Ttop[2,2]*V2fcs[2])]\n Vt3bcs = [-1.*(Ttop[0,0]*V3fcs[0] + Ttop[1,0]*V3fcs[1] + Ttop[2,0]*V3fcs[2]), \n -1.*(Ttop[0,1]*V3fcs[0] + Ttop[1,1]*V3fcs[1] + Ttop[2,1]*V3fcs[2]), \n 
-1.*(Ttop[0,2]*V3fcs[0] + Ttop[1,2]*V3fcs[1] + Ttop[2,2]*V3fcs[2])]\n Vt4bcs = [-1.*(Ttop[0,0]*V4fcs[0] + Ttop[1,0]*V4fcs[1] + Ttop[2,0]*V4fcs[2]), \n -1.*(Ttop[0,1]*V4fcs[0] + Ttop[1,1]*V4fcs[1] + Ttop[2,1]*V4fcs[2]), \n -1.*(Ttop[0,2]*V4fcs[0] + Ttop[1,2]*V4fcs[1] + Ttop[2,2]*V4fcs[2])]\n Vt5bcs = [-1.*(Ttop[0,0]*V5fcs[0] + Ttop[1,0]*V5fcs[1] + Ttop[2,0]*V5fcs[2]), \n -1.*(Ttop[0,1]*V5fcs[0] + Ttop[1,1]*V5fcs[1] + Ttop[2,1]*V5fcs[2]), \n -1.*( Ttop[0,2]*V5fcs[0] + Ttop[1,2]*V5fcs[1] + Ttop[2,2]*V5fcs[2])]\n Vt6bcs = [-1.*(Ttop[0,0]*V6fcs[0] + Ttop[1,0]*V6fcs[1] + Ttop[2,0]*V6fcs[2]), \n -1.*(Ttop[0,1]*V6fcs[0] + Ttop[1,1]*V6fcs[1] + Ttop[2,1]*V6fcs[2]), \n -1.*(Ttop[0,2]*V6fcs[0] + Ttop[1,2]*V6fcs[1] + Ttop[2,2]*V6fcs[2])]\n Vt7bcs = [-1.*(Ttop[0,0]*V7fcs[0] + Ttop[1,0]*V7fcs[1] + Ttop[2,0]*V7fcs[2]), \n -1.*(Ttop[0,1]*V7fcs[0] + Ttop[1,1]*V7fcs[1] + Ttop[2,1]*V7fcs[2]), \n -1.*(Ttop[0,2]*V7fcs[0] + Ttop[1,2]*V7fcs[1] + Ttop[2,2]*V7fcs[2])]\n Vt8bcs = [-1.*(Ttop[0,0]*V8fcs[0] + Ttop[1,0]*V8fcs[1] + Ttop[2,0]*V8fcs[2]), \n -1.*( Ttop[0,1]*V8fcs[0] + Ttop[1,1]*V8fcs[1] + Ttop[2,1]*V8fcs[2]), \n -1.*(Ttop[0,2]*V8fcs[0] + Ttop[1,2]*V8fcs[1] + Ttop[2,2]*V8fcs[2])]\n Vt9bcs = [-1.*(Ttop[0,0]*V9fcs[0] + Ttop[1,0]*V9fcs[1] + Ttop[2,0]*V9fcs[2]), \n -1.*(Ttop[0,1]*V9fcs[0] + Ttop[1,1]*V9fcs[1] + Ttop[2,1]*V9fcs[2]), \n -1.*(Ttop[0,2]*V9fcs[0] + Ttop[1,2]*V9fcs[1] + Ttop[2,2]*V9fcs[2])]\n Vt10bcs = [-1.*(Ttop[0,0]*V10fcs[0] + Ttop[1,0]*V10fcs[1] + Ttop[2,0]*V10fcs[2]), \n -1.*(Ttop[0,1]*V10fcs[0] + Ttop[1,1]*V10fcs[1] + Ttop[2,1]*V10fcs[2]), \n -1.*(Ttop[0,2]*V10fcs[0] + Ttop[1,2]*V10fcs[1] + Ttop[2,2]*V10fcs[2])]\n Vt11bcs = [-1.*(Ttop[0,0]*V11fcs[0] + Ttop[1,0]*V11fcs[1] + Ttop[2,0]*V11fcs[2]), \n -1.*(Ttop[0,1]*V11fcs[0] + Ttop[1,1]*V11fcs[1] + Ttop[2,1]*V11fcs[2]), \n -1.*(Ttop[0,2]*V11fcs[0] + Ttop[1,2]*V11fcs[1] + Ttop[2,2]*V11fcs[2])]\n Vt12bcs = [-1.*(Ttop[0,0]*V12fcs[0] + Ttop[1,0]*V12fcs[1] + Ttop[2,0]*V12fcs[2]), \n -1.*(Ttop[0,1]*V12fcs[0] + Ttop[1,1]*V12fcs[1] + Ttop[2,1]*V12fcs[2]), \n -1.*(Ttop[0,2]*V12fcs[0] + Ttop[1,2]*V12fcs[1] + Ttop[2,2]*V12fcs[2])]\n # Bottom:\n Vb1bcs = [-1.*(Tbot[0,0]*V1fcs[0] + Tbot[1,0]*V1fcs[1] + Tbot[2,0]*V1fcs[2]), \n -1.*(Tbot[0,1]*V1fcs[0] + Tbot[1,1]*V1fcs[1] + Tbot[2,1]*V1fcs[2]), \n -1.*(Tbot[0,2]*V1fcs[0] + Tbot[1,2]*V1fcs[1] + Tbot[2,2]*V1fcs[2])]\n Vb2bcs = [-1.*(Tbot[0,0]*V2fcs[0] + Tbot[1,0]*V2fcs[1] + Tbot[2,0]*V2fcs[2]), \n -1.*(Tbot[0,1]*V2fcs[0] + Tbot[1,1]*V2fcs[1] + Tbot[2,1]*V2fcs[2]), \n -1.*(Tbot[0,2]*V2fcs[0] + Tbot[1,2]*V2fcs[1] + Tbot[2,2]*V2fcs[2])]\n Vb3bcs = [-1.*(Tbot[0,0]*V3fcs[0] + Tbot[1,0]*V3fcs[1] + Tbot[2,0]*V3fcs[2]), \n -1.*(Tbot[0,1]*V3fcs[0] + Tbot[1,1]*V3fcs[1] + Tbot[2,1]*V3fcs[2]), \n -1.*(Tbot[0,2]*V3fcs[0] + Tbot[1,2]*V3fcs[1] + Tbot[2,2]*V3fcs[2])]\n Vb4bcs = [-1.*(Tbot[0,0]*V4fcs[0] + Tbot[1,0]*V4fcs[1] + Tbot[2,0]*V4fcs[2]), \n -1.*(Tbot[0,1]*V4fcs[0] + Tbot[1,1]*V4fcs[1] + Tbot[2,1]*V4fcs[2]), \n -1.*(Tbot[0,2]*V4fcs[0] + Tbot[1,2]*V4fcs[1] + Tbot[2,2]*V4fcs[2])]\n Vb5bcs = [-1.*(Tbot[0,0]*V5fcs[0] + Tbot[1,0]*V5fcs[1] + Tbot[2,0]*V5fcs[2]), \n -1.*(Tbot[0,1]*V5fcs[0] + Tbot[1,1]*V5fcs[1] + Tbot[2,1]*V5fcs[2]), \n -1.*(Tbot[0,2]*V5fcs[0] + Tbot[1,2]*V5fcs[1] + Tbot[2,2]*V5fcs[2])]\n Vb6bcs = [-1.*(Tbot[0,0]*V6fcs[0] + Tbot[1,0]*V6fcs[1] + Tbot[2,0]*V6fcs[2]), \n -1.*(Tbot[0,1]*V6fcs[0] + Tbot[1,1]*V6fcs[1] + Tbot[2,1]*V6fcs[2]), \n -1.*( Tbot[0,2]*V6fcs[0] + Tbot[1,2]*V6fcs[1] + Tbot[2,2]*V6fcs[2])]\n Vb7bcs = [-1.*(Tbot[0,0]*V7fcs[0] + Tbot[1,0]*V7fcs[1] + Tbot[2,0]*V7fcs[2]), \n -1.*(Tbot[0,1]*V7fcs[0] + 
Tbot[1,1]*V7fcs[1] + Tbot[2,1]*V7fcs[2]), \n -1.*(Tbot[0,2]*V7fcs[0] + Tbot[1,2]*V7fcs[1] + Tbot[2,2]*V7fcs[2])]\n Vb8bcs = [-1.*(Tbot[0,0]*V8fcs[0] + Tbot[1,0]*V8fcs[1] + Tbot[2,0]*V8fcs[2]), \n -1.*(Tbot[0,1]*V8fcs[0] + Tbot[1,1]*V8fcs[1] + Tbot[2,1]*V8fcs[2]), \n -1.*(Tbot[0,2]*V8fcs[0] + Tbot[1,2]*V8fcs[1] + Tbot[2,2]*V8fcs[2])]\n Vb9bcs = [-1.*(Tbot[0,0]*V9fcs[0] + Tbot[1,0]*V9fcs[1] + Tbot[2,0]*V9fcs[2]), \n -1.*(Tbot[0,1]*V9fcs[0] + Tbot[1,1]*V9fcs[1] + Tbot[2,1]*V9fcs[2]), \n -1.*(Tbot[0,2]*V9fcs[0] + Tbot[1,2]*V9fcs[1] + Tbot[2,2]*V9fcs[2])]\n Vb10bcs = [-1.*(Tbot[0,0]*V10fcs[0] + Tbot[1,0]*V10fcs[1] + Tbot[2,0]*V10fcs[2]), \n -1.*(Tbot[0,1]*V10fcs[0] + Tbot[1,1]*V10fcs[1] + Tbot[2,1]*V10fcs[2]), \n -1.*(Tbot[0,2]*V10fcs[0] + Tbot[1,2]*V10fcs[1] + Tbot[2,2]*V10fcs[2])]\n Vb11bcs = [-1.*(Tbot[0,0]*V11fcs[0] + Tbot[1,0]*V11fcs[1] + Tbot[2,0]*V11fcs[2]), \n -1.*(Tbot[0,1]*V11fcs[0] + Tbot[1,1]*V11fcs[1] + Tbot[2,1]*V11fcs[2]), \n -1.*(Tbot[0,2]*V11fcs[0] + Tbot[1,2]*V11fcs[1] + Tbot[2,2]*V11fcs[2])]\n Vb12bcs = [-1.*(Tbot[0,0]*V12fcs[0] + Tbot[1,0]*V12fcs[1] + Tbot[2,0]*V12fcs[2]), \n -1.*(Tbot[0,1]*V12fcs[0] + Tbot[1,1]*V12fcs[1] + Tbot[2,1]*V12fcs[2]), \n -1.*(Tbot[0,2]*V12fcs[0] + Tbot[1,2]*V12fcs[1] + Tbot[2,2]*V12fcs[2])]\n\n fgm_vars = mms.fgm(trange=[time_double(trange[0])-600, time_double(trange[1])+600], probe=probe, data_rate='srvy')\n if fgm_vars is None:\n logging.error('Problem loading FGM vars for calculating FEEPS gyrophase angles')\n\n # interpolate the FGM var to the MEC var timestamps\n tinterpol('mms'+probe+'_fgm_b_bcs_srvy_l2_bvec', 'mms'+probe+'_mec_r_sun_bcs', newname='mms'+probe+'_fgm_b_bcs_srvy_l2_bvec_int')\n\n B = get('mms'+probe+'_fgm_b_bcs_srvy_l2_bvec_int')\n\n # Now calculate gyrophase\n # Telescope vectors perp to B:\n Tperp = np.zeros((len(rsunbcs[:, 0]), 3, 24))\n\n # Gyrophase:\n phi = np.zeros((len(rsunbcs[:, 0]), 24))\n\n for i in range(len(rsunbcs[:, 0])):\n uB = B.y[i,:]/np.sqrt(B.y[i,0]**2 + B.y[i,1]**2 + B.y[i,2]**2)\n # Sun vector perp to B:\n Sperp = np.cross(np.cross(uB, rsunbcs[i, :]/np.sqrt(np.nansum(rsunbcs[i, :]**2))), uB)\n # Dusk vector perp to B:\n Dperp = np.cross(np.cross(uB, rduskbcs[i, :]/np.sqrt(np.nansum(rduskbcs[i, :]**2))), uB)\n Tperp[i, :, 0] = np.cross(np.cross(uB, Vt1bcs), uB)\n Tperp[i, :, 1] = np.cross(np.cross(uB, Vt2bcs), uB)\n Tperp[i, :, 2] = np.cross(np.cross(uB, Vt3bcs), uB)\n Tperp[i, :, 3] = np.cross(np.cross(uB, Vt4bcs), uB)\n Tperp[i, :, 4] = np.cross(np.cross(uB, Vt5bcs), uB)\n Tperp[i, :, 5] = np.cross(np.cross(uB, Vt6bcs), uB)\n Tperp[i, :, 6] = np.cross(np.cross(uB, Vt7bcs), uB)\n Tperp[i, :, 7] = np.cross(np.cross(uB, Vt8bcs), uB)\n Tperp[i, :, 8] = np.cross(np.cross(uB, Vt9bcs), uB)\n Tperp[i, :, 9] = np.cross(np.cross(uB, Vt10bcs), uB)\n Tperp[i, :, 10] = np.cross(np.cross(uB, Vt11bcs), uB)\n Tperp[i, :, 11] = np.cross(np.cross(uB, Vt12bcs), uB)\n Tperp[i, :, 12] = np.cross(np.cross(uB, Vb1bcs), uB)\n Tperp[i, :, 13] = np.cross(np.cross(uB, Vb2bcs), uB)\n Tperp[i, :, 14] = np.cross(np.cross(uB, Vb3bcs), uB)\n Tperp[i, :, 15] = np.cross(np.cross(uB, Vb4bcs), uB)\n Tperp[i, :, 16] = np.cross(np.cross(uB, Vb5bcs), uB)\n Tperp[i, :, 17] = np.cross(np.cross(uB, Vb6bcs), uB)\n Tperp[i, :, 18] = np.cross(np.cross(uB, Vb7bcs), uB)\n Tperp[i, :, 19] = np.cross(np.cross(uB, Vb8bcs), uB)\n Tperp[i, :, 20] = np.cross(np.cross(uB, Vb9bcs), uB)\n Tperp[i, :, 21] = np.cross(np.cross(uB, Vb10bcs), uB)\n Tperp[i, :, 22] = np.cross(np.cross(uB, Vb11bcs), uB)\n Tperp[i, :, 23] = np.cross(np.cross(uB, Vb12bcs), uB)\n\n for 
j in range(24):\n th1 = np.arccos(np.nansum(Tperp[i,:,j] * Sperp)/(np.sqrt(np.nansum(Tperp[i,:,j]**2))*np.sqrt(np.nansum(Sperp**2))))\n th2 = np.arccos(np.nansum(Tperp[i,:,j] * Dperp)/(np.sqrt(np.nansum(Tperp[i,:,j]**2))*np.sqrt(np.nansum(Dperp**2))))\n # strip the units\n th1 = th1.value\n th2 = th2.value\n if th1 <= np.pi/2.0 and th2 < np.pi/2:\n phi[i, j] = 2*np.pi - th1\n if th1 < np.pi/2.0 and th2 >= np.pi/2.0:\n phi[i, j] = th1\n if th1 > np.pi/2.0 and th2 <= np.pi/2.0:\n phi[i, j] = 270.0*np.pi/180.0 - th2\n if th1 >= np.pi/2.0 and th2 > np.pi/2.0:\n phi[i, j] = th1\n \n saved = store('mms'+probe+'_epd_feeps_'+data_rate+'_gyrophase', data={'x': rsun.times, 'y': phi*180./np.pi})\n if not saved:\n logging.error('Problem saving gyrophase angles')\n return\n\n options('mms'+probe+'_epd_feeps_'+data_rate+'_gyrophase', 'yrange', [0, 360.0])\n\n # Gyrophase always returns on time stamps from MEC data, get those closest to FEEPS time stamps:\n eyes = mms_feeps_active_eyes(trange, probe, data_rate, datatype, level)\n sensor_types = ['top', 'bottom']\n\n feepst = get('mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_spinsectnum')\n\n indt = np.zeros(len(feepst.times), dtype='int32')\n gpd = get('mms'+probe+'_epd_feeps_'+data_rate+'_gyrophase')\n\n for i in range(len(feepst.times)):\n indt[i] = np.argwhere(np.abs(gpd.times - feepst.times[i]) == np.min(np.abs(gpd.times - feepst.times[i]))).flatten()[0]\n\n # Gyrophase always returns all 24 FEEPS telescopes, downselect based on species:\n iT = np.array([np.array(eyes[sensor_types[0]])-1, np.array(eyes[sensor_types[0]])+11]).flatten().tolist()\n gp_data = np.zeros((len(gpd.times[indt]), len(iT)))\n\n #return (iT, gp_data, gpd)\n for i in range(len(iT)):\n gp_data[:, i] = gpd.y[indt, iT[i]]\n \n saved = store('mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_gyrophase', data = {'x': gpd.times[indt], 'y': gp_data})\n\n if saved:\n options('mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_gyrophase', 'yrange', [0.0, 360.0])\n return 'mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_gyrophase'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) -> digital_cma_equalizer_cc_sptr __init__(self, p) -> digital_cma_equalizer_cc_sptr
def __init__(self, *args): this = _digital_swig.new_digital_cma_equalizer_cc_sptr(*args) try: self.this.append(this) except: self.this = this
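The two overloads above belong to the SWIG smart-pointer wrapper; user code normally constructs the block through the gr-digital factory instead. A minimal sketch follows, assuming the GNU Radio 3.7 factory signature cma_equalizer_cc(num_taps, modulus, mu, sps); that signature is not shown in this record, so treat it as an assumption.

from gnuradio import gr, blocks, digital

tb = gr.top_block()
# Toy constant-modulus (BPSK-like) stream at 2 samples per symbol.
src = blocks.vector_source_c([1 + 0j, 1 + 0j, -1 + 0j, -1 + 0j] * 500, False)
eq = digital.cma_equalizer_cc(15,    # num_taps: equalizer filter length
                              1.0,   # modulus of the target constellation
                              0.01,  # mu: adaptation step size
                              2)     # sps: samples per symbol
sink = blocks.vector_sink_c()
tb.connect(src, eq, sink)
tb.run()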
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pn_correlator_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n super(CorrelogramPooling3D, self).__init__()", "def __init__(self, *args):\n this = _digital_swig.new_digital_additive_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_scrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, coeff):\n self.coeff = coeff", "def __init__(self, conditionValue):", "def __init__(self, *args):\n this = _digital_swig.new_digital_correlate_access_code_tag_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, args, phase):\n self.args = args\n self.phase = phase", "def __init__(self):\n # Flag this instance as compiled now\n self.is_compiled = True\n \n super(HCompositeState2ProcDef, self).__init__(name='HCompositeState2ProcDef', num_nodes=152, edges=[])\n \n # Add the edges\n self.add_edges([(8, 39), (39, 1), (1, 40), (40, 6), (6, 41), (41, 9), (9, 42), (42, 11), (8, 43), (43, 13), (6, 44), (44, 14), (6, 45), (45, 15), (6, 46), (46, 16), (11, 47), (47, 17), (11, 48), (48, 18), (11, 49), (49, 19), (11, 50), (50, 20), (9, 51), (51, 12), (12, 52), (52, 21), (12, 53), (53, 22), (12, 54), (54, 23), (71, 55), (55, 119), (72, 56), (56, 120), (73, 57), (57, 121), (74, 58), (58, 122), (75, 59), (59, 123), (76, 60), (60, 124), (77, 61), (61, 125), (78, 62), (62, 126), (79, 63), (63, 127), (80, 64), (64, 128), (81, 65), (65, 129), (82, 66), (66, 130), (83, 67), (67, 131), (84, 68), (68, 132), (85, 69), (69, 133), (86, 70), (70, 134), (13, 24), (24, 88), (14, 25), (25, 89), (15, 26), (26, 90), (16, 27), (27, 91), (11, 28), (28, 92), (17, 29), (29, 93), (18, 30), (30, 94), (19, 31), (31, 95), (20, 32), (32, 96), (12, 33), (33, 97), (21, 34), (34, 98), (22, 35), (35, 99), (23, 36), (36, 100), (8, 37), (37, 101), (1, 38), (38, 102), (5, 0), (0, 135), (0, 136), (0, 137), (0, 138), (0, 139), (0, 140), (0, 141), (0, 142), (0, 143), (0, 144), (0, 145), (0, 146), (0, 147), (0, 148), (0, 149), (0, 150), (0, 151), (136, 1), (7, 2), (2, 4), (4, 3), (3, 87), (10, 4), (7, 5), (137, 6), (71, 103), (72, 104), (73, 105), (74, 106), (75, 107), (76, 108), (77, 109), (78, 110), (79, 111), (80, 112), (81, 113), (82, 114), (83, 115), (84, 116), (85, 117), (86, 118), (135, 8), (138, 9), (139, 13), (140, 14), (141, 15), (142, 16), (143, 11), (144, 12), (145, 17), (146, 18), (147, 19), (148, 20), (149, 21), (150, 22), (151, 23), (8, 10), (103, 87), (104, 88), (105, 89), (106, 90), (107, 91), (108, 92), (109, 93), (110, 94), (111, 95), (112, 96), (113, 97), (114, 98), (115, 99), (116, 100), (117, 101), (118, 
102)])\n # Set the graph attributes\n self[\"mm__\"] = pickle.loads(\"\"\"(lp1\nS'UMLRT2Kiltera_MM'\np2\na.\"\"\")\n self[\"name\"] = \"\"\"CompositeState2ProcDef\"\"\"\n self[\"GUID__\"] = UUID('d5e9d5a2-c202-49ef-a74d-abc96e53b4fe')\n \n # Set the node attributes\n self.vs[0][\"mm__\"] = \"\"\"ApplyModel\"\"\"\n self.vs[0][\"GUID__\"] = UUID('4f03b792-e84e-4c84-bbae-3072cf6a293c')\n self.vs[1][\"name\"] = \"\"\"localdef1\"\"\"\n self.vs[1][\"classtype\"] = \"\"\"LocalDef\"\"\"\n self.vs[1][\"mm__\"] = \"\"\"LocalDef\"\"\"\n self.vs[1][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[1][\"GUID__\"] = UUID('00ff12a2-181f-4200-81a2-75850a58d99f')\n self.vs[2][\"mm__\"] = \"\"\"match_contains\"\"\"\n self.vs[2][\"GUID__\"] = UUID('938cefd8-a8a4-4aaf-be3a-e728f6d4b308')\n self.vs[3][\"mm__\"] = \"\"\"hasAttribute_S\"\"\"\n self.vs[3][\"GUID__\"] = UUID('a1001fa8-fbfb-4491-a555-e688afae9a35')\n self.vs[4][\"name\"] = \"\"\"state1\"\"\"\n self.vs[4][\"classtype\"] = \"\"\"State\"\"\"\n self.vs[4][\"mm__\"] = \"\"\"State\"\"\"\n self.vs[4][\"cardinality\"] = \"\"\"+\"\"\"\n self.vs[4][\"GUID__\"] = UUID('2de4b186-4d1b-49c5-a24d-837430de86c3')\n self.vs[5][\"mm__\"] = \"\"\"paired_with\"\"\"\n self.vs[5][\"GUID__\"] = UUID('6864a62e-0c16-41ec-85cb-5304c66b2167')\n self.vs[6][\"name\"] = \"\"\"new1\"\"\"\n self.vs[6][\"classtype\"] = \"\"\"New\"\"\"\n self.vs[6][\"mm__\"] = \"\"\"New\"\"\"\n self.vs[6][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[6][\"GUID__\"] = UUID('6e918d39-761f-4145-980d-e035e8956e4c')\n self.vs[7][\"mm__\"] = \"\"\"MatchModel\"\"\"\n self.vs[7][\"GUID__\"] = UUID('9d3c9ff3-d943-45c5-9a68-4b94f8ae4f55')\n self.vs[8][\"name\"] = \"\"\"procdef1\"\"\"\n self.vs[8][\"classtype\"] = \"\"\"ProcDef\"\"\"\n self.vs[8][\"mm__\"] = \"\"\"ProcDef\"\"\"\n self.vs[8][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[8][\"GUID__\"] = UUID('b36423c7-5f8e-4565-9124-9dedad23d1e1')\n self.vs[9][\"name\"] = \"\"\"par1\"\"\"\n self.vs[9][\"classtype\"] = \"\"\"Par\"\"\"\n self.vs[9][\"mm__\"] = \"\"\"Par\"\"\"\n self.vs[9][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[9][\"GUID__\"] = UUID('64a7af82-a641-4084-b5c3-db88c40c7b99')\n self.vs[10][\"type\"] = \"\"\"ruleDef\"\"\"\n self.vs[10][\"mm__\"] = \"\"\"backward_link\"\"\"\n self.vs[10][\"GUID__\"] = UUID('869d5d52-235c-4240-af78-31e36a1f47d7')\n self.vs[11][\"name\"] = \"\"\"inst1\"\"\"\n self.vs[11][\"classtype\"] = \"\"\"Inst\"\"\"\n self.vs[11][\"mm__\"] = \"\"\"Inst\"\"\"\n self.vs[11][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[11][\"GUID__\"] = UUID('a4079b80-e123-4015-96c9-8e664b15e053')\n self.vs[12][\"name\"] = \"\"\"inst2\"\"\"\n self.vs[12][\"classtype\"] = \"\"\"Inst\"\"\"\n self.vs[12][\"mm__\"] = \"\"\"Inst\"\"\"\n self.vs[12][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[12][\"GUID__\"] = UUID('a3eef854-3648-462d-be65-3eca75bdebf7')\n self.vs[13][\"name\"] = \"\"\"name1\"\"\"\n self.vs[13][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[13][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[13][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[13][\"GUID__\"] = UUID('9b94a56a-dd11-415e-8663-6f429c2c0753')\n self.vs[14][\"name\"] = \"\"\"name2\"\"\"\n self.vs[14][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[14][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[14][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[14][\"GUID__\"] = UUID('d90c8a9c-eee1-48af-9308-abbb6052af8f')\n self.vs[15][\"name\"] = \"\"\"name3\"\"\"\n self.vs[15][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[15][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[15][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[15][\"GUID__\"] = 
UUID('8e53fe34-6fcc-4059-8042-db911db6e812')\n self.vs[16][\"name\"] = \"\"\"name4\"\"\"\n self.vs[16][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[16][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[16][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[16][\"GUID__\"] = UUID('4f23669c-d236-4a8d-b52b-1f37ba406f94')\n self.vs[17][\"name\"] = \"\"\"name5\"\"\"\n self.vs[17][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[17][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[17][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[17][\"GUID__\"] = UUID('91bc841f-2211-4638-a340-584da8347c98')\n self.vs[18][\"name\"] = \"\"\"name6\"\"\"\n self.vs[18][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[18][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[18][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[18][\"GUID__\"] = UUID('8a109a2d-2d70-4318-8a72-46c784206075')\n self.vs[19][\"name\"] = \"\"\"name7\"\"\"\n self.vs[19][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[19][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[19][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[19][\"GUID__\"] = UUID('5a95e461-d2f8-435b-9e77-af581d91ee29')\n self.vs[20][\"name\"] = \"\"\"name8\"\"\"\n self.vs[20][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[20][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[20][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[20][\"GUID__\"] = UUID('c600b1fb-8c9c-4ef2-b597-8137d9bdfb08')\n self.vs[21][\"name\"] = \"\"\"name9\"\"\"\n self.vs[21][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[21][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[21][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[21][\"GUID__\"] = UUID('708cd8f1-6e3d-4dfa-af00-18e9d43a01a4')\n self.vs[22][\"name\"] = \"\"\"name10\"\"\"\n self.vs[22][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[22][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[22][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[22][\"GUID__\"] = UUID('132e8292-4471-498d-a202-3d2abc7ab5ca')\n self.vs[23][\"name\"] = \"\"\"name11\"\"\"\n self.vs[23][\"classtype\"] = \"\"\"Name\"\"\"\n self.vs[23][\"mm__\"] = \"\"\"Name\"\"\"\n self.vs[23][\"cardinality\"] = \"\"\"1\"\"\"\n self.vs[23][\"GUID__\"] = UUID('fdb484f0-a8b5-4b9e-86a6-b679b1012005')\n self.vs[24][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[24][\"GUID__\"] = UUID('2a8418a3-cb80-496b-a1e0-7419de2ae33f')\n self.vs[25][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[25][\"GUID__\"] = UUID('4f37af75-2b77-45c1-93d1-8aae7cf14cc8')\n self.vs[26][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[26][\"GUID__\"] = UUID('54ef6fcc-cb9a-494e-aa36-f44525e4a0b0')\n self.vs[27][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[27][\"GUID__\"] = UUID('22858e97-7bbe-460d-b44b-14652852a592')\n self.vs[28][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[28][\"GUID__\"] = UUID('c3fcdb66-34da-4c82-b163-e5ab5f04e5c0')\n self.vs[29][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[29][\"GUID__\"] = UUID('88c90884-ae83-49af-96da-74f03c7f80ce')\n self.vs[30][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[30][\"GUID__\"] = UUID('1e3c412d-8372-4ba5-8a56-9d82407b79d0')\n self.vs[31][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[31][\"GUID__\"] = UUID('a500f0c7-1535-40ed-802e-a883517bbc64')\n self.vs[32][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[32][\"GUID__\"] = UUID('ed658c5a-81c3-4938-920e-98953de205ba')\n self.vs[33][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[33][\"GUID__\"] = UUID('49be0f69-494e-4f45-8923-582778c6828a')\n self.vs[34][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[34][\"GUID__\"] = UUID('e3709cc9-ed04-44f9-b8a7-a8f9f5939f3b')\n self.vs[35][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[35][\"GUID__\"] = 
UUID('8a657ede-e29d-4a28-9c1c-4c95a3ecd3b6')\n self.vs[36][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[36][\"GUID__\"] = UUID('b3cd8a7c-7deb-4b8c-9ed2-4a22bd6b5a39')\n self.vs[37][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[37][\"GUID__\"] = UUID('2287628a-d22b-427b-bdfd-d24d04bd46ad')\n self.vs[38][\"mm__\"] = \"\"\"hasAttribute_T\"\"\"\n self.vs[38][\"GUID__\"] = UUID('65083504-7423-4b8f-8b3e-7dc369fa08db')\n self.vs[39][\"associationType\"] = \"\"\"p\"\"\"\n self.vs[39][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[39][\"GUID__\"] = UUID('dd5a6c0f-e438-4f23-ad0f-acd02dd4afe8')\n self.vs[40][\"associationType\"] = \"\"\"p\"\"\"\n self.vs[40][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[40][\"GUID__\"] = UUID('d4bcb4b5-37a3-4d04-895f-d689ea89c825')\n self.vs[41][\"associationType\"] = \"\"\"p\"\"\"\n self.vs[41][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[41][\"GUID__\"] = UUID('b860cc3c-a70a-4c66-9bb9-c1fd1395b23c')\n self.vs[42][\"associationType\"] = \"\"\"p\"\"\"\n self.vs[42][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[42][\"GUID__\"] = UUID('97c4f558-4e1a-4a85-82e4-e0500374d80f')\n self.vs[43][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[43][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[43][\"GUID__\"] = UUID('58acb66a-2008-4ef3-975f-1db1219bd830')\n self.vs[44][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[44][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[44][\"GUID__\"] = UUID('5e14b29f-f5e6-4d6d-bfac-8616df51ab56')\n self.vs[45][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[45][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[45][\"GUID__\"] = UUID('57ac3f37-c63f-4a74-bc90-a846fb38e370')\n self.vs[46][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[46][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[46][\"GUID__\"] = UUID('9fc39a10-40e0-47f4-93c6-eccc9fdbd594')\n self.vs[47][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[47][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[47][\"GUID__\"] = UUID('00e09455-e8b5-414e-8eee-abbe55b7a65d')\n self.vs[48][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[48][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[48][\"GUID__\"] = UUID('17170197-069c-44fa-9239-dec8622935ee')\n self.vs[49][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[49][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[49][\"GUID__\"] = UUID('a4654b49-ee9c-4f69-a4e2-b8101c7086d2')\n self.vs[50][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[50][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[50][\"GUID__\"] = UUID('f9e0515c-b37c-4c22-8fe9-49c98acd152d')\n self.vs[51][\"associationType\"] = \"\"\"p\"\"\"\n self.vs[51][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[51][\"GUID__\"] = UUID('2c60fd52-acfa-4cba-8c04-53c9affdc4db')\n self.vs[52][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[52][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[52][\"GUID__\"] = UUID('f8f3ccd7-1cd5-4a57-b6a8-d35ba5bef6e4')\n self.vs[53][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[53][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[53][\"GUID__\"] = UUID('7c94a074-10cb-4087-acd1-09f74b36fee5')\n self.vs[54][\"associationType\"] = \"\"\"channelNames\"\"\"\n self.vs[54][\"mm__\"] = \"\"\"directLink_T\"\"\"\n self.vs[54][\"GUID__\"] = UUID('857117de-5cb0-4717-8c19-a916f3913d44')\n self.vs[55][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[55][\"GUID__\"] = UUID('be66b7a4-a420-4307-9c3e-15a25480f612')\n self.vs[56][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[56][\"GUID__\"] = 
UUID('8b06f23c-dc76-480c-a91b-2a89628187bb')\n self.vs[57][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[57][\"GUID__\"] = UUID('a30e8284-77ae-44b5-83fe-950b7a7cf134')\n self.vs[58][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[58][\"GUID__\"] = UUID('d79efc53-0195-4578-9e6e-f325fa1b9347')\n self.vs[59][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[59][\"GUID__\"] = UUID('4c20c97d-c715-4ddc-ba86-f4b8f93342f2')\n self.vs[60][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[60][\"GUID__\"] = UUID('b6badd99-bce6-4ecb-95f2-2a56eb8e31ec')\n self.vs[61][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[61][\"GUID__\"] = UUID('784aca61-7263-4894-ada3-514b7dc1263c')\n self.vs[62][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[62][\"GUID__\"] = UUID('b751aba0-9035-400e-81b0-a05af5ff13f8')\n self.vs[63][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[63][\"GUID__\"] = UUID('f5e9aa39-f124-44ff-bf9e-835d8231fa1c')\n self.vs[64][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[64][\"GUID__\"] = UUID('adb9f451-c62d-4218-aebc-d7065b89a497')\n self.vs[65][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[65][\"GUID__\"] = UUID('71250a4b-2989-43ad-8a29-d2c8f7011af6')\n self.vs[66][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[66][\"GUID__\"] = UUID('ef32cf77-f92d-4364-b997-484a66740660')\n self.vs[67][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[67][\"GUID__\"] = UUID('c3c01696-8c64-45f7-a598-6e443991711f')\n self.vs[68][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[68][\"GUID__\"] = UUID('0481036c-254e-4f46-a7c3-6f4a865fe7bd')\n self.vs[69][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[69][\"GUID__\"] = UUID('f98b92f3-81c2-403a-ba4b-29cb117d561a')\n self.vs[70][\"mm__\"] = \"\"\"rightExpr\"\"\"\n self.vs[70][\"GUID__\"] = UUID('c32d7a5a-e311-48d5-b3fc-2a284673c4aa')\n self.vs[71][\"name\"] = \"\"\"eq1\"\"\"\n self.vs[71][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[71][\"GUID__\"] = UUID('0abf26da-d349-4bad-be96-014c8959a4cd')\n self.vs[72][\"name\"] = \"\"\"eq2\"\"\"\n self.vs[72][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[72][\"GUID__\"] = UUID('af92b37e-0c63-4fe5-a906-7cd312cad172')\n self.vs[73][\"name\"] = \"\"\"eq3\"\"\"\n self.vs[73][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[73][\"GUID__\"] = UUID('108e8752-a98c-44df-b24a-3b958c450846')\n self.vs[74][\"name\"] = \"\"\"eq4\"\"\"\n self.vs[74][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[74][\"GUID__\"] = UUID('340c5b78-fbbc-4734-ac7d-8a1f953679e3')\n self.vs[75][\"name\"] = \"\"\"eq5\"\"\"\n self.vs[75][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[75][\"GUID__\"] = UUID('63513c17-c285-47ce-9b5c-e658df31b8bf')\n self.vs[76][\"name\"] = \"\"\"eq6\"\"\"\n self.vs[76][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[76][\"GUID__\"] = UUID('dfd958e8-0fd4-4975-b28f-dab1df8a6858')\n self.vs[77][\"name\"] = \"\"\"eq7\"\"\"\n self.vs[77][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[77][\"GUID__\"] = UUID('1cd0e4a3-2b1a-42c8-bdf3-f98e156d8265')\n self.vs[78][\"name\"] = \"\"\"eq8\"\"\"\n self.vs[78][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[78][\"GUID__\"] = UUID('d7c1a1c4-4b83-4e3c-9e1f-2212a30343b1')\n self.vs[79][\"name\"] = \"\"\"eq9\"\"\"\n self.vs[79][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[79][\"GUID__\"] = UUID('aea37644-aa22-4e82-92a7-17d85ad5acf3')\n self.vs[80][\"name\"] = \"\"\"eq10\"\"\"\n self.vs[80][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[80][\"GUID__\"] = UUID('f7db1558-e110-4984-b825-62e4ce6f1324')\n self.vs[81][\"name\"] = \"\"\"eq11\"\"\"\n self.vs[81][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[81][\"GUID__\"] = UUID('a0722a1f-aaa4-4ac3-99d3-5bea37c15e79')\n self.vs[82][\"name\"] = 
\"\"\"eq12\"\"\"\n self.vs[82][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[82][\"GUID__\"] = UUID('ddbd74ac-21f7-4724-a2a8-b78c7389a8f4')\n self.vs[83][\"name\"] = \"\"\"eq13\"\"\"\n self.vs[83][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[83][\"GUID__\"] = UUID('a8fe40b1-4985-43d2-a874-0741d09ba4ae')\n self.vs[84][\"name\"] = \"\"\"eq14\"\"\"\n self.vs[84][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[84][\"GUID__\"] = UUID('281fd930-5f47-4b53-949b-e274ec95fdef')\n self.vs[85][\"name\"] = \"\"\"eq15\"\"\"\n self.vs[85][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[85][\"GUID__\"] = UUID('2e2199ae-3f44-4d76-b322-4b617a8c58db')\n self.vs[86][\"name\"] = \"\"\"eq16\"\"\"\n self.vs[86][\"mm__\"] = \"\"\"Equation\"\"\"\n self.vs[86][\"GUID__\"] = UUID('25ad532f-5f8d-433a-bb65-507c97469275')\n self.vs[87][\"name\"] = \"\"\"isComposite\"\"\"\n self.vs[87][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[87][\"Type\"] = \"\"\"'Bool'\"\"\"\n self.vs[87][\"GUID__\"] = UUID('75b3e3d3-2cfc-4444-b65e-2fc5a8b7ae5d')\n self.vs[88][\"name\"] = \"\"\"literal\"\"\"\n self.vs[88][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[88][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[88][\"GUID__\"] = UUID('426aea1c-8a9f-4651-b297-9ec3c1c1352e')\n self.vs[89][\"name\"] = \"\"\"literal\"\"\"\n self.vs[89][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[89][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[89][\"GUID__\"] = UUID('284a3a1d-8a2d-4cef-9551-98d424afe038')\n self.vs[90][\"name\"] = \"\"\"literal\"\"\"\n self.vs[90][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[90][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[90][\"GUID__\"] = UUID('3b7a1cdc-9ffb-48db-994f-497c06449458')\n self.vs[91][\"name\"] = \"\"\"literal\"\"\"\n self.vs[91][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[91][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[91][\"GUID__\"] = UUID('40cff5ab-cab2-4fab-bbc1-c8039fe486ac')\n self.vs[92][\"name\"] = \"\"\"name\"\"\"\n self.vs[92][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[92][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[92][\"GUID__\"] = UUID('b9e0ab51-1690-44de-875b-773826f9e420')\n self.vs[93][\"name\"] = \"\"\"literal\"\"\"\n self.vs[93][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[93][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[93][\"GUID__\"] = UUID('708e489d-456a-4974-9198-73334eb3d1d8')\n self.vs[94][\"name\"] = \"\"\"literal\"\"\"\n self.vs[94][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[94][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[94][\"GUID__\"] = UUID('bdabcea3-c164-4f6b-a54f-be957abedb49')\n self.vs[95][\"name\"] = \"\"\"literal\"\"\"\n self.vs[95][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[95][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[95][\"GUID__\"] = UUID('22f79d9e-a9bf-41b5-9559-4560af4afc10')\n self.vs[96][\"name\"] = \"\"\"literal\"\"\"\n self.vs[96][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[96][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[96][\"GUID__\"] = UUID('56b242b2-5ebd-4a02-a1bb-829ecc6822a7')\n self.vs[97][\"name\"] = \"\"\"name\"\"\"\n self.vs[97][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[97][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[97][\"GUID__\"] = UUID('46680774-a892-41cb-8005-809b5eea2003')\n self.vs[98][\"name\"] = \"\"\"literal\"\"\"\n self.vs[98][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[98][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[98][\"GUID__\"] = UUID('c8c58f99-e94c-442b-a747-c873a43b903b')\n self.vs[99][\"name\"] = \"\"\"literal\"\"\"\n self.vs[99][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[99][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[99][\"GUID__\"] = 
UUID('18aa7445-341a-40e8-b09c-70904b3f9994')\n self.vs[100][\"name\"] = \"\"\"literal\"\"\"\n self.vs[100][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[100][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[100][\"GUID__\"] = UUID('9f63580a-288f-4d14-b275-b96062163c5a')\n self.vs[101][\"name\"] = \"\"\"pivot\"\"\"\n self.vs[101][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[101][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[101][\"GUID__\"] = UUID('c8777ba9-8c6e-4582-a082-81f2f34e6016')\n self.vs[102][\"name\"] = \"\"\"pivot\"\"\"\n self.vs[102][\"mm__\"] = \"\"\"Attribute\"\"\"\n self.vs[102][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[102][\"GUID__\"] = UUID('ce2a6aa7-c8ce-4cee-807c-cd4de96a08bf')\n self.vs[103][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[103][\"GUID__\"] = UUID('8119a747-1d59-4f48-83a6-16869a919672')\n self.vs[104][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[104][\"GUID__\"] = UUID('b7c5aeaf-7e59-4a81-9616-bb2474f2660f')\n self.vs[105][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[105][\"GUID__\"] = UUID('ced29f38-6ce7-449c-823f-34aaab43899b')\n self.vs[106][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[106][\"GUID__\"] = UUID('e29dc6da-439d-4a9d-9d40-e87aa9fbebd3')\n self.vs[107][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[107][\"GUID__\"] = UUID('af49357e-a46d-4ee5-ab4a-b6d6ef261df0')\n self.vs[108][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[108][\"GUID__\"] = UUID('ff49109b-ccc0-4635-9a33-d88c1d675bc6')\n self.vs[109][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[109][\"GUID__\"] = UUID('423ad2a2-0a19-4192-902d-706965800fef')\n self.vs[110][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[110][\"GUID__\"] = UUID('5864c11a-7792-4549-999f-bc86a4246314')\n self.vs[111][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[111][\"GUID__\"] = UUID('7182946d-d5f6-4a7c-acaa-d4eeb97133db')\n self.vs[112][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[112][\"GUID__\"] = UUID('d965f0b2-048d-490c-81f5-2b18446941de')\n self.vs[113][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[113][\"GUID__\"] = UUID('6e4c8ba9-6ab0-44d3-9cc6-c181772a1e3b')\n self.vs[114][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[114][\"GUID__\"] = UUID('5633c48b-1add-43eb-9789-1bece00f8079')\n self.vs[115][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[115][\"GUID__\"] = UUID('d2c598e2-09b1-4c12-acff-871f6662238a')\n self.vs[116][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[116][\"GUID__\"] = UUID('33a09bc8-cfa9-4367-834e-41bfae2fa7b6')\n self.vs[117][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[117][\"GUID__\"] = UUID('858b7fe0-edf0-4eda-ae81-6477f6499fb7')\n self.vs[118][\"mm__\"] = \"\"\"leftExpr\"\"\"\n self.vs[118][\"GUID__\"] = UUID('a19a8472-6b86-4aee-b2bb-d66b4d26aeea')\n self.vs[119][\"name\"] = \"\"\"true\"\"\"\n self.vs[119][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[119][\"Type\"] = \"\"\"'Bool'\"\"\"\n self.vs[119][\"GUID__\"] = UUID('ba19f7ae-c0e3-43f5-9c87-2e08b3ff7d4e')\n self.vs[120][\"name\"] = \"\"\"sh\"\"\"\n self.vs[120][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[120][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[120][\"GUID__\"] = UUID('b78c45bc-2ecd-438a-a905-dbd90a4edeed')\n self.vs[121][\"name\"] = \"\"\"exit_in\"\"\"\n self.vs[121][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[121][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[121][\"GUID__\"] = UUID('0bbc3f31-d9e3-49a7-b213-d874f9d6e0ac')\n self.vs[122][\"name\"] = \"\"\"exack_in\"\"\"\n self.vs[122][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[122][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[122][\"GUID__\"] = UUID('e58ce45d-49e1-44b9-8a0a-78c1f1305afd')\n self.vs[123][\"name\"] = \"\"\"sh_in\"\"\"\n 
self.vs[123][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[123][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[123][\"GUID__\"] = UUID('34144527-9a72-44f3-8afe-f49bbe5fac47')\n self.vs[124][\"name\"] = \"\"\"C\"\"\"\n self.vs[124][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[124][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[124][\"GUID__\"] = UUID('61ed1583-c983-4369-b0de-0c3ca82aba52')\n self.vs[125][\"name\"] = \"\"\"enp\"\"\"\n self.vs[125][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[125][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[125][\"GUID__\"] = UUID('a4bfdfad-6e17-46b1-9939-685bd4cbfb62')\n self.vs[126][\"name\"] = \"\"\"exit_in\"\"\"\n self.vs[126][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[126][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[126][\"GUID__\"] = UUID('92007092-a080-4cb3-ba90-cbc8e6637732')\n self.vs[127][\"name\"] = \"\"\"exack_in\"\"\"\n self.vs[127][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[127][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[127][\"GUID__\"] = UUID('1a61b1e5-e926-45cd-bf6a-60adeef0d338')\n self.vs[128][\"name\"] = \"\"\"sh_in\"\"\"\n self.vs[128][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[128][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[128][\"GUID__\"] = UUID('95c52a1f-42ae-4384-bcfc-0cab537ee1cf')\n self.vs[129][\"name\"] = \"\"\"H\"\"\"\n self.vs[129][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[129][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[129][\"GUID__\"] = UUID('146f9ec3-3f2d-48a1-92ac-a5546268e069')\n self.vs[130][\"name\"] = \"\"\"exit_in\"\"\"\n self.vs[130][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[130][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[130][\"GUID__\"] = UUID('c52aec39-171b-4710-8150-b343a557bebf')\n self.vs[131][\"name\"] = \"\"\"exack_in\"\"\"\n self.vs[131][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[131][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[131][\"GUID__\"] = UUID('e2ab70c6-01a2-420a-9e96-bb238fe29689')\n self.vs[132][\"name\"] = \"\"\"sh_in\"\"\"\n self.vs[132][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[132][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[132][\"GUID__\"] = UUID('f476d190-6014-4c6a-a27f-c3f45b9d10ba')\n self.vs[133][\"name\"] = \"\"\"procdef\"\"\"\n self.vs[133][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[133][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[133][\"GUID__\"] = UUID('5a678e2c-8444-4e53-a430-5f0b1a603c07')\n self.vs[134][\"name\"] = \"\"\"localdefcompstate\"\"\"\n self.vs[134][\"mm__\"] = \"\"\"Constant\"\"\"\n self.vs[134][\"Type\"] = \"\"\"'String'\"\"\"\n self.vs[134][\"GUID__\"] = UUID('dfac50b9-4956-45c0-b8a5-14f609e078e5')\n self.vs[135][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[135][\"GUID__\"] = UUID('632f235b-d18d-4939-b4b8-9d38a7505cc8')\n self.vs[136][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[136][\"GUID__\"] = UUID('b5724e21-522d-415c-8538-59b279583ff4')\n self.vs[137][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[137][\"GUID__\"] = UUID('a05b3ebc-4b86-43f0-adcd-d46d8c4d773e')\n self.vs[138][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[138][\"GUID__\"] = UUID('b43788ff-9ab6-4bec-b8ae-c76b10985fc3')\n self.vs[139][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[139][\"GUID__\"] = UUID('16c28ca0-6429-4540-9505-e8057aad958a')\n self.vs[140][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[140][\"GUID__\"] = UUID('e97eb3e2-8fca-41a6-9599-a173acee4c22')\n self.vs[141][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[141][\"GUID__\"] = UUID('eda0c12e-26c0-4296-9d34-62cbe764e151')\n self.vs[142][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[142][\"GUID__\"] = 
UUID('336e11b9-cbc3-41b4-9c07-041ed4ba1453')\n self.vs[143][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[143][\"GUID__\"] = UUID('31297722-e0a1-4e03-8c28-44abe1930256')\n self.vs[144][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[144][\"GUID__\"] = UUID('51fcd9e5-c817-4710-9a24-d080b3f8fa71')\n self.vs[145][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[145][\"GUID__\"] = UUID('7acc9f40-a78c-47ac-8e38-fc7f4647c2f1')\n self.vs[146][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[146][\"GUID__\"] = UUID('c94eef8e-f552-4b53-ba99-f21c13dfca4a')\n self.vs[147][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[147][\"GUID__\"] = UUID('09d8138f-8be9-4a30-af93-cc714a2570db')\n self.vs[148][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[148][\"GUID__\"] = UUID('792865ce-75f2-41cb-9c42-74f831e96a76')\n self.vs[149][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[149][\"GUID__\"] = UUID('fb0f0ebe-59c0-4ffc-8370-2ead7eb40f18')\n self.vs[150][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[150][\"GUID__\"] = UUID('23a9a8da-507e-4d11-a8e0-f5b721f01f96')\n self.vs[151][\"mm__\"] = \"\"\"apply_contains\"\"\"\n self.vs[151][\"GUID__\"] = UUID('38fd864c-df5e-4e85-9838-2e665d75637c')", "def __init__(self, Y=0.0, C1=0.0, C2=0.0, *args, **kwargs): # default: Black\n cB.__init__(self, *args, **kwargs)\n self.type = 'YCC' # can be used instead of isinstance on an object\n self.Y, self.C1, self.C2 = Y, C1, C2", "def __init__(self, coefs_tup, setpoint_value, range_tup, integration_samples = 5, diff_filter_samples = 4):\r\n \r\n self.started = False\r\n self.Kp, self.Ki, self.Kd = coefs_tup\r\n \r\n if integration_samples < 3:\r\n integration_samples = 3\r\n print('Integration samples number is set default 3')\r\n \r\n self.integr_deque = collections.deque([(0,0)]* integration_samples, maxlen = integration_samples)\r\n \r\n if diff_filter_samples < 2:\r\n diff_filter_samples = 2\r\n print('Diff filter samples number is set to default 2')\r\n\r\n self.diff_deque = collections.deque([(0,0)]* diff_filter_samples, maxlen = diff_filter_samples)\r\n \r\n self.setpoint_value = setpoint_value\r\n \r\n self.min_value, self.max_value = range_tup\r\n if self.min_value >= self.max_value:\r\n self.min_value = 0\r\n self.max_value = 1\r\n print('Values range is set to default (0,1)')", "def __init__(self, affine_dynamic_output, P, alpha, a, b):\n\n QuadraticControlLyapunovFunction.__init__(self, affine_dynamic_output, P, alpha)\n self.a = a\n self.b = b", "def Cer(self) -> Cer:", "def __init__(self, Y=0.0, Co=0.0, Cg=0.0, *args, **kwargs): # default: Black\n cB.__init__(self, *args, **kwargs)\n self.type = 'YCoCg' # can be used instead of isinstance on an object\n self.Y, self.Co, self.Cg = Y, Co, Cg", "def __init__(self, a, b):\n self.a = make_generator(a)\n self.b = make_generator(b)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
cma_equalizer_cc(int num_taps, float modulus, float mu, int sps) -> digital_cma_equalizer_cc_sptr Implements constant modulus adaptive filter on complex stream
def cma_equalizer_cc(*args, **kwargs): return _digital_swig.cma_equalizer_cc(*args, **kwargs)
[ "def closure_amplitudes(amps, n=7):\n arr = populate_symmamparray(amps, n=n) # fringe amp array\n nn = 0\n\n cas = np.zeros(int(comb(n, 4)))\n\n for ii in range(n - 3):\n for jj in range(n - ii - 3):\n for kk in range(n - jj - ii - 3):\n for ll in range(n - jj - ii - kk - 3):\n cas[nn + ll] = arr[ii, jj + ii + 1] \\\n * arr[ll + ii + jj + kk + 3, kk + jj + ii + 2] \\\n / (arr[ii, kk + ii + jj + 2] *\n arr[jj + ii + 1, ll + ii + jj + kk + 3])\n nn = nn + ll + 1\n\n return cas", "def test_asl_quantification_filter_asl_quant_wp_casl():\n\n control = np.ones(TEST_VOLUME_DIMENSIONS)\n label = (1 - 0.001) * np.ones(TEST_VOLUME_DIMENSIONS)\n m0 = np.ones(TEST_VOLUME_DIMENSIONS)\n lambda_blood_brain = 0.9\n label_duration = 1.8\n post_label_delay = 1.8\n label_efficiency = 0.85\n t1_arterial_blood = 1.65\n calc_cbf = AslQuantificationFilter.asl_quant_wp_casl(\n control,\n label,\n m0,\n lambda_blood_brain,\n label_duration,\n post_label_delay,\n label_efficiency,\n t1_arterial_blood,\n )\n numpy.testing.assert_array_equal(\n calc_cbf,\n np.divide(\n 6000\n * lambda_blood_brain\n * (control - label)\n * np.exp(post_label_delay / t1_arterial_blood),\n 2\n * label_efficiency\n * t1_arterial_blood\n * m0\n * (1 - np.exp(-label_duration / t1_arterial_blood)),\n out=np.zeros_like(m0),\n where=m0 != 0,\n ),\n )", "def complex_ica(data, complex_mixing=True,\n already_normalized=False, pca_dim=0.95,\n ica_dim=200, zero_tolerance=1e-7,\n conv_eps=1e-7, max_iter=10000, lrate=0.1,\n whiten_mat=[], dewhiten_mat=[],\n cost_function='g2', envelopeICA=False,\n verbose=True, pca_only=False,\n overwrite=False):\n\n # -----------------------------------\n # copy and check input data\n # -----------------------------------\n # extract some parameter\n if not overwrite:\n origdata = data.copy()\n\n ntsl, nchan = data.shape\n\n\n # -----------------------------------\n # check if ICA should be estimated\n # on the envelope of the data\n # -----------------------------------\n if envelopeICA:\n complex_mixing = False\n if already_normalized:\n dmean = np.zeros((1, nchan))\n dstddev = np.ones((1, nchan))\n else:\n dmean = np.mean(data, axis=0).reshape((1, nchan))\n dstddev = np.std(data, axis=0).reshape((1, nchan))\n\n if isinstance(data[0, 0], complex):\n data = np.abs(data)\n else:\n print(\">>> WARNING: Input data are not complex, i.e., ICA on data envelope cannot be performed.\")\n print(\">>> Instead standard ICA is performed.\")\n\n dmean_abs = np.mean(data, axis=0).reshape((1, nchan))\n dstddev_abs = np.std(data, axis=0).reshape((1, nchan))\n data = (data - np.dot(np.ones((ntsl, 1)), dmean_abs)) / np.dot(np.ones((ntsl, 1)), dstddev_abs)\n\n elif already_normalized:\n dmean = np.zeros((1, nchan))\n dstddev = np.ones((1, nchan))\n else:\n # -----------------------------------\n # subtract mean values from channels\n # -----------------------------------\n dmean = np.mean(data, axis=0).reshape((1, nchan))\n dstddev = np.std(data, axis=0).reshape((1, nchan))\n data = (data - np.dot(np.ones((ntsl, 1)), dmean)) / np.dot(np.ones((ntsl, 1)), dstddev)\n\n\n # -----------------------------------\n # do PCA (on data matrix)\n # -----------------------------------\n # check if whitening and de-whitening matrices\n if np.any(whiten_mat) and np.any(dewhiten_mat):\n pca_dim = whiten_mat.shape[0]\n ica_dim = pca_dim\n\n else:\n if nchan > 1000:\n print(\">>> Launching PCA...due to data size this might take a while...\")\n\n covmat = cov(data, rowvar=0)\n\n # check if complex mixing is assumed\n if not complex_mixing:\n covmat = 
covmat.real\n\n Dc, Ec = sc.linalg.eig(covmat)\n idx_sort = np.argsort(Dc)[::-1]\n Dc = Dc[idx_sort]\n\n if not complex_mixing:\n Dc = Dc.real\n Ec = Ec.real\n\n # ------------------------------\n # perform model order selection\n # ------------------------------\n # --> can either be performed by\n # (1.) MIBS (self.pca_dim = None)\n # (2.) explained variance (0 < self.pca_dim < 1)\n # (3.) or a fix number (self.pca_dim > 1)\n if not pca_dim:\n from .dimension_selection import mibs\n pca_dim = mibs(Dc.real, ntsl)\n elif np.abs(pca_dim) <= 1.0:\n # estimate explained variance\n explVar = np.abs(Dc.copy())\n explVar /= explVar.sum()\n pca_dim = np.sum(explVar.cumsum() <= np.abs(pca_dim)) + 1\n else:\n pca_dim = np.abs(pca_dim)\n\n # checks for negativ eigenvalues\n if any(Dc[0:pca_dim] < 0):\n print(\">>> WARNING: Negative eigenvalues! Reducing PCA and ICA dimension...\")\n\n # check for eigenvalues near zero (relative to the maximum eigenvalue)\n zero_eigval = np.sum((Dc[0:pca_dim]/Dc[0]) < zero_tolerance)\n\n # adjust dimensions if necessary (because zero eigenvalues were found)\n pca_dim -= zero_eigval\n if pca_dim < ica_dim:\n ica_dim = pca_dim\n\n if verbose:\n print(\">>> PCA dimension is %d and ICA dimension is %d\" % (pca_dim, ica_dim))\n\n\n # construct whitening and dewhitening matrices\n Dc_sqrt = np.sqrt(Dc[0:pca_dim])\n Dc_sqrt_inv = 1.0 / Dc_sqrt\n Ec = Ec[:, idx_sort[0:pca_dim]]\n whiten_mat = np.dot(np.diag(Dc_sqrt_inv), Ec.conj().transpose())\n dewhiten_mat = np.dot(Ec, np.diag(Dc_sqrt))\n\n # reduce dimensions and whiten data. |Zmat_c| is the\n # main input for the iterative algorithm\n Zmat_c = np.dot(whiten_mat, data.transpose())\n Zmat_c_tr = Zmat_c.conj().transpose() # also used in the fixed-point iteration\n\n # check if only PCA should be performed\n if pca_only:\n # return explained variance as objective function\n objective = np.abs(Dc.copy())\n objective /= objective.sum()\n return whiten_mat, dewhiten_mat, Zmat_c, dmean, dstddev, objective, whiten_mat, dewhiten_mat\n\n\n # ----------------------------------------------------------------\n # COMPLEX-VALUED FAST_ICA ESTIMATION\n # ----------------------------------------------------------------\n if verbose and complex_mixing:\n print(\"... Launching complex-valued FastICA:\")\n elif verbose:\n print(\"... 
Launching FastICA:\")\n\n # initial point, make it imaginary and unitary\n if complex_mixing:\n W_old = np.random.randn(ica_dim, pca_dim) + np.random.randn(ica_dim, pca_dim) * 1j\n else:\n W_old = np.random.randn(ica_dim, pca_dim)\n\n\n W_old = np.dot(sc.linalg.sqrtm(np.linalg.inv(np.dot(W_old, W_old.conj().transpose()))), W_old)\n\n # iteration start here\n for iter in range(0, max_iter):\n\n # compute outputs, note lack of conjugate\n Y = np.dot(W_old, Zmat_c)\n Y2 = np.abs(Y * Y.conj())\n\n # # compute nonlinearities\n if cost_function == 'g1':\n gY = 1.0/(2.0 * np.sqrt(lrate + Y2))\n dmv = np.sum(((2.0 * lrate + Y2)/(4.0 * (lrate + Y2)**1.5)), axis=1)\n\n elif cost_function == 'g3':\n gY = Y2\n dmv = np.sum((2.0 * Y2), axis=1)\n\n elif cost_function == 'sigmoidal':\n gY = 1.0 / (1.0 + np.exp(-Y2))\n dmv = np.sum(((1.0 + (1.0 + Y2) * np.exp(-Y2))*gY**2), axis=1)\n\n else:\n gY = 1.0 / (lrate + Y2)\n dmv = lrate * np.sum(gY**2, axis=1)\n\n # fixed-point iteration\n W_new = np.dot((Y * gY), Zmat_c_tr) - np.dot(np.diag(dmv), W_old)\n\n # in case we want to restrict W to be real-valued, do it here\n if complex_mixing:\n W = W_new\n else:\n W = W_new.real\n\n # make unitary\n W = np.dot(sc.linalg.sqrtm(np.linalg.inv(np.dot(W, W.conj().transpose()))), W)\n\n # check if converged\n conv_criterion = 1.0 - np.sum(np.abs(np.sum(W * W_old.conj(), axis=1)))/ica_dim\n if conv_criterion < conv_eps:\n break\n\n if verbose:\n from sys import stdout\n info = \"\\r\" if iter > 0 else \"\"\n info += \">>> Step %4d of %4d; wchange: %1.4e\" % (iter+1, max_iter, conv_criterion)\n stdout.write(info)\n stdout.flush()\n\n\n # store old value\n W_old = W\n\n\n # compute mixing matrix (in whitened space)\n A = W.conj().transpose()\n\n # compute source signal estimates\n S = np.dot(W, Zmat_c)\n\n # tell if convergence problems\n if conv_criterion > conv_eps:\n print(\"\\nWARNING: Failed to converge, results may be wrong!\")\n else:\n if verbose:\n print(\"\\n>>> Converged!\")\n\n\n # ----------------------------------------------------------------\n # SORT COMPONENTS AND TRANSFORMS TO ORIGINAL SPACE\n # ----------------------------------------------------------------\n if verbose:\n print(\"... Sorting components and reformatting results.\")\n\n # compute objective function for each component\n objective = -np.mean(np.log(lrate + np.abs(S * S.conj())), axis=1)\n\n # sort components using the objective\n comp_order = np.argsort(objective)[::-1]\n objective = objective[comp_order]\n W = W[comp_order, :]\n A = A[:, comp_order]\n S = S[comp_order, :]\n\n # compute mixing and de-mixing matrix in original channel space\n # Spatial filters\n W_orig = np.dot(W, whiten_mat)\n\n # Spatial patterns\n A_orig = np.dot(dewhiten_mat, A)\n\n\n if verbose:\n print(\"... Done!\")\n\n if not overwrite:\n data = origdata\n\n return W_orig, A_orig, S, dmean, dstddev, objective, whiten_mat, dewhiten_mat", "def enc_mul_const(pub, m, c): # to do\n return powmod(m, c, pub.n_sq) # m^c mod n^2", "def swhtImageCoeffs(vis, uvw, freqs, lmax, lmin=0):\n start_time = time.time()\n\n #single subband cases\n if vis.ndim==1: vis = vis[np.newaxis].T\n if uvw.ndim==2: uvw = uvw.reshape(uvw.shape[0], uvw.shape[1], 1)\n if freqs.ndim==1: freqs = freqs[np.newaxis].T\n\n k = 2. 
* np.pi * freqs/cc #obs freq/c\n\n #convert u,v,w to r,phi,theta\n r, phi, theta = util.cart2sph(uvw[:,0], uvw[:,1], uvw[:,2])\n if r.ndim==1: #make arrays 2D\n r = r[np.newaxis].T\n phi = phi[np.newaxis].T\n theta = theta[np.newaxis].T\n \n phi = phi - np.pi #make range -pi to pi\n theta = np.pi - theta #flip theta values\n\n #from matplotlib import pyplot as plt\n #from mpl_toolkits.mplot3d import Axes3D\n #fig = plt.figure()\n #ax = fig.add_subplot(111, projection='3d')\n #ax.scatter(uvw[:,0], uvw[:,1], uvw[:,2], c=r, alpha=0.5, edgecolors='none')\n #plt.show()\n #exit()\n\n #r = np.sqrt(uvw[:,0]**2. + uvw[:,1]**2. + uvw[:,2]**2.)[np.newaxis].T\n #phi = np.arctan2(uvw[:,1], uvw[:,0])[np.newaxis].T\n #theta = (np.pi/2.) - np.arctan2(uvw[:,2], np.sqrt(uvw[:,0]**2. + uvw[:,1]**2.))[np.newaxis].T #make range -pi/2 to pi/2\n\n #compute the SWHT visibility coefficients\n vislm = computeVislm(lmax, k, r, theta, phi, vis, lmin=lmin)\n #compute the SWHT brightness coefficients\n blm = computeblm(vislm)\n\n print 'Run time: %f s'%(time.time() - start_time)\n\n return blm", "def mulSpectrums(a, b, flags, c=..., conjB=...) -> c:\n ...", "def get_opt_func(rate_matrices, distributions):\n\n n = len(rate_matrices[0]) # number of states\n\n def get_dS(W, p):\n \"\"\" Rate of Shannon entropy change, for rate matrix W and distribution p \"\"\"\n\n \"\"\" We rewrite -sum_{i,j} p_i W_ji ln p_j as the \"KL-like\" expression\n 1/tau sum_{i,j} p_i T_ji ln (p_i T_ji/p_j T_ji)\n where tau = -min_i W_ii is the fastest time scale in R and\n T_ji = delta_{ji} + tau W_ji is a conditional probability distribuiton. This \n lets us indicate to cvxpy that -sum_{i,j} p_i W_ji ln p_j is convex in p.\n \"\"\"\n\n tau = -1/np.min(np.diag(W))\n T = np.eye(n) + tau*W\n assert(np.all(T>=0))\n\n dS = 0.\n for i in range(n):\n for j in range(n):\n if i == j: \n continue\n if np.isclose(T[i,j],0):\n continue\n dS += cp.kl_div( T[i,j] * p[j], T[i,j] * p[i]) + T[i,j] * p[j] - T[i,j] * p[i]\n return dS / tau\n\n\n def get_EF(W, p):\n \"\"\" EF rate, for rate matrix W and distribution p, defined as \n sum_{i,j} p_i W_ji ln (W_ji/W_ji)\n \"\"\"\n\n EF = 0.\n for i in range(n):\n for j in range(n):\n if i == j:\n continue\n if np.isclose(W[i,j],0) and np.isclose(W[j,i],0):\n continue\n EF += W[i,j] * p[j] * np.log( W[i,j] / W[j,i] )\n return EF\n\n \n def f(eta):\n p = cp.Variable( n, name='p')\n logQ_param = cp.Parameter(n, name='logQ')\n\n\n min_val = None\n\n print('-'*len(rate_matrices))\n\n for W in rate_matrices:\n sys.stdout.write('.')\n sys.stdout.flush()\n\n cons = [ p >= 0, sum(p) == 1 ]\n for q2 in distributions:\n assert(np.all(q2 > 0))\n cons.append( p @ (logQ_param-np.log(q2)) >= 0 )\n\n obj = (1-eta)*get_dS(W, p) + get_EF(W, p) - eta*(W @ p)@logQ_param\n cons.append( obj <= -1e-6)\n prob = cp.Problem(cp.Minimize(0), cons)\n\n for q in distributions:\n logQ_param.value = np.log(q)\n\n prob.solve(solver=cp.ECOS, reltol=1e-12)\n if prob.status == 'infeasible':\n continue\n\n else:\n print('')\n return False\n\n\n print('')\n return True\n\n return f", "def __equiv__(self, miller=None, csym=None,\n cdim=[1.,1.,1.], cang=[90.,90.,90.]):\n start = time.time()\n from .sym import cv\n from .sym import cubic, hexag\n #from sym_cy import cubic, hexag\n from . 
import sym #python compiled\n #import sym_cy #cython compiled\n #from sym.py cvec, cubic, and hexgonal modules are brought in\n if miller==None:\n print(\"Miller index should be given\")\n raise IOError\n vect = np.array(miller)\n norm = 0.; sneq = []\n temp = vect.copy()\n #norm = vect[0]**2 + vect[1]**2 + vect[2]**2\n #norm = np.sqrt(norm)\n #vect = vect/ norm\n #print 'elapsed time before v calculation: %8.6f'%\n #(time.time()-start)\n\n ##---------------------------------\n ##---------------------------------\n #start = time.time()\n if csym=='cubic':\n #H = sym_cy.cubic(1) #cython compiled\n H = sym.cubic() #operators\n for i in range(len(H)):\n sneq.append(np.dot(H[i], vect))\n pass\n pass\n elif csym=='hexag':\n #H = sym_cy.hexag(1) #cython compiled\n H = sym.hexag() #operators\n v = cv(pole=vect, cdim=cdim, cang=cang)\n for i in range(len(H)):\n sneq.append(np.dot(H[i], v))\n pass\n pass\n elif csym=='None':\n #H = [np.identity(3)]\n sneq = [vect]\n else:\n print('Given symmetry, %s is not prepared'%csym)\n input('Enter to raise an error and quits the job');\n raise IOError\n\n #print 'elapsed time during v calculation: %8.6f'%\n #(time.time()-start)\n #####-------------------------------\n #####--------------------------------\n\n start = time.time()\n stacked = [] #empty unique vectors\n # is cH in the already existing stacked list?\n # yes: pass\n # no : add\n\n ## filtering the sneq under whether or not it is unique\n for i in range(len(sneq)):\n cH = sneq[i].copy() #current vector\n if __isunique__(a=cH, b=stacked):\n stacked.append(cH)\n else: pass\n pass\n\n ## if v[2] is minus, mutiply minus sign to the vector.\n for i in range(len(stacked)):\n if stacked[i][2]<0:\n stacked[i] = stacked[i]*-1\n pass\n #print 'elapsed time during the rest: %8.6f'%\n #(time.time()-start)\n return np.array(stacked)", "def kcdetect(data, sf, proba_thr, amp_thr, hypno, nrem_only, tmin, tmax,\n kc_min_amp, kc_max_amp, fmin=.5, fmax=4., delta_thr=.75,\n smoothing_s=20, spindles_thresh=2., range_spin_sec=20,\n min_distance_ms=500.):\n # Find if hypnogram is loaded :\n hyploaded = True if np.unique(hypno).size > 1 and nrem_only else False\n\n # PRE DETECTION\n # Compute delta band power using wavelet\n freqs = np.array([0.1, 4., 8., 12., 16., 30.])\n delta_npow = morlet_power(data, freqs, sf, norm=True)[0]\n delta_nfpow = smoothing(delta_npow, smoothing_s * sf)\n idx_no_delta = np.where(delta_nfpow < delta_thr)[0]\n idx_loc_delta = np.where(delta_npow > np.median(delta_npow))[0]\n\n # MAIN DETECTION\n # Bandpass filtering\n sig_filt = filt(sf, np.array([fmin, fmax]), data)\n # Taiger-Keaser energy operator\n sig_tkeo = tkeo(sig_filt)\n # Define hard and soft thresholds\n hard_thr = np.nanmean(sig_tkeo) + amp_thr * np.nanstd(sig_tkeo)\n soft_thr = 0.8 * hard_thr\n\n with np.errstate(divide='ignore', invalid='ignore'):\n idx_hard = np.where(sig_tkeo > hard_thr)[0]\n idx_soft = np.where(sig_tkeo > soft_thr)[0]\n\n # Find threshold-crossing indices of soft threshold\n idx_zc_soft = _events_to_index(idx_soft).flatten()\n\n if idx_hard.size == 0:\n return np.array([], dtype=int)\n\n # Initialize K-complexes index vector\n idx_kc = np.array([], dtype=int)\n # Fill gap between events separated by less than min_distance_ms\n idx_hard = _events_distance_fill(idx_hard, min_distance_ms, sf)\n # Get where K-complex start / end :\n idx_start, idx_stop = _events_to_index(idx_hard).T\n\n # Find true beginning / end using soft threshold\n for s in idx_start:\n d = s - idx_zc_soft\n soft_beg = d[d > 0].min()\n 
soft_end = np.abs(d[d < 0]).min()\n idx_kc = np.append(idx_kc, np.arange(s - soft_beg, s + soft_end))\n\n # Check if spindles are present in range_spin_sec\n idx_spin = spindlesdetect(data, sf, spindles_thresh, hypno, False)[0]\n idx_start, idx_stop = _events_to_index(idx_kc).T\n spin_bool = np.array([], dtype=np.bool)\n\n for idx, val in enumerate(idx_start):\n step = 0.5 * range_spin_sec * sf\n is_spin = np.in1d(np.arange(val - step, val + step, 1),\n idx_spin, assume_unique=True)\n spin_bool = np.append(spin_bool, any(is_spin))\n\n kc_spin = np.where(spin_bool)[0]\n idx_kc_spin = _index_to_events(np.c_[idx_start, idx_stop][kc_spin])\n\n # Compute probability\n proba = np.zeros(shape=data.shape)\n proba[idx_kc] += 0.1\n proba[idx_no_delta] += 0.1\n proba[idx_loc_delta] += 0.1\n proba[idx_kc_spin] += 0.1\n\n if hyploaded:\n proba[hypno == -1] += -0.1\n proba[hypno == 0] += -0.2\n proba[hypno == 1] += 0\n proba[hypno == 2] += 0.1\n proba[hypno == 3] += -0.1\n proba[hypno == 4] += -0.2\n\n # Smooth and normalize probability vector\n proba = proba / 0.5 if hyploaded else proba / 0.4\n proba = smoothing(proba, sf)\n # Keep only proba >= proba_thr (user defined threshold)\n idx_kc = np.intersect1d(idx_kc, np.where(proba >= proba_thr)[0], True)\n\n if idx_kc.size == 0:\n return np.array([], dtype=int)\n\n # Morphological criteria\n idx_start, idx_stop = _events_to_index(idx_kc).T\n duration_ms = (idx_stop - idx_start) * (1000 / sf)\n\n # Remove events with bad duration\n good_dur = np.where(np.logical_and(duration_ms > tmin,\n duration_ms < tmax))[0]\n idx_kc = _index_to_events(np.c_[idx_start, idx_stop][good_dur])\n\n # Remove events with bad amplitude\n idx_start, idx_stop = _events_to_index(idx_kc).T\n amp = np.zeros(shape=idx_start.size)\n for i, (start, stop) in enumerate(zip(idx_start, idx_stop)):\n amp[i] = np.ptp(data[start:stop])\n good_amp = np.where(np.logical_and(amp > kc_min_amp,\n amp < kc_max_amp))[0]\n\n return np.c_[idx_start, idx_stop][good_amp]", "def test_complex_valued_parameter(self):\n amp = Parameter(\"amp\")\n\n test_sched = pulse.ScheduleBlock()\n test_sched.append(\n pulse.Play(\n pulse.Constant(160, amp=1j * amp),\n pulse.DriveChannel(0),\n ),\n inplace=True,\n )\n test_assigned = test_sched.assign_parameters({amp: 0.1}, inplace=False)\n self.assertTrue(isinstance(test_assigned.blocks[0].pulse.amp, complex))", "def coil_combine_cmrr_sequential(chain):\n block = chain._block\n set = chain._block.set\n dataset = chain._dataset\n raw = chain.raw\n\n ncoils = raw.shape[1]\n nfids = raw.shape[2]\n dim0 = raw.shape[3]\n acqdim0 = dim0\n xaxis = range(dim0)\n\n flag_norm_to_sum = False # default for now\n\n dat_comb = np.ndarray([nfids,dim0], dtype=np.complex128)\n\n all_weight = np.ndarray([nfids,ncoils], dtype=np.float)\n all_phases = np.ndarray([nfids,ncoils], dtype=np.complex)\n\n for i in range(nfids):\n\n # determine weighting and phz for each coil\n # zero-order phase correction\n # correct for phase based on 1st point in 1st wref fid\n\n # for each average, calc phase and weights to correct for coil geometry\n chans = []\n weight = []\n phases = []\n \n for j in range(ncoils):\n chan = chain.raw[0,j,i,:].copy()\n \n magn = np.abs(chan[0])\n phas = np.conjugate(chan[0])/magn # normalized complex conj to cancel phase \n chan = phas * chan # Note. 
applying phase here NOT below as in Siemens\n \n # amplitude of zero order phased fid in time domain\n # using 9th order polynomial fit (based on Uzay's script)\n coeffs = np.polyfit(xaxis, np.absolute(chan), 9)\n \n weight.append(coeffs[-1]) # last entry is amplitude - zero order coeff\n phases.append(phas)\n chans.append(chan)\n \n # normalize weighting function based on spectro data \n tmp = np.sum([val*val for val in weight]) # sum squared values \n if tmp == 0.0: tmp = 1.0\n if flag_norm_to_sum:\n # sum of sensitivities\n lamda = np.sum(weight) / tmp \n else:\n # sqrt of sum of squared sensitivities\n lamda = 1.0 / np.sqrt(tmp)\n\n weight = [val*lamda for val in weight]\n\n all_weight[i,:] = weight\n all_phases[i,:] = phases\n \n # apply weighting ... phase corrections done above\n for j,chan in enumerate(chans):\n chans[j] = chan * weight[j]\n \n # sum corrected FIDs from each coil into one combined FID\n dat_comb[i,:] = np.sum(chans, axis=0) \n\n print_combine_stats(all_weight, all_phases, method='CMRR_Sequential')\n \n return normalize_shape(dat_comb), all_weight, all_phases", "def CCM(wl, R_V=3.1):\n\n\n a = np.zeros(np.shape(wl))\n b = np.zeros(np.shape(wl))\n F_a = np.zeros(np.shape(wl))\n F_b = np.zeros(np.shape(wl))\n x = np.zeros(np.shape(wl))\n y = np.zeros(np.shape(wl))\n q = np.zeros(np.shape(wl))\n\n x = 10000. / wl\n y = 10000. / wl - 1.82\n\n # Far-Ultraviolet: 8 <= x <= 10 ; 1000 -> 1250 Angs\n i = np.bitwise_and(x >= 8, x <= 10)\n\n a[i] = -1.073 - 0.628 * (x[i] - 8.) + 0.137 * (x[i] - 8.)**2 - 0.070 * (x[i] - 8.)**3\n b[i] = 13.670 + 4.257 * (x[i] - 8.) - 0.420 * (x[i] - 8.)**2 + 0.374 * (x[i] - 8.)**3\n\n # Ultraviolet: 3.3 <= x <= 8 ; 1250 -> 3030 Angs\n i = np.bitwise_and(x >= 5.9, x < 8)\n F_a[i] = -0.04473 * (x[i] - 5.9)**2 - 0.009779 * (x[i] - 5.9)**3\n F_b[i] = 0.2130 * (x[i] - 5.9)**2 + 0.1207 * (x[i] - 5.9)**3\n\n i = np.bitwise_and(x >= 3.3, x < 8)\n\n a[i] = 1.752 - 0.316 * x[i] - 0.104 / ((x[i] - 4.67)**2 + 0.341) + F_a[i]\n b[i] = -3.090 + 1.825 * x[i] + 1.206 / ((x[i] - 4.62)**2 + 0.263) + F_b[i]\n\n # Optical/NIR: 1.1 <= x <= 3.3 ; 3030 -> 9091 Angs ;\n i = np.bitwise_and(x >= 1.1, x < 3.3)\n\n a[i] = 1.+ 0.17699 * y[i] - 0.50447 * y[i]**2 - 0.02427 * y[i]**3 + \\\n 0.72085 * y[i]**4 + 0.01979 * y[i]**5 - 0.77530 * y[i]**6 + 0.32999 * y[i]**7\n b[i] = 1.41338 * y[i] + 2.28305 * y[i]**2 + 1.07233 * y[i]**3 - \\\n 5.38434 * y[i]**4 - 0.62251 * y[i]**5 + 5.30260 * y[i]**6 - 2.09002 * y[i]**7\n\n\n # Infrared: 0.3 <= x <= 1.1 ; 9091 -> 33333 Angs ;\n i = np.bitwise_and(x >= 0.3, x < 1.1)\n\n a[i] = 0.574 * x[i]**1.61\n b[i] = -0.527 * x[i]**1.61\n\n q = a + b / R_V\n\n return q", "def bsmc(n,alpha):", "def ens_CM1_C2A(ens, var = 'ALL'):\n \n# Copy data from cell centered surrogate, then average the staggered fields to the centers\n \n t0 = timer()\n \n nx = ens.nx\n ny = ens.ny\n nz = ens.nz\n \n if var.upper() == \"U\" or var.upper() == \"ALL\":\n\n fstate.xyz3d[ens.u_ptr,:,:,:,0] = 0.5*(fstate.u[:,:,:,0] + fstate.u[:,:,:,1])\n fstate.xyz3d[ens.u_ptr,:,:,:,nx-1] = 0.5*(fstate.u[:,:,:,nx-1] + fstate.u[:,:,:,nx])\n fstate.xyz3d[ens.u_ptr,:,:,:,1:nx-1] = (-fstate.u[:,:,:,0:nx-2] + 13.0*fstate.u[:,:,:,1:nx-1] \\\n -fstate.u[:,:,:,3:nx+1] + 13.0*fstate.u[:,:,:,2:nx] ) / 24.0\n \n if var.upper() == \"V\" or var.upper() == \"ALL\":\n\n fstate.xyz3d[ens.v_ptr,:,:,0,:] = 0.5*(fstate.v[:,:,0,:] + fstate.v[:,:,1,:])\n fstate.xyz3d[ens.v_ptr,:,:,ny-1,:] = 0.5*(fstate.v[:,:,ny-1,:] + fstate.v[:,:,ny,:])\n fstate.xyz3d[ens.v_ptr,:,:,1:ny-1,:] = (-fstate.v[:,:,0:ny-2,:] 
+ 13.0*fstate.v[:,:,1:ny-1,:] \\\n -fstate.v[:,:,3:ny+1,:] + 13.0*fstate.v[:,:,2:ny,:] ) / 24.0\n \n if var.upper() == \"W\" or var.upper() == \"ALL\":\n\n fstate.xyz3d[ens.w_ptr,:,0,:,:] = 0.5*(fstate.w[:,0,:,:] + fstate.w[:,1,:,:])\n fstate.xyz3d[ens.w_ptr,:,nz-1,:,:] = 0.5*(fstate.w[:,nz-1,:,:] + fstate.w[:,nz,:,:])\n fstate.xyz3d[ens.w_ptr,:,1:nz-1,:,:] = (-fstate.w[:,0:nz-2,:,:] + 13.0*fstate.w[:,1:nz-1,:,:] \\\n -fstate.w[:,3:nz+1,:,:] + 13.0*fstate.w[:,2:nz,:,:] ) / 24.0\n \n# Create ens variables to point at A-grid velocities\n\n ens.addvariable(\"UA\", data=fstate.xyz3d[ens.u_ptr,:,:,:,:], coords = ('MEMBER,NZ,NY,NX')) \n ens.addvariable(\"VA\", data=fstate.xyz3d[ens.v_ptr,:,:,:,:], coords = ('MEMBER,NZ,NY,NX')) \n ens.addvariable(\"WA\", data=fstate.xyz3d[ens.w_ptr,:,:,:,:], coords = ('MEMBER,NZ,NY,NX')) \n \n if time_all: print(\"\\n Wallclock time to convert from C to A grid:\", round(timer() - t0, 3), \" sec\")\n\n return", "def convolveCAv( AvModel='midIa', cmin=-0.3, cmax=2.5 ):\n if isinstance(AvModel, basestring) : \n AvModel = AvModelDict[ AvModel ]\n\n gauss = lambda x,mu,sig : ( 1/np.sqrt(2*np.pi*sig**2) ) * np.exp(-(mu-x)**2/(2*sig**2))\n\n # Define the range of allowable C values\n # - this can go slightly negative, for very blue SNIa with 0 dust\n Cin = np.arange( cmin, cmax, 0.01 )\n\n\n # Define the intrinsic probability distribution of C values \n # - narrow gaussian \n # - centered on c=-0.1 (corresponding to Av=0, Kessler:2009a)\n # - sigma=0.04 (Scolnic:2013, section 3.1)\n Cdist = gauss( Cin, -0.1, 0.04 )\n\n # Add 0.1 to C to convert the C grid to Av (assuming Beta=4.1), \n # then use the user-specified Av distribution model to define the \n # distribution of host galaxy extinction values\n # Note: we limit this to positive Av values so that the \n # numpy 1-d convolution operation produces an output array \n # that is appropriately shifted to match the Cin abscissa array\n Avin = ( Cin+0.1)[ np.where( Cin>=-0.1 ) ]\n hostAvdist = AvModel( Avin ) \n\n # convolve the two probability distributions, then normalize\n # so that the resulting distribution integrates to unity\n Cobs = np.convolve( Cdist, hostAvdist, mode='full' )\n Cobs = ( Cobs / ( np.sum( Cobs ) * np.diff( Cin )[0] ))[:len(Cin)]\n\n return( Cin, Cobs )", "def m_c(mcmc, scale, f, alphasMZ=0.1185, loop=3):\n if scale == mcmc:\n return mcmc # nothing to do\n _sane(scale, f)\n crd = rundec.CRunDec()\n alphas_mc = alpha_s(mcmc, 4, alphasMZ=alphasMZ, loop=loop)\n if f == 4:\n alphas_scale = alpha_s(scale, f, alphasMZ=alphasMZ, loop=loop)\n return crd.mMS2mMS(mcmc, alphas_mc, alphas_scale, f, loop)\n elif f == 3:\n crd.nfMmu.Mth = 1.3\n crd.nfMmu.muth = 1.3\n crd.nfMmu.nf = 4\n return crd.mH2mL(mcmc, alphas_mc, mcmc, crd.nfMmu, scale, loop)\n elif f == 5:\n crd.nfMmu.Mth = 4.8\n crd.nfMmu.muth = 4.8\n crd.nfMmu.nf = 5\n return crd.mL2mH(mcmc, alphas_mc, mcmc, crd.nfMmu, scale, loop)\n else:\n raise ValueError(f\"Invalid input: f={f}, scale={scale}\")", "def test_calc_p_on_ccd_asymmetric_dither():\n # These lines mimic the code in calc_p_on_ccd() which requires that\n # track readout box is fully within the usable part of CCD.\n max_ccd_row = ACA.max_ccd_row - 5\n max_ccd_col = ACA.max_ccd_col - 4\n\n # Halfway off in both row and col, (1/4 of area remaining). 
These checks\n # don't change from symmetric case because of the placement of row, col.\n p_in_box = calc_p_on_ccd(max_ccd_row, max_ccd_col, ACABox((60, 120)))\n assert np.allclose(p_in_box, 0.25)\n\n p_in_box = calc_p_on_ccd(max_ccd_row, max_ccd_col, ACABox((120, 60)))\n assert np.allclose(p_in_box, 0.25)\n\n # 3 of 8 pixels off in row (5/8 of area remaining). Dither_z (col) does not\n # matter here because only rows are off CCD.\n for dither_z in 5, 100:\n p_in_box = calc_p_on_ccd(max_ccd_row - 1, 0, ACABox((20, dither_z)))\n assert np.allclose(p_in_box, 0.625)\n\n # Same but for col\n for dither_y in 5, 100:\n p_in_box = calc_p_on_ccd(0, max_ccd_col - 1, ACABox((dither_y, 20)))\n assert np.allclose(p_in_box, 0.625)\n\n # Same but for a negative col number\n p_in_box = calc_p_on_ccd(0, -(max_ccd_col - 1), ACABox((dither_y, 20)))\n assert np.allclose(p_in_box, 0.625)\n\n # Show expected asymmetric behavior, starting right at the physical CCD edge.\n # In this case the only chance to get on the CCD is for dither to bring it\n # there. (Note: the model assumes the dither spatial distribution is\n # flat, but it is not).\n\n # First, with dither_y <= 25 or dither_z <= 20, p_in_box is exactly zero\n for dither in ((25, 20), (60, 20), (25, 60)):\n p_in_box = calc_p_on_ccd(ACA.max_ccd_row, ACA.max_ccd_col, ACABox(dither))\n assert p_in_box == 0\n\n # Now some asymmetric cases. p_in_ccd increases with larger dither.\n for dither, exp in [\n ((30, 40), 0.02083333),\n ((40, 30), 0.03125),\n ((40, 200), 0.084375),\n ((200, 40), 0.109375),\n ]:\n p_in_box = calc_p_on_ccd(ACA.max_ccd_row, ACA.max_ccd_col, ACABox(dither))\n assert np.isclose(p_in_box, exp)", "def nonparametric_cles(a, b, half_credit=True) -> float:\n \n m = np.subtract.outer(a, b)\n m = np.sign(m)\n \n if half_credit:\n m = np.where(m == 0, 0.5, m)\n m = np.where(m == -1, 0, m)\n \n return np.mean(m)", "def chirpf(self,cr=160e3):\n L=self.conf.n_samples_per_block\n sr=self.conf.sample_rate\n f0=0.0\n tv=n.arange(L,dtype=n.float64)/float(sr)\n dphase=0.5*tv**2*cr*2*n.pi\n chirp=n.exp(1j*n.mod(dphase,2*n.pi))*n.exp(1j*2*n.pi*f0*tv)\n return(n.array(chirp,dtype=n.complex64))", "def moorer(x, fs):\n\n cd = np.floor(0.05 * np.random.rand(6) * fs).astype(int)\n\n # set gains of 6 comb pass filters\n g1 = 0.5 * np.ones(6)\n # set feedback of each comb filter\n g2 = 0.5 * np.ones(6)\n # set input cg and cg1 for moorer function see help moorer\n cg = g2 / (1 - g1)\n cg1 = g1\n\n # set gain of allpass filter\n ag = 0.7\n # set delay of allpass filter\n ad = int(0.08 * fs)\n # set direct signal gain\n k = 0.5\n\n # send the input to each of the 6 comb filters separately\n [outcomb1, b1, a1] = utils.lpcomb(x, cg[0], cg1[0], cd[0])\n [outcomb2, b2, a2] = utils.lpcomb(x, cg[1], cg1[1], cd[1])\n [outcomb3, b3, a3] = utils.lpcomb(x, cg[2], cg1[2], cd[2])\n [outcomb4, b4, a4] = utils.lpcomb(x, cg[3], cg1[3], cd[3])\n [outcomb5, b5, a5] = utils.lpcomb(x, cg[4], cg1[4], cd[4])\n [outcomb6, b6, a6] = utils.lpcomb(x, cg[5], cg1[5], cd[5])\n\n # sum the ouptut of the 6 comb filters\n apinput = outcomb1 + outcomb2 + outcomb3 + outcomb4 + outcomb5 + outcomb6 \n\n #find the combined filter coefficients of the the comb filters\n [b, a] = utils.parallelcoefficients(b1, a1, b2, a2)\n [b, a] = utils.parallelcoefficients(b, a, b3, a3)\n [b, a] = utils.parallelcoefficients(b, a, b4, a4)\n [b, a] = utils.parallelcoefficients(b, a, b5, a5)\n [b, a] = utils.parallelcoefficients(b, a, b6, a6)\n\n # send the output of the comb filters to the allpass filter\n [y, b7, a7] 
= utils.allpass(apinput, ag, ad)\n\n #find the combined filter coefficients of the the comb filters in series with the allpass filters\n [b, a] = utils.seriescoefficients(b, a, b7, a7)\n\n # add the scaled direct signal\n y = y + k * x\n\n # normalize the output signal\n y = y / max(y)\n\n return shape_check(y)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__getslice__(self, difference_type i, difference_type j) -> __dummy_6__
def __getslice__(self, *args, **kwargs): return _digital_swig.gr_complex_vector___getslice__(self, *args, **kwargs)
[ "def __getslice__(self, *args, **kwargs):\n return _digital_swig.unsigned_int_vector___getslice__(self, *args, **kwargs)", "def __getslice__(self, i, j): \n ids = numpy.where((self.id_list >= i) & (self.id_list < j))[0]\n return self.id_slice(ids)", "def _slice(self, shallow_copy, slice_range):\n pass", "def __getslice__(self, i, j):\r\n return Attrs(tuple.__getslice__(self, i, j))", "def __getslice__(self, *args):\n return _wali.TransVector___getslice__(self, *args)", "def _arrayslice(self, start: int, stop: int) -> np.ndarray:\n raise NotImplementedError", "def __getslice__(*args):\n return _Field.vectormats___getslice__(*args)", "def __getslice__(self, a, b):\n\n return self.genomeList[a:b]", "def test_slice(setup):\n assert isinstance(setup[\"sliced\"], da.Array)", "def test_get_slice_dense(self):\n config.session.execute(\"TRUNCATE TABLE hecuba.istorage\")\n config.session.execute(\"DROP KEYSPACE IF EXISTS hecuba_dislib\")\n\n bn, bm = 5, 5\n x = np.random.randint(100, size=(30, 30))\n ds_data = ds.array(x=x, block_size=(bn, bm))\n data = ds.array(x=x, block_size=(bn, bm))\n data.make_persistent(name=\"hecuba_dislib.test_array\")\n\n slice_indices = [(7, 22, 7, 22), # many row-column\n (6, 8, 6, 8), # single block row-column\n (6, 8, None, None), # single-block rows, all columns\n (None, None, 6, 8), # all rows, single-block columns\n (15, 16, 15, 16), # single element\n # (-10, -5, -10, -5), # out-of-bounds (not\n # implemented)\n # (-10, 5, -10, 5), # out-of-bounds (not implemented)\n (21, 40, 21, 40)] # out-of-bounds (correct)\n\n for top, bot, left, right in slice_indices:\n got = data[top:bot, left:right].collect()\n expected = ds_data[top:bot, left:right].collect()\n\n self.assertTrue(equal(got, expected))\n\n # Try slicing with irregular array\n x = data[1:, 1:]\n data = ds_data[1:, 1:]\n\n for top, bot, left, right in slice_indices:\n got = x[top:bot, left:right].collect()\n expected = data[top:bot, left:right].collect()\n\n self.assertTrue(equal(got, expected))", "def __getitem__(self, i):\n if not isinstance(i, slice):\n raise ValueError(\"Only slices can be used.\")\n return self.prepareIterator(i.step, i.start, i.stop)", "def test_list_slice():\n a = List([1, 2, 3, 4])\n sl1 = a[1:]\n sl5 = a[5:]\n\n assert type(sl1) == List\n assert sl1 == List([2, 3, 4])\n assert type(sl5) == List\n assert sl5 == List([])", "def test_slice(self):\r\n img = Image(np.random.randint(0, 255, size=(100, 500, 3), dtype=np.uint8))\r\n\r\n sliced = img[...]\r\n assert np.allclose(sliced, img)\r\n assert sliced.name == img.name\r\n assert not _is_ref_unequal(sliced, img)", "def get_slice_mp(self, array_slice, shape, dtype, s3_bucket, s3_key): # pylint: disable=too-many-locals\n\n # pylint: disable=too-many-locals\n def work_get_slice(block, array_name, offset, s3_bucket, s3_key, shape, dtype):\n result = sa.attach(array_name)\n cell, sub_range = block\n\n item_size = np.dtype(dtype).itemsize\n s3_start = (np.ravel_multi_index(cell + tuple([s.start for s in sub_range]), shape)) * item_size\n s3_end = (np.ravel_multi_index(cell + tuple([s.stop - 1 for s in sub_range]), shape) + 1) * item_size\n data = self.s3io.get_byte_range(s3_bucket, s3_key, s3_start, s3_end)\n\n t = [slice(x.start - o, x.stop - o) if isinstance(x, slice) else x - o for x, o in\n zip(cell + tuple(sub_range), offset)]\n if data.dtype != dtype:\n data = np.frombuffer(data, dtype=dtype, count=-1, offset=0)\n # data = data.reshape([s.stop - s.start for s in sub_range])\n\n result[t] = data.reshape([s.stop - s.start for s in 
sub_range])\n\n if self.enable_compression:\n return self.get_slice_by_bbox(array_slice, shape, dtype, s3_bucket, s3_key)\n\n cdim = self.cdims(array_slice, shape)\n\n try:\n end = cdim[::-1].index(False) + 1\n except ValueError:\n end = len(shape)\n\n start = len(shape) - end\n\n outer = array_slice[:-end]\n outer_ranges = [range(s.start, s.stop) for s in outer]\n outer_cells = list(product(*outer_ranges))\n blocks = list(zip(outer_cells, repeat(array_slice[start:])))\n offset = [s.start for s in array_slice]\n\n array_name = generate_array_name('S3AIO')\n sa.create(array_name, shape=[s.stop - s.start for s in array_slice], dtype=dtype)\n shared_array = sa.attach(array_name)\n\n self.pool.map(work_get_slice, blocks, repeat(array_name), repeat(offset), repeat(s3_bucket),\n repeat(s3_key), repeat(shape), repeat(dtype))\n\n sa.delete(array_name)\n return shared_array", "def test_get_slice_from_immutable_sequence() -> None:\n assert list(wrap(range(10))[0:2]) == [0, 1]\n assert list(wrap(range(10))[0:-1]) == [0, 1, 2, 3, 4, 5, 6, 7, 8]", "def test_slice_ndarray(setup):\n assert isinstance(setup[\"sliced\"].compute(), np.ndarray)", "def __getitem__(self, n):\n if not isinstance(n, slice):\n return self(n)\n\n LENGTH = 100000\n (start, stop, step) = n.indices(2*LENGTH)\n if abs(stop - start) > LENGTH:\n raise IndexError(\"slice (=%s) too long\"%n)\n # The dirty work of generating indices is left to a range list\n # This could be slow but in practice seems fine\n # NOTE: n is a SLICE, not an index\n return [ self(i) for i in range(0, LENGTH)[n] if i >= self.offset ]", "def sliced(self,*args):\n if len(args)==1 and type(args[0])==slice: s=args[0]\n else: s=slice(*args)\n ps = self.apply_func(lambda _, spec: spec[s], lambda _, cov: cov[s,s])\n ps.ells = self.ells[s]\n return ps", "def test_slice_list() -> None:\n assert slice_list([3, 4, 6, 2, 3], 2) == [[3, 4], [6, 2], [3]]\n assert slice_list(['a', 1, 6.0, False], 3) == [['a', 1, 6.0], [False]]\n assert slice_list([], 1) == []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) -> gr_complex_vector __init__(self, gr_complex_vector arg0) -> gr_complex_vector __init__(self, size_type size) -> gr_complex_vector __init__(self, size_type size, value_type value) -> gr_complex_vector
def __init__(self, *args):
    this = _digital_swig.new_gr_complex_vector(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def __init__(self, *args):\n _vnl_vectorPython.vnl_vector_vcl_complexD_swiginit(self,_vnl_vectorPython.new_vnl_vector_vcl_complexD(*args))", "def __init__(self, *args):\n _vnl_vectorPython.vnl_vector_vcl_complexF_swiginit(self,_vnl_vectorPython.new_vnl_vector_vcl_complexF(*args))", "def __init__(self, *args):\n _stdcomplexPython.stdcomplexD_swiginit(self, _stdcomplexPython.new_stdcomplexD(*args))", "def __init__(self, *args):\n _vnl_vectorPython.vnl_vector_vcl_complexLD_swiginit(self,_vnl_vectorPython.new_vnl_vector_vcl_complexLD(*args))", "def __init__(self, *args):\n _stdcomplexPython.stdcomplexF_swiginit(self, _stdcomplexPython.new_stdcomplexF(*args))", "def __init__(self, *args):\n _itkImagePython.vectoritkImageRGBUC2_swiginit(self, _itkImagePython.new_vectoritkImageRGBUC2(*args))", "def __init__(self, *args):\n _itkImagePython.vectoritkImageO44_swiginit(self, _itkImagePython.new_vectoritkImageO44(*args))", "def __init__(self, *args):\n _itkImagePython.vectoritkImageRGBUC4_swiginit(self, _itkImagePython.new_vectoritkImageRGBUC4(*args))", "def __call__(self, *args):\n return _vnl_vectorPython.vnl_vector_vcl_complexD___call__(self, *args)", "def __init__(self, *args):\n _itkImagePython.vectoritkImageCVD44_swiginit(self, _itkImagePython.new_vectoritkImageCVD44(*args))", "def __init__(self, *args):\n _itkImagePython.vectoritkImageRGBUC3_swiginit(self, _itkImagePython.new_vectoritkImageRGBUC3(*args))", "def __init__(self, *args):\n _itkImagePython.vectoritkImageVD44_swiginit(self, _itkImagePython.new_vectoritkImageVD44(*args))", "def __init__(self, *args):\n _itkImagePython.vectoritkImageVF44_swiginit(self, _itkImagePython.new_vectoritkImageVF44(*args))", "def __init__(self, *args):\n _itkImagePython.vectoritkImageVF14_swiginit(self, _itkImagePython.new_vectoritkImageVF14(*args))", "def __init__(self, *args):\n _itkImagePython.vectoritkImageCVF44_swiginit(self, _itkImagePython.new_vectoritkImageCVF44(*args))", "def __init__(self, *args):\n _itkImagePython.vectoritkImageCD2_swiginit(self, _itkImagePython.new_vectoritkImageCD2(*args))", "def __init__(self, *args):\n _itkImagePython.vectoritkImageCF2_swiginit(self, _itkImagePython.new_vectoritkImageCF2(*args))", "def begin(self, *args):\n return _vnl_vectorPython.vnl_vector_vcl_complexD_begin(self, *args)", "def __init__(self, *args):\n _itkImagePython.vectoritkImageCF4_swiginit(self, _itkImagePython.new_vectoritkImageCF4(*args))", "def __init__(self, *args):\n _itkImagePython.vectoritkImageCVD34_swiginit(self, _itkImagePython.new_vectoritkImageCVD34(*args))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__getslice__(self, difference_type i, difference_type j) -> unsigned_int_vector
def __getslice__(self, *args, **kwargs): return _digital_swig.unsigned_int_vector___getslice__(self, *args, **kwargs)
[ "def __getslice__(self, i, j): \n ids = numpy.where((self.id_list >= i) & (self.id_list < j))[0]\n return self.id_slice(ids)", "def __getitem__(self, *args):\n return _digital_swig.unsigned_int_vector___getitem__(self, *args)", "def __getslice__(self, *args):\n return _wali.TransVector___getslice__(self, *args)", "def __getslice__(*args):\n return _Field.vectormats___getslice__(*args)", "def __getitem__(self, *args) -> \"unsigned int const &\":\n return _ida_pro.uintvec_t___getitem__(self, *args)", "def __getitem__(self, *args) -> \"int const &\":\n return _ida_pro.intvec_t___getitem__(self, *args)", "def __getslice__(self, *args, **kwargs):\n return _digital_swig.gr_complex_vector___getslice__(self, *args, **kwargs)", "def __getitem__(self, i):\n try:\n if type(i) == slice:\n return Vector(self.data[i])\n return self.data[i]\n except:\n raise IndexError(\"vector index out of range\")", "def __getslice__(self, i, j):\r\n return Attrs(tuple.__getslice__(self, i, j))", "def __getitem__(self, *args) -> \"std::vector< itkImageVD22_Pointer >::value_type const &\":\n return _itkImagePython.vectoritkImageVD22___getitem__(self, *args)", "def __getitem__(self, *args) -> \"std::vector< itkImageVD44_Pointer >::value_type const &\":\n return _itkImagePython.vectoritkImageVD44___getitem__(self, *args)", "def __getitem__(self, *args) -> \"std::vector< itkImageVD23_Pointer >::value_type const &\":\n return _itkImagePython.vectoritkImageVD23___getitem__(self, *args)", "def __getitem__(self, *args) -> \"std::vector< itkImageVD24_Pointer >::value_type const &\":\n return _itkImagePython.vectoritkImageVD24___getitem__(self, *args)", "def __getitem__(self, *args) -> \"std::vector< itkImageVD43_Pointer >::value_type const &\":\n return _itkImagePython.vectoritkImageVD43___getitem__(self, *args)", "def __getitem__(self, *args) -> \"std::vector< itkImageULL2_Pointer >::value_type const &\":\n return _itkImagePython.vectoritkImageULL2___getitem__(self, *args)", "def __getitem__(self, *args) -> \"std::vector< itkImageO22_Pointer >::value_type const &\":\n return _itkImagePython.vectoritkImageO22___getitem__(self, *args)", "def _arrayslice(self, start: int, stop: int) -> np.ndarray:\n raise NotImplementedError", "def __getitem__(self, *args) -> \"std::vector< itkImageUL2_Pointer >::value_type const &\":\n return _itkImagePython.vectoritkImageUL2___getitem__(self, *args)", "def __getitem__(self, i: 'int') -> \"short\":\n return _coin.SbVec2s___getitem__(self, i)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__getitem__(self, PySliceObject slice) -> unsigned_int_vector __getitem__(self, difference_type i) -> value_type
def __getitem__(self, *args): return _digital_swig.unsigned_int_vector___getitem__(self, *args)
[ "def __getitem__(self, *args) -> \"int const &\":\n return _ida_pro.intvec_t___getitem__(self, *args)", "def __getitem__(self, *args) -> \"unsigned int const &\":\n return _ida_pro.uintvec_t___getitem__(self, *args)", "def __getslice__(self, *args, **kwargs):\n return _digital_swig.unsigned_int_vector___getslice__(self, *args, **kwargs)", "def __getitem__(self, *args) -> \"std::vector< itkImageVD43_Pointer >::value_type const &\":\n return _itkImagePython.vectoritkImageVD43___getitem__(self, *args)", "def __getitem__(self, i):\n try:\n if type(i) == slice:\n return Vector(self.data[i])\n return self.data[i]\n except:\n raise IndexError(\"vector index out of range\")", "def __getitem__(self, *args) -> \"uval_t\":\n return _ida_pro.uval_array___getitem__(self, *args)", "def __getitem__(self, *args) -> \"std::vector< itkImageVD23_Pointer >::value_type const &\":\n return _itkImagePython.vectoritkImageVD23___getitem__(self, *args)", "def __getitem__(self, *args) -> \"std::vector< itkImageVD22_Pointer >::value_type const &\":\n return _itkImagePython.vectoritkImageVD22___getitem__(self, *args)", "def __getitem__(self, *args) -> \"std::vector< itkImageVD42_Pointer >::value_type const &\":\n return _itkImagePython.vectoritkImageVD42___getitem__(self, *args)", "def __getitem__(self, *args) -> \"std::vector< itkImageVD44_Pointer >::value_type const &\":\n return _itkImagePython.vectoritkImageVD44___getitem__(self, *args)", "def __getitem__(self, *args) -> \"std::vector< itkImageVD34_Pointer >::value_type const &\":\n return _itkImagePython.vectoritkImageVD34___getitem__(self, *args)", "def __getitem__(self, item):\n if isinstance(item, (int, np.integer)):\n item = (item,) # though the branches might differ...\n elif isinstance(item, slice):\n item = (item,)\n if any(not isinstance(i, (int, np.integer)) for i in item):\n return self.derivative_tensor(len(item), item)\n else:\n d = self.compute_derivatives(len(item), item, lazy=False)\n return d[0]", "def __getitem__(self, *args) -> \"std::vector< itkImageVD33_Pointer >::value_type const &\":\n return _itkImagePython.vectoritkImageVD33___getitem__(self, *args)", "def __getitem__(self, *args) -> \"std::vector< itkImageO22_Pointer >::value_type const &\":\n return _itkImagePython.vectoritkImageO22___getitem__(self, *args)", "def __getitem__(self, *args) -> \"std::vector< itkImageULL3_Pointer >::value_type const &\":\n return _itkImagePython.vectoritkImageULL3___getitem__(self, *args)", "def __getitem__(self, *args) -> \"std::vector< itkImageO33_Pointer >::value_type const &\":\n return _itkImagePython.vectoritkImageO33___getitem__(self, *args)", "def __getitem__(self, *args) -> \"std::vector< itkImageUL3_Pointer >::value_type const &\":\n return _itkImagePython.vectoritkImageUL3___getitem__(self, *args)", "def __getitem__(self, *args) -> \"std::vector< itkImageVD24_Pointer >::value_type const &\":\n return _itkImagePython.vectoritkImageVD24___getitem__(self, *args)", "def __getslice__(self, *args):\n return _wali.TransVector___getslice__(self, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__setitem__(self, PySliceObject slice, unsigned_int_vector v) __setitem__(self, PySliceObject slice) __setitem__(self, difference_type i, value_type x)
def __setitem__(self, *args): return _digital_swig.unsigned_int_vector___setitem__(self, *args)
[ "def __setitem__(self, i, new):\n if isinstance(i, int):\n self.remove(self[i])\n self.add(new)\n elif isinstance(i, slice):\n for interval in self[i]:\n self.remove(interval)\n self.update(new)\n else:\n message = \"Indices must be ints or slices, got {}\".format(i)\n raise TypeError(message)", "def __setitem__(self, i, v):\n if type(i) == slice:\n copy = self.data[:]\n copy[i] = v\n if len(copy) != len(self.data):\n raise ValueError(\"slice operation should not chage vector length\")\n try:\n self.data[i] = v\n except TypeError, e:\n raise TypeError(\"can only assign an iterable\")\n except:\n raise IndexError(\"vector index out of range\")", "def __setitem__(self, *args) -> \"void\":\n return _itkImagePython.vectoritkImageVD43___setitem__(self, *args)", "def __setitem__(self, i: 'int', value: 'short') -> \"void\":\n return _coin.SbVec3s___setitem__(self, i, value)", "def __setitem__(self, *args):\n return _core.VectorXiVec___setitem__(self, *args)", "def __setitem__(self, *args) -> \"void\":\n return _itkImagePython.vectoritkImageVD42___setitem__(self, *args)", "def __setitem__(self, *args) -> \"void\":\n return _itkImagePython.vectoritkImageVD44___setitem__(self, *args)", "def __setitem__(self, *args) -> \"void\":\n return _itkImagePython.vectoritkImageVD23___setitem__(self, *args)", "def __setitem__(self, *args) -> \"void\":\n return _itkImagePython.vectoritkImageVD34___setitem__(self, *args)", "def __setitem__(self, *args) -> \"void\":\n return _itkImagePython.vectoritkImageVD22___setitem__(self, *args)", "def __setitem__(self, i: 'int', value: 'SbVec3d') -> \"void\":\n return _coin.SoMFVec3d___setitem__(self, i, value)", "def __setitem__(self, *args) -> \"void\":\n return _itkImagePython.vectoritkImageULL3___setitem__(self, *args)", "def __setitem__(self, *args) -> \"void\":\n return _itkImagePython.vectoritkImageVD24___setitem__(self, *args)", "def test_getitem_setitem_not_implemented():", "def __setitem__(self, *args) -> \"void\":\n return _itkImagePython.vectoritkImageVD33___setitem__(self, *args)", "def __setitem__(self, i: 'int', value: 'double') -> \"void\":\n return _coin.SbVec4d___setitem__(self, i, value)", "def __setitem__(self, *args) -> \"void\":\n return _itkImagePython.vectoritkImageUL3___setitem__(self, *args)", "def __setitem__(self, *args) -> \"void\":\n return _itkImagePython.vectoritkImageCVD43___setitem__(self, *args)", "def __setitem__(self, i: 'int', value: 'double') -> \"void\":\n return _coin.SbVec3d___setitem__(self, i, value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) -> unsigned_int_vector __init__(self, unsigned_int_vector arg0) -> unsigned_int_vector __init__(self, size_type size) -> unsigned_int_vector __init__(self, size_type size, value_type value) -> unsigned_int_vector
def __init__(self, *args):
    this = _digital_swig.new_unsigned_int_vector(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def __init__(self, *args):\n _ida_pro.uintvec_t_swiginit(self, _ida_pro.new_uintvec_t(*args))", "def __init__(self, *args):\n _ida_pro.intvec_t_swiginit(self, _ida_pro.new_intvec_t(*args))", "def __init__(self, *args):\n _ida_pro.ulonglongvec_t_swiginit(self, _ida_pro.new_ulonglongvec_t(*args))", "def __init__(self, *args):\n _ida_pro.longlongvec_t_swiginit(self, _ida_pro.new_longlongvec_t(*args))", "def __init__(self, *args):\n _ida_pro.sizevec_t_swiginit(self, _ida_pro.new_sizevec_t(*args))", "def __init__(self, *args):\n _vnl_vectorPython.vnl_vectorUL_swiginit(self,_vnl_vectorPython.new_vnl_vectorUL(*args))", "def __init__(self, *args):\n _vnl_vectorPython.vnl_vectorUC_swiginit(self,_vnl_vectorPython.new_vnl_vectorUC(*args))", "def __init__(self, *args):\n _vnl_vectorPython.vnl_vectorUS_swiginit(self,_vnl_vectorPython.new_vnl_vectorUS(*args))", "def __init__(self, *args):\n _vnl_vectorPython.vnl_vectorSI_swiginit(self,_vnl_vectorPython.new_vnl_vectorSI(*args))", "def Vector(*args, **kwargs): # real signature unknown\r\n pass", "def __init__(self, *args):\n _ida_pro.uval_array_swiginit(self, _ida_pro.new_uval_array(*args))", "def __init__(self, *args):\n _vnl_vectorPython.vnl_vectorD_swiginit(self,_vnl_vectorPython.new_vnl_vectorD(*args))", "def __init__(self, *args):\n this = _coin.new_SbVec2i32(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, n):\n if isinstance(n, int) or isinstance(n, long):\n if n < 0:\n raise ValueError(\"vector length cannot be negative\")\n self.data = [0.0] * n\n elif hasattr(n, '__len__') and hasattr(n, '__getitem__'):\n self.data = list(n)\n else:\n raise TypeError(\"vector constructor argument should be int or long or sequence\")", "def __init__(self, *args):\n _vnl_vectorPython.vnl_vectorSL_swiginit(self,_vnl_vectorPython.new_vnl_vectorSL(*args))", "def __init__(self, *args):\n _vnl_vectorPython.vnl_vectorSC_swiginit(self,_vnl_vectorPython.new_vnl_vectorSC(*args))", "def __init__(self, *args):\n this = _coin.new_SbVec4i32(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n _ida_pro.boolvec_t_swiginit(self, _ida_pro.new_boolvec_t(*args))", "def __init__(self, *args):\n _itkImagePython.vectoritkImageUL2_swiginit(self, _itkImagePython.new_vectoritkImageUL2(*args))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) -> digital_constellation_sptr __init__(self, digital_constellation p) -> digital_constellation_sptr
def __init__(self, *args):
    this = _digital_swig.new_digital_constellation_sptr(*args)
    try: self.this.append(this)
    except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n self.port=Config.PortPrinter # Assign the name of the port written in Config.py to self.port\n self.FirstMove=0 # Variable wich allow us to know if this is the first movement of the 3d-mill\n self.Coord={} # Create a dictionnary\n self.cnc=CNC(self.port) # Call the class CNC\n self.cnc.OpenConnection() # Open the Connection with the device\n self.NbWells=0 # Count the number of wells \n Wells.Wells_1(self)", "def __init__(self):\n this = _coin.new_SoClipPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoShaderParameter2i()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_encoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, constness):\r\n FilterBase.__init__(self)\r\n self.constness = constness", "def __init__(self):\n this = _coin.new_SoClipPlaneManip()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoTextureCoordinateCylinder()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoSFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_packet_sink_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoShaderParameter3i()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoCoordinate3()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoShaderParameter1i()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_bytes_to_syms_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoMFPlane()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_descrambler_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n this = _coin.new_SoShaderParameter4i()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def init(self, state: 'SoState') -> \"void\":\n return _coin.SoClipPlaneElement_init(self, state)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
s_points(self) -> gr_complex_vector Returns the vector of points in this constellation. Raise error if dimensionality is not one.
def s_points(self): return _digital_swig.digital_constellation_sptr_s_points(self)
[ "def points(self):\n if self._points is None:\n _points = self.soma.points.tolist()\n for n in self.neurites:\n _points.extend(n.points.tolist())\n self._points = np.array(_points)\n\n return self._points", "def soma_points(self):\n db = self.data_block\n return db[db[:, COLS.TYPE] == POINT_TYPE.SOMA]", "def complex(self):\n offset = np.cumsum([0] + self.n_elements)[:-1]\n from pycomplex.complex.simplicial.spherical import ComplexSpherical2\n return ComplexSpherical2(\n vertices=np.concatenate(self.vertices, axis=0),\n simplices=self.triangles + offset\n )", "def pointvectors(self):\n return np.stack([self.x, self.y], axis=-1)", "def connected_component(self, simplex=None):\n if self.dimension() == -1:\n raise ValueError(\"the empty simplicial complex has no connected components.\")\n if simplex is None:\n v = self.vertices()[0]\n else:\n v = simplex[0]\n vertices = self.graph().connected_component_containing_vertex(v)\n facets = [f for f in self.facets() if f.is_face(Simplex(vertices))]\n return SimplicialComplex(facets)", "def get_ps(self,answer_complex=False):\n\n ps = []\n \n for p in self.ps:\n if answer_complex:\n ps.append(p.numpy()[0] + 1j*p.numpy()[1])\n ps.append(p.numpy()[0] - 1j*p.numpy()[1])\n else:\n ps.append(tuple(p.numpy()))\n\n return ps", "def points(self):\n return [self.point1, self.point2]", "def extract_point_cloud(self):\n tsdf_vol = self._tsdf_vol.cpu().numpy()\n color_vol = self._color_vol.cpu().numpy()\n vol_origin = self._vol_origin.cpu().numpy()\n\n # Marching cubes\n verts = measure.marching_cubes_lewiner(tsdf_vol, level=0)[0]\n verts_ind = np.round(verts).astype(int)\n verts = verts*self._voxel_size + vol_origin\n\n # Get vertex colors\n rgb_vals = color_vol[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]]\n colors_b = np.floor(rgb_vals / self._const)\n colors_g = np.floor((rgb_vals - colors_b*self._const) / 256)\n colors_r = rgb_vals - colors_b*self._const - colors_g*256\n colors = np.floor(np.asarray([colors_r, colors_g, colors_b])).T\n colors = colors.astype(np.uint8)\n\n pc = np.hstack([verts, colors])\n return pc", "def get_dense_points(self):\n return self.dense_points", "def simplify(self):\n def are_collinear(x, y, z):\n return (x.x * (y.y - z.y) + y.x * (z.y - x.y) + z.x * (x.y - y.y)) == 0\n\n new_points = self.points[:]\n for i in range(len(self.points)):\n if are_collinear(new_points[(i - 1) % len(new_points)], new_points[i % len(new_points)], new_points[(i + 1) % len(new_points)]):\n del new_points[i]\n return self.__class__(new_points)", "def find_simplex(self, points):\n disc = self.discretization\n rectangles = disc.state_to_rectangle(points)\n\n # Convert to unit coordinates\n points = disc._center_states(points, clip=True)\n\n # Convert to basic hyperrectangle coordinates and find simplex\n unit_coordinates = points % disc.unit_maxes\n simplex_ids = self.triangulation.find_simplex(unit_coordinates)\n simplex_ids = np.atleast_1d(simplex_ids)\n\n # Adjust for the hyperrectangle index\n simplex_ids += rectangles * self.triangulation.nsimplex\n\n return simplex_ids", "def collocation_points(self) -> np.ndarray:", "def coord_x(self) -> List[float]:\n if len(self.__points) == 0:\n return []\n if len(self.__points[0]) > 0:\n return [p[0] for p in self.points]", "def complex_calc_s(self, z):\n s11 = complex(self.z0, -z) / complex(self.z0, z)\n s21 = cmath.sqrt(self.z0/complex(0, z)) * (1 - np.absolute(s11))\n s22 = complex(-self.z0, z) / complex(self.z0, z)\n s12 = cmath.sqrt(self.z0/complex(0, z)) * (1 - np.absolute(s22))\n return s11, s12, 
s21, s22", "def GetPoints(self, *args):\n return _itkPointSetPython.itkPointSetD2S_GetPoints(self, *args)", "def scalars(self, name_or_idx=None, datatype=\"point\"):\n colors.printc(\"WARNING: scalars() is obsolete!\", c=1)\n colors.printc(\" : Use getArrayNames(), getPointArray(), getCellArray(),\", c=1)\n colors.printc(\" : addPointScalars() or addPointVectors() instead.\", c=1)\n #raise RuntimeError\n\n poly = self.polydata(False)\n\n # no argument: return list of available arrays\n if name_or_idx is None:\n ncd = poly.GetCellData().GetNumberOfArrays()\n npd = poly.GetPointData().GetNumberOfArrays()\n arrs = []\n for i in range(npd):\n #print(i, \"PointData\", poly.GetPointData().GetArrayName(i))\n arrs.append([\"PointData\", poly.GetPointData().GetArrayName(i)])\n for i in range(ncd):\n #print(i, \"CellData\", poly.GetCellData().GetArrayName(i))\n arrs.append([\"CellData\", poly.GetCellData().GetArrayName(i)])\n return arrs\n\n else: # return a specific array (and set it as active one)\n\n pdata = poly.GetPointData()\n arr = None\n\n if 'point' in datatype.lower():\n if isinstance(name_or_idx, int):\n name = pdata.GetArrayName(name_or_idx)\n else:\n name = name_or_idx\n if name:\n arr = pdata.GetArray(name)\n data = pdata\n self._mapper.SetScalarModeToUsePointData()\n\n\n if not arr or 'cell' in datatype.lower():\n cdata = poly.GetCellData()\n if isinstance(name_or_idx, int):\n name = cdata.GetArrayName(name_or_idx)\n else:\n name = name_or_idx\n if name:\n arr = cdata.GetArray(name)\n data = cdata\n self._mapper.SetScalarModeToUseCellData()\n\n if arr:\n data.SetActiveScalars(name)\n self._mapper.ScalarVisibilityOn()\n if settings.autoResetScalarRange:\n self._mapper.SetScalarRange(arr.GetRange())\n return vtk_to_numpy(arr)\n\n return None", "def _simplicial_(self):\n from sage.homology.simplicial_complex import SimplicialComplex\n simplices = []\n for C in self.maximal_cells():\n simplices.extend(C._triangulation_())\n return SimplicialComplex(simplices)", "def test_symmetrypoint_constructor_2d():\n X = SPoint((0.5, 0.3), \"X\")\n assert X.name == \"X\"\n assert X.point.shape == (2,)", "def point_projection(self, scene_point):\n dist = scene_point - self.position\n d = np.dot(dist, self.optical_axis())\n if d == 0:\n # to avoid explosion!!!\n d = np.finfo(np.float32).eps\n\n u = self.u0 + self.focal * np.dot(dist, self.horizontal_axis()) * self.bu / d\n v = self.v0 + self.focal * np.dot(dist, self.vertical_axis()) * self.bv / d\n return box_coord(u), box_coord(v)", "def voxelize(self, points, voxel_layer):\n voxels, coors, num_points = [], [], []\n for res in points:\n res_voxels, res_coors, res_num_points = voxel_layer(res)\n voxels.append(res_voxels)\n coors.append(res_coors)\n num_points.append(res_num_points)\n voxels = torch.cat(voxels, dim=0)\n num_points = torch.cat(num_points, dim=0)\n coors_batch = []\n for i, coor in enumerate(coors):\n coor_pad = F.pad(coor, (1, 0), mode='constant', value=i)\n coors_batch.append(coor_pad)\n coors_batch = torch.cat(coors_batch, dim=0)\n return voxels, num_points, coors_batch" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
decision_maker(self, gr_complex sample) -> unsigned int Returns the constellation point that matches best.
def decision_maker(self, *args, **kwargs): return _digital_swig.digital_constellation_sptr_decision_maker(self, *args, **kwargs)
[ "def Cuffme(rpoint, cuff_sites, strand):\n for cuff in cuff_sites:\n if int(cuff[1])-50 < rpoint < int(cuff[2])+50:\n return 1\n\n return 0", "def _calc_matching_prob(self):\n if not self.professional:\n return 1", "def selectXClassifierT(self):\r\n \r\n actionSetSize = len(self.clSet) \r\n tSize = int(actionSetSize*cons.theta_Select) # sets the number of items in the action set to be included in the tournament selection\r\n posList = []\r\n for i in range(tSize): #hold onto a list of random positions, then select the position with the highest fitness\r\n pos = randrange(actionSetSize)\r\n if pos in posList: # make sure that pos is a pos that has not yet been selected.\r\n pos = randrange(actionSetSize)\r\n else:\r\n posList.append(pos)\r\n \r\n bestF = 0\r\n bestC = 0\r\n for j in posList:\r\n if self.clSet[j].getFitness() > bestF:\r\n bestF = self.clSet[j].getFitness()\r\n bestC = j\r\n\r\n return self.clSet[bestC]", "def getDecisionThreshold(self) -> retval:\n ...", "def switch_strategy():\n doors, prize_pos = init_doors()\n\n choice = first_participant_choice()\n pos_shown = monty_shows(doors, prize_pos, choice)\n\n # The doors chosen by the participant and Monty are distinct and the\n # indices of the doors sum up to 0 + 1 + 2 = 3. So, the participant\n # chooses switches to the position (3 - choice - pos_shown)\n new_choice = 3 - choice - pos_shown\n\n if doors[new_choice] == 'c':\n return 1\n return 0", "def decide(self, observation, prev_decision, internal_state, learn=False):\r\n raise NotImplementedError", "def get_winner(self, logS=None, logS_MAX=None):\n\t\tN=self.N\n\t\tif N<1: return -1\n\t\tassert(len(self.P_w_a.shape) == 1)\n\t\tP_w_a = self.extern(self.P_w_a[:N]).copy() # make a copy because some entries will be reset\n\t\tif (logS_MAX is not None) and (logS_MAX < self.plus_inf):\n\t\t\tlogS_MAX = self.extern(logS_MAX)\n\t\t\tif logS is None: logS=self.logS\n\t\t\tlogS = self.extern(logS)[:N]\n\t\t\tP_w_a[logS > logS_MAX] = self.log0 # reset probabs where hypervolume > S_MAX\n\t\t\t\n\t\tj = argmax(P_w_a)\n\t\t\n\t\t# in degenerate cases when all p_a_w fields are zero then argmax returns 0\n\t\t# which would falsely yield to the conclusion that category j=0 is the winner\n\t\t# when in fact there is no winner, thus a new category needs to be created\n#\t\tprint 'P_w_a=',P_w_a\n\t\tassert(j<N)\n\t\tif self.logp[j] <= self.log0 or P_w_a[j] <= self.log0:\n\t\t\tj = -1\n\n\t\treturn j", "def my_candidate(self):\n if self.turn != 0:\n #print(\"CCCC\")\n new_result = [self.prev_candidate['score']] + self.prev_candidate['candidate'] \n self.data = np.vstack((self.data, new_result))\n X, y= self.data[:,1:], self.data[:,0]\n\n #print(\"CCCC222\")\n\n test_weights = minimize(fun=loss, x0=np.zeros(self.n), args=(X,y), constraints=self.con, bounds=self.bounds).x\n\n ga = SAT(test_weights, 50, 100, 0.95, 0.1)\n ga.evolve()\n\n #print(\"CCC3333\")\n tmp = ga.best_chrm.tolist()\n #print(\"1111111\", tmp)\n\n return ga.best_chrm.tolist()\n else:\n # print(\"CCC444\")\n X, y= self.data[:,1:], self.data[:,0]\n #print(\"CCC5555\")\n test_weights = minimize(fun=loss, x0=np.zeros(self.n), args=(X,y), constraints=self.con, bounds=self.bounds).x\n\n ga = SAT(test_weights, 50, 100, 0.95, 0.1)\n ga.evolve()\n\n #print(\"CCCC666\")\n\n tmp = ga.best_chrm.tolist()\n #print(\"222222222\", tmp)\n\n return tmp", "def _choose_initial_point(self) -> np.ndarray:\n if self._warm_start and self._fit_result is not None:\n self._initial_point = self._fit_result.x\n elif self._initial_point is None:\n 
self._initial_point = algorithm_globals.random.random(self._neural_network.num_weights)\n return self._initial_point", "def strategy(self):\n # first move \"scissors\" (2)\n if len(self.get_past_moves()) == 0:\n counter_play = 2\n #print(counter_play)\n return counter_play\n else: # predict the next move and counter it\n counter_play = self.counter(self.predict(self.get_opp_moves()))\n #print(counter_play)\n return counter_play", "def custom_heuristic(gameState):\r\n center_weight = 0.5\r\n lib_weight = 1.5\r\n own_loc = gameState.locs[self.player_id]\r\n opp_loc = gameState.locs[1- self.player_id]\r\n own_liberties = gameState.liberties(own_loc)\r\n opp_liberties = gameState.liberties(opp_loc)\r\n # Custom 1: distanceToCenter(own_loc)\r\n # Custom 2: len(own_liberties) - ( center_weight * distanceToCenter(own_loc) )\r\n # Custom 3: len(own_liberties) - ( len(opp_liberties) ) - ( center_weight * distanceToCenter(own_loc) ) \r\n # Custom 4: len(own_liberties) - ( lib_weight * len(opp_liberties) ) - ( center_weight * distanceToCenter(own_loc) )\r\n # Custom 5: ( lib_weight * (len(own_liberties) / len(opp_liberties)) - ( center_weight * distanceToCenter(own_loc)) )\r\n return ( lib_weight * (len(own_liberties) / len(opp_liberties)) - (center_weight * distanceToCenter(own_loc)) )", "def getBestSolutionValue(self) -> float:", "def select_pts(self):\n lst=[]\n for r in self.rewards:\n if r.earning_category_id==15:\n lst.append(r.reward_rate.points)\n if lst:\n return lst[0]\n else:\n return 0", "def choose_distractor(self, model, dict, threshold_func, params, banned):\n for surprisal in self.surprisals: # calculate desired surprisal thresholds\n self.surprisal_targets.append(max(params[\"min_abs\"], surprisal + params[\"min_delta\"]))\n # get us some distractor candidates\n min_length, max_length, min_freq, max_freq = threshold_func(self.words)\n distractor_opts = dict.get_potential_distractors(min_length, max_length, min_freq, max_freq, params)\n avoid=[]\n for word in self.words: #it's real awkward if the distractor is the same as the real word, so let's not do that\n avoid.append(strip_punct(word).lower())\n # initialize\n best_word = \"x-x-x\"\n best_min_surp = 0\n for dist in distractor_opts:\n if dist not in banned and dist not in avoid: # if we've already used it in this sentence set, don't bother\n good = True\n min_surp = 100\n for i in range(len(self.probs)): # check distractor candidate against each sentence's probs\n dist_surp = model.get_surprisal(self.probs[i], dist)\n if dist_surp < self.surprisal_targets[i]:\n good = False # it doesn't meet the target\n min_surp = min(min_surp, dist_surp) # but we should keep track of the lowest anyway\n if good: # stayed above all surprisal thresholds\n self.distractor = dist # we're done, yay!\n return self.distractor\n if min_surp > best_min_surp: # best so far\n best_min_surp = min_surp\n best_word = dist\n logging.warning(\"Could not find a word to meet threshold for item %s, label %s, returning %s with %d min surp instead\",\n self.id, self.lab, best_word, best_min_surp)\n self.distractor = best_word\n return self.distractor", "def recommend_sensing(self):\n # Enter your code and remove the statement below\n # should select from unobserved location\n # unobserved = open: with largest probability\n best_unobserved = max(self.open, key=lambda position: self.tprob[position])\n # if max is 0, then unobserved ones all have zero probabilities\n if self.tprob[best_unobserved] != 0:\n return best_unobserved\n # directly using max, will return the 
max key in a dictionary\n # should find observed locations instead of all positions\n # all locations - unobserved locations = observed locations\n # tprob.keys() is views, which can be used as set\n best_observed = max(self.tprob.keys() - self.open, key=lambda position: self.tprob[position])\n return utils.closest_point(best_observed, self.open)", "def covers(self, proposal, min_match, nhood=None):\n max_match = 0\n template = None\n\n # find templates in the bank \"near\" this tmplt\n prop_nhd = getattr(proposal, self.nhood_param)\n if not nhood:\n low, high = _find_neighborhood(self._nhoods, prop_nhd, self.nhood_size)\n tmpbank = self._templates[low:high]\n else:\n tmpbank = nhood\n if not tmpbank: return (max_match, template)\n\n # sort the bank by its nearness to tmplt in mchirp\n # NB: This sort comes up as a dominating cost if you profile,\n # but it cuts the number of match evaluations by 80%, so turns out\n # to be worth it even for metric match, where matches are cheap.\n tmpbank.sort(key=lambda b: abs( getattr(b, self.nhood_param) - prop_nhd))\n\n # set parameters of match calculation that are optimized for this block\n df_end, f_max = get_neighborhood_df_fmax(tmpbank + [proposal], self.flow)\n if self.fhigh_max:\n f_max = min(f_max, self.fhigh_max)\n df_start = max(df_end, self.iterative_match_df_max)\n\n # find and test matches\n for tmplt in tmpbank:\n\n self._nmatch += 1\n df = df_start\n match_last = 0\n\n if self.coarse_match_df:\n # Perform a match at high df to see if point can be quickly\n # ruled out as already covering the proposal\n PSD = get_PSD(self.coarse_match_df, self.flow, f_max, self.noise_model)\n match = self.compute_match(tmplt, proposal, self.coarse_match_df,\n PSD=PSD)\n if match == 0:\n err_msg = \"Match is 0. This might indicate that you have \"\n err_msg += \"the df value too high. Please try setting the \"\n err_msg += \"coarse-value-df value lower.\"\n # FIXME: This could be dealt with dynamically??\n raise ValueError(err_msg)\n\n if (1 - match) > 0.05 + (1 - min_match):\n continue\n\n while df >= df_end:\n\n PSD = get_PSD(df, self.flow, f_max, self.noise_model)\n match = self.compute_match(tmplt, proposal, df, PSD=PSD)\n if match == 0:\n err_msg = \"Match is 0. This might indicate that you have \"\n err_msg += \"the df value too high. 
Please try setting the \"\n err_msg += \"iterative-match-df-max value lower.\"\n # FIXME: This could be dealt with dynamically??\n raise ValueError(err_msg)\n\n # if the result is a really bad match, trust it isn't\n # misrepresenting a good match\n if (1 - match) > 0.05 + (1 - min_match):\n break\n\n # calculation converged\n if match_last > 0 and abs(match_last - match) < 0.001:\n break\n\n # otherwise, refine calculation\n match_last = match\n df /= 2.0\n\n if match > min_match:\n return (match, tmplt)\n\n # record match and template params for highest match\n if match > max_match:\n max_match = match\n template = tmplt\n\n return (max_match, template)", "def pick_threshold(self):", "def knn_predict(new_point, points, point_classes, k=5):\n k_nearest = find_nearest_neighbors(new_point, points, k)\n return majority_vote(point_classes[k_nearest])", "def draw_candidate(self, arr_ind, do_propMH):\n self.aCorr = 1\n if self.stepType == 'Uniform':\n cand = self.parent.val[arr_ind] + self.stepParam[arr_ind] * np.random.uniform(-0.5, 0.5)\n elif self.stepType == 'BetaRho':\n cand = np.exp(-0.25 * self.parent.val[arr_ind]) + self.stepParam[arr_ind] * np.random.uniform(-0.5, 0.5)\n if cand <= 0:\n cand = np.inf\n else:\n cand = -4 * np.log(cand)\n elif self.stepType == 'PropMH':\n if do_propMH:\n cval = self.parent.val[arr_ind]\n w = np.max([1, cval/3])\n dval = cval + w * np.random.uniform(-1, 1)\n w1 = np.max([1, dval/3])\n if cval > (dval+w1):\n aCorr = False # never will accept in this case\n else:\n aCorr = w/w1\n cand = dval\n self.aCorr = aCorr\n else:\n cand = self.parent.val[arr_ind] + self.stepParam[arr_ind] * np.random.uniform(-0.5, 0.5)\n else:\n raise Exception('Unknown stepType')\n return cand", "def get_best(self, dataset, sample_size): # always according to accuracy\n # create dev-set\n data_idx = range(len(dataset))\n np.random.shuffle(data_idx)\n dataset_sample = [dataset[i] for i in np.random.choice(data_idx, sample_size)]\n\n # evaluate elitism\n ops = [x[1] for x in self.elitism]\n ls = [(mlp.check_on_dataset(dataset_sample)[0], mlp) for mlp in ops]\n ls.sort(key=lambda a: a[0])\n return ls[-1][1]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
rotational_symmetry(self) -> unsigned int Returns the order of rotational symmetry.
def rotational_symmetry(self): return _digital_swig.digital_constellation_sptr_rotational_symmetry(self)
[ "def get_symmetry_number(self):\n if self.symmetry_number < 1:\n cython.declare(resonanceHybrid=Molecule, maxSymmetryNum=cython.short)\n resonance_hybrid = self.get_resonance_hybrid()\n try:\n self.symmetry_number = resonance_hybrid.get_symmetry_number()\n except KeyError:\n logging.error('Wrong bond order generated by resonance hybrid.')\n logging.error('Resonance Hybrid: {}'.format(resonance_hybrid.to_adjacency_list()))\n for index, mol in enumerate(self.molecule):\n logging.error(\"Resonance Structure {}: {}\".format(index, mol.to_adjacency_list()))\n raise\n return self.symmetry_number", "def sym_z(self):\n return self._sym_z", "def symmetry_rotation(self, bond_to_rotate, normal_direction, angles):\n\t\tpass", "def use_symmetry(self):\n symmetry = self.params[PARAM_SYMMETRY]\n if symmetry is None:\n #Default to false if no parameter.\n return False\n else:\n return symmetry.use_symmetry", "def _derive_layout_symmetry(self):\n self._sym_df = None # Default option\n if self.exploit_layout_symmetry:\n # Check symmetry of bounds & turbine_weights\n if np.unique(self.minimum_yaw_angle, axis=0).shape[0] > 1:\n print(\"minimum_yaw_angle is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n if np.unique(self.maximum_yaw_angle, axis=0).shape[0] > 1:\n print(\"maximum_yaw_angle is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n if np.unique(self.maximum_yaw_angle, axis=0).shape[0] > 1:\n print(\"maximum_yaw_angle is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n if np.unique(self.turbine_weights, axis=0).shape[0] > 1:\n print(\"turbine_weights is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n # Check if turbine_weights are consistently 1.0 everywhere\n if np.any(np.abs(self.turbine_weights - 1.0) > 0.001):\n print(\"turbine_weights are not uniformly 1.0.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n x = self.fi.layout_x\n y = self.fi.layout_y\n df = find_layout_symmetry(x=x, y=y)\n\n # If no axes of symmetry, exit function\n if df.shape[0] <= 0:\n print(\"Wind farm layout in floris is not symmetrical.\")\n print(\"Exploitation of symmetry has been disabled.\")\n return\n\n wd_array = self.fi.floris.flow_field.wind_directions\n sym_step = df.iloc[0][\"wd_range\"][1]\n if ((0.0 not in wd_array) or(sym_step not in wd_array)):\n print(\"Floris wind direction array does not \" +\n \"intersect {:.1f} and {:.1f}.\".format(0.0, sym_step))\n print(\"Exploitation of symmetry has been disabled.\")\n return\n\n ids_minimal = (wd_array >= 0.0) & (wd_array < sym_step)\n wd_array_min = wd_array[ids_minimal]\n wd_array_remn = np.remainder(wd_array, sym_step)\n\n if not np.all([(x in wd_array_min) for x in wd_array_remn]):\n print(\"Wind direction array appears irregular.\")\n print(\"Exploitation of symmetry has been disabled.\")\n\n self._sym_mapping_extrap = np.array(\n [np.where(np.abs(x - wd_array_min) < 0.0001)[0][0]\n for x in wd_array_remn], dtype=int)\n\n self._sym_mapping_reduce = copy.deepcopy(ids_minimal)\n self._sym_df = df\n\n return", "def getSymmetryMatrix(*args, **kwargs):\n \n pass", "def is_symmetric(self):\n return self._alph1 == self._alph2 \\\n and np.array_equal(self._matrix, np.transpose(self._matrix))", "def rotationOrder(self):\n\t\treturn 0", "def is_symmetric(self):\n M = self.parent().realization_of().Monomial()\n return 
M(self).is_symmetric()", "def sym_y(self):\n return self._sym_y", "def test_get_symmetry_number(self):\n\n mol = Molecule().from_smiles('C')\n\n self.assertEquals(12, mol.get_symmetry_number())\n\n empty = Molecule()\n self.assertEquals(1, empty.get_symmetry_number())", "def symbology(self):\n\n\t\tif ARCMAP and self.layer_object.symbologyType == \"OTHER\":\n\t\t\traise NotSupportedError(\"Unsupported symbology type in ArcMap\")\n\n\t\treturn self.layer_object.symbology", "def _GetSymmetriesOnModes(symmetries, structure, pol_vects):\n\n # Get the vector of the displacement in the polarization\n m = np.tile(structure.get_masses_array(), (3,1)).T.ravel()\n disp_v = np.einsum(\"im,i->mi\", pol_vects, 1 / np.sqrt(m))\n underdisp_v = np.einsum(\"im,i->mi\", pol_vects, np.sqrt(m))\n\n n_dim, n_modes = np.shape(pol_vects)\n\n n_sym = len(symmetries)\n nat = structure.N_atoms\n \n # For each symmetry operation apply the\n pol_symmetries = np.zeros((n_sym, n_modes, n_modes), dtype = np.float64)\n for i, sym_mat in enumerate(symmetries):\n irt = GetIRT(structure, sym_mat)\n \n for j in range(n_modes):\n # Apply the i-th symmetry to the j-th mode\n new_vector = ApplySymmetryToVector(sym_mat, disp_v[j, :].reshape((nat, 3)), structure.unit_cell, irt).ravel()\n pol_symmetries[i, :, j] = underdisp_v.dot(new_vector.ravel())\n\n return pol_symmetries", "def orthogonalise_sym(vectors):\n ang = vec_angle(vectors[0],vectors[1])\n remainder = 90 - ang\n disp = remainder/2\n perp_unnormal = np.cross(vectors[0],vectors[1])\n normal = perp_unnormal / np.linalg.norm(perp_unnormal)\n\n rot_1 = rotation_matrix(normal,-disp)\n rot_2 = rotation_matrix(normal,disp)\n\n ovec_1 = np.dot(rot_1,vectors[0])\n ovec_2 = np.dot(rot_2,vectors[1])\n\n o_vecs = np.array([ovec_1,ovec_2])\n return o_vecs", "def rotation_coefs(self):\n return [np.cos(self.bearing_rads),\n np.sin(self.bearing_rads),\n -1.0*np.sin(self.bearing_rads),\n np.cos(self.bearing_rads)]", "def getRawSymmetryMatrix(*args, **kwargs):\n \n pass", "def GetSymmetryMatrix(sym, structure, crystal = False):\n\n # Get the IRT array\n irt = GetIRT(structure, sym)\n\n nat = structure.N_atoms\n sym_mat = np.zeros((3 * nat, 3*nat), dtype = np.double)\n\n # Comvert the symmetry matrix in cartesian\n if not crystal:\n sym_cryst = Methods.convert_matrix_cart_cryst2(sym[:,:3], structure.unit_cell, cryst_to_cart = True)\n else:\n sym_cryst = sym[:,:3]\n\n # Correctly fill the atomic position of sym_mat\n for i in range(nat):\n i_irt = irt[i]\n sym_mat[3 * i_irt : 3*i_irt+3, 3*i : 3*i+ 3] = sym_cryst\n\n return sym_mat", "def is_symmetrical(self):\r\n\r\n if (not self.is_square()):\r\n raise ErrorInMatrix('This matrix is not square')\r\n else:\r\n for i in range(self.row):\r\n for j in range(self.col):\r\n if self.mat[i][j] != self.mat[j][i]:\r\n return False\r\n else:\r\n return True", "def custom_score_symmetry(game, player):\n if game.is_winner(player):\n return float(\"inf\")\n\n if game.is_loser(player):\n return float(\"-inf\")\n\n # for odd moves, examine if symmetry could be broken\n if (game.move_count % 2 == 1 and # second player's turn\n game.__active_player__ == player): # we're up - we went second\n # for future moves, consider if we can copy\n for move in game.get_legal_moves(game.__active_player__):\n if is_symmetric(game.forecast_move(move)):\n # symmetry can be maintained, this is a good state for 2nd player\n return 100\n\n # return 100 if we're second and can copy the opponent's move\n if (game.move_count % 2 == 0 and # our move followed our opponent\n 
game.__active_player__ != player and # it's the opponent's move\n is_symmetric(game)): # we made the board symmetric\n return 100\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n\n own_pos = game.get_player_location(player)\n opp_pos = game.get_player_location(game.get_opponent(player))\n\n return float(own_moves - opp_moves)", "def retr_symmetry_operations(struct,ini):\n ini[\"symgen\"] = struct.get_symmetry_operations()\n return ini" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
points(self) -> gr_complex_vector Returns the set of points in this constellation.
def points(self): return _digital_swig.digital_constellation_points(self)
[ "def points(self):\n if self._points is None:\n _points = self.soma.points.tolist()\n for n in self.neurites:\n _points.extend(n.points.tolist())\n self._points = np.array(_points)\n\n return self._points", "def points(self):\n return [self.point1, self.point2]", "def get_points(self) -> List[Point]:\n return [self.first, self.second, self.third]", "def all_points(self):\n point_list = []\n for inner_dict in self._points_dictionary().values():\n point_list.extend(inner_dict.values())\n return point_list", "def points(self):\n points = []\n if self.locations.count():\n points = [(loc.lat, loc.lng) for loc in self.locations.all()]\n elif self.places.count():\n # We need to track the geolevel of the first place we've found\n # with a boundary so we can try to add points for all other\n # places at that geolevel\n point_geolevel = None\n # Loop through related places looking at smaller geographies \n # first\n for place in self.places.all().order_by('-geolevel__level'):\n if place.boundary:\n # Place has a geometry associated with it\n centroid = place.boundary.centroid\n if not point_geolevel:\n points.append((centroid.y, centroid.x))\n point_geolevel = place.geolevel\n else:\n if place.geolevel == point_geolevel:\n points.append((centroid.y, centroid.x))\n else:\n # We've exhausted all the points at the \n # lowest geolevel. Quit.\n break\n\n # TODO: Decide if we should check non-explicit places\n\n return points", "def getAllPoints(self):\n allPoints = set(itertools.chain((route[1] for route in self.distances.keys()),\n (route[0] for route in self.distances.keys())))\n return allPoints", "def pointvectors(self):\n return np.stack([self.x, self.y], axis=-1)", "def deck_points(self) -> List[Point]:\n return [\n Point(x=x, y=0, z=z)\n for _, (x, y, z) in self.values(point=True)\n if np.isclose(y, 0)\n ]", "def get_data_points(self):\n return self._points", "def GetPoints(self, *args):\n return _itkPointSetPython.itkPointSetD2S_GetPoints(self, *args)", "def getPoints(self) -> \"SoPointDetail *\":\n return _coin.SoFaceDetail_getPoints(self)", "def getCoveredPoints(self):\n\t\treturn []", "def vertex_set(self):\n return set(self.vertices())", "def GetPoints(self, *args):\n return _itkPointSetPython.itkPointSetD3S_GetPoints(self, *args)", "def get_crop_points(self) -> list:\n points = []\n for point in self.__save_points:\n points.append((point[0][1], point[0][0], point[1][1], point[1][0]))\n return points", "def getArticulationPoints(self) -> List[java.awt.geom.Point2D]:\n ...", "def __unique_points(self):\n \n b = numpy.ascontiguousarray(self.points).view(numpy.dtype((numpy.void, self.points.dtype.itemsize * self.points.shape[1])))\n unique_points = numpy.unique(b).view(self.points.dtype).reshape(-1, self.points.shape[1])\n \n self.points = unique_points", "def get_points(self):\n if self._is_horizontal():\n return [Point(self.from_point.x, y) for y in range(self.from_point.y, self.to_point.y + 1)]\n elif self._is_vertical():\n return [Point(x, self.from_point.y) for x in range(self.from_point.x, self.to_point.x + 1)]\n else:\n raise NotImplementedError(\"Only horizontal and vertical lines are implemented so far\")", "def _points_dictionary(self):\n return self._points", "def vertices(self):\n return self._vertex_set" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
s_points(self) -> gr_complex_vector Returns the vector of points in this constellation. Raise error if dimensionality is not one.
def s_points(self): return _digital_swig.digital_constellation_s_points(self)
[ "def points(self):\n if self._points is None:\n _points = self.soma.points.tolist()\n for n in self.neurites:\n _points.extend(n.points.tolist())\n self._points = np.array(_points)\n\n return self._points", "def soma_points(self):\n db = self.data_block\n return db[db[:, COLS.TYPE] == POINT_TYPE.SOMA]", "def complex(self):\n offset = np.cumsum([0] + self.n_elements)[:-1]\n from pycomplex.complex.simplicial.spherical import ComplexSpherical2\n return ComplexSpherical2(\n vertices=np.concatenate(self.vertices, axis=0),\n simplices=self.triangles + offset\n )", "def pointvectors(self):\n return np.stack([self.x, self.y], axis=-1)", "def connected_component(self, simplex=None):\n if self.dimension() == -1:\n raise ValueError(\"the empty simplicial complex has no connected components.\")\n if simplex is None:\n v = self.vertices()[0]\n else:\n v = simplex[0]\n vertices = self.graph().connected_component_containing_vertex(v)\n facets = [f for f in self.facets() if f.is_face(Simplex(vertices))]\n return SimplicialComplex(facets)", "def get_ps(self,answer_complex=False):\n\n ps = []\n \n for p in self.ps:\n if answer_complex:\n ps.append(p.numpy()[0] + 1j*p.numpy()[1])\n ps.append(p.numpy()[0] - 1j*p.numpy()[1])\n else:\n ps.append(tuple(p.numpy()))\n\n return ps", "def points(self):\n return [self.point1, self.point2]", "def extract_point_cloud(self):\n tsdf_vol = self._tsdf_vol.cpu().numpy()\n color_vol = self._color_vol.cpu().numpy()\n vol_origin = self._vol_origin.cpu().numpy()\n\n # Marching cubes\n verts = measure.marching_cubes_lewiner(tsdf_vol, level=0)[0]\n verts_ind = np.round(verts).astype(int)\n verts = verts*self._voxel_size + vol_origin\n\n # Get vertex colors\n rgb_vals = color_vol[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]]\n colors_b = np.floor(rgb_vals / self._const)\n colors_g = np.floor((rgb_vals - colors_b*self._const) / 256)\n colors_r = rgb_vals - colors_b*self._const - colors_g*256\n colors = np.floor(np.asarray([colors_r, colors_g, colors_b])).T\n colors = colors.astype(np.uint8)\n\n pc = np.hstack([verts, colors])\n return pc", "def get_dense_points(self):\n return self.dense_points", "def simplify(self):\n def are_collinear(x, y, z):\n return (x.x * (y.y - z.y) + y.x * (z.y - x.y) + z.x * (x.y - y.y)) == 0\n\n new_points = self.points[:]\n for i in range(len(self.points)):\n if are_collinear(new_points[(i - 1) % len(new_points)], new_points[i % len(new_points)], new_points[(i + 1) % len(new_points)]):\n del new_points[i]\n return self.__class__(new_points)", "def find_simplex(self, points):\n disc = self.discretization\n rectangles = disc.state_to_rectangle(points)\n\n # Convert to unit coordinates\n points = disc._center_states(points, clip=True)\n\n # Convert to basic hyperrectangle coordinates and find simplex\n unit_coordinates = points % disc.unit_maxes\n simplex_ids = self.triangulation.find_simplex(unit_coordinates)\n simplex_ids = np.atleast_1d(simplex_ids)\n\n # Adjust for the hyperrectangle index\n simplex_ids += rectangles * self.triangulation.nsimplex\n\n return simplex_ids", "def collocation_points(self) -> np.ndarray:", "def coord_x(self) -> List[float]:\n if len(self.__points) == 0:\n return []\n if len(self.__points[0]) > 0:\n return [p[0] for p in self.points]", "def complex_calc_s(self, z):\n s11 = complex(self.z0, -z) / complex(self.z0, z)\n s21 = cmath.sqrt(self.z0/complex(0, z)) * (1 - np.absolute(s11))\n s22 = complex(-self.z0, z) / complex(self.z0, z)\n s12 = cmath.sqrt(self.z0/complex(0, z)) * (1 - np.absolute(s22))\n return s11, s12, 
s21, s22", "def GetPoints(self, *args):\n return _itkPointSetPython.itkPointSetD2S_GetPoints(self, *args)", "def scalars(self, name_or_idx=None, datatype=\"point\"):\n colors.printc(\"WARNING: scalars() is obsolete!\", c=1)\n colors.printc(\" : Use getArrayNames(), getPointArray(), getCellArray(),\", c=1)\n colors.printc(\" : addPointScalars() or addPointVectors() instead.\", c=1)\n #raise RuntimeError\n\n poly = self.polydata(False)\n\n # no argument: return list of available arrays\n if name_or_idx is None:\n ncd = poly.GetCellData().GetNumberOfArrays()\n npd = poly.GetPointData().GetNumberOfArrays()\n arrs = []\n for i in range(npd):\n #print(i, \"PointData\", poly.GetPointData().GetArrayName(i))\n arrs.append([\"PointData\", poly.GetPointData().GetArrayName(i)])\n for i in range(ncd):\n #print(i, \"CellData\", poly.GetCellData().GetArrayName(i))\n arrs.append([\"CellData\", poly.GetCellData().GetArrayName(i)])\n return arrs\n\n else: # return a specific array (and set it as active one)\n\n pdata = poly.GetPointData()\n arr = None\n\n if 'point' in datatype.lower():\n if isinstance(name_or_idx, int):\n name = pdata.GetArrayName(name_or_idx)\n else:\n name = name_or_idx\n if name:\n arr = pdata.GetArray(name)\n data = pdata\n self._mapper.SetScalarModeToUsePointData()\n\n\n if not arr or 'cell' in datatype.lower():\n cdata = poly.GetCellData()\n if isinstance(name_or_idx, int):\n name = cdata.GetArrayName(name_or_idx)\n else:\n name = name_or_idx\n if name:\n arr = cdata.GetArray(name)\n data = cdata\n self._mapper.SetScalarModeToUseCellData()\n\n if arr:\n data.SetActiveScalars(name)\n self._mapper.ScalarVisibilityOn()\n if settings.autoResetScalarRange:\n self._mapper.SetScalarRange(arr.GetRange())\n return vtk_to_numpy(arr)\n\n return None", "def _simplicial_(self):\n from sage.homology.simplicial_complex import SimplicialComplex\n simplices = []\n for C in self.maximal_cells():\n simplices.extend(C._triangulation_())\n return SimplicialComplex(simplices)", "def test_symmetrypoint_constructor_2d():\n X = SPoint((0.5, 0.3), \"X\")\n assert X.name == \"X\"\n assert X.point.shape == (2,)", "def point_projection(self, scene_point):\n dist = scene_point - self.position\n d = np.dot(dist, self.optical_axis())\n if d == 0:\n # to avoid explosion!!!\n d = np.finfo(np.float32).eps\n\n u = self.u0 + self.focal * np.dot(dist, self.horizontal_axis()) * self.bu / d\n v = self.v0 + self.focal * np.dot(dist, self.vertical_axis()) * self.bv / d\n return box_coord(u), box_coord(v)", "def voxelize(self, points, voxel_layer):\n voxels, coors, num_points = [], [], []\n for res in points:\n res_voxels, res_coors, res_num_points = voxel_layer(res)\n voxels.append(res_voxels)\n coors.append(res_coors)\n num_points.append(res_num_points)\n voxels = torch.cat(voxels, dim=0)\n num_points = torch.cat(num_points, dim=0)\n coors_batch = []\n for i, coor in enumerate(coors):\n coor_pad = F.pad(coor, (1, 0), mode='constant', value=i)\n coors_batch.append(coor_pad)\n coors_batch = torch.cat(coors_batch, dim=0)\n return voxels, num_points, coors_batch" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
decision_maker_v(self, gr_complex_vector sample) -> unsigned int Takes a vector rather than a pointer. Better for SWIG wrapping.
def decision_maker_v(self, *args, **kwargs): return _digital_swig.digital_constellation_decision_maker_v(self, *args, **kwargs)
[ "def Vector(*args, **kwargs): # real signature unknown\r\n pass", "def make_sparse_vector(*args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *args):\n this = _digital_swig.new_unsigned_int_vector(*args)\n try: self.this.append(this)\n except: self.this = this", "def vtkVector(*args, **kwargs):\n ...", "def svd(self, some=True, compute_uv=True): # real signature unknown; restored from __doc__\n pass", "def __init__(self, *args):\n _vnl_vectorPython.vnl_vector_vcl_complexD_swiginit(self,_vnl_vectorPython.new_vnl_vector_vcl_complexD(*args))", "def GetVectorResult(self):\n ...", "def __init__(self, *args):\n _ida_pro.boolvec_t_swiginit(self, _ida_pro.new_boolvec_t(*args))", "def __init__(self, *args):\n _vnl_vectorPython.vnl_vector_vcl_complexF_swiginit(self,_vnl_vectorPython.new_vnl_vector_vcl_complexF(*args))", "def __init__(self, *args):\n _vnl_vectorPython.vnl_vectorUL_swiginit(self,_vnl_vectorPython.new_vnl_vectorUL(*args))", "def __init__(self, *args):\n _itkImagePython.vectoritkImageVD44_swiginit(self, _itkImagePython.new_vectoritkImageVD44(*args))", "def __init__(self, *args):\n _vnl_vectorPython.vnl_vector_vcl_complexLD_swiginit(self,_vnl_vectorPython.new_vnl_vector_vcl_complexLD(*args))", "def __getitem__(self, *args):\n return _digital_swig.unsigned_int_vector___getitem__(self, *args)", "def __idiv__(self, *args):\n return _vnl_vectorPython.vnl_vector_vcl_complexD___idiv__(self, *args)", "def __init__(self, *args):\n _vnl_vectorPython.vnl_vectorSC_swiginit(self,_vnl_vectorPython.new_vnl_vectorSC(*args))", "def __idiv__(self, *args):\n return _vnl_vectorPython.vnl_vector_vcl_complexF___idiv__(self, *args)", "def classify(self, feature_vector):\n raise NotImplementedError()", "def __idiv__(self, *args):\n return _vnl_vectorPython.vnl_vector_vcl_complexLD___idiv__(self, *args)", "def _click_vector(self, relevance, nr_docs):\n raise NotImplementedError", "def __getitem__(self, *args) -> \"std::vector< itkImageVD44_Pointer >::value_type const &\":\n return _itkImagePython.vectoritkImageVD44___getitem__(self, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
map_to_points_v(self, unsigned int value) -> gr_complex_vector
def map_to_points_v(self, *args, **kwargs): return _digital_swig.digital_constellation_map_to_points_v(self, *args, **kwargs)
[ "def pointvectors(self):\n return np.stack([self.x, self.y], axis=-1)", "def project_complex_point_to_canvas(self, z):\n return self.project_point_to_canvas([ z.real, z.imag ])", "def point_at(self, u, v):\n point = self.rhino_surface.PointAt(u, v)\n return point_to_compas(point)", "def coordinate_vector(self, v):\n\n raise NotImplementedError", "def AddPoints(self, points, allowdups = False):\n\n vmap = [0] * len(points.pos)\n for i in range(len(points.pos)):\n vmap[i] = self.AddPoint(points.pos[i], allowdups)\n return vmap", "def vec2mapcoords(v, to_crs=None, from_crs=None):\n to_crs = to_crs or ccrs.Geodetic(globe=unit_sphere)\n from_crs = from_crs or to_crs.as_geocentric()\n v = np.asanyarray(v)\n ret = to_crs.transform_points(from_crs, x=v[:,0], y=v[:,1], z=v[:,2])\n return ret", "def transform(self, points, func):\n pts = np.zeros(points.shape, points.dtype)\n for i, r in enumerate(points):\n pts[i, :] = func(r)\n return pts", "def pick_points_on_shape(self):\r\n a = self.a \r\n N = 81 # number of vertices\r\n t = np.linspace(-4,4,N)\r\n verts = np.zeros((N,2))\r\n verts[:,0] = a*(np.abs(t))**3 - 1.0\r\n verts[:,1] = t\r\n return t, verts", "def get_camera_points(self, fov, min_depth, max_depth, distance_steps):\n \n # Hard-coded camera values - about 50 points \n # This results in a hard-coded set of points, which are visible in the frame of the uav\n point_list = []\n max_angle = 0.50*fov \n min_angle = (0.50*fov)*-1\n depth_range = max_depth-min_depth\n dist_step_size = (max_depth-min_depth)/distance_steps\n resolution = dist_step_size\n \n for dist in np.arange(min_depth, max_depth+dist_step_size/2, dist_step_size):\n # to reduce the number of points per arc\n no_of_points = np.ceil((dist * fov / resolution)) \n angle_step_size = fov/no_of_points\n if no_of_points > 1:\n angle_step_size_lower = (max_angle-min_angle)/(no_of_points-1)\n angle_step_size_upper = (max_angle-min_angle)/(no_of_points+1)\n step_sizes = (angle_step_size_lower, angle_step_size, angle_step_size_upper)\n candidates = ((angle_step_size_lower-resolution)**2, (angle_step_size-resolution)**2, (angle_step_size_upper-resolution)**2)\n angle_step_size = step_sizes[np.argmin(candidates)]\n\n for angle in np.arange(min_angle,max_angle+angle_step_size/2,angle_step_size):\n x = dist * math.cos(angle) \n y = dist * math.sin(angle)\n point = (x, y, 1)\n point_list.append(point)\n \n point_array = np.asarray(point_list)\n return point_array", "def from_vector_coord(size, point):\n return complex(\n point[0]/size[0] - 1,\n point[1]/size[1] - 1\n )", "def getPointsAtUV(*args, **kwargs):\n \n pass", "def convert_to_points(b):\n return np.array(zip(list(b[0]), list(b[1])))", "def points(self):\n if self._points is None:\n _points = self.soma.points.tolist()\n for n in self.neurites:\n _points.extend(n.points.tolist())\n self._points = np.array(_points)\n\n return self._points", "def points(self):\n return [self.point1, self.point2]", "def pix_coords(\n points:list,\n window:pygs.Window,\n pcsys:dict = pcsys\n ) -> list:\n return [pix_coord(point, window, pcsys) for point in points]", "def getArticulationPoints(self) -> List[java.awt.geom.Point2D]:\n ...", "def collocation_points(self) -> np.ndarray:", "def render_points(self, point_list):\n (height, width, channel) = self.map_image.shape\n self.width_ratio = width / float(self.map_width)\n self.height_ratio = height / float(self.map_height)\n if isinstance(self.output, type(None)):\n self.output = self.map_image.copy()\n for point_x, point_y, color in point_list:\n 
cv2.circle(self.output, (int(point_x * self.width_ratio), int(point_y * self.height_ratio)), 3, color, 3)", "def point_cloud_to_panorama(points, v_res=0.42, h_res=0.35, v_fov=(-24.9, 2.0),\n d_range=(0, 100), y_fudge=3, side_range=(-20., 20.),\n fwd_range=(0.,40), height_range=(-2, 0.4)):\n # side_range = (-30., 30.)\n # fwd_range = (0., 60)\n # height_range = (-2, 0.4) #\n xi_points = points[:, 0]\n yi_points = points[:, 1]\n zi_points = points[:, 2]\n reflectance = points[:, 3]\n\n f_filt = np.logical_and(\n (xi_points > fwd_range[0]), (xi_points < fwd_range[1]))\n s_filt = np.logical_and(\n (yi_points > -side_range[1]), (yi_points < -side_range[0]))\n filter = np.logical_and(f_filt, s_filt)\n z_filt = np.logical_and((zi_points >= height_range[0]),\n (zi_points < height_range[1]))\n zfilter = np.logical_and(filter, z_filt)\n indices = np.argwhere(zfilter).flatten()\n print 'indice size'\n print indices.size\n\n x_points = xi_points[indices]\n print 'xi_points'\n print x_points\n y_points = yi_points[indices]\n z_points = zi_points[indices]\n r_points = reflectance[indices]\n r_max = max(r_points)\n z_max = max(z_points)\n r_min = min(r_points)\n z_min = min(z_points)\n\n # Projecting to 2D\n # x_points = points[:, 0]\n # y_points = points[:, 1]\n # z_points = points[:, 2]\n # r_points = points[:, 3]\n\n # d_points = np.sqrt(x_points ** 2 + y_points ** 2) # map distance relative to origin\n # print 'd_points size', len(d_points)\n d_points = np.sqrt(x_points ** 2 + y_points ** 2 + z_points ** 2) # abs distance\n # d_points = r_points\n # d_points = z_points\n\n # d_points = np.zeros(indices.size)\n # for i in range(indices.size):\n # d_points[i] = z_points[i]\n\n # We use map distance, because otherwise it would not project onto a cylinder,\n # instead, it would map onto a segment of slice of a sphere.\n\n # RESOLUTION AND FIELD OF VIEW SETTINGS\n v_fov_total = -v_fov[0] + v_fov[1]\n\n # CONVERT TO RADIANS\n v_res_rad = v_res * (np.pi / 180)\n h_res_rad = h_res * (np.pi / 180)\n\n # MAPPING TO CYLINDER\n de_points = np.sqrt(x_points ** 2 + y_points ** 2)\n x_img = np.arctan2(y_points, x_points) / h_res_rad\n y_img = -(np.arctan2(z_points, de_points) / v_res_rad)\n\n # THEORETICAL MAX HEIGHT FOR IMAGE\n d_plane = (v_fov_total / v_res) / (v_fov_total * (np.pi / 180))\n h_below = d_plane * np.tan(-v_fov[0] * (np.pi / 180))\n h_above = d_plane * np.tan(v_fov[1] * (np.pi / 180))\n y_max = int(np.ceil(h_below + h_above + y_fudge))\n\n # SHIFT COORDINATES TO MAKE 0,0 THE MINIMUM\n x_min = -180.0 / h_res / 2\n x_img = np.trunc(-x_img - x_min).astype(np.int32)\n x_max = int(np.ceil(180.0 / h_res))\n\n y_min = -((v_fov[1] / v_res) + y_fudge)\n y_img = np.trunc(y_img - y_min).astype(np.int32)\n\n # CLIP DISTANCES\n d_points = np.clip(d_points, a_min=d_range[0], a_max=d_range[1])\n\n # CONVERT TO IMAGE ARRAY\n img = np.ones([y_max + 1, x_max + 1, 3], dtype=np.uint8)*255\n distance = np.sqrt(x_points ** 2 + y_points ** 2 + z_points ** 2)\n dis_max = max(distance)\n dis_min = min(distance)\n img[y_img, x_img, 0] = scale_to_255(distance, min=dis_min, max=dis_max)\n img[y_img, x_img, 1] = scale_to_255(z_points, min=z_min, max=z_max)\n img[y_img, x_img, 2] = scale_to_255(r_points, min=r_min, max=r_max)\n return img", "def extract_point_cloud(self):\n tsdf_vol = self._tsdf_vol.cpu().numpy()\n color_vol = self._color_vol.cpu().numpy()\n vol_origin = self._vol_origin.cpu().numpy()\n\n # Marching cubes\n verts = measure.marching_cubes_lewiner(tsdf_vol, level=0)[0]\n verts_ind = np.round(verts).astype(int)\n 
verts = verts*self._voxel_size + vol_origin\n\n # Get vertex colors\n rgb_vals = color_vol[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]]\n colors_b = np.floor(rgb_vals / self._const)\n colors_g = np.floor((rgb_vals - colors_b*self._const) / 256)\n colors_r = rgb_vals - colors_b*self._const - colors_g*256\n colors = np.floor(np.asarray([colors_r, colors_g, colors_b])).T\n colors = colors.astype(np.uint8)\n\n pc = np.hstack([verts, colors])\n return pc" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
bits_per_symbol(self) -> unsigned int
def bits_per_symbol(self): return _digital_swig.digital_constellation_bits_per_symbol(self)
[ "def bitness():\n pass", "def __int__(self):\n return self.bits", "def __len__(self):\n return self._bits", "def bits_per_register(cls) -> int:\n return cls._bits_per_register", "def max_symbols (self):\n \n raise NotImplementedError", "def symbol_type(self):\n return bool(self.current_token in JackTokenizer.symbols)", "def has_names(self):\n return self & (0b0000_0100 | 0b0000_1000)", "def symbol_count (self):\n \n raise NotImplementedError", "def is_special_symbol(self, symbol):\r\n i = len(symbol)\r\n return i in SPECIAL_SYMBOLS and symbol in SPECIAL_SYMBOLS[i]", "def atom_bits_extra(self):\n if self.pop_type == 16:\n atom_bits = self.atom_bits()\n assert atom_bits <= 9, \"Too many atom bits\"\n return max(atom_bits - 5, 0)\n else:\n return 0 # meaningless if pop_type != 16", "def check_symbols(self, symbol_map):\n\n raise NotImplementedError()", "def num_symbols(self):\r\n return self['sh_size'] // self['sh_entsize']", "def hook_IsSymbolicUInt(state, arg):\n return DeepManticore(state).api_is_symbolic_uint(arg)", "def num_bits(self):\n raise NotImplementedError", "def bitsize(x):\n return len(bin(x)) - 2", "def decodeBits(packets):\n raise SnmplibNotImplemented, \"SNMP BITS data type not implemented yet.\"", "def __repr__(self):\n if self.bit:\n return '1'\n else:\n return '0'", "def isSymbol(x):\n return type(x) == type(__empty_symbol)", "def bit_size(self):\n return type_get_bit_size(self)", "def is_int8(self) -> bool:\n return self.has_layer_of_type('FakeQuantize')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
apply_pre_diff_code(self) -> bool Whether to apply an encoding before doing differential encoding. (e.g. gray coding)
def apply_pre_diff_code(self): return _digital_swig.digital_constellation_apply_pre_diff_code(self)
[ "def pre_encode(fxn):\n unclaimed[fxn] = 'pre_encode'\n return fxn", "def preprocessing():", "def recode(self, new_encoding: dict):\n self.edges = set(map(lambda edge: edge.recode(self.states_encoding, new_encoding), self.edges))", "def apply_precoders(self, precoders, ref_sig, num_data_symb):\n freq_bin_data = zeros((self.num_ant, num_data_symb * self.num_data_bins), dtype=complex)\n for symb in range(num_data_symb):\n # print(symb)\n symb_start = symb * self.num_data_bins\n symb_end = symb_start + self.num_data_bins\n\n fbin_val = zeros((self.num_ant, self.num_data_bins), dtype=complex)\n for sb in range(self.num_subbands):\n precoder = precoders[symb, sb]\n\n sb_start = sb * self.subband_size\n sb_end = sb_start + self.subband_size\n\n fbin_val[:, sb_start: sb_end] = precoder #flag\n\n for fbin in range(self.num_data_bins):\n fbin_val[:, fbin] *= ref_sig[symb, fbin]\n\n freq_bin_data[:, symb_start: symb_end] = fbin_val\n dbg = 1\n return freq_bin_data", "def test_preds_before_and_after_convert_equal():\n init_alpha = 12.1\n pipeline = pipeline_with_custom_parameters(init_alpha)\n\n # Generate data\n input_data = get_synthetic_regression_data(n_samples=10, n_features=2,\n random_state=2021)\n # Init fit\n pipeline.fit(input_data)\n init_preds = pipeline.predict(input_data)\n\n # Convert into OptGraph object\n adapter = PipelineAdapter()\n opt_graph = adapter.adapt(pipeline)\n restored_pipeline = adapter.restore(opt_graph)\n\n # Restored pipeline fit\n restored_pipeline.fit(input_data)\n restored_preds = restored_pipeline.predict(input_data)\n\n assert np.array_equal(init_preds.predict, restored_preds.predict)", "def is_perform_preprocess(self) -> bool:\n pass", "def run_pre_hooks(self, dataframe: DataFrame) -> DataFrame:\n for hook in self.pre_hooks:\n dataframe = hook.run(dataframe)\n return dataframe", "def _pre_compile(self, content=None):\r\n pass", "def enable_pre_hooks(self) -> bool:\n return self.__enable_pre_hooks", "def apply_coder(text, coder):\n ### TODO.\n codedText = ''\n for char in text:\n if char in coder:\n codedText += coder[char]\n else:\n codedText += char\n return codedText", "def reencode_image(input_path, output_path):\n _transform_image(input_path, output_path, force_reencode=True)", "def hook_pre_trained(self, x):\n self.pre_trained = x", "def computeEncoding(model, imagePath):\n\t# load the input image and convert it from RGB (OpenCV ordering) to dlib ordering (RGB)\n\timage = cv2.imread(imagePath)\n\t# compute the embedding\n\tencoding = model.feed(image)\n\treturn(encoding)", "def _preprocess(self, problem):\n return self.preprocess.apply(problem) if self.preprocess is not None else problem", "def preprocessing(self):\n # type: () -> DolbyDigitalPreprocessing\n return self._preprocessing", "def preprocess(self, inp):\n file_obj = preprocessing_utils.decode_base64_to_wav_file(inp)\n if self.preprocessing == \"mfcc\":\n return preprocessing_utils.generate_mfcc_features_from_audio_file(file_obj.name)\n _, signal = scipy.io.wavfile.read(file_obj.name)\n return signal", "def set_precommit(c):\n c.run(\n 'cp githooks/pre-commit .git/hooks/pre-commit '\n '&& chmod +x .git/hooks/pre-commit'\n '&& git config --bool flake8.strict true',\n pty=True\n )", "def applyPreprocessing(self):\n try:\n self.load_data_btn.setEnabled(False)\n self.text_proc_groupbox.setEnabled(False)\n self.preprocess_text_btn.setEnabled(False)\n self.export_dataset_btn.setEnabled(False)\n self.comms.update_progressbar.emit(0, True)\n self.preproc_thread = 
PreprocessingThread(self.full_data[self.selected_columns],\n self.preprocessing_options)\n self.preproc_thread.preprocessing_complete.connect(\n self.update_data)\n self.comms.update_statusbar.emit(\n 'Preprocessing text. This may take several minutes.')\n self.preproc_thread.start()\n except Exception as e:\n self.logger.exception(\n \"Exception occured in PredictWidget.applyPreprocessing\", exc_info=True)\n tb = traceback.format_exc()\n print(tb)", "def compare_code(self, _code1, _code2):\n\t\tsm=difflib.SequenceMatcher(None,_code1,_code2,autojunk=False)\t\n\t\tr = sm.ratio()\n\t\treturn r", "def applyCoder(text, coder):\n newtext=\"\"\n for i in range(len(text)):\n if text[i].isalpha():\n newtext+=coder[text[i]]\n else:\n newtext+=text[i]\n return newtext" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set_pre_diff_code(self, bool a) Whether to apply an encoding before doing differential encoding. (e.g. gray coding)
def set_pre_diff_code(self, *args, **kwargs): return _digital_swig.digital_constellation_set_pre_diff_code(self, *args, **kwargs)
[ "def recode(self, new_encoding: dict):\n self.edges = set(map(lambda edge: edge.recode(self.states_encoding, new_encoding), self.edges))", "def pre_encode(fxn):\n unclaimed[fxn] = 'pre_encode'\n return fxn", "def set_precommit(c):\n c.run(\n 'cp githooks/pre-commit .git/hooks/pre-commit '\n '&& chmod +x .git/hooks/pre-commit'\n '&& git config --bool flake8.strict true',\n pty=True\n )", "def preprocessing():", "def set_pre_tokenizer(self, custom_pre_tokenizer: CPT):\n self.pre_tokenizer = PreTokenizer.custom(custom_pre_tokenizer)", "def hook_pre_trained(self, x):\n self.pre_trained = x", "def pre_arranged(self, pre_arranged):\n\n self._pre_arranged = pre_arranged", "def setPrediction(self,new_predition):\r\n \r\n \tself.prediction=new_predition", "def set_codes(self, codes, reject=False):\n\n self.codes = set(codes)\n self.reject = reject", "def _pre_compile(self, content=None):\r\n pass", "def update_code(self, new_code):\n self.code = new_code # code from __inti ___\n\n # Fill in the rest", "def test_preds_before_and_after_convert_equal():\n init_alpha = 12.1\n pipeline = pipeline_with_custom_parameters(init_alpha)\n\n # Generate data\n input_data = get_synthetic_regression_data(n_samples=10, n_features=2,\n random_state=2021)\n # Init fit\n pipeline.fit(input_data)\n init_preds = pipeline.predict(input_data)\n\n # Convert into OptGraph object\n adapter = PipelineAdapter()\n opt_graph = adapter.adapt(pipeline)\n restored_pipeline = adapter.restore(opt_graph)\n\n # Restored pipeline fit\n restored_pipeline.fit(input_data)\n restored_preds = restored_pipeline.predict(input_data)\n\n assert np.array_equal(init_preds.predict, restored_preds.predict)", "def preconstrain_flag_page(self):\n\n if not self._preconstrain_flag:\n return\n\n if self._magic_content is None:\n e_msg = \"Trying to preconstrain flag page without CGC magic content. \"\n e_msg += \"You should have set record_magic flag for Runner dynamic tracing. 
\"\n e_msg += \"For now, nothing will happen.\"\n l.warning(e_msg)\n return\n\n for b in range(0x1000):\n self._preconstrain(self._magic_content[b], self.state.cgc.flag_bytes[b])", "def setCode(self, c):\n\t\t\n\t\tself.code = c", "def pre(self, neoart=None):\n pass", "def update_code(self, new_code):\n\n # Fill in the rest\n self.code = new_code\n # print(self.code) #for checking\n return self.code", "def setup_label_coding(self, verbose=True, debug=False):\n all_labels = set()\n for _key in [*self.__class__.PUBLIC_SUBSETS, *self.__class__.PRIVATE_SUBSETS]:\n _df = self.dfs[_key]\n _found_labels = set(_df[\"label\"].tolist())\n all_labels = all_labels.union(_found_labels)\n\n # exclude ABSTAIN from self.classes, but include it in the encoding\n all_labels.discard(module_config.ABSTAIN_DECODED)\n self.classes = sorted(all_labels)\n self.label_encoder = {\n **{_label: _i for _i, _label in enumerate(self.classes)},\n module_config.ABSTAIN_DECODED: module_config.ABSTAIN_ENCODED,\n }\n self.label_decoder = {_v: _k for _k, _v in self.label_encoder.items()}\n\n if verbose:\n self._good(\n f\"Set up label encoder/decoder with {len(self.classes)} classes.\"\n )\n if debug:\n self.validate_labels()", "def setDoPreCompile(self, *args):\r\n return _osgDB.DatabasePager_setDoPreCompile(self, *args)", "def apply_coder(text, coder):\n ### TODO.\n codedText = ''\n for char in text:\n if char in coder:\n codedText += coder[char]\n else:\n codedText += char\n return codedText", "def run_pre_hooks(self, dataframe: DataFrame) -> DataFrame:\n for hook in self.pre_hooks:\n dataframe = hook.run(dataframe)\n return dataframe" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
pre_diff_code(self) -> unsigned_int_vector Returns the encoding to apply before differential encoding.
def pre_diff_code(self): return _digital_swig.digital_constellation_pre_diff_code(self)
[ "def get_coded_string(self):\n if not self._coded_string:\n self._coded_string = self._encode_string(self._input_string) \n return self._coded_string", "def pre_encode(fxn):\n unclaimed[fxn] = 'pre_encode'\n return fxn", "def encoding_table(self):\n return self._character_to_code.copy()", "def __pcone_to_be(self, pcone_code: list[str]) -> str:\n becode = \"\"\n csum = 0\n x = pcone_code.index(\"1\")\n for item in pcone_code[x + 1:] + pcone_code[: x + 1]:\n if item == \"0\":\n csum += 1\n else:\n becode += str(csum + 1)\n csum = 0\n return becode", "def _get_pre_fec_ber(self):\n return self.__pre_fec_ber", "def get_data_encoding():", "def samecodes(self):\n return self._samecodes", "def encode(self, iter: tp.List[str]) -> bytes:\n a = bitarray()\n for token in iter:\n code = self.get_code(token)\n if code is None:\n if self.unk_word is None:\n raise Exception(f\"unknown token {token} cannot be encoded.\")\n else:\n token = self.unk_word\n a = a + self.get_code(token)\n return self._pad(a).tobytes()", "def encode(seq, coding):\n output = []\n for x in seq:\n output.append(coding[x])\n return output", "def get_small_code(self, descriptor):\n v = descriptor.vector()\n z = numpy.dot(v - self._mean_vector, self._r)\n b = numpy.zeros(z.shape, dtype=numpy.uint8)\n b[z >= 0] = 1\n return v, b, bit_utils.bit_vector_to_int(b)", "def decode_vote_code(cls, code):\n assert(len(code) == 7)\n code_checked = code.lower()\n int25s = [cls.base25.index(c) for c in code]\n codable_int = 0\n for i in range(len(code)):\n codable_int = codable_int + (25**i)*int25s[i]\n return codable_int", "def code(self):\n data = self.data\n code = data.code\n return code", "def delta_encoding(inverted_index: list) -> list:\r\n for i in range(len(inverted_index)): # each term\r\n posting_list = inverted_index[i][3].copy()\r\n doc_freq = 1\r\n pre_docId = 0\r\n current_docId = 0\r\n docId = 0\r\n for j in range(inverted_index[i][1]): # each posting list\r\n docId = posting_list[j][0]\r\n if current_docId == 0: # first document, # no encoding for posting\r\n current_docId = posting_list[j][0] # docId (first) occurrence\r\n pre_docId = current_docId\r\n posting_list[j] = (current_docId, inverted_index[i][3][j][1])\r\n # inverted_index[i] = (inverted_index[i][0], inverted_index[i][1], inverted_index[i][2], posting_list)\r\n elif docId == current_docId: # same document, encode posting list\r\n posting_list[j] = (0, inverted_index[i][3][j][1] - inverted_index[i][3][j - 1][1]) # docId will encode\r\n # to 0, position will be position - pre_position\r\n elif docId != current_docId: # encode docId, position will remain same\r\n posting_list[j] = (docId - current_docId, inverted_index[i][3][j][1])\r\n current_docId = docId\r\n doc_freq += 1\r\n inverted_index[i] = (inverted_index[i][0], inverted_index[i][1], doc_freq, posting_list)\r\n return inverted_index", "def encode(self, orig, bpe_codes):\n\n word = tuple(orig) + ('</w>',)\n word_indices = [-1] * (len(word))\n pairs = self.get_pairs(word)\n\n while True:\n bigram = min(pairs, key=lambda pair: bpe_codes.get(pair, float('inf')))\n if bigram not in bpe_codes:\n break\n first, second = bigram\n new_word = []\n new_word_indices = []\n i = 0\n while i < len(word):\n try:\n j = word.index(first, i)\n new_word.extend(word[i:j])\n new_word_indices.extend(word_indices[i:j])\n i = j\n except:\n new_word.extend(word[i:])\n new_word_indices.extend(word_indices[i:])\n break\n\n if word[i] == first and i < len(word) - 1 and word[i + 1] == second:\n new_word.append(first+second)\n 
new_word_indices.append(bpe_codes[bigram])\n i += 2\n else:\n new_word.append(word[i])\n new_word_indices.append(word_indices[i])\n i += 1\n new_word = tuple(new_word)\n new_word_indices = tuple(new_word_indices)\n word = new_word\n word_indices = new_word_indices\n if len(word) == 1:\n break\n else:\n pairs = self.get_pairs(word)\n return word, word_indices", "def getLeftEncoder(self) -> wpilib.Encoder:\n return self.left_encoder", "def review_encode(string:str):\n encoded = [1]\n for word in string:\n if word.lower() in word_index:\n encoded.append(word_index[word.lower()])\n else:\n encoded.append(2)\n return encoded", "def recode(self, new_encoding: dict):\n self.edges = set(map(lambda edge: edge.recode(self.states_encoding, new_encoding), self.edges))", "def preprocessing(self):\n # type: () -> DolbyDigitalPreprocessing\n return self._preprocessing", "def _map_code(self, code):\r\n\r\n mapping = {'C': 'V', 'E': 'E', 'F': 'E', 'I': 'V', 'R': 'W', 'W': 'W'}\r\n return (mapping[code[0]], code[1:])", "def convert_preeti(self,paragraph):\n post_rex = self.preeti_post_rex\n paragraph = self.process_before_char_sub(paragraph) \n converted_par = '' # Huge bug found Fri Apr 5 00:07:45 EDT 2019, whas ' ' instead of ''\n # now do the char sub\n if paragraph != None: \n for char in paragraph:\n try:\n unicode_char = self.preeti_char_dict[char]\n converted_par += unicode_char\n except KeyError:\n try:\n extra_unicode_char = self.preeti_extra_dict[char]\n converted_par += extra_unicode_char\n except KeyError:\n converted_par += char\n # now postrex\n converted_par = self.sub_rex_array(converted_par,post_rex)\n return converted_par" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
rotational_symmetry(self) -> unsigned int Returns the order of rotational symmetry.
def rotational_symmetry(self): return _digital_swig.digital_constellation_rotational_symmetry(self)
[ "def get_symmetry_number(self):\n if self.symmetry_number < 1:\n cython.declare(resonanceHybrid=Molecule, maxSymmetryNum=cython.short)\n resonance_hybrid = self.get_resonance_hybrid()\n try:\n self.symmetry_number = resonance_hybrid.get_symmetry_number()\n except KeyError:\n logging.error('Wrong bond order generated by resonance hybrid.')\n logging.error('Resonance Hybrid: {}'.format(resonance_hybrid.to_adjacency_list()))\n for index, mol in enumerate(self.molecule):\n logging.error(\"Resonance Structure {}: {}\".format(index, mol.to_adjacency_list()))\n raise\n return self.symmetry_number", "def sym_z(self):\n return self._sym_z", "def symmetry_rotation(self, bond_to_rotate, normal_direction, angles):\n\t\tpass", "def use_symmetry(self):\n symmetry = self.params[PARAM_SYMMETRY]\n if symmetry is None:\n #Default to false if no parameter.\n return False\n else:\n return symmetry.use_symmetry", "def _derive_layout_symmetry(self):\n self._sym_df = None # Default option\n if self.exploit_layout_symmetry:\n # Check symmetry of bounds & turbine_weights\n if np.unique(self.minimum_yaw_angle, axis=0).shape[0] > 1:\n print(\"minimum_yaw_angle is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n if np.unique(self.maximum_yaw_angle, axis=0).shape[0] > 1:\n print(\"maximum_yaw_angle is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n if np.unique(self.maximum_yaw_angle, axis=0).shape[0] > 1:\n print(\"maximum_yaw_angle is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n if np.unique(self.turbine_weights, axis=0).shape[0] > 1:\n print(\"turbine_weights is not equal over wind directions.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n # Check if turbine_weights are consistently 1.0 everywhere\n if np.any(np.abs(self.turbine_weights - 1.0) > 0.001):\n print(\"turbine_weights are not uniformly 1.0.\")\n print(\"Exploiting of symmetry has been disabled.\")\n return\n\n x = self.fi.layout_x\n y = self.fi.layout_y\n df = find_layout_symmetry(x=x, y=y)\n\n # If no axes of symmetry, exit function\n if df.shape[0] <= 0:\n print(\"Wind farm layout in floris is not symmetrical.\")\n print(\"Exploitation of symmetry has been disabled.\")\n return\n\n wd_array = self.fi.floris.flow_field.wind_directions\n sym_step = df.iloc[0][\"wd_range\"][1]\n if ((0.0 not in wd_array) or(sym_step not in wd_array)):\n print(\"Floris wind direction array does not \" +\n \"intersect {:.1f} and {:.1f}.\".format(0.0, sym_step))\n print(\"Exploitation of symmetry has been disabled.\")\n return\n\n ids_minimal = (wd_array >= 0.0) & (wd_array < sym_step)\n wd_array_min = wd_array[ids_minimal]\n wd_array_remn = np.remainder(wd_array, sym_step)\n\n if not np.all([(x in wd_array_min) for x in wd_array_remn]):\n print(\"Wind direction array appears irregular.\")\n print(\"Exploitation of symmetry has been disabled.\")\n\n self._sym_mapping_extrap = np.array(\n [np.where(np.abs(x - wd_array_min) < 0.0001)[0][0]\n for x in wd_array_remn], dtype=int)\n\n self._sym_mapping_reduce = copy.deepcopy(ids_minimal)\n self._sym_df = df\n\n return", "def getSymmetryMatrix(*args, **kwargs):\n \n pass", "def is_symmetric(self):\n return self._alph1 == self._alph2 \\\n and np.array_equal(self._matrix, np.transpose(self._matrix))", "def rotationOrder(self):\n\t\treturn 0", "def is_symmetric(self):\n M = self.parent().realization_of().Monomial()\n return 
M(self).is_symmetric()", "def sym_y(self):\n return self._sym_y", "def test_get_symmetry_number(self):\n\n mol = Molecule().from_smiles('C')\n\n self.assertEquals(12, mol.get_symmetry_number())\n\n empty = Molecule()\n self.assertEquals(1, empty.get_symmetry_number())", "def symbology(self):\n\n\t\tif ARCMAP and self.layer_object.symbologyType == \"OTHER\":\n\t\t\traise NotSupportedError(\"Unsupported symbology type in ArcMap\")\n\n\t\treturn self.layer_object.symbology", "def _GetSymmetriesOnModes(symmetries, structure, pol_vects):\n\n # Get the vector of the displacement in the polarization\n m = np.tile(structure.get_masses_array(), (3,1)).T.ravel()\n disp_v = np.einsum(\"im,i->mi\", pol_vects, 1 / np.sqrt(m))\n underdisp_v = np.einsum(\"im,i->mi\", pol_vects, np.sqrt(m))\n\n n_dim, n_modes = np.shape(pol_vects)\n\n n_sym = len(symmetries)\n nat = structure.N_atoms\n \n # For each symmetry operation apply the\n pol_symmetries = np.zeros((n_sym, n_modes, n_modes), dtype = np.float64)\n for i, sym_mat in enumerate(symmetries):\n irt = GetIRT(structure, sym_mat)\n \n for j in range(n_modes):\n # Apply the i-th symmetry to the j-th mode\n new_vector = ApplySymmetryToVector(sym_mat, disp_v[j, :].reshape((nat, 3)), structure.unit_cell, irt).ravel()\n pol_symmetries[i, :, j] = underdisp_v.dot(new_vector.ravel())\n\n return pol_symmetries", "def orthogonalise_sym(vectors):\n ang = vec_angle(vectors[0],vectors[1])\n remainder = 90 - ang\n disp = remainder/2\n perp_unnormal = np.cross(vectors[0],vectors[1])\n normal = perp_unnormal / np.linalg.norm(perp_unnormal)\n\n rot_1 = rotation_matrix(normal,-disp)\n rot_2 = rotation_matrix(normal,disp)\n\n ovec_1 = np.dot(rot_1,vectors[0])\n ovec_2 = np.dot(rot_2,vectors[1])\n\n o_vecs = np.array([ovec_1,ovec_2])\n return o_vecs", "def rotation_coefs(self):\n return [np.cos(self.bearing_rads),\n np.sin(self.bearing_rads),\n -1.0*np.sin(self.bearing_rads),\n np.cos(self.bearing_rads)]", "def getRawSymmetryMatrix(*args, **kwargs):\n \n pass", "def GetSymmetryMatrix(sym, structure, crystal = False):\n\n # Get the IRT array\n irt = GetIRT(structure, sym)\n\n nat = structure.N_atoms\n sym_mat = np.zeros((3 * nat, 3*nat), dtype = np.double)\n\n # Comvert the symmetry matrix in cartesian\n if not crystal:\n sym_cryst = Methods.convert_matrix_cart_cryst2(sym[:,:3], structure.unit_cell, cryst_to_cart = True)\n else:\n sym_cryst = sym[:,:3]\n\n # Correctly fill the atomic position of sym_mat\n for i in range(nat):\n i_irt = irt[i]\n sym_mat[3 * i_irt : 3*i_irt+3, 3*i : 3*i+ 3] = sym_cryst\n\n return sym_mat", "def is_symmetrical(self):\r\n\r\n if (not self.is_square()):\r\n raise ErrorInMatrix('This matrix is not square')\r\n else:\r\n for i in range(self.row):\r\n for j in range(self.col):\r\n if self.mat[i][j] != self.mat[j][i]:\r\n return False\r\n else:\r\n return True", "def custom_score_symmetry(game, player):\n if game.is_winner(player):\n return float(\"inf\")\n\n if game.is_loser(player):\n return float(\"-inf\")\n\n # for odd moves, examine if symmetry could be broken\n if (game.move_count % 2 == 1 and # second player's turn\n game.__active_player__ == player): # we're up - we went second\n # for future moves, consider if we can copy\n for move in game.get_legal_moves(game.__active_player__):\n if is_symmetric(game.forecast_move(move)):\n # symmetry can be maintained, this is a good state for 2nd player\n return 100\n\n # return 100 if we're second and can copy the opponent's move\n if (game.move_count % 2 == 0 and # our move followed our opponent\n 
game.__active_player__ != player and # it's the opponent's move\n is_symmetric(game)): # we made the board symmetric\n return 100\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n\n own_pos = game.get_player_location(player)\n opp_pos = game.get_player_location(game.get_opponent(player))\n\n return float(own_moves - opp_moves)", "def retr_symmetry_operations(struct,ini):\n ini[\"symgen\"] = struct.get_symmetry_operations()\n return ini" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
dimensionality(self) -> unsigned int Returns the number of complex numbers in a single symbol.
def dimensionality(self): return _digital_swig.digital_constellation_dimensionality(self)
[ "def cardinality(self):\n from sage.rings.all import ZZ\n return ZZ.prod(self.degrees())", "def _is_complex(input):\n return input.shape[-1] == 2", "def complex_type(self):\n return self._complex_type", "def complex_frequencies(self):\n return self._get_frequencies(cplx=True)", "def dimensionality(self):\n return base_pb2.Nature.Name(self._message.dimensionality).lower()", "def __len__(self):\n return len(self.__matrix)", "def num_symbols(self):\r\n return self['sh_size'] // self['sh_entsize']", "def n_coeffs(self):\n return self.memory_depth * self.n_rows", "def iscomplex(self):\n return np.any(np.iscomplex(self.data))\n # return np.iscomplexobj(self._data)", "def ndim_meas(self):\n return 1", "def __len__(self):\r\n return len(self.variables.values()[0]) # assumes all vars in this dim have the same number of conditions\r", "def dim(self):\n return self._dim", "def dimension(self, monomial_ideal):\n frobby_input = self._ideal_to_string(monomial_ideal)\n frobby_output = self('dimension', input=frobby_input)\n return int(frobby_output)", "def get_imag(self) -> float:\n if self.is_complex():\n for component in (self.i, self.j, self.k):\n if component != 0.0:\n return component\n elif self.is_scalar():\n return 0.0\n else:\n return None", "def get_num_analogs(self):\n return self.shape[1]", "def size(self) -> \"unsigned int\":\n return _vnl_diag_matrixPython.vnl_diag_matrixSI_size(self)", "def size(self) -> \"unsigned int\":\n return _vnl_diag_matrixPython.vnl_diag_matrixCF_size(self)", "def getNumOfOctaves(self) -> retval:\n ...", "def mat_size(self):\n\n # Length of the linear array\n l = self.size\n\n # Total number of elements in the corresponding bi-dimensional symmetric matrix\n n = int((1 + math.sqrt(1 + 8 * l)) / 2)\n\n return n", "def num_dimensions(self):\n return self.numDim.value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__init__(self) -> digital_constellation_calcdist_sptr __init__(self, p) -> digital_constellation_calcdist_sptr
def __init__(self, *args): this = _digital_swig.new_digital_constellation_calcdist_sptr(*args) try: self.this.append(this) except: self.this = this
[ "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_phasor_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n this = _digital_swig.new_digital_pfb_clock_sync_ccf_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\r\n\r\n super(ElapsedTime, self).__init__()\r\n\r\n # Define private dictionary attributes.\r\n\r\n # Define private list attributes.\r\n self._lambdab_count = []\r\n\r\n # Define private scalar attributes.\r\n\r\n # Define public dictionary attributes.\r\n\r\n # Define public list attributes.\r\n\r\n # Define public scalar attributes.\r\n self.piT = 0.0 # Temperature stress pi factor.\r", "def __init__(self, p, i, d, get_current_time, get_feedback_value):\r\n # p, i, and d constants\r\n self.p, self.i, self.d = p, i, d\r\n\r\n # saves the functions that return the time and the feedback\r\n self.get_current_time = get_current_time\r\n self.get_feedback_value = get_feedback_value", "def __init__(self, *args):\n _itkImagePython.vectoritkImageCD2_swiginit(self, _itkImagePython.new_vectoritkImageCD2(*args))", "def __init__(self, data, datetimes=None, **kwargs):\n self.input = CygnssSubsection(data, **kwargs)\n self.ws = self.input.ws[self.input.good]\n self.ws_yslf_nbrcs = self.input.ws_yslf_nbrcs[self.input.good]\n self.ws_yslf_les = self.input.ws_yslf_les[self.input.good]\n self.lon = self.input.lon[self.input.good]\n self.lat = self.input.lat[self.input.good]\n self.rcg = self.input.rcg[self.input.good]\n self.antenna = self.input.antenna[self.input.good]\n self.prn = self.input.gps[self.input.good]\n self.sat = self.input.cygnum[self.input.good]\n if datetimes is None:\n dts = get_datetime(data)\n else:\n dts = datetimes\n self.datetimes = dts[self.input.good]\n sod = []\n for dt1 in self.datetimes:\n sod.append((dt1 - dt.datetime(\n self.datetimes[0].year, self.datetimes[0].month,\n self.datetimes[0].day)).total_seconds())\n self.sod = np.array(sod)", "def __init__(self, *args):\n this = _digital_swig.new_digital_pn_correlator_cc_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, tensor_rep):\n super(ComponentPlotCPD, self).__init__(tensor_rep=tensor_rep)", "def __init__(self):\r\n\r\n super(Panel, self).__init__()\r\n\r\n # Define private dictionary attributes.\r\n\r\n # Define private list attributes.\r\n self._lambdab_count = []\r\n\r\n # Define private scalar attributes.\r\n\r\n # Define public dictionary attributes.\r\n\r\n # Define public list attributes.\r\n\r\n # Define public scalar attributes.\r\n self.quality = 0\r\n self.q_override = 0.0\r\n self.function = 0\r\n self.piA = 0.0\r\n self.piF = 0.0\r\n self.piQ = 0.0", "def __init__(self, *args):\n this = _digital_swig.new_digital_map_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self,riemann_solver=None,claw_package=None):\n\n self.iso_c_step1 = iso_c_step1()\n\n\n super(ISO_C_ClawSolver1D,self).__init__(riemann_solver,claw_package)", "def __init__(self):\n self.port=Config.PortPrinter # Assign the name of the port written in Config.py to self.port\n self.FirstMove=0 # Variable wich allow us to know if this is the first movement of the 3d-mill\n self.Coord={} # Create a dictionnary\n self.cnc=CNC(self.port) # Call the class CNC\n self.cnc.OpenConnection() # Open the Connection with the device\n self.NbWells=0 # Count the number of wells \n Wells.Wells_1(self)", "def __init__(self, *args):\n 
_itkOptimizerParametersPython.itkOptimizerParametersD_swiginit(self, _itkOptimizerParametersPython.new_itkOptimizerParametersD(*args))", "def __init__(self, params: parameters_lib.SwirlLMParameters):\n super(ConstantDensity, self).__init__(params)\n\n self.rho = params.rho", "def __init__(self, coeff):\n self.coeff = coeff", "def __init__(self):\n \n self.enh_lib = enhancement\n self.enh = None \n\n self.height = 1.e-2\n # height (m) of coolant duct\n self.mdot = 1.0\n # mass flow rate (kg/s) of coolant\n self.ducts = 2 # number of coolant ducts per hot duct\n self.geometry = 'parallel plates'\n self.c_p = 4.179\n # Specific heat (kJ/kg*K) of water at 325K \n self.mu = 5.3e-4\n # viscosity of water at 325K (Pa*s), WolframAlpha\n self.k = 0.646e-3\n # thermal conductivity of water at 325K (kW/m*K) through\n # cooling duct \n self.Pr = (7.01 + 5.43)/2 # Prandtl # of water from Engineering\n # Toolbox\n self.rho = 1000.\n # density (kg/m**3) of water\n self.Nu_coeff = 0.023\n self.enthalpy0 = 113.25\n # enthalpy (kJ/kg) of coolant at restricted dead state\n self.entropy0 = 0.437\n # entropy (kJ/kg*K) of coolant at restricted dead state\n self.sides = 1\n \n functions.bind_functions(self)", "def __init__(self, *args):\n this = _digital_swig.new_digital_diff_decoder_bb_sptr(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *args):\n _itkOptimizerParametersPython.itkOptimizerParametersHelperD_swiginit(self, _itkOptimizerParametersPython.new_itkOptimizerParametersHelperD(*args))", "def __init__(self, *args):\n _itkImagePython.vectoritkImageCD3_swiginit(self, _itkImagePython.new_vectoritkImageCD3(*args))", "def __init__(self, constness):\r\n FilterBase.__init__(self)\r\n self.constness = constness" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
map_to_points_v(self, unsigned int value) -> gr_complex_vector
def map_to_points_v(self, *args, **kwargs): return _digital_swig.digital_constellation_calcdist_sptr_map_to_points_v(self, *args, **kwargs)
[ "def pointvectors(self):\n return np.stack([self.x, self.y], axis=-1)", "def project_complex_point_to_canvas(self, z):\n return self.project_point_to_canvas([ z.real, z.imag ])", "def point_at(self, u, v):\n point = self.rhino_surface.PointAt(u, v)\n return point_to_compas(point)", "def coordinate_vector(self, v):\n\n raise NotImplementedError", "def AddPoints(self, points, allowdups = False):\n\n vmap = [0] * len(points.pos)\n for i in range(len(points.pos)):\n vmap[i] = self.AddPoint(points.pos[i], allowdups)\n return vmap", "def vec2mapcoords(v, to_crs=None, from_crs=None):\n to_crs = to_crs or ccrs.Geodetic(globe=unit_sphere)\n from_crs = from_crs or to_crs.as_geocentric()\n v = np.asanyarray(v)\n ret = to_crs.transform_points(from_crs, x=v[:,0], y=v[:,1], z=v[:,2])\n return ret", "def transform(self, points, func):\n pts = np.zeros(points.shape, points.dtype)\n for i, r in enumerate(points):\n pts[i, :] = func(r)\n return pts", "def pick_points_on_shape(self):\r\n a = self.a \r\n N = 81 # number of vertices\r\n t = np.linspace(-4,4,N)\r\n verts = np.zeros((N,2))\r\n verts[:,0] = a*(np.abs(t))**3 - 1.0\r\n verts[:,1] = t\r\n return t, verts", "def get_camera_points(self, fov, min_depth, max_depth, distance_steps):\n \n # Hard-coded camera values - about 50 points \n # This results in a hard-coded set of points, which are visible in the frame of the uav\n point_list = []\n max_angle = 0.50*fov \n min_angle = (0.50*fov)*-1\n depth_range = max_depth-min_depth\n dist_step_size = (max_depth-min_depth)/distance_steps\n resolution = dist_step_size\n \n for dist in np.arange(min_depth, max_depth+dist_step_size/2, dist_step_size):\n # to reduce the number of points per arc\n no_of_points = np.ceil((dist * fov / resolution)) \n angle_step_size = fov/no_of_points\n if no_of_points > 1:\n angle_step_size_lower = (max_angle-min_angle)/(no_of_points-1)\n angle_step_size_upper = (max_angle-min_angle)/(no_of_points+1)\n step_sizes = (angle_step_size_lower, angle_step_size, angle_step_size_upper)\n candidates = ((angle_step_size_lower-resolution)**2, (angle_step_size-resolution)**2, (angle_step_size_upper-resolution)**2)\n angle_step_size = step_sizes[np.argmin(candidates)]\n\n for angle in np.arange(min_angle,max_angle+angle_step_size/2,angle_step_size):\n x = dist * math.cos(angle) \n y = dist * math.sin(angle)\n point = (x, y, 1)\n point_list.append(point)\n \n point_array = np.asarray(point_list)\n return point_array", "def from_vector_coord(size, point):\n return complex(\n point[0]/size[0] - 1,\n point[1]/size[1] - 1\n )", "def getPointsAtUV(*args, **kwargs):\n \n pass", "def convert_to_points(b):\n return np.array(zip(list(b[0]), list(b[1])))", "def points(self):\n if self._points is None:\n _points = self.soma.points.tolist()\n for n in self.neurites:\n _points.extend(n.points.tolist())\n self._points = np.array(_points)\n\n return self._points", "def points(self):\n return [self.point1, self.point2]", "def pix_coords(\n points:list,\n window:pygs.Window,\n pcsys:dict = pcsys\n ) -> list:\n return [pix_coord(point, window, pcsys) for point in points]", "def getArticulationPoints(self) -> List[java.awt.geom.Point2D]:\n ...", "def collocation_points(self) -> np.ndarray:", "def render_points(self, point_list):\n (height, width, channel) = self.map_image.shape\n self.width_ratio = width / float(self.map_width)\n self.height_ratio = height / float(self.map_height)\n if isinstance(self.output, type(None)):\n self.output = self.map_image.copy()\n for point_x, point_y, color in point_list:\n 
cv2.circle(self.output, (int(point_x * self.width_ratio), int(point_y * self.height_ratio)), 3, color, 3)", "def point_cloud_to_panorama(points, v_res=0.42, h_res=0.35, v_fov=(-24.9, 2.0),\n d_range=(0, 100), y_fudge=3, side_range=(-20., 20.),\n fwd_range=(0.,40), height_range=(-2, 0.4)):\n # side_range = (-30., 30.)\n # fwd_range = (0., 60)\n # height_range = (-2, 0.4) #\n xi_points = points[:, 0]\n yi_points = points[:, 1]\n zi_points = points[:, 2]\n reflectance = points[:, 3]\n\n f_filt = np.logical_and(\n (xi_points > fwd_range[0]), (xi_points < fwd_range[1]))\n s_filt = np.logical_and(\n (yi_points > -side_range[1]), (yi_points < -side_range[0]))\n filter = np.logical_and(f_filt, s_filt)\n z_filt = np.logical_and((zi_points >= height_range[0]),\n (zi_points < height_range[1]))\n zfilter = np.logical_and(filter, z_filt)\n indices = np.argwhere(zfilter).flatten()\n print 'indice size'\n print indices.size\n\n x_points = xi_points[indices]\n print 'xi_points'\n print x_points\n y_points = yi_points[indices]\n z_points = zi_points[indices]\n r_points = reflectance[indices]\n r_max = max(r_points)\n z_max = max(z_points)\n r_min = min(r_points)\n z_min = min(z_points)\n\n # Projecting to 2D\n # x_points = points[:, 0]\n # y_points = points[:, 1]\n # z_points = points[:, 2]\n # r_points = points[:, 3]\n\n # d_points = np.sqrt(x_points ** 2 + y_points ** 2) # map distance relative to origin\n # print 'd_points size', len(d_points)\n d_points = np.sqrt(x_points ** 2 + y_points ** 2 + z_points ** 2) # abs distance\n # d_points = r_points\n # d_points = z_points\n\n # d_points = np.zeros(indices.size)\n # for i in range(indices.size):\n # d_points[i] = z_points[i]\n\n # We use map distance, because otherwise it would not project onto a cylinder,\n # instead, it would map onto a segment of slice of a sphere.\n\n # RESOLUTION AND FIELD OF VIEW SETTINGS\n v_fov_total = -v_fov[0] + v_fov[1]\n\n # CONVERT TO RADIANS\n v_res_rad = v_res * (np.pi / 180)\n h_res_rad = h_res * (np.pi / 180)\n\n # MAPPING TO CYLINDER\n de_points = np.sqrt(x_points ** 2 + y_points ** 2)\n x_img = np.arctan2(y_points, x_points) / h_res_rad\n y_img = -(np.arctan2(z_points, de_points) / v_res_rad)\n\n # THEORETICAL MAX HEIGHT FOR IMAGE\n d_plane = (v_fov_total / v_res) / (v_fov_total * (np.pi / 180))\n h_below = d_plane * np.tan(-v_fov[0] * (np.pi / 180))\n h_above = d_plane * np.tan(v_fov[1] * (np.pi / 180))\n y_max = int(np.ceil(h_below + h_above + y_fudge))\n\n # SHIFT COORDINATES TO MAKE 0,0 THE MINIMUM\n x_min = -180.0 / h_res / 2\n x_img = np.trunc(-x_img - x_min).astype(np.int32)\n x_max = int(np.ceil(180.0 / h_res))\n\n y_min = -((v_fov[1] / v_res) + y_fudge)\n y_img = np.trunc(y_img - y_min).astype(np.int32)\n\n # CLIP DISTANCES\n d_points = np.clip(d_points, a_min=d_range[0], a_max=d_range[1])\n\n # CONVERT TO IMAGE ARRAY\n img = np.ones([y_max + 1, x_max + 1, 3], dtype=np.uint8)*255\n distance = np.sqrt(x_points ** 2 + y_points ** 2 + z_points ** 2)\n dis_max = max(distance)\n dis_min = min(distance)\n img[y_img, x_img, 0] = scale_to_255(distance, min=dis_min, max=dis_max)\n img[y_img, x_img, 1] = scale_to_255(z_points, min=z_min, max=z_max)\n img[y_img, x_img, 2] = scale_to_255(r_points, min=r_min, max=r_max)\n return img", "def extract_point_cloud(self):\n tsdf_vol = self._tsdf_vol.cpu().numpy()\n color_vol = self._color_vol.cpu().numpy()\n vol_origin = self._vol_origin.cpu().numpy()\n\n # Marching cubes\n verts = measure.marching_cubes_lewiner(tsdf_vol, level=0)[0]\n verts_ind = np.round(verts).astype(int)\n 
verts = verts*self._voxel_size + vol_origin\n\n # Get vertex colors\n rgb_vals = color_vol[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]]\n colors_b = np.floor(rgb_vals / self._const)\n colors_g = np.floor((rgb_vals - colors_b*self._const) / 256)\n colors_r = rgb_vals - colors_b*self._const - colors_g*256\n colors = np.floor(np.asarray([colors_r, colors_g, colors_b])).T\n colors = colors.astype(np.uint8)\n\n pc = np.hstack([verts, colors])\n return pc" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }